filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses, 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses, 3 values) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0-0, ⌀) | sentence (stringclasses, 1 value)
---|---|---|---|---|---|---|---|---|---|---
test/integration/dss/test_dss_api.py | #!/usr/bin/env python
# coding: utf-8
import errno
from concurrent.futures import ThreadPoolExecutor
import datetime
import filecmp
from dbio.util import tsv
import itertools
import os
import sys
import tempfile
import uuid
import unittest
import unittest.mock
from fnmatch import fnmatchcase
import boto3
pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')) # noqa
sys.path.insert(0, pkg_root) # noqa
import dbio.dss
from dbio.dss.util import iter_paths, object_name_builder, check_s3_bucket_exists
from test import reset_tweak_changes, TEST_DIR
from . import TEST_BUCKET_NAME
@unittest.skipUnless(check_s3_bucket_exists(TEST_BUCKET_NAME), "Test bucket does not exist")
class TestDssApi(unittest.TestCase):
staging_bucket = TEST_BUCKET_NAME
@classmethod
def setUpClass(cls):
cls.client = dbio.dss.DSSClient()
def test_set_host(self):
with tempfile.TemporaryDirectory() as home:
with unittest.mock.patch.dict(os.environ, HOME=home):
dev = dbio.dss.DSSClient(
swagger_url="https://dss.dev.ucsc-cgp-redwood.org/v1/swagger.json")
self.assertEqual("dss.dev.ucsc-cgp-redwood.org", dev._swagger_spec['host'])
def test_set_host_multithreaded(self):
num_repeats = 10
num_threads = 2
for repeat in range(num_repeats):
with self.subTest(repeat=repeat):
with tempfile.TemporaryDirectory() as config_dir:
with unittest.mock.patch.dict(os.environ, XDG_CONFIG_HOME=config_dir):
def f(_):
dev = dbio.dss.DSSClient(
swagger_url="https://dss.dev.ucsc-cgp-redwood.org/v1/swagger.json")
self.assertEqual('dss.dev.ucsc-cgp-redwood.org', dev._swagger_spec['host'])
with ThreadPoolExecutor(num_threads) as tpe:
self.assertTrue(all(x is None for x in tpe.map(f, range(num_threads))))
@unittest.skipIf(os.name == 'nt', 'Unable to test on Windows')  # TODO windows testing refactor
def test_python_nested_bundle_upload_download(self):
bundle_path = os.path.join(TEST_DIR, "upload", "data")
uploaded_paths = [x.path for x in iter_paths(str(bundle_path))]
uploaded_files = [object_name_builder(p, bundle_path) for p in uploaded_paths]
client = dbio.dss.DSSClient(swagger_url="https://dss.dev.ucsc-cgp-redwood.org/v1/swagger.json")
manifest = client.upload(src_dir=bundle_path,
replica="aws",
staging_bucket=self.staging_bucket)
manifest_files = manifest['files']
self.assertEqual(sorted(file['name'] for file in manifest_files), sorted(uploaded_files))
bundle_uuid = manifest['bundle_uuid']
with self.subTest(bundle_uuid=bundle_uuid):
with tempfile.TemporaryDirectory() as dest_dir:
client.download(bundle_uuid=bundle_uuid, replica='aws', download_dir=dest_dir)
downloaded_file_names = [x.path for x in iter_paths(dest_dir)]
downloaded_file_paths = [object_name_builder(p, dest_dir) for p in downloaded_file_names]
self.assertEqual(sorted(uploaded_files), sorted(downloaded_file_paths))
def test_python_upload_download(self):
bundle_path = os.path.join(TEST_DIR, "res", "bundle")
uploaded_files = set(os.listdir(bundle_path))
manifest = self.client.upload(src_dir=bundle_path,
replica="aws",
staging_bucket=self.staging_bucket)
manifest_files = manifest['files']
bundle_fqid = manifest['bundle_uuid'] + '.' + manifest['version']
self.assertEqual({file['name'] for file in manifest_files}, uploaded_files)
# Work around https://github.com/HumanCellAtlas/data-store/issues/1331
for file in manifest_files:
file['indexed'] = file['name'].endswith('.json')
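# Exercise download filtering with every combination of metadata and data glob
# patterns, from globs that match nothing to globs that match every file.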
for metadata_globs in (), ('',), ('*',), ('a[s][s]ay.json',):
for data_globs in (), ('',), ('*',), ('*_1.fastq.gz',):
with self.subTest(metadata_files=metadata_globs, data_files=data_globs):
bundle_uuid = manifest['bundle_uuid']
expect_downloaded_files = {
file['name'] for file in manifest_files
if any(fnmatchcase(file['name'], glob)
for glob in (metadata_globs if file['indexed'] else data_globs))}
if '*' in metadata_globs and '*' in data_globs:
# In the test case where we download all files, add another wrinkle to the test: Upload a new
# version of one of the metadata files. That new file version is not referenced by any
# bundle. The subsequent download should not be affected by that new version since the bundle
# still refers to the old version.
file1, file2 = itertools.islice((f for f in manifest_files if f['name'].endswith('.json')), 2)
file_version = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H%M%S.%fZ")
source_url = "s3://{}/{}/{}".format(self.staging_bucket, file2['uuid'], file2['name'])
self.client.put_file(uuid=file1['uuid'],
version=file_version,
creator_uid=1,
bundle_uuid=bundle_uuid,
source_url=source_url)
with tempfile.TemporaryDirectory() as dest_dir:
self.client.download(bundle_uuid=bundle_uuid,
download_dir=dest_dir,
replica="aws",
data_filter=data_globs,
metadata_filter=metadata_globs)
# Check that contents are the same
try:
downloaded_files = set(os.listdir(os.path.join(dest_dir, bundle_fqid)))
except OSError as e:
if e.errno != errno.ENOENT:
raise
downloaded_files = set()
if '.dbio' in downloaded_files:
# Since we set the download_dir for download, .dbio dir will appear,
# but only if globs are non-empty
assert not all(glob in [(), ('',)] for glob in [metadata_globs, data_globs])
downloaded_files.remove('.dbio')
downloaded_files.remove('bundle.json')
self.assertEqual(expect_downloaded_files, downloaded_files)
for file in downloaded_files:
manifest_entry = next(entry for entry in manifest['files'] if entry['name'] == file)
globs = metadata_globs if manifest_entry['indexed'] else data_globs
self.assertTrue(any(fnmatchcase(file, glob) for glob in globs))
uploaded_file = os.path.join(bundle_path, file)
downloaded_file = os.path.join(dest_dir, bundle_fqid, file)
self.assertTrue(filecmp.cmp(uploaded_file, downloaded_file, False))
def test_python_manifest_download(self):
bundle_path = os.path.join(TEST_DIR, "res", "bundle")
uploaded_files = set(os.listdir(bundle_path))
manifest = self.client.upload(src_dir=bundle_path,
replica="aws",
staging_bucket=self.staging_bucket)
manifest_files = manifest['files']
self.assertEqual({file['name'] for file in manifest_files}, uploaded_files)
# Work around https://github.com/HumanCellAtlas/data-store/issues/1331
for file in manifest_files:
file['indexed'] = file['name'].endswith('.json')
bundle_uuid = manifest['bundle_uuid']
bundle_version = manifest['version']
bundle_fqid = bundle_uuid + '.' + bundle_version
data_files = tuple(file['name'] for file in manifest_files if not file['indexed'])
for bad_bundle in False, True:
with self.subTest(bad_bundle=bad_bundle):
with tempfile.TemporaryDirectory() as work_dir:
cwd = os.getcwd()
os.chdir(work_dir)
try:
with open('manifest.tsv', 'w', newline='') as manifest:
writer = tsv.DictWriter(manifest,
fieldnames=('bundle_uuid',
'bundle_version',
'file_name',
'file_sha256'))
writer.writeheader()
writer.writerow(dict(bundle_uuid=bundle_uuid,
bundle_version=bundle_version,
file_name=data_files[0],
file_sha256=
'9b4c0dde8683f924975d0867903dc7a9'
'67f46bee5c0a025c451b9ba73e43f120'))
if bad_bundle:
writer.writerow(dict(bundle_uuid=str(uuid.uuid4()),
bundle_version=bundle_version,
file_name=data_files[0],
file_sha256=
'9b4c0dde8683f924975d0867903dc7a9'
'67f46bee5c0a025c451b9ba73e43f120'))
dest_dir = os.path.join(work_dir, bundle_fqid)
try:
self.client.download_manifest('manifest.tsv', replica="aws", layout='bundle')
except RuntimeError as e:
self.assertTrue(bad_bundle, "Should only raise with a bad bundle in the manifest")
self.assertEqual('1 download task(s) failed.', e.args[0])
else:
self.assertFalse(bad_bundle)
for file in manifest_files:
uploaded_file = os.path.join(bundle_path, file['name'])
downloaded_file = os.path.join(dest_dir, file['name'])
if file['indexed'] or file['name'] == data_files[0]:
self.assertTrue(filecmp.cmp(uploaded_file, downloaded_file, False))
else:
self.assertTrue(os.path.exists(uploaded_file))
self.assertFalse(os.path.exists(downloaded_file))
finally:
os.chdir(cwd)
@unittest.skipIf(os.name == 'nt', 'Unable to test on Windows')  # TODO windows testing refactor
def test_python_upload_lg_file(self):
with tempfile.TemporaryDirectory() as src_dir, tempfile.TemporaryDirectory() as dest_dir:
with tempfile.NamedTemporaryFile(dir=src_dir, suffix=".bin") as fh:
fh.write(os.urandom(64 * 1024 * 1024 + 1))
fh.flush()
client = dbio.dss.DSSClient()
bundle_output = client.upload(src_dir=src_dir, replica="aws", staging_bucket=self.staging_bucket)
client.download(bundle_output['bundle_uuid'], replica="aws", download_dir=dest_dir)
bundle_fqid = bundle_output['bundle_uuid'] + '.' + bundle_output['version']
downloaded_file = os.path.join(dest_dir, bundle_fqid, os.path.basename(fh.name))
self.assertTrue(filecmp.cmp(fh.name, downloaded_file, False))
def test_python_bindings(self):
bundle_path = os.path.join(TEST_DIR, "res", "bundle")
bundle_output = self.client.upload(src_dir=bundle_path, replica="aws", staging_bucket=self.staging_bucket)
bundle_uuid = bundle_output['bundle_uuid']
with tempfile.TemporaryDirectory() as dest_dir:
self.client.download(bundle_uuid=bundle_output['bundle_uuid'], replica="aws", download_dir=dest_dir)
# Test get-files and head-files
file_ = bundle_output['files'][0]
with self.client.get_file.stream(uuid=file_['uuid'], replica="aws") as fh:
while True:
chunk = fh.raw.read(1024)
if chunk == b"":
break
self.assertTrue(self.client.head_file(uuid=file_['uuid'], replica="aws").ok)
# Test get-bundles
res = self.client.get_bundle(uuid=bundle_uuid, replica="aws")
self.assertEqual(res["bundle"]["uuid"], bundle_uuid)
# Test put-files
file_uuid = str(uuid.uuid4())
file_version = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H%M%S.%fZ")
bundle_uuid = str(uuid.uuid4())
source_url = "s3://{}/{}/{}".format(self.staging_bucket, file_['uuid'], file_['name'])
res = self.client.put_file(uuid=file_uuid, creator_uid=1, bundle_uuid=bundle_uuid,
version=file_version, source_url=source_url)
# Test put-bundles
files = [{'indexed': True,
'name': file_['name'],
'uuid': file_uuid,
'version': res['version']}]
res = self.client.put_bundle(uuid=bundle_uuid, files=files, version=file_version, creator_uid=1, replica="aws")
self.assertEqual(res["version"], file_version)
with self.assertRaisesRegexp(Exception, "Missing query parameter 'replica'"):
res = self.client.put_bundle(uuid=bundle_uuid, files=[], version=file_version, creator_uid=1)
def test_python_subscriptions(self):
query = {'bool': {}}
resp = self.client.put_subscription(es_query=query,
callback_url="https://www.test_python_subscriptions.dss.databiosphere.org",
replica="aws")
subscription_uuid = resp['uuid']
resp = self.client.get_subscriptions(replica="aws", subscription_type='elasticsearch')
self.assertTrue(subscription_uuid in [s['uuid'] for s in resp['subscriptions']],
str(subscription_uuid) + ' not found in:\n' + str(resp))
# GET /subscriptions does not support pagination
with self.assertRaises(AttributeError):
self.client.get_subscriptions.iterate(replica="aws", subscription_type='elasticsearch')
resp = self.client.get_subscription(replica="aws", uuid=subscription_uuid, subscription_type='elasticsearch')
self.assertEqual(subscription_uuid, resp['uuid'])
resp = self.client.delete_subscription(uuid=subscription_uuid, replica="aws", subscription_type='elasticsearch')
self.assertIn('timeDeleted', resp)
with self.assertRaisesRegexp(Exception, "Cannot find subscription!"):
resp = self.client.get_subscription(replica="aws", uuid=subscription_uuid, subscription_type='elasticsearch')
# Test subscriptions version 2 (jmespath subscriptions)
resp = self.client.put_subscription(callback_url="https://www.test_python_subscriptions.dss.databiosphere.org",
replica="aws")
subscription_uuid = resp['uuid']
resp = self.client.get_subscriptions(replica="aws", subscription_type="jmespath")
self.assertTrue(subscription_uuid in [s['uuid'] for s in resp['subscriptions']])
# GET /subscriptions does not support pagination
with self.assertRaises(AttributeError):
self.client.get_subscriptions.iterate(replica="aws", subscription_type="jmespath")
resp = self.client.get_subscription(replica="aws", subscription_type="jmespath", uuid=subscription_uuid)
self.assertEqual(subscription_uuid, resp['uuid'])
resp = self.client.delete_subscription(uuid=subscription_uuid, subscription_type="jmespath", replica="aws")
self.assertIn('timeDeleted', resp)
with self.assertRaisesRegexp(Exception, "Cannot find subscription!"):
resp = self.client.get_subscription(replica="aws", subscription_type="jmespath", uuid=subscription_uuid)
def test_search_iteration_pagination(self, limit=128):
query = {}
with self.subTest('Test POST search iteration() method.'):
for ix, result in enumerate(self.client.post_search.iterate(es_query=query, replica="aws")):
self.assertIn("bundle_fqid", result)
if ix > limit:
break
with self.subTest('Test POST search pagination() method.'):
for ix, result in enumerate(self.client.post_search.paginate(es_query=query, replica="aws")):
self.assertIn("es_query", result)
self.assertIn("results", result)
self.assertIn("total_hits", result)
if ix > limit:
break
def test_collections_iteration_pagination(self, limit=128):
with self.subTest('Test GET collections iteration() method.'):
for ix, result in enumerate(self.client.get_collections.iterate()):
self.assertIn("uuid", result)
self.assertIn("version", result)
if ix > limit:
break
with self.subTest('Test GET collections pagination() method.'):
for ix, result in enumerate(self.client.get_collections.paginate()):
self.assertIn("collections", result)
if ix > limit:
break
def test_get_bundle_iteration_pagination(self, limit=128):
bundle_path = os.path.join(TEST_DIR, "res", "bundle")
bundle_output = self.client.upload(src_dir=bundle_path, replica="aws", staging_bucket=self.staging_bucket)
bundle_uuid = bundle_output['bundle_uuid']
with self.subTest('Test GET bundle iterate() method.'):
for ix, result in enumerate(self.client.get_bundle.iterate(uuid=bundle_uuid, replica="aws")):
self.assertIn("name", result)
self.assertIn("content-type", result)
self.assertIn("indexed", result)
self.assertIn("uuid", result)
self.assertIn("version", result)
self.assertIn("size", result)
self.assertIn("crc32c", result)
self.assertIn("s3_etag", result)
self.assertIn("sha1", result)
self.assertIn("sha256", result)
if ix > limit:
break
with self.subTest('Test GET bundle paginate() method.'):
for ix, result in enumerate(self.client.get_bundle.paginate(uuid=bundle_uuid, replica="aws")):
self.assertIn("bundle", result)
if ix > limit:
break
def test_clear_cache(self):
"""Testing clear_cache by comparing the creation date of the old swagger with the refreshed swagger that
replaces it"""
client = dbio.dss.DSSClient()
swagger_filename = client._get_swagger_filename(client.swagger_url)
self.assertTrue(os.path.isfile(swagger_filename), "Pass if file exists initially")
old_swagger = datetime.datetime.fromtimestamp(os.path.getmtime(swagger_filename))
client.clear_cache()
new_swagger = datetime.datetime.fromtimestamp(os.path.getmtime(swagger_filename))
self.assertGreater(new_swagger, old_swagger)
@reset_tweak_changes
def test_python_login_logout_service_account(self):
query = {'bool': {}}
resp = self.client.put_subscription(es_query=query,
callback_url=
"https://www.test_python_login_logout_service_account.dss.databiosphere.org",
replica="aws")
self.assertIn("uuid", resp)
deletion_resp = self.client.delete_subscription(uuid=resp['uuid'], replica='aws',
subscription_type='elasticsearch')
self.assertIn("timeDeleted", deletion_resp)
access_token = "test_access_token"
self.client.login(access_token=access_token)
config = dbio.get_config()
self.assertEqual(config.oauth2_token.access_token, access_token)
self.client.logout()
self.assertNotIn("oauth2_token", config)
@unittest.skipIf(True, "Manual Test")
@reset_tweak_changes
def test_python_login_logout_user_account(self):
config = dbio.get_config()
self.client.logout()
self.assertNotIn("oauth2_token", config)
self.assertNotIn("application_secrets", config)
self.client.login()
self.assertIn("oauth2_token", config)
self.assertIn("application_secrets", config)
query = {'bool': {}}
resp = self.client.put_subscription(es_query=query,
callback_url=
"https://www.test_python_login_logout_service_account.dss.databiosphere.org",
replica="aws")
self.assertIn("uuid", resp)
self.client.delete_subscription(uuid=resp["uuid"], replica="aws", subscription_type='elasticsearch')
self.client.logout()
self.assertNotIn("oauth2_token", config)
if __name__ == "__main__":
unittest.main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/test_tutorial/test_options/test_completion/test_tutorial008.py | import os
import subprocess
import typer
from typer.testing import CliRunner
from options.autocompletion import tutorial008 as mod
runner = CliRunner()
app = typer.Typer()
app.command()(mod.main)
def test_completion():
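# Run the script under coverage with the completion environment variables set so the
# Typer/Click completion machinery emits zsh completion candidates for "--name ".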
result = subprocess.run(
["coverage", "run", mod.__file__, " "],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
env={
**os.environ,
"_TUTORIAL008.PY_COMPLETE": "complete_zsh",
"_TYPER_COMPLETE_ARGS": "tutorial008.py --name ",
"_TYPER_COMPLETE_TESTING": "True",
},
)
assert '"Camila":"The reader of books."' in result.stdout
assert '"Carlos":"The writer of scripts."' in result.stdout
assert '"Sebastian":"The type hints guy."' in result.stdout
assert "['--name']" in result.stderr
def test_1():
result = runner.invoke(app, ["--name", "Camila", "--name", "Sebastian"])
assert result.exit_code == 0
assert "Hello Camila" in result.output
assert "Hello Sebastian" in result.output
def test_script():
result = subprocess.run(
["coverage", "run", mod.__file__, "--help"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
assert "Usage" in result.stdout
| []
| []
| []
| [] | [] | python | 0 | 0 | |
internal/config/config.go | package config
import (
"errors"
"fmt"
"os"
"github.com/CIDARO/iridium/internal/utils"
"gopkg.in/yaml.v2"
)
// Configuration config struct
type Configuration struct {
Server struct {
Host string `yaml:"host"`
Port string `yaml:"port"`
}
Memcache struct {
Host string `yaml:"host"`
Port string `yaml:"port"`
}
Backends []string `yaml:"backends"`
MaxAttempts int `yaml:"max_attempts"`
MaxRetries int `yaml:"max_retries"`
Metrics bool `yaml:"metrics"`
}
// Config is the global configuration object
var Config *Configuration = &Configuration{}
// GetConfig initializes the configuration
func GetConfig(configPath string, vars ...string) {
// Retrieve ENV environment variable
env := os.Getenv("ENV")
// If vars is provided, the first one is the override for the env
if len(vars) > 0 {
env = vars[0]
}
// Check if no env is provided
if env == "" {
// If it is not provided, set it as development
env = "development"
}
// Check if env is either 'test', 'production' or 'development'
if env != "test" && env != "production" && env != "development" {
// Raise an error if it does not match the criteria
utils.Logger.Panic(errors.New("environment must be either 'test', 'production' or 'development'"))
}
// Open the configuration file
file, err := os.Open(fmt.Sprintf("%s/%s.yml", configPath, env))
if err != nil {
// Return an error if it's given
utils.Logger.Panic(err)
}
// Decode the yaml file into the struct
yamlDecoder := yaml.NewDecoder(file)
if err := yamlDecoder.Decode(&Config); err != nil {
utils.Logger.Panic(err)
}
}
// ValidatePath validates the input path
func ValidatePath(path string) (*string, error) {
stat, err := os.Stat(path)
if err != nil {
return nil, err
}
if !stat.IsDir() {
return nil, fmt.Errorf("'%s' is not a valid directory", path)
}
return &path, nil
}
| [
"\"ENV\""
]
| []
| [
"ENV"
]
| [] | ["ENV"] | go | 1 | 0 | |
api/app.py | from __future__ import print_function
#api/app.py
import os
import requests
from flask import Flask, current_app, Response, json, jsonify, request
from flask_cors import CORS
import pandas as pd
import argparse
from datetime import datetime
import os
import sys
import time
import scipy.misc
import cv2
from PIL import Image
import imageio
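# Pin the process to GPU index 1; CUDA_VISIBLE_DEVICES has to be set before TensorFlow
# initializes its GPU devices for the restriction to take effect.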
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from utils import *
from LIP_model import *
N_CLASSES = 20
INPUT_SIZE = (384, 384)
DATA_DIRECTORY = './datasets/examples'
DATA_LIST_PATH = './datasets/examples/list/val.txt'
RESTORE_FROM = './checkpoint/JPPNet-s2'
OUTPUT_DIR = './output/dataset'
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
os.makedirs('{}/images'.format(OUTPUT_DIR))
os.makedirs('{}/labels'.format(OUTPUT_DIR))
def create_app():
"""
Create app
"""
app = Flask(__name__)
CORS(app, supports_credentials=True)
def custom_response(res, status_code):
"""
Custom Response Function
"""
return Response(
mimetype="application/json",
response=json.dumps(res),
status=status_code
)
def convert_mask_lip(mask):
LIP_to_FP_dict = {
0: 0,
1: 1,
2: 2,
3: 0,
4: 3,
5: 4,
6: 7,
7: 4,
8: 0,
9: 6,
10: 7,
11: 17,
12: 5,
13: 11,
14: 15,
15: 14,
16: 13,
17: 12,
18: 10,
19: 9
}
LIP_rgb_to_code_dict = {
'0_0_0': 0,
'128_0_0': 1,
'255_0_0': 2,
'0_85_0': 3,
'170_0_51': 4,
'255_85_0': 5,
'0_0_85': 6,
'0_119_221': 7,
'85_85_0': 8,
'0_85_85': 9,
'85_51_0': 10,
'52_86_128': 11,
'0_128_0': 12,
'0_0_255': 13,
'51_170_221': 14,
'0_255_255': 15,
'85_255_170': 16,
'170_255_85': 17,
'255_255_0': 18,
'255_170_0': 19
}
image_bounds_dict = {}
new_matrix = []
for i, row in enumerate(mask):
new_row = []
for j, elem in enumerate(row):
new_col = []
color_str = str(elem[0]) + '_' + str(elem[1]) + '_' + str(elem[2])
LIP_code = LIP_rgb_to_code_dict[color_str]
FP_code = LIP_to_FP_dict[LIP_code]
FP_code = [FP_code]*3
new_row.append(FP_code)
new_matrix.append(new_row)
new_matrix = np.array(new_matrix).astype(np.uint8)
return new_matrix
def getBoundingBoxes(mask):
image_bounds_dict = {}
for i, row in enumerate(mask[0]):
for j, elem in enumerate(row):
color_str = str(elem[0]) + '_' + str(elem[1]) + '_' + str(elem[2])
if color_str not in image_bounds_dict:
image_bounds_dict[color_str] = {
'left': j, 'top': i, 'right': j, 'bottom': i}
else:
previous_left = image_bounds_dict[color_str]['left']
previous_right = image_bounds_dict[color_str]['right']
previous_top = image_bounds_dict[color_str]['top']
previous_bottom = image_bounds_dict[color_str]['bottom']
image_bounds_dict[color_str]['left'] = min(j, previous_left)
image_bounds_dict[color_str]['top'] = min(i, previous_top)
image_bounds_dict[color_str]['right'] = max(j, previous_right)
image_bounds_dict[color_str]['bottom'] = max(i, previous_bottom)
data = []
for key, item in image_bounds_dict.items():
data.append({
'id': key,
'bounds': item
})
return data
@app.route('/', methods=['GET'])
def index():
return 'alive'
@app.route('/getSegmentation', methods=['POST'])
def get_segmentation():
if 'file' not in request.files:
return custom_response({ 'error': 'No file provided' }, 400)
file = request.files['file']
# If the user does not select a file, the browser may also
# submit an empty part without a filename
if file.filename == '':
return custom_response({ 'error': 'File without name forbidden' }, 400)
img_contents = file.read()
with open('{}/images/{}.jpg'.format(OUTPUT_DIR, file.filename.split('.')[0]), "wb") as f:
f.write(img_contents)
# Create queue coordinator.
coord = tf.train.Coordinator()
h, w = INPUT_SIZE
# Load reader.
with tf.name_scope("create_inputs"):
reader = ImageReader(DATA_DIRECTORY, DATA_LIST_PATH, None, False, False, coord)
image = reader.read_images_from_binary(img_contents)
image_rev = tf.reverse(image, tf.stack([1]))
image_batch_origin = tf.stack([image, image_rev])
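# Stack the original and horizontally flipped inputs, then build three scaled copies
# (1.0x, 0.75x and 1.25x) of the pair for multi-scale inference.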
image_batch = tf.image.resize_images(image_batch_origin, [int(h), int(w)])
image_batch075 = tf.image.resize_images(image_batch_origin, [int(h * 0.75), int(w * 0.75)])
image_batch125 = tf.image.resize_images(image_batch_origin, [int(h * 1.25), int(w * 1.25)])
# Create network.
with tf.variable_scope('', reuse=False):
net_100 = JPPNetModel({'data': image_batch}, is_training=False, n_classes=N_CLASSES)
with tf.variable_scope('', reuse=True):
net_075 = JPPNetModel({'data': image_batch075}, is_training=False, n_classes=N_CLASSES)
with tf.variable_scope('', reuse=True):
net_125 = JPPNetModel({'data': image_batch125}, is_training=False, n_classes=N_CLASSES)
# parsing net
parsing_fea1_100 = net_100.layers['res5d_branch2b_parsing']
parsing_fea1_075 = net_075.layers['res5d_branch2b_parsing']
parsing_fea1_125 = net_125.layers['res5d_branch2b_parsing']
parsing_out1_100 = net_100.layers['fc1_human']
parsing_out1_075 = net_075.layers['fc1_human']
parsing_out1_125 = net_125.layers['fc1_human']
# pose net
resnet_fea_100 = net_100.layers['res4b22_relu']
resnet_fea_075 = net_075.layers['res4b22_relu']
resnet_fea_125 = net_125.layers['res4b22_relu']
with tf.variable_scope('', reuse=False):
pose_out1_100, pose_fea1_100 = pose_net(resnet_fea_100, 'fc1_pose')
pose_out2_100, pose_fea2_100 = pose_refine(pose_out1_100, parsing_out1_100, pose_fea1_100, name='fc2_pose')
parsing_out2_100, parsing_fea2_100 = parsing_refine(parsing_out1_100, pose_out1_100, parsing_fea1_100, name='fc2_parsing')
parsing_out3_100, parsing_fea3_100 = parsing_refine(parsing_out2_100, pose_out2_100, parsing_fea2_100, name='fc3_parsing')
with tf.variable_scope('', reuse=True):
pose_out1_075, pose_fea1_075 = pose_net(resnet_fea_075, 'fc1_pose')
pose_out2_075, pose_fea2_075 = pose_refine(pose_out1_075, parsing_out1_075, pose_fea1_075, name='fc2_pose')
parsing_out2_075, parsing_fea2_075 = parsing_refine(parsing_out1_075, pose_out1_075, parsing_fea1_075, name='fc2_parsing')
parsing_out3_075, parsing_fea3_075 = parsing_refine(parsing_out2_075, pose_out2_075, parsing_fea2_075, name='fc3_parsing')
with tf.variable_scope('', reuse=True):
pose_out1_125, pose_fea1_125 = pose_net(resnet_fea_125, 'fc1_pose')
pose_out2_125, pose_fea2_125 = pose_refine(pose_out1_125, parsing_out1_125, pose_fea1_125, name='fc2_pose')
parsing_out2_125, parsing_fea2_125 = parsing_refine(parsing_out1_125, pose_out1_125, parsing_fea1_125, name='fc2_parsing')
parsing_out3_125, parsing_fea3_125 = parsing_refine(parsing_out2_125, pose_out2_125, parsing_fea2_125, name='fc3_parsing')
parsing_out1 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out1_100, tf.shape(image_batch_origin)[1:3,]),
tf.image.resize_images(parsing_out1_075, tf.shape(image_batch_origin)[1:3,]),
tf.image.resize_images(parsing_out1_125, tf.shape(image_batch_origin)[1:3,])]), axis=0)
parsing_out2 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out2_100, tf.shape(image_batch_origin)[1:3,]),
tf.image.resize_images(parsing_out2_075, tf.shape(image_batch_origin)[1:3,]),
tf.image.resize_images(parsing_out2_125, tf.shape(image_batch_origin)[1:3,])]), axis=0)
parsing_out3 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out3_100, tf.shape(image_batch_origin)[1:3,]),
tf.image.resize_images(parsing_out3_075, tf.shape(image_batch_origin)[1:3,]),
tf.image.resize_images(parsing_out3_125, tf.shape(image_batch_origin)[1:3,])]), axis=0)
raw_output = tf.reduce_mean(tf.stack([parsing_out1, parsing_out2, parsing_out3]), axis=0)
head_output, tail_output = tf.unstack(raw_output, num=2, axis=0)
tail_list = tf.unstack(tail_output, num=20, axis=2)
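# The tail prediction comes from the flipped input, so swap left/right paired labels
# before mirroring it back and averaging it with the prediction on the original image.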
tail_list_rev = [None] * 20
for xx in range(14):
tail_list_rev[xx] = tail_list[xx]
tail_list_rev[14] = tail_list[15]
tail_list_rev[15] = tail_list[14]
tail_list_rev[16] = tail_list[17]
tail_list_rev[17] = tail_list[16]
tail_list_rev[18] = tail_list[19]
tail_list_rev[19] = tail_list[18]
tail_output_rev = tf.stack(tail_list_rev, axis=2)
tail_output_rev = tf.reverse(tail_output_rev, tf.stack([1]))
raw_output_all = tf.reduce_mean(tf.stack([head_output, tail_output_rev]), axis=0)
raw_output_all = tf.expand_dims(raw_output_all, dim=0)
raw_output_all = tf.argmax(raw_output_all, dimension=3)
pred_all = tf.expand_dims(raw_output_all, dim=3) # Create 4-d tensor.
# Which variables to load.
restore_var = tf.global_variables()
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
init = tf.global_variables_initializer()
sess.run(init)
sess.run(tf.local_variables_initializer())
# Load weights.
loader = tf.train.Saver(var_list=restore_var)
if RESTORE_FROM is not None:
if load(loader, sess, RESTORE_FROM):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
# Iterate over training steps.
parsing_ = sess.run(pred_all)
img_id = file.filename
msk = decode_labels(parsing_, num_classes=N_CLASSES)
parsing_im = convert_mask_lip(msk[0])
imageio.imwrite('{}/labels/{}.png'.format(OUTPUT_DIR, img_id.split('.')[0]), parsing_im)
coord.request_stop()
bbox = getBoundingBoxes(msk)
return custom_response(bbox, 200)
return app | []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
dask_kubernetes/core.py | import asyncio
import copy
import getpass
import logging
import os
import string
import time
import uuid
from weakref import finalize
try:
import yaml
except ImportError:
yaml = False
import dask
import dask.distributed
import distributed.security
from distributed.deploy import SpecCluster, ProcessInterface
from distributed.utils import Log, Logs
import kubernetes_asyncio as kubernetes
from kubernetes_asyncio.client.rest import ApiException
from .objects import (
make_pod_from_dict,
make_service_from_dict,
make_pdb_from_dict,
clean_pod_template,
clean_service_template,
clean_pdb_template,
)
from .auth import ClusterAuth
logger = logging.getLogger(__name__)
SCHEDULER_PORT = 8786
class Pod(ProcessInterface):
"""A superclass for Kubernetes Pods
See Also
--------
Worker
Scheduler
"""
def __init__(
self,
cluster,
core_api,
policy_api,
pod_template,
namespace,
loop=None,
**kwargs
):
self._pod = None
self.cluster = cluster
self.core_api = core_api
self.policy_api = policy_api
self.pod_template = copy.deepcopy(pod_template)
self.base_labels = self.pod_template.metadata.labels
self.namespace = namespace
self.name = None
self.loop = loop
self.kwargs = kwargs
super().__init__()
@property
def cluster_name(self):
return self.pod_template.metadata.labels["dask.org/cluster-name"]
async def start(self, **kwargs):
retry_count = 0 # Retry 10 times
while True:
try:
self._pod = await self.core_api.create_namespaced_pod(
self.namespace, self.pod_template
)
return await super().start(**kwargs)
except ApiException as e:
if retry_count < 10:
logger.debug("Error when creating pod, retrying... - %s", str(e))
await asyncio.sleep(1)
retry_count += 1
else:
raise e
async def close(self, **kwargs):
name, namespace = self._pod.metadata.name, self.namespace
if self._pod:
try:
await self.core_api.delete_namespaced_pod(name, namespace)
except ApiException as e:
if e.reason == "Not Found":
logger.debug(
"Pod %s in namespace %s has been deleated already.",
name,
namespace,
)
else:
raise
await super().close(**kwargs)
async def logs(self):
try:
log = await self.core_api.read_namespaced_pod_log(
self._pod.metadata.name, self.namespace
)
except ApiException as e:
if "waiting to start" in str(e):
log = ""
else:
raise e
return Log(log)
async def describe_pod(self):
self._pod = await self.core_api.read_namespaced_pod(
self._pod.metadata.name, self.namespace
)
return self._pod
def __repr__(self):
return "<Pod %s: status=%s>" % (type(self).__name__, self.status)
class Worker(Pod):
"""A Remote Dask Worker controled by Kubernetes
Parameters
----------
scheduler: str
The address of the scheduler
name (optional):
The name passed to the dask-worker CLI at creation time.
"""
def __init__(self, scheduler: str, name=None, **kwargs):
super().__init__(**kwargs)
self.scheduler = scheduler
self.pod_template.metadata.labels["dask.org/component"] = "worker"
self.pod_template.spec.containers[0].env.append(
kubernetes.client.V1EnvVar(
name="DASK_SCHEDULER_ADDRESS", value=self.scheduler
)
)
if name is not None:
worker_name_args = ["--name", str(name)]
self.pod_template.spec.containers[0].args += worker_name_args
class Scheduler(Pod):
"""A Remote Dask Scheduler controled by Kubernetes
Parameters
----------
idle_timeout: str, optional
The scheduler task will exit after this amount of time
if there are no requests from the client. Default is to
never timeout.
service_wait_timeout_s: int (optional)
Timeout, in seconds, to wait for the remote scheduler service to be ready.
Defaults to 30 seconds.
Set to 0 to disable the timeout (not recommended).
"""
def __init__(self, idle_timeout: str, service_wait_timeout_s: int = None, **kwargs):
super().__init__(**kwargs)
self.cluster._log("Creating scheduler pod on cluster. This may take some time.")
self.service = None
self._idle_timeout = idle_timeout
self._service_wait_timeout_s = service_wait_timeout_s
if self._idle_timeout is not None:
self.pod_template.spec.containers[0].args += [
"--idle-timeout",
self._idle_timeout,
]
self.pdb = None
async def start(self, **kwargs):
await super().start(**kwargs)
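# Wait for the pod to leave the Pending phase, then scrape the scheduler's listening
# address from the pod logs before exposing it through a Kubernetes Service.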
while (await self.describe_pod()).status.phase == "Pending":
await asyncio.sleep(0.1)
while self.address is None:
logs = await self.logs()
for line in logs.splitlines():
if "Scheduler at:" in line:
self.address = line.split("Scheduler at:")[1].strip()
await asyncio.sleep(0.1)
self.service = await self._create_service()
self.address = "tcp://{name}.{namespace}:{port}".format(
name=self.service.metadata.name,
namespace=self.namespace,
port=SCHEDULER_PORT,
)
if self.service.spec.type == "LoadBalancer":
# Wait for load balancer to be assigned
start = time.time()
while self.service.status.load_balancer.ingress is None:
if (
self._service_wait_timeout_s > 0
and time.time() > start + self._service_wait_timeout_s
):
raise asyncio.TimeoutError(
"Timed out waiting for Load Balancer to be provisioned."
)
self.service = await self.core_api.read_namespaced_service(
self.cluster_name, self.namespace
)
await asyncio.sleep(0.2)
[loadbalancer_ingress] = self.service.status.load_balancer.ingress
loadbalancer_host = loadbalancer_ingress.hostname or loadbalancer_ingress.ip
self.external_address = "tcp://{host}:{port}".format(
host=loadbalancer_host, port=SCHEDULER_PORT
)
# FIXME Set external address when using nodeport service type
# FIXME Create an optional Ingress just in case folks want to configure one
self.pdb = await self._create_pdb()
async def close(self, **kwargs):
if self.service:
await self.core_api.delete_namespaced_service(
self.cluster_name, self.namespace
)
if self.pdb:
await self.policy_api.delete_namespaced_pod_disruption_budget(
self.cluster_name, self.namespace
)
await super().close(**kwargs)
async def _create_service(self):
service_template_dict = dask.config.get("kubernetes.scheduler-service-template")
self.service_template = clean_service_template(
make_service_from_dict(service_template_dict)
)
self.service_template.metadata.name = self.cluster_name
self.service_template.metadata.labels = copy.deepcopy(self.base_labels)
self.service_template.spec.selector["dask.org/cluster-name"] = self.cluster_name
if self.service_template.spec.type is None:
self.service_template.spec.type = dask.config.get(
"kubernetes.scheduler-service-type"
)
await self.core_api.create_namespaced_service(
self.namespace, self.service_template
)
return await self.core_api.read_namespaced_service(
self.cluster_name, self.namespace
)
async def _create_pdb(self):
pdb_template_dict = dask.config.get("kubernetes.scheduler-pdb-template")
self.pdb_template = clean_pdb_template(make_pdb_from_dict(pdb_template_dict))
self.pdb_template.metadata.name = self.cluster_name
self.pdb_template.spec.labels = copy.deepcopy(self.base_labels)
self.pdb_template.spec.selector.match_labels[
"dask.org/cluster-name"
] = self.cluster_name
await self.policy_api.create_namespaced_pod_disruption_budget(
self.namespace, self.pdb_template
)
return await self.policy_api.read_namespaced_pod_disruption_budget(
self.cluster_name, self.namespace
)
class KubeCluster(SpecCluster):
"""Launch a Dask cluster on Kubernetes
This starts a local Dask scheduler and then dynamically launches
Dask workers on a Kubernetes cluster. The Kubernetes cluster is taken
to be either the current one on which this code is running, or as a
fallback, the default one configured in a kubeconfig file.
**Environments**
Your worker pod image should have a similar environment to your local
environment, including versions of Python, dask, cloudpickle, and any
libraries that you may wish to use (like NumPy, Pandas, or Scikit-Learn).
See examples below for suggestions on how to manage and check for this.
**Network**
Since the Dask scheduler is launched locally, for it to work, we need to
be able to open network connections between this local node and all the
workers nodes on the Kubernetes cluster. If the current process is not
already on a Kubernetes node, some network configuration will likely be
required to make this work.
**Resources**
Your Kubernetes resource limits and requests should match the
``--memory-limit`` and ``--nthreads`` parameters given to the
``dask-worker`` command.
Parameters
----------
pod_template: kubernetes.client.V1Pod
A Kubernetes specification for a Pod for a dask worker.
scheduler_pod_template: kubernetes.client.V1Pod (optional)
A Kubernetes specification for a Pod for a dask scheduler.
Defaults to the pod_template.
name: str (optional)
Name given to the pods. Defaults to ``dask-$USER-random``
namespace: str (optional)
Namespace in which to launch the workers.
Defaults to current namespace if available or "default"
n_workers: int
Number of workers on initial launch.
Use ``scale`` to change this number in the future
env: Dict[str, str]
Dictionary of environment variables to pass to worker pod
host: str
Listen address for local scheduler. Defaults to 0.0.0.0
port: int
Port of local scheduler
auth: List[ClusterAuth] (optional)
Configuration methods to attempt in order. Defaults to
``[InCluster(), KubeConfig()]``.
idle_timeout: str (optional)
The scheduler task will exit after this amount of time
if there are no requests from the client. Default is to
never timeout.
scheduler_service_wait_timeout: int (optional)
Timeout, in seconds, to wait for the remote scheduler service to be ready.
Defaults to 30 seconds.
Set to 0 to disable the timeout (not recommended).
deploy_mode: str (optional)
Run the scheduler as "local" or "remote".
Defaults to ``"local"``.
**kwargs: dict
Additional keyword arguments to pass to LocalCluster
Examples
--------
>>> from dask_kubernetes import KubeCluster, make_pod_spec
>>> pod_spec = make_pod_spec(image='daskdev/dask:latest',
... memory_limit='4G', memory_request='4G',
... cpu_limit=1, cpu_request=1,
... env={'EXTRA_PIP_PACKAGES': 'fastparquet git+https://github.com/dask/distributed'})
>>> cluster = KubeCluster(pod_spec)
>>> cluster.scale(10)
You can also create clusters with worker pod specifications as dictionaries
or stored in YAML files
>>> cluster = KubeCluster.from_yaml('worker-template.yml')
>>> cluster = KubeCluster.from_dict({...})
Rather than explicitly setting a number of workers you can also ask the
cluster to allocate workers dynamically based on current workload
>>> cluster.adapt()
You can pass this cluster directly to a Dask client
>>> from dask.distributed import Client
>>> client = Client(cluster)
You can verify that your local environment matches your worker environments
by calling ``client.get_versions(check=True)``. This will raise an
informative error if versions do not match.
>>> client.get_versions(check=True)
The ``daskdev/dask`` docker images support ``EXTRA_PIP_PACKAGES``,
``EXTRA_APT_PACKAGES`` and ``EXTRA_CONDA_PACKAGES`` environment variables
to help with small adjustments to the worker environments. We recommend
the use of pip over conda in this case due to a much shorter startup time.
These environment variables can be modified directly from the KubeCluster
constructor methods using the ``env=`` keyword. You may list as many
packages as you like in a single string like the following:
>>> pip = 'pyarrow gcsfs git+https://github.com/dask/distributed'
>>> conda = '-c conda-forge scikit-learn'
>>> KubeCluster.from_yaml(..., env={'EXTRA_PIP_PACKAGES': pip,
... 'EXTRA_CONDA_PACKAGES': conda})
You can also start a KubeCluster with no arguments *if* the worker template
is specified in the Dask config files, either as a full template in
``kubernetes.worker-template`` or a path to a YAML file in
``kubernetes.worker-template-path``.
See https://docs.dask.org/en/latest/configuration.html for more
information about setting configuration values.::
$ export DASK_KUBERNETES__WORKER_TEMPLATE_PATH=worker_template.yaml
>>> cluster = KubeCluster() # automatically finds 'worker_template.yaml'
See Also
--------
KubeCluster.from_yaml
KubeCluster.from_dict
KubeCluster.adapt
"""
def __init__(
self,
pod_template=None,
name=None,
namespace=None,
n_workers=None,
host=None,
port=None,
env=None,
auth=ClusterAuth.DEFAULT,
idle_timeout=None,
deploy_mode=None,
interface=None,
protocol=None,
dashboard_address=None,
security=None,
scheduler_service_wait_timeout=None,
scheduler_pod_template=None,
**kwargs
):
self.pod_template = pod_template
self.scheduler_pod_template = scheduler_pod_template
self._generate_name = name
self._namespace = namespace
self._n_workers = n_workers
self._idle_timeout = idle_timeout
self._deploy_mode = deploy_mode
self._protocol = protocol
self._interface = interface
self._dashboard_address = dashboard_address
self._scheduler_service_wait_timeout = scheduler_service_wait_timeout
self.security = security
if self.security and not isinstance(
self.security, distributed.security.Security
):
raise RuntimeError(
"Security object is not a valid distributed.security.Security object"
)
self.host = host
self.port = port
self.env = env
self.auth = auth
self.kwargs = kwargs
super().__init__(**self.kwargs)
self.name = self.pod_template.metadata.generate_name
def _get_pod_template(self, pod_template, pod_type):
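# A pod template can be supplied inline via the dask config key
# "kubernetes.<pod_type>-template" or as a path to a YAML file via
# "kubernetes.<pod_type>-template-path"; environment variables are expanded in both cases.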
if not pod_template and dask.config.get(
"kubernetes.{}-template".format(pod_type), None
):
d = dask.config.get("kubernetes.{}-template".format(pod_type))
d = dask.config.expand_environment_variables(d)
pod_template = make_pod_from_dict(d)
if not pod_template and dask.config.get(
"kubernetes.{}-template-path".format(pod_type), None
):
import yaml
fn = dask.config.get("kubernetes.{}-template-path".format(pod_type))
fn = fn.format(**os.environ)
with open(fn) as f:
d = yaml.safe_load(f)
d = dask.config.expand_environment_variables(d)
pod_template = make_pod_from_dict(d)
return pod_template
def _fill_pod_templates(self, pod_template, pod_type):
pod_template = copy.deepcopy(pod_template)
# Default labels that can't be overwritten
pod_template.metadata.labels["dask.org/cluster-name"] = self._generate_name
pod_template.metadata.labels["dask.org/component"] = pod_type
pod_template.metadata.labels["user"] = escape(getpass.getuser())
pod_template.metadata.labels["app"] = "dask"
pod_template.metadata.namespace = self._namespace
if self.env:
pod_template.spec.containers[0].env.extend(
[
kubernetes.client.V1EnvVar(name=k, value=str(v))
for k, v in self.env.items()
]
)
pod_template.metadata.generate_name = self._generate_name
return pod_template
async def _start(self):
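# Resolve every option that was not passed explicitly from the dask config
# ("kubernetes.*" keys) before building pod templates and Kubernetes API clients.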
self._generate_name = self._generate_name or dask.config.get("kubernetes.name")
self._namespace = self._namespace or dask.config.get("kubernetes.namespace")
self._idle_timeout = self._idle_timeout or dask.config.get(
"kubernetes.idle-timeout"
)
self._scheduler_service_wait_timeout = (
self._scheduler_service_wait_timeout
or dask.config.get("kubernetes.scheduler-service-wait-timeout")
)
self._deploy_mode = self._deploy_mode or dask.config.get(
"kubernetes.deploy-mode"
)
self._n_workers = (
self._n_workers
if self._n_workers is not None
else dask.config.get("kubernetes.count.start")
)
self.host = self.host or dask.config.get("kubernetes.host")
self.port = (
self.port if self.port is not None else dask.config.get("kubernetes.port")
)
self._protocol = self._protocol or dask.config.get("kubernetes.protocol")
self._interface = self._interface or dask.config.get("kubernetes.interface")
self._dashboard_address = self._dashboard_address or dask.config.get(
"kubernetes.dashboard_address"
)
self.env = (
self.env if self.env is not None else dask.config.get("kubernetes.env")
)
self.pod_template = self._get_pod_template(self.pod_template, pod_type="worker")
self.scheduler_pod_template = self._get_pod_template(
self.scheduler_pod_template, pod_type="scheduler"
)
if not self.pod_template:
msg = (
"Worker pod specification not provided. See KubeCluster "
"docstring for ways to specify workers"
)
raise ValueError(msg)
base_pod_template = self.pod_template
self.pod_template = clean_pod_template(self.pod_template, pod_type="worker")
if not self.scheduler_pod_template:
self.scheduler_pod_template = base_pod_template
self.scheduler_pod_template.spec.containers[0].args = ["dask-scheduler"]
self.scheduler_pod_template = clean_pod_template(
self.scheduler_pod_template, pod_type="scheduler"
)
await ClusterAuth.load_first(self.auth)
self.core_api = kubernetes.client.CoreV1Api()
self.policy_api = kubernetes.client.PolicyV1beta1Api()
if self._namespace is None:
self._namespace = _namespace_default()
self._generate_name = self._generate_name.format(
user=getpass.getuser(), uuid=str(uuid.uuid4())[:10], **os.environ
)
self._generate_name = escape(self._generate_name)
self.pod_template = self._fill_pod_templates(
self.pod_template, pod_type="worker"
)
self.scheduler_pod_template = self._fill_pod_templates(
self.scheduler_pod_template, pod_type="scheduler"
)
finalize(
self, _cleanup_resources, self._namespace, self.pod_template.metadata.labels
)
common_options = {
"cluster": self,
"core_api": self.core_api,
"policy_api": self.policy_api,
"namespace": self._namespace,
"loop": self.loop,
}
if self._deploy_mode == "local":
self.scheduler_spec = {
"cls": dask.distributed.Scheduler,
"options": {
"protocol": self._protocol,
"interface": self._interface,
"host": self.host,
"port": self.port,
"dashboard_address": self._dashboard_address,
"security": self.security,
},
}
elif self._deploy_mode == "remote":
self.scheduler_spec = {
"cls": Scheduler,
"options": {
"idle_timeout": self._idle_timeout,
"service_wait_timeout_s": self._scheduler_service_wait_timeout,
"pod_template": self.scheduler_pod_template,
**common_options,
},
}
else:
raise RuntimeError("Unknown deploy mode %s" % self._deploy_mode)
self.new_spec = {
"cls": Worker,
"options": {"pod_template": self.pod_template, **common_options},
}
self.worker_spec = {i: self.new_spec for i in range(self._n_workers)}
await super()._start()
@classmethod
def from_dict(cls, pod_spec, **kwargs):
"""Create cluster with worker pod spec defined by Python dictionary
Examples
--------
>>> spec = {
... 'metadata': {},
... 'spec': {
... 'containers': [{
... 'args': ['dask-worker', '$(DASK_SCHEDULER_ADDRESS)',
... '--nthreads', '1',
... '--death-timeout', '60'],
... 'command': None,
... 'image': 'daskdev/dask:latest',
... 'name': 'dask-worker',
... }],
... 'restartPolicy': 'Never',
... }
... }
>>> cluster = KubeCluster.from_dict(spec, namespace='my-ns') # doctest: +SKIP
See Also
--------
KubeCluster.from_yaml
"""
return cls(make_pod_from_dict(pod_spec), **kwargs)
@classmethod
def from_yaml(cls, yaml_path, **kwargs):
"""Create cluster with worker pod spec defined by a YAML file
We can start a cluster with pods defined in an accompanying YAML file
like the following:
.. code-block:: yaml
kind: Pod
metadata:
labels:
foo: bar
baz: quux
spec:
containers:
- image: daskdev/dask:latest
name: dask-worker
args: [dask-worker, $(DASK_SCHEDULER_ADDRESS), --nthreads, '2', --memory-limit, 8GB]
restartPolicy: Never
Examples
--------
>>> cluster = KubeCluster.from_yaml('pod.yaml', namespace='my-ns') # doctest: +SKIP
See Also
--------
KubeCluster.from_dict
"""
if not yaml:
raise ImportError(
"PyYaml is required to use yaml functionality, please install it!"
)
with open(yaml_path) as f:
d = yaml.safe_load(f)
d = dask.config.expand_environment_variables(d)
return cls.from_dict(d, **kwargs)
@property
def namespace(self):
return self.pod_template.metadata.namespace
def scale(self, n):
# A shim to maintain backward compatibility
# https://github.com/dask/distributed/issues/3054
maximum = dask.config.get("kubernetes.count.max")
if maximum is not None and maximum < n:
logger.info(
"Tried to scale beyond maximum number of workers %d > %d", n, maximum
)
n = maximum
return super().scale(n)
async def _logs(self, scheduler=True, workers=True):
"""Return logs for the scheduler and workers
Parameters
----------
scheduler : boolean
Whether or not to collect logs for the scheduler
workers : boolean or Iterable[str], optional
A list of worker addresses to select.
Defaults to all workers if `True` or no workers if `False`
Returns
-------
logs: Dict[str]
A dictionary of logs, with one item for the scheduler and one for
each worker
"""
logs = Logs()
if scheduler:
logs["Scheduler"] = await self.scheduler.logs()
if workers:
worker_logs = await asyncio.gather(
*[w.logs() for w in self.workers.values()]
)
for key, log in zip(self.workers, worker_logs):
logs[key] = log
return logs
def _cleanup_resources(namespace, labels):
""" Remove all pods with these labels in this namespace """
import kubernetes
core_api = kubernetes.client.CoreV1Api()
pods = core_api.list_namespaced_pod(namespace, label_selector=format_labels(labels))
for pod in pods.items:
try:
core_api.delete_namespaced_pod(pod.metadata.name, namespace)
logger.info("Deleted pod: %s", pod.metadata.name)
except kubernetes.client.rest.ApiException as e:
# ignore error if pod is already removed
if e.status != 404:
raise
services = core_api.list_namespaced_service(
namespace, label_selector=format_labels(labels)
)
for service in services.items:
try:
core_api.delete_namespaced_service(service.metadata.name, namespace)
logger.info("Deleted service: %s", service.metadata.name)
except kubernetes.client.rest.ApiException as e:
# ignore error if service is already removed
if e.status != 404:
raise
def format_labels(labels):
""" Convert a dictionary of labels into a comma separated string """
if labels:
return ",".join(["{}={}".format(k, v) for k, v in labels.items()])
else:
return ""
def _namespace_default():
"""
Get current namespace if running in a k8s cluster
If not in a k8s cluster with service accounts enabled, default to
'default'
Taken from https://github.com/jupyterhub/kubespawner/blob/master/kubespawner/spawner.py#L125
"""
ns_path = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
if os.path.exists(ns_path):
with open(ns_path) as f:
return f.read().strip()
return "default"
def escape(s):
valid_characters = string.ascii_letters + string.digits + "-"
return "".join(c for c in s if c in valid_characters).lower()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/runtime/runtime-gdb_test.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"bytes"
"fmt"
"internal/testenv"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"testing"
)
// NOTE: In some configurations, GDB will segfault when sent a SIGWINCH signal.
// Some runtime tests send SIGWINCH to the entire process group, so those tests
// must never run in parallel with GDB tests.
//
// See issue 39021 and https://sourceware.org/bugzilla/show_bug.cgi?id=26056.
func checkGdbEnvironment(t *testing.T) {
testenv.MustHaveGoBuild(t)
switch runtime.GOOS {
case "darwin":
t.Skip("gdb does not work on darwin")
case "netbsd":
t.Skip("gdb does not work with threads on NetBSD; see https://golang.org/issue/22893 and https://gnats.netbsd.org/52548")
case "windows":
t.Skip("gdb tests fail on Windows: https://golang.org/issue/22687")
case "linux":
if runtime.GOARCH == "ppc64" {
t.Skip("skipping gdb tests on linux/ppc64; see https://golang.org/issue/17366")
}
if runtime.GOARCH == "mips" {
t.Skip("skipping gdb tests on linux/mips; see https://golang.org/issue/25939")
}
case "freebsd":
t.Skip("skipping gdb tests on FreeBSD; see https://golang.org/issue/29508")
case "aix":
if testing.Short() {
t.Skip("skipping gdb tests on AIX; see https://golang.org/issue/35710")
}
case "plan9":
t.Skip("there is no gdb on Plan 9")
}
if final := os.Getenv("GOROOT_FINAL"); final != "" && runtime.GOROOT() != final {
t.Skip("gdb test can fail with GOROOT_FINAL pending")
}
}
func checkGdbVersion(t *testing.T) {
// Issue 11214 reports various failures with older versions of gdb.
out, err := exec.Command("gdb", "--version").CombinedOutput()
if err != nil {
t.Skipf("skipping: error executing gdb: %v", err)
}
re := regexp.MustCompile(`([0-9]+)\.([0-9]+)`)
matches := re.FindSubmatch(out)
if len(matches) < 3 {
t.Skipf("skipping: can't determine gdb version from\n%s\n", out)
}
major, err1 := strconv.Atoi(string(matches[1]))
minor, err2 := strconv.Atoi(string(matches[2]))
if err1 != nil || err2 != nil {
t.Skipf("skipping: can't determine gdb version: %v, %v", err1, err2)
}
if major < 7 || (major == 7 && minor < 7) {
t.Skipf("skipping: gdb version %d.%d too old", major, minor)
}
t.Logf("gdb version %d.%d", major, minor)
}
func checkGdbPython(t *testing.T) {
if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" {
t.Skip("skipping gdb python tests on illumos and solaris; see golang.org/issue/20821")
}
cmd := exec.Command("gdb", "-nx", "-q", "--batch", "-iex", "python import sys; print('go gdb python support')")
out, err := cmd.CombinedOutput()
if err != nil {
t.Skipf("skipping due to issue running gdb: %v", err)
}
if strings.TrimSpace(string(out)) != "go gdb python support" {
t.Skipf("skipping due to lack of python gdb support: %s", out)
}
}
// checkCleanBacktrace checks that the given backtrace is well formed and does
// not contain any error messages from GDB.
func checkCleanBacktrace(t *testing.T, backtrace string) {
backtrace = strings.TrimSpace(backtrace)
lines := strings.Split(backtrace, "\n")
if len(lines) == 0 {
t.Fatalf("empty backtrace")
}
for i, l := range lines {
if !strings.HasPrefix(l, fmt.Sprintf("#%v ", i)) {
t.Fatalf("malformed backtrace at line %v: %v", i, l)
}
}
// TODO(mundaym): check for unknown frames (e.g. "??").
}
const helloSource = `
import "fmt"
import "runtime"
var gslice []string
func main() {
mapvar := make(map[string]string, 13)
slicemap := make(map[string][]string,11)
chanint := make(chan int, 10)
chanstr := make(chan string, 10)
chanint <- 99
chanint <- 11
chanstr <- "spongepants"
chanstr <- "squarebob"
mapvar["abc"] = "def"
mapvar["ghi"] = "jkl"
slicemap["a"] = []string{"b","c","d"}
slicemap["e"] = []string{"f","g","h"}
strvar := "abc"
ptrvar := &strvar
slicevar := make([]string, 0, 16)
slicevar = append(slicevar, mapvar["abc"])
fmt.Println("hi")
runtime.KeepAlive(ptrvar)
_ = ptrvar // set breakpoint here
gslice = slicevar
fmt.Printf("%v, %v, %v\n", slicemap, <-chanint, <-chanstr)
runtime.KeepAlive(mapvar)
} // END_OF_PROGRAM
`
func lastLine(src []byte) int {
eop := []byte("END_OF_PROGRAM")
for i, l := range bytes.Split(src, []byte("\n")) {
if bytes.Contains(l, eop) {
return i
}
}
return 0
}
func TestGdbPython(t *testing.T) {
testGdbPython(t, false)
}
func TestGdbPythonCgo(t *testing.T) {
if runtime.GOARCH == "mips" || runtime.GOARCH == "mipsle" || runtime.GOARCH == "mips64" {
testenv.SkipFlaky(t, 18784)
}
testGdbPython(t, true)
}
func testGdbPython(t *testing.T, cgo bool) {
if cgo {
testenv.MustHaveCGO(t)
}
checkGdbEnvironment(t)
t.Parallel()
checkGdbVersion(t)
checkGdbPython(t)
dir := t.TempDir()
var buf bytes.Buffer
buf.WriteString("package main\n")
if cgo {
buf.WriteString(`import "C"` + "\n")
}
buf.WriteString(helloSource)
src := buf.Bytes()
// Locate breakpoint line
var bp int
lines := bytes.Split(src, []byte("\n"))
for i, line := range lines {
if bytes.Contains(line, []byte("breakpoint")) {
bp = i
break
}
}
err := os.WriteFile(filepath.Join(dir, "main.go"), src, 0644)
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
nLines := lastLine(src)
cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "a.exe", "main.go")
cmd.Dir = dir
out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
if err != nil {
t.Fatalf("building source %v\n%s", err, out)
}
args := []string{"-nx", "-q", "--batch",
"-iex", "add-auto-load-safe-path " + filepath.Join(runtime.GOROOT(), "src", "runtime"),
"-ex", "set startup-with-shell off",
"-ex", "set print thread-events off",
}
if cgo {
// When we build the cgo version of the program, the system's
// linker is used. Some external linkers, like GNU gold,
// compress the .debug_gdb_scripts into .zdebug_gdb_scripts.
// Until gold and gdb can work together, temporarily load the
// python script directly.
args = append(args,
"-ex", "source "+filepath.Join(runtime.GOROOT(), "src", "runtime", "runtime-gdb.py"),
)
} else {
args = append(args,
"-ex", "info auto-load python-scripts",
)
}
args = append(args,
"-ex", "set python print-stack full",
"-ex", fmt.Sprintf("br main.go:%d", bp),
"-ex", "run",
"-ex", "echo BEGIN info goroutines\n",
"-ex", "info goroutines",
"-ex", "echo END\n",
"-ex", "echo BEGIN print mapvar\n",
"-ex", "print mapvar",
"-ex", "echo END\n",
"-ex", "echo BEGIN print slicemap\n",
"-ex", "print slicemap",
"-ex", "echo END\n",
"-ex", "echo BEGIN print strvar\n",
"-ex", "print strvar",
"-ex", "echo END\n",
"-ex", "echo BEGIN print chanint\n",
"-ex", "print chanint",
"-ex", "echo END\n",
"-ex", "echo BEGIN print chanstr\n",
"-ex", "print chanstr",
"-ex", "echo END\n",
"-ex", "echo BEGIN info locals\n",
"-ex", "info locals",
"-ex", "echo END\n",
"-ex", "echo BEGIN goroutine 1 bt\n",
"-ex", "goroutine 1 bt",
"-ex", "echo END\n",
"-ex", "echo BEGIN goroutine all bt\n",
"-ex", "goroutine all bt",
"-ex", "echo END\n",
"-ex", "clear main.go:15", // clear the previous break point
"-ex", fmt.Sprintf("br main.go:%d", nLines), // new break point at the end of main
"-ex", "c",
"-ex", "echo BEGIN goroutine 1 bt at the end\n",
"-ex", "goroutine 1 bt",
"-ex", "echo END\n",
filepath.Join(dir, "a.exe"),
)
got, err := exec.Command("gdb", args...).CombinedOutput()
t.Logf("gdb output:\n%s", got)
if err != nil {
t.Fatalf("gdb exited with error: %v", err)
}
firstLine, _, _ := bytes.Cut(got, []byte("\n"))
if string(firstLine) != "Loading Go Runtime support." {
// This can happen when using all.bash with
// GOROOT_FINAL set, because the tests are run before
// the final installation of the files.
cmd := exec.Command(testenv.GoToolPath(t), "env", "GOROOT")
cmd.Env = []string{}
out, err := cmd.CombinedOutput()
if err != nil && bytes.Contains(out, []byte("cannot find GOROOT")) {
t.Skipf("skipping because GOROOT=%s does not exist", runtime.GOROOT())
}
_, file, _, _ := runtime.Caller(1)
t.Logf("package testing source file: %s", file)
t.Fatalf("failed to load Go runtime support: %s\n%s", firstLine, got)
}
// Extract named BEGIN...END blocks from output
partRe := regexp.MustCompile(`(?ms)^BEGIN ([^\n]*)\n(.*?)\nEND`)
blocks := map[string]string{}
for _, subs := range partRe.FindAllSubmatch(got, -1) {
blocks[string(subs[1])] = string(subs[2])
}
infoGoroutinesRe := regexp.MustCompile(`\*\s+\d+\s+running\s+`)
if bl := blocks["info goroutines"]; !infoGoroutinesRe.MatchString(bl) {
t.Fatalf("info goroutines failed: %s", bl)
}
printMapvarRe1 := regexp.MustCompile(`^\$[0-9]+ = map\[string\]string = {\[(0x[0-9a-f]+\s+)?"abc"\] = (0x[0-9a-f]+\s+)?"def", \[(0x[0-9a-f]+\s+)?"ghi"\] = (0x[0-9a-f]+\s+)?"jkl"}$`)
printMapvarRe2 := regexp.MustCompile(`^\$[0-9]+ = map\[string\]string = {\[(0x[0-9a-f]+\s+)?"ghi"\] = (0x[0-9a-f]+\s+)?"jkl", \[(0x[0-9a-f]+\s+)?"abc"\] = (0x[0-9a-f]+\s+)?"def"}$`)
if bl := blocks["print mapvar"]; !printMapvarRe1.MatchString(bl) &&
!printMapvarRe2.MatchString(bl) {
t.Fatalf("print mapvar failed: %s", bl)
}
// 2 orders, and possible differences in spacing.
sliceMapSfx1 := `map[string][]string = {["e"] = []string = {"f", "g", "h"}, ["a"] = []string = {"b", "c", "d"}}`
sliceMapSfx2 := `map[string][]string = {["a"] = []string = {"b", "c", "d"}, ["e"] = []string = {"f", "g", "h"}}`
if bl := strings.ReplaceAll(blocks["print slicemap"], " ", " "); !strings.HasSuffix(bl, sliceMapSfx1) && !strings.HasSuffix(bl, sliceMapSfx2) {
t.Fatalf("print slicemap failed: %s", bl)
}
chanIntSfx := `chan int = {99, 11}`
if bl := strings.ReplaceAll(blocks["print chanint"], " ", " "); !strings.HasSuffix(bl, chanIntSfx) {
t.Fatalf("print chanint failed: %s", bl)
}
chanStrSfx := `chan string = {"spongepants", "squarebob"}`
if bl := strings.ReplaceAll(blocks["print chanstr"], " ", " "); !strings.HasSuffix(bl, chanStrSfx) {
t.Fatalf("print chanstr failed: %s", bl)
}
strVarRe := regexp.MustCompile(`^\$[0-9]+ = (0x[0-9a-f]+\s+)?"abc"$`)
if bl := blocks["print strvar"]; !strVarRe.MatchString(bl) {
t.Fatalf("print strvar failed: %s", bl)
}
// The exact format of composite values has changed over time.
// For issue 16338: ssa decompose phase split a slice into
// a collection of scalar vars holding its fields. In such cases
// the DWARF variable location expression should be of the
// form "var.field" and not just "field".
// However, the newer dwarf location list code reconstituted
// aggregates from their fields and reverted their printing
// back to its original form.
// Only test that all variables are listed in 'info locals' since
// different versions of gdb print variables in different
// order and with differing amount of information and formats.
if bl := blocks["info locals"]; !strings.Contains(bl, "slicevar") ||
!strings.Contains(bl, "mapvar") ||
!strings.Contains(bl, "strvar") {
t.Fatalf("info locals failed: %s", bl)
}
// Check that the backtraces are well formed.
checkCleanBacktrace(t, blocks["goroutine 1 bt"])
checkCleanBacktrace(t, blocks["goroutine 1 bt at the end"])
btGoroutine1Re := regexp.MustCompile(`(?m)^#0\s+(0x[0-9a-f]+\s+in\s+)?main\.main.+at`)
if bl := blocks["goroutine 1 bt"]; !btGoroutine1Re.MatchString(bl) {
t.Fatalf("goroutine 1 bt failed: %s", bl)
}
if bl := blocks["goroutine all bt"]; !btGoroutine1Re.MatchString(bl) {
t.Fatalf("goroutine all bt failed: %s", bl)
}
btGoroutine1AtTheEndRe := regexp.MustCompile(`(?m)^#0\s+(0x[0-9a-f]+\s+in\s+)?main\.main.+at`)
if bl := blocks["goroutine 1 bt at the end"]; !btGoroutine1AtTheEndRe.MatchString(bl) {
t.Fatalf("goroutine 1 bt at the end failed: %s", bl)
}
}
const backtraceSource = `
package main
//go:noinline
func aaa() bool { return bbb() }
//go:noinline
func bbb() bool { return ccc() }
//go:noinline
func ccc() bool { return ddd() }
//go:noinline
func ddd() bool { return f() }
//go:noinline
func eee() bool { return true }
var f = eee
func main() {
_ = aaa()
}
`
// TestGdbBacktrace tests that gdb can unwind the stack correctly
// using only the DWARF debug info.
func TestGdbBacktrace(t *testing.T) {
if runtime.GOOS == "netbsd" {
testenv.SkipFlaky(t, 15603)
}
checkGdbEnvironment(t)
t.Parallel()
checkGdbVersion(t)
dir := t.TempDir()
// Build the source code.
src := filepath.Join(dir, "main.go")
err := os.WriteFile(src, []byte(backtraceSource), 0644)
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "a.exe", "main.go")
cmd.Dir = dir
out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
if err != nil {
t.Fatalf("building source %v\n%s", err, out)
}
// Execute gdb commands.
args := []string{"-nx", "-batch",
"-iex", "add-auto-load-safe-path " + filepath.Join(runtime.GOROOT(), "src", "runtime"),
"-ex", "set startup-with-shell off",
"-ex", "break main.eee",
"-ex", "run",
"-ex", "backtrace",
"-ex", "continue",
filepath.Join(dir, "a.exe"),
}
got, err := exec.Command("gdb", args...).CombinedOutput()
t.Logf("gdb output:\n%s", got)
if err != nil {
t.Fatalf("gdb exited with error: %v", err)
}
// Check that the backtrace matches the source code.
bt := []string{
"eee",
"ddd",
"ccc",
"bbb",
"aaa",
"main",
}
for i, name := range bt {
s := fmt.Sprintf("#%v.*main\\.%v", i, name)
re := regexp.MustCompile(s)
if found := re.Find(got) != nil; !found {
t.Fatalf("could not find '%v' in backtrace", s)
}
}
}
const autotmpTypeSource = `
package main
type astruct struct {
a, b int
}
func main() {
var iface interface{} = map[string]astruct{}
var iface2 interface{} = []astruct{}
println(iface, iface2)
}
`
// TestGdbAutotmpTypes ensures that types of autotmp variables appear in .debug_info
// See bug #17830.
func TestGdbAutotmpTypes(t *testing.T) {
checkGdbEnvironment(t)
t.Parallel()
checkGdbVersion(t)
if runtime.GOOS == "aix" && testing.Short() {
t.Skip("TestGdbAutotmpTypes is too slow on aix/ppc64")
}
dir := t.TempDir()
// Build the source code.
src := filepath.Join(dir, "main.go")
err := os.WriteFile(src, []byte(autotmpTypeSource), 0644)
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-gcflags=all=-N -l", "-o", "a.exe", "main.go")
cmd.Dir = dir
out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
if err != nil {
t.Fatalf("building source %v\n%s", err, out)
}
// Execute gdb commands.
args := []string{"-nx", "-batch",
"-iex", "add-auto-load-safe-path " + filepath.Join(runtime.GOROOT(), "src", "runtime"),
"-ex", "set startup-with-shell off",
"-ex", "break main.main",
"-ex", "run",
"-ex", "step",
"-ex", "info types astruct",
filepath.Join(dir, "a.exe"),
}
got, err := exec.Command("gdb", args...).CombinedOutput()
t.Logf("gdb output:\n%s", got)
if err != nil {
t.Fatalf("gdb exited with error: %v", err)
}
sgot := string(got)
	// Check that the expected autotmp types appear in the gdb output.
types := []string{
"[]main.astruct;",
"bucket<string,main.astruct>;",
"hash<string,main.astruct>;",
"main.astruct;",
"hash<string,main.astruct> * map[string]main.astruct;",
}
for _, name := range types {
if !strings.Contains(sgot, name) {
t.Fatalf("could not find %s in 'info typrs astruct' output", name)
}
}
}
const constsSource = `
package main
const aConstant int = 42
const largeConstant uint64 = ^uint64(0)
const minusOne int64 = -1
func main() {
println("hello world")
}
`
func TestGdbConst(t *testing.T) {
checkGdbEnvironment(t)
t.Parallel()
checkGdbVersion(t)
dir := t.TempDir()
// Build the source code.
src := filepath.Join(dir, "main.go")
err := os.WriteFile(src, []byte(constsSource), 0644)
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-gcflags=all=-N -l", "-o", "a.exe", "main.go")
cmd.Dir = dir
out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
if err != nil {
t.Fatalf("building source %v\n%s", err, out)
}
// Execute gdb commands.
args := []string{"-nx", "-batch",
"-iex", "add-auto-load-safe-path " + filepath.Join(runtime.GOROOT(), "src", "runtime"),
"-ex", "set startup-with-shell off",
"-ex", "break main.main",
"-ex", "run",
"-ex", "print main.aConstant",
"-ex", "print main.largeConstant",
"-ex", "print main.minusOne",
"-ex", "print 'runtime.mSpanInUse'",
"-ex", "print 'runtime._PageSize'",
filepath.Join(dir, "a.exe"),
}
got, err := exec.Command("gdb", args...).CombinedOutput()
t.Logf("gdb output:\n%s", got)
if err != nil {
t.Fatalf("gdb exited with error: %v", err)
}
sgot := strings.ReplaceAll(string(got), "\r\n", "\n")
if !strings.Contains(sgot, "\n$1 = 42\n$2 = 18446744073709551615\n$3 = -1\n$4 = 1 '\\001'\n$5 = 8192") {
t.Fatalf("output mismatch")
}
}
const panicSource = `
package main
import "runtime/debug"
func main() {
debug.SetTraceback("crash")
crash()
}
func crash() {
panic("panic!")
}
`
// TestGdbPanic tests that gdb can unwind the stack correctly
// from SIGABRTs from Go panics.
func TestGdbPanic(t *testing.T) {
checkGdbEnvironment(t)
t.Parallel()
checkGdbVersion(t)
dir := t.TempDir()
// Build the source code.
src := filepath.Join(dir, "main.go")
err := os.WriteFile(src, []byte(panicSource), 0644)
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "a.exe", "main.go")
cmd.Dir = dir
out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
if err != nil {
t.Fatalf("building source %v\n%s", err, out)
}
// Execute gdb commands.
args := []string{"-nx", "-batch",
"-iex", "add-auto-load-safe-path " + filepath.Join(runtime.GOROOT(), "src", "runtime"),
"-ex", "set startup-with-shell off",
"-ex", "run",
"-ex", "backtrace",
filepath.Join(dir, "a.exe"),
}
got, err := exec.Command("gdb", args...).CombinedOutput()
t.Logf("gdb output:\n%s", got)
if err != nil {
t.Fatalf("gdb exited with error: %v", err)
}
// Check that the backtrace matches the source code.
bt := []string{
`crash`,
`main`,
}
for _, name := range bt {
s := fmt.Sprintf("(#.* .* in )?main\\.%v", name)
re := regexp.MustCompile(s)
if found := re.Find(got) != nil; !found {
t.Fatalf("could not find '%v' in backtrace", s)
}
}
}
const InfCallstackSource = `
package main
import "C"
import "time"
func loop() {
for i := 0; i < 1000; i++ {
time.Sleep(time.Millisecond*5)
}
}
func main() {
go loop()
time.Sleep(time.Second * 1)
}
`
// TestGdbInfCallstack tests that gdb can unwind the callstack of cgo programs
// on arm64 platforms without endless frames of function 'crossfunc1'.
// https://golang.org/issue/37238
func TestGdbInfCallstack(t *testing.T) {
checkGdbEnvironment(t)
testenv.MustHaveCGO(t)
if runtime.GOARCH != "arm64" {
t.Skip("skipping infinite callstack test on non-arm64 arches")
}
t.Parallel()
checkGdbVersion(t)
dir := t.TempDir()
// Build the source code.
src := filepath.Join(dir, "main.go")
err := os.WriteFile(src, []byte(InfCallstackSource), 0644)
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "a.exe", "main.go")
cmd.Dir = dir
out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
if err != nil {
t.Fatalf("building source %v\n%s", err, out)
}
// Execute gdb commands.
// 'setg_gcc' is the first point where we can reproduce the issue with just one 'run' command.
args := []string{"-nx", "-batch",
"-iex", "add-auto-load-safe-path " + filepath.Join(runtime.GOROOT(), "src", "runtime"),
"-ex", "set startup-with-shell off",
"-ex", "break setg_gcc",
"-ex", "run",
"-ex", "backtrace 3",
"-ex", "disable 1",
"-ex", "continue",
filepath.Join(dir, "a.exe"),
}
got, err := exec.Command("gdb", args...).CombinedOutput()
t.Logf("gdb output:\n%s", got)
if err != nil {
t.Fatalf("gdb exited with error: %v", err)
}
	// Check that the backtrace matches the expected frames.
	// Only the 3 innermost frames are checked, as they are guaranteed to be present according to gcc_<OS>_arm64.c.
bt := []string{
`setg_gcc`,
`crosscall1`,
`threadentry`,
}
for i, name := range bt {
s := fmt.Sprintf("#%v.*%v", i, name)
re := regexp.MustCompile(s)
if found := re.Find(got) != nil; !found {
t.Fatalf("could not find '%v' in backtrace", s)
}
}
}
| [
"\"GOROOT_FINAL\""
]
| []
| [
"GOROOT_FINAL"
]
| [] | ["GOROOT_FINAL"] | go | 1 | 0 | |
lib/srv/exec_test.go | /*
Copyright 2015 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package srv
import (
"fmt"
"net"
"os"
"os/user"
"path"
"path/filepath"
"gopkg.in/check.v1"
"golang.org/x/crypto/ssh"
"github.com/gravitational/teleport/lib/auth"
authority "github.com/gravitational/teleport/lib/auth/testauthority"
"github.com/gravitational/teleport/lib/backend"
"github.com/gravitational/teleport/lib/backend/boltbk"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/utils"
)
// ExecSuite also implements ssh.ConnMetadata
type ExecSuite struct {
usr *user.User
ctx *ctx
localAddr net.Addr
remoteAddr net.Addr
}
var _ = check.Suite(&ExecSuite{})
var _ = fmt.Printf
func (s *ExecSuite) SetUpSuite(c *check.C) {
bk, err := boltbk.New(backend.Params{"path": c.MkDir()})
c.Assert(err, check.IsNil)
a := auth.NewAuthServer(&auth.InitConfig{
Backend: bk,
Authority: authority.New(),
})
// set cluster name
clusterName, err := services.NewClusterName(services.ClusterNameSpecV2{
ClusterName: "localhost",
})
c.Assert(err, check.IsNil)
err = a.SetClusterName(clusterName)
c.Assert(err, check.IsNil)
// set static tokens
staticTokens, err := services.NewStaticTokens(services.StaticTokensSpecV2{
StaticTokens: []services.ProvisionToken{},
})
c.Assert(err, check.IsNil)
err = a.SetStaticTokens(staticTokens)
c.Assert(err, check.IsNil)
utils.InitLoggerForTests()
s.usr, _ = user.Current()
s.ctx = &ctx{isTestStub: true}
s.ctx.login = s.usr.Username
s.ctx.session = &session{id: "xxx"}
s.ctx.teleportUser = "galt"
s.ctx.conn = &ssh.ServerConn{Conn: s}
s.ctx.exec = &execResponse{ctx: s.ctx}
s.ctx.srv = &Server{authService: a, uuid: "00000000-0000-0000-0000-000000000000"}
s.localAddr, _ = utils.ParseAddr("127.0.0.1:3022")
s.remoteAddr, _ = utils.ParseAddr("10.0.0.5:4817")
}
func (s *ExecSuite) TestOSCommandPrep(c *check.C) {
expectedEnv := []string{
"LANG=en_US.UTF-8",
getDefaultEnvPath("1000", defaultLoginDefsPath),
fmt.Sprintf("HOME=%s", s.usr.HomeDir),
fmt.Sprintf("USER=%s", s.usr.Username),
"SHELL=/bin/sh",
"SSH_TELEPORT_USER=galt",
"SSH_SESSION_WEBPROXY_ADDR=<proxyhost>:3080",
"SSH_TELEPORT_HOST_UUID=00000000-0000-0000-0000-000000000000",
"SSH_TELEPORT_CLUSTER_NAME=localhost",
"TERM=xterm",
"SSH_CLIENT=10.0.0.5 4817 3022",
"SSH_CONNECTION=10.0.0.5 4817 127.0.0.1 3022",
"SSH_SESSION_ID=xxx",
}
// empty command (simple shell)
cmd, err := prepInteractiveCommand(s.ctx)
c.Assert(err, check.IsNil)
c.Assert(cmd, check.NotNil)
c.Assert(cmd.Path, check.Equals, "/bin/sh")
c.Assert(cmd.Args, check.DeepEquals, []string{"-sh"})
c.Assert(cmd.Dir, check.Equals, s.usr.HomeDir)
c.Assert(cmd.Env, check.DeepEquals, expectedEnv)
// non-empty command (exec a prog)
s.ctx.isTestStub = true
s.ctx.exec.cmdName = "ls -lh /etc"
cmd, err = prepareCommand(s.ctx)
c.Assert(err, check.IsNil)
c.Assert(cmd, check.NotNil)
c.Assert(cmd.Path, check.Equals, "/bin/sh")
c.Assert(cmd.Args, check.DeepEquals, []string{"/bin/sh", "-c", "ls -lh /etc"})
c.Assert(cmd.Dir, check.Equals, s.usr.HomeDir)
c.Assert(cmd.Env, check.DeepEquals, expectedEnv)
// command without args
s.ctx.exec.cmdName = "top"
cmd, err = prepareCommand(s.ctx)
c.Assert(err, check.IsNil)
c.Assert(cmd.Path, check.Equals, "/bin/sh")
c.Assert(cmd.Args, check.DeepEquals, []string{"/bin/sh", "-c", "top"})
}
func (s *ExecSuite) TestLoginDefsParser(c *check.C) {
expectedEnvSuPath := "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/bar"
expectedSuPath := "PATH=/usr/local/bin:/usr/bin:/bin:/foo"
c.Assert(getDefaultEnvPath("0", "../../fixtures/login.defs"), check.Equals, expectedEnvSuPath)
c.Assert(getDefaultEnvPath("1000", "../../fixtures/login.defs"), check.Equals, expectedSuPath)
c.Assert(getDefaultEnvPath("1000", "bad/file"), check.Equals, defaultEnvPath)
}
// implementation of ssh.Conn interface
func (s *ExecSuite) User() string { return s.usr.Username }
func (s *ExecSuite) SessionID() []byte { return []byte{1, 2, 3} }
func (s *ExecSuite) ClientVersion() []byte { return []byte{1} }
func (s *ExecSuite) ServerVersion() []byte { return []byte{1} }
func (s *ExecSuite) RemoteAddr() net.Addr { return s.remoteAddr }
func (s *ExecSuite) LocalAddr() net.Addr { return s.localAddr }
func (s *ExecSuite) Close() error { return nil }
func (s *ExecSuite) SendRequest(string, bool, []byte) (bool, []byte, error) { return false, nil, nil }
func (s *ExecSuite) OpenChannel(string, []byte) (ssh.Channel, <-chan *ssh.Request, error) {
return nil, nil, nil
}
func (s *ExecSuite) Wait() error { return nil }
// findExecutable helper finds a given executable name (like 'ls') in $PATH
// and returns the full path
func findExecutable(execName string) string {
for _, dir := range filepath.SplitList(os.Getenv("PATH")) {
fp := path.Join(dir, execName)
if utils.IsFile(fp) {
return fp
}
}
return "not found in $PATH: " + execName
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
developer-tools/python/genbootstrap.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Generate bootstrap.dat
Script will connect to Gulden rpc server and fetch all blocks necessary to create a clean bootstrap.dat file
The results are incremental i.e. you can re-run this periodically (if you leave the files in place) and it will update the exiting file with only the latest blocks instead of completely regenerating it
First run is slow (few hours) but subsequent runs should be reasonably fast as they have much less work to do
"""
from bitcoinrpc.authproxy import AuthServiceProxy
import sys
import string
import os
import binascii
import struct
__copyright__ = 'Copyright (c) 2019 The Gulden developers'
__license__ = 'Distributed under the GULDEN software license, see the accompanying file COPYING'
__author__ = 'Malcolm MacLeod'
__email__ = '[email protected]'
# ===== BEGIN USER SETTINGS =====
rpcuser=os.environ["CHECKPOINT_RPC_USER"]
rpcpass=os.environ["CHECKPOINT_RPC_PASSWORD"]
rpcport=os.environ["CHECKPOINT_RPC_PORT"]
rpcip=os.environ["CHECKPOINT_RPC_IP"]
messageheader=os.environ["CHECKPOINT_MSG_HEADER"]
bootstrapfilename=os.environ["BOOTSTRAP_FILENAME"]
# ====== END USER SETTINGS ======
access = AuthServiceProxy("http://"+rpcuser+":"+rpcpass+"@"+rpcip+":"+rpcport)
bootstrap_start=0
try:
bootstrap_file_last = open(bootstrapfilename+".pos", "r")
    bootstrap_start = int(bootstrap_file_last.read())
bootstrap_file_last.close()
print("pre-existing bootstrap, starting from", bootstrap_start)
bootstrap_file = open(bootstrapfilename, "ba")
except IOError as e:
print("no pre-existing bootstrap, starting from 0")
bootstrap_file = open(bootstrapfilename, "wb")
def print_bootstrap(height):
hash = access.getblockhash(height)
block_hex = access.getblock(hash, 0)
block_bin = binascii.unhexlify(block_hex)
bootstrap_file.write(binascii.unhexlify(messageheader))
bootstrap_file.write(struct.pack('i', len(block_bin)))
bootstrap_file.write(block_bin)
# generate bootstrap from genesis->chaintip-100
chain_height = access.getblockcount()
last_bootstrap = chain_height - 100
h = bootstrap_start
while h < last_bootstrap:
if (h % 1000 == 0):
print("writing block ", h)
print_bootstrap(h)
h = h + 1
bootstrap_file.close()
bootstrap_file_last = open(bootstrapfilename+".pos", "w")
bootstrap_file_last.write(str(last_bootstrap))
bootstrap_file_last.close()
print("done writing bootstrap, final height: ", last_bootstrap)
| []
| []
| [
"CHECKPOINT_RPC_IP",
"CHECKPOINT_RPC_PORT",
"CHECKPOINT_RPC_USER",
"CHECKPOINT_MSG_HEADER",
"CHECKPOINT_RPC_PASSWORD",
"BOOTSTRAP_FILENAME"
]
| [] | ["CHECKPOINT_RPC_IP", "CHECKPOINT_RPC_PORT", "CHECKPOINT_RPC_USER", "CHECKPOINT_MSG_HEADER", "CHECKPOINT_RPC_PASSWORD", "BOOTSTRAP_FILENAME"] | python | 6 | 0 | |
mtools/test/test_mlaunch.py | import inspect
import json
import os
import shutil
import sys
import time
import unittest
from bson import SON
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from nose.tools import raises, timed
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
from mtools.mlaunch.mlaunch import MLaunchTool
# temporarily skipping mlaunch tests until issues are sorted out
raise SkipTest
class TestMLaunch(object):
"""
This class tests functionality around the mlaunch tool. It has some
additional methods that are helpful for the tests, as well as a setup
and teardown method for all tests.
Don't call tests from other tests. This won't work as each test gets
its own data directory (for debugging).
"""
port = 33333
base_dir = 'data_test_mlaunch'
def __init__(self):
"""Constructor."""
self.use_auth = False
self.data_dir = ''
def setup(self):
"""Start up method to create mlaunch tool and find free port."""
self.tool = MLaunchTool(test=True)
# if the test data path exists, remove it
if os.path.exists(self.base_dir):
shutil.rmtree(self.base_dir)
def teardown(self):
"""Tear down method after each test, removes data directory."""
# kill all running processes
self.tool.discover()
ports = self.tool.get_tagged(['all', 'running'])
processes = self.tool._get_processes().values()
for p in processes:
p.terminate()
p.wait(10)
self.tool.wait_for(ports, to_start=False)
# quick sleep to avoid spurious test failures
time.sleep(1)
# if the test data path exists, remove it
if os.path.exists(self.base_dir):
shutil.rmtree(self.base_dir)
def run_tool(self, arg_str):
"""Wrapper to call self.tool.run() with or without auth."""
# name data directory according to test method name
caller = inspect.stack()[1][3]
self.data_dir = os.path.join(self.base_dir, caller)
# add data directory to arguments for all commands
arg_str += ' --dir %s' % self.data_dir
if arg_str.startswith('init') or arg_str.startswith('--'):
# add --port and --nojournal to init calls
arg_str += ' --port %i --nojournal --smallfiles' % self.port
if self.use_auth:
# add --auth to init calls if flag is set
arg_str += ' --auth'
self.tool.run(arg_str)
# -- tests below ---
@raises(ConnectionFailure)
def test_test(self):
"""TestMLaunch setup and teardown test."""
# test that data dir does not exist
assert not os.path.exists(self.data_dir)
# start mongo process on free test port
self.run_tool("init --single")
# call teardown method within this test
self.teardown()
# test that data dir does not exist anymore
assert not os.path.exists(self.data_dir)
# test that mongod is not running on this port anymore
# (raises ConnectionFailure)
mc = MongoClient('localhost:%i' % self.port,
serverSelectionTimeoutMS=100).server_info()
print(mc['version'])
def test_argv_run(self):
"""
mlaunch: test true command line arguments, instead of passing
into tool.run().
"""
# make command line arguments through sys.argv
sys.argv = ['mlaunch', 'init', '--single', '--dir', self.base_dir,
'--port', str(self.port), '--nojournal']
self.tool.run()
assert self.tool.is_running(self.port)
def test_init_default(self):
"""mlaunch: test that 'init' command can be omitted, is default. """
# make command line arguments through sys.argv
sys.argv = ['mlaunch', '--single', '--dir', self.base_dir,
'--port', str(self.port), '--nojournal']
self.tool.run()
assert self.tool.is_running(self.port)
def test_init_default_arguments(self):
"""
mlaunch: test that 'init' command is default, even when specifying
arguments to run().
"""
self.run_tool("--single")
assert self.tool.is_running(self.port)
def test_single(self):
"""mlaunch: start stand-alone server and tear down again."""
# start mongo process on free test port
self.run_tool("init --single")
# make sure node is running
assert self.tool.is_running(self.port)
# check if data directory and logfile exist
assert os.path.exists(os.path.join(self.data_dir, 'db'))
assert os.path.isfile(os.path.join(self.data_dir, 'mongod.log'))
# check that the tags are set correctly: 'single', 'mongod',
# 'running', <port>
assert set(self.tool.get_tags_of_port(self.port)) == set(['running',
'mongod',
'all',
'single',
str(self.
port)])
def test_replicaset_conf(self):
"""Start replica set of 2 nodes + arbiter and compare rs.conf()."""
# start mongo process on free test port
self.run_tool("init --replicaset --nodes 2 --arbiter")
# check if data directories exist
assert os.path.exists(os.path.join(self.data_dir, 'replset'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs1'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs2'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/arb'))
# create mongo client for the next tests
mc = MongoClient('localhost:%i' % self.port)
# get rs.conf() and check for 3 members, exactly one is arbiter
conf = mc['local']['system.replset'].find_one()
assert len(conf['members']) == 3
assert sum(1 for memb in conf['members']
if 'arbiterOnly' in memb and memb['arbiterOnly']) == 1
@timed(60)
@attr('slow')
def test_replicaset_ismaster(self):
"""Start replica set and verify that first node becomes primary."""
# start mongo process on free test port
self.run_tool("init --replicaset")
# wait for primary
assert self.tool._wait_for_primary()
# insert a document and wait to replicate to 2 secondaries
        # (10 minute wtimeout)
mc = MongoClient('localhost:%i' % self.port)
mc.test.smokeWait.insert({}, w=2, wtimeout=10 * 60 * 1000)
@unittest.skip('incompatible with 3.4 CSRS')
def test_sharded_status(self):
"""Start cluster with 2 shards of single nodes, 1 config server."""
# start mongo process on free test port
self.run_tool("init --sharded 2 --single")
# check if data directories and logfile exist
assert os.path.exists(os.path.join(self.data_dir, 'shard01/db'))
assert os.path.exists(os.path.join(self.data_dir, 'shard02/db'))
assert os.path.exists(os.path.join(self.data_dir, 'config/db'))
assert os.path.isfile(os.path.join(self.data_dir, 'mongos.log'))
# create mongo client
mc = MongoClient('localhost:%i' % (self.port))
# check for 2 shards and 1 mongos
assert mc['config']['shards'].count() == 2
assert mc['config']['mongos'].count() == 1
def helper_output_has_line_with(self, keywords, output):
"""Check if output contains a line where all keywords are present."""
return len(filter(None, [all([kw in line for kw in keywords])
for line in output]))
@unittest.skip('incompatible with 3.4 CSRS')
def test_verbose_sharded(self):
"""Test verbose output when creating sharded cluster."""
self.run_tool("init --sharded 2 --replicaset --config 3 "
"--mongos 2 --verbose")
# capture stdout
output = sys.stdout.getvalue().splitlines()
keywords = ('rs1', 'rs2', 'rs3', 'shard01', 'shard02', 'config1',
'config2', 'config3')
# creating directory
for keyword in keywords:
# make sure every directory creation was announced to stdout
assert self.helper_output_has_line_with(['creating directory',
keyword, 'db'], output)
assert self.helper_output_has_line_with(['creating directory',
'mongos'], output)
# launching nodes
for keyword in keywords:
assert self.helper_output_has_line_with(['launching', keyword,
'--port', '--logpath',
'--dbpath'], output)
# mongos
assert self.helper_output_has_line_with(['launching', 'mongos',
'--port', '--logpath',
str(self.port)], output)
assert self.helper_output_has_line_with(['launching', 'mongos',
'--port', '--logpath',
str(self.port + 1)], output)
# some fixed outputs
assert self.helper_output_has_line_with(['waiting for nodes to '
'start'], output)
assert self.helper_output_has_line_with(['adding shards. can take up '
'to 30 seconds'], output)
assert self.helper_output_has_line_with(['writing .mlaunch_startup '
'file'], output)
assert self.helper_output_has_line_with(['done'], output)
# replica sets initialized, shard added
for keyword in ('shard01', 'shard02'):
assert self.helper_output_has_line_with(['replica set', keyword,
'initialized'], output)
assert self.helper_output_has_line_with(['shard', keyword,
'added successfully'],
output)
def test_shard_names(self):
"""mlaunch: test if sharded cluster with explicit shard names works."""
# start mongo process on free test port
self.run_tool("init --sharded tic tac toe --replicaset")
# create mongo client
mc = MongoClient('localhost:%i' % (self.port))
# check that shard names match
shard_names = set(doc['_id'] for doc in mc['config']['shards'].find())
assert shard_names == set(['tic', 'tac', 'toe'])
def test_startup_file(self):
"""mlaunch: create .mlaunch_startup file in data path."""
# Also tests utf-8 to byte conversion and json import
self.run_tool("init --single -v")
# check if the startup file exists
startup_file = os.path.join(self.data_dir, '.mlaunch_startup')
assert os.path.isfile(startup_file)
# compare content of startup file with tool.args
file_contents = self.tool._convert_u2b(json.load(open(startup_file,
'r')))
assert file_contents['parsed_args'] == self.tool.args
assert file_contents['unknown_args'] == self.tool.unknown_args
def test_single_mongos_explicit(self):
"""
mlaunch: test if single mongos is running on start port and creates
<datadir>/mongos.log.
"""
# start 2 shards, 1 config server, 1 mongos
self.run_tool("init --sharded 2 --single --config 1 --mongos 1")
# check if mongos log files exist on correct ports
assert os.path.exists(os.path.join(self.data_dir, 'mongos.log'))
# check for correct port
assert self.tool.get_tagged('mongos') == set([self.port])
def test_single_mongos(self):
"""
        mlaunch: test that exactly one mongos is running when launched with --mongos 1.
"""
        # start 2 shards, 1 config server, 1 mongos
self.run_tool("init --sharded 2 --single --config 1 --mongos 1")
        # check that exactly 1 mongos is running
assert len(self.tool.get_tagged(['mongos', 'running'])) == 1
def test_multiple_mongos(self):
"""
mlaunch: test if multiple mongos use separate log files in 'mongos'
subdir.
"""
# start 2 shards, 1 config server, 2 mongos
self.run_tool("init --sharded 2 --single --config 1 --mongos 2")
# this also tests that mongos are started at the beginning of the
# port range
assert os.path.exists(os.path.join(self.data_dir, 'mongos',
'mongos_%i.log' % (self.port)))
assert os.path.exists(os.path.join(self.data_dir, 'mongos',
'mongos_%i.log' % (self.port + 1)))
# check that 2 mongos are running
assert len(self.tool.get_tagged(['mongos', 'running'])) == 2
def test_filter_valid_arguments(self):
"""Check arguments unknown to mlaunch against mongos and mongod."""
# filter against mongod
result = self.tool._filter_valid_arguments("--slowms 500 -vvv "
"--configdb localhost:27017"
" --foobar".split(),
"mongod")
assert result == "--slowms 500 -vvv"
# filter against mongos
result = self.tool._filter_valid_arguments("--slowms 500 -vvv "
"--configdb localhost:27017"
" --foobar".split(),
"mongos")
assert result == "-vvv --configdb localhost:27017"
def test_large_replicaset_arbiter(self):
"""mlaunch: start large replica set of 7 nodes with arbiter."""
# start mongo process on free test port
# (don't need journal for this test)
self.run_tool("init --replicaset --nodes 6 --arbiter")
# check if data directories exist
assert os.path.exists(os.path.join(self.data_dir, 'replset'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs1'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs2'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs3'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs4'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs5'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs6'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/arb'))
# create mongo client for the next tests
mc = MongoClient('localhost:%i' % self.port)
# get rs.conf() and check for 7 members, exactly one arbiter
conf = mc['local']['system.replset'].find_one()
assert len(conf['members']) == 7
assert sum(1 for memb in conf['members']
if 'arbiterOnly' in memb and memb['arbiterOnly']) == 1
# check that 7 nodes are discovered
assert len(self.tool.get_tagged('all')) == 7
def test_large_replicaset_noarbiter(self):
"""mlaunch: start large replica set of 7 nodes without arbiter."""
# start mongo process on free test port
# (don't need journal for this test)
self.run_tool("init --replicaset --nodes 7")
# check if data directories exist
assert os.path.exists(os.path.join(self.data_dir, 'replset'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs1'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs2'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs3'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs4'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs5'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs6'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs7'))
# create mongo client for the next tests
mc = MongoClient('localhost:%i' % self.port)
# get rs.conf() and check for 7 members, no arbiters
conf = mc['local']['system.replset'].find_one()
assert len(conf['members']) == 7
assert sum(1 for memb in conf['members']
if 'arbiterOnly' in memb and memb['arbiterOnly']) == 0
def test_stop(self):
"""mlaunch: test stopping all nodes """
self.run_tool("init --replicaset")
self.run_tool("stop")
# make sure all nodes are down
nodes = self.tool.get_tagged('all')
assert all(not self.tool.is_running(node) for node in nodes)
def test_kill_default(self):
"""mlaunch: test killing all nodes with default signal."""
# start sharded cluster and kill with default signal (15)
self.run_tool("init --sharded 2 --single")
self.run_tool("kill")
# make sure all nodes are down
nodes = self.tool.get_tagged('all')
assert all(not self.tool.is_running(node) for node in nodes)
def test_kill_sigterm(self):
"""mlaunch: test killing all nodes with SIGTERM."""
# start nodes again, this time, kill with string "SIGTERM"
self.run_tool("init --sharded 2 --single")
self.run_tool("kill --signal SIGTERM")
# make sure all nodes are down
nodes = self.tool.get_tagged('all')
assert all(not self.tool.is_running(node) for node in nodes)
def test_kill_sigkill(self):
"""mlaunch: test killing all nodes with SIGKILL."""
# start nodes again, this time, kill with signal 9 (SIGKILL)
self.run_tool("init --sharded 2 --single")
self.run_tool("kill --signal 9")
# make sure all nodes are down
nodes = self.tool.get_tagged('all')
assert all(not self.tool.is_running(node) for node in nodes)
def test_stop_start(self):
"""mlaunch: test stop and then re-starting nodes."""
# start mongo process on free test port
self.run_tool("init --replicaset")
self.run_tool("stop")
time.sleep(2)
self.run_tool("start")
# make sure all nodes are running
nodes = self.tool.get_tagged('all')
assert all(self.tool.is_running(node) for node in nodes)
@unittest.skip('tags implementation not up to date')
@timed(180)
@attr('slow')
def test_kill_partial(self):
"""Test killing and restarting tagged groups on different tags."""
# key is tag for command line, value is tag for get_tagged
tags = ['shard01', 'shard 1', 'mongos', 'config 1', str(self.port)]
# start large cluster
self.run_tool("init --sharded 2 --replicaset --config 3 --mongos 3")
# make sure all nodes are running
nodes = self.tool.get_tagged('all')
assert all(self.tool.is_running(node) for node in nodes)
# go through all tags, stop nodes for each tag, confirm only
# the tagged ones are down, start again
for tag in tags:
print("--------- %s" % tag)
self.run_tool("kill %s" % tag)
assert self.tool.get_tagged('down') == self.tool.get_tagged(tag)
time.sleep(1)
# short sleep, because travis seems to be sensitive and sometimes
# fails otherwise
self.run_tool("start")
assert len(self.tool.get_tagged('down')) == 0
time.sleep(1)
# make sure primaries are running again
# (we just failed them over above).
# while True is ok, because test times out after some time
while True:
primaries = self.tool.get_tagged('primary')
if len(primaries) == 2:
break
time.sleep(1)
self.tool.discover()
# test for primary, but as nodes lose their tags, needs to be manual
self.run_tool("kill primary")
assert len(self.tool.get_tagged('down')) == 2
def test_restart_with_unkown_args(self):
"""mlaunch: test start command with extra unknown arguments."""
# init environment (sharded, single shards ok)
self.run_tool("init --single")
# get verbosity of mongod, assert it is 0
mc = MongoClient(port=self.port)
loglevel = mc.admin.command(SON([('getParameter', 1), ('logLevel',
1)]))
assert loglevel[u'logLevel'] == 0
# stop and start nodes but pass in unknown_args
self.run_tool("stop")
# short sleep, because travis seems to be sensitive and
# sometimes fails otherwise
time.sleep(1)
self.run_tool("start -vv")
# compare that the nodes are restarted with the new unknown_args,
# assert loglevel is now 2
mc = MongoClient(port=self.port)
loglevel = mc.admin.command(SON([('getParameter', 1), ('logLevel',
1)]))
assert loglevel[u'logLevel'] == 2
# stop and start nodes without unknown args again
self.run_tool("stop")
# short sleep, because travis seems to be sensitive and
# sometimes fails otherwise
time.sleep(1)
self.run_tool("start")
# compare that the nodes are restarted with the previous loglevel
mc = MongoClient(port=self.port)
loglevel = mc.admin.command(SON([('getParameter', 1), ('logLevel',
1)]))
assert loglevel[u'logLevel'] == 0
@unittest.skip('currently not a useful test')
def test_start_stop_single_repeatedly(self):
"""Test starting and stopping single node in short succession."""
# repeatedly start single node
self.run_tool("init --single")
for i in range(10):
self.run_tool("stop")
# short sleep, because travis seems to be sensitive and
# sometimes fails otherwise
time.sleep(1)
self.run_tool("start")
@raises(SystemExit)
def test_init_init_replicaset(self):
"""mlaunch: test calling init a second time on the replica set."""
# init a replica set
self.run_tool("init --replicaset")
# now stop and init again, this should work if everything is
# stopped and identical environment
self.run_tool("stop")
self.run_tool("init --replicaset")
# but another init should fail with a SystemExit
self.run_tool("init --replicaset")
@unittest.skip('currently not a useful test')
def test_start_stop_replicaset_repeatedly(self):
"""Test starting and stopping replica set in short succession."""
# repeatedly start replicaset nodes
self.run_tool("init --replicaset")
for i in range(10):
self.run_tool("stop")
# short sleep, because travis seems to be sensitive and
# sometimes fails otherwise
time.sleep(1)
self.run_tool("start")
@attr('slow')
@attr('auth')
def test_repeat_all_with_auth(self):
"""Repeates all tests in this class (excluding itself) with auth."""
tests = [t for t in inspect.getmembers(self,
predicate=inspect.ismethod)
if t[0].startswith('test_')]
self.use_auth = True
for name, method in tests:
# don't call any tests that use auth already (tagged with
# 'auth' attribute), including this method
if hasattr(method, 'auth'):
continue
setattr(method.__func__, 'description',
method.__doc__.strip() + ', with auth.')
yield (method,)
self.use_auth = False
@attr('auth')
def test_replicaset_with_name(self):
"""mlaunch: test calling init on the replica set with given name."""
self.run_tool("init --replicaset --name testrs")
# create mongo client for the next tests
mc = MongoClient('localhost:%i' % self.port)
# get rs.conf() and check for its name
conf = mc['local']['system.replset'].find_one()
assert conf['_id'] == 'testrs'
# TODO
# - test functionality of --binarypath, --verbose
# All tests that use auth need to be decorated with @attr('auth')
def helper_adding_default_user(self, environment):
"""Helper function for the next test: test_adding_default_user()."""
self.run_tool("init %s --auth" % environment)
# connect and authenticate with default credentials:
# user / password on admin database
mc = MongoClient('localhost:%i' % self.port)
mc.admin.authenticate('user', password='password')
# check if the user roles are correctly set to the default roles
user = mc.admin.system.users.find_one()
assert(set([x['role']
for x in user['roles']]) == set(self.tool.
_default_auth_roles))
@attr('auth')
def test_adding_default_user(self):
envs = (
"--single",
"--replicaset",
"--sharded 2 --single",
"--sharded 2 --replicaset",
"--sharded 2 --single --config 3"
)
for env in envs:
method = self.helper_adding_default_user
setattr(method.__func__, 'description', method.__doc__.strip() +
', with ' + env)
yield (method, env)
@attr('auth')
def test_adding_default_user_no_mongos(self):
"""mlaunch: test that even with --mongos 0 there is a user created."""
self.run_tool("init --sharded 2 --single --mongos 0 --auth")
# connect to config server instead to check for credentials (no mongos)
ports = list(self.tool.get_tagged('config'))
mc = MongoClient('localhost:%i' % ports[0])
mc.admin.authenticate('user', password='password')
# check if the user roles are correctly set to the default roles
user = mc.admin.system.users.find_one()
assert(set([x['role']
for x in user['roles']]) == set(self.tool.
_default_auth_roles))
@attr('auth')
def test_adding_custom_user(self):
"""mlaunch: test custom username and password and custom roles."""
self.run_tool("init --single --auth --username corben "
"--password fitzroy --auth-roles dbAdminAnyDatabase "
"readWriteAnyDatabase userAdminAnyDatabase")
# connect and authenticate with default credentials:
# user / password on admin database
mc = MongoClient('localhost:%i' % self.port)
mc.admin.authenticate('corben', password='fitzroy')
# check if the user roles are correctly set to the specified roles
user = mc.admin.system.users.find_one()
print(user)
assert(set([x['role']
for x in user['roles']]) == set(["dbAdminAnyDatabase",
"readWriteAnyDatabase",
"userAdminAnyDatabase"]))
assert user['user'] == 'corben'
def test_existing_environment(self):
"""mlaunch: test warning for overwriting an existing environment."""
self.run_tool("init --single")
self.run_tool("stop")
try:
self.run_tool("init --replicaset")
except SystemExit as e:
assert 'different environment already exists' in e.message
@unittest.skip('mlaunch protocol upgrade is not needed at this point')
def test_upgrade_v1_to_v2(self):
"""mlaunch: test upgrade from protocol version 1 to 2."""
startup_options = {"name": "replset", "replicaset": True,
"dir": "./data", "authentication": False,
"single": False, "arbiter": False, "mongos": 1,
"binarypath": None, "sharded": None, "nodes": 3,
"config": 1, "port": 33333, "restart": False,
"verbose": False}
# create directory
self.run_tool("init --replicaset")
self.run_tool("stop")
# replace startup options
with open(os.path.join(self.base_dir, 'test_upgrade_v1_to_v2',
'.mlaunch_startup'), 'w') as f:
json.dump(startup_options, f, -1)
# now start with old config and check if upgrade worked
self.run_tool("start")
with open(os.path.join(self.base_dir, 'test_upgrade_v1_to_v2',
'.mlaunch_startup'), 'r') as f:
startup_options = json.load(f)
assert startup_options['protocol_version'] == 2
def test_sharded_named_1(self):
"""mlaunch: test --sharded <name> for a single shard."""
self.run_tool("init --sharded foo --single")
assert len(self.tool.get_tagged('foo')) == 1
def test_mlaunch_list(self):
"""mlaunch: test list command """
self.run_tool("init --sharded 2 --replicaset --mongos 2")
self.run_tool("list")
# capture stdout and only keep from actual LIST output
output = sys.stdout.getvalue().splitlines()
output = output[output.index(next(o for o in output
if o.startswith('PROCESS'))):]
assert self.helper_output_has_line_with(['PROCESS', 'STATUS',
'PORT'], output) == 1
assert self.helper_output_has_line_with(['mongos',
'running'], output) == 2
assert self.helper_output_has_line_with(['config server',
'running'], output) == 1
assert self.helper_output_has_line_with(['shard01'], output) == 1
assert self.helper_output_has_line_with(['shard02'], output) == 1
assert self.helper_output_has_line_with(['running',
'running'], output) == 9
def helper_which(self, pgm):
"""equivalent of which command."""
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, pgm)
if os.path.exists(p) and os.access(p, os.X_OK):
return p
def test_mlaunch_binary_path_start(self):
"""Test if --binarypath is persistent between init and start."""
# get true binary path (to test difference to not specifying one)
path = self.helper_which('mongod')
path = path[:path.rfind('/')]
self.run_tool("init --single --binarypath %s" % path)
self.run_tool("stop")
self.run_tool("start")
assert self.tool.loaded_args['binarypath'] == path
assert self.tool.startup_info[str(self.port)].startswith('%s/mongod'
% path)
self.run_tool("stop")
try:
self.run_tool("start --binarypath /some/other/path")
raise Exception
except Exception:
assert self.tool.args['binarypath'] == '/some/other/path'
assert(self.tool.startup_info[str(self.port)].
startswith('/some/other/path/mongod'))
@raises(SystemExit)
def test_single_and_arbiter(self):
"""mlaunch: test --single with --arbiter error."""
self.run_tool("init --single --arbiter")
def test_oplogsize_config(self):
"""mlaunch: test config server never receives --oplogSize parameter."""
self.run_tool("init --sharded 1 --single --oplogSize 19 --verbose")
output = sys.stdout.getvalue().splitlines()
output_launch_config = next(o for o in output if '--configsvr' in o)
assert '--oplogSize' not in output_launch_config
if __name__ == '__main__':
# run individual tests with normal print output
tml = TestMLaunch()
tml.setup()
tml.test_kill_partial()
tml.teardown()
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
profile/profile.go | package profile
import (
"log"
"os"
"runtime"
"runtime/pprof"
)
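// MaybeProfile enables profiling based on the CPUPROFILE and MEMPROFILE environment variables and returns a cleanup function that the caller should defer.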
func MaybeProfile() func() {
return Profile(os.Getenv("CPUPROFILE"), os.Getenv("MEMPROFILE"))
}
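// Profile starts CPU profiling to cpuProfilePath (if non-empty) and returns a function that stops profiling and, if memProfilePath is non-empty, writes a heap profile.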
func Profile(cpuProfilePath string, memProfilePath string) func() {
var deferFns []func()
if cpuProfilePath != "" {
f, err := os.Create(cpuProfilePath)
if err != nil {
log.Fatal("could not create CPU profile: ", err)
}
if err := pprof.StartCPUProfile(f); err != nil {
log.Fatal("could not start CPU profile: ", err)
} else {
deferFns = append(deferFns, func() {
pprof.StopCPUProfile()
if err := f.Close(); err != nil {
log.Fatal("could not close CPU profile: ", err)
}
})
}
}
return func() {
for _, fn := range deferFns {
fn()
}
if memProfilePath != "" {
f, err := os.Create(memProfilePath)
if err != nil {
log.Fatal("could not create memory profile: ", err)
}
if err := pprof.WriteHeapProfile(f); err != nil {
log.Fatal("could not write memory profile: ", err)
} else {
runtime.GC() // get up-to-date statistics
if err := f.Close(); err != nil {
log.Fatal("could not close memory profile: ", err)
}
}
}
}
}
| [
"\"CPUPROFILE\"",
"\"MEMPROFILE\""
]
| []
| [
"MEMPROFILE",
"CPUPROFILE"
]
| [] | ["MEMPROFILE", "CPUPROFILE"] | go | 2 | 0 | |
application/evaluate.py | # -*- coding: utf-8 -*-
"""
Evaluation script for final evaluation.
"""
import argparse
import os
from agent import Agent
from functions import BG, FEF, LIP, PFC, Retina, SC, VC, HP, CB
from oculoenv import Environment
from oculoenv import PointToTargetContent, ChangeDetectionContent, OddOneOutContent, VisualSearchContent, \
MultipleObjectTrackingContent, RandomDotMotionDiscriminationContent
from logger import Logger
"""
POINT_TO_TARGET : id=1, difficulty_range=3 (0, 1, 2)
CHANGE_DETECTION : id=2, difficulty_range=5 (0, 2, 4)
ODD_ONE_OUT : id=3, difficulty_range=0
VISUAL_SEARCH : id=4, difficulty_range=6 (0, 2, 5)
MULTIPLE_OBJECT_TRACKING : id=5, difficulty_range=6 (0, 2, 5)
RANDOM_DOT_MOTION_DISCRIMINATION: id=6 difficulty_range=5 (0, 2, 4)
"""
TASK1_DURATION = 60*30
TASK2_DURATION = 60*30
TASK3_DURATION = 60*30
TASK4_DURATION = 60*30
TASK5_DURATION = 60*40
TASK6_DURATION = 60*30
content_class_names = [
"PointToTargetContent",
"ChangeDetectionContent",
"OddOneOutContent",
"VisualSearchContent",
"MultipleObjectTrackingContent",
"RandomDotMotionDiscriminationContent"
]
class TrialResult(object):
""" Entry for one task trial result. """
def __init__(self, content_id, difficulty, reward, info):
self.content_id = content_id
self.difficulty = difficulty
self.reward = reward
if info['result'] == 'success':
self.succeed = 1
else:
self.succeed = 0
self.reaction_step = info['reaction_step']
def __lt__(self, other):
if self.content_id != other.content_id:
return self.content_id < other.content_id
else:
return self.difficulty < other.difficulty
def get_string(self):
return "{},{},{},{},{}".format(self.content_id,
self.difficulty,
self.reward,
self.succeed,
self.reaction_step)
class AggregatedResult(object):
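    """ Aggregated results for one (content_id, difficulty) pair. """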
def __init__(self, content_id, difficulty):
self.content_id = content_id
self.difficulty = difficulty
self.trial_results = []
def __lt__(self, other):
if self.content_id != other.content_id:
return self.content_id < other.content_id
else:
return self.difficulty < other.difficulty
def add_trial_result(self, trial_result):
self.trial_results.append(trial_result)
def aggegate(self):
reward_sum = 0
accuracy_sum = 0.0
reaction_step_sum = 0.0
for trial_result in self.trial_results:
reward_sum += trial_result.reward
accuracy_sum += trial_result.succeed
reaction_step_sum += trial_result.reaction_step
if len(self.trial_results) != 0:
average_accuracy = accuracy_sum / len(self.trial_results)
average_reaction_step = reaction_step_sum / len(self.trial_results)
else:
average_accuracy = 0.0
average_reaction_step = 0.0
trial_count = len(self.trial_results)
return (reward_sum, trial_count, average_accuracy, average_reaction_step)
def get_string(self):
reward_sum, trial_count, average_accuray, average_reaction_step = self.aggegate()
return "{},{},{},{},{:.3f},{:.2f}".format(self.content_id,
self.difficulty,
reward_sum,
trial_count,
average_accuray,
average_reaction_step)
class EvaluationTask(object):
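    """ One evaluation run: a task content id, difficulty level, and duration in steps. """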
def __init__(self, content_id, difficulty, duration):
self.content_id = content_id
self.difficulty = difficulty
self.duration = duration
def evaluate(self, agent):
print("content:{} difficulty:{} start".format(self.content_id, self.difficulty))
content_class_name = content_class_names[self.content_id-1]
content_class = globals()[content_class_name]
if self.difficulty >= 0:
content = content_class(difficulty=self.difficulty)
else:
content = content_class()
env = Environment(content)
obs = env.reset()
reward = 0
done = False
task_reward = 0
results = []
for i in range(self.duration):
image, angle = obs['screen'], obs['angle']
# Choose action by the agent's decision
action = agent(image, angle, reward, done)
# Foward environment one step
obs, reward, done, info = env.step(action)
if 'result' in info:
result = TrialResult(self.content_id,
self.difficulty,
reward,
info,)
results.append(result)
task_reward += reward
assert(done is not True)
print("content:{} difficulty:{} end, reward={}".format(self.content_id,
self.difficulty,
task_reward))
return results, task_reward
tasks = [
EvaluationTask(content_id=1, difficulty=0, duration=TASK1_DURATION),
EvaluationTask(content_id=2, difficulty=0, duration=TASK2_DURATION),
EvaluationTask(content_id=4, difficulty=0, duration=TASK4_DURATION),
EvaluationTask(content_id=5, difficulty=0, duration=TASK5_DURATION),
EvaluationTask(content_id=6, difficulty=0, duration=TASK6_DURATION),
EvaluationTask(content_id=1, difficulty=1, duration=TASK1_DURATION),
EvaluationTask(content_id=2, difficulty=2, duration=TASK2_DURATION),
EvaluationTask(content_id=4, difficulty=2, duration=TASK4_DURATION),
EvaluationTask(content_id=5, difficulty=2, duration=TASK5_DURATION),
EvaluationTask(content_id=6, difficulty=2, duration=TASK6_DURATION),
EvaluationTask(content_id=1, difficulty=2, duration=TASK1_DURATION),
EvaluationTask(content_id=2, difficulty=4, duration=TASK2_DURATION),
EvaluationTask(content_id=4, difficulty=5, duration=TASK4_DURATION),
EvaluationTask(content_id=5, difficulty=5, duration=TASK5_DURATION),
EvaluationTask(content_id=6, difficulty=4, duration=TASK6_DURATION),
EvaluationTask(content_id=3, difficulty=-1, duration=TASK3_DURATION)
]
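# Group trial results by (content_id, difficulty) into AggregatedResult entries and return them sorted.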
def aggregate_results(all_trial_results):
aggregated_results = dict()
for task in tasks:
aggeraged_result = AggregatedResult(task.content_id, task.difficulty)
aggregated_results[(task.content_id, task.difficulty)] = aggeraged_result
for trial_result in all_trial_results:
aggreraged_result = aggregated_results[(trial_result.content_id,
trial_result.difficulty)]
aggreraged_result.add_trial_result(trial_result)
return sorted(aggregated_results.values())
def save_results_to_file(results, file_path):
with open(file_path, mode='w') as f:
for result in results:
f.write(result.get_string())
f.write("\n")
def save_results(all_trial_results, log_path):
""" Save result into csv files. """
if not os.path.exists(log_path):
os.makedirs(log_path)
# Save raw trial results
sorted_all_trial_results = sorted(all_trial_results)
raw_file_path = "{}/raw_eval.csv".format(log_path)
save_results_to_file(sorted_all_trial_results, raw_file_path)
# Aggregate trial results
aggregated_results = aggregate_results(all_trial_results)
aggregated_file_path = "{}/agg_eval.csv".format(log_path)
save_results_to_file(aggregated_results, aggregated_file_path)
def evaluate(logger, log_path):
retina = Retina()
lip = LIP()
vc = VC()
pfc = PFC()
fef = FEF()
bg = BG(training=False, init_weight_path="./data/bg.pth")
sc = SC()
hp = HP()
cb = CB()
agent = Agent(
retina=retina,
lip=lip,
vc=vc,
pfc=pfc,
fef=fef,
bg=bg,
sc=sc,
hp=hp,
cb=cb
)
#bg.load_model("model.pkl")
total_reward = 0
all_trial_results = []
for i, task in enumerate(tasks):
trial_results, task_reward = task.evaluate(agent)
all_trial_results += trial_results
total_reward += task_reward
logger.log("evaluation_reward", total_reward, i)
# Save result csv files
save_results(all_trial_results, log_path)
print("evaluation finished:")
logger.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--log_file", help="Log file name", type=str, default="evaluate0")
args = parser.parse_args()
log_file = args.log_file
# Log is stored 'log' directory
log_path = "log/{}".format(log_file)
logger = Logger(log_path)
# Start evaluation
evaluate(logger, log_path)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | null | null | null |
microservices.go | package main
import (
"fmt"
"net/http"
"os"
"api"
)
func main() {
http.HandleFunc("/", index)
http.HandleFunc("/api/echo", echo)
http.HandleFunc("/api/books", api.BooksHandleFunc) // to retrive books, store books.
http.HandleFunc("/api/books/", api.BookHandleFunc) // to retrieve individual books by isbn, update them, delete them.
http.ListenAndServe(port(), nil)
}
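// port returns the listen address taken from the PORT environment variable, defaulting to ":8080".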
func port() string {
port := os.Getenv("PORT")
if len(port) == 0 {
port = "8080"
}
return ":" + port
}
func index(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Hola Cloud Go.")
}
func echo(w http.ResponseWriter, r *http.Request) {
message := r.URL.Query()["message"][0]
w.Header().Add("Content-Type", "text/plain")
fmt.Fprintf(w, message)
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
pkg/kn/flags/channel_types.go | // Copyright © 2020 The Knative Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package flags
import (
"fmt"
"strings"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/runtime/schema"
"knative.dev/client/pkg/kn/config"
)
type ChannelTypeFlags struct {
ctype string
}
func (i *ChannelTypeFlags) Add(f *pflag.FlagSet) {
f.StringVar(&i.ctype,
"type",
"",
"Override channel type to create, in the format '--type Group:Version:Kind'. "+
"If flag is not specified, it uses default messaging layer settings for channel type, cluster wide or specific namespace. "+
"You can configure aliases for channel types in kn config and refer the aliases with this flag. "+
"You can also refer inbuilt channel type InMemoryChannel using an alias 'imc' like '--type imc'. "+
"Examples: '--type messaging.knative.dev:v1alpha1:KafkaChannel' for specifying explicit Group:Version:Kind.")
for _, p := range config.GlobalConfig.ChannelTypeMappings() {
		// user configuration might override the default configuration
ctypeMappings[p.Alias] = schema.GroupVersionKind{
Kind: p.Kind,
Group: p.Group,
Version: p.Version,
}
}
}
// ctypeMappings maps aliases used for channel types to their GroupVersionKind
var ctypeMappings = map[string]schema.GroupVersionKind{
"imcv1beta1": {
Group: "messaging.knative.dev",
Version: "v1beta1",
Kind: "InMemoryChannel",
},
"imc": {
Group: "messaging.knative.dev",
Version: "v1",
Kind: "InMemoryChannel",
},
}
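// Parse resolves the value given to --type into a GroupVersionKind: a single
// token is looked up as an alias in ctypeMappings, while a three-part value is
// treated as an explicit 'Group:Version:Kind' triple. The sketch below is an
// illustrative example only (the literal value is a placeholder):
//
//	f := &ChannelTypeFlags{ctype: "messaging.knative.dev:v1:InMemoryChannel"}
//	gvk, err := f.Parse()
//	// gvk -> {Group: "messaging.knative.dev", Version: "v1", Kind: "InMemoryChannel"}, err == nil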
func (i *ChannelTypeFlags) Parse() (*schema.GroupVersionKind, error) {
parts := strings.Split(i.ctype, ":")
switch len(parts) {
case 1:
if typ, ok := ctypeMappings[i.ctype]; ok {
return &typ, nil
}
return nil, fmt.Errorf("Error: unknown channel type alias: '%s'", i.ctype)
case 3:
if parts[0] == "" || parts[1] == "" || parts[2] == "" {
return nil, fmt.Errorf("Error: incorrect value '%s' for '--type', must be in the format 'Group:Version:Kind' or configure an alias in kn config", i.ctype)
}
return &schema.GroupVersionKind{Group: parts[0], Version: parts[1], Kind: parts[2]}, nil
default:
return nil, fmt.Errorf("Error: incorrect value '%s' for '--type', must be in the format 'Group:Version:Kind' or configure an alias in kn config", i.ctype)
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
speedwatcher.py | from __future__ import unicode_literals
import os
import sys
from moviepy.editor import VideoFileClip
from moviepy.editor import concatenate_videoclips
import moviepy.video.fx.all as vfx
import moviepy.audio.fx.all as afx
import pysrt
from progress import progress
import pygame
# desktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')
class SpeedSilBySubs:
def __init__ (self, speedfactor):
# self.videofile = videofile
# self.subtitlefile = subtitlefile
self.speedfactor = float(speedfactor)
def speed(self, videofile, subtitlefile):
video = VideoFileClip(videofile)
audioclip = video.audio
subs = pysrt.open(subtitlefile)
def getsec(time):
return ((time.hour * 60 + time.minute) * 60 + time.second) + (time.microsecond /(1000 * 1000))
# print(subs)
times = []
new_subs = pysrt.SubRipFile()
for sub in subs:
times.append(getsec(sub.start.to_time()))
times.append(getsec(sub.end.to_time()))
finalclip = None
clips = []
l = len(times)
bul = True
while bul == True:
bul = False
for ind,obj in enumerate(times):
if ind + 1 < 2:
if obj >= times[ind + 1]:
times.pop(ind + 1)
if times[ind] > times[ind+1]:
times.pop(ind + 1)
else:
times.pop(ind)
l = l -2
bul = True
times.insert(0, 0)
duration_change = 0
times.append(video.duration)
for ind,obj in enumerate(times):
progress(ind +1, len(times), status="Speeding clips x" + str(self.speedfactor))
if ind + 1 < len(times):
if video.duration >= times[ind +1] and video.duration >= obj and obj >= 0 and obj >= 0 and obj < times[ind+1]:
clip = video.subclip(obj,times[ind+1])
if ind % 2 == 0 and obj != times[ind+1]:
clips.append(vfx.speedx(clip, factor=self.speedfactor).fx(afx.volumex, 0.4))
duration_change = duration_change + (clip.duration - clips[-1].duration)
else:
clips.append(clip)
part = subs.slice(starts_after={'milliseconds':(obj * 1000) -10}, ends_before={'milliseconds':(times[ind + 1] *1000) +10})
part.shift(seconds=(-1 * duration_change))
for sub in part:
new_subs.append(sub)
# clips[-1].preview()
# print(part)
print("\nConcatenation")
finalclip = concatenate_videoclips(clips)
print("New Length: ", finalclip.duration)
new_subs.save(videofile[:-4]+"-"+str(self.speedfactor)+"x-speeded.srt", encoding='utf-8')
finalclip.write_videofile(videofile[:-4]+"-"+str(self.speedfactor)+"x-speeded.mp4")
return (video.duration, finalclip.duration)
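# Illustrative usage only (file names are placeholders): SpeedSilBySubs(2.0).speed('lecture.mp4', 'lecture.srt')
# speeds up the un-subtitled stretches of the video and writes 'lecture-2.0x-speeded.mp4'
# plus a time-shifted 'lecture-2.0x-speeded.srt' next to the input file.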
| []
| []
| [
"USERPROFILE"
]
| [] | ["USERPROFILE"] | python | 1 | 0 | |
main/main.go | package main
import (
"encoding/json"
"fmt"
"github.com/3pings/acigo/aci"
"github.com/3pings/chiveAgent/utility"
"log"
"os"
"time"
)
func main() {
var nodeInfo = make(map[string][]string)
token := os.Getenv("SPARKTOKEN")
roomID := os.Getenv("SPARKROOMID")
// Get environment variables for APIC login
debug := os.Getenv("DEBUG") != ""
a, errLogin := login(debug)
if errLogin != nil {
log.Printf("exiting: %v", errLogin)
return
}
defer logout(a)
// display existing nodes
nodes, errList := a.NodeList()
if errList != nil {
log.Printf("could not list nodes: %v", errList)
return
}
// loop through to get temperature data per node
for {
for _, n := range nodes {
nRole := n["role"].(string)
cTime, tErr := time.Parse(time.RFC3339, n["currentTime"].(string))
if tErr != nil {
fmt.Println(tErr)
}
nodeDetails, errList := a.GetNodeTemp(n["dn"].(string), nRole)
if errList != nil {
log.Printf("could not list node details: %v", errList)
return
}
for _, d := range nodeDetails {
nName := n["name"].(string)
nTemp := d["currentMax"].(string)
nodeInfo[nName] = []string{nTemp, cTime.Format("2006-01-02 03:04:05")}
}
}
		// Put results of node data collection into JSON
		// Currently printing; still need to add an API call
jsonNode, _ := json.Marshal(nodeInfo)
utility.SendSparkMessage(token, roomID, string(jsonNode))
fmt.Println(nodeInfo)
// wait a defined number of seconds before looping back through
time.Sleep(60 * time.Second)
errRefresh := a.Refresh()
if errRefresh != nil {
log.Println(errRefresh)
os.Exit(3)
}
}
}
func login(debug bool) (*aci.Client, error) {
a, errNew := aci.New(aci.ClientOptions{Debug: false})
if errNew != nil {
return nil, fmt.Errorf("login new client error: %v", errNew)
}
// Since credentials have not been specified explicitly under ClientOptions,
// Login() will use env vars: APIC_HOSTS=host, APIC_USER=username, APIC_PASS=pwd
errLogin := a.Login()
if errLogin != nil {
return nil, fmt.Errorf("login error: %v", errLogin)
}
return a, nil
}
func logout(a *aci.Client) {
errLogout := a.Logout()
if errLogout != nil {
log.Printf("logout error: %v", errLogout)
return
}
log.Printf("logout: done")
}
| [
"\"SPARKTOKEN\"",
"\"SPARKROOMID\"",
"\"DEBUG\""
]
| []
| [
"SPARKROOMID",
"SPARKTOKEN",
"DEBUG"
]
| [] | ["SPARKROOMID", "SPARKTOKEN", "DEBUG"] | go | 3 | 0 | |
server/services/metrics/metrics.go | package metrics
import (
"os"
"github.com/prometheus/client_golang/prometheus"
)
const (
MetricsNamespace = "focalboard"
MetricsSubsystemBlocks = "blocks"
MetricsSubsystemWorkspaces = "workspaces"
MetricsSubsystemSystem = "system"
MetricsCloudInstallationLabel = "installationId"
)
type InstanceInfo struct {
Version string
BuildNum string
Edition string
InstallationID string
}
// Metrics is used to instrument the server with Prometheus metrics
type Metrics struct {
registry *prometheus.Registry
instance *prometheus.GaugeVec
startTime prometheus.Gauge
loginCount prometheus.Counter
loginFailCount prometheus.Counter
blocksInsertedCount prometheus.Counter
blocksDeletedCount prometheus.Counter
blockCount *prometheus.GaugeVec
workspaceCount prometheus.Gauge
blockLastActivity prometheus.Gauge
}
// NewMetrics is a factory method that creates a new metrics collector
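//
// Illustrative wiring (the literal values are placeholders, not taken from this
// repository):
//
//	m := NewMetrics(InstanceInfo{Version: "1.0.0", BuildNum: "42", Edition: "ce"})
//	m.IncrementLoginCount(1)
//	m.ObserveWorkspaceCount(3)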
func NewMetrics(info InstanceInfo) *Metrics {
m := &Metrics{}
m.registry = prometheus.NewRegistry()
options := prometheus.ProcessCollectorOpts{
Namespace: MetricsNamespace,
}
m.registry.MustRegister(prometheus.NewProcessCollector(options))
m.registry.MustRegister(prometheus.NewGoCollector())
additionalLabels := map[string]string{}
if info.InstallationID != "" {
additionalLabels[MetricsCloudInstallationLabel] = os.Getenv("MM_CLOUD_INSTALLATION_ID")
}
m.loginCount = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Subsystem: MetricsSubsystemSystem,
Name: "login_total",
Help: "Total number of logins.",
ConstLabels: additionalLabels,
})
m.registry.MustRegister(m.loginCount)
m.loginFailCount = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Subsystem: MetricsSubsystemSystem,
Name: "login_fail_total",
Help: "Total number of failed logins.",
ConstLabels: additionalLabels,
})
m.registry.MustRegister(m.loginFailCount)
m.instance = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Subsystem: MetricsSubsystemSystem,
Name: "focalboard_instance_info",
Help: "Instance information for Focalboard.",
ConstLabels: additionalLabels,
}, []string{"Version", "BuildNum", "Edition"})
m.registry.MustRegister(m.instance)
m.instance.WithLabelValues(info.Version, info.BuildNum, info.Edition).Set(1)
m.startTime = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Subsystem: MetricsSubsystemSystem,
Name: "server_start_time",
Help: "The time the server started.",
ConstLabels: additionalLabels,
})
m.startTime.SetToCurrentTime()
m.registry.MustRegister(m.startTime)
m.blocksInsertedCount = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Subsystem: MetricsSubsystemBlocks,
Name: "blocks_inserted_total",
Help: "Total number of blocks inserted.",
ConstLabels: additionalLabels,
})
m.registry.MustRegister(m.blocksInsertedCount)
m.blocksDeletedCount = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Subsystem: MetricsSubsystemBlocks,
Name: "blocks_deleted_total",
Help: "Total number of blocks deleted.",
ConstLabels: additionalLabels,
})
m.registry.MustRegister(m.blocksDeletedCount)
m.blockCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Subsystem: MetricsSubsystemBlocks,
Name: "blocks_total",
Help: "Total number of blocks.",
ConstLabels: additionalLabels,
}, []string{"BlockType"})
m.registry.MustRegister(m.blockCount)
m.workspaceCount = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Subsystem: MetricsSubsystemWorkspaces,
Name: "workspaces_total",
Help: "Total number of workspaces.",
ConstLabels: additionalLabels,
})
m.registry.MustRegister(m.workspaceCount)
m.blockLastActivity = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Subsystem: MetricsSubsystemBlocks,
Name: "blocks_last_activity",
Help: "Time of last block insert, update, delete.",
ConstLabels: additionalLabels,
})
m.registry.MustRegister(m.blockLastActivity)
return m
}
func (m *Metrics) IncrementLoginCount(num int) {
if m != nil {
m.loginCount.Add(float64(num))
}
}
func (m *Metrics) IncrementLoginFailCount(num int) {
if m != nil {
m.loginFailCount.Add(float64(num))
}
}
func (m *Metrics) IncrementBlocksInserted(num int) {
if m != nil {
m.blocksInsertedCount.Add(float64(num))
m.blockLastActivity.SetToCurrentTime()
}
}
func (m *Metrics) IncrementBlocksDeleted(num int) {
if m != nil {
m.blocksDeletedCount.Add(float64(num))
m.blockLastActivity.SetToCurrentTime()
}
}
func (m *Metrics) ObserveBlockCount(blockType string, count int64) {
if m != nil {
m.blockCount.WithLabelValues(blockType).Set(float64(count))
}
}
func (m *Metrics) ObserveWorkspaceCount(count int64) {
if m != nil {
m.workspaceCount.Set(float64(count))
}
}
| [
"\"MM_CLOUD_INSTALLATION_ID\""
]
| []
| [
"MM_CLOUD_INSTALLATION_ID"
]
| [] | ["MM_CLOUD_INSTALLATION_ID"] | go | 1 | 0 | |
cmd/ibm-manage-dl-gateway/main.go | package main
import (
"flag"
"fmt"
"os"
"errors"
"time"
"math/rand"
)
var (
apiKey string
approveCreationCmd = flag.NewFlagSet("approve-creation", flag.PanicOnError)
approveDeletionCmd = flag.NewFlagSet("approve-deletion", flag.PanicOnError)
)
type ErrorSlice []error
func (e ErrorSlice) Error() string {
s := ""
for _, err := range e {
s += ", " + err.Error()
}
return "Errors: " + s
}
func setupGlobalFlags() {
for _, fs := range []*flag.FlagSet{approveCreationCmd, approveDeletionCmd} {
fs.StringVar(
&apiKey,
"api-key",
"",
"The IBM Cloud platform API key. You must either set the api-key flag or source it from the IC_API_KEY (higher precedence) or IBMCLOUD_API_KEY environment variable",
)
}
}
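// checkCredentials falls back to the IC_API_KEY environment variable and then to
// IBMCLOUD_API_KEY when the --api-key flag was not provided, matching the flag
// help text above. Illustrative invocation (key and id are placeholders):
//
//	IC_API_KEY=xxxx ibm-manage-dl-gateway approve-creation --gateway-id <id>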
func checkCredentials() error {
//TODO check other credentials types
if apiKey == "" {
apiKey = os.Getenv("IC_API_KEY")
if apiKey == "" {
apiKey = os.Getenv("IBMCLOUD_API_KEY")
}
}
if apiKey == "" {
return errors.New("missing required credentials")
}
return nil
}
func main() {
if len(os.Args) < 2 {
fmt.Println("Missing required command. One of [approve-creation, approve-deletion]")
os.Exit(1)
}
if os.Args[1] == "-v" || os.Args[1] == "-version" || os.Args[1] == "--version" || os.Args[1] == "version" {
fmt.Println(Version)
os.Exit(0)
}
setupGlobalFlags()
if os.Args[1] == "approve-creation" {
approveCreationCmd.Usage = func() {
approveCreationCmd.PrintDefaults()
os.Exit(0)
}
gatewayID := approveCreationCmd.String("gateway-id", "", "(Required) Direct Link Connect gateway identifier.")
isGlobal := approveCreationCmd.Bool("global-routing", false, "(Optional) When true gateway can connect to networks outside of their associated region. Default is false.")
isMetered := approveCreationCmd.Bool("metered", true, "(Optional) When true gateway usage is billed per gigabyte. When false there is no per gigabyte usage charge, instead a flat rate is charged for the gateway. Default is true.")
connMode := approveCreationCmd.String("connection-mode", "direct", "(Optional) Connection mode. Mode transit indicates this gateway will be attached to Transit Gateway Service and direct means this gateway will be attached to vpc or classic connection. One of: direct, transit.")
resourceGroupID := approveCreationCmd.String("resource-group-id", "", "(Optional) If unspecified, the account's default resource group is used.")
//TODO AuthenticationKey (BGP MD5 AUTH KEY)
err := approveCreationCmd.Parse(os.Args[2:])
if err != nil {
fmt.Println("Flags parsing error")
panic(err)
}
errs := make([]error, 0)
err = checkCredentials()
if err != nil {
errs = append(errs, err)
}
if *gatewayID == "" {
			fmt.Println("Missing required gateway-id flag\nCommand flags:")
approveCreationCmd.PrintDefaults()
os.Exit(1)
}
if len(errs) != 0 {
err = ErrorSlice(errs)
approveCreationCmd.PrintDefaults()
panic(err)
}
client, err := NewClientWithApiKey(apiKey)
if err != nil {
fmt.Print(err.Error())
os.Exit(1)
}
_, err = CreateGatewayActionApprove(*client, *gatewayID, *isGlobal, *isMetered, *connMode, *resourceGroupID)
if err != nil {
panic(err)
}
err = retry(12, 10*time.Second, func() error {
gw, sc, err := GetGateway(*client, *gatewayID)
if err != nil {
return errors.New("fail")
}
if gw == nil || sc == 404 {
return stop{fmt.Errorf("Gateway %s not found", *gatewayID)}
}
if *gw.OperationalStatus == "provisioned" {
fmt.Println("Gateway creation confirmed")
return nil
}
if *gw.OperationalStatus == "delete_pending" || *gw.OperationalStatus == "create_rejected" {
return stop{fmt.Errorf("unexpected Gateway status error: %s", *gw.OperationalStatus)}
}
return errors.New("gateway is still being configured")
})
if err != nil {
panic(err)
}
os.Exit(0)
}
if os.Args[1] == "approve-deletion" {
approveDeletionCmd.Usage = func() {
approveDeletionCmd.PrintDefaults()
os.Exit(0)
}
gatewayName := approveDeletionCmd.String("gateway-name", "", "(Optional) Direct Link Connect gateway name.")
err := approveDeletionCmd.Parse(os.Args[2:])
if err != nil {
fmt.Println("Flags parsing error")
panic(err)
}
errs := make([]error, 0)
err = checkCredentials()
if err != nil {
errs = append(errs, err)
}
if *gatewayName == "" {
			fmt.Println("Missing required gateway-name flag\nCommand flags:")
approveDeletionCmd.PrintDefaults()
os.Exit(1)
}
if len(errs) != 0 {
err = ErrorSlice(errs)
approveDeletionCmd.PrintDefaults()
panic(err)
}
client, err := NewClientWithApiKey(apiKey)
if err != nil {
fmt.Print(err.Error())
os.Exit(1)
}
gws, err := ListGateways(*client)
if err != nil {
fmt.Print(err.Error())
os.Exit(1)
}
var gatewayID *string
for _, gw := range gws.Gateways {
if *gw.Name == *gatewayName {
gatewayID = gw.ID
break
}
}
err = DeleteGatewayActionApprove(*client, *gatewayID)
if err != nil {
panic(err)
}
retry(60, 10*time.Second, func() error {
gw, sc, err := GetGateway(*client, *gatewayID)
if err != nil {
return stop{fmt.Errorf("unexpected error during Gateway deletion")}
}
if gw == nil || sc == 404 {
fmt.Println("Gateway deletion confirmed")
return nil
}
return errors.New("gateway is still being configured")
})
os.Exit(0)
}
fmt.Println("Unknown command")
os.Exit(1)
}
func init() {
rand.Seed(time.Now().UnixNano())
}
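// retry runs f up to `attempts` times, sleeping between tries with exponential
// backoff plus random jitter. Wrapping an error in the stop type below aborts
// the remaining retries immediately. Illustrative sketch (probe is a
// hypothetical callback, not defined in this file):
//
//	err := retry(5, 10*time.Second, func() error { return probe() })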
func retry(attempts int, sleep time.Duration, f func() error) error {
if err := f(); err != nil {
if s, ok := err.(stop); ok {
return s.error
}
if attempts--; attempts > 0 {
jitter := time.Duration(rand.Int63n(int64(sleep)))
sleep = sleep + jitter/2
time.Sleep(sleep)
return retry(attempts, 2*sleep, f)
}
return err
}
return nil
}
type stop struct {
error
}
| [
"\"IC_API_KEY\"",
"\"IBMCLOUD_API_KEY\""
]
| []
| [
"IBMCLOUD_API_KEY",
"IC_API_KEY"
]
| [] | ["IBMCLOUD_API_KEY", "IC_API_KEY"] | go | 2 | 0 | |
main.go | package main
import (
provider "cloud-deploy.io/terraform-provider-cloud-deploy/ghost"
"github.com/hashicorp/terraform/plugin"
)
func main() {
p := plugin.ServeOpts{
ProviderFunc: provider.Provider,
}
plugin.Serve(&p)
}
| []
| []
| []
| [] | [] | go | null | null | null |
welcome.py | # Copyright 2015 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/')
def Welcome():
return app.send_static_file('index.html')
@app.route('/test')
def test_method():
return "this is a test"
@app.route('/myapp')
def WelcomeToMyapp():
return 'Welcome again to my app running on Bluemix!'
@app.route('/api/people')
def GetPeople():
list = [
{'name': 'John', 'age': 28},
        {'name': 'Bill', 'age': 26}
]
return jsonify(results=list)
@app.route('/api/people/<name>')
def SayHello(name):
message = {
'message': 'Hello ' + name
}
return jsonify(results=message)
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port))
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
pbiblisite/pbiblisite/asgi.py | """
ASGI config for pbiblisite project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pbiblisite.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/opencxd/opencxd.go | package main
import (
"encoding/hex"
"os"
"os/signal"
"syscall"
"github.com/mit-dci/lit/coinparam"
"github.com/mit-dci/lit/crypto/koblitz"
flags "github.com/jessevdk/go-flags"
util "github.com/mit-dci/opencx/chainutils"
"github.com/mit-dci/opencx/cxdb"
"github.com/mit-dci/opencx/cxdb/cxdbmemory"
"github.com/mit-dci/opencx/cxdb/cxdbsql"
"github.com/mit-dci/opencx/cxrpc"
"github.com/mit-dci/opencx/cxserver"
"github.com/mit-dci/opencx/logging"
"github.com/mit-dci/opencx/match"
)
type opencxConfig struct {
ConfigFile string
// stuff for files and directories
LogFilename string `long:"logFilename" description:"Filename for output log file"`
OpencxHomeDir string `long:"dir" description:"Location of the root directory for opencxd"`
// stuff for ports
Rpcport uint16 `short:"p" long:"rpcport" description:"Set RPC port to connect to"`
Rpchost string `long:"rpchost" description:"Set RPC host to listen to"`
// logging and debug parameters
LogLevel []bool `short:"v" description:"Set verbosity level to verbose (-v), very verbose (-vv) or very very verbose (-vvv)"`
// logging for lit nodes (find something better than w)
LitLogLevel []bool `short:"w" description:"Set verbosity level to verbose (-w), very verbose (-ww) or very very verbose (-www)"`
// Resync?
Resync bool `short:"r" long:"resync" description:"Do you want to resync all chains?"`
// networks that we can connect to
Vtchost string `long:"vtc" description:"Connect to Vertcoin full node. Specify a socket address."`
Btchost string `long:"btc" description:"Connect to bitcoin full node. Specify a socket address."`
Ltchost string `long:"ltc" description:"Connect to a litecoin full node. Specify a socket address."`
Tn3host string `long:"tn3" description:"Connect to bitcoin testnet3. Specify a socket address."`
Lt4host string `long:"lt4" description:"Connect to litecoin testnet4. Specify a socket address."`
Tvtchost string `long:"tvtc" description:"Connect to Vertcoin test node. Specify a socket address."`
Reghost string `long:"reg" description:"Connect to bitcoin regtest. Specify a socket address."`
Litereghost string `long:"litereg" description:"Connect to litecoin regtest. Specify a socket address."`
Rtvtchost string `long:"rtvtc" description:"Connect to Vertcoin regtest node. Specify a socket address."`
// configuration for concurrent RPC users.
MaxPeers uint16 `long:"numpeers" description:"Maximum number of peers that you'd like to support"`
MinPeerPort uint16 `long:"minpeerport" description:"Port to start creating ports for peers at"`
Lithost string `long:"lithost" description:"Host for the lightning node on the exchange to run"`
Litport uint16 `long:"litport" description:"Port for the lightning node on the exchange to run"`
Whitelist []string `long:"whitelist" description:"If using pinky swear settlement, this is the default whitelist"`
// filename for key
KeyFileName string `long:"keyfilename" short:"k" description:"Filename for private key within root opencx directory used to send transactions"`
// auth or unauth rpc?
AuthenticatedRPC bool `long:"authrpc" description:"Whether or not to use authenticated RPC"`
// support lightning or not to support lightning?
LightningSupport bool `long:"lightning" description:"Whether or not to support lightning on the exchange"`
}
var (
defaultHomeDir = os.Getenv("HOME")
// used as defaults before putting into parser
defaultOpencxHomeDirName = defaultHomeDir + "/.opencx/opencxd/"
defaultRpcport = uint16(12345)
defaultRpchost = "localhost"
defaultMaxPeers = uint16(64)
defaultMinPeerPort = uint16(25565)
defaultLithost = "localhost"
defaultLitport = uint16(12346)
// Yes we want to use noise-rpc
defaultAuthenticatedRPC = true
// Yes we want lightning
defaultLightningSupport = true
)
// newConfigParser returns a new command line flags parser.
func newConfigParser(conf *opencxConfig, options flags.Options) *flags.Parser {
parser := flags.NewParser(conf, options)
return parser
}
func main() {
var err error
conf := opencxConfig{
OpencxHomeDir: defaultOpencxHomeDirName,
Rpcport: defaultRpcport,
Rpchost: defaultRpchost,
MaxPeers: defaultMaxPeers,
MinPeerPort: defaultMinPeerPort,
Lithost: defaultLithost,
Litport: defaultLitport,
AuthenticatedRPC: defaultAuthenticatedRPC,
LightningSupport: defaultLightningSupport,
}
// Check and load config params
key := opencxSetup(&conf)
// Generate the coin list based on the parameters we know
coinList := generateCoinList(&conf)
var pairList []*match.Pair
if pairList, err = match.GenerateAssetPairs(coinList); err != nil {
logging.Fatalf("Could not generate asset pairs from coin list: %s", err)
}
logging.Infof("Creating limit engines...")
var mengines map[match.Pair]match.LimitEngine
if mengines, err = cxdbsql.CreateLimitEngineMap(pairList); err != nil {
logging.Fatalf("Error creating limit engine map with coinlist for opencxd: %s", err)
}
var setEngines map[*coinparam.Params]match.SettlementEngine
if len(conf.Whitelist) != 0 {
whitelist := make([][33]byte, len(conf.Whitelist))
var pkBytes []byte
for i, str := range conf.Whitelist {
if pkBytes, err = hex.DecodeString(str); err != nil {
logging.Fatalf("Error decoding string for whitelist: %s", err)
}
if len(pkBytes) != 33 {
logging.Fatalf("One pubkey not 33 bytes")
}
logging.Infof("Adding %x to the whitelist", pkBytes)
copy(whitelist[i][:], pkBytes)
}
whitelistMap := make(map[*coinparam.Params][][33]byte)
for _, coin := range coinList {
whitelistMap[coin] = whitelist
}
logging.Infof("Creating pinky swear engines...")
if setEngines, err = cxdbmemory.CreatePinkySwearEngineMap(whitelistMap, true); err != nil {
logging.Fatalf("Error creating pinky swear settlement engine map for opencxd: %s", err)
}
} else {
logging.Infof("Creating settlement engines...")
if setEngines, err = cxdbsql.CreateSettlementEngineMap(coinList); err != nil {
logging.Fatalf("Error creating settlement engine map for opencxd: %s", err)
}
}
if setEngines == nil {
logging.Fatalf("Error, nil setEngines map, this should not ever happen")
}
logging.Infof("Creating limit orderbooks...")
var limBooks map[match.Pair]match.LimitOrderbook
if limBooks, err = cxdbsql.CreateLimitOrderbookMap(pairList); err != nil {
logging.Fatalf("Error creating limit orderbook map for opencxd: %s", err)
}
logging.Infof("Creating deposit stores...")
var depositStores map[*coinparam.Params]cxdb.DepositStore
if depositStores, err = cxdbsql.CreateDepositStoreMap(coinList); err != nil {
logging.Fatalf("Error creating deposit store map for opencxd: %s", err)
}
logging.Infof("Creating settlement stores...")
var setStores map[*coinparam.Params]cxdb.SettlementStore
if setStores, err = cxdbsql.CreateSettlementStoreMap(coinList); err != nil {
logging.Fatalf("Error creating settlement store map for opencxd: %s", err)
}
// Anyways, here's where we set the server
var ocxServer *cxserver.OpencxServer
if ocxServer, err = cxserver.InitServer(setEngines, mengines, limBooks, depositStores, setStores, conf.OpencxHomeDir); err != nil {
logging.Fatalf("Error initializing server for opencxd: %s", err)
}
// For debugging but also it looks nice
for _, coin := range coinList {
logging.Infof("Coin supported: %s", coin.Name)
}
// Check that the private key exists and if it does, load it
if err = ocxServer.SetupServerKeys(key); err != nil {
logging.Fatalf("Error setting up server keys: \n%s", err)
}
// Generate the host param list
// the host params are all of the coinparams / coins we support
// this coinparam list is generated from the configuration file with generateHostParams
hpList := util.HostParamList(generateHostParams(&conf))
// Set up all chain hooks and wallets
if err = ocxServer.SetupAllWallets(hpList, "wallit/", conf.Resync); err != nil {
logging.Fatalf("Error setting up wallets: \n%s", err)
return
}
if conf.LightningSupport {
// start the lit node for the exchange
if err = ocxServer.SetupLitNode(key, "lit", "http://hubris.media.mit.edu:46580", "", ""); err != nil {
logging.Fatalf("Error starting lit node: \n%s", err)
}
// register important event handlers -- figure out something better with lightning connection interface
logging.Infof("registering sigproof handler")
ocxServer.ExchangeNode.Events.RegisterHandler("qln.chanupdate.sigproof", ocxServer.GetSigProofHandler())
logging.Infof("done registering sigproof handler")
logging.Infof("registering opconfirm handler")
ocxServer.ExchangeNode.Events.RegisterHandler("qln.chanupdate.opconfirm", ocxServer.GetOPConfirmHandler())
logging.Infof("done registering opconfirm handler")
logging.Infof("registering push handler")
ocxServer.ExchangeNode.Events.RegisterHandler("qln.chanupdate.push", ocxServer.GetPushHandler())
logging.Infof("done registering push handler")
// Waited until the wallets are started, time to link them!
if err = ocxServer.LinkAllWallets(); err != nil {
logging.Fatalf("Could not link wallets: \n%s", err)
}
// Listen on a bunch of ports according to the number of peers you want to support.
for portNum := conf.MinPeerPort; portNum < conf.MinPeerPort+conf.MaxPeers; portNum++ {
var _ string
if _, err = ocxServer.ExchangeNode.TCPListener(int(portNum)); err != nil {
return
}
// logging.Infof("Listening for connections with address %s on port %d", addr, portNum)
}
// Setup lit node rpc
go ocxServer.SetupLitRPCConnect(conf.Lithost, conf.Litport)
}
var rpcListener *cxrpc.OpencxRPCCaller
if rpcListener, err = cxrpc.CreateRPCForServer(ocxServer); err != nil {
logging.Fatalf("Error creating rpc caller for server: %s", err)
}
// SIGINT and SIGTERM and SIGQUIT handler for CTRL-c, KILL, CTRL-/, etc.
go func() {
logging.Infof("Notifying signals")
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGQUIT)
signal.Notify(sigs, syscall.SIGTERM)
signal.Notify(sigs, syscall.SIGINT)
for {
signal := <-sigs
logging.Infof("Received %s signal, Stopping server gracefully...", signal.String())
// stop rpc listener
if err = rpcListener.Stop(); err != nil {
logging.Fatalf("Error killing server: %s", err)
}
return
}
}()
if !conf.AuthenticatedRPC {
// this tells us when the rpclisten is done
logging.Infof(" === will start to listen on rpc ===")
if err = rpcListener.RPCListen(conf.Rpchost, conf.Rpcport); err != nil {
logging.Fatalf("Error listening for rpc for server: %s", err)
}
} else {
privkey, _ := koblitz.PrivKeyFromBytes(koblitz.S256(), key[:])
// this tells us when the rpclisten is done
logging.Infof(" === will start to listen on noise-rpc ===")
if err = rpcListener.NoiseListen(privkey, conf.Rpchost, conf.Rpcport); err != nil {
logging.Fatalf("Error listening for noise rpc for server: %s", err)
}
}
// wait until the listener dies - this does not return anything
rpcListener.WaitUntilDead()
return
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
pkg/operator/sync_signer_v4_00.go | package operator
import (
"bytes"
"fmt"
"os"
"time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
appsclientv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"github.com/openshift/library-go/pkg/crypto"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
"github.com/openshift/library-go/pkg/operator/resource/resourceread"
scsv1 "github.com/openshift/service-ca-operator/pkg/apis/serviceca/v1"
"github.com/openshift/service-ca-operator/pkg/operator/operatorclient"
"github.com/openshift/service-ca-operator/pkg/operator/v4_00_assets"
)
// syncSigningController_v4_00_to_latest takes care of synchronizing (not upgrading) the thing we're managing.
// most of the time the sync method will be good for a large span of minor versions
func syncSigningController_v4_00_to_latest(c serviceCAOperator, operatorConfig *scsv1.ServiceCA) error {
var err error
requiredNamespace := resourceread.ReadNamespaceV1OrDie(v4_00_assets.MustAsset("v4.0.0/service-serving-cert-signer-controller/ns.yaml"))
_, _, err = resourceapply.ApplyNamespace(c.corev1Client, c.eventRecorder, requiredNamespace)
if err != nil {
return fmt.Errorf("%q: %v", "ns", err)
}
requiredClusterRole := resourceread.ReadClusterRoleV1OrDie(v4_00_assets.MustAsset("v4.0.0/service-serving-cert-signer-controller/clusterrole.yaml"))
_, _, err = resourceapply.ApplyClusterRole(c.rbacv1Client, c.eventRecorder, requiredClusterRole)
if err != nil {
return fmt.Errorf("%q: %v", "clusterrole", err)
}
requiredClusterRoleBinding := resourceread.ReadClusterRoleBindingV1OrDie(v4_00_assets.MustAsset("v4.0.0/service-serving-cert-signer-controller/clusterrolebinding.yaml"))
_, _, err = resourceapply.ApplyClusterRoleBinding(c.rbacv1Client, c.eventRecorder, requiredClusterRoleBinding)
if err != nil {
return fmt.Errorf("%q: %v", "clusterrolebinding", err)
}
requiredRole := resourceread.ReadRoleV1OrDie(v4_00_assets.MustAsset("v4.0.0/service-serving-cert-signer-controller/role.yaml"))
_, _, err = resourceapply.ApplyRole(c.rbacv1Client, c.eventRecorder, requiredRole)
if err != nil {
return fmt.Errorf("%q: %v", "role", err)
}
requiredRoleBinding := resourceread.ReadRoleBindingV1OrDie(v4_00_assets.MustAsset("v4.0.0/service-serving-cert-signer-controller/rolebinding.yaml"))
_, _, err = resourceapply.ApplyRoleBinding(c.rbacv1Client, c.eventRecorder, requiredRoleBinding)
if err != nil {
return fmt.Errorf("%q: %v", "rolebinding", err)
}
requiredSA := resourceread.ReadServiceAccountV1OrDie(v4_00_assets.MustAsset("v4.0.0/service-serving-cert-signer-controller/sa.yaml"))
_, saModified, err := resourceapply.ApplyServiceAccount(c.corev1Client, c.eventRecorder, requiredSA)
if err != nil {
return fmt.Errorf("%q: %v", "sa", err)
}
// TODO create a new configmap whenever the data value changes
_, configMapModified, err := manageSigningConfigMap_v4_00_to_latest(c.corev1Client, c.eventRecorder, operatorConfig)
if err != nil {
return fmt.Errorf("%q: %v", "configmap", err)
}
_, signingSecretModified, err := manageSigningSecret_v4_00_to_latest(c.corev1Client, c.eventRecorder)
if err != nil {
return fmt.Errorf("%q: %v", "signing-key", err)
}
var forceDeployment bool
if saModified { // SA modification can cause new tokens
forceDeployment = true
}
if signingSecretModified {
forceDeployment = true
}
if configMapModified {
forceDeployment = true
}
// our configmaps and secrets are in order, now it is time to create the DS
// TODO check basic preconditions here
_, _, err = manageSignerDeployment_v4_00_to_latest(c.appsv1Client, c.eventRecorder, operatorConfig, forceDeployment)
return err
}
func manageSigningConfigMap_v4_00_to_latest(client coreclientv1.ConfigMapsGetter, eventRecorder events.Recorder, operatorConfig *scsv1.ServiceCA) (*corev1.ConfigMap, bool, error) {
configMap := resourceread.ReadConfigMapV1OrDie(v4_00_assets.MustAsset("v4.0.0/service-serving-cert-signer-controller/cm.yaml"))
defaultConfig := v4_00_assets.MustAsset("v4.0.0/service-serving-cert-signer-controller/defaultconfig.yaml")
requiredConfigMap, _, err := resourcemerge.MergeConfigMap(configMap, "controller-config.yaml", nil, defaultConfig, operatorConfig.Spec.ServiceServingCertSignerConfig.Raw)
if err != nil {
return nil, false, err
}
return resourceapply.ApplyConfigMap(client, eventRecorder, requiredConfigMap)
}
// TODO manage rotation in addition to initial creation
func manageSigningSecret_v4_00_to_latest(client coreclientv1.SecretsGetter, eventRecorder events.Recorder) (*corev1.Secret, bool, error) {
secret := resourceread.ReadSecretV1OrDie(v4_00_assets.MustAsset("v4.0.0/service-serving-cert-signer-controller/signing-secret.yaml"))
existing, err := client.Secrets(secret.Namespace).Get(secret.Name, metav1.GetOptions{})
if !apierrors.IsNotFound(err) {
return existing, false, err
}
ca, err := crypto.MakeSelfSignedCAConfig(serviceServingCertSignerName(), 365)
if err != nil {
return existing, false, err
}
certBytes := &bytes.Buffer{}
keyBytes := &bytes.Buffer{}
if err := ca.WriteCertConfig(certBytes, keyBytes); err != nil {
return existing, false, err
}
secret.Data["tls.crt"] = certBytes.Bytes()
secret.Data["tls.key"] = keyBytes.Bytes()
return resourceapply.ApplySecret(client, eventRecorder, secret)
}
func manageSignerDeployment_v4_00_to_latest(client appsclientv1.AppsV1Interface, eventRecorder events.Recorder, options *scsv1.ServiceCA, forceDeployment bool) (*appsv1.Deployment, bool, error) {
required := resourceread.ReadDeploymentV1OrDie(v4_00_assets.MustAsset("v4.0.0/service-serving-cert-signer-controller/deployment.yaml"))
required.Spec.Template.Spec.Containers[0].Image = os.Getenv("CONTROLLER_IMAGE")
required.Spec.Template.Spec.Containers[0].Args = append(required.Spec.Template.Spec.Containers[0].Args, fmt.Sprintf("-v=%s", options.Spec.LogLevel))
return resourceapply.ApplyDeployment(client, eventRecorder, required, getGeneration(client, operatorclient.TargetNamespace, required.Name), forceDeployment)
}
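// serviceServingCertSignerName returns a unique signer name such as
// "openshift-service-serving-signer@1600000000" (the numeric suffix is the
// current Unix timestamp; the value shown is illustrative).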
func serviceServingCertSignerName() string {
return fmt.Sprintf("%s@%d", "openshift-service-serving-signer", time.Now().Unix())
}
| [
"\"CONTROLLER_IMAGE\""
]
| []
| [
"CONTROLLER_IMAGE"
]
| [] | ["CONTROLLER_IMAGE"] | go | 1 | 0 | |
tools/train.py | import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
import wandb.sdk.internal.datastore
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--name',default='wsod-mmdet')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
os.environ["WANDB_RUN_GROUP"] = "experiment-" + wandb.util.generate_id()
args = parse_args()
wandb.init(project=args.name)
# hyperparameter_defaults = dict(
# oam_max_num=10,
# score_thr1=0.3,
# score_thr2=0.7,
# empty_cf=30,
# lr=0.001,
# )
wandb.config.oam_max_num = 20
wandb.config.score_thr2 = 0.05
wandb.config.empty_cf = 30
wandb.config.ss_cf_thr = 50
# wandb.config.lr = 0.008
wandb.config.warm_iter = -1
wandb.config.strong_shot = 26
# wandb.init(config=hyperparameter_defaults)
wandb.config.config_file = args.config
wandb.config.work_dir = args.work_dir
wandb.config.max_map = 0
wandb.config.map = 0
wandb.config.loss_weak_scale = 1.0
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
wandb.config.time = timestamp
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
wandb.config.logfile = log_file
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
wandb.config.val_data = cfg.data.val['ann_file']
if cfg.data.train['type'] == 'RepeatDataset':
wandb.config.train_data_type = cfg.data.train['dataset']['type']
wandb.config.repeat_times = cfg.data.train['times']
wandb.config.ann_file = cfg.data.train['dataset']['ann_file']
else:
wandb.config.train_data_type = cfg.data.train['type']
wandb.config.repeat_times = 1
wandb.config.ann_file = cfg.data.train['ann_file']
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
# wandb.save(os.path.join(wandb.config.work_dir,'mymodel.h5'))
# fitlog.finish()
wandb.save("mymodel.h5")
if __name__ == '__main__':
main()
| []
| []
| [
"LOCAL_RANK",
"WANDB_RUN_GROUP"
]
| [] | ["LOCAL_RANK", "WANDB_RUN_GROUP"] | python | 2 | 0 | |
dev/Tools/build/waf-1.7.13/lmbrwaflib/qt5.py | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2015 (ita)
# Modifications copyright Amazon.com, Inc. or its affiliates.
"""
Tool Description
================
This tool helps with finding Qt5 tools and libraries,
and also provides syntactic sugar for using Qt5 tools.
The following snippet illustrates the tool usage::
def options(opt):
opt.load('compiler_cxx qt5')
def configure(conf):
conf.load('compiler_cxx qt5')
def build(bld):
bld(
features = ['qt5','cxx','cxxprogram'],
uselib = ['QTCORE','QTGUI','QTOPENGL','QTSVG'],
source = 'main.cpp textures.qrc aboutDialog.ui',
target = 'window',
)
Here, the UI description and resource files will be processed
to generate code.
Usage
=====
Load the "qt5" tool.
You also need to edit your sources accordingly:
- the normal way of doing things is to have your C++ files
include the .moc file.
This is regarded as the best practice (and provides much faster
compilations).
It also implies that the include paths have beenset properly.
- to have the include paths added automatically, use the following::
from waflib.TaskGen import feature, before_method, after_method
@feature('cxx')
@after_method('process_source')
@before_method('apply_incpaths')
def add_includes_paths(self):
incs = set(self.to_list(getattr(self, 'includes', '')))
for x in self.compiled_tasks:
incs.add(x.inputs[0].parent.path_from(self.path))
self.includes = list(incs)
Note: another tool provides Qt processing that does not require
.moc includes, see 'playground/slow_qt/'.
A few options (--qt{dir,bin,...}) and environment variables
(QT5_{ROOT,DIR,MOC,UIC,XCOMPILE}) allow finer tuning of the tool,
tool path selection, etc; please read the source for more info.
"""
try:
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
except ImportError:
has_xml = False
ContentHandler = object
else:
has_xml = True
import os, sys, re, time, shutil, stat
from waflib.Tools import cxx
from waflib import Task, Utils, Options, Errors, Context
from waflib.TaskGen import feature, after_method, extension, before_method
from waflib.Configure import conf
from waflib.Tools import c_preproc
from waflib import Logs
from generate_uber_files import UBER_HEADER_COMMENT
import copy_tasks
from collections import defaultdict
from threading import Lock
from sets import Set
import lmbr_setup_tools
MOC_H = ['.h', '.hpp', '.hxx', '.hh']
"""
File extensions associated to the .moc files
"""
EXT_RCC = ['.qrc']
"""
File extension for the resource (.qrc) files
"""
EXT_UI = ['.ui']
"""
File extension for the user interface (.ui) files
"""
EXT_QT5 = ['.cpp', '.cc', '.cxx', '.C', '.mm']
"""
File extensions of C++ files that may require a .moc processing
"""
QT5_LIBS = '''
qtmain
Qt5Bluetooth
Qt5CLucene
Qt5Concurrent
Qt5Core
Qt5DBus
Qt5Declarative
Qt5DesignerComponents
Qt5Designer
Qt5Gui
Qt5Help
Qt5MacExtras
Qt5MultimediaQuick_p
Qt5Multimedia
Qt5MultimediaWidgets
Qt5Network
Qt5Nfc
Qt5OpenGL
Qt5Positioning
Qt5PrintSupport
Qt5Qml
Qt5QuickParticles
Qt5Quick
Qt5QuickTest
Qt5Script
Qt5ScriptTools
Qt5Sensors
Qt5SerialPort
Qt5Sql
Qt5Svg
Qt5Test
Qt5WebKit
Qt5WebKitWidgets
Qt5WebChannel
Qt5Widgets
Qt5WinExtras
Qt5X11Extras
Qt5XmlPatterns
Qt5Xml'''
# Search pattern to find the required #include <*.moc> lines in the source code to identify the header files that need
# moc'ing. The path of the moc file must be relative to the current project root
INCLUDE_MOC_RE = re.compile(r'\s*\#include\s+[\"<](.*.moc)[\">]',flags=re.MULTILINE)
INCLUDE_SRC_RE = re.compile(r'\s*\#include\s+[\"<](.*.(cpp|cxx|cc))[\">]',flags=re.MULTILINE)
QOBJECT_RE = re.compile(r'\s*Q_OBJECT\s*', flags=re.MULTILINE)
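# Illustrative example (hypothetical file names): a source line such as
#     #include <Dialogs/AboutDialog.moc>
# is captured by INCLUDE_MOC_RE as 'Dialogs/AboutDialog.moc', and the matching header
# Dialogs/AboutDialog.h (resolved relative to the project folder) is what gets moc'ed.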
# Derive a specific moc_files.<idx> folder name based on the base bldnode and idx
def get_target_qt5_root(ctx, target_name, idx):
base_qt_node = ctx.bldnode.make_node('qt5/{}.{}'.format(target_name,idx))
return base_qt_node
# Remap a generated target node (e.g. one produced by an extension change) so that it lives
# under this target's QT code-generated folder. The qt5 generated files are restricted to the build
# folder. That means each project cannot use any QT generated artifacts that do not exist within its project boundaries.
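# Illustrative mapping (hypothetical paths): with project_path 'Code/Editor', target_name 'EditorLib'
# and idx 1, a target at 'Code/Editor/UI/MainWindow.moc' is remapped to '<bld>/qt5/EditorLib.1/UI/MainWindow.moc'.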
def change_target_qt5_node(ctx, project_path, target_name, relpath_target, idx):
relpath_project = project_path.relpath()
if relpath_target.startswith(relpath_project):
# Strip out the project relative path and use that as the target_qt5 relative path
restricted_path = relpath_target.replace(relpath_project,'')
elif relpath_target.startswith('..'):
# Special case. If the target and project rel paths dont align, then the target node is outside of the
# project folder. (ie there is a qt-related file in the waf_files that is outside the project's context path)
# If the file is an include or moc file, it must reside inside the project context, because it will be
# included based on an expected project relative path
target_node_name_lower = relpath_target.lower()
if target_node_name_lower.endswith(".moc") or target_node_name_lower.endswith(".h"):
ctx.fatal("QT target {} for project {} cannot exist outside of its source folder context.".format(relpath_target, target_name))
restricted_path = "__/{}.{}/{}".format(target_name, idx, target_name)
else:
restricted_path = relpath_target
target_node_subdir = os.path.dirname(restricted_path)
# Change the output target to the specific moc file folder
output_qt_dir = get_target_qt5_root(ctx, target_name,idx).make_node(target_node_subdir)
output_qt_dir.mkdir()
output_qt_node = output_qt_dir.make_node(os.path.split(relpath_target)[1])
return output_qt_node
class qxx(Task.classes['cxx']):
"""
Each C++ file can have zero or several .moc files to create.
They are known only when the files are scanned (preprocessor)
To avoid scanning the c++ files each time (parsing C/C++), the results
are retrieved from the task cache (bld.node_deps/bld.raw_deps).
The moc tasks are also created *dynamically* during the build.
"""
def __init__(self, *k, **kw):
Task.Task.__init__(self, *k, **kw)
if 'qt5' in self.generator.features and self.env.QMAKE:
# If QT5 is enabled, then signal that moc scanning is needed
self.moc_done = 0
else:
# Otherwise, signal that moc scanning can be skipped
self.moc_done = 1
self.dep_moc_files = {}
def __str__(self):
"string to display to the user"
env = self.env
src_str = ' '.join([a.nice_path() for a in self.inputs])
tgt_str = ' '.join([a.nice_path() for a in self.outputs])
if self.outputs and self.inputs:
sep = ' -> '
else:
sep = ''
name = self.__class__.__name__.replace('_task', '') + ' (' + env['PLATFORM'] + '|' + env['CONFIGURATION'] + ')'
return '%s: %s%s%s\n' % (name, src_str, sep, tgt_str)
def runnable_status(self):
"""
Compute the task signature to make sure the scanner was executed. Create the
moc tasks by using :py:meth:`waflib.Tools.qt5.qxx.add_moc_tasks` (if necessary),
then postpone the task execution (there is no need to recompute the task signature).
"""
status = Task.Task.runnable_status(self)
if not self.moc_done:
# ask the task if it needs to rebuild. This will include checking all dependencies (.moc files included)
# that may have changed. If the task doesn't need to rebuild, no point in scanning for all the individual
# moc tasks that need to be added
if status != Task.RUN_ME:
return status
self.add_moc_tasks()
# At this point, the moc task should be done, recycle and try the status check again
self.moc_done = 1
return Task.ASK_LATER
return status
def create_moc_task(self, h_node, moc_filename):
"""
If several libraries use the same classes, it is possible that moc will run several times (Issue 1318)
It is not possible to change the file names, but we can assume that the moc transformation will be identical,
and the moc tasks can be shared in a global cache.
The defines passed to moc will then depend on task generator order. If this is not acceptable, then
use the tool slow_qt5 instead (and enjoy the slow builds... :-( )
"""
cache_key = '{}.{}'.format(h_node.abspath(),self.generator.target_uid)
try:
moc_cache = self.generator.bld.moc_cache
except AttributeError:
moc_cache = self.generator.bld.moc_cache = {}
try:
return moc_cache[cache_key]
except KeyError:
relpath_target = os.path.join(h_node.parent.relpath(), moc_filename)
target_node = change_target_qt5_node(self.generator.bld,
self.generator.path,
self.generator.name,
relpath_target,
self.generator.target_uid)
tsk = moc_cache[cache_key] = Task.classes['moc'](env=self.env, generator=self.generator)
tsk.set_inputs(h_node)
tsk.set_outputs(target_node)
self.dep_moc_files[target_node] = False
if self.generator:
self.generator.tasks.append(tsk)
# direct injection in the build phase (safe because called from the main thread)
gen = self.generator.bld.producer
gen.outstanding.insert(0, tsk)
gen.total += 1
return tsk
def moc_h_ext(self):
try:
ext = Options.options.qt_header_ext.split()
except AttributeError:
pass
if not ext:
ext = MOC_H
return ext
def add_moc_tasks(self):
node = self.inputs[0]
src_scan = node.read()
# Determine if this is an uber file to see if we need to go one level deeper
if src_scan.startswith(UBER_HEADER_COMMENT):
# This is an uber file, handle uber files differently
self.add_moc_task_uber(node,src_scan)
else:
# Process the source file (for mocs)
self.add_moc_tasks_for_node(node,src_scan)
del src_scan #free up the text as soon as possible
def scan_node_contents_for_moc_tasks(self,node_contents):
base_node = self.generator.path
include_moc_node_rel_paths = INCLUDE_MOC_RE.findall(node_contents)
moctasks = []
for include_moc_node_rel_path in include_moc_node_rel_paths:
base_name = os.path.splitext(include_moc_node_rel_path)[0]
# We are only allowing to include mocing header files that are relative to the project folder
header_node = None
for moc_ext in self.moc_h_ext():
# use search_node(), it will not create a node if the node is not found, and won't create bogus nodes while searching
header_node = base_node.search_node('{}{}'.format(base_name, moc_ext))
if header_node:
break
if not header_node:
raise Errors.WafError('No source found for {} which is a moc file. Is the file included in .waf_files?'.format(base_name))
moc_filename = '{}.moc'.format(os.path.splitext(header_node.name)[0])
# create the moc task
task = self.create_moc_task(header_node, moc_filename)
moctasks.append(task)
return moctasks
def add_moc_task_uber(self, node, node_contents):
'''
Handle uber files by grepping for all the includes of source files and performing the moc scanning there
'''
moctasks = []
include_source_rel_paths = INCLUDE_SRC_RE.findall(node_contents)
for include_source_rel_path, include_source_extension in include_source_rel_paths:
source_node = node.parent.find_node(include_source_rel_path)
if source_node is None:
source_node = self.generator.path.find_node(include_source_rel_path)
if source_node is not None:
source_node_contents = source_node.read()
moctasks += self.scan_node_contents_for_moc_tasks(source_node_contents)
del source_node_contents #free up the text as soon as possible
# simple scheduler dependency: run the moc task before others
self.run_after.update(set(moctasks))
def add_moc_tasks_for_node(self, node, node_contents):
'''
Create the moc tasks greping the source file for all the #includes
'''
moctasks = self.scan_node_contents_for_moc_tasks(node_contents)
# simple scheduler dependency: run the moc task before others
self.run_after.update(set(moctasks))
class trans_update(Task.Task):
"""Update a .ts files from a list of C++ files"""
run_str = '${QT_LUPDATE} ${SRC} -ts ${TGT}'
color = 'BLUE'
Task.update_outputs(trans_update)
class XMLHandler(ContentHandler):
"""
Parser for *.qrc* files
"""
def __init__(self):
self.buf = []
self.files = []
def startElement(self, name, attrs):
if name == 'file':
self.buf = []
def endElement(self, name):
if name == 'file':
self.files.append(str(''.join(self.buf)))
def characters(self, cars):
self.buf.append(cars)
@extension(*EXT_RCC)
def create_rcc_task(self, node):
"Create rcc and cxx tasks for *.qrc* files"
# Do not create tasks for project_generation builds
if self.env['PLATFORM'] == 'project_generator':
return None
# Do not perform any task if QMAKE is not installed
if not self.env.QMAKE:
return None
# For QRC Processing, we cannot make the generated rcc file from the qrc source as a separate compile unit
	# when creating static libs. It appears that MSVC will optimize away the static methods that are required to
	# initialize the resources for the static lib. In order to work around this, the generated file from the
# qrc will need to be created as a header and included into a cpp that is consumed by the app/shared library
# that is linking against it
is_static_lib = 'stlib' == getattr(self,'_type','')
if not getattr(self, 'rcc_tasks', False):
self.rcc_tasks = []
if is_static_lib:
rcc_filename = 'rcc_%s.h' % os.path.splitext(node.name)[0]
relpath_target = os.path.join(node.parent.relpath(), rcc_filename)
rcnode = change_target_qt5_node(self.bld,
self.path,
self.name,
relpath_target,
self.target_uid)
qrc_task = self.create_task('rcc', node, rcnode)
self.rcc_tasks.append(qrc_task)
return qrc_task
else:
rcc_filename = '%s_rc.cpp' % os.path.splitext(node.name)[0]
relpath_target = os.path.join(node.parent.relpath(), rcc_filename)
rcnode = change_target_qt5_node(self.bld,
self.path,
self.name,
relpath_target,
self.target_uid)
qrc_task = self.create_task('rcc', node, rcnode)
self.rcc_tasks.append(qrc_task)
cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o'))
cpptask.dep_nodes.append(node)
cpptask.set_run_after(qrc_task)
try:
self.compiled_tasks.append(cpptask)
except AttributeError:
self.compiled_tasks = [cpptask]
return cpptask
@feature('qt5')
@after_method('process_source')
def add_rcc_dependencies(self):
# are there rcc tasks?
if not getattr(self, 'rcc_tasks', False):
return
rcc_tasks = set(self.rcc_tasks)
for task in self.tasks:
if any(isinstance(task, Task.classes[c]) for c in ['qxx', 'cxx', 'c']):
task.run_after |= rcc_tasks
@feature('qt5')
@after_method('apply_link')
def create_automoc_task(self):
if hasattr(self, 'header_files') and len(self.header_files) > 0:
header_nodes = self.to_nodes(self.header_files)
task = self.create_task('automoc', header_nodes)
# this may mutate the link task, must run the link task after this task
self.link_task.set_run_after(task)
@extension(*EXT_UI)
def create_uic_task(self, node):
"hook for uic tasks"
# Do not create tasks for project_generation builds
if self.env['PLATFORM'] == 'project_generator':
return None
if not self.env.QMAKE:
return None
if not getattr(self, 'uic_tasks', False):
self.uic_tasks = []
uictask = self.create_task('ui5', node)
ui_filename = self.env['ui_PATTERN'] % node.name[:-3]
relpath_target = os.path.join(node.parent.relpath(), ui_filename)
target_node = change_target_qt5_node(self.bld,
self.path,
self.name,
relpath_target,
self.target_uid)
uictask.outputs = [target_node]
self.uic_tasks.append(uictask)
@feature('qt5')
@after_method('process_source')
def add_uic_dependencies(self):
# are there uic tasks?
if not getattr(self, 'uic_tasks', False):
return
uic_tasks = set(self.uic_tasks)
for task in self.tasks:
if task.__class__.__name__ in ['qxx', 'cxx', 'c']:
task.run_after |= uic_tasks
@extension('.ts')
def add_lang(self, node):
"""add all the .ts file into self.lang"""
self.lang = self.to_list(getattr(self, 'lang', [])) + [node]
@feature('qt5')
@before_method('apply_incpaths')
def apply_qt5_includes(self):
# Make sure the QT is enabled, otherwise whatever module is using this feature will fail
if not self.env.QMAKE:
return
base_moc_node = get_target_qt5_root(self.bld,
self.name,
self.target_uid)
if not hasattr(self, 'includes'):
self.includes = []
self.includes.append(base_moc_node)
if self.env.PLATFORM == 'win_x64_clang':
self.env.append_unique('CXXFLAGS', '-Wno-ignored-pragmas')
@feature('qt5')
@after_method('set_link_outputs')
def apply_qt5(self):
"""
Add MOC_FLAGS which may be necessary for moc::
def build(bld):
bld.program(features='qt5', source='main.cpp', target='app', use='QTCORE')
The additional parameters are:
:param lang: list of translation files (\*.ts) to process
:type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension
:param update: whether to process the C++ files to update the \*.ts files (use **waf --translate**)
:type update: bool
:param langname: if given, transform the \*.ts files into a .qrc files to include in the binary file
:type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension
"""
# Make sure the QT is enabled, otherwise whatever module is using this feature will fail
if not self.env.QMAKE:
return
# If no type is defined, this is just a stub task that shouldn't handle any additional build/link tasks
if not hasattr(self,'_type'):
return
if getattr(self, 'lang', None):
qmtasks = []
for x in self.to_list(self.lang):
if isinstance(x, str):
x = self.path.find_resource(x + '.ts')
qm_filename = '%s.qm' % os.path.splitext(x.name)[0]
relpath_target = os.path.join(x.parent.relpath(), qm_filename)
new_qm_node = change_target_qt5_node(self.bld,
self.path,
self.name,
relpath_target,
self.target_uid)
qmtask = self.create_task('ts2qm', x, new_qm_node)
qmtasks.append(qmtask)
if getattr(self, 'update', None) and Options.options.trans_qt5:
cxxnodes = [a.inputs[0] for a in self.compiled_tasks] + [
a.inputs[0] for a in self.tasks if getattr(a, 'inputs', None) and a.inputs[0].name.endswith('.ui')]
for x in qmtasks:
self.create_task('trans_update', cxxnodes, x.inputs)
if getattr(self, 'langname', None):
qmnodes = [x.outputs[0] for x in qmtasks]
assert(isinstance(self.langname, str))
qrc_filename = '%s.qrc' % self.langname
relpath_target = os.path.join(self.path.relpath(), qrc_filename)
new_rc_node = change_target_qt5_node(self.bld,
self.path,
self.name,
relpath_target,
self.target_uid)
t = self.create_task('qm2rcc', qmnodes, new_rc_node)
for x in qmtasks:
t.set_run_after(x)
k = create_rcc_task(self, t.outputs[0])
if k:
self.link_task.inputs.append(k.outputs[0])
k.set_run_after(t)
lst = []
for flag in self.to_list(self.env['CXXFLAGS']):
if len(flag) < 2: continue
f = flag[0:2]
if f in ('-D', '-I', '/D', '/I'):
if f[0] == '/':
lst.append('-' + flag[1:])
else:
lst.append(flag)
if len(self.env['DEFINES']) > 0:
for defined_value in self.env['DEFINES']:
lst.append( '-D'+defined_value )
# Apply additional QT defines for all MOCing
additional_flags = ['-DQT_LARGEFILE_SUPPORT',
'-DQT_DLL',
'-DQT_CORE_LIB',
'-DQT_GUI_LIB']
for additional_flag in additional_flags:
if additional_flag not in lst:
lst.append(additional_flag)
self.env.append_value('MOC_FLAGS', lst)
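# Illustrative sketch only (parameter names taken from the apply_qt5 docstring above; the values
# are hypothetical): a task generator opting into translation processing might be declared as
#     bld.program(features='qt5', source='main.cpp', target='app', use='QTCORE',
#                 lang=['i18n/app_de', 'i18n/app_fr'], langname='app_translations', update=True)
# which creates one ts2qm task per .ts file, a qm2rcc task producing app_translations.qrc, and
# routes the generated resource through create_rcc_task so its output is added to the link task.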
@extension(*EXT_QT5)
def cxx_hook(self, node):
"""
Re-map C++ file extensions to the :py:class:`waflib.Tools.qt5.qxx` task.
"""
if 'qt5' in self.features:
return self.create_compiled_task('qxx', node)
else:
return self.create_compiled_task('cxx', node)
# QT tasks involve code generation, so we need to also check if the generated code is still there
class QtTask(Task.Task):
def runnable_status(self):
missing_output = False
for output in self.outputs:
if not os.path.exists(output.abspath()):
missing_output = True
break
if missing_output:
for t in self.run_after:
if not t.hasrun:
return Task.ASK_LATER
return Task.RUN_ME
status = Task.Task.runnable_status(self)
return status
class automoc(Task.Task):
def create_moc_tasks(self, moc_headers):
moc_names = set()
for moc_header in moc_headers:
moc_node_name = os.path.splitext(moc_header.name)[0]
# Make sure we don't have two moc files with the same name
suffix = None
while (moc_node_name + ("_%i" % suffix if suffix else "")) in moc_names:
suffix = suffix + 1 if suffix else 2
if suffix:
moc_node_name += "_%i" % suffix
moc_names.add(moc_node_name)
cpp_filename = '%s_moc.cpp' % moc_node_name
relpath_target = os.path.join(moc_header.parent.relpath(), cpp_filename)
moc_node = change_target_qt5_node(self.generator.bld,
self.generator.path,
self.generator.name,
relpath_target,
self.generator.target_uid)
moc_task = self.generator.create_task('moc', moc_header, moc_node)
# Include the precompiled header, if applicable
if getattr(self.generator, 'pch_header', None) is not None:
moc_task.env['MOC_FLAGS'] = moc_task.env['MOC_FLAGS'] + ['-b', self.generator.pch_header]
cpp_task = self.generator.create_compiled_task('cxx', moc_node)
# Ignore warnings in generated code
is_msvc = cpp_task.env['CXX_NAME'] == 'msvc'
moc_cxx_flags = [flag for flag in cpp_task.env['CXXFLAGS'] if not flag.startswith('/W' if is_msvc else '-W')]
if is_msvc and '/EHsc' not in moc_cxx_flags:
moc_cxx_flags.append('/EHsc')
elif not is_msvc and '-w' not in moc_cxx_flags:
moc_cxx_flags.append('-w')
cpp_task.env['CXXFLAGS'] = moc_cxx_flags
# Define Q_MOC_BUILD for the (rare) case where a header might need to check to see if it's been included by
# a _moc file.
cpp_task.env.append_unique('DEFINES', 'Q_MOC_BUILD')
cpp_task.set_run_after(moc_task)
# add cpp output to link task.
# Modifying the task should be ok because the link task is already registered as a run_after of
# the automoc task (this task), and runnable_status is run on the main thread
self.generator.link_task.inputs.append(cpp_task.outputs[0])
self.generator.link_task.set_run_after(cpp_task)
# direct injection in the build phase (safe because runnable_status is only called from the main thread)
producer = self.generator.bld.producer
producer.outstanding.insert(0, moc_task) # insert the moc_task, it's ready to run
producer.outstanding.append(cpp_task) # append the cpp_task, it must wait for the moc task completion anyway
producer.total += 2
def runnable_status(self):
# check if any of the inputs have changed, or the input list has changed, or the dependencies have changed
status = Task.Task.runnable_status(self)
moc_headers = []
if Task.RUN_ME == status:
# run the automoc scan to generate the up-to-date contents
for header_node in self.inputs:
header_contents = header_node.read()
# For now, only work on headers that opt in with an AUTOMOC comment
if "AUTOMOC" not in header_contents:
continue
header_contents = c_preproc.re_cpp.sub(c_preproc.repl, header_contents)
if QOBJECT_RE.search(header_contents):
moc_headers.append(header_node)
# store on task, will be added to the node_deps in post_run
self.moc_headers = moc_headers
else:
# signatures didn't change, grab the saved nodes
moc_headers = self.generator.bld.node_deps[self.uid()]
# build the qt tasks, and add them to the link task
self.create_moc_tasks(moc_headers)
return status
def scan(self):
moc_headers = self.generator.bld.node_deps.get(self.uid(), [])
return (moc_headers, [])
def post_run(self):
self.generator.bld.node_deps[self.uid()] = getattr(self, 'moc_headers', [])
Task.Task.post_run(self)
class rcc(QtTask):
"""
Process *.qrc* files
"""
color = 'BLUE'
run_str = '${QT_RCC} -name ${tsk.rcname()} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}'
ext_in = ['.qrc']
def __init__(self, *k, **kw):
QtTask.__init__(self, *k, **kw)
def rcname(self):
return os.path.splitext(self.inputs[0].name)[0]
def parse_deps(self):
"""Parse the *.qrc* files"""
if not has_xml:
Logs.error('no xml support was found, the rcc dependencies will be incomplete!')
return
parser = make_parser()
curHandler = XMLHandler()
parser.setContentHandler(curHandler)
fi = open(self.inputs[0].abspath(), 'r')
try:
parser.parse(fi)
finally:
fi.close()
self.rcc_deps_paths = curHandler.files
def lookup_deps(self, root, deps_paths):
nodes = []
names = []
for x in deps_paths:
nd = root.find_resource(x)
if nd:
nodes.append(nd)
else:
names.append(x)
return (nodes, names)
def scan(self):
resolved_nodes = self.generator.bld.node_deps.get(self.uid(), [])
unresolved_names = self.generator.bld.raw_deps.get(self.uid(), [])
return (resolved_nodes, unresolved_names)
def post_run(self):
self.parse_deps()
# convert input dependency files to nodes. Care must be taken in this block wrt thread safety because it creates nodes
if 'msvcdeps' in sys.modules:
# msvcdeps is run on the worker threads, it may conflict with generate_deps, which is also creating nodes at
# compile time. Defer to msvcdeps module to handle thread locking
(nodes, names) = sys.modules['msvcdeps'].sync_lookup_deps(self.inputs[0].parent, self.rcc_deps_paths)
else:
(nodes, names) = self.lookup_deps(self.inputs[0].parent, self.rcc_deps_paths)
del self.rcc_deps_paths
# store dependencies in build
self.generator.bld.node_deps[self.uid()] = nodes
self.generator.bld.raw_deps[self.uid()] = names
# delete signature to force a rebuild of signature. Scan() will be called to store the deps
try:
del self.cache_sig
except:
pass
# call base class to regenerate signature
super(rcc, self).post_run()
class moc(QtTask):
"""
Create *.moc* files
"""
color = 'BLUE'
run_str = '${QT_MOC} ${MOC_FLAGS} ${SRC} -o ${TGT}'
class fake_moc(QtTask):
"""
Create dummy *.moc files - this is a temporary workaround while we migrate to automoc
"""
color = 'BLUE'
def post_run(self):
self.outputs[0].write("/* Dummy moc file, this will eventually be removed */\n")
super(fake_moc, self).post_run()
class ui5(QtTask):
"""
Process *.ui* files
"""
color = 'BLUE'
run_str = '${QT_UIC} ${SRC} -o ${TGT}'
ext_in = ['.ui']
class ts2qm(QtTask):
"""
Create *.qm* files from *.ts* files
"""
color = 'BLUE'
run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}'
class qm2rcc(QtTask):
"""
Transform *.qm* files into *.rc* files
"""
color = 'BLUE'
after = 'ts2qm'
def run(self):
"""Create a qrc file including the inputs"""
txt = '\n'.join(['<file>%s</file>' % k.path_from(self.outputs[0].parent) for k in self.inputs])
code = '<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>' % txt
self.outputs[0].write(code)
bin_cache = {}
# maintain a cache set of platforms that don't have Qt
# so that we don't needlessly search multiple times, and
# so that the user doesn't get numerous warnings of the same thing
QT_SDK_MISSING = set()
@conf
def get_qt_version(self):
# at the end, try to find qmake in the paths given
# keep the one with the highest version
version = None
paths = []
prev_ver = ['5', '0', '0']
for qmk in ('qmake-qt5', 'qmake5', 'qmake'):
try:
qmake = self.find_program(qmk, path_list=paths, silent_output=True)
except self.errors.ConfigurationError:
pass
else:
try:
version = self.cmd_and_log([qmake] + ['-query', 'QT_VERSION'], quiet=Context.BOTH).strip()
except self.errors.WafError:
version = None
pass
# qmake could not be found easily, rely on qtchooser
if version is None:
try:
self.find_program('qtchooser', silent_output=True)
except self.errors.ConfigurationError:
pass
else:
cmd = [self.env.QTCHOOSER] + ['-qt=5', '-run-tool=qmake']
try:
version = self.cmd_and_log(cmd + ['-query', 'QT_VERSION'], quiet=Context.BOTH).strip()
except self.errors.WafError:
pass
return version
def _prepare_lib_folder_for_linux(qt_lib_path):
# this functions sets up the qt linux shared library, for example
# libQt5Xml.so -> libQt5Xml.so.5.6.2
# libQt5Xml.so.5 -> libQt5Xml.so.5.6.2
# libQt5Xml.so.5.6 -> libQt5Xml.so.5.6.2
import glob
library_files = glob.glob(os.path.join(qt_lib_path, 'lib*.so*'))
for lib_path in library_files:
if os.path.islink(lib_path):
continue
lib_path_basename = os.path.basename(lib_path)
new_lib_path, ext = os.path.splitext(lib_path)
while ext != '.so':
if os.path.lexists(new_lib_path) is False:
os.symlink(lib_path_basename, new_lib_path)
Logs.debug('Made link: {} -> {}'.format(lib_path, new_lib_path))
new_lib_path, ext = os.path.splitext(new_lib_path)
@conf
def find_qt5_binaries(self, platform):
# platform has to be passed in, as it hasn't been set in the env
# when this function is called
global QT_SDK_MISSING
if platform in QT_SDK_MISSING:
return False
env = self.env
opt = Options.options
qtbin = getattr(opt, 'qtbin', '')
platformDirectoryMapping = {
'win_x64_vs2013': 'msvc2013_64',
'win_x64_vs2015': 'msvc2015_64',
'win_x64_vs2017': 'msvc2015_64', # Not an error, VS2017 links with VS2015 binaries
'win_x64_clang': 'msvc2015_64',
'darwin_x64': 'clang_64',
'linux_x64': 'gcc_64'
}
if platform not in platformDirectoryMapping:
self.fatal('Platform %s is not supported by our Qt waf scripts!' % platform)
# Get the QT dir from the third party settings
qtdir, enabled, roles, _ = self.tp.get_third_party_path(platform, 'qt')
# If the path was not resolved, it could be an invalid alias (missing from the SetupAssistantConfig.json)
if not qtdir:
raise Errors.WafError("Invalid required QT alias for platform {}".format(platform))
# If the path was resolved, we still need to make sure the 3rd party is enabled based on the roles
if not enabled:
error_message = "Unable to resolve Qt because it is not enabled in Setup Assistant. \nMake sure that at least " \
"one of the following roles is enabled: [{}]".format(', '.join(roles))
raise Errors.WafError(error_message)
qtdir = os.path.join(qtdir, platformDirectoryMapping[platform])
paths = []
if qtdir:
qtbin = os.path.join(qtdir, 'bin')
# the qt directory has been given from QT5_ROOT - deduce the qt binary path
if not qtdir:
qtdir = os.environ.get('QT5_ROOT', '')
qtbin = os.environ.get('QT5_BIN', None) or os.path.join(qtdir, 'bin')
if qtbin:
paths = [qtbin]
qmake_cache_key = qtdir + '_QMAKE'
if qmake_cache_key in bin_cache:
self.env.QMAKE = bin_cache[qmake_cache_key]
else:
# at the end, try to find qmake in the paths given
# keep the one with the highest version
cand = None
prev_ver = ['5', '0', '0']
for qmk in ('qmake-qt5', 'qmake5', 'qmake'):
try:
qmake = self.find_program(qmk, path_list=paths, silent_output=True)
except self.errors.ConfigurationError:
pass
else:
try:
version = self.cmd_and_log([qmake] + ['-query', 'QT_VERSION']).strip()
except self.errors.WafError:
pass
else:
if version:
new_ver = version.split('.')
if new_ver > prev_ver:
cand = qmake
prev_ver = new_ver
# qmake could not be found easily, rely on qtchooser
if not cand:
try:
self.find_program('qtchooser')
except self.errors.ConfigurationError:
pass
else:
cmd = [self.env.QTCHOOSER] + ['-qt=5', '-run-tool=qmake']
try:
version = self.cmd_and_log(cmd + ['-query', 'QT_VERSION'])
except self.errors.WafError:
pass
else:
cand = os.path.normpath(cmd)
if cand:
self.env.QMAKE = cand
bin_cache[qmake_cache_key] = cand
else:
# If we cannot find qmake, we will assume that QT is not available or a selected option
# Therefore, we cannot build the lumberyard editor and tools
Logs.warn('[WARN] Unable to find the appropriate QT library. Make sure you have QT installed if you wish to compile the Lumberyard Editor and tools.')
QT_SDK_MISSING.add(platform)
return False
qmake_cache_key = qtdir + '_QT_INSTALL_BINS'
if qmake_cache_key in bin_cache:
self.env.QT_INSTALL_BINS = qtbin = bin_cache[qmake_cache_key]
else:
query_qt_bin_result = self.cmd_and_log([self.env.QMAKE] + ['-query', 'QT_INSTALL_BINS']).strip() + os.sep
self.env.QT_INSTALL_BINS = qtbin = os.path.normpath(query_qt_bin_result) + os.sep
bin_cache[qmake_cache_key] = qtbin
paths.insert(0, qtbin)
def _get_qtlib_subfolder(name):
qt_subdir = os.path.join(qtdir, name)
if not os.path.exists(qt_subdir):
self.fatal('Unable to find QT lib folder {}'.format(name))
return qt_subdir
# generate symlinks for the library files within the lib folder
if platform == "linux_x64":
_prepare_lib_folder_for_linux(_get_qtlib_subfolder("lib"))
def find_bin(lst, var):
if var in env:
return
cache_key = qtdir + '_' + var
if cache_key in bin_cache:
env[var] = bin_cache[cache_key]
return
for f in lst:
try:
ret = self.find_program(f, path_list=paths, silent_output=True)
except self.errors.ConfigurationError:
pass
else:
env[var] = os.path.normpath(ret)
bin_cache[cache_key] = os.path.normpath(ret)
break
find_bin(['uic-qt5', 'uic'], 'QT_UIC')
if not env.QT_UIC:
# If we find qmake but not the uic compiler, then the QT installation is corrupt/invalid
self.fatal('Detected an invalid/corrupt version of QT, please check your installation')
uic_version_cache_key = qtdir + '_UICVERSION'
if uic_version_cache_key not in bin_cache:
uicver = self.cmd_and_log([env.QT_UIC] + ['-version'], output=Context.BOTH, quiet=True)
uicver = ''.join(uicver).strip()
uicver = uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt', '')
if uicver.find(' 3.') != -1 or uicver.find(' 4.') != -1:
self.fatal('this uic compiler is for qt3 or qt4, add uic for qt5 to your path')
bin_cache[uic_version_cache_key] = uicver
find_bin(['moc-qt5', 'moc'], 'QT_MOC')
find_bin(['rcc-qt5', 'rcc'], 'QT_RCC')
find_bin(['lrelease-qt5', 'lrelease'], 'QT_LRELEASE')
find_bin(['lupdate-qt5', 'lupdate'], 'QT_LUPDATE')
env['UIC_ST'] = '%s -o %s'
env['MOC_ST'] = '-o'
env['ui_PATTERN'] = 'ui_%s.h'
env['QT_LRELEASE_FLAGS'] = ['-silent']
env.MOCCPPPATH_ST = '-I%s'
env.MOCDEFINES_ST = '-D%s'
env.QT_BIN_DIR = _get_qtlib_subfolder('bin')
env.QT_LIB_DIR = _get_qtlib_subfolder('lib')
env.QT_QML_DIR = _get_qtlib_subfolder('qml')
env.QT_PLUGINS_DIR = _get_qtlib_subfolder('plugins')
return True
@conf
def find_qt5_libraries(self):
qtlibs = getattr(Options.options, 'qtlibs', None) or os.environ.get("QT5_LIBDIR", None)
if not qtlibs:
try:
qtlibs = self.cmd_and_log([self.env.QMAKE] + ['-query', 'QT_INSTALL_LIBS']).strip()
except Errors.WafError:
qtdir = self.cmd_and_log([self.env.QMAKE] + ['-query', 'QT_INSTALL_PREFIX']).strip() + os.sep
qtlibs = os.path.join(qtdir, 'lib')
self.msg('Found the Qt5 libraries in', qtlibs)
qtincludes = os.environ.get("QT5_INCLUDES", None) or self.cmd_and_log([self.env.QMAKE] + ['-query', 'QT_INSTALL_HEADERS']).strip()
env = self.env
if not 'PKG_CONFIG_PATH' in os.environ:
os.environ['PKG_CONFIG_PATH'] = '%s:%s/pkgconfig:/usr/lib/qt5/lib/pkgconfig:/opt/qt5/lib/pkgconfig:/usr/lib/qt5/lib:/opt/qt5/lib' % (qtlibs, qtlibs)
if Utils.unversioned_sys_platform() == "darwin":
if qtlibs:
env.append_unique('FRAMEWORKPATH',qtlibs)
# Keep track of platforms that were checked (there is no need to do a multiple report)
checked_darwin = False
checked_linux = False
checked_win_x64 = False
validated_platforms = self.get_available_platforms()
for validated_platform in validated_platforms:
is_platform_darwin = validated_platform in (['darwin_x64', 'ios', 'appletv'])
is_platform_linux = validated_platform in (['linux_x64_gcc'])
is_platform_win_x64 = validated_platform.startswith('win_x64')
for i in self.qt5_vars:
uselib = i.upper()
# Platform is darwin_x64 / mac
if is_platform_darwin:
# QT for darwin does not have '5' in the name, so we need to remove it
darwin_adjusted_name = i.replace('Qt5','Qt')
# Since at least qt 4.7.3 each library locates in separate directory
frameworkName = darwin_adjusted_name + ".framework"
qtDynamicLib = os.path.join(qtlibs, frameworkName, darwin_adjusted_name)
if os.path.exists(qtDynamicLib):
env.append_unique('FRAMEWORK_{}_{}'.format(validated_platform,uselib), darwin_adjusted_name)
if not checked_darwin:
self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN')
else:
if not checked_darwin:
self.msg('Checking for %s' % i, False, 'YELLOW')
env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), os.path.join(qtlibs, frameworkName, 'Headers'))
# Detect the debug versions of the library
uselib_debug = i.upper() + "D"
darwin_adjusted_name_debug = '{}_debug'.format(darwin_adjusted_name)
qtDynamicLib_debug = os.path.join(qtlibs, frameworkName, darwin_adjusted_name_debug)
if os.path.exists(qtDynamicLib_debug):
env.append_unique('FRAMEWORK_{}_{}'.format(validated_platform, uselib_debug), darwin_adjusted_name)
if not checked_darwin:
self.msg('Checking for %s_debug' % i, qtDynamicLib_debug, 'GREEN')
else:
if not checked_darwin:
self.msg('Checking for %s_debug' % i, False, 'YELLOW')
env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib_debug), os.path.join(qtlibs, frameworkName, 'Headers'))
# Platform is linux+gcc
elif is_platform_linux:
qtDynamicLib = os.path.join(qtlibs, "lib" + i + ".so")
qtStaticLib = os.path.join(qtlibs, "lib" + i + ".a")
if os.path.exists(qtDynamicLib):
env.append_unique('LIB_{}_{}'.format(validated_platform,uselib), i)
if not checked_linux:
self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN')
elif os.path.exists(qtStaticLib):
env.append_unique('LIB_{}_{}'.format(validated_platform,uselib), i)
if not checked_linux:
self.msg('Checking for %s' % i, qtStaticLib, 'GREEN')
else:
if not checked_linux:
self.msg('Checking for %s' % i, False, 'YELLOW')
env.append_unique('LIBPATH_{}_{}'.format(validated_platform,uselib), qtlibs)
env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), qtincludes)
env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), os.path.join(qtincludes, i))
# Platform is win_x64
elif is_platform_win_x64:
# Release library names are like QtCore5
for k in ("lib%s.a", "lib%s5.a", "%s.lib", "%s5.lib"):
lib = os.path.join(qtlibs, k % i)
if os.path.exists(lib):
env.append_unique('LIB_{}_{}'.format(validated_platform,uselib), i + k[k.find("%s") + 2 : k.find('.')])
if not checked_win_x64:
self.msg('Checking for %s' % i, lib, 'GREEN')
break
else:
if not checked_win_x64:
self.msg('Checking for %s' % i, False, 'YELLOW')
env.append_unique('LIBPATH_{}_{}'.format(validated_platform,uselib), qtlibs)
env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), qtincludes)
env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), os.path.join(qtincludes, i.replace('Qt5', 'Qt')))
# Debug library names are like QtCore5d
uselib = i.upper() + "D"
for k in ("lib%sd.a", "lib%sd5.a", "%sd.lib", "%sd5.lib"):
lib = os.path.join(qtlibs, k % i)
if os.path.exists(lib):
env.append_unique('LIB_{}_{}'.format(validated_platform,uselib), i + k[k.find("%s") + 2 : k.find('.')])
if not checked_win_x64:
self.msg('Checking for %s' % i, lib, 'GREEN')
break
else:
if not checked_win_x64:
self.msg('Checking for %s' % i, False, 'YELLOW')
env.append_unique('LIBPATH_{}_{}'.format(validated_platform,uselib), qtlibs)
env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), qtincludes)
env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), os.path.join(qtincludes, i.replace('Qt5', 'Qt')))
else:
# The current target platform is not supported for QT5
Logs.debug('lumberyard: QT5 detection not supported for platform {}'.format(validated_platform))
pass
if is_platform_darwin:
checked_darwin = True
elif is_platform_linux:
checked_linux = True
elif is_platform_win_x64:
checked_win_x64 = True
@conf
def simplify_qt5_libs(self):
# the libpaths make really long command-lines
# remove the qtcore ones from qtgui, etc
env = self.env
def process_lib(vars_, coreval):
validated_platforms = self.get_available_platforms()
for validated_platform in validated_platforms:
for d in vars_:
var = d.upper()
if var == 'QTCORE':
continue
value = env['LIBPATH_{}_{}'.format(validated_platform, var)]
if value:
core = env[coreval]
accu = []
for lib in value:
if lib in core:
continue
accu.append(lib)
env['LIBPATH_{}_{}'.format(validated_platform, var)] = accu
process_lib(self.qt5_vars, 'LIBPATH_QTCORE')
process_lib(self.qt5_vars_debug, 'LIBPATH_QTCORE_DEBUG')
@conf
def add_qt5_rpath(self):
# rpath if wanted
env = self.env
if getattr(Options.options, 'want_rpath', False):
def process_rpath(vars_, coreval):
validated_platforms = self.get_available_platforms()
for validated_platform in validated_platforms:
for d in vars_:
var = d.upper()
value = env['LIBPATH_{}_{}'.format(validated_platform, var)]
if value:
core = env[coreval]
accu = []
for lib in value:
if var != 'QTCORE':
if lib in core:
continue
accu.append('-Wl,--rpath='+lib)
env['RPATH_{}_{}'.format(validated_platform, var)] = accu
process_rpath(self.qt5_vars, 'LIBPATH_QTCORE')
process_rpath(self.qt5_vars_debug, 'LIBPATH_QTCORE_DEBUG')
@conf
def set_qt5_libs_to_check(self):
if not hasattr(self, 'qt5_vars'):
self.qt5_vars = QT5_LIBS
self.qt5_vars = Utils.to_list(self.qt5_vars)
if not hasattr(self, 'qt5_vars_debug'):
self.qt5_vars_debug = [a + '_debug' for a in self.qt5_vars]
self.qt5_vars_debug = Utils.to_list(self.qt5_vars_debug)
@conf
def set_qt5_defines(self):
if sys.platform != 'win32':
return
validated_platforms = self.get_available_platforms()
for validated_platform in validated_platforms:
for x in self.qt5_vars:
y=x.replace('Qt5', 'Qt')[2:].upper()
self.env.append_unique('DEFINES_{}_{}'.format(validated_platform,x.upper()), 'QT_%s_LIB' % y)
self.env.append_unique('DEFINES_{}_{}_DEBUG'.format(validated_platform,x.upper()), 'QT_%s_LIB' % y)
def options(opt):
"""
Command-line options
"""
opt.add_option('--want-rpath', action='store_true', default=False, dest='want_rpath', help='enable the rpath for qt libraries')
opt.add_option('--header-ext',
type='string',
default='',
help='header extension for moc files',
dest='qt_header_ext')
for i in 'qtdir qtbin qtlibs'.split():
opt.add_option('--'+i, type='string', default='', dest=i)
opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt5", default=False)
SUPPORTED_QTLIB_PLATFORMS = ['win_x64_vs2013', 'win_x64_vs2015', 'win_x64_vs2017', 'win_x64_clang', 'darwin_x64', 'linux_x64']
PLATFORM_TO_QTGA_SUBFOLDER = {
"win_x64_vs2013": ["win32/vc120/qtga.dll", "win32/vc120/qtgad.dll", "win32/vc120/qtgad.pdb"],
"win_x64_vs2015": ["win32/vc140/qtga.dll", "win32/vc140/qtgad.dll", "win32/vc140/qtgad.pdb"],
"win_x64_vs2017": ["win32/vc140/qtga.dll", "win32/vc140/qtgad.dll", "win32/vc140/qtgad.pdb"], # Not an error, VS2017 links with VS2015 binaries
"win_x64_clang": ["win32/vc140/qtga.dll", "win32/vc140/qtgad.dll", "win32/vc140/qtgad.pdb"],
"darwin_x64": ["macx/libqtga.dylib", "macx/libqtga_debug.dylib"],
"linux_x64": []
}
IGNORE_QTLIB_PATTERNS = [
# cmake Not needed
os.path.normcase('lib/cmake'),
# Special LY built plugins that will be copied from a different source
'qtga.dll',
'qtga.pdb',
'qtgad.dll',
'qtgad.pdb',
'libqtga.dylib',
'libqtga_debug.dylib'
]
ICU_DLLS = [
"icudt54",
"icuin54",
"icuuc54"
]
WINDOWS_RC_QT_DLLS = [
"Qt5Core",
"Qt5Gui",
"Qt5Network",
"Qt5Qml",
"Qt5Quick",
"Qt5Svg",
"Qt5Widgets",
]
WINDOWS_LMBRSETUP_QT_DLLS = [
"Qt5Core",
"Qt5Gui",
"Qt5Network",
"Qt5Qml",
"Qt5Quick",
"Qt5Svg",
"Qt5Widgets",
"Qt5Concurrent",
"Qt5WinExtras",
"libEGL",
"libGLESv2"
]
WINDOWS_MAIN_QT_DLLS = [
"Qt5Core",
"Qt5Gui",
"Qt5Network",
"Qt5Qml",
"Qt5Quick",
"Qt5Svg",
"Qt5Widgets",
"Qt5Bluetooth",
"Qt5CLucene",
"Qt5Concurrent",
"Qt5DBus",
"Qt5DesignerComponents",
"Qt5Designer",
"Qt5Help",
"Qt5MultimediaQuick_p",
"Qt5Multimedia",
"Qt5MultimediaWidgets",
"Qt5Nfc",
"Qt5OpenGL",
"Qt5Positioning",
"Qt5PrintSupport",
"Qt5QuickParticles",
"Qt5QuickTest",
"Qt5Script",
"Qt5ScriptTools",
"Qt5Sensors",
"Qt5SerialPort",
"Qt5Sql",
"Qt5Test",
"Qt5WebChannel",
"Qt5WebKit",
"Qt5WebKitWidgets",
"Qt5WinExtras",
"Qt5XmlPatterns",
"Qt5Xml",
"libEGL",
"libGLESv2"
]
@conf
def qtlib_bootstrap(self, platform, configuration):
global QT_SDK_MISSING
if platform in QT_SDK_MISSING:
return
def _copy_folder(src, dst, qt_type, pattern, is_ignore):
dst_type = os.path.normcase(os.path.join(dst, qt_type))
return copy_tasks.copy_tree2(src, dst_type, False, pattern, is_ignore, False)
def _copy_file(src_path, dest_path):
src = os.path.normcase(src_path)
dst = os.path.normcase(dest_path)
copy_file = copy_tasks.should_overwrite_file(src, dst)
if copy_file:
try:
# In case the file is readonly, we'll remove the existing file first
if os.path.exists(dst):
os.chmod(dst, stat.S_IWRITE)
except Exception as err:
Logs.warn('[WARN] Unable to make target file {} writable {}'.format(dst, err.message))
try:
shutil.copy2(src, dst)
except Exception as err:
Logs.warn('[WARN] Unable to copy {} to {}: {}'.format(src, dst, err.message))
return 1
else:
return 0
def _copy_dlls(qt_dlls_source, target_folder):
if not os.path.exists(target_folder):
os.makedirs(target_folder)
copied = 0
for qtdll in qt_dlls_source:
src_dll = os.path.join(self.env.QT_BIN_DIR, qtdll)
dst_dll = os.path.join(target_folder, qtdll)
copied += _copy_file(src_dll, dst_dll)
return copied
def _copy_qtlib_folder(ctx, dst, current_platform, patterns, is_required_pattern):
# Used to track number of files copied by this function
num_files_copied = 0
# Create the qtlibs subfolder
dst_qtlib = os.path.normcase(os.path.join(dst, 'qtlibs'))
if not os.path.exists(dst_qtlib):
os.makedirs(dst_qtlib)
# If qt fails to configure, the folder copies below will give meaningless errors.
# Test for this condition and error out here
if not ctx.env.QT_LIB_DIR:
Logs.warn('unable to find QT')
return num_files_copied
# Copy the libs for qtlibs
lib_pattern = patterns
if 'lib' in patterns:
lib_pattern = patterns['lib']
num_files_copied += _copy_folder(ctx.env.QT_LIB_DIR, dst_qtlib, 'lib', lib_pattern, is_required_pattern)
# special setup for linux_x64 platform
if platform == 'linux_x64':
_prepare_lib_folder_for_linux(os.path.join(dst_qtlib, 'lib'))
# Copy the qml for qtlibs
qml_pattern = patterns
if 'qml' in patterns:
qml_pattern = patterns['qml']
num_files_copied += _copy_folder(ctx.env.QT_QML_DIR, dst_qtlib, 'qml', qml_pattern, is_required_pattern)
# Copy the plugins for qtlibs
plugins_pattern = patterns
if 'plugins' in patterns:
plugins_pattern = patterns['plugins']
num_files_copied += _copy_folder(ctx.env.QT_PLUGINS_DIR, dst_qtlib, 'plugins', plugins_pattern, is_required_pattern)
# Copy the extra txt files
qt_base = os.path.normpath(ctx.ThirdPartyPath('qt',''))
num_files_copied += _copy_file(os.path.join(qt_base, 'QT-NOTICE.TXT'),
os.path.join(dst_qtlib, 'QT-NOTICE.TXT'))
num_files_copied += _copy_file(os.path.join(qt_base, 'ThirdPartySoftware_Listing.txt'),
os.path.join(dst_qtlib, 'ThirdPartySoftware_Listing.txt'))
qt_tga_files = PLATFORM_TO_QTGA_SUBFOLDER.get(current_platform, [])
qt_tga_src_root = os.path.normcase(ctx.Path('Tools/Redistributables/QtTgaImageFormatPlugin'))
for qt_tga_file in qt_tga_files:
if not is_copy_pdbs and qt_tga_file.endswith('.pdb'):
continue
source_qt_tga = os.path.normcase(os.path.join(qt_tga_src_root, qt_tga_file))
dest_qt_tga = os.path.normcase(
os.path.join(dst_qtlib, 'plugins/imageformats', os.path.basename(qt_tga_file)))
num_files_copied += _copy_file(source_qt_tga, dest_qt_tga)
return num_files_copied
def _copy_qt_dlls(ctx, dst, copy_dll_list):
debug_dll_fn = lambda qt: qt + ('d.dll' if is_debug else '.dll')
ext_dll_fn = lambda dll: dll + '.dll'
ext_pdb_fn = lambda pdb: pdb + '.pdb'
qt_main_dlls = [debug_dll_fn(qt) for qt in copy_dll_list]
qt_main_dlls += [ext_dll_fn(icu) for icu in ICU_DLLS]
if is_debug and is_copy_pdbs:
qt_main_dlls += [ext_pdb_fn(qt) for qt in copy_dll_list]
num_files_copied = 0
try:
if not os.path.exists(ctx.env.QT_BIN_DIR):
Logs.debug('Unable to locate QT Bin folder: {}.'.format(ctx.env.QT_BIN_DIR))
QT_SDK_MISSING.add(platform)
return num_files_copied
except TypeError:
Logs.debug('Unable to locate QT Bin folder.')
QT_SDK_MISSING.add(platform)
return num_files_copied
# Copy the QT.dlls to the main configuration output folder
num_files_copied += _copy_dlls(qt_main_dlls, dst)
return num_files_copied
is_copy_pdbs = self.is_option_true('copy_3rd_party_pdbs')
output_paths = self.get_output_folders(platform, configuration)
if len(output_paths) != 1:
self.fatal('Assertion error: Multiple output paths returned')
output_path = output_paths[0].abspath()
if not os.path.exists(output_path):
os.makedirs(output_path)
# Check if current configuration is a debug build
is_debug = configuration.startswith('debug')
# For windows, we will bootstrap copy the Qt Dlls to the main and rc subfolder
# (for non-test and non-dedicated configurations)
if platform in ['win_x64_clang', 'win_x64_vs2017', 'win_x64_vs2015', 'win_x64_vs2013'] and configuration in \
['debug', 'profile', 'debug_test', 'profile_test', 'debug_dedicated', 'profile_dedicated']:
copy_timer = Utils.Timer()
# Copy all the dlls required by Qt
# Copy to the current configuration's BinXXX folder
files_copied = _copy_qt_dlls(self, output_path, WINDOWS_MAIN_QT_DLLS)
# Copy to the current configuration's BinXXX/rc folder
files_copied += _copy_qt_dlls(self, os.path.join(output_path, 'rc'), WINDOWS_RC_QT_DLLS)
# Copy to the LmbrSetup folder
files_copied += _copy_qt_dlls(self,
self.Path(self.get_lmbr_setup_tools_output_folder()), lmbr_setup_tools.LMBR_SETUP_QT_FILTERS['win']['Modules'])
# Report the sync job, but only report the number of files if any were actually copied
if files_copied > 0:
Logs.info('[INFO] Copied Qt DLLs to target folder: {} files copied. ({})'
.format(files_copied, str(copy_timer)))
else:
if Logs.verbose > 1:
Logs.info('[INFO] Skipped qt dll copy to target folder. ({})'.format(str(copy_timer)))
# Check if this is a platform that supports the qtlib folder synchronization
if platform in SUPPORTED_QTLIB_PLATFORMS:
copy_timer = Utils.Timer()
# Used as a pattern-set to ignore certain qt library files
ignore_lib_patterns = IGNORE_QTLIB_PATTERNS if is_copy_pdbs else IGNORE_QTLIB_PATTERNS + ['.pdb']
# Copy the entire qtlib folder to current output path
# Contains lib, plugins and qml folders, and license information
files_copied = _copy_qtlib_folder(self, output_path, platform, ignore_lib_patterns, False)
lmbr_configuration_key = 'debug' if is_debug else 'profile'
lmbr_platform_key = ''
for key in lmbr_setup_tools.LMBR_SETUP_QT_FILTERS:
if platform.startswith(key):
lmbr_platform_key = key
break
if not lmbr_platform_key:
Logs.error('Cannot find the current configuration ({}) to setup LmbrSetup folder.'.format(platform))
files_copied += _copy_qtlib_folder(self,
self.Path(self.get_lmbr_setup_tools_output_folder()),
platform, lmbr_setup_tools.LMBR_SETUP_QT_FILTERS[lmbr_platform_key]['qtlibs'][lmbr_configuration_key], True)
# Report the sync job, but only report the number of files if any were actually copied
if files_copied > 0:
Logs.info('[INFO] Copied qtlibs folder to target folder: {} files copied. ({})'
.format(files_copied, str(copy_timer)))
else:
if Logs.verbose > 1:
Logs.info('[INFO] Copied qtlibs folder to target folder: No files copied. ({})'
.format(str(copy_timer)))
| []
| []
| [
"QT5_ROOT",
"QT5_LIBDIR",
"QT5_BIN",
"PKG_CONFIG_PATH",
"QT5_INCLUDES"
]
| [] | ["QT5_ROOT", "QT5_LIBDIR", "QT5_BIN", "PKG_CONFIG_PATH", "QT5_INCLUDES"] | python | 5 | 0 | |
paths.go | package astilectron
import (
"fmt"
"os"
"path/filepath"
"strings"
)
// Paths represents the set of paths needed by Astilectron
type Paths struct {
appExecutable string
appIconDarwinSrc string
appIconDefaultSrc string
astilectronApplication string
astilectronDirectory string
astilectronDownloadSrc string
astilectronDownloadDst string
astilectronUnzipSrc string
baseDirectory string
dataDirectory string
electronDirectory string
electronDownloadSrc string
electronDownloadDst string
electronUnzipSrc string
provisionStatus string
vendorDirectory string
}
// newPaths creates new paths
func newPaths(os, arch string, o Options) (p *Paths, err error) {
// Init base directory path
p = &Paths{}
if err = p.initBaseDirectory(o.BaseDirectoryPath); err != nil {
err = fmt.Errorf("initializing base directory failed: %w", err)
return
}
// Init data directory path
if err = p.initDataDirectory(o.DataDirectoryPath, o.AppName); err != nil {
err = fmt.Errorf("initializing data directory failed: %w", err)
return
}
// Init other paths
//!\\ Order matters
p.appIconDarwinSrc = o.AppIconDarwinPath
if len(p.appIconDarwinSrc) > 0 && !filepath.IsAbs(p.appIconDarwinSrc) {
p.appIconDarwinSrc = filepath.Join(p.dataDirectory, p.appIconDarwinSrc)
}
p.appIconDefaultSrc = o.AppIconDefaultPath
if len(p.appIconDefaultSrc) > 0 && !filepath.IsAbs(p.appIconDefaultSrc) {
p.appIconDefaultSrc = filepath.Join(p.dataDirectory, p.appIconDefaultSrc)
}
p.vendorDirectory = filepath.Join(p.dataDirectory, "vendor")
p.provisionStatus = filepath.Join(p.vendorDirectory, "status.json")
p.astilectronDirectory = filepath.Join(p.vendorDirectory, "astilectron")
p.astilectronApplication = filepath.Join(p.astilectronDirectory, "main.js")
p.astilectronDownloadSrc = AstilectronDownloadSrc(o.VersionAstilectron)
p.astilectronDownloadDst = filepath.Join(p.vendorDirectory, fmt.Sprintf("astilectron-v%s.zip", o.VersionAstilectron))
p.astilectronUnzipSrc = filepath.Join(p.astilectronDownloadDst, fmt.Sprintf("astilectron-%s", o.VersionAstilectron))
if o.CustomElectronPath == "" {
p.electronDirectory = filepath.Join(p.vendorDirectory, fmt.Sprintf("electron-%s-%s", os, arch))
p.electronDownloadSrc = ElectronDownloadSrc(os, arch, o.VersionElectron)
p.electronDownloadDst = filepath.Join(p.vendorDirectory, fmt.Sprintf("electron-%s-%s-v%s.zip", os, arch, o.VersionElectron))
p.electronUnzipSrc = p.electronDownloadDst
p.initAppExecutable(os, o.AppName)
} else {
p.appExecutable = o.CustomElectronPath
}
return
}
// initBaseDirectory initializes the base directory path
func (p *Paths) initBaseDirectory(baseDirectoryPath string) (err error) {
// No path specified in the options
p.baseDirectory = baseDirectoryPath
if len(p.baseDirectory) == 0 {
// Retrieve executable path
var ep string
if ep, err = os.Executable(); err != nil {
err = fmt.Errorf("retrieving executable path failed: %w", err)
return
}
p.baseDirectory = filepath.Dir(ep)
}
// We need the absolute path
if p.baseDirectory, err = filepath.Abs(p.baseDirectory); err != nil {
err = fmt.Errorf("computing absolute path failed: %w", err)
return
}
return
}
// initDataDirectory initializes the data directory path
func (p *Paths) initDataDirectory(dataDirectoryPath, appName string) (err error) {
// Path is specified in the options
if len(dataDirectoryPath) > 0 {
// We need the absolute path
if p.dataDirectory, err = filepath.Abs(dataDirectoryPath); err != nil {
err = fmt.Errorf("computing absolute path of %s failed: %w", dataDirectoryPath, err)
return
}
return
}
// If the APPDATA env exists, we use it
if v := os.Getenv("APPDATA"); len(v) > 0 {
p.dataDirectory = filepath.Join(v, appName)
return
}
// Default to base directory path
p.dataDirectory = p.baseDirectory
return
}
// AstilectronDownloadSrc returns the download URL of the (currently platform-independent) astilectron zip file
func AstilectronDownloadSrc(versionAstilectron string) string {
return fmt.Sprintf("https://github.com/asticode/astilectron/archive/v%s.zip", versionAstilectron)
}
// ElectronDownloadSrc returns the download URL of the platform-dependent electron zip file
func ElectronDownloadSrc(os, arch, versionElectron string) string {
// Get OS name
var o string
switch strings.ToLower(os) {
case "darwin":
o = "darwin"
case "linux":
o = "linux"
case "windows":
o = "win32"
}
// Get arch name
var a = "ia32"
if strings.ToLower(arch) == "amd64" {
a = "x64"
} else if strings.ToLower(arch) == "arm" && o == "linux" {
a = "armv7l"
} else if strings.ToLower(arch) == "arm64" {
a = "arm64"
}
// Return url
return fmt.Sprintf("https://github.com/electron/electron/releases/download/v%s/electron-v%s-%s-%s.zip", versionElectron, versionElectron, o, a)
}
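// Illustrative example (not part of the original file): with os="windows", arch="amd64" and
// versionElectron="4.0.1" (a hypothetical version), the mapping above yields o="win32" and
// a="x64", so the returned URL is
// https://github.com/electron/electron/releases/download/v4.0.1/electron-v4.0.1-win32-x64.zip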
// initAppExecutable initializes the app executable path
func (p *Paths) initAppExecutable(os, appName string) {
switch os {
case "darwin":
if appName == "" {
appName = "Electron"
}
p.appExecutable = filepath.Join(p.electronDirectory, appName+".app", "Contents", "MacOS", appName)
case "linux":
p.appExecutable = filepath.Join(p.electronDirectory, "electron")
case "windows":
p.appExecutable = filepath.Join(p.electronDirectory, "electron.exe")
}
}
// AppExecutable returns the app executable path
func (p Paths) AppExecutable() string {
return p.appExecutable
}
// AppIconDarwinSrc returns the darwin app icon path
func (p Paths) AppIconDarwinSrc() string {
return p.appIconDarwinSrc
}
// AppIconDefaultSrc returns the default app icon path
func (p Paths) AppIconDefaultSrc() string {
return p.appIconDefaultSrc
}
// BaseDirectory returns the base directory path
func (p Paths) BaseDirectory() string {
return p.baseDirectory
}
// AstilectronApplication returns the astilectron application path
func (p Paths) AstilectronApplication() string {
return p.astilectronApplication
}
// AstilectronDirectory returns the astilectron directory path
func (p Paths) AstilectronDirectory() string {
return p.astilectronDirectory
}
// AstilectronDownloadDst returns the astilectron download destination path
func (p Paths) AstilectronDownloadDst() string {
return p.astilectronDownloadDst
}
// AstilectronDownloadSrc returns the astilectron download source path
func (p Paths) AstilectronDownloadSrc() string {
return p.astilectronDownloadSrc
}
// AstilectronUnzipSrc returns the astilectron unzip source path
func (p Paths) AstilectronUnzipSrc() string {
return p.astilectronUnzipSrc
}
// DataDirectory returns the data directory path
func (p Paths) DataDirectory() string {
return p.dataDirectory
}
// ElectronDirectory returns the electron directory path
func (p Paths) ElectronDirectory() string {
return p.electronDirectory
}
// ElectronDownloadDst returns the electron download destination path
func (p Paths) ElectronDownloadDst() string {
return p.electronDownloadDst
}
// ElectronDownloadSrc returns the electron download source path
func (p Paths) ElectronDownloadSrc() string {
return p.electronDownloadSrc
}
// ElectronUnzipSrc returns the electron unzip source path
func (p Paths) ElectronUnzipSrc() string {
return p.electronUnzipSrc
}
// ProvisionStatus returns the provision status path
func (p Paths) ProvisionStatus() string {
return p.provisionStatus
}
// VendorDirectory returns the vendor directory path
func (p Paths) VendorDirectory() string {
return p.vendorDirectory
}
| [
"\"APPDATA\""
]
| []
| [
"APPDATA"
]
| [] | ["APPDATA"] | go | 1 | 0 | |
service/quicksight/api_op_DeleteDataSet.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package quicksight
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
smithy "github.com/awslabs/smithy-go"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// Deletes a dataset.
func (c *Client) DeleteDataSet(ctx context.Context, params *DeleteDataSetInput, optFns ...func(*Options)) (*DeleteDataSetOutput, error) {
stack := middleware.NewStack("DeleteDataSet", smithyhttp.NewStackRequest)
options := c.options.Copy()
for _, fn := range optFns {
fn(&options)
}
addawsRestjson1_serdeOpDeleteDataSetMiddlewares(stack)
awsmiddleware.AddRequestInvocationIDMiddleware(stack)
smithyhttp.AddContentLengthMiddleware(stack)
addResolveEndpointMiddleware(stack, options)
v4.AddComputePayloadSHA256Middleware(stack)
addRetryMiddlewares(stack, options)
addHTTPSignerV4Middleware(stack, options)
awsmiddleware.AddAttemptClockSkewMiddleware(stack)
addClientUserAgent(stack)
smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
smithyhttp.AddCloseResponseBodyMiddleware(stack)
addOpDeleteDataSetValidationMiddleware(stack)
stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteDataSet(options.Region), middleware.Before)
addRequestIDRetrieverMiddleware(stack)
addResponseErrorMiddleware(stack)
for _, fn := range options.APIOptions {
if err := fn(stack); err != nil {
return nil, err
}
}
handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
result, metadata, err := handler.Handle(ctx, params)
if err != nil {
return nil, &smithy.OperationError{
ServiceID: ServiceID,
OperationName: "DeleteDataSet",
Err: err,
}
}
out := result.(*DeleteDataSetOutput)
out.ResultMetadata = metadata
return out, nil
}
type DeleteDataSetInput struct {
// The AWS account ID.
//
// This member is required.
AwsAccountId *string
// The ID for the dataset that you want to create. This ID is unique per AWS Region
// for each AWS account.
//
// This member is required.
DataSetId *string
}
type DeleteDataSetOutput struct {
// The Amazon Resource Name (ARN) of the dataset.
Arn *string
// The ID for the dataset that you want to create. This ID is unique per AWS Region
// for each AWS account.
DataSetId *string
// The AWS request ID for this operation.
RequestId *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func addawsRestjson1_serdeOpDeleteDataSetMiddlewares(stack *middleware.Stack) {
stack.Serialize.Add(&awsRestjson1_serializeOpDeleteDataSet{}, middleware.After)
stack.Deserialize.Add(&awsRestjson1_deserializeOpDeleteDataSet{}, middleware.After)
}
func newServiceMetadataMiddleware_opDeleteDataSet(region string) awsmiddleware.RegisterServiceMetadata {
return awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "quicksight",
OperationName: "DeleteDataSet",
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
hyperglass/configuration/__init__.py | # https://github.com/checktheroads/hyperglass
"""
Imports configuration variables from configuration files and returns default values if undefined.
"""
# Standard Imports
import os
import math
import logging
# Module Imports
import toml
import logzero
from logzero import logger
# Project Imports
import hyperglass
# Project Directories
working_dir = os.path.dirname(os.path.abspath(__file__))
hyperglass_root = os.path.dirname(hyperglass.__file__)
# TOML Imports
config = toml.load(os.path.join(working_dir, "configuration.toml"))
devices = toml.load(os.path.join(working_dir, "devices.toml"))
def debug_state():
"""Returns the debug boolean used to set the logzero log level"""
state = config.get("debug", False)
return state
# Logzero Configuration
if debug_state():
logzero.loglevel(logging.DEBUG)
else:
logzero.loglevel(logging.INFO)
def blacklist():
"""Returns the list of subnets/IPs defined under the blacklist key in configuration.toml"""
blacklist_config = config["blacklist"]
return blacklist_config
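# Illustrative sketch (assumed layout, inferred only from the lookup and docstring above; the
# prefixes are hypothetical documentation ranges): configuration.toml is expected to carry a
# top-level blacklist entry such as
#     blacklist = ["198.51.100.0/24", "2001:db8::/32"]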
def requires_ipv6_cidr(nos):
"""Returns boolean for whether the input NOS is in the NOS list defined under the \
requires_ipv6_cidr key in configuration.toml"""
nos_list = config["requires_ipv6_cidr"]
return bool(nos in nos_list)
def networks():
"""Returns dictionary of ASNs as keys, list of associated locations as values. Imported as a \
Jinja2 variable on the main page that populates the network/ASN select class."""
asn_dict = {}
routers_list = devices["router"]
for router_config in routers_list.values():
asn = router_config["asn"]
if asn in asn_dict:
asn_dict[asn].append(router_config["location"])
else:
asn_dict[asn] = [router_config["location"]]
return asn_dict
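# Illustrative return value (hypothetical data): with two routers in devices.toml sharing
# asn = "65000" and locations "New York" and "Los Angeles", networks() would return
#     {"65000": ["New York", "Los Angeles"]}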
def hostnames():
"""Returns list of all router hostnames for input validation"""
hostname_list = []
routers_list = devices["router"]
for router in routers_list:
hostname_list.append(router)
return hostname_list
def locations_list():
"""Returns a dictionary of ASNs as keys, with a list of dicts of associated locations, router \
hostnames, and router display names as values. Used by Flask to populate the /routers/<asn> \
route, which is ingested by a JS Ajax call to populate the list of locations associated with \
the selected network/ASN on the main page."""
networks_dict = {}
routers_list = devices["router"]
for router in routers_list:
asn = routers_list[router]["asn"]
if asn in networks_dict:
networks_dict[asn].append(
dict(
location=routers_list[router]["location"],
hostname=router,
display_name=routers_list[router]["display_name"],
)
)
else:
networks_dict[asn] = [
dict(
location=routers_list[router]["location"],
hostname=router,
display_name=routers_list[router]["display_name"],
)
]
return networks_dict
def codes():
"""Reusable status code numbers"""
code_dict = {
# 200: renders standard display text
"success": 200,
# 405: Renders Bulma "warning" class notification with message text
"warning": 405,
# 415: Renders Bulma "danger" class notification with message text
"danger": 415,
# 504: Renders Bulma "danger" class notification, used for Ping/Traceroute errors
"error": 504,
}
return code_dict
def codes_reason():
"""Reusable status code descriptions"""
code_desc_dict = {
"200": "Valid Query",
"405": "Query Not Allowed",
"415": "Query Invalid",
"504": "Unable to reach Ping target",
}
return code_desc_dict
def rest_list():
"""Returns list of supported hyperglass API types"""
rest = ["frr", "bird"]
return rest
def scrape_list():
"""Returns list of configured network operating systems"""
config_commands = toml.load(os.path.join(working_dir, "commands.toml"))
scrape = []
for nos in config_commands:
scrape.append(nos)
return scrape
def supported_nos():
"""Combines scrape_list & rest_list for full list of supported network operating systems"""
scrape = scrape_list()
rest = rest_list()
supported = scrape + rest
return supported
def command(nos):
"""Associates input NOS with matched commands defined in commands.toml"""
config_commands = toml.load(os.path.join(working_dir, "commands.toml"))
commands = None
if nos in scrape_list():
commands = {
"dual": config_commands[nos][0]["dual"],
"ipv4": config_commands[nos][0]["ipv4"],
"ipv6": config_commands[nos][0]["ipv6"],
}
return commands
def credential(cred):
"""Associates input credential key name with configured credential username & password in \
devices.toml."""
c_list = devices["credential"]
return dict(username=c_list[cred]["username"], password=c_list[cred]["password"])
def device(dev):
"""Associates input device key name with configured device attributes in devices.toml"""
device_config = devices["router"][dev]
return dict(
address=device_config.get("address"),
asn=device_config.get("asn"),
src_addr_ipv4=device_config.get("src_addr_ipv4"),
src_addr_ipv6=device_config.get("src_addr_ipv6"),
credential=device_config.get("credential"),
location=device_config.get("location"),
display_name=device_config.get("display_name"),
port=device_config.get("port"),
type=device_config.get("type"),
proxy=device_config.get("proxy"),
)
def proxy(prx):
"""Associates input proxy key name with configured proxy attributes in devices.toml"""
proxy_config = devices["proxy"][prx]
return dict(
address=proxy_config["address"],
username=proxy_config["username"],
password=proxy_config["password"],
type=proxy_config["type"],
ssh_command=proxy_config["ssh_command"],
)
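# Illustrative sketch (assumed layout, inferred from the devices["router"], devices["credential"]
# and devices["proxy"] lookups in device(), credential() and proxy() above; every name and value
# below is hypothetical):
#     [router.edge01]
#     address = "192.0.2.1"
#     asn = "65000"
#     credential = "default"
#     location = "New York"
#     display_name = "New York, NY"
#     port = 22
#     type = "cisco_ios"
#     proxy = "jumpbox"
#
#     [credential.default]
#     username = "hyperglass"
#     password = "example-password"
#
#     [proxy.jumpbox]
#     address = "192.0.2.10"
#     username = "hyperglass"
#     password = "example-password"
#     type = "linux_ssh"
#     ssh_command = "ssh -l {username} {host}"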
def params():
"""Builds combined nested dictionary of all parameters defined in configuration.toml, and if \
undefined, uses a default value"""
# pylint: disable=too-many-statements
# Dear PyLint, this function is intended to be long AF, because hyperglass is inteded to be \
# customizable AF. It would also be silly AF to break this into multiple functions, and you'd \
# probably still complain. <3 -ML
general = {}
branding = {}
features = {}
messages = {}
general["primary_asn"] = config["general"].get("primary_asn", "65000")
general["org_name"] = config["general"].get("org_name", "The Company")
general["google_analytics"] = config["general"].get("google_analytics", "")
general["redis_host"] = config["general"].get(
"redis_host", os.environ.get("REDIS_HOST", "localhost")
)
general["redis_port"] = config["general"].get(
"redis_port", os.environ.get("REDIS_PORT", 6379)
)
features["rate_limit"] = config["features"]["rate_limit"]
features["rate_limit"]["redis_id"] = config["features"]["rate_limit"].get(
"redis_id", 1
)
features["rate_limit"]["query"] = config["features"]["rate_limit"]["query"]
features["rate_limit"]["query"]["rate"] = config["features"]["rate_limit"][
"query"
].get("rate", 5)
features["rate_limit"]["query"]["period"] = config["features"]["rate_limit"]["query"].get(
"period", "minute"
)
features["rate_limit"]["query"]["title"] = config["features"]["rate_limit"][
"query"
].get("title", "Query Limit Reached")
features["rate_limit"]["query"]["message"] = config["features"]["rate_limit"][
"query"
].get(
"message",
f"""Query limit of {features["rate_limit"]["query"]["rate"]} per \
{features["rate_limit"]["query"]["period"]} reached. Please wait one minute and try \
again.""",
)
features["rate_limit"]["query"]["button"] = config["features"]["rate_limit"][
"query"
].get("button", "Try Again")
features["rate_limit"]["message"] = config["features"]["rate_limit"].get(
"message",
f"""Query limit of {features["rate_limit"]["query"]["rate"]} per \
{features["rate_limit"]["query"]["period"]} reached. Please wait one minute and try again.""",
)
features["rate_limit"]["site"] = config["features"]["rate_limit"]["site"]
features["rate_limit"]["site"]["rate"] = config["features"]["rate_limit"][
"site"
].get("rate", 60)
features["rate_limit"]["site"]["period"] = config["features"]["rate_limit"][
"site"
].get("period", "minute")
features["rate_limit"]["site"]["title"] = config["features"]["rate_limit"][
"site"
].get("title", "Limit Reached")
features["rate_limit"]["site"]["subtitle"] = config["features"]["rate_limit"][
"site"
].get(
"subtitle",
f'You have accessed this site more than {features["rate_limit"]["site"]["rate"]} '
f'times in the last {features["rate_limit"]["site"]["period"]}.',
)
features["rate_limit"]["site"]["button"] = config["features"]["rate_limit"][
"site"
].get("button", "Try Again")
features["cache"] = config["features"]["cache"]
features["cache"]["redis_id"] = config["features"]["cache"].get("redis_id", 0)
features["cache"]["timeout"] = config["features"]["cache"].get("timeout", 120)
features["cache"]["show_text"] = config["features"]["cache"].get("show_text", True)
features["cache"]["text"] = config["features"]["cache"].get(
"text",
f'Results will be cached for {math.ceil(features["cache"]["timeout"] / 60)} minutes.',
)
features["bgp_route"] = config["features"]["bgp_route"]
features["bgp_route"]["enable"] = config["features"]["bgp_route"].get(
"enable", True
)
features["bgp_community"] = config["features"]["bgp_community"]
features["bgp_community"]["enable"] = config["features"]["bgp_community"].get(
"enable", True
)
features["bgp_community"]["regex"] = config["features"]["bgp_community"]["regex"]
features["bgp_community"]["regex"]["decimal"] = config["features"]["bgp_community"][
"regex"
].get("decimal", r"^[0-9]{1,10}$")
features["bgp_community"]["regex"]["extended_as"] = config["features"][
"bgp_community"
]["regex"].get("extended_as", r"^([0-9]{0,5})\:([0-9]{1,5})$")
features["bgp_community"]["regex"]["large"] = config["features"]["bgp_community"][
"regex"
].get("large", r"^([0-9]{1,10})\:([0-9]{1,10})\:[0-9]{1,10}$")
features["bgp_aspath"] = config["features"]["bgp_aspath"]
features["bgp_aspath"]["enable"] = config["features"]["bgp_aspath"].get(
"enable", True
)
features["bgp_aspath"]["regex"] = config["features"]["bgp_aspath"]["regex"]
features["bgp_aspath"]["regex"]["mode"] = config["features"]["bgp_aspath"][
"regex"
].get("mode", "asplain")
features["bgp_aspath"]["regex"]["asplain"] = config["features"]["bgp_aspath"][
"regex"
].get("asplain", r"^(\^|^\_)(\d+\_|\d+\$|\d+\(\_\.\+\_\))+$")
features["bgp_aspath"]["regex"]["asdot"] = config["features"]["bgp_aspath"][
"regex"
].get("asdot", r"^(\^|^\_)((\d+\.\d+)\_|(\d+\.\d+)\$|(\d+\.\d+)\(\_\.\+\_\))+$")
features["bgp_aspath"]["regex"]["pattern"] = config["features"]["bgp_aspath"][
"regex"
].get(features["bgp_aspath"]["regex"]["mode"], None)
features["ping"] = config["features"]["ping"]
features["ping"]["enable"] = config["features"]["ping"].get("enable", True)
features["traceroute"] = config["features"]["traceroute"]
features["traceroute"]["enable"] = config["features"]["traceroute"].get(
"enable", True
)
features["max_prefix"] = config["features"]["max_prefix"]
features["max_prefix"]["enable"] = config["features"]["max_prefix"].get(
"enable", False
)
features["max_prefix"]["ipv4"] = config["features"]["max_prefix"].get("ipv4", 24)
features["max_prefix"]["ipv6"] = config["features"]["max_prefix"].get("ipv6", 64)
features["max_prefix"]["message"] = config["features"]["max_prefix"].get(
"message",
"Prefix length must be smaller than /{m}. <b>{i}</b> is too specific.",
)
messages["no_query_type"] = config["messages"].get(
"no_query_type", "Query Type must be specified."
)
messages["no_location"] = config["messages"].get(
"no_location", "A location must be selected."
)
messages["no_input"] = config["messages"].get(
"no_input", "A target must be specified"
)
messages["not_allowed"] = config["messages"].get(
"not_allowed", "<b>{i}</b> is not allowed."
)
messages["requires_ipv6_cidr"] = config["messages"].get(
"requires_ipv6_cidr",
"<b>{d}</b> requires IPv6 BGP lookups to be in CIDR notation.",
)
messages["invalid_ip"] = config["messages"].get(
"invalid_ip", "<b>{i}</b> is not a valid IP address."
)
messages["invalid_dual"] = config["messages"].get(
"invalid_dual", "<b>{i}</b> is an invalid {qt}."
)
messages["general"] = config["messages"].get("general", "An error occurred.")
messages["directed_cidr"] = config["messages"].get(
"directed_cidr", "<b>{q}</b> queries can not be in CIDR format."
)
branding["site_name"] = config["branding"].get("site_name", "hyperglass")
branding["footer"] = config["branding"]["footer"]
branding["footer"]["enable"] = config["branding"]["footer"].get("enable", True)
branding["credit"] = config["branding"]["credit"]
branding["credit"]["enable"] = config["branding"]["credit"].get("enable", True)
branding["peering_db"] = config["branding"]["peering_db"]
branding["peering_db"]["enable"] = config["branding"]["peering_db"].get(
"enable", True
)
branding["text"] = config["branding"]["text"]
branding["text"]["query_type"] = config["branding"]["text"].get(
"query_type", "Query Type"
)
branding["text"]["title_mode"] = config["branding"]["text"].get(
"title_mode", "logo_only"
)
branding["text"]["title"] = config["branding"]["text"].get("title", "hyperglass")
branding["text"]["subtitle"] = config["branding"]["text"].get(
"subtitle", f'AS{general["primary_asn"]}'
)
branding["text"]["results"] = config["branding"]["text"].get("results", "Results")
branding["text"]["location"] = config["branding"]["text"].get(
"location", "Select Location..."
)
branding["text"]["query_placeholder"] = config["branding"]["text"].get(
"query_placeholder", "IP, Prefix, Community, or AS Path"
)
branding["text"]["bgp_route"] = config["branding"]["text"].get(
"bgp_route", "BGP Route"
)
branding["text"]["bgp_community"] = config["branding"]["text"].get(
"bgp_community", "BGP Community"
)
branding["text"]["bgp_aspath"] = config["branding"]["text"].get(
"bgp_aspath", "BGP AS Path"
)
branding["text"]["ping"] = config["branding"]["text"].get("ping", "Ping")
branding["text"]["traceroute"] = config["branding"]["text"].get(
"traceroute", "Traceroute"
)
branding["text"]["404"]["title"] = config["branding"]["text"]["404"].get(
"title", "Error"
)
branding["text"]["404"]["subtitle"] = config["branding"]["text"]["404"].get(
"subtitle", "Page Not Found"
)
branding["text"]["500"]["title"] = config["branding"]["text"]["500"].get(
"title", "Error"
)
branding["text"]["500"]["subtitle"] = config["branding"]["text"]["500"].get(
"subtitle", "Something Went Wrong"
)
branding["text"]["500"]["button"] = config["branding"]["text"]["500"].get(
"button", "Home"
)
branding["text"]["504"]["message"] = config["branding"]["text"]["504"].get(
"message", "Unable to reach <b>{target}</b>."
)
branding["logo"] = config["branding"]["logo"]
branding["logo"]["path"] = config["branding"]["logo"].get(
"path", "static/images/hyperglass-dark.png"
)
branding["logo"]["width"] = config["branding"]["logo"].get("width", 384)
branding["logo"]["favicons"] = config["branding"]["logo"].get(
"favicons", "static/images/favicon/"
)
branding["color"] = config["branding"]["color"]
branding["color"]["background"] = config["branding"]["color"].get(
"background", "#fbfffe"
)
branding["color"]["button_submit"] = config["branding"]["color"].get(
"button_submit", "#40798c"
)
branding["color"]["danger"] = config["branding"]["color"].get("danger", "#ff3860")
branding["color"]["progress_bar"] = config["branding"]["color"].get(
"progress_bar", "#40798c"
)
branding["color"]["tag"]["type"] = config["branding"]["color"]["tag"].get(
"type", "#ff5e5b"
)
branding["color"]["tag"]["type_title"] = config["branding"]["color"]["tag"].get(
"type_title", "#330036"
)
branding["color"]["tag"]["location"] = config["branding"]["color"]["tag"].get(
"location", "#40798c"
)
branding["color"]["tag"]["location_title"] = config["branding"]["color"]["tag"].get(
"location_title", "#330036"
)
branding["font"] = config["branding"]["font"]
branding["font"]["primary"] = config["branding"]["font"]["primary"]
branding["font"]["primary"]["name"] = config["branding"]["font"]["primary"].get(
"name", "Nunito"
)
branding["font"]["primary"]["url"] = config["branding"]["font"]["primary"].get(
"url", "https://fonts.googleapis.com/css?family=Nunito:400,600,700"
)
branding["font"]["mono"] = config["branding"]["font"]["mono"]
branding["font"]["mono"]["name"] = config["branding"]["font"]["mono"].get(
"name", "Fira Mono"
)
branding["font"]["mono"]["url"] = config["branding"]["font"]["mono"].get(
"url", "https://fonts.googleapis.com/css?family=Fira+Mono"
)
branding["font"]["mono"]["size"] = config["branding"]["font"]["mono"].get(
"size", "0.95em"
)
params_dict = dict(
general=general, branding=branding, features=features, messages=messages
)
return params_dict
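# --- Illustrative sketch; not part of the original source ---
# The redis_host/redis_port lookups above fall back in three steps: config
# value, then environment variable, then hard-coded default. A standalone
# version of that pattern (the helper name below is hypothetical):
def _get_with_env_fallback(section, key, env_var, default):
    """Return section[key] if present, else the env_var value, else default."""
    return section.get(key, os.environ.get(env_var, default))
# Example: _get_with_env_fallback(config["general"], "redis_port", "REDIS_PORT", 6379)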
| []
| []
| [
"REDIS_PORT",
"REDIS_HOST"
]
| [] | ["REDIS_PORT", "REDIS_HOST"] | python | 2 | 0 | |
main.go | package main
import (
	"context"
	"flag"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"time"
	"cloud.google.com/go/storage"
	"github.com/daichirata/gcsproxy/headers"
	"github.com/gorilla/mux"
	"google.golang.org/api/option"
	"gopkg.in/yaml.v2"
)
var (
bind = flag.String("b", "127.0.0.1:8080", "Bind address")
verbose = flag.Bool("v", false, "Show access log")
credentials = flag.String("c", "", "The path to the keyfile. If not present, client will use your default application credentials.")
)
var (
client *storage.Client
ctx = context.Background()
)
func handleError(w http.ResponseWriter, err error) {
if err != nil {
if err == storage.ErrObjectNotExist {
http.Error(w, "", http.StatusNotFound)
} else {
http.Error(w, "", http.StatusInternalServerError)
}
return
}
}
func header(r *http.Request, key string) (string, bool) {
if r.Header == nil {
return "", false
}
if candidate := r.Header[key]; len(candidate) > 0 {
return candidate[0], true
}
return "", false
}
func setStrHeader(w http.ResponseWriter, key string, value string) {
if value != "" {
w.Header().Add(key, value)
}
}
func setIntHeader(w http.ResponseWriter, key string, value int64) {
if value > 0 {
w.Header().Add(key, strconv.FormatInt(value, 10))
}
}
func setTimeHeader(w http.ResponseWriter, key string, value time.Time) {
if !value.IsZero() {
w.Header().Add(key, value.UTC().Format(http.TimeFormat))
}
}
type wrapResponseWriter struct {
http.ResponseWriter
status int
}
func (w *wrapResponseWriter) WriteHeader(status int) {
w.ResponseWriter.WriteHeader(status)
w.status = status
}
type Config struct {
Buckets map[string]string `yaml:"buckets"`
}
var config Config
func wrapper(fn func(w http.ResponseWriter, r *http.Request)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
proc := time.Now()
writer := &wrapResponseWriter{
ResponseWriter: w,
status: http.StatusOK,
}
fn(writer, r)
addr := r.RemoteAddr
if ip, found := header(r, "X-Forwarded-For"); found {
addr = ip
}
if *verbose {
log.Printf("[%s] %.3f %d %s %s",
addr,
time.Now().Sub(proc).Seconds(),
writer.status,
r.Method,
r.URL,
)
}
}
}
func check(e error) {
if e != nil {
panic(e)
}
}
func proxy(w http.ResponseWriter, r *http.Request) {
params := mux.Vars(r)
obj := client.Bucket(config.Buckets[r.Host]).Object(params["object"])
err := headers.SetHeaders(ctx, obj, w)
if err != nil {
handleError(w, err)
return
}
objr, err := obj.NewReader(ctx)
if err != nil {
handleError(w, err)
return
	}
	defer objr.Close()
	io.Copy(w, objr)
}
func main() {
flag.Parse()
var err error
if *credentials != "" {
client, err = storage.NewClient(ctx, option.WithCredentialsFile(*credentials))
} else {
client, err = storage.NewClient(ctx)
}
if err != nil {
log.Fatalf("Failed to create client: %v", err)
}
var buckets_config_path = os.Getenv("BUCKETS_CONFIG_PATH")
if buckets_config_path == "" {
log.Fatalf("empty BUCKETS_CONFIG_PATH variable")
}
filename, _ := filepath.Abs(buckets_config_path)
yamlFile, err := ioutil.ReadFile(filename)
check(err)
err = yaml.Unmarshal(yamlFile, &config)
check(err)
r := mux.NewRouter()
r.HandleFunc("/{object:.*}", wrapper(proxy)).Methods("GET", "HEAD")
log.Printf("[service] listening on %s", *bind)
if err := http.ListenAndServe(*bind, r); err != nil {
log.Fatal(err)
}
}
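// Illustrative only; not part of the original source. The YAML file referenced
// by BUCKETS_CONFIG_PATH is expected to unmarshal into Config above: a single
// top-level "buckets" key mapping request Host headers to GCS bucket names.
// The host and bucket names below are hypothetical.
const exampleBucketsYAML = `buckets:
  assets.example.com: example-assets-bucket
  docs.example.com: example-docs-bucket
`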
| [
"\"BUCKETS_CONFIG_PATH\""
]
| []
| [
"BUCKETS_CONFIG_PATH"
]
| [] | ["BUCKETS_CONFIG_PATH"] | go | 1 | 0 | |
providers/security/puppetsec/puppet_security_test.go | // Copyright (c) 2020-2021, R.I. Pienaar and the Choria Project contributors
//
// SPDX-License-Identifier: Apache-2.0
package puppetsec
import (
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/choria-io/go-choria/build"
"github.com/choria-io/go-choria/config"
"github.com/choria-io/go-choria/providers/security"
"github.com/choria-io/go-choria/srvcache"
"github.com/sirupsen/logrus"
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestPuppetSecurity(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Providers/Security/Puppet")
}
var _ = Describe("PuppetSSL", func() {
var mockctl *gomock.Controller
var resolver *MockResolver
var cfg *Config
var err error
var prov *PuppetSecurity
var l *logrus.Logger
BeforeEach(func() {
mockctl = gomock.NewController(GinkgoT())
resolver = NewMockResolver(mockctl)
os.Setenv("MCOLLECTIVE_CERTNAME", "rip.mcollective")
cfg = &Config{
SSLDir: filepath.Join("..", "testdata", "good"),
Identity: "rip.mcollective",
PuppetCAHost: "puppet",
PuppetCAPort: 8140,
DisableSRV: true,
useFakeUID: true,
fakeUID: 500,
}
l = logrus.New()
l.SetOutput(GinkgoWriter)
prov, err = New(WithConfig(cfg), WithResolver(resolver), WithLog(l.WithFields(logrus.Fields{})))
Expect(err).ToNot(HaveOccurred())
})
AfterEach(func() {
mockctl.Finish()
})
It("Should implement the provider interface", func() {
f := func(p security.Provider) {}
f(prov)
Expect(prov.Provider()).To(Equal("puppet"))
})
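	// Note (illustrative, not part of the original source): passing prov to a
	// func(security.Provider) parameter makes the compiler verify that
	// *PuppetSecurity satisfies security.Provider; the equivalent package-level
	// idiom is `var _ security.Provider = (*PuppetSecurity)(nil)`.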
Describe("WithChoriaConfig", func() {
It("Should disable SRV when the CA is configured", func() {
c, err := config.NewConfig(filepath.Join("..", "testdata", "puppetca.cfg"))
Expect(err).ToNot(HaveOccurred())
prov, err = New(WithChoriaConfig(&build.Info{}, c), WithResolver(resolver), WithLog(l.WithFields(logrus.Fields{})))
Expect(err).ToNot(HaveOccurred())
Expect(prov.conf.DisableSRV).To(BeTrue())
})
It("Should support OverrideCertname", func() {
c := config.NewConfigForTests()
c.OverrideCertname = "override.choria"
prov, err = New(WithChoriaConfig(&build.Info{}, c), WithResolver(resolver), WithLog(l.WithFields(logrus.Fields{})))
Expect(err).ToNot(HaveOccurred())
Expect(prov.conf.Identity).To(Equal("override.choria"))
})
// TODO: windows
if runtime.GOOS != "windows" {
It("Should fail when it cannot determine user identity", func() {
c := config.NewConfigForTests()
c.OverrideCertname = ""
v := os.Getenv("USER")
defer os.Setenv("USER", v)
os.Unsetenv("USER")
os.Unsetenv("MCOLLECTIVE_CERTNAME")
_, err = New(WithChoriaConfig(&build.Info{}, c), WithResolver(resolver), WithLog(l.WithFields(logrus.Fields{})))
Expect(err).To(MatchError("could not determine client identity, ensure USER environment variable is set"))
})
It("Should use the user SSL directory when not configured", func() {
c, err := config.NewDefaultConfig()
Expect(err).ToNot(HaveOccurred())
prov, err = New(WithChoriaConfig(&build.Info{}, c), WithResolver(resolver), WithLog(l.WithFields(logrus.Fields{})))
Expect(err).ToNot(HaveOccurred())
d, err := userSSlDir()
Expect(err).ToNot(HaveOccurred())
Expect(prov.conf.SSLDir).To(Equal(d))
})
}
It("Should copy all the relevant settings", func() {
c, err := config.NewDefaultConfig()
Expect(err).ToNot(HaveOccurred())
c.DisableTLSVerify = true
c.Choria.SSLDir = "/stub"
c.Choria.PuppetCAHost = "stubhost"
c.Choria.PuppetCAPort = 8080
prov, err = New(WithChoriaConfig(&build.Info{}, c), WithResolver(resolver), WithLog(l.WithFields(logrus.Fields{})))
Expect(err).ToNot(HaveOccurred())
Expect(prov.conf.AllowList).To(Equal([]string{"\\.mcollective$", "\\.choria$"}))
Expect(prov.conf.PrivilegedUsers).To(Equal([]string{"\\.privileged.mcollective$", "\\.privileged.choria$"}))
Expect(prov.conf.DisableTLSVerify).To(BeTrue())
Expect(prov.conf.SSLDir).To(Equal("/stub"))
Expect(prov.conf.PuppetCAHost).To(Equal("stubhost"))
Expect(prov.conf.PuppetCAPort).To(Equal(8080))
})
})
Describe("Validate", func() {
It("Should handle missing files", func() {
cfg.SSLDir = filepath.Join("testdata", "allmissing")
cfg.Identity = "test.mcollective"
prov, err = New(WithConfig(cfg), WithResolver(resolver), WithLog(l.WithFields(logrus.Fields{})))
Expect(err).ToNot(HaveOccurred())
errs, ok := prov.Validate()
Expect(ok).To(BeFalse())
Expect(errs).To(HaveLen(3))
Expect(errs[0]).To(Equal(fmt.Sprintf("public certificate %s does not exist", filepath.Join(cfg.SSLDir, "certs", "test.mcollective.pem"))))
Expect(errs[1]).To(Equal(fmt.Sprintf("private key %s does not exist", filepath.Join(cfg.SSLDir, "private_keys", "test.mcollective.pem"))))
Expect(errs[2]).To(Equal(fmt.Sprintf("CA %s does not exist", filepath.Join(cfg.SSLDir, "certs", "ca.pem"))))
})
It("Should accept valid directories", func() {
cfg.Identity = "rip.mcollective"
errs, ok := prov.Validate()
Expect(errs).To(HaveLen(0))
Expect(ok).To(BeTrue())
})
})
Describe("Identity", func() {
It("Should support OverrideCertname", func() {
cfg.Identity = "bob.choria"
prov.reinit()
Expect(prov.Identity()).To(Equal("bob.choria"))
})
})
Describe("cachePath", func() {
It("Should get the right cache path", func() {
path := prov.cachePath("rip.mcollective")
Expect(err).ToNot(HaveOccurred())
Expect(path).To(Equal(filepath.FromSlash(filepath.Join(cfg.SSLDir, "choria_security", "public_certs", "rip.mcollective.pem"))))
})
})
Describe("certCacheDir", func() {
It("Should determine the right directory", func() {
path := prov.certCacheDir()
Expect(err).ToNot(HaveOccurred())
Expect(path).To(Equal(filepath.FromSlash(filepath.Join(cfg.SSLDir, "choria_security", "public_certs"))))
})
})
Describe("writeCSR", func() {
It("should not write over existing CSRs", func() {
cfg.Identity = "na.mcollective"
prov.reinit()
kpath := prov.privateKeyPath()
csrpath := prov.csrPath()
defer os.Remove(kpath)
defer os.Remove(csrpath)
key, err := prov.writePrivateKey()
Expect(err).ToNot(HaveOccurred())
prov.conf.Identity = "rip.mcollective"
prov.reinit()
_, err = prov.writeCSR(key, "rip.mcollective", "choria.io")
Expect(err).To(MatchError("a certificate request already exist for rip.mcollective"))
})
It("Should create a valid CSR", func() {
prov.conf.Identity = "na.mcollective"
prov.reinit()
kpath := prov.privateKeyPath()
csrpath := prov.csrPath()
defer os.Remove(kpath)
defer os.Remove(csrpath)
key, err := prov.writePrivateKey()
Expect(err).ToNot(HaveOccurred())
_, err = prov.writeCSR(key, "na.mcollective", "choria.io")
Expect(err).ToNot(HaveOccurred())
csrpem, err := os.ReadFile(csrpath)
Expect(err).ToNot(HaveOccurred())
pb, _ := pem.Decode(csrpem)
req, err := x509.ParseCertificateRequest(pb.Bytes)
Expect(err).ToNot(HaveOccurred())
Expect(req.Subject.CommonName).To(Equal("na.mcollective"))
Expect(req.Subject.OrganizationalUnit).To(Equal([]string{"choria.io"}))
})
})
Describe("writePrivateKey", func() {
It("Should not write over existing private keys", func() {
cfg.Identity = "rip.mcollective"
key, err := prov.writePrivateKey()
Expect(err).To(MatchError("a private key already exist for rip.mcollective"))
Expect(key).To(BeNil())
})
It("Should create new keys", func() {
cfg.Identity = "na.mcollective"
prov.reinit()
path := prov.privateKeyPath()
defer os.Remove(path)
key, err := prov.writePrivateKey()
Expect(err).ToNot(HaveOccurred())
Expect(key).ToNot(BeNil())
Expect(path).To(BeAnExistingFile())
})
})
Describe("csrExists", func() {
It("Should detect existing keys", func() {
cfg.Identity = "rip.mcollective"
prov.reinit()
Expect(prov.csrExists()).To(BeTrue())
})
It("Should detect absent keys", func() {
cfg.Identity = "na.mcollective"
prov.reinit()
Expect(prov.csrExists()).To(BeFalse())
})
})
Describe("puppetCA", func() {
It("Should use supplied config when SRV is disabled", func() {
cfg.DisableSRV = true
s := prov.puppetCA()
Expect(s.Host()).To(Equal("puppet"))
Expect(s.Port()).To(Equal(uint16(8140)))
Expect(s.Scheme()).To(Equal("https"))
})
It("Should use supplied config when no srv resolver is given", func() {
prov, err = New(WithConfig(cfg), WithLog(l.WithFields(logrus.Fields{})))
Expect(err).ToNot(HaveOccurred())
resolver.EXPECT().QuerySrvRecords(gomock.Any()).Times(0)
s := prov.puppetCA()
Expect(s.Host()).To(Equal("puppet"))
Expect(s.Port()).To(Equal(uint16(8140)))
Expect(s.Scheme()).To(Equal("https"))
})
It("Should return defaults when SRV fails", func() {
resolver.EXPECT().QuerySrvRecords([]string{"_x-puppet-ca._tcp", "_x-puppet._tcp"}).Return(srvcache.NewServers(), errors.New("simulated error"))
cfg.DisableSRV = false
s := prov.puppetCA()
Expect(s.Host()).To(Equal("puppet"))
Expect(s.Port()).To(Equal(uint16(8140)))
Expect(s.Scheme()).To(Equal("https"))
})
It("Should use SRV records", func() {
ans := srvcache.NewServers(
srvcache.NewServer("p1", 8080, "http"),
srvcache.NewServer("p2", 8081, "https"),
)
resolver.EXPECT().QuerySrvRecords([]string{"_x-puppet-ca._tcp", "_x-puppet._tcp"}).Return(ans, nil)
cfg.DisableSRV = false
s := prov.puppetCA()
Expect(s.Host()).To(Equal("p1"))
Expect(s.Port()).To(Equal(uint16(8080)))
Expect(s.Scheme()).To(Equal("http"))
})
})
})
| [
"\"USER\""
]
| []
| [
"USER"
]
| [] | ["USER"] | go | 1 | 0 | |
vunit/simulator_interface.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2015-2017, Lars Asplund [email protected]
"""
Generic simulator interface
"""
from __future__ import print_function
import sys
import os
import subprocess
from vunit.ostools import Process, simplify_path
from vunit.exceptions import CompileError
from vunit.color_printer import NO_COLOR_PRINTER
class SimulatorInterface(object):
"""
Generic simulator interface
"""
name = None
supports_gui_flag = False
package_users_depend_on_bodies = False
compile_options = []
sim_options = []
# True if simulator supports ANSI colors in GUI mode
supports_colors_in_gui = False
def __init__(self, output_path, gui):
self._output_path = output_path
self._gui = gui
@property
def output_path(self):
return self._output_path
@property
def use_color(self):
return (not self._gui) or self.supports_colors_in_gui
@staticmethod
def add_arguments(parser):
"""
Add command line arguments
"""
pass
@staticmethod
def supports_vhdl_2008_contexts():
"""
Returns True when this simulator supports VHDL 2008 contexts
"""
return True
@staticmethod
def find_executable(executable):
"""
Return a list of all executables found in PATH
"""
path = os.environ['PATH']
paths = path.split(os.pathsep)
_, ext = os.path.splitext(executable)
if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):
executable = executable + '.exe'
result = []
if isfile(executable):
result.append(executable)
for prefix in paths:
file_name = os.path.join(prefix, executable)
if isfile(file_name):
# the file exists, we have a shot at spawn working
result.append(file_name)
return result
@classmethod
def find_prefix(cls):
"""
Find prefix by looking at VUNIT_<SIMULATOR_NAME>_PATH environment variable
"""
prefix = os.environ.get("VUNIT_" + cls.name.upper() + "_PATH", None)
if prefix is not None:
return prefix
return cls.find_prefix_from_path()
@classmethod
def find_prefix_from_path(cls):
"""
Find simulator toolchain prefix from PATH environment variable
"""
return None
@classmethod
def is_available(cls):
"""
Returns True if simulator is available
"""
return cls.find_prefix() is not None
@classmethod
def find_toolchain(cls, executables, constraints=None):
"""
Find the first path prefix containing all executables
"""
constraints = [] if constraints is None else constraints
if not executables:
return None
        all_paths = [[os.path.abspath(os.path.dirname(executable))
                      for executable in cls.find_executable(name)]
                     for name in executables]
for path0 in all_paths[0]:
if all([path0 in paths for paths in all_paths] +
[constraint(path0) for constraint in constraints]):
return path0
return None
@classmethod
def get_osvvm_coverage_api(cls):
"""
Returns simulator name when OSVVM coverage API is supported, None otherwise.
"""
return None
@classmethod
def supports_vhdl_package_generics(cls):
"""
Returns True when this simulator supports VHDL package generics
"""
return False
def post_process(self, output_path):
"""
Hook for simulator interface to perform post processing such as creating coverage reports
"""
pass
def add_simulator_specific(self, project):
"""
Hook for the simulator interface to add simulator specific things to the project
"""
pass
def compile_project(self, project, printer=NO_COLOR_PRINTER, continue_on_error=False):
"""
Compile the project
"""
self.add_simulator_specific(project)
self.setup_library_mapping(project)
self.compile_source_files(project, printer, continue_on_error)
def simulate(self, output_path, test_suite_name, config, elaborate_only):
"""
Simulate
"""
pass
def setup_library_mapping(self, project):
"""
Implemented by specific simulators
"""
pass
def __compile_source_file(self, source_file, printer):
"""
Compiles a single source file and prints status information
"""
try:
command = self.compile_source_file_command(source_file)
except CompileError:
command = None
printer.write("failed", fg="ri")
printer.write("\n")
printer.write("File type not supported by %s simulator\n" % (self.name))
return False
try:
output = check_output(command,
env=self.get_env())
printer.write("passed", fg="gi")
printer.write("\n")
printer.write(output)
except subprocess.CalledProcessError as err:
printer.write("failed", fg="ri")
printer.write("\n")
printer.write("=== Command used: ===\n%s\n"
% (subprocess.list2cmdline(command)))
printer.write("\n")
printer.write("=== Command output: ===\n%s\n" % err.output)
return False
return True
def compile_source_files(self, project, printer=NO_COLOR_PRINTER, continue_on_error=False):
"""
Use compile_source_file_command to compile all source_files
"""
dependency_graph = project.create_dependency_graph()
failures = []
source_files = project.get_files_in_compile_order(dependency_graph=dependency_graph)
source_files_to_skip = set()
max_library_name = 0
max_source_file_name = 0
if source_files:
max_library_name = max(len(source_file.library.name) for source_file in source_files)
max_source_file_name = max(len(simplify_path(source_file.name)) for source_file in source_files)
for source_file in source_files:
printer.write(
'Compiling into %s %s ' % (
(source_file.library.name + ":").ljust(max_library_name + 1),
simplify_path(source_file.name).ljust(max_source_file_name)))
sys.stdout.flush()
if source_file in source_files_to_skip:
printer.write("skipped", fg="rgi")
printer.write("\n")
continue
if self.__compile_source_file(source_file, printer):
project.update(source_file)
else:
source_files_to_skip.update(dependency_graph.get_dependent([source_file]))
failures.append(source_file)
if not continue_on_error:
break
if failures:
printer.write("Compile failed\n", fg='ri')
raise CompileError
if source_files:
printer.write("Compile passed\n", fg='gi')
else:
printer.write("Re-compile not needed\n")
def compile_source_file_command(self, source_file): # pylint: disable=unused-argument
raise NotImplementedError
@staticmethod
def get_env():
"""
        Allows inheriting classes to override this to modify environment variables
"""
return None # Default environment
def isfile(file_name):
"""
Case insensitive os.path.isfile
"""
if not os.path.isfile(file_name):
return False
return os.path.basename(file_name) in os.listdir(os.path.dirname(file_name))
def run_command(command, cwd=None, env=None):
"""
Run a command
"""
try:
proc = Process(command, cwd=cwd, env=env)
proc.consume_output()
return True
except Process.NonZeroExitCode:
pass
return False
def check_output(command, env=None):
"""
    Wrapper around subprocess.check_output
"""
try:
output = subprocess.check_output(command,
env=env,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
err.output = err.output.decode("utf-8")
raise err
return output.decode("utf-8")
class Option(object):
"""
A compile or sim option
"""
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
def validate(self, value):
pass
class BooleanOption(Option):
"""
Must be a boolean
"""
def validate(self, value):
if value not in (True, False):
raise ValueError("Option %r must be a boolean. Got %r"
% (self.name, value))
class StringOption(Option):
"""
Must be a string
"""
def validate(self, value):
if not is_string_not_iterable(value):
raise ValueError("Option %r must be a string. Got %r"
% (self.name, value))
class ListOfStringOption(Option):
"""
Must be a list of strings
"""
def validate(self, value):
def fail():
raise ValueError("Option %r must be a list of strings. Got %r"
% (self.name, value))
if is_string_not_iterable(value):
fail()
try:
for elem in value:
if not is_string_not_iterable(elem):
fail()
except TypeError:
fail()
class VHDLAssertLevelOption(Option):
"""
VHDL assert level
"""
_legal_values = ("warning", "error", "failure")
def __init__(self):
Option.__init__(self, "vhdl_assert_stop_level")
def validate(self, value):
if value not in self._legal_values:
raise ValueError("Option %r must be one of %s. Got %r"
% (self.name, self._legal_values, value))
def is_string_not_iterable(value):
"""
Returns True if value is a string and not another iterable
"""
if sys.version_info.major == 3:
return isinstance(value, str)
return isinstance(value, (str, unicode)) # pylint: disable=undefined-variable
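# --- Illustrative sketch; not part of the original source ---
# Minimal example of how a concrete backend plugs into the prefix discovery
# above; the simulator name and executable names are hypothetical.
class _ExampleSimulatorInterface(SimulatorInterface):
    name = "examplesim"
    @classmethod
    def find_prefix_from_path(cls):
        # Consulted by find_prefix when VUNIT_EXAMPLESIM_PATH is not set.
        return cls.find_toolchain(["examplesim_compile", "examplesim_sim"])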
| []
| []
| [
"PATH",
"VUNIT_\" + cls.name.upper"
]
| [] | ["PATH", "VUNIT_\" + cls.name.upper"] | python | 2 | 0 | |
src/cmd/internal/pprof/commands/commands.go | // Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package commands defines and manages the basic pprof commands
package commands
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strings"
"time"
"cmd/internal/browser"
"cmd/internal/pprof/plugin"
"cmd/internal/pprof/report"
"cmd/internal/pprof/svg"
"cmd/internal/pprof/tempfile"
)
// Commands describes the commands accepted by pprof.
type Commands map[string]*Command
// Command describes the actions for a pprof command. Includes a
// function for command-line completion, the report format to use
// during report generation, any postprocessing functions, and whether
// the command expects a regexp parameter (typically a function name).
type Command struct {
Complete Completer // autocomplete for interactive mode
Format int // report format to generate
PostProcess PostProcessor // postprocessing to run on report
HasParam bool // Collect a parameter from the CLI
Usage string // Help text
}
// Completer is a function for command-line autocompletion
type Completer func(prefix string) string
// PostProcessor is a function that applies post-processing to the report output
type PostProcessor func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error
// PProf returns the basic pprof report-generation commands
func PProf(c Completer, interactive **bool) Commands {
return Commands{
// Commands that require no post-processing.
"tags": {nil, report.Tags, nil, false, "Outputs all tags in the profile"},
"raw": {c, report.Raw, nil, false, "Outputs a text representation of the raw profile"},
"dot": {c, report.Dot, nil, false, "Outputs a graph in DOT format"},
"top": {c, report.Text, nil, false, "Outputs top entries in text form"},
"tree": {c, report.Tree, nil, false, "Outputs a text rendering of call graph"},
"text": {c, report.Text, nil, false, "Outputs top entries in text form"},
"disasm": {c, report.Dis, nil, true, "Output annotated assembly for functions matching regexp or address"},
"list": {c, report.List, nil, true, "Output annotated source for functions matching regexp"},
"peek": {c, report.Tree, nil, true, "Output callers/callees of functions matching regexp"},
// Save binary formats to a file
"callgrind": {c, report.Callgrind, awayFromTTY("callgraph.out"), false, "Outputs a graph in callgrind format"},
"proto": {c, report.Proto, awayFromTTY("pb.gz"), false, "Outputs the profile in compressed protobuf format"},
// Generate report in DOT format and postprocess with dot
"gif": {c, report.Dot, invokeDot("gif"), false, "Outputs a graph image in GIF format"},
"pdf": {c, report.Dot, invokeDot("pdf"), false, "Outputs a graph in PDF format"},
"png": {c, report.Dot, invokeDot("png"), false, "Outputs a graph image in PNG format"},
"ps": {c, report.Dot, invokeDot("ps"), false, "Outputs a graph in PS format"},
// Save SVG output into a file after including svgpan library
"svg": {c, report.Dot, saveSVGToFile(), false, "Outputs a graph in SVG format"},
// Visualize postprocessed dot output
"eog": {c, report.Dot, invokeVisualizer(interactive, invokeDot("svg"), "svg", []string{"eog"}), false, "Visualize graph through eog"},
"evince": {c, report.Dot, invokeVisualizer(interactive, invokeDot("pdf"), "pdf", []string{"evince"}), false, "Visualize graph through evince"},
"gv": {c, report.Dot, invokeVisualizer(interactive, invokeDot("ps"), "ps", []string{"gv --noantialias"}), false, "Visualize graph through gv"},
"web": {c, report.Dot, invokeVisualizer(interactive, saveSVGToFile(), "svg", browsers()), false, "Visualize graph through web browser"},
// Visualize HTML directly generated by report.
"weblist": {c, report.WebList, invokeVisualizer(interactive, awayFromTTY("html"), "html", browsers()), true, "Output annotated source in HTML for functions matching regexp or address"},
}
}
// browsers returns a list of commands to attempt for web visualization
// on the current platform
func browsers() []string {
var cmds []string
for _, cmd := range browser.Commands() {
cmds = append(cmds, strings.Join(cmd, " "))
}
return cmds
}
// NewCompleter creates an autocompletion function for a set of commands.
func NewCompleter(cs Commands) Completer {
return func(line string) string {
switch tokens := strings.Fields(line); len(tokens) {
case 0:
// Nothing to complete
case 1:
// Single token -- complete command name
found := ""
for c := range cs {
if strings.HasPrefix(c, tokens[0]) {
if found != "" {
return line
}
found = c
}
}
if found != "" {
return found
}
default:
// Multiple tokens -- complete using command completer
if c, ok := cs[tokens[0]]; ok {
if c.Complete != nil {
lastTokenIdx := len(tokens) - 1
lastToken := tokens[lastTokenIdx]
if strings.HasPrefix(lastToken, "-") {
lastToken = "-" + c.Complete(lastToken[1:])
} else {
lastToken = c.Complete(lastToken)
}
return strings.Join(append(tokens[:lastTokenIdx], lastToken), " ")
}
}
}
return line
}
}
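// Illustrative behaviour of the completer above (not part of the original
// source): with the PProf command set, "to" completes to "top" because only
// one command has that prefix, while "t" is returned unchanged since it
// matches "tags", "top", "tree" and "text".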
// awayFromTTY saves the output in a file if it would otherwise go to
// the terminal screen. This is used to avoid dumping binary data on
// the screen.
func awayFromTTY(format string) PostProcessor {
return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
if output == os.Stdout && ui.IsTerminal() {
tempFile, err := tempfile.New("", "profile", "."+format)
if err != nil {
return err
}
ui.PrintErr("Generating report in ", tempFile.Name())
_, err = fmt.Fprint(tempFile, input)
return err
}
_, err := fmt.Fprint(output, input)
return err
}
}
func invokeDot(format string) PostProcessor {
divert := awayFromTTY(format)
return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
if _, err := exec.LookPath("dot"); err != nil {
ui.PrintErr("Cannot find dot, have you installed Graphviz?")
return err
}
cmd := exec.Command("dot", "-T"+format)
var buf bytes.Buffer
cmd.Stdin, cmd.Stdout, cmd.Stderr = input, &buf, os.Stderr
if err := cmd.Run(); err != nil {
return err
}
return divert(&buf, output, ui)
}
}
func saveSVGToFile() PostProcessor {
generateSVG := invokeDot("svg")
divert := awayFromTTY("svg")
return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
baseSVG := &bytes.Buffer{}
generateSVG(input, baseSVG, ui)
massaged := &bytes.Buffer{}
fmt.Fprint(massaged, svg.Massage(*baseSVG))
return divert(massaged, output, ui)
}
}
var vizTmpDir string
func makeVizTmpDir() error {
if vizTmpDir != "" {
return nil
}
name, err := ioutil.TempDir("", "pprof-")
if err != nil {
return err
}
tempfile.DeferDelete(name)
vizTmpDir = name
return nil
}
func invokeVisualizer(interactive **bool, format PostProcessor, suffix string, visualizers []string) PostProcessor {
return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
if err := makeVizTmpDir(); err != nil {
return err
}
tempFile, err := tempfile.New(vizTmpDir, "pprof", "."+suffix)
if err != nil {
return err
}
tempfile.DeferDelete(tempFile.Name())
if err = format(input, tempFile, ui); err != nil {
return err
}
tempFile.Close() // on windows, if the file is Open, start cannot access it.
// Try visualizers until one is successful
for _, v := range visualizers {
// Separate command and arguments for exec.Command.
args := strings.Split(v, " ")
if len(args) == 0 {
continue
}
viewer := exec.Command(args[0], append(args[1:], tempFile.Name())...)
viewer.Stderr = os.Stderr
if err = viewer.Start(); err == nil {
// The viewer might just send a message to another program
// to open the file. Give that program a little time to open the
// file before we remove it.
time.Sleep(1 * time.Second)
if !**interactive {
// In command-line mode, wait for the viewer to be closed
// before proceeding
return viewer.Wait()
}
return nil
}
}
return err
}
}
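// Illustrative only; not part of the original source. Report formats and
// post-processors compose into Command entries; e.g. a DOT report rendered to
// PNG by Graphviz and kept off the terminal by the awayFromTTY step inside
// invokeDot:
var exampleCommand = &Command{
	Format:      report.Dot,
	PostProcess: invokeDot("png"),
	Usage:       "Illustrative example; renders the DOT graph as PNG",
}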
| []
| []
| []
| [] | [] | go | null | null | null |
dotracker.py | from __future__ import division
import sys
import os
import numpy as np
from PIL import Image
import src.siamese as siam
from src.tracker import tracker
from src.parse_arguments import parse_arguments
from src.region_to_bbox import region_to_bbox
import cv2
# import tensorflow as tf
import time
import ffmpeg
import tensorflow as tf
def main(process, queue, box, video):
# avoid printing TF debugging information
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# TODO: allow parameters from command line or leave everything in json files?
hp, evaluation, run, env, design = parse_arguments()
# Set size for use with tf.image.resize_images with align_corners=True.
# For example,
# [1 4 7] => [1 2 3 4 5 6 7] (length 3*(3-1)+1)
# instead of
# [1 4 7] => [1 1 2 3 4 5 6 7 7] (length 3*3)
final_score_sz = hp.response_up * (design.score_sz - 1) + 1
# build TF graph once for all
image, templates_z, scores = siam.build_tracking_graph(final_score_sz, design, env)
# read radio
# width = 640
# height = 480
# process1 = (
# ffmpeg
# .input('tcp://192.168.1.155:8300',vcodec='h264',r = 24,probesize=32,fflags="nobuffer",flags="low_delay",analyzeduration=1)
# .output('pipe:', format='rawvideo',pix_fmt="rgb24")
# .run_async(pipe_stdout=True)
# )
## model
# model_path = './frozen_inference_graph.pb'
# odapi = DetectorAPI(path_to_ckpt=model_path)
# while True :
# in_bytes = process1.stdout.read(width * height * 3)
# if not in_bytes :
# print ("none")
# video = (np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3]))
# video = cv2.cvtColor(video, cv2.COLOR_RGB2BGR)
# read target from mat
# box = odapi.processFrame(video)
box[2] -= box[0]
box[3] -= box[1]
box[0] += box[2]/2
box[1] += box[3]/2
print ('box', box)
pos_x, pos_y, target_w, target_h = box[0], box[1], box[2], box[3]
tracker(hp, run, design, video, pos_x, pos_y, target_w, target_h, final_score_sz,
image, templates_z, scores, process, queue)
print ('done')
def _compile_results(gt, bboxes, dist_threshold):
l = np.size(bboxes, 0)
gt4 = np.zeros((l, 4))
new_distances = np.zeros(l)
new_ious = np.zeros(l)
n_thresholds = 50
precisions_ths = np.zeros(n_thresholds)
for i in range(l):
gt4[i, :] = region_to_bbox(gt[i, :], center=False)
new_distances[i] = _compute_distance(bboxes[i, :], gt4[i, :])
new_ious[i] = _compute_iou(bboxes[i, :], gt4[i, :])
# what's the percentage of frame in which center displacement is inferior to given threshold? (OTB metric)
precision = sum(new_distances < dist_threshold)/np.size(new_distances) * 100
# find above result for many thresholds, then report the AUC
thresholds = np.linspace(0, 25, n_thresholds+1)
thresholds = thresholds[-n_thresholds:]
# reverse it so that higher values of precision goes at the beginning
thresholds = thresholds[::-1]
for i in range(n_thresholds):
precisions_ths[i] = sum(new_distances < thresholds[i])/np.size(new_distances)
# integrate over the thresholds
precision_auc = np.trapz(precisions_ths)
# per frame averaged intersection over union (OTB metric)
iou = np.mean(new_ious) * 100
return l, precision, precision_auc, iou
def _init_video(env, evaluation, video):
video_folder = os.path.join(env.root_dataset, evaluation.dataset, video)
frame_name_list = [f for f in os.listdir(video_folder) if f.endswith(".jpg")]
frame_name_list = [os.path.join(env.root_dataset, evaluation.dataset, video, '') + s for s in frame_name_list]
frame_name_list.sort()
with Image.open(frame_name_list[0]) as img:
frame_sz = np.asarray(img.size)
frame_sz[1], frame_sz[0] = frame_sz[0], frame_sz[1]
# read the initialization from ground truth
gt_file = os.path.join(video_folder, 'groundtruth.txt')
gt = np.genfromtxt(gt_file, delimiter=',')
n_frames = len(frame_name_list)
assert n_frames == len(gt), 'Number of frames and number of GT lines should be equal.'
return gt, frame_name_list, frame_sz, n_frames
def _compute_distance(boxA, boxB):
a = np.array((boxA[0]+boxA[2]/2, boxA[1]+boxA[3]/2))
b = np.array((boxB[0]+boxB[2]/2, boxB[1]+boxB[3]/2))
dist = np.linalg.norm(a - b)
assert dist >= 0
assert dist != float('Inf')
return dist
def _compute_iou(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[0] + boxA[2], boxB[0] + boxB[2])
yB = min(boxA[1] + boxA[3], boxB[1] + boxB[3])
if xA < xB and yA < yB:
# compute the area of intersection rectangle
interArea = (xB - xA) * (yB - yA)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = boxA[2] * boxA[3]
boxBArea = boxB[2] * boxB[3]
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the intersection area
iou = interArea / float(boxAArea + boxBArea - interArea)
else:
iou = 0
assert iou >= 0
assert iou <= 1.01
return iou
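# Worked example for _compute_iou (illustrative, not part of the original
# source): for boxes in [x, y, w, h] form, boxA = [0, 0, 2, 2] and
# boxB = [1, 1, 2, 2] intersect in a 1x1 square, so
# iou = 1 / (4 + 4 - 1) ≈ 0.143.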
if __name__ == '__main__':
sys.exit(main())
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
test/utils/image/manifest.go | /*
Copyright 2017 The Kubernetes Authors.
Copyright 2020 Authors of Arktos - file modified.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package image
import (
"fmt"
"io/ioutil"
"os"
yaml "gopkg.in/yaml.v2"
)
// RegistryList holds public and private image registries
type RegistryList struct {
GcAuthenticatedRegistry string `yaml:"gcAuthenticatedRegistry"`
DockerLibraryRegistry string `yaml:"dockerLibraryRegistry"`
E2eRegistry string `yaml:"e2eRegistry"`
InvalidRegistry string `yaml:"invalidRegistry"`
GcRegistry string `yaml:"gcRegistry"`
GoogleContainerRegistry string `yaml:"googleContainerRegistry"`
PrivateRegistry string `yaml:"privateRegistry"`
SampleRegistry string `yaml:"sampleRegistry"`
CirrosRegistry string `yaml:"cirrosRegistry"`
}
// Config holds an images registry, name, and version
type Config struct {
registry string
name string
version string
}
// SetRegistry sets an image registry in a Config struct
func (i *Config) SetRegistry(registry string) {
i.registry = registry
}
// SetName sets an image name in a Config struct
func (i *Config) SetName(name string) {
i.name = name
}
// SetVersion sets an image version in a Config struct
func (i *Config) SetVersion(version string) {
i.version = version
}
func initReg() RegistryList {
registry := RegistryList{
GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling",
DockerLibraryRegistry: "docker.io/library",
E2eRegistry: "gcr.io/kubernetes-e2e-test-images",
InvalidRegistry: "invalid.com/invalid",
GcRegistry: "k8s.gcr.io",
GoogleContainerRegistry: "gcr.io/google-containers",
PrivateRegistry: "gcr.io/k8s-authenticated-test",
SampleRegistry: "gcr.io/google-samples",
CirrosRegistry: "download.cirros-cloud.net",
}
repoList := os.Getenv("KUBE_TEST_REPO_LIST")
if repoList == "" {
return registry
}
fileContent, err := ioutil.ReadFile(repoList)
if err != nil {
panic(fmt.Errorf("Error reading '%v' file contents: %v", repoList, err))
}
err = yaml.Unmarshal(fileContent, ®istry)
if err != nil {
panic(fmt.Errorf("Error unmarshalling '%v' YAML file: %v", repoList, err))
}
return registry
}
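// Illustrative only; not part of the original source. A KUBE_TEST_REPO_LIST
// file overrides any subset of the registries above using the yaml field
// names, e.g. (hypothetical registry hosts):
//
//	dockerLibraryRegistry: registry.example.com/library
//	e2eRegistry: registry.example.com/kubernetes-e2e-test-images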
var (
registry = initReg()
dockerLibraryRegistry = registry.DockerLibraryRegistry
e2eRegistry = registry.E2eRegistry
gcAuthenticatedRegistry = registry.GcAuthenticatedRegistry
gcRegistry = registry.GcRegistry
googleContainerRegistry = registry.GoogleContainerRegistry
invalidRegistry = registry.InvalidRegistry
// PrivateRegistry is an image repository that requires authentication
PrivateRegistry = registry.PrivateRegistry
sampleRegistry = registry.SampleRegistry
cirros = registry.CirrosRegistry
// Preconfigured image configs
imageConfigs = initImageConfigs()
)
const (
// CRDConversionWebhook image
CRDConversionWebhook = iota
// AdmissionWebhook image
AdmissionWebhook
// Agnhost image
Agnhost
// Alpine image
Alpine
// APIServer image
APIServer
// AppArmorLoader image
AppArmorLoader
// AuditProxy image
AuditProxy
// AuthenticatedAlpine image
AuthenticatedAlpine
// AuthenticatedWindowsNanoServer image
AuthenticatedWindowsNanoServer
// BusyBox image
BusyBox
// CheckMetadataConcealment image
CheckMetadataConcealment
// CudaVectorAdd image
CudaVectorAdd
// CudaVectorAdd2 image
CudaVectorAdd2
// Dnsutils image
Dnsutils
// DebianBase image
DebianBase
// EchoServer image
EchoServer
// EntrypointTester image
EntrypointTester
// Etcd image
Etcd
// GBFrontend image
GBFrontend
// GBRedisSlave image
GBRedisSlave
// InClusterClient image
InClusterClient
// Invalid image
Invalid
// InvalidRegistryImage image
InvalidRegistryImage
// IpcUtils image
IpcUtils
// Iperf image
Iperf
// JessieDnsutils image
JessieDnsutils
// Kitten image
Kitten
// Mounttest image
Mounttest
// MounttestUser image
MounttestUser
// Nautilus image
Nautilus
// Net image
Net
// Netexec image
Netexec
// Nettest image
Nettest
// Nginx image
Nginx
// NginxNew image
NginxNew
// Nonewprivs image
Nonewprivs
// NonRoot runs with a default user of 1234
NonRoot
// Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
// Pause image
Pause
// Perl image
Perl
// Porter image
Porter
// PrometheusDummyExporter image
PrometheusDummyExporter
// PrometheusToSd image
PrometheusToSd
// Redis image
Redis
// ResourceConsumer image
ResourceConsumer
// ResourceController image
ResourceController
// SdDummyExporter image
SdDummyExporter
// ServeHostname image
ServeHostname
// StartupScript image
StartupScript
// TestWebserver image
TestWebserver
// VolumeNFSServer image
VolumeNFSServer
// VolumeISCSIServer image
VolumeISCSIServer
// VolumeGlusterServer image
VolumeGlusterServer
// VolumeRBDServer image
VolumeRBDServer
// WindowsNanoServer image
WindowsNanoServer
// Cirros image
Cirros
)
func initImageConfigs() map[int]Config {
configs := map[int]Config{}
configs[CRDConversionWebhook] = Config{e2eRegistry, "crd-conversion-webhook", "1.13rev2"}
configs[AdmissionWebhook] = Config{e2eRegistry, "webhook", "1.15v1"}
configs[Agnhost] = Config{e2eRegistry, "agnhost", "2.0"}
configs[Alpine] = Config{dockerLibraryRegistry, "alpine", "3.7"}
configs[AuthenticatedAlpine] = Config{gcAuthenticatedRegistry, "alpine", "3.7"}
configs[APIServer] = Config{e2eRegistry, "sample-apiserver", "1.17"}
configs[AppArmorLoader] = Config{e2eRegistry, "apparmor-loader", "1.0"}
configs[AuditProxy] = Config{e2eRegistry, "audit-proxy", "1.0"}
configs[BusyBox] = Config{dockerLibraryRegistry, "busybox", "1.29"}
configs[CheckMetadataConcealment] = Config{e2eRegistry, "metadata-concealment", "1.2"}
configs[CudaVectorAdd] = Config{e2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{e2eRegistry, "cuda-vector-add", "2.0"}
configs[Dnsutils] = Config{e2eRegistry, "dnsutils", "1.1"}
configs[DebianBase] = Config{googleContainerRegistry, "debian-base", "0.4.1"}
configs[EchoServer] = Config{e2eRegistry, "echoserver", "2.2"}
configs[EntrypointTester] = Config{e2eRegistry, "entrypoint-tester", "1.0"}
configs[Etcd] = Config{gcRegistry, "etcd", "3.4.4"}
configs[GBFrontend] = Config{sampleRegistry, "gb-frontend", "v6"}
configs[GBRedisSlave] = Config{sampleRegistry, "gb-redisslave", "v3"}
configs[InClusterClient] = Config{e2eRegistry, "inclusterclient", "1.0"}
configs[Invalid] = Config{gcRegistry, "invalid-image", "invalid-tag"}
configs[InvalidRegistryImage] = Config{invalidRegistry, "alpine", "3.1"}
configs[IpcUtils] = Config{e2eRegistry, "ipc-utils", "1.0"}
configs[Iperf] = Config{e2eRegistry, "iperf", "1.0"}
configs[JessieDnsutils] = Config{e2eRegistry, "jessie-dnsutils", "1.0"}
configs[Kitten] = Config{e2eRegistry, "kitten", "1.0"}
configs[Mounttest] = Config{e2eRegistry, "mounttest", "1.0"}
configs[MounttestUser] = Config{e2eRegistry, "mounttest-user", "1.0"}
configs[Nautilus] = Config{e2eRegistry, "nautilus", "1.0"}
configs[Net] = Config{e2eRegistry, "net", "1.0"}
configs[Netexec] = Config{e2eRegistry, "netexec", "1.1"}
configs[Nettest] = Config{e2eRegistry, "nettest", "1.0"}
configs[Nginx] = Config{dockerLibraryRegistry, "nginx", "1.14-alpine"}
configs[NginxNew] = Config{dockerLibraryRegistry, "nginx", "1.15-alpine"}
configs[Nonewprivs] = Config{e2eRegistry, "nonewprivs", "1.0"}
configs[NonRoot] = Config{e2eRegistry, "nonroot", "1.0"}
// Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
configs[Pause] = Config{gcRegistry, "pause", "3.1"}
configs[Perl] = Config{dockerLibraryRegistry, "perl", "5.26"}
configs[Porter] = Config{e2eRegistry, "porter", "1.0"}
configs[PrometheusDummyExporter] = Config{e2eRegistry, "prometheus-dummy-exporter", "v0.1.0"}
configs[PrometheusToSd] = Config{e2eRegistry, "prometheus-to-sd", "v0.5.0"}
configs[Redis] = Config{e2eRegistry, "redis", "1.0"}
configs[ResourceConsumer] = Config{e2eRegistry, "resource-consumer", "1.5"}
configs[ResourceController] = Config{e2eRegistry, "resource-consumer-controller", "1.0"}
configs[SdDummyExporter] = Config{gcRegistry, "sd-dummy-exporter", "v0.2.0"}
configs[ServeHostname] = Config{e2eRegistry, "serve-hostname", "1.1"}
configs[StartupScript] = Config{googleContainerRegistry, "startup-script", "v1"}
configs[TestWebserver] = Config{e2eRegistry, "test-webserver", "1.0"}
configs[VolumeNFSServer] = Config{e2eRegistry, "volume/nfs", "1.0"}
configs[VolumeISCSIServer] = Config{e2eRegistry, "volume/iscsi", "2.0"}
configs[VolumeGlusterServer] = Config{e2eRegistry, "volume/gluster", "1.0"}
configs[VolumeRBDServer] = Config{e2eRegistry, "volume/rbd", "1.0.1"}
configs[WindowsNanoServer] = Config{e2eRegistry, "windows-nanoserver", "v1"}
configs[Cirros] = Config{cirros, "cirros-0.5.1-x86_64-disk.img", "0.5.1"}
return configs
}
// GetImageConfigs returns the map of imageConfigs
func GetImageConfigs() map[int]Config {
return imageConfigs
}
// GetConfig returns the Config object for an image
func GetConfig(image int) Config {
return imageConfigs[image]
}
// GetE2EImage returns the fully qualified URI to an image (including version)
func GetE2EImage(image int) string {
return fmt.Sprintf("%s/%s:%s", imageConfigs[image].registry, imageConfigs[image].name, imageConfigs[image].version)
}
// GetE2EImage returns the fully qualified URI to an image (including version)
func (i *Config) GetE2EImage() string {
return fmt.Sprintf("%s/%s:%s", i.registry, i.name, i.version)
}
// Cirros image url format is site/version/name
func GetE2EVmImage(image int) string {
return fmt.Sprintf("%s/%s:%s", imageConfigs[image].registry, imageConfigs[image].version, imageConfigs[image].name)
}
func (i *Config) GetE2EVmImage() string {
return fmt.Sprintf("%s/%s:%s", i.registry, i.version, i.name)
}
// GetPauseImageName returns the pause image name with proper version
func GetPauseImageName() string {
return GetE2EImage(Pause)
}
// Default VM test image with 0.5.1 version of cirros image
// download.cirros-cloud.net/0.5.1/cirros-0.5.1-x86_64-disk.img
func GetDefaultVmE2EImage() string {
return GetE2EVmImage(Cirros)
}
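// Illustrative only; not part of the original source. With the default
// registry list above, the container form resolves to registry/name:version,
// e.g. GetE2EImage(BusyBox) == "docker.io/library/busybox:1.29"; a
// KUBE_TEST_REPO_LIST override swaps only the registry portion.
var exampleBusyBoxImage = GetE2EImage(BusyBox)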
| [
"\"KUBE_TEST_REPO_LIST\""
]
| []
| [
"KUBE_TEST_REPO_LIST"
]
| [] | ["KUBE_TEST_REPO_LIST"] | go | 1 | 0 | |
pkg/mod/gopkg.in/[email protected]/encode_test.go | package yaml_test
import (
"bytes"
"fmt"
"math"
"strconv"
"strings"
"time"
"net"
"os"
. "gopkg.in/check.v1"
"gopkg.in/yaml.v2"
)
type jsonNumberT string
func (j jsonNumberT) Int64() (int64, error) {
val, err := strconv.Atoi(string(j))
if err != nil {
return 0, err
}
return int64(val), nil
}
func (j jsonNumberT) Float64() (float64, error) {
return strconv.ParseFloat(string(j), 64)
}
func (j jsonNumberT) String() string {
return string(j)
}
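// jsonNumberT mirrors the method set of encoding/json.Number (String, Int64,
// Float64), so the marshalTests below can check that such values are emitted
// as plain YAML numbers when they parse and as plain strings otherwise.
// (Descriptive note added for clarity; not part of the original source.)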
var marshalIntTest = 123
var marshalTests = []struct {
value interface{}
data string
}{
{
nil,
"null\n",
}, {
(*marshalerType)(nil),
"null\n",
}, {
&struct{}{},
"{}\n",
}, {
map[string]string{"v": "hi"},
"v: hi\n",
}, {
map[string]interface{}{"v": "hi"},
"v: hi\n",
}, {
map[string]string{"v": "true"},
"v: \"true\"\n",
}, {
map[string]string{"v": "false"},
"v: \"false\"\n",
}, {
map[string]interface{}{"v": true},
"v: true\n",
}, {
map[string]interface{}{"v": false},
"v: false\n",
}, {
map[string]interface{}{"v": 10},
"v: 10\n",
}, {
map[string]interface{}{"v": -10},
"v: -10\n",
}, {
map[string]uint{"v": 42},
"v: 42\n",
}, {
map[string]interface{}{"v": int64(4294967296)},
"v: 4294967296\n",
}, {
map[string]int64{"v": int64(4294967296)},
"v: 4294967296\n",
}, {
map[string]uint64{"v": 4294967296},
"v: 4294967296\n",
}, {
map[string]interface{}{"v": "10"},
"v: \"10\"\n",
}, {
map[string]interface{}{"v": 0.1},
"v: 0.1\n",
}, {
map[string]interface{}{"v": float64(0.1)},
"v: 0.1\n",
}, {
map[string]interface{}{"v": float32(0.99)},
"v: 0.99\n",
}, {
map[string]interface{}{"v": -0.1},
"v: -0.1\n",
}, {
map[string]interface{}{"v": math.Inf(+1)},
"v: .inf\n",
}, {
map[string]interface{}{"v": math.Inf(-1)},
"v: -.inf\n",
}, {
map[string]interface{}{"v": math.NaN()},
"v: .nan\n",
}, {
map[string]interface{}{"v": nil},
"v: null\n",
}, {
map[string]interface{}{"v": ""},
"v: \"\"\n",
}, {
map[string][]string{"v": []string{"A", "B"}},
"v:\n- A\n- B\n",
}, {
map[string][]string{"v": []string{"A", "B\nC"}},
"v:\n- A\n- |-\n B\n C\n",
}, {
map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
"v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
}, {
map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
"a:\n b: c\n",
}, {
map[string]interface{}{"a": "-"},
"a: '-'\n",
},
// Simple values.
{
&marshalIntTest,
"123\n",
},
// Structures
{
&struct{ Hello string }{"world"},
"hello: world\n",
}, {
&struct {
A struct {
B string
}
}{struct{ B string }{"c"}},
"a:\n b: c\n",
}, {
&struct {
A *struct {
B string
}
}{&struct{ B string }{"c"}},
"a:\n b: c\n",
}, {
&struct {
A *struct {
B string
}
}{},
"a: null\n",
}, {
&struct{ A int }{1},
"a: 1\n",
}, {
&struct{ A []int }{[]int{1, 2}},
"a:\n- 1\n- 2\n",
}, {
&struct{ A [2]int }{[2]int{1, 2}},
"a:\n- 1\n- 2\n",
}, {
&struct {
B int "a"
}{1},
"a: 1\n",
}, {
&struct{ A bool }{true},
"a: true\n",
},
// Conditional flag
{
&struct {
A int "a,omitempty"
B int "b,omitempty"
}{1, 0},
"a: 1\n",
}, {
&struct {
A int "a,omitempty"
B int "b,omitempty"
}{0, 0},
"{}\n",
}, {
&struct {
A *struct{ X, y int } "a,omitempty,flow"
}{&struct{ X, y int }{1, 2}},
"a: {x: 1}\n",
}, {
&struct {
A *struct{ X, y int } "a,omitempty,flow"
}{nil},
"{}\n",
}, {
&struct {
A *struct{ X, y int } "a,omitempty,flow"
}{&struct{ X, y int }{}},
"a: {x: 0}\n",
}, {
&struct {
A struct{ X, y int } "a,omitempty,flow"
}{struct{ X, y int }{1, 2}},
"a: {x: 1}\n",
}, {
&struct {
A struct{ X, y int } "a,omitempty,flow"
}{struct{ X, y int }{0, 1}},
"{}\n",
}, {
&struct {
A float64 "a,omitempty"
B float64 "b,omitempty"
}{1, 0},
"a: 1\n",
},
{
&struct {
T1 time.Time "t1,omitempty"
T2 time.Time "t2,omitempty"
T3 *time.Time "t3,omitempty"
T4 *time.Time "t4,omitempty"
}{
T2: time.Date(2018, 1, 9, 10, 40, 47, 0, time.UTC),
T4: newTime(time.Date(2098, 1, 9, 10, 40, 47, 0, time.UTC)),
},
"t2: 2018-01-09T10:40:47Z\nt4: 2098-01-09T10:40:47Z\n",
},
// Nil interface that implements Marshaler.
{
map[string]yaml.Marshaler{
"a": nil,
},
"a: null\n",
},
// Flow flag
{
&struct {
A []int "a,flow"
}{[]int{1, 2}},
"a: [1, 2]\n",
}, {
&struct {
A map[string]string "a,flow"
}{map[string]string{"b": "c", "d": "e"}},
"a: {b: c, d: e}\n",
}, {
&struct {
A struct {
B, D string
} "a,flow"
}{struct{ B, D string }{"c", "e"}},
"a: {b: c, d: e}\n",
},
// Unexported field
{
&struct {
u int
A int
}{0, 1},
"a: 1\n",
},
// Ignored field
{
&struct {
A int
B int "-"
}{1, 2},
"a: 1\n",
},
// Struct inlining
{
&struct {
A int
C inlineB `yaml:",inline"`
}{1, inlineB{2, inlineC{3}}},
"a: 1\nb: 2\nc: 3\n",
},
// Map inlining
{
&struct {
A int
C map[string]int `yaml:",inline"`
}{1, map[string]int{"b": 2, "c": 3}},
"a: 1\nb: 2\nc: 3\n",
},
// Duration
{
map[string]time.Duration{"a": 3 * time.Second},
"a: 3s\n",
},
// Issue #24: bug in map merging logic.
{
map[string]string{"a": "<foo>"},
"a: <foo>\n",
},
// Issue #34: marshal unsupported base 60 floats quoted for compatibility
// with old YAML 1.1 parsers.
{
map[string]string{"a": "1:1"},
"a: \"1:1\"\n",
},
// Binary data.
{
map[string]string{"a": "\x00"},
"a: \"\\0\"\n",
}, {
map[string]string{"a": "\x80\x81\x82"},
"a: !!binary gIGC\n",
}, {
map[string]string{"a": strings.Repeat("\x90", 54)},
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
},
// Ordered maps.
{
&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
"b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n",
},
// Encode unicode as utf-8 rather than in escaped form.
{
map[string]string{"a": "你好"},
"a: 你好\n",
},
// Support encoding.TextMarshaler.
{
map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
"a: 1.2.3.4\n",
},
// time.Time gets a timestamp tag.
{
map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC)},
"a: 2015-02-24T18:19:39Z\n",
},
{
map[string]*time.Time{"a": newTime(time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC))},
"a: 2015-02-24T18:19:39Z\n",
},
{
// This is confirmed to be properly decoded in Python (libyaml) without a timestamp tag.
map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 123456789, time.FixedZone("FOO", -3*60*60))},
"a: 2015-02-24T18:19:39.123456789-03:00\n",
},
// Ensure timestamp-like strings are quoted.
{
map[string]string{"a": "2015-02-24T18:19:39Z"},
"a: \"2015-02-24T18:19:39Z\"\n",
},
// Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible).
{
map[string]string{"a": "b: c"},
"a: 'b: c'\n",
},
	// Strings containing a hash mark ('#') should be quoted
{
map[string]string{"a": "Hello #comment"},
"a: 'Hello #comment'\n",
},
{
map[string]string{"a": "你好 #comment"},
"a: '你好 #comment'\n",
},
{
map[string]interface{}{"a": jsonNumberT("5")},
"a: 5\n",
},
{
map[string]interface{}{"a": jsonNumberT("100.5")},
"a: 100.5\n",
},
{
map[string]interface{}{"a": jsonNumberT("bogus")},
"a: bogus\n",
},
// Ensure that strings do not wrap
{
map[string]string{"a": "abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 "},
"a: 'abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 '\n",
},
}
func (s *S) TestMarshal(c *C) {
defer os.Setenv("TZ", os.Getenv("TZ"))
os.Setenv("TZ", "UTC")
for i, item := range marshalTests {
c.Logf("test %d: %q", i, item.data)
data, err := yaml.Marshal(item.value)
c.Assert(err, IsNil)
c.Assert(string(data), Equals, item.data)
}
}
func (s *S) TestEncoderSingleDocument(c *C) {
for i, item := range marshalTests {
c.Logf("test %d. %q", i, item.data)
var buf bytes.Buffer
enc := yaml.NewEncoder(&buf)
err := enc.Encode(item.value)
c.Assert(err, Equals, nil)
err = enc.Close()
c.Assert(err, Equals, nil)
c.Assert(buf.String(), Equals, item.data)
}
}
func (s *S) TestEncoderMultipleDocuments(c *C) {
var buf bytes.Buffer
enc := yaml.NewEncoder(&buf)
err := enc.Encode(map[string]string{"a": "b"})
c.Assert(err, Equals, nil)
err = enc.Encode(map[string]string{"c": "d"})
c.Assert(err, Equals, nil)
err = enc.Close()
c.Assert(err, Equals, nil)
c.Assert(buf.String(), Equals, "a: b\n---\nc: d\n")
}
func (s *S) TestEncoderWriteError(c *C) {
enc := yaml.NewEncoder(errorWriter{})
err := enc.Encode(map[string]string{"a": "b"})
c.Assert(err, ErrorMatches, `yaml: write error: some write error`) // Data not flushed yet
}
type errorWriter struct{}
func (errorWriter) Write([]byte) (int, error) {
return 0, fmt.Errorf("some write error")
}
var marshalErrorTests = []struct {
value interface{}
error string
panic string
}{{
value: &struct {
B int
inlineB ",inline"
}{1, inlineB{2, inlineC{3}}},
panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
}, {
value: &struct {
A int
B map[string]int ",inline"
}{1, map[string]int{"a": 2}},
panic: `Can't have key "a" in inlined map; conflicts with struct field`,
}}
func (s *S) TestMarshalErrors(c *C) {
for _, item := range marshalErrorTests {
if item.panic != "" {
c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
} else {
_, err := yaml.Marshal(item.value)
c.Assert(err, ErrorMatches, item.error)
}
}
}
func (s *S) TestMarshalTypeCache(c *C) {
var data []byte
var err error
func() {
type T struct{ A int }
data, err = yaml.Marshal(&T{})
c.Assert(err, IsNil)
}()
func() {
type T struct{ B int }
data, err = yaml.Marshal(&T{})
c.Assert(err, IsNil)
}()
c.Assert(string(data), Equals, "b: 0\n")
}
var marshalerTests = []struct {
data string
value interface{}
}{
{"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}},
{"_:\n- 1\n- A\n", []interface{}{1, "A"}},
{"_: 10\n", 10},
{"_: null\n", nil},
{"_: BAR!\n", "BAR!"},
}
type marshalerType struct {
value interface{}
}
func (o marshalerType) MarshalText() ([]byte, error) {
panic("MarshalText called on type with MarshalYAML")
}
func (o marshalerType) MarshalYAML() (interface{}, error) {
return o.value, nil
}
type marshalerValue struct {
Field marshalerType "_"
}
func (s *S) TestMarshaler(c *C) {
for _, item := range marshalerTests {
obj := &marshalerValue{}
obj.Field.value = item.value
data, err := yaml.Marshal(obj)
c.Assert(err, IsNil)
c.Assert(string(data), Equals, string(item.data))
}
}
func (s *S) TestMarshalerWholeDocument(c *C) {
obj := &marshalerType{}
obj.value = map[string]string{"hello": "world!"}
data, err := yaml.Marshal(obj)
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "hello: world!\n")
}
type failingMarshaler struct{}
func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
return nil, failingErr
}
func (s *S) TestMarshalerError(c *C) {
_, err := yaml.Marshal(&failingMarshaler{})
c.Assert(err, Equals, failingErr)
}
func (s *S) TestSortedOutput(c *C) {
order := []interface{}{
false,
true,
1,
uint(1),
1.0,
1.1,
1.2,
2,
uint(2),
2.0,
2.1,
"",
".1",
".2",
".a",
"1",
"2",
"a!10",
"a/0001",
"a/002",
"a/3",
"a/10",
"a/11",
"a/0012",
"a/100",
"a~10",
"ab/1",
"b/1",
"b/01",
"b/2",
"b/02",
"b/3",
"b/03",
"b1",
"b01",
"b3",
"c2.10",
"c10.2",
"d1",
"d7",
"d7abc",
"d12",
"d12a",
}
m := make(map[interface{}]int)
for _, k := range order {
m[k] = 1
}
data, err := yaml.Marshal(m)
c.Assert(err, IsNil)
out := "\n" + string(data)
last := 0
for i, k := range order {
repr := fmt.Sprint(k)
if s, ok := k.(string); ok {
if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
repr = `"` + repr + `"`
}
}
index := strings.Index(out, "\n"+repr+":")
if index == -1 {
c.Fatalf("%#v is not in the output: %#v", k, out)
}
if index < last {
c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
}
last = index
}
}
func newTime(t time.Time) *time.Time {
return &t
}
| ["\"TZ\""] | [] | ["TZ"] | [] | ["TZ"] | go | 1 | 0 |
main_test.go | package crud_test
import (
"database/sql"
"fmt"
"os"
"testing"
"time"
"github.com/azer/crud/v2"
_ "github.com/go-sql-driver/mysql"
"github.com/stretchr/testify/assert"
)
var DB *crud.DB
type UserProfile struct {
Id int `json:"id" sql:"auto-increment primary-key required"`
Name string `json:"name" sql:"required"`
Bio string `json:"bio" sql:"type=text"`
Email string `json:"e-mail" sql:"name=email unique"`
Attachment []byte `json:"attachment"`
Modified int64 `json:"modified" sql:"name=modified_col"`
}
type UserProfileNull struct {
Id sql.NullInt64 `json:"id" sql:"auto-increment primary-key required"`
Name sql.NullString `json:"name" sql:"required"`
Bio sql.NullString `json:"bio" sql:"type=text"`
Email sql.NullString `json:"e-mail" sql:"name=email"`
Modified sql.NullInt64 `json:"modified" sql:"name=modified"`
}
type Mixed struct {
Id int `json:"-" sql:" primary-key auto-increment unsigned name=id table-name=__mixed__ "`
UserId int `json:"-" valid:"User.Id~Specified user was not found" sql:" name=user_id"`
Secret string `json:"-" valid:"required" sql:" name=secret"`
CreatedAt int64 `json:"-" sql:"default=0 name=created_at"`
UpdatedAt int64 `json:"-" sql:"default=0 name=updated_at"`
}
type Post struct {
Id int `json:"id" sql:"auto-increment primary-key required table-name=renamed_posts"`
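	// Note: the table-name tag above is why TestCreateTables below checks for a
	// "renamed_posts" table instead of a default "posts" table.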
Title string `json:"title"`
Text string `json:"text"`
CreatedAt time.Time `json:"created_at"`
}
type Foo struct {
Id int
APIKey string
YOLO bool
Beast string
}
type EmbeddedFoo struct {
Foo
Span int
Eggs string
}
type FooSlice []Foo
type FooPTRSlice []*Foo
type CustomTableName struct {
Foo int `sql:"table-name=yolo"`
}
func init() {
fmt.Println("db:", os.Getenv("DATABASE_URL"))
var err error
DB, err = crud.Connect("mysql", os.Getenv("DATABASE_URL"))
if err != nil {
panic(err)
}
}
func TestPing(t *testing.T) {
assert.Nil(t, DB.Ping())
}
func TestExecuteSQL(t *testing.T) {
result, err := DB.Client.Exec("SHOW TABLES LIKE 'shouldnotexist'")
assert.Nil(t, err)
l, err := result.LastInsertId()
assert.Equal(t, err, nil)
assert.Equal(t, l, int64(0))
a, err := result.RowsAffected()
assert.Equal(t, err, nil)
assert.Equal(t, a, int64(0))
}
func TestCreateTables(t *testing.T) {
err := DB.CreateTables(UserProfile{}, Post{})
assert.Nil(t, err)
assert.True(t, DB.CheckIfTableExists("user_profiles"))
assert.True(t, DB.CheckIfTableExists("renamed_posts"))
}
func TestDropTables(t *testing.T) {
err := DB.DropTables(UserProfile{}, Post{})
assert.Nil(t, err)
assert.False(t, DB.CheckIfTableExists("user_profiles"))
assert.False(t, DB.CheckIfTableExists("posts"))
}
| ["\"DATABASE_URL\"", "\"DATABASE_URL\""] | [] | ["DATABASE_URL"] | [] | ["DATABASE_URL"] | go | 1 | 0 |
xds/src/main/java/io/grpc/xds/XdsNameResolver.java | /*
* Copyright 2019 The gRPC Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc.xds;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.gson.Gson;
import io.grpc.Attributes;
import io.grpc.CallOptions;
import io.grpc.Channel;
import io.grpc.ClientCall;
import io.grpc.ClientInterceptor;
import io.grpc.Context;
import io.grpc.Deadline;
import io.grpc.ForwardingClientCall.SimpleForwardingClientCall;
import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener;
import io.grpc.InternalConfigSelector;
import io.grpc.InternalLogId;
import io.grpc.LoadBalancer.PickSubchannelArgs;
import io.grpc.Metadata;
import io.grpc.MethodDescriptor;
import io.grpc.NameResolver;
import io.grpc.Status;
import io.grpc.SynchronizationContext;
import io.grpc.internal.DelayedClientCall;
import io.grpc.internal.GrpcUtil;
import io.grpc.internal.ObjectPool;
import io.grpc.xds.HttpFault.FaultAbort;
import io.grpc.xds.HttpFault.FaultDelay;
import io.grpc.xds.HttpFault.FractionalPercent;
import io.grpc.xds.Matchers.FractionMatcher;
import io.grpc.xds.Matchers.HeaderMatcher;
import io.grpc.xds.Matchers.PathMatcher;
import io.grpc.xds.ThreadSafeRandom.ThreadSafeRandomImpl;
import io.grpc.xds.VirtualHost.Route;
import io.grpc.xds.VirtualHost.Route.RouteAction;
import io.grpc.xds.VirtualHost.Route.RouteAction.ClusterWeight;
import io.grpc.xds.VirtualHost.Route.RouteAction.HashPolicy;
import io.grpc.xds.VirtualHost.Route.RouteMatch;
import io.grpc.xds.XdsClient.LdsResourceWatcher;
import io.grpc.xds.XdsClient.LdsUpdate;
import io.grpc.xds.XdsClient.RdsResourceWatcher;
import io.grpc.xds.XdsClient.RdsUpdate;
import io.grpc.xds.XdsLogger.XdsLogLevel;
import io.grpc.xds.XdsNameResolverProvider.CallCounterProvider;
import io.grpc.xds.XdsNameResolverProvider.XdsClientPoolFactory;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import javax.annotation.Nullable;
/**
* A {@link NameResolver} for resolving gRPC target names with "xds:" scheme.
*
* <p>Resolving a gRPC target involves contacting the control plane management server via xDS
* protocol to retrieve service information and produce a service config to the caller.
*
* @see XdsNameResolverProvider
*/
final class XdsNameResolver extends NameResolver {
static final CallOptions.Key<String> CLUSTER_SELECTION_KEY =
CallOptions.Key.create("io.grpc.xds.CLUSTER_SELECTION_KEY");
static final CallOptions.Key<Long> RPC_HASH_KEY =
CallOptions.Key.create("io.grpc.xds.RPC_HASH_KEY");
@VisibleForTesting
static boolean enableTimeout =
Boolean.parseBoolean(System.getenv("GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT"));
@VisibleForTesting
static final Metadata.Key<String> DOWNSTREAM_NODE_KEY =
Metadata.Key.of("x-envoy-downstream-service-node", Metadata.ASCII_STRING_MARSHALLER);
@VisibleForTesting
static final Metadata.Key<String> HEADER_DELAY_KEY =
Metadata.Key.of("x-envoy-fault-delay-request", Metadata.ASCII_STRING_MARSHALLER);
@VisibleForTesting
static final Metadata.Key<String> HEADER_DELAY_PERCENTAGE_KEY =
Metadata.Key.of("x-envoy-fault-delay-request-percentage", Metadata.ASCII_STRING_MARSHALLER);
@VisibleForTesting
static final Metadata.Key<String> HEADER_ABORT_HTTP_STATUS_KEY =
Metadata.Key.of("x-envoy-fault-abort-request", Metadata.ASCII_STRING_MARSHALLER);
@VisibleForTesting
static final Metadata.Key<String> HEADER_ABORT_GRPC_STATUS_KEY =
Metadata.Key.of("x-envoy-fault-abort-grpc-request", Metadata.ASCII_STRING_MARSHALLER);
@VisibleForTesting
static final Metadata.Key<String> HEADER_ABORT_PERCENTAGE_KEY =
Metadata.Key.of("x-envoy-fault-abort-request-percentage", Metadata.ASCII_STRING_MARSHALLER);
@VisibleForTesting
static AtomicLong activeFaultInjectedStreamCounter = new AtomicLong();
private final InternalLogId logId;
private final XdsLogger logger;
private final String authority;
private final ServiceConfigParser serviceConfigParser;
private final SynchronizationContext syncContext;
private final ScheduledExecutorService scheduler;
private final XdsClientPoolFactory xdsClientPoolFactory;
private final ThreadSafeRandom random;
private final XxHash64 hashFunc = XxHash64.INSTANCE;
private final ConcurrentMap<String, AtomicInteger> clusterRefs = new ConcurrentHashMap<>();
private final ConfigSelector configSelector = new ConfigSelector();
private volatile RoutingConfig routingConfig = RoutingConfig.empty;
private Listener2 listener;
private ObjectPool<XdsClient> xdsClientPool;
private XdsClient xdsClient;
private CallCounterProvider callCounterProvider;
private ResolveState resolveState;
XdsNameResolver(String name, ServiceConfigParser serviceConfigParser,
SynchronizationContext syncContext, ScheduledExecutorService scheduler) {
this(name, serviceConfigParser, syncContext, scheduler,
SharedXdsClientPoolProvider.getDefaultProvider(), ThreadSafeRandomImpl.instance);
}
@VisibleForTesting
XdsNameResolver(String name, ServiceConfigParser serviceConfigParser,
SynchronizationContext syncContext, ScheduledExecutorService scheduler,
XdsClientPoolFactory xdsClientPoolFactory, ThreadSafeRandom random) {
authority = GrpcUtil.checkAuthority(checkNotNull(name, "name"));
this.serviceConfigParser = checkNotNull(serviceConfigParser, "serviceConfigParser");
this.syncContext = checkNotNull(syncContext, "syncContext");
this.scheduler = checkNotNull(scheduler, "scheduler");
this.xdsClientPoolFactory = checkNotNull(xdsClientPoolFactory, "xdsClientPoolFactory");
this.random = checkNotNull(random, "random");
logId = InternalLogId.allocate("xds-resolver", name);
logger = XdsLogger.withLogId(logId);
logger.log(XdsLogLevel.INFO, "Created resolver for {0}", name);
}
@Override
public String getServiceAuthority() {
return authority;
}
@Override
public void start(Listener2 listener) {
this.listener = checkNotNull(listener, "listener");
try {
xdsClientPool = xdsClientPoolFactory.getXdsClientPool();
} catch (Exception e) {
listener.onError(
Status.UNAVAILABLE.withDescription("Failed to initialize xDS").withCause(e));
return;
}
xdsClient = xdsClientPool.getObject();
callCounterProvider = SharedCallCounterMap.getInstance();
resolveState = new ResolveState();
resolveState.start();
}
@Override
public void shutdown() {
logger.log(XdsLogLevel.INFO, "Shutdown");
if (resolveState != null) {
resolveState.stop();
}
if (xdsClient != null) {
xdsClient = xdsClientPool.returnObject(xdsClient);
}
}
@VisibleForTesting
static Map<String, ?> generateServiceConfigWithMethodTimeoutConfig(long timeoutNano) {
String timeout = timeoutNano / 1_000_000_000.0 + "s";
Map<String, Object> methodConfig = new HashMap<>();
methodConfig.put(
"name", Collections.singletonList(Collections.emptyMap()));
methodConfig.put("timeout", timeout);
return Collections.singletonMap(
"methodConfig", Collections.singletonList(Collections.unmodifiableMap(methodConfig)));
}
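  // For example, a timeout of 1_500_000_000 ns yields a map equivalent to the JSON
  // {"methodConfig": [{"name": [{}], "timeout": "1.5s"}]}.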
@VisibleForTesting
static Map<String, ?> generateServiceConfigWithLoadBalancingConfig(Collection<String> clusters) {
Map<String, Object> childPolicy = new HashMap<>();
for (String cluster : clusters) {
List<Map<String, Map<String, String>>> lbPolicy =
Collections.singletonList(
Collections.singletonMap(
"cds_experimental", Collections.singletonMap("cluster", cluster)));
childPolicy.put(cluster, Collections.singletonMap("lbPolicy", lbPolicy));
}
return Collections.singletonMap("loadBalancingConfig",
Collections.singletonList(
Collections.singletonMap(
"cluster_manager_experimental", Collections.singletonMap(
"childPolicy", Collections.unmodifiableMap(childPolicy)))));
}
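  // For a single cluster "c1" this yields a map equivalent to the JSON
  // {"loadBalancingConfig": [{"cluster_manager_experimental": {"childPolicy":
  //     {"c1": {"lbPolicy": [{"cds_experimental": {"cluster": "c1"}}]}}}}]}.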
@VisibleForTesting
XdsClient getXdsClient() {
return xdsClient;
}
private void updateResolutionResult() {
Map<String, ?> rawServiceConfig =
generateServiceConfigWithLoadBalancingConfig(clusterRefs.keySet());
if (logger.isLoggable(XdsLogLevel.INFO)) {
logger.log(
XdsLogLevel.INFO, "Generated service config:\n{0}", new Gson().toJson(rawServiceConfig));
}
ConfigOrError parsedServiceConfig = serviceConfigParser.parseServiceConfig(rawServiceConfig);
Attributes attrs =
Attributes.newBuilder()
.set(InternalXdsAttributes.XDS_CLIENT_POOL, xdsClientPool)
.set(InternalXdsAttributes.CALL_COUNTER_PROVIDER, callCounterProvider)
.set(InternalConfigSelector.KEY, configSelector)
.build();
ResolutionResult result =
ResolutionResult.newBuilder()
.setAttributes(attrs)
.setServiceConfig(parsedServiceConfig)
.build();
listener.onResult(result);
}
@VisibleForTesting
@Nullable
static VirtualHost findVirtualHostForHostName(List<VirtualHost> virtualHosts, String hostName) {
// Domain search order:
// 1. Exact domain names: ``www.foo.com``.
// 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``.
// 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``.
// 4. Special wildcard ``*`` matching any domain.
//
// The longest wildcards match first.
// Assuming only a single virtual host in the entire route configuration can match
// on ``*`` and a domain must be unique across all virtual hosts.
int matchingLen = -1; // longest length of wildcard pattern that matches host name
boolean exactMatchFound = false; // true if a virtual host with exactly matched domain found
VirtualHost targetVirtualHost = null; // target VirtualHost with longest matched domain
for (VirtualHost vHost : virtualHosts) {
for (String domain : vHost.domains()) {
boolean selected = false;
if (matchHostName(hostName, domain)) { // matching
if (!domain.contains("*")) { // exact matching
exactMatchFound = true;
targetVirtualHost = vHost;
break;
} else if (domain.length() > matchingLen) { // longer matching pattern
selected = true;
} else if (domain.length() == matchingLen && domain.startsWith("*")) { // suffix matching
selected = true;
}
}
if (selected) {
matchingLen = domain.length();
targetVirtualHost = vHost;
}
}
if (exactMatchFound) {
break;
}
}
return targetVirtualHost;
}
/**
   * Returns {@code true} iff {@code hostName} matches the domain name {@code pattern},
   * comparing case-insensitively.
*
* <p>Wildcard pattern rules:
* <ol>
* <li>A single asterisk (*) matches any domain.</li>
* <li>Asterisk (*) is only permitted in the left-most or the right-most part of the pattern,
* but not both.</li>
* </ol>
*/
@VisibleForTesting
static boolean matchHostName(String hostName, String pattern) {
checkArgument(hostName.length() != 0 && !hostName.startsWith(".") && !hostName.endsWith("."),
"Invalid host name");
checkArgument(pattern.length() != 0 && !pattern.startsWith(".") && !pattern.endsWith("."),
"Invalid pattern/domain name");
hostName = hostName.toLowerCase(Locale.US);
pattern = pattern.toLowerCase(Locale.US);
// hostName and pattern are now in lower case -- domain names are case-insensitive.
if (!pattern.contains("*")) {
// Not a wildcard pattern -- hostName and pattern must match exactly.
return hostName.equals(pattern);
}
// Wildcard pattern
if (pattern.length() == 1) {
return true;
}
int index = pattern.indexOf('*');
// At most one asterisk (*) is allowed.
if (pattern.indexOf('*', index + 1) != -1) {
return false;
}
// Asterisk can only match prefix or suffix.
if (index != 0 && index != pattern.length() - 1) {
return false;
}
// HostName must be at least as long as the pattern because asterisk has to
// match one or more characters.
if (hostName.length() < pattern.length()) {
return false;
}
if (index == 0 && hostName.endsWith(pattern.substring(1))) {
      // Leading wildcard: the pattern matches if the host name ends with the literal suffix.
return true;
}
// Pattern matches hostname if suffix matching succeeds.
return index == pattern.length() - 1
&& hostName.startsWith(pattern.substring(0, pattern.length() - 1));
}
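  // Examples of the rules above: matchHostName("www.foo.com", "www.foo.com") and
  // matchHostName("www.foo.com", "*.foo.com") return true, while
  // matchHostName("www.foo.com", "*.bar.com") and matchHostName("foo.com", "*.foo.com")
  // return false (the asterisk must match at least one character).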
private final class ConfigSelector extends InternalConfigSelector {
@Override
public Result selectConfig(PickSubchannelArgs args) {
// Index ASCII headers by key, multi-value headers are concatenated for matching purposes.
Map<String, String> asciiHeaders = new HashMap<>();
Metadata headers = args.getHeaders();
for (String headerName : headers.keys()) {
if (headerName.endsWith(Metadata.BINARY_HEADER_SUFFIX)) {
continue;
}
Metadata.Key<String> key = Metadata.Key.of(headerName, Metadata.ASCII_STRING_MARSHALLER);
Iterable<String> values = headers.getAll(key);
if (values != null) {
asciiHeaders.put(headerName, Joiner.on(",").join(values));
}
}
// Special hack for exposing headers: "content-type".
asciiHeaders.put("content-type", "application/grpc");
String cluster = null;
Route selectedRoute = null;
HttpFault selectedFaultConfig;
RoutingConfig routingCfg;
do {
routingCfg = routingConfig;
selectedFaultConfig = routingCfg.faultConfig;
for (Route route : routingCfg.routes) {
if (matchRoute(route.routeMatch(), "/" + args.getMethodDescriptor().getFullMethodName(),
asciiHeaders, random)) {
selectedRoute = route;
if (routingCfg.applyFaultInjection && route.httpFault() != null) {
selectedFaultConfig = route.httpFault();
}
break;
}
}
if (selectedRoute == null) {
return Result.forError(
Status.UNAVAILABLE.withDescription("Could not find xDS route matching RPC"));
}
RouteAction action = selectedRoute.routeAction();
if (action.cluster() != null) {
cluster = action.cluster();
} else if (action.weightedClusters() != null) {
int totalWeight = 0;
for (ClusterWeight weightedCluster : action.weightedClusters()) {
totalWeight += weightedCluster.weight();
}
int select = random.nextInt(totalWeight);
int accumulator = 0;
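          // Walk the cumulative weights and pick the cluster whose range contains the
          // random value, so each cluster is selected with probability weight/totalWeight.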
for (ClusterWeight weightedCluster : action.weightedClusters()) {
accumulator += weightedCluster.weight();
if (select < accumulator) {
cluster = weightedCluster.name();
if (routingCfg.applyFaultInjection && weightedCluster.httpFault() != null) {
selectedFaultConfig = weightedCluster.httpFault();
}
break;
}
}
}
} while (!retainCluster(cluster));
// TODO(chengyuanzhang): avoid service config generation and parsing for each call.
Map<String, ?> rawServiceConfig = Collections.emptyMap();
if (enableTimeout) {
Long timeoutNano = selectedRoute.routeAction().timeoutNano();
if (timeoutNano == null) {
timeoutNano = routingCfg.fallbackTimeoutNano;
}
if (timeoutNano > 0) {
rawServiceConfig = generateServiceConfigWithMethodTimeoutConfig(timeoutNano);
}
}
ConfigOrError parsedServiceConfig = serviceConfigParser.parseServiceConfig(rawServiceConfig);
Object config = parsedServiceConfig.getConfig();
if (config == null) {
releaseCluster(cluster);
return Result.forError(
parsedServiceConfig.getError().augmentDescription(
"Failed to parse service config (method config)"));
}
if (selectedFaultConfig != null && selectedFaultConfig.maxActiveFaults() != null
&& activeFaultInjectedStreamCounter.get() >= selectedFaultConfig.maxActiveFaults()) {
selectedFaultConfig = null;
}
if (selectedFaultConfig != null) {
if (!selectedFaultConfig.upstreamCluster().equals(cluster)) {
selectedFaultConfig = null;
} else if (!selectedFaultConfig.downstreamNodes().isEmpty()) {
String downstreamNode = headers.get(DOWNSTREAM_NODE_KEY);
if (downstreamNode == null
|| !selectedFaultConfig.downstreamNodes().contains(downstreamNode)) {
selectedFaultConfig = null;
}
}
}
if (selectedFaultConfig != null
&& !matchHeaders(selectedFaultConfig.headers(), asciiHeaders)) {
selectedFaultConfig = null;
}
Long delayNanos = null;
Status abortStatus = null;
if (selectedFaultConfig != null) {
if (selectedFaultConfig.faultDelay() != null) {
delayNanos = determineFaultDelayNanos(selectedFaultConfig.faultDelay(), headers);
}
if (selectedFaultConfig.faultAbort() != null) {
abortStatus = determineFaultAbortStatus(selectedFaultConfig.faultAbort(), headers);
}
}
final String finalCluster = cluster;
final Long finalDelayNanos = delayNanos;
final Status finalAbortStatus = abortStatus;
final long hash = generateHash(selectedRoute.routeAction().hashPolicies(), asciiHeaders);
class ClusterSelectionInterceptor implements ClientInterceptor {
@Override
public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(
final MethodDescriptor<ReqT, RespT> method, CallOptions callOptions,
final Channel next) {
final CallOptions callOptionsForCluster =
callOptions.withOption(CLUSTER_SELECTION_KEY, finalCluster)
.withOption(RPC_HASH_KEY, hash);
Supplier<ClientCall<ReqT, RespT>> configApplyingCallSupplier =
new Supplier<ClientCall<ReqT, RespT>>() {
@Override
public ClientCall<ReqT, RespT> get() {
return new SimpleForwardingClientCall<ReqT, RespT>(
next.newCall(method, callOptionsForCluster)) {
@Override
public void start(Listener<RespT> listener, Metadata headers) {
listener = new SimpleForwardingClientCallListener<RespT>(listener) {
boolean committed;
@Override
public void onHeaders(Metadata headers) {
committed = true;
releaseCluster(finalCluster);
delegate().onHeaders(headers);
}
@Override
public void onClose(Status status, Metadata trailers) {
if (!committed) {
releaseCluster(finalCluster);
}
delegate().onClose(status, trailers);
}
};
delegate().start(listener, headers);
}
};
}
};
Executor callExecutor = callOptions.getExecutor();
if (callExecutor == null) { // This should never happen in practice because
// ManagedChannelImpl.ConfigSelectingClientCall always provides CallOptions with
// a callExecutor.
// TODO(https://github.com/grpc/grpc-java/issues/7868)
callExecutor = MoreExecutors.directExecutor();
}
if (finalDelayNanos != null && finalAbortStatus != null) {
return new ActiveFaultCountingClientCall<>(
new DelayInjectedCall<>(
finalDelayNanos, callExecutor, scheduler, callOptionsForCluster.getDeadline(),
Suppliers.ofInstance(
new FailingClientCall<ReqT, RespT>(finalAbortStatus, callExecutor))));
}
if (finalAbortStatus != null) {
return new ActiveFaultCountingClientCall<>(
new FailingClientCall<ReqT, RespT>(finalAbortStatus, callExecutor));
}
if (finalDelayNanos != null) {
return new ActiveFaultCountingClientCall<>(
new DelayInjectedCall<>(
finalDelayNanos, callExecutor, scheduler, callOptionsForCluster.getDeadline(),
configApplyingCallSupplier));
}
return configApplyingCallSupplier.get();
}
}
return
Result.newBuilder()
.setConfig(config)
.setInterceptor(new ClusterSelectionInterceptor())
.build();
}
private boolean retainCluster(String cluster) {
AtomicInteger refCount = clusterRefs.get(cluster);
if (refCount == null) {
return false;
}
int count;
do {
count = refCount.get();
if (count == 0) {
return false;
}
} while (!refCount.compareAndSet(count, count + 1));
return true;
}
private void releaseCluster(final String cluster) {
int count = clusterRefs.get(cluster).decrementAndGet();
if (count == 0) {
syncContext.execute(new Runnable() {
@Override
public void run() {
if (clusterRefs.get(cluster).get() == 0) {
clusterRefs.remove(cluster);
updateResolutionResult();
}
}
});
}
}
private long generateHash(List<HashPolicy> hashPolicies, Map<String, String> headers) {
Long hash = null;
for (HashPolicy policy : hashPolicies) {
Long newHash = null;
if (policy.type() == HashPolicy.Type.HEADER) {
if (headers.containsKey(policy.headerName())) {
String value = headers.get(policy.headerName());
if (policy.regEx() != null && policy.regExSubstitution() != null) {
value = policy.regEx().matcher(value).replaceAll(policy.regExSubstitution());
}
newHash = hashFunc.hashAsciiString(value);
}
} else if (policy.type() == HashPolicy.Type.CHANNEL_ID) {
newHash = hashFunc.hashLong(logId.getId());
}
if (newHash != null ) {
// Rotating the old value prevents duplicate hash rules from cancelling each other out
// and preserves all of the entropy.
long oldHash = hash != null ? ((hash << 1L) | (hash >> 63L)) : 0;
hash = oldHash ^ newHash;
}
// If the policy is a terminal policy and a hash has been generated, ignore
// the rest of the hash policies.
if (policy.isTerminal() && hash != null) {
break;
}
}
return hash == null ? random.nextLong() : hash;
}
@Nullable
private Long determineFaultDelayNanos(FaultDelay faultDelay, Metadata headers) {
Long delayNanos;
FractionalPercent fractionalPercent = faultDelay.percent();
if (faultDelay.headerDelay()) {
try {
int delayMillis = Integer.parseInt(headers.get(HEADER_DELAY_KEY));
delayNanos = TimeUnit.MILLISECONDS.toNanos(delayMillis);
String delayPercentageStr = headers.get(HEADER_DELAY_PERCENTAGE_KEY);
if (delayPercentageStr != null) {
int delayPercentage = Integer.parseInt(delayPercentageStr);
if (delayPercentage >= 0 && delayPercentage < fractionalPercent.numerator()) {
fractionalPercent =
FractionalPercent.create(delayPercentage, fractionalPercent.denominatorType());
}
}
} catch (NumberFormatException e) {
return null; // treated as header_delay not applicable
}
} else {
delayNanos = faultDelay.delayNanos();
}
if (random.nextInt(1_000_000) >= getRatePerMillion(fractionalPercent)) {
return null;
}
return delayNanos;
}
@Nullable
private Status determineFaultAbortStatus(FaultAbort faultAbort, Metadata headers) {
Status abortStatus = null;
FractionalPercent fractionalPercent = faultAbort.percent();
if (faultAbort.headerAbort()) {
try {
String grpcCodeStr = headers.get(HEADER_ABORT_GRPC_STATUS_KEY);
if (grpcCodeStr != null) {
int grpcCode = Integer.parseInt(grpcCodeStr);
abortStatus = Status.fromCodeValue(grpcCode);
}
String httpCodeStr = headers.get(HEADER_ABORT_HTTP_STATUS_KEY);
if (httpCodeStr != null) {
int httpCode = Integer.parseInt(httpCodeStr);
abortStatus = GrpcUtil.httpStatusToGrpcStatus(httpCode);
}
String abortPercentageStr = headers.get(HEADER_ABORT_PERCENTAGE_KEY);
if (abortPercentageStr != null) {
          int abortPercentage = Integer.parseInt(abortPercentageStr);
if (abortPercentage >= 0 && abortPercentage < fractionalPercent.numerator()) {
fractionalPercent =
FractionalPercent.create(abortPercentage, fractionalPercent.denominatorType());
}
}
} catch (NumberFormatException e) {
return null; // treated as header_abort not applicable
}
} else {
abortStatus = faultAbort.status();
}
if (random.nextInt(1_000_000) >= getRatePerMillion(fractionalPercent)) {
return null;
}
return abortStatus;
}
}
private static int getRatePerMillion(FractionalPercent percent) {
int numerator = percent.numerator();
FractionalPercent.DenominatorType type = percent.denominatorType();
switch (type) {
case TEN_THOUSAND:
numerator *= 100;
break;
case HUNDRED:
numerator *= 10_000;
break;
case MILLION:
default:
break;
}
if (numerator > 1_000_000 || numerator < 0) {
numerator = 1_000_000;
}
return numerator;
}
/**
* A forwarding client call that counts active fault injections.
*/
private final class ActiveFaultCountingClientCall<ReqT, RespT> extends
SimpleForwardingClientCall<ReqT, RespT> {
ActiveFaultCountingClientCall(ClientCall<ReqT, RespT> faultInjectedDelegate) {
super(faultInjectedDelegate);
activeFaultInjectedStreamCounter.incrementAndGet();
}
@Override
public void start(Listener<RespT> listener, Metadata headers) {
listener = new SimpleForwardingClientCallListener<RespT>(listener) {
@Override
public void onClose(Status status, Metadata trailers) {
delegate().onClose(status, trailers);
activeFaultInjectedStreamCounter.decrementAndGet();
}
};
delegate().start(listener, headers);
}
}
/** A {@link DelayedClientCall} with a fixed delay. */
private static final class DelayInjectedCall<ReqT, RespT> extends DelayedClientCall<ReqT, RespT> {
final Object lock = new Object();
ScheduledFuture<?> delayTask;
boolean cancelled;
DelayInjectedCall(
long delayNanos, Executor callExecutor, ScheduledExecutorService scheduler,
@Nullable Deadline deadline,
final Supplier<? extends ClientCall<ReqT, RespT>> callSupplier) {
super(callExecutor, scheduler, deadline);
ScheduledFuture<?> task = scheduler.schedule(
new Runnable() {
@Override
public void run() {
setCall(callSupplier.get());
}
},
delayNanos,
NANOSECONDS);
synchronized (lock) {
if (cancelled) {
task.cancel(false);
return;
}
delayTask = task;
}
}
@Override
protected void callCancelled() {
ScheduledFuture<?> savedDelayTask;
synchronized (lock) {
cancelled = true;
savedDelayTask = delayTask;
}
if (savedDelayTask != null) {
savedDelayTask.cancel(false);
}
}
}
/** An implementation of {@link ClientCall} that fails when started. */
private static final class FailingClientCall<ReqT, RespT> extends ClientCall<ReqT, RespT> {
final Status error;
final Executor callExecutor;
final Context context;
FailingClientCall(Status error, Executor callExecutor) {
this.error = error;
this.callExecutor = callExecutor;
this.context = Context.current();
}
@Override
public void start(final ClientCall.Listener<RespT> listener, Metadata headers) {
callExecutor.execute(
new Runnable() {
@Override
public void run() {
Context previous = context.attach();
try {
listener.onClose(error, new Metadata());
} finally {
context.detach(previous);
}
}
});
}
@Override
public void request(int numMessages) {}
@Override
public void cancel(String message, Throwable cause) {}
@Override
public void halfClose() {}
@Override
public void sendMessage(ReqT message) {}
}
@VisibleForTesting
static boolean matchRoute(RouteMatch routeMatch, String fullMethodName,
Map<String, String> headers, ThreadSafeRandom random) {
if (!matchPath(routeMatch.pathMatcher(), fullMethodName)) {
return false;
}
if (!matchHeaders(routeMatch.headerMatchers(), headers)) {
return false;
}
FractionMatcher fraction = routeMatch.fractionMatcher();
return fraction == null || random.nextInt(fraction.denominator()) < fraction.numerator();
}
@VisibleForTesting
static boolean matchPath(PathMatcher pathMatcher, String fullMethodName) {
if (pathMatcher.path() != null) {
return pathMatcher.caseSensitive()
? pathMatcher.path().equals(fullMethodName)
: pathMatcher.path().equalsIgnoreCase(fullMethodName);
} else if (pathMatcher.prefix() != null) {
return pathMatcher.caseSensitive()
? fullMethodName.startsWith(pathMatcher.prefix())
: fullMethodName.toLowerCase().startsWith(pathMatcher.prefix().toLowerCase());
}
return pathMatcher.regEx().matches(fullMethodName);
}
private static boolean matchHeaders(
List<HeaderMatcher> headerMatchers, Map<String, String> headers) {
for (HeaderMatcher headerMatcher : headerMatchers) {
if (!matchHeader(headerMatcher, headers.get(headerMatcher.name()))) {
return false;
}
}
return true;
}
@VisibleForTesting
static boolean matchHeader(HeaderMatcher headerMatcher, @Nullable String value) {
if (headerMatcher.present() != null) {
return (value == null) == headerMatcher.present().equals(headerMatcher.inverted());
}
if (value == null) {
return false;
}
boolean baseMatch;
if (headerMatcher.exactValue() != null) {
baseMatch = headerMatcher.exactValue().equals(value);
} else if (headerMatcher.safeRegEx() != null) {
baseMatch = headerMatcher.safeRegEx().matches(value);
} else if (headerMatcher.range() != null) {
long numValue;
try {
numValue = Long.parseLong(value);
baseMatch = numValue >= headerMatcher.range().start()
&& numValue <= headerMatcher.range().end();
} catch (NumberFormatException ignored) {
baseMatch = false;
}
} else if (headerMatcher.prefix() != null) {
baseMatch = value.startsWith(headerMatcher.prefix());
} else {
baseMatch = value.endsWith(headerMatcher.suffix());
}
return baseMatch != headerMatcher.inverted();
}
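  // Examples of the rules above: an exactValue "application/grpc" matcher matches only
  // that exact value; a range [10, 20] matcher matches "15" but not "25"; and a
  // present=true matcher with inverted=true matches only when the header is absent.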
private class ResolveState implements LdsResourceWatcher {
private final ConfigOrError emptyServiceConfig =
serviceConfigParser.parseServiceConfig(Collections.<String, Object>emptyMap());
private final ResolutionResult emptyResult =
ResolutionResult.newBuilder()
.setServiceConfig(emptyServiceConfig)
// let channel take action for no config selector
.build();
private boolean stopped;
private Set<String> existingClusters;
@Nullable
private String rdsResource;
@Nullable
private RdsResourceWatcher rdsWatcher;
private long httpMaxStreamDurationNano;
private boolean applyFaultInjection;
@Nullable
private HttpFault httpFilterFaultConfig;
@Override
public void onChanged(final LdsUpdate update) {
syncContext.execute(new Runnable() {
@Override
public void run() {
if (stopped) {
return;
}
logger.log(XdsLogLevel.INFO, "Receive LDS resource update: {0}", update);
httpMaxStreamDurationNano = update.httpMaxStreamDurationNano;
applyFaultInjection = update.hasFaultInjection;
httpFilterFaultConfig = applyFaultInjection ? update.httpFault : null;
List<VirtualHost> virtualHosts = update.virtualHosts;
String rdsName = update.rdsName;
if (rdsName != null && rdsName.equals(rdsResource)) {
return;
}
cleanUpRdsWatcher();
if (virtualHosts != null) {
updateRoutes(virtualHosts);
} else {
rdsResource = rdsName;
rdsWatcher = new RdsResourceWatcherImpl();
logger.log(XdsLogLevel.INFO, "Start watching RDS resource {0}", rdsResource);
xdsClient.watchRdsResource(rdsResource, rdsWatcher);
}
}
});
}
@Override
public void onError(final Status error) {
syncContext.execute(new Runnable() {
@Override
public void run() {
if (stopped) {
return;
}
logger.log(
XdsLogLevel.WARNING,
"Received error from xDS client {0}: {1}", xdsClient, error.getDescription());
listener.onError(error);
}
});
}
@Override
public void onResourceDoesNotExist(final String resourceName) {
syncContext.execute(new Runnable() {
@Override
public void run() {
if (stopped) {
return;
}
logger.log(XdsLogLevel.INFO, "LDS resource {0} unavailable", resourceName);
cleanUpRdsWatcher();
listener.onResult(emptyResult);
}
});
}
private void start() {
logger.log(XdsLogLevel.INFO, "Start watching LDS resource {0}", authority);
xdsClient.watchLdsResource(authority, this);
}
private void stop() {
logger.log(XdsLogLevel.INFO, "Stop watching LDS resource {0}", authority);
stopped = true;
cleanUpRdsWatcher();
xdsClient.cancelLdsResourceWatch(authority, this);
}
private void updateRoutes(List<VirtualHost> virtualHosts) {
VirtualHost virtualHost = findVirtualHostForHostName(virtualHosts, authority);
if (virtualHost == null) {
logger.log(XdsLogLevel.WARNING,
"Failed to find virtual host matching hostname {0}", authority);
listener.onResult(emptyResult);
return;
}
List<Route> routes = virtualHost.routes();
HttpFault faultConfig = httpFilterFaultConfig;
if (applyFaultInjection && virtualHost.httpFault() != null) {
faultConfig = virtualHost.httpFault();
}
Set<String> clusters = new HashSet<>();
for (Route route : routes) {
RouteAction action = route.routeAction();
if (action.cluster() != null) {
clusters.add(action.cluster());
} else if (action.weightedClusters() != null) {
          for (ClusterWeight weightedCluster : action.weightedClusters()) {
            clusters.add(weightedCluster.name());
}
}
}
Set<String> addedClusters =
existingClusters == null ? clusters : Sets.difference(clusters, existingClusters);
Set<String> deletedClusters =
existingClusters == null
? Collections.<String>emptySet() : Sets.difference(existingClusters, clusters);
existingClusters = clusters;
boolean shouldUpdateResult = false;
for (String cluster : addedClusters) {
if (clusterRefs.containsKey(cluster)) {
clusterRefs.get(cluster).incrementAndGet();
} else {
clusterRefs.put(cluster, new AtomicInteger(1));
shouldUpdateResult = true;
}
}
// Update service config to include newly added clusters.
if (shouldUpdateResult) {
updateResolutionResult();
}
// Make newly added clusters selectable by config selector and deleted clusters no longer
// selectable.
routingConfig = new RoutingConfig(
httpMaxStreamDurationNano, routes, applyFaultInjection, faultConfig);
shouldUpdateResult = false;
for (String cluster : deletedClusters) {
int count = clusterRefs.get(cluster).decrementAndGet();
if (count == 0) {
clusterRefs.remove(cluster);
shouldUpdateResult = true;
}
}
if (shouldUpdateResult) {
updateResolutionResult();
}
}
private void cleanUpRdsWatcher() {
if (rdsWatcher != null) {
logger.log(XdsLogLevel.INFO, "Stop watching RDS resource {0}", rdsResource);
xdsClient.cancelRdsResourceWatch(rdsResource, rdsWatcher);
rdsResource = null;
rdsWatcher = null;
}
}
private class RdsResourceWatcherImpl implements RdsResourceWatcher {
@Override
public void onChanged(final RdsUpdate update) {
syncContext.execute(new Runnable() {
@Override
public void run() {
if (RdsResourceWatcherImpl.this != rdsWatcher) {
return;
}
updateRoutes(update.virtualHosts);
}
});
}
@Override
public void onError(final Status error) {
syncContext.execute(new Runnable() {
@Override
public void run() {
if (RdsResourceWatcherImpl.this != rdsWatcher) {
return;
}
logger.log(
XdsLogLevel.WARNING,
"Received error from xDS client {0}: {1}", xdsClient, error.getDescription());
listener.onError(error);
}
});
}
@Override
public void onResourceDoesNotExist(final String resourceName) {
syncContext.execute(new Runnable() {
@Override
public void run() {
if (RdsResourceWatcherImpl.this != rdsWatcher) {
return;
}
logger.log(XdsLogLevel.INFO, "RDS resource {0} unavailable", resourceName);
listener.onResult(emptyResult);
}
});
}
}
}
/**
* VirtualHost-level configuration for request routing.
*/
private static class RoutingConfig {
private final long fallbackTimeoutNano;
private final List<Route> routes;
private final boolean applyFaultInjection;
@Nullable
private final HttpFault faultConfig;
private static final RoutingConfig empty =
new RoutingConfig(0L, Collections.<Route>emptyList(), false, null);
private RoutingConfig(
long fallbackTimeoutNano, List<Route> routes, boolean applyFaultInjection,
HttpFault faultConfig) {
this.fallbackTimeoutNano = fallbackTimeoutNano;
this.routes = routes;
this.applyFaultInjection = applyFaultInjection;
this.faultConfig = faultConfig;
}
}
}
| ["\"GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT\""] | [] | ["GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT"] | [] | ["GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT"] | java | 1 | 0 |
tests/providers/google/cloud/operators/test_kubernetes_engine.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import unittest
from unittest import mock
from unittest.mock import PropertyMock
import pytest
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator
from airflow.providers.google.cloud.operators.kubernetes_engine import (
GKECreateClusterOperator,
GKEDeleteClusterOperator,
GKEStartPodOperator,
)
TEST_GCP_PROJECT_ID = 'test-id'
PROJECT_LOCATION = 'test-location'
PROJECT_TASK_ID = 'test-task-id'
CLUSTER_NAME = 'test-cluster-name'
PROJECT_BODY = {'name': 'test-name'}
PROJECT_BODY_CREATE_DICT = {'name': 'test-name', 'initial_node_count': 1}
PROJECT_BODY_CREATE_CLUSTER = type("Cluster", (object,), {"name": "test-name", "initial_node_count": 1})()
TASK_NAME = 'test-task-name'
NAMESPACE = ('default',)
IMAGE = 'bash'
GCLOUD_COMMAND = "gcloud container clusters get-credentials {} --zone {} --project {}"
KUBE_ENV_VAR = 'KUBECONFIG'
FILE_NAME = '/tmp/mock_name'
class TestGoogleCloudPlatformContainerOperator(unittest.TestCase):
@parameterized.expand((body,) for body in [PROJECT_BODY_CREATE_DICT, PROJECT_BODY_CREATE_CLUSTER])
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_create_execute(self, body, mock_hook):
operator = GKECreateClusterOperator(
project_id=TEST_GCP_PROJECT_ID, location=PROJECT_LOCATION, body=body, task_id=PROJECT_TASK_ID
)
operator.execute(None)
mock_hook.return_value.create_cluster.assert_called_once_with(
cluster=body, project_id=TEST_GCP_PROJECT_ID
)
@parameterized.expand(
(body,)
for body in [
None,
{'missing_name': 'test-name', 'initial_node_count': 1},
{'name': 'test-name', 'missing_initial_node_count': 1},
type('Cluster', (object,), {'missing_name': 'test-name', 'initial_node_count': 1})(),
type('Cluster', (object,), {'name': 'test-name', 'missing_initial_node_count': 1})(),
]
)
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_create_execute_error_body(self, body, mock_hook):
with pytest.raises(AirflowException):
GKECreateClusterOperator(
project_id=TEST_GCP_PROJECT_ID, location=PROJECT_LOCATION, body=body, task_id=PROJECT_TASK_ID
)
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_create_execute_error_project_id(self, mock_hook):
with pytest.raises(AirflowException):
GKECreateClusterOperator(location=PROJECT_LOCATION, body=PROJECT_BODY, task_id=PROJECT_TASK_ID)
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_create_execute_error_location(self, mock_hook):
with pytest.raises(AirflowException):
GKECreateClusterOperator(
project_id=TEST_GCP_PROJECT_ID, body=PROJECT_BODY, task_id=PROJECT_TASK_ID
)
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_delete_execute(self, mock_hook):
operator = GKEDeleteClusterOperator(
project_id=TEST_GCP_PROJECT_ID,
name=CLUSTER_NAME,
location=PROJECT_LOCATION,
task_id=PROJECT_TASK_ID,
)
operator.execute(None)
mock_hook.return_value.delete_cluster.assert_called_once_with(
name=CLUSTER_NAME, project_id=TEST_GCP_PROJECT_ID
)
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_delete_execute_error_project_id(self, mock_hook):
with pytest.raises(AirflowException):
GKEDeleteClusterOperator(location=PROJECT_LOCATION, name=CLUSTER_NAME, task_id=PROJECT_TASK_ID)
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_delete_execute_error_cluster_name(self, mock_hook):
with pytest.raises(AirflowException):
GKEDeleteClusterOperator(
project_id=TEST_GCP_PROJECT_ID, location=PROJECT_LOCATION, task_id=PROJECT_TASK_ID
)
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_delete_execute_error_location(self, mock_hook):
with pytest.raises(AirflowException):
GKEDeleteClusterOperator(
project_id=TEST_GCP_PROJECT_ID, name=CLUSTER_NAME, task_id=PROJECT_TASK_ID
)
class TestGKEPodOperator(unittest.TestCase):
def setUp(self):
self.gke_op = GKEStartPodOperator(
project_id=TEST_GCP_PROJECT_ID,
location=PROJECT_LOCATION,
cluster_name=CLUSTER_NAME,
task_id=PROJECT_TASK_ID,
name=TASK_NAME,
namespace=NAMESPACE,
image=IMAGE,
)
def test_template_fields(self):
assert set(KubernetesPodOperator.template_fields).issubset(GKEStartPodOperator.template_fields)
@mock.patch.dict(os.environ, {})
@mock.patch(
"airflow.hooks.base.BaseHook.get_connections",
return_value=[
Connection(
extra=json.dumps(
{"extra__google_cloud_platform__keyfile_dict": '{"private_key": "r4nd0m_k3y"}'}
)
)
],
)
@mock.patch('airflow.providers.cncf.kubernetes.operators.kubernetes_pod.KubernetesPodOperator.execute')
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GoogleBaseHook')
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.execute_in_subprocess')
@mock.patch('tempfile.NamedTemporaryFile')
def test_execute(self, file_mock, mock_execute_in_subprocess, mock_gcp_hook, exec_mock, get_con_mock):
type(file_mock.return_value.__enter__.return_value).name = PropertyMock(
side_effect=[FILE_NAME, '/path/to/new-file']
)
self.gke_op.execute(None)
mock_gcp_hook.return_value.provide_authorized_gcloud.assert_called_once()
mock_execute_in_subprocess.assert_called_once_with(
[
'gcloud',
'container',
'clusters',
'get-credentials',
CLUSTER_NAME,
'--zone',
PROJECT_LOCATION,
'--project',
TEST_GCP_PROJECT_ID,
]
)
assert self.gke_op.config_file == FILE_NAME
def test_config_file_throws_error(self):
with pytest.raises(AirflowException):
GKEStartPodOperator(
project_id=TEST_GCP_PROJECT_ID,
location=PROJECT_LOCATION,
cluster_name=CLUSTER_NAME,
task_id=PROJECT_TASK_ID,
name=TASK_NAME,
namespace=NAMESPACE,
image=IMAGE,
config_file="/path/to/alternative/kubeconfig",
)
@mock.patch.dict(os.environ, {})
@mock.patch(
"airflow.hooks.base.BaseHook.get_connections",
return_value=[
Connection(
extra=json.dumps(
{"extra__google_cloud_platform__keyfile_dict": '{"private_key": "r4nd0m_k3y"}'}
)
)
],
)
@mock.patch('airflow.providers.cncf.kubernetes.operators.kubernetes_pod.KubernetesPodOperator.execute')
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GoogleBaseHook')
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.execute_in_subprocess')
@mock.patch('tempfile.NamedTemporaryFile')
def test_execute_with_internal_ip(
self, file_mock, mock_execute_in_subprocess, mock_gcp_hook, exec_mock, get_con_mock
):
self.gke_op.use_internal_ip = True
type(file_mock.return_value.__enter__.return_value).name = PropertyMock(
side_effect=[FILE_NAME, '/path/to/new-file']
)
self.gke_op.execute(None)
mock_gcp_hook.return_value.provide_authorized_gcloud.assert_called_once()
mock_execute_in_subprocess.assert_called_once_with(
[
'gcloud',
'container',
'clusters',
'get-credentials',
CLUSTER_NAME,
'--zone',
PROJECT_LOCATION,
'--project',
TEST_GCP_PROJECT_ID,
'--internal-ip',
]
)
assert self.gke_op.config_file == FILE_NAME
| [] | [] | [] | [] | [] | python | 0 | 0 |
data/scripts/templates/object/tangible/hair/trandoshan/shared_hair_trandoshan_male_s04.py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/hair/trandoshan/shared_hair_trandoshan_male_s04.iff"
result.attribute_template_id = -1
result.stfName("hair_name","ridges")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| [] | [] | [] | [] | [] | python | null | null |
quantumclient/client.py | # Copyright 2012 OpenStack LLC.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
try:
import json
except ImportError:
import simplejson as json
import logging
import os
import urlparse
# Python 2.5 compat fix
if not hasattr(urlparse, 'parse_qsl'):
import cgi
urlparse.parse_qsl = cgi.parse_qsl
import httplib2
from quantumclient.common import exceptions
from quantumclient.common import utils
_logger = logging.getLogger(__name__)
if 'QUANTUMCLIENT_DEBUG' in os.environ and os.environ['QUANTUMCLIENT_DEBUG']:
ch = logging.StreamHandler()
_logger.setLevel(logging.DEBUG)
_logger.addHandler(ch)
class ServiceCatalog(object):
"""Helper methods for dealing with a Keystone Service Catalog."""
def __init__(self, resource_dict):
self.catalog = resource_dict
def get_token(self):
"""Fetch token details fron service catalog"""
token = {'id': self.catalog['access']['token']['id'],
'expires': self.catalog['access']['token']['expires'], }
try:
token['user_id'] = self.catalog['access']['user']['id']
token['tenant_id'] = (
self.catalog['access']['token']['tenant']['id'])
        except KeyError:
# just leave the tenant and user out if it doesn't exist
pass
return token
def url_for(self, attr=None, filter_value=None,
service_type='network', endpoint_type='adminURL'):
"""Fetch the admin URL from the Quantum service for
a particular endpoint attribute. If none given, return
the first. See tests for sample service catalog."""
catalog = self.catalog['access'].get('serviceCatalog', [])
matching_endpoints = []
for service in catalog:
if service['type'] != service_type:
continue
endpoints = service['endpoints']
for endpoint in endpoints:
if not filter_value or endpoint.get(attr) == filter_value:
matching_endpoints.append(endpoint)
if not matching_endpoints:
raise exceptions.EndpointNotFound()
elif len(matching_endpoints) > 1:
raise exceptions.AmbiguousEndpoints(message=matching_endpoints)
else:
return matching_endpoints[0][endpoint_type]
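    # Typical usage (mirrors HTTPClient._extract_service_catalog below), where
    # the argument is the parsed Keystone auth response body:
    #   catalog = ServiceCatalog(auth_response_body)
    #   url = catalog.url_for(attr='region', filter_value=region_name,
    #                         endpoint_type='adminURL')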
class HTTPClient(httplib2.Http):
"""Handles the REST calls and responses, include authn"""
USER_AGENT = 'python-quantumclient'
def __init__(self, username=None, tenant_name=None,
password=None, auth_url=None,
token=None, region_name=None, timeout=None,
endpoint_url=None, insecure=False,
auth_strategy='keystone', **kwargs):
super(HTTPClient, self).__init__(timeout=timeout)
self.username = username
self.tenant_name = tenant_name
self.password = password
self.auth_url = auth_url.rstrip('/') if auth_url else None
self.region_name = region_name
self.auth_token = token
self.content_type = 'application/json'
self.endpoint_url = endpoint_url
self.auth_strategy = auth_strategy
# httplib2 overrides
self.force_exception_to_status_code = True
self.disable_ssl_certificate_validation = insecure
def _cs_request(self, *args, **kwargs):
kargs = {}
kargs.setdefault('headers', kwargs.get('headers', {}))
kargs['headers']['User-Agent'] = self.USER_AGENT
if 'content_type' in kwargs:
kargs['headers']['Content-Type'] = kwargs['content_type']
kargs['headers']['Accept'] = kwargs['content_type']
else:
kargs['headers']['Content-Type'] = self.content_type
kargs['headers']['Accept'] = self.content_type
if 'body' in kwargs:
kargs['body'] = kwargs['body']
resp, body = self.request(*args, **kargs)
utils.http_log(_logger, args, kargs, resp, body)
status_code = self.get_status_code(resp)
if status_code == 401:
raise exceptions.Unauthorized(message=body)
elif status_code == 403:
raise exceptions.Forbidden(message=body)
return resp, body
def do_request(self, url, method, **kwargs):
if not self.endpoint_url:
self.authenticate()
# Perform the request once. If we get a 401 back then it
# might be because the auth token expired, so try to
# re-authenticate and try again. If it still fails, bail.
try:
if self.auth_token:
kwargs.setdefault('headers', {})
kwargs['headers']['X-Auth-Token'] = self.auth_token
resp, body = self._cs_request(self.endpoint_url + url, method,
**kwargs)
return resp, body
        except exceptions.Unauthorized:
            # The token may have expired: re-authenticate once and retry
            # with the refreshed token. If we have no auth_url to
            # re-authenticate against, give up and propagate the error.
            if not self.auth_url:
                raise
            self.authenticate()
            kwargs.setdefault('headers', {})
            kwargs['headers']['X-Auth-Token'] = self.auth_token
            resp, body = self._cs_request(self.endpoint_url + url, method,
                                          **kwargs)
            return resp, body
def _extract_service_catalog(self, body):
""" Set the client's service catalog from the response data. """
self.service_catalog = ServiceCatalog(body)
try:
sc = self.service_catalog.get_token()
self.auth_token = sc['id']
self.auth_tenant_id = sc.get('tenant_id')
self.auth_user_id = sc.get('user_id')
except KeyError:
raise exceptions.Unauthorized()
self.endpoint_url = self.service_catalog.url_for(
attr='region', filter_value=self.region_name,
endpoint_type='adminURL')
def authenticate(self):
if self.auth_strategy != 'keystone':
raise exceptions.Unauthorized(message='unknown auth strategy')
body = {'auth': {'passwordCredentials':
{'username': self.username,
'password': self.password, },
'tenantName': self.tenant_name, }, }
token_url = self.auth_url + "/tokens"
# Make sure we follow redirects when trying to reach Keystone
tmp_follow_all_redirects = self.follow_all_redirects
self.follow_all_redirects = True
try:
resp, body = self._cs_request(token_url, "POST",
body=json.dumps(body),
content_type="application/json")
finally:
self.follow_all_redirects = tmp_follow_all_redirects
status_code = self.get_status_code(resp)
if status_code != 200:
raise exceptions.Unauthorized(message=body)
if body:
try:
body = json.loads(body)
except ValueError:
pass
else:
body = None
self._extract_service_catalog(body)
def get_status_code(self, response):
"""
Returns the integer status code from the response, which
can be either a Webob.Response (used in testing) or httplib.Response
"""
if hasattr(response, 'status_int'):
return response.status_int
else:
return response.status
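# Hedged usage sketch (not part of the original module); the Keystone URL,
# credentials, and resource path below are illustrative placeholders only.
def _example_list_networks():
    client = HTTPClient(username='admin', password='secret',
                        tenant_name='demo',
                        auth_url='http://127.0.0.1:5000/v2.0')
    # do_request() authenticates lazily when endpoint_url is unset, stores the
    # token found in the service catalog, then issues the REST call with it.
    resp, body = client.do_request('/v2.0/networks.json', 'GET')
    return resp, body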
| [] | [] | ["QUANTUMCLIENT_DEBUG"] | [] | ["QUANTUMCLIENT_DEBUG"] | python | 1 | 0 |
examples/fsqlalchemy1/app.py | """
Copyright 2019-2020 by J. Christopher Wagner (jwag). All rights reserved.
:license: MIT, see LICENSE for more details.
Very simple application.
Uses built-in models.
Shows using roles and permissions to protect endpoints.
You can run the flask cli against this as well (once you have first created a
real DB) (from top level directory):
PYTHONPATH=. SQLALCHEMY_DATABASE_URI="sqlite:////var/tmp/test.db" \
FLASK_APP=examples/fsqlalchemy1/app.py \
flask users create -a [email protected]
"""
import os
from flask import Flask, abort, current_app, render_template_string
from flask.json import JSONEncoder
from flask_sqlalchemy import SQLAlchemy
from flask_babel import Babel
from flask_security import (
Security,
SQLAlchemyUserDatastore,
auth_required,
current_user,
hash_password,
permissions_accepted,
permissions_required,
roles_accepted,
)
from flask_security.models import fsqla_v2 as fsqla
# Create app
app = Flask(__name__)
app.config["DEBUG"] = True
# generated using: secrets.token_urlsafe()
app.config["SECRET_KEY"] = "pf9Wkove4IKEAXvy-cQkeDPhv9Cb3Ag-wyJILbq_dFw"
app.config["SECURITY_PASSWORD_HASH"] = "argon2"
# argon2 uses double hashing by default - so provide key.
# For python3: secrets.SystemRandom().getrandbits(128)
app.config["SECURITY_PASSWORD_SALT"] = "146585145368132386173505678016728509634"
# Take password complexity seriously
app.config["SECURITY_PASSWORD_COMPLEXITY_CHECKER"] = "zxcvbn"
# Allow registration of new users without confirmation
app.config["SECURITY_REGISTERABLE"] = True
app.config["SQLALCHEMY_DATABASE_URI"] = os.environ.get(
"SQLALCHEMY_DATABASE_URI", "sqlite://"
)
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# As of Flask-SQLAlchemy 2.4.0 it is easy to pass in options directly to the
# underlying engine. This option makes sure that DB connections from the pool
# are still valid. Important for entire application since many DBaaS options
# automatically close idle connections.
app.config["SQLALCHEMY_ENGINE_OPTIONS"] = {"pool_pre_ping": True}
app.json_encoder = JSONEncoder
# Create database connection object
db = SQLAlchemy(app)
# Define models - for this example - we change the default table names
fsqla.FsModels.set_db_info(db, user_table_name="myuser", role_table_name="myrole")
class Role(db.Model, fsqla.FsRoleMixin):
__tablename__ = "myrole"
pass
class User(db.Model, fsqla.FsUserMixin):
__tablename__ = "myuser"
blogs = db.relationship("Blog", backref="user", lazy="dynamic")
pass
class Blog(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("myuser.id"), nullable=False)
title = db.Column(db.Text)
text = db.Column(db.UnicodeText)
# Setup Flask-Security
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
app.security = Security(app, user_datastore)
# Setup Babel - not strictly necessary but since our virtualenv has Flask-Babel
# we need to initialize it
Babel(app)
# Set this so unit tests can mock out.
app.blog_cls = Blog
# Create users and roles (and first blog!)
@app.before_first_request
def create_users():
if current_app.testing:
return
db.create_all()
user_datastore.find_or_create_role(
name="admin",
permissions={"admin-read", "admin-write", "user-read", "user-write"},
)
user_datastore.find_or_create_role(
name="monitor", permissions={"admin-read", "user-read"}
)
user_datastore.find_or_create_role(
name="user", permissions={"user-read", "user-write"}
)
user_datastore.find_or_create_role(name="reader", permissions={"user-read"})
if not user_datastore.find_user(email="[email protected]"):
user_datastore.create_user(
email="[email protected]", password=hash_password("password"), roles=["admin"]
)
if not user_datastore.find_user(email="[email protected]"):
user_datastore.create_user(
email="[email protected]", password=hash_password("password"), roles=["monitor"]
)
real_user = user_datastore.find_user(email="[email protected]")
if not real_user:
real_user = user_datastore.create_user(
email="[email protected]", password=hash_password("password"), roles=["user"]
)
if not user_datastore.find_user(email="[email protected]"):
user_datastore.create_user(
email="[email protected]", password=hash_password("password"), roles=["reader"]
)
# create initial blog
blog = app.blog_cls(
title="First Blog", text="my first blog is short", user=real_user
)
db.session.add(blog)
db.session.commit()
print(f"First blog id {blog.id}")
# Views
# Note that we always add @auth_required so that if a client isn't logged in
# we will get a proper '401' and redirected to login page.
@app.route("/")
@auth_required()
def home():
return render_template_string("Hello {{ current_user.email }}")
@app.route("/admin")
@auth_required()
@permissions_accepted("admin-read", "admin-write")
def admin():
return render_template_string(
"Hello on admin page. Current user {} password is {}".format(
current_user.email, current_user.password
)
)
@app.route("/ops")
@auth_required()
@roles_accepted("monitor")
def monitor():
# Example of using just a role. Note that 'admin' can't access this
# since it doesn't have the 'monitor' role - even though it has
# all the permissions that the 'monitor' role has.
return render_template_string("Hello OPS")
@app.route("/blog/<bid>", methods=["GET", "POST"])
@auth_required()
@permissions_required("user-write")
def update_blog(bid):
# Yes caller has write permission - but do they OWN this blog?
blog = current_app.blog_cls.query.get(bid)
if not blog:
abort(404)
if current_user != blog.user:
abort(403)
return render_template_string("Yes, {{ current_user.email }} can update blog")
@app.route("/myblogs", methods=["GET"])
@auth_required()
@permissions_accepted("user-read")
def list_my_blogs():
blogs = current_user.blogs
blist = ""
cnt = 0
for blog in blogs:
blist += f" {blog.title}"
cnt += 1
if not blogs:
abort(404)
return render_template_string(f"Found {cnt} of yours with titles {blist}")
if __name__ == "__main__":
app.run(port=5003)
| [] | [] | ["SQLALCHEMY_DATABASE_URI"] | [] | ["SQLALCHEMY_DATABASE_URI"] | python | 1 | 0 |
aliauth/auth.go | package aliauth
import (
"os"
"encoding/json"
"log"
)
//Configuration struct for representing Ali-cloud credentials
type Configuration struct {
AliAccessKeyID string
AliAccessKeySecret string
}
//Config from alicloudconfig.json
var Config Configuration
//LoadConfig loads the Ali-cloud config from alicloudconfig.json or environment variables
func LoadConfig() {
// Read from file first.
var home string = os.Getenv("HOME")
file, _ := os.Open(home + "/alicloudconfig.json")
// Defer the closing of our jsonFile so that we can parse it later on
defer file.Close()
// We initialize Configuration struct
decoder := json.NewDecoder(file)
Config = Configuration{}
_ = decoder.Decode(&Config)
if Config.AliAccessKeyID == "" || Config.AliAccessKeySecret == "" {
// If alicloudconfig.json doesn't exist, look for credentials as environment variables.
Config.AliAccessKeyID = os.Getenv("AliAccessKeyID")
Config.AliAccessKeySecret = os.Getenv("AliAccessKeySecret")
if Config.AliAccessKeyID == "" || Config.AliAccessKeySecret == "" {
log.Fatalln("Cannot Get Ali access key and secret key")
}
}
}
| ["\"HOME\"", "\"AliAccessKeyID\"", "\"AliAccessKeySecret\""] | [] | ["HOME", "AliAccessKeyID", "AliAccessKeySecret"] | [] | ["HOME", "AliAccessKeyID", "AliAccessKeySecret"] | go | 3 | 0 |
tests/query_test/test_insert_parquet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Targeted Impala insert tests
import os
from collections import namedtuple
from datetime import (datetime, date)
from decimal import Decimal
from subprocess import check_call
from parquet.ttypes import ColumnOrder, SortingColumn, TypeDefinedOrder, ConvertedType
from tests.common.environ import impalad_basedir
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.parametrize import UniqueDatabase
from tests.common.skip import (SkipIfEC, SkipIfIsilon, SkipIfLocal, SkipIfS3, SkipIfABFS,
SkipIfADLS)
from tests.common.test_dimensions import create_exec_option_dimension
from tests.common.test_result_verifier import verify_query_result_is_equal
from tests.common.test_vector import ImpalaTestDimension
from tests.util.filesystem_utils import get_fs_path
from tests.util.get_parquet_metadata import (decode_stats_value,
get_parquet_metadata_from_hdfs_folder)
PARQUET_CODECS = ['none', 'snappy', 'gzip', 'zstd', 'lz4']
IMPALA_HOME = os.environ['IMPALA_HOME']
class RoundFloat():
"""Class to compare floats after rounding them to a specified number of digits. This
can be used in scenarios where floating point precision is an issue.
"""
def __init__(self, value, num_digits):
self.value = value
self.num_digits = num_digits
def __eq__(self, numeral):
"""Compares this objects's value to a numeral after rounding it."""
return round(self.value, self.num_digits) == round(numeral, self.num_digits)
class TimeStamp():
"""Class to construct timestamps with a default format specifier."""
def __init__(self, value):
# This member must be called 'timetuple'. Only if this class has a member called
# 'timetuple' will the datetime __eq__ function forward an unknown equality check to
# this method by returning NotImplemented:
# https://docs.python.org/2/library/datetime.html#datetime.datetime
self.timetuple = datetime.strptime(value, '%Y-%m-%d %H:%M:%S.%f')
def __eq__(self, other_timetuple):
"""Compares this objects's value to another timetuple."""
return self.timetuple == other_timetuple
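# Hedged illustration of the 'timetuple' trick described above: because
# datetime's equality check defers (returns NotImplemented) for objects that
# expose a 'timetuple' attribute, Python falls back to TimeStamp.__eq__, so
# either operand order works, e.g.
#   datetime(2009, 1, 1) == TimeStamp('2009-01-01 00:00:00.0')   # True
#   TimeStamp('2009-01-01 00:00:00.0') == datetime(2009, 1, 1)   # True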
class Date():
"""Class to compare dates specified as year-month-day to dates specified as days since
epoch.
"""
def __init__(self, year, month, day):
self.days_since_epoch = (date(year, month, day) - date(1970, 1, 1)).days
  def __eq__(self, other_days_since_epoch):
    return self.days_since_epoch == other_days_since_epoch
ColumnStats = namedtuple('ColumnStats', ['name', 'min', 'max', 'null_count'])
# Test a smaller parquet file size as well
# TODO: these tests take a while so we don't want to go through too many sizes but
# we should in more exhaustive testing
PARQUET_FILE_SIZES = [0, 32 * 1024 * 1024]
class TestInsertParquetQueries(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'tpch'
@classmethod
def add_test_dimensions(cls):
super(TestInsertParquetQueries, cls).add_test_dimensions()
# Fix the exec_option vector to have a single value. This is needed should we decide
# to run the insert tests in parallel (otherwise there will be two tests inserting
# into the same table at the same time for the same file format).
# TODO: When we do decide to run these tests in parallel we could create unique temp
# tables for each test case to resolve the concurrency problems.
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=[0], disable_codegen_options=[False], batch_sizes=[0],
sync_ddl=[1]))
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension("compression_codec", *PARQUET_CODECS))
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension("file_size", *PARQUET_FILE_SIZES))
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format == 'parquet')
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').compression_codec == 'none')
@SkipIfEC.oom
@SkipIfLocal.multiple_impalad
@UniqueDatabase.parametrize(sync_ddl=True)
def test_insert_parquet(self, vector, unique_database):
vector.get_value('exec_option')['PARQUET_FILE_SIZE'] = \
vector.get_value('file_size')
vector.get_value('exec_option')['COMPRESSION_CODEC'] = \
vector.get_value('compression_codec')
self.run_test_case('insert_parquet', vector, unique_database, multiple_impalad=True)
class TestParquetQueriesMultiCodecs(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestParquetQueriesMultiCodecs, cls).add_test_dimensions()
# Fix the exec_option vector to have a single value.
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=[0], disable_codegen_options=[False], batch_sizes=[0],
sync_ddl=[1]))
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format == 'parquet')
@UniqueDatabase.parametrize(sync_ddl=True)
def test_insert_parquet_multi_codecs(self, vector, unique_database):
# Tests that parquet files are written/read correctly when using multiple codecs
self.run_test_case('QueryTest/insert_parquet_multi_codecs', vector, unique_database,
multiple_impalad=True)
base_table = "{0}.{1}".format(unique_database, "t1_default")
test_table = "{0}.{1}".format(unique_database, "t1_zstd_gzip")
# select all rows and compare the data in base_table and test_table
base_result = self.execute_query("select * from {0} order by c3".format(base_table))
test_result = self.execute_query("select * from {0} order by c3".format(test_table))
verify_query_result_is_equal(test_result.data, base_result.data)
class TestInsertParquetInvalidCodec(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestInsertParquetInvalidCodec, cls).add_test_dimensions()
# Fix the exec_option vector to have a single value.
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=[0], disable_codegen_options=[False], batch_sizes=[0],
sync_ddl=[1]))
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension("compression_codec", 'bzip2'))
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format == 'parquet')
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').compression_codec == 'none')
@SkipIfLocal.multiple_impalad
def test_insert_parquet_invalid_codec(self, vector):
vector.get_value('exec_option')['COMPRESSION_CODEC'] = \
vector.get_value('compression_codec')
self.run_test_case('QueryTest/insert_parquet_invalid_codec', vector,
multiple_impalad=True)
class TestInsertParquetVerifySize(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'tpch'
@classmethod
def add_test_dimensions(cls):
super(TestInsertParquetVerifySize, cls).add_test_dimensions()
# Fix the exec_option vector to have a single value.
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=[0], disable_codegen_options=[False], batch_sizes=[0],
sync_ddl=[1]))
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format == 'parquet')
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').compression_codec == 'none')
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension("compression_codec", *PARQUET_CODECS))
@SkipIfIsilon.hdfs_block_size
@SkipIfLocal.hdfs_client
def test_insert_parquet_verify_size(self, vector, unique_database):
# Test to verify that the result file size is close to what we expect.
tbl_name = "parquet_insert_size"
fq_tbl_name = unique_database + "." + tbl_name
location = get_fs_path("test-warehouse/{0}.db/{1}/"
.format(unique_database, tbl_name))
create = ("create table {0} like tpch_parquet.orders stored as parquet"
.format(fq_tbl_name, location))
query = "insert overwrite {0} select * from tpch.orders".format(fq_tbl_name)
block_size = 40 * 1024 * 1024
self.execute_query(create)
vector.get_value('exec_option')['PARQUET_FILE_SIZE'] = block_size
vector.get_value('exec_option')['COMPRESSION_CODEC'] =\
vector.get_value('compression_codec')
vector.get_value('exec_option')['num_nodes'] = 1
self.execute_query(query, vector.get_value('exec_option'))
    # Get the files in hdfs and verify. There can be at most 1 file that is smaller
    # than the block_size. The rest should be within 80% of it and not over.
found_small_file = False
sizes = self.filesystem_client.get_all_file_sizes(location)
for size in sizes:
assert size < block_size, "File size greater than expected.\
Expected: {0}, Got: {1}".format(block_size, size)
if size < block_size * 0.80:
assert not found_small_file
found_small_file = True
class TestHdfsParquetTableWriter(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestHdfsParquetTableWriter, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format == 'parquet')
def test_def_level_encoding(self, vector, unique_database, tmpdir):
"""IMPALA-3376: Tests that parquet files are written to HDFS correctly by generating a
parquet table and running the parquet-reader tool on it, which performs sanity
checking, such as that the correct number of definition levels were encoded.
"""
table_name = "test_hdfs_parquet_table_writer"
qualified_table_name = "%s.%s" % (unique_database, table_name)
self.execute_query("create table %s stored as parquet as select l_linenumber from "
"tpch_parquet.lineitem limit 180000" % qualified_table_name)
hdfs_file = get_fs_path('/test-warehouse/%s.db/%s/*.parq'
% (unique_database, table_name))
check_call(['hdfs', 'dfs', '-copyToLocal', hdfs_file, tmpdir.strpath])
for root, subdirs, files in os.walk(tmpdir.strpath):
for f in files:
if not f.endswith('parq'):
continue
check_call([os.path.join(IMPALA_HOME, "bin/run-binary.sh"),
os.path.join(impalad_basedir, 'util/parquet-reader'), '--file',
os.path.join(tmpdir.strpath, str(f))])
def test_sorting_columns(self, vector, unique_database, tmpdir):
"""Tests that RowGroup::sorting_columns gets populated when the table has SORT BY
columns."""
source_table = "functional_parquet.alltypessmall"
target_table = "test_write_sorting_columns"
qualified_target_table = "{0}.{1}".format(unique_database, target_table)
hdfs_path = get_fs_path("/test-warehouse/{0}.db/{1}/".format(unique_database,
target_table))
# Create table
query = "create table {0} sort by (int_col, id) like {1} stored as parquet".format(
qualified_target_table, source_table)
self.execute_query(query)
# Insert data
query = ("insert into {0} partition(year, month) select * from {1}").format(
qualified_target_table, source_table)
self.execute_query(query)
# Download hdfs files and extract rowgroup metadata
file_metadata_list = get_parquet_metadata_from_hdfs_folder(hdfs_path, tmpdir.strpath)
row_groups = []
for file_metadata in file_metadata_list:
row_groups.extend(file_metadata.row_groups)
# Verify that the files have the sorted_columns set
expected = [SortingColumn(4, False, False), SortingColumn(0, False, False)]
for row_group in row_groups:
assert row_group.sorting_columns == expected
def test_set_column_orders(self, vector, unique_database, tmpdir):
"""Tests that the Parquet writers set FileMetaData::column_orders."""
source_table = "functional_parquet.alltypessmall"
target_table = "test_set_column_orders"
qualified_target_table = "{0}.{1}".format(unique_database, target_table)
hdfs_path = get_fs_path("/test-warehouse/{0}.db/{1}/".format(unique_database,
target_table))
# Create table
query = "create table {0} like {1} stored as parquet".format(qualified_target_table,
source_table)
self.execute_query(query)
# Insert data
query = ("insert into {0} partition(year, month) select * from {1}").format(
qualified_target_table, source_table)
self.execute_query(query)
# Download hdfs files and verify column orders
file_metadata_list = get_parquet_metadata_from_hdfs_folder(hdfs_path, tmpdir.strpath)
expected_col_orders = [ColumnOrder(TYPE_ORDER=TypeDefinedOrder())] * 11
for file_metadata in file_metadata_list:
assert file_metadata.column_orders == expected_col_orders
def test_read_write_integer_logical_types(self, vector, unique_database, tmpdir):
"""IMPALA-5052: Read and write signed integer parquet logical types
This test creates a src_tbl like a parquet file. The parquet file was generated
to have columns with different signed integer logical types. The test verifies
    that the parquet file written by the hdfs parquet table writer using the generated
file has the same column type metadata as the generated one."""
hdfs_path = (os.environ['DEFAULT_FS'] + "/test-warehouse/{0}.db/"
"signed_integer_logical_types.parquet").format(unique_database)
self.filesystem_client.copy_from_local(os.environ['IMPALA_HOME'] +
'/testdata/data/signed_integer_logical_types.parquet', hdfs_path)
# Create table with signed integer logical types
src_tbl = "{0}.{1}".format(unique_database, "read_write_logical_type_src")
create_tbl_stmt = """create table {0} like parquet "{1}"
stored as parquet""".format(src_tbl, hdfs_path)
result = self.execute_query_expect_success(self.client, create_tbl_stmt)
# Check to see if the src_tbl column types matches the schema of the parquet
# file from which it was generated
result_src = self.execute_query_expect_success(self.client, "describe %s" %src_tbl)
for line in result_src.data:
line_split = line.split()
if line_split[0] == "id":
assert line_split[1] == 'int'
elif line_split[0] == "tinyint_col":
assert line_split[1] == 'tinyint'
elif line_split[0] == "smallint_col":
assert line_split[1] == 'smallint'
elif line_split[0] == "int_col":
assert line_split[1] == 'int'
else:
assert line_split[0] == 'bigint_col' and line_split[1] == 'bigint'
# Insert values in this table
insert_stmt = "insert into table {0} values(1, 2, 3, 4, 5)".format(src_tbl)
result = self.execute_query_expect_success(self.client, insert_stmt)
# To test the integer round tripping, a new dst_tbl is created by using the parquet
# file written by the src_tbl and running the following tests -
# 1. inserting same values into src and dst table and reading it back and comparing
# them.
# 2. Ensuring that the column types in dst_tbl matches the column types in the
# schema of the parquet file that was used to generate the src_tbl
result = self.execute_query_expect_success(self.client, "show files in %s" %src_tbl)
hdfs_path = result.data[0].split("\t")[0]
dst_tbl = "{0}.{1}".format(unique_database, "read_write_logical_type_dst")
create_tbl_stmt = 'create table {0} like parquet "{1}"'.format(dst_tbl, hdfs_path)
result = self.execute_query_expect_success(self.client, create_tbl_stmt)
result_dst = self.execute_query_expect_success(self.client, "describe %s" % dst_tbl)
for line in result_dst.data:
line_split = line.split()
if line_split[0] == "id":
assert line_split[1] == 'int'
elif line_split[0] == "tinyint_col":
assert line_split[1] == 'tinyint'
elif line_split[0] == "smallint_col":
assert line_split[1] == 'smallint'
elif line_split[0] == "int_col":
assert line_split[1] == 'int'
else:
assert line_split[0] == 'bigint_col' and line_split[1] == 'bigint'
insert_stmt = "insert into table {0} values(1, 2, 3, 4, 5)".format(dst_tbl)
self.execute_query_expect_success(self.client, insert_stmt)
# Check that the values inserted are same in both src and dst tables
result_src = self.execute_query_expect_success(self.client, "select * from %s"
% src_tbl)
result_dst = self.execute_query_expect_success(self.client, "select * from %s"
% dst_tbl)
assert result_src.data == result_dst.data
def _ctas_and_get_metadata(self, vector, unique_database, tmp_dir, source_table,
table_name="test_hdfs_parquet_table_writer"):
"""CTAS 'source_table' into a Parquet table and returns its Parquet metadata."""
qualified_table_name = "{0}.{1}".format(unique_database, table_name)
hdfs_path = get_fs_path('/test-warehouse/{0}.db/{1}/'.format(unique_database,
table_name))
# Setting num_nodes = 1 ensures that the query is executed on the coordinator,
# resulting in a single parquet file being written.
query = ("create table {0} stored as parquet as select * from {1}").format(
qualified_table_name, source_table)
vector.get_value('exec_option')['num_nodes'] = 1
self.execute_query_expect_success(self.client, query,
vector.get_value('exec_option'))
file_metadata_list = get_parquet_metadata_from_hdfs_folder(hdfs_path, tmp_dir)
assert len(file_metadata_list) == 1
assert file_metadata_list[0] is not None
return file_metadata_list[0]
@staticmethod
def _get_schema(schemas, column_name):
"""Searches 'schemas' for a schema with name 'column_name'. Asserts if non is found.
"""
for schema in schemas:
if schema.name == column_name:
return schema
assert False, "schema element %s not found" % column_name
@staticmethod
def _check_only_one_member_var_is_set(obj, var_name):
"""Checks that 'var_name' is the only member of 'obj' that is not None. Useful to
check Thrift unions."""
keys = [k for k, v in vars(obj).iteritems() if v is not None]
assert keys == [var_name]
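  # Hedged illustration of the helper above: parquet logical types are Thrift
  # unions, so for an INTEGER column vars(schema.logicalType) has exactly one
  # non-None member ('INTEGER') and the helper asserts keys == ['INTEGER'].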
def _check_no_logical_type(self, schemas, column_name):
"""Checks that the schema with name 'column_name' has no logical or converted type."""
schema = self._get_schema(schemas, column_name)
assert schema.converted_type is None
assert schema.logicalType is None
def _check_int_logical_type(self, schemas, column_name, bit_width):
"""Checks that the schema with name 'column_name' has logical and converted type that
describe a signed integer with 'bit_width' bits."""
schema = self._get_schema(schemas, column_name)
bit_width_to_converted_type_map = {
8: ConvertedType.INT_8,
16: ConvertedType.INT_16,
32: ConvertedType.INT_32,
64: ConvertedType.INT_64
}
assert schema.converted_type == bit_width_to_converted_type_map[bit_width]
assert schema.logicalType is not None
self._check_only_one_member_var_is_set(schema.logicalType, "INTEGER")
assert schema.logicalType.INTEGER is not None
assert schema.logicalType.INTEGER.bitWidth == bit_width
assert schema.logicalType.INTEGER.isSigned
def _check_decimal_logical_type(self, schemas, column_name, precision, scale):
"""Checks that the schema with name 'column_name' has logical and converted type that
describe a decimal with given 'precision' and 'scale'."""
schema = self._get_schema(schemas, column_name)
assert schema.converted_type == ConvertedType.DECIMAL
assert schema.precision == precision
assert schema.scale == scale
assert schema.logicalType is not None
self._check_only_one_member_var_is_set(schema.logicalType, "DECIMAL")
assert schema.logicalType.DECIMAL.precision == precision
assert schema.logicalType.DECIMAL.scale == scale
def test_logical_types(self, vector, unique_database, tmpdir):
"""Tests that the Parquet writers set logical type and converted type correctly
for all types except DECIMAL"""
source = "functional.alltypestiny"
file_metadata = \
self._ctas_and_get_metadata(vector, unique_database, tmpdir.strpath, source)
schemas = file_metadata.schema
self._check_int_logical_type(schemas, "tinyint_col", 8)
self._check_int_logical_type(schemas, "smallint_col", 16)
self._check_int_logical_type(schemas, "int_col", 32)
self._check_int_logical_type(schemas, "bigint_col", 64)
self._check_no_logical_type(schemas, "bool_col")
self._check_no_logical_type(schemas, "float_col")
self._check_no_logical_type(schemas, "double_col")
# By default STRING has no logical type, see IMPALA-5982.
self._check_no_logical_type(schemas, "string_col")
# Currently TIMESTAMP is written as INT96 and has no logical type.
# This test will break once INT64 becomes the default Parquet type for TIMESTAMP
# columns in the future (IMPALA-5049).
self._check_no_logical_type(schemas, "timestamp_col")
def test_decimal_logical_types(self, vector, unique_database, tmpdir):
"""Tests that the Parquet writers set logical type and converted type correctly
for DECIMAL type."""
source = "functional.decimal_tiny"
file_metadata = \
self._ctas_and_get_metadata(vector, unique_database, tmpdir.strpath, source)
schemas = file_metadata.schema
self._check_decimal_logical_type(schemas, "c1", 10, 4)
self._check_decimal_logical_type(schemas, "c2", 15, 5)
self._check_decimal_logical_type(schemas, "c3", 1, 1)
def _check_int64_timestamp_logical_type(self, schemas, column_name, unit):
"""Checks that the schema with name 'column_name' has logical and converted type that
describe a timestamp with the given unit."""
schema = self._get_schema(schemas, column_name)
assert schema.logicalType is not None
self._check_only_one_member_var_is_set(schema.logicalType, "TIMESTAMP")
assert schema.logicalType.TIMESTAMP.unit is not None
self._check_only_one_member_var_is_set(
schema.logicalType.TIMESTAMP.unit, unit.upper())
# Non UTC-normalized timestamps have no converted_type to avoid confusing older
# readers that would interpret these as UTC-normalized.
assert schema.converted_type is None
assert not schema.logicalType.TIMESTAMP.isAdjustedToUTC
def _ctas_and_check_int64_timestamps(self, vector, unique_database, tmpdir, unit):
"""CTAS a table using 'unit' int64 timestamps and checks columns metadata."""
source = "functional.alltypestiny"
timestamp_type = 'int64_' + unit
vector.get_value('exec_option')['parquet_timestamp_type'] = timestamp_type
file_metadata = self._ctas_and_get_metadata(vector, unique_database, tmpdir.strpath,
source, table_name=timestamp_type)
schemas = file_metadata.schema
self._check_int64_timestamp_logical_type(schemas, "timestamp_col", unit)
def test_int64_timestamp_logical_type(self, vector, unique_database, tmpdir):
"""Tests that correct metadata is written for int64 timestamps."""
self._ctas_and_check_int64_timestamps(vector, unique_database, tmpdir, "millis")
self._ctas_and_check_int64_timestamps(vector, unique_database, tmpdir, "micros")
self._ctas_and_check_int64_timestamps(vector, unique_database, tmpdir, "nanos")
@SkipIfIsilon.hive
@SkipIfLocal.hive
@SkipIfS3.hive
@SkipIfABFS.hive
@SkipIfADLS.hive
# TODO: Should we move this to test_parquet_stats.py?
class TestHdfsParquetTableStatsWriter(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestHdfsParquetTableStatsWriter, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format == 'parquet')
def _decode_row_group_stats(self, schemas, row_group_stats):
"""Decodes and return a list of statistics for a single row group."""
decoded = []
assert len(schemas) == len(row_group_stats)
for schema, stats in zip(schemas, row_group_stats):
if stats is None:
decoded.append(None)
continue
min_value = None
max_value = None
if stats.min_value is not None and stats.max_value is not None:
min_value = decode_stats_value(schema, stats.min_value)
max_value = decode_stats_value(schema, stats.max_value)
null_count = stats.null_count
assert null_count is not None
decoded.append(ColumnStats(schema.name, min_value, max_value, null_count))
assert len(decoded) == len(schemas)
return decoded
def _get_row_group_stats_from_file_metadata(self, file_metadata):
"""Returns a list of statistics for each row group in Parquet file metadata
'file_metadata'. The result is a two-dimensional list, containing stats by
row group and column."""
# We only support flat schemas, the additional element is the root element.
schemas = file_metadata.schema[1:]
file_stats = []
for row_group in file_metadata.row_groups:
num_columns = len(row_group.columns)
assert num_columns == len(schemas)
column_stats = [c.meta_data.statistics for c in row_group.columns]
file_stats.append(self._decode_row_group_stats(schemas, column_stats))
return file_stats
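  # Hedged illustration of the shape described above: for a file with two row
  # groups and columns (a, b) the result looks like
  #   [[ColumnStats('a', ...), ColumnStats('b', ...)],   # row group 0
  #    [ColumnStats('a', ...), ColumnStats('b', ...)]]   # row group 1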
def _get_row_group_stats_from_hdfs_folder(self, hdfs_path, tmp_dir):
"""Returns a list of statistics for each row group in all parquet files i 'hdfs_path'.
'tmp_dir' needs to be supplied by the caller and will be used to store temporary
files. The caller is responsible for cleaning up 'tmp_dir'. The result is a
two-dimensional list, containing stats by row group and column."""
row_group_stats = []
file_metadata_list = get_parquet_metadata_from_hdfs_folder(hdfs_path, tmp_dir)
for file_metadata in file_metadata_list:
row_group_stats.extend(self._get_row_group_stats_from_file_metadata(file_metadata))
return row_group_stats
def _validate_parquet_stats(self, hdfs_path, tmp_dir, expected_values,
skip_col_idxs = None):
"""Validates that 'hdfs_path' contains exactly one parquet file and that the rowgroup
statistics in that file match the values in 'expected_values'. Columns indexed by
'skip_col_idx' are excluded from the verification of the expected values. 'tmp_dir'
needs to be supplied by the caller and will be used to store temporary files. The
caller is responsible for cleaning up 'tmp_dir'.
"""
skip_col_idxs = skip_col_idxs or []
# The caller has to make sure that the table fits into a single row group. We enforce
# it here to make sure the results are predictable and independent of how the data
# could get written across multiple files.
row_group_stats = self._get_row_group_stats_from_hdfs_folder(hdfs_path, tmp_dir)
assert(len(row_group_stats)) == 1
table_stats = row_group_stats[0]
num_columns = len(table_stats)
assert num_columns == len(expected_values)
for col_idx, stats, expected in zip(range(num_columns), table_stats, expected_values):
if col_idx in skip_col_idxs:
continue
if not expected:
assert not stats
continue
assert stats == expected
def _ctas_table_and_verify_stats(self, vector, unique_database, tmp_dir, source_table,
expected_values,
table_name="test_hdfs_parquet_table_writer"):
"""Copies 'source_table' into a parquet table and makes sure that the row group
statistics in the resulting parquet file match those in 'expected_values'. 'tmp_dir'
needs to be supplied by the caller and will be used to store temporary files. The
caller is responsible for cleaning up 'tmp_dir'.
"""
qualified_table_name = "{0}.{1}".format(unique_database, table_name)
hdfs_path = get_fs_path('/test-warehouse/{0}.db/{1}/'.format(unique_database,
table_name))
# Setting num_nodes = 1 ensures that the query is executed on the coordinator,
# resulting in a single parquet file being written.
self.execute_query("drop table if exists {0}".format(qualified_table_name))
query = ("create table {0} stored as parquet as select * from {1}").format(
qualified_table_name, source_table)
vector.get_value('exec_option')['num_nodes'] = 1
self.execute_query(query, vector.get_value('exec_option'))
self._validate_parquet_stats(hdfs_path, tmp_dir, expected_values)
def test_write_statistics_alltypes(self, vector, unique_database, tmpdir):
"""Test that writing a parquet file populates the rowgroup statistics with the correct
values.
"""
# Expected values for functional.alltypes
expected_min_max_values = [
ColumnStats('id', 0, 7299, 0),
ColumnStats('bool_col', False, True, 0),
ColumnStats('tinyint_col', 0, 9, 0),
ColumnStats('smallint_col', 0, 9, 0),
ColumnStats('int_col', 0, 9, 0),
ColumnStats('bigint_col', 0, 90, 0),
ColumnStats('float_col', 0, RoundFloat(9.9, 1), 0),
ColumnStats('double_col', 0, RoundFloat(90.9, 1), 0),
ColumnStats('date_string_col', '01/01/09', '12/31/10', 0),
ColumnStats('string_col', '0', '9', 0),
ColumnStats('timestamp_col', TimeStamp('2009-01-01 00:00:00.0'),
TimeStamp('2010-12-31 05:09:13.860000'), 0),
ColumnStats('year', 2009, 2010, 0),
ColumnStats('month', 1, 12, 0),
]
self._ctas_table_and_verify_stats(vector, unique_database, tmpdir.strpath,
"functional.alltypes", expected_min_max_values)
def test_write_statistics_date(self, vector, unique_database, tmpdir):
"""Test that writing Date values to a parquet file populates the rowgroup statistics
with the correct values.
Date column statistics are tested separately as Date type is not supported across
all file formats, therefore we couldn't add a Date column to 'alltypes' table yet.
"""
expected_min_max_values = [
ColumnStats('id_col', 0, 31, 0),
ColumnStats('date_col', Date(1, 1, 1), Date(9999, 12, 31), 2),
ColumnStats('date_part', Date(1, 1, 1), Date(9999, 12, 31), 0),
]
self._ctas_table_and_verify_stats(vector, unique_database, tmpdir.strpath,
"functional.date_tbl", expected_min_max_values)
def test_write_statistics_decimal(self, vector, unique_database, tmpdir):
"""Test that writing a parquet file populates the rowgroup statistics with the correct
values for decimal columns.
"""
# Expected values for functional.decimal_tbl
expected_min_max_values = [
ColumnStats('d1', 1234, 132842, 0),
ColumnStats('d2', 111, 2222, 0),
ColumnStats('d3', Decimal('1.23456789'), Decimal('12345.6789'), 0),
ColumnStats('d4', Decimal('0.123456789'), Decimal('0.123456789'), 0),
ColumnStats('d5', Decimal('0.1'), Decimal('12345.789'), 0),
ColumnStats('d6', 1, 1, 0)
]
self._ctas_table_and_verify_stats(vector, unique_database, tmpdir.strpath,
"functional.decimal_tbl", expected_min_max_values)
def test_write_statistics_multi_page(self, vector, unique_database, tmpdir):
"""Test that writing a parquet file populates the rowgroup statistics with the correct
    values. This test writes a single parquet file with several pages per column.
"""
# Expected values for tpch_parquet.customer
expected_min_max_values = [
ColumnStats('c_custkey', 1, 150000, 0),
ColumnStats('c_name', 'Customer#000000001', 'Customer#000150000', 0),
ColumnStats('c_address', ' 2uZwVhQvwA', 'zzxGktzXTMKS1BxZlgQ9nqQ', 0),
ColumnStats('c_nationkey', 0, 24, 0),
ColumnStats('c_phone', '10-100-106-1617', '34-999-618-6881', 0),
ColumnStats('c_acctbal', Decimal('-999.99'), Decimal('9999.99'), 0),
ColumnStats('c_mktsegment', 'AUTOMOBILE', 'MACHINERY', 0),
ColumnStats('c_comment', ' Tiresias according to the slyly blithe instructions '
'detect quickly at the slyly express courts. express dinos wake ',
'zzle. blithely regular instructions cajol', 0),
]
self._ctas_table_and_verify_stats(vector, unique_database, tmpdir.strpath,
"tpch_parquet.customer", expected_min_max_values)
def test_write_statistics_null(self, vector, unique_database, tmpdir):
"""Test that we don't write min/max statistics for null columns. Ensure null_count
is set for columns with null values."""
expected_min_max_values = [
ColumnStats('a', 'a', 'a', 0),
ColumnStats('b', '', '', 0),
ColumnStats('c', None, None, 1),
ColumnStats('d', None, None, 1),
ColumnStats('e', None, None, 1),
ColumnStats('f', 'a\x00b', 'a\x00b', 0),
ColumnStats('g', '\x00', '\x00', 0)
]
self._ctas_table_and_verify_stats(vector, unique_database, tmpdir.strpath,
"functional.nulltable", expected_min_max_values)
def test_write_statistics_char_types(self, vector, unique_database, tmpdir):
"""Test that Impala correctly writes statistics for char columns."""
table_name = "test_char_types"
qualified_table_name = "{0}.{1}".format(unique_database, table_name)
create_table_stmt = "create table {0} (c3 char(3), vc varchar, st string);".format(
qualified_table_name)
self.execute_query(create_table_stmt)
insert_stmt = """insert into {0} values
(cast("def" as char(3)), "ghj xyz", "abc xyz"),
(cast("abc" as char(3)), "def 123 xyz", "lorem ipsum"),
(cast("xy" as char(3)), "abc banana", "dolor dis amet")
""".format(qualified_table_name)
self.execute_query(insert_stmt)
expected_min_max_values = [
ColumnStats('c3', 'abc', 'xy', 0),
ColumnStats('vc', 'abc banana', 'ghj xyz', 0),
ColumnStats('st', 'abc xyz', 'lorem ipsum', 0)
]
self._ctas_table_and_verify_stats(vector, unique_database, tmpdir.strpath,
qualified_table_name, expected_min_max_values)
def test_write_statistics_negative(self, vector, unique_database, tmpdir):
"""Test that Impala correctly writes statistics for negative values."""
view_name = "test_negative_view"
qualified_view_name = "{0}.{1}".format(unique_database, view_name)
# Create a view to generate test data with negative values by negating every other
# row.
create_view_stmt = """create view {0} as select
id * cast(pow(-1, id % 2) as int) as id,
int_col * cast(pow(-1, id % 2) as int) as int_col,
bigint_col * cast(pow(-1, id % 2) as bigint) as bigint_col,
float_col * pow(-1, id % 2) as float_col,
double_col * pow(-1, id % 2) as double_col
from functional.alltypes""".format(qualified_view_name)
self.execute_query(create_view_stmt)
expected_min_max_values = [
ColumnStats('id', -7299, 7298, 0),
ColumnStats('int_col', -9, 8, 0),
ColumnStats('bigint_col', -90, 80, 0),
ColumnStats('float_col', RoundFloat(-9.9, 1), RoundFloat(8.8, 1), 0),
ColumnStats('double_col', RoundFloat(-90.9, 1), RoundFloat(80.8, 1), 0),
]
self._ctas_table_and_verify_stats(vector, unique_database, tmpdir.strpath,
qualified_view_name, expected_min_max_values)
def test_write_statistics_multiple_row_groups(self, vector, unique_database, tmpdir):
"""Test that writing multiple row groups works as expected. This is done by inserting
into a table using the SORT BY clause and then making sure that the min and max values
of row groups don't overlap."""
source_table = "tpch_parquet.orders"
target_table = "test_hdfs_parquet_table_writer"
qualified_target_table = "{0}.{1}".format(unique_database, target_table)
hdfs_path = get_fs_path("/test-warehouse/{0}.db/{1}/".format(
unique_database, target_table))
# Insert a large amount of data on a single backend with a limited parquet file size.
# This will result in several files being written, exercising code that tracks
# statistics for row groups.
query = "create table {0} sort by (o_orderkey) like {1} stored as parquet".format(
qualified_target_table, source_table)
self.execute_query(query, vector.get_value('exec_option'))
query = ("insert into {0} select * from {1}").format(
qualified_target_table, source_table)
vector.get_value('exec_option')['num_nodes'] = 1
vector.get_value('exec_option')['parquet_file_size'] = 8 * 1024 * 1024
self.execute_query(query, vector.get_value('exec_option'))
# Get all stats for the o_orderkey column
row_group_stats = self._get_row_group_stats_from_hdfs_folder(hdfs_path,
tmpdir.strpath)
assert len(row_group_stats) > 1
orderkey_stats = [s[0] for s in row_group_stats]
# Make sure that they don't overlap by ordering by the min value, then looking at
# boundaries.
orderkey_stats.sort(key = lambda s: s.min)
for l, r in zip(orderkey_stats, orderkey_stats[1:]):
assert l.max <= r.min
def test_write_statistics_float_infinity(self, vector, unique_database, tmpdir):
"""Test that statistics for -inf and inf are written correctly."""
table_name = "test_float_infinity"
qualified_table_name = "{0}.{1}".format(unique_database, table_name)
create_table_stmt = "create table {0} (f float, d double);".format(
qualified_table_name)
self.execute_query(create_table_stmt)
insert_stmt = """insert into {0} values
(cast('-inf' as float), cast('-inf' as double)),
(cast('inf' as float), cast('inf' as double))""".format(qualified_table_name)
self.execute_query(insert_stmt)
expected_min_max_values = [
ColumnStats('f', float('-inf'), float('inf'), 0),
ColumnStats('d', float('-inf'), float('inf'), 0),
]
self._ctas_table_and_verify_stats(vector, unique_database, tmpdir.strpath,
qualified_table_name, expected_min_max_values)
def test_write_null_count_statistics(self, vector, unique_database, tmpdir):
"""Test that writing a parquet file populates the rowgroup statistics with the correct
null_count. This test ensures that the null_count is correct for a table with multiple
null values."""
    # Expected values for functional_parquet.zipcode_incomes
expected_min_max_values = [
ColumnStats('id', '8600000US00601', '8600000US999XX', 0),
ColumnStats('zip', '00601', '999XX', 0),
ColumnStats('description1', '\"00601 5-Digit ZCTA', '\"999XX 5-Digit ZCTA', 0),
ColumnStats('description2', ' 006 3-Digit ZCTA\"', ' 999 3-Digit ZCTA\"', 0),
ColumnStats('income', 0, 189570, 29),
]
self._ctas_table_and_verify_stats(vector, unique_database, tmpdir.strpath,
"functional_parquet.zipcode_incomes", expected_min_max_values)
def test_write_int64_timestamp_statistics(self, vector, unique_database, tmpdir):
"""Test that writing a parquet file populates the rowgroup statistics correctly for
int64 milli/micro/nano timestamps."""
table_name = "int96_nanos"
qualified_table_name = "{0}.{1}".format(unique_database, table_name)
create_table_stmt = "create table {0} (ts timestamp);".format(qualified_table_name)
self.execute_query(create_table_stmt)
insert_stmt = """insert into {0} values
("1969-12-31 23:59:59.999999999"),
("1970-01-01 00:00:00.001001001")""".format(qualified_table_name)
self.execute_query(insert_stmt)
vector.get_value('exec_option')['parquet_timestamp_type'] = "int64_millis"
expected_min_max_values = [
ColumnStats('ts', -1, 1, 0)
]
self._ctas_table_and_verify_stats(vector, unique_database, tmpdir.strpath,
qualified_table_name,
expected_min_max_values,
table_name="int64_millis")
vector.get_value('exec_option')['parquet_timestamp_type'] = "int64_micros"
expected_min_max_values = [
ColumnStats('ts', -1, 1001, 0)
]
self._ctas_table_and_verify_stats(vector, unique_database, tmpdir.strpath,
qualified_table_name,
expected_min_max_values,
table_name="int64_micros")
# Insert values that fall outside the valid range for int64_nanos. These should
# be inserted as NULLs and not affect min/max stats.
insert_stmt = """insert into {0} values
("1677-09-21 00:12:43.145224191"),
("2262-04-11 23:47:16.854775808")""".format(qualified_table_name)
self.execute_query(insert_stmt)
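    # Hedged arithmetic note: int64 nanoseconds cover +/- 2**63 ns around the
    # Unix epoch, roughly 1677-09-21 00:12:43.145224192 through
    # 2262-04-11 23:47:16.854775807, so the two literals above fall just one
    # nanosecond outside that window on each side.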
vector.get_value('exec_option')['parquet_timestamp_type'] = "int64_nanos"
expected_min_max_values = [
ColumnStats('ts', -1, 1001001, 2)
]
self._ctas_table_and_verify_stats(vector, unique_database, tmpdir.strpath,
qualified_table_name,
expected_min_max_values,
table_name="int64_nanos")
def test_too_many_columns(self, vector, unique_database):
"""Test that writing a Parquet table with too many columns results in an error."""
num_cols = 12000
query = "create table %s.wide stored as parquet as select \n" % unique_database
query += ", ".join(map(str, xrange(num_cols)))
query += ";\n"
result = self.execute_query_expect_failure(self.client, query)
assert "Minimum required block size must be less than 2GB" in str(result)
| [] | [] | ["IMPALA_HOME", "DEFAULT_FS"] | [] | ["IMPALA_HOME", "DEFAULT_FS"] | python | 2 | 0 |
django/django_fundamentals/django_intro/ninjagold/ninjagold/wsgi.py | """
WSGI config for ninjagold project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ninjagold.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 |
estatisticas_facebook/config/settings/production.py | """
Production settings for Estatisticas Facebook project.
- Use WhiteNoise for serving static files
- Use Amazon's S3 for storing uploaded media
- Use mailgun to send emails
- Use Redis for cache
- Use sentry for error logging
"""
import logging
from .base import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# raven sentry client
# See https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat', ]
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.io/
WHITENOISE_MIDDLEWARE = ['whitenoise.middleware.WhiteNoiseMiddleware', ]
MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE
RAVEN_MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware']
MIDDLEWARE = RAVEN_MIDDLEWARE + MIDDLEWARE
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com', ])
# END SITE CONFIGURATION
INSTALLED_APPS += ['gunicorn', ]
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += ['storages', ]
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
control = 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIRY, AWS_EXPIRY)
AWS_HEADERS = {
'Cache-Control': bytes(control, encoding='latin-1')
}
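# Hedged note on the workaround above: with AWS_EXPIRY = 60 * 60 * 24 * 7 the
# computed value is 'max-age=604800, s-maxage=604800, must-revalidate'; once the
# linked issue is fixed the plain str could be used directly, e.g.
#   AWS_HEADERS = {'Cache-Control': control}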
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='Estatisticas Facebook <[email protected]>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[Estatisticas Facebook]')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# Anymail with Mailgun
INSTALLED_APPS += ['anymail', ]
ANYMAIL = {
'MAILGUN_API_KEY': env('DJANGO_MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_SENDER_DOMAIN')
}
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60)
# CACHING
# ------------------------------------------------------------------------------
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
# Heroku URL does not pass the DB number, so we parse it in
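# Hedged illustration: with REDIS_URL='redis://some-host:6379' the format() call
# above yields 'redis://some-host:6379/0', i.e. Redis database 0 is appended.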
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_LOCATION,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry', ],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console', ],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry', ],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| [] | [] | [] | [] | [] | python | 0 | 0 |
app.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from os.path import join, dirname
from dotenv import load_dotenv
from flask import Flask, request, jsonify
from sqlalchemy import create_engine
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import scoped_session, sessionmaker
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
# OR, the same with increased verbosity:
load_dotenv(dotenv_path, verbose=True)
url = os.environ["DB_URL_FORMAT"]
url = url.format(os.environ["DB_USER_NAME"], os.environ["DB_PASSWORD"], os.environ["DB_HOST"], os.environ["DB_PORT"],
os.environ["DB_NAME"])
if os.environ["ENV"] == 'local':
engine = create_engine('sqlite:///lugyone.db', convert_unicode=True)
else :
engine = create_engine(url,echo=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = str(engine.url)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
alchemy_app = SQLAlchemy(app)
if os.environ["ENV"] != 'production':
app.debug = True
else:
app.debug = False
@app.route('/')
def hello_world():
return 'Hello, World!'
if __name__ == "__main__":
app.run(host="0.0.0.0", port=int(os.environ["PORT"])) | []
| []
| [
"PORT",
"DB_PASSWORD",
"DB_HOST",
"DB_PORT",
"DB_URL_FORMAT",
"ENV",
"DB_NAME",
"DB_USER_NAME"
]
| [] | ["PORT", "DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_URL_FORMAT", "ENV", "DB_NAME", "DB_USER_NAME"] | python | 8 | 0 | |
advanced/react-django/APITestProject/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'APITestProject.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 |
components/nodemanager-service/managers/awsec2/awsec2.go | package awsec2
import (
"context"
"fmt"
"os"
"time"
"sort"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/ssm"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/chef/automate/components/compliance-service/api/common"
"github.com/chef/automate/components/compliance-service/inspec"
"github.com/chef/automate/components/compliance-service/inspec-agent/types"
"github.com/chef/automate/components/compliance-service/utils"
"github.com/chef/automate/components/compliance-service/utils/pool"
"github.com/chef/automate/components/nodemanager-service/api/manager"
"github.com/chef/automate/components/nodemanager-service/pgdb"
"github.com/chef/automate/lib/stringutils"
)
const DefaultRegion string = "us-east-1"
type nodesTaskResult struct {
Nodes []*manager.ManagerNode
Region string
}
type AwsCreds struct {
AccessKeyId string
SecretAccessKey string
ArnRole string
SessionToken string
Region string
}
// New instantiates a new AWS session and returns the API client
func New(AwsAccessKeyId string, AwsSecretAccessKey string, arnRole string, sessionToken string) (*AwsCreds, error) {
logrus.Infof("Credential len: access key %d and secret %d", len(AwsAccessKeyId), len(AwsSecretAccessKey))
// Create new AwsCreds client
region := os.Getenv("AWS_REGION")
if region == "" {
region = DefaultRegion
}
return &AwsCreds{
AccessKeyId: AwsAccessKeyId,
SecretAccessKey: AwsSecretAccessKey,
ArnRole: arnRole,
SessionToken: sessionToken,
Region: region,
}, nil
}
// newClient instantiates a new AWS session and returns the API client
func (creds *AwsCreds) newClient(AwsRegion string) (*ec2.EC2, error) {
if AwsRegion == "" {
AwsRegion = creds.Region
}
sess := session.Must(session.NewSession())
if len(creds.AccessKeyId) == 0 && len(creds.ArnRole) == 0 && len(creds.SecretAccessKey) == 0 {
// users running automate in aws connect with no credentials!
return ec2.New(sess, &aws.Config{Region: aws.String(AwsRegion)}), nil
}
cred := creds.getCredsForAwsConfig(sess)
// Create new aws client session
return ec2.New(sess, &aws.Config{Region: aws.String(AwsRegion), Credentials: cred}), nil
}
// newSTS instantiates a new AWS session and returns the STS API client
func (creds *AwsCreds) newSTS(AwsRegion string) (*sts.STS, error) {
if AwsRegion == "" {
AwsRegion = creds.Region
}
sess := session.Must(session.NewSession())
if len(creds.AccessKeyId) == 0 && len(creds.ArnRole) == 0 && len(creds.SecretAccessKey) == 0 {
// users running automate in aws connect with no credentials!
return sts.New(sess, &aws.Config{Region: aws.String(AwsRegion)}), nil
}
cred := creds.getCredsForAwsConfig(sess)
// Create new aws sts client session
return sts.New(sess, &aws.Config{Region: aws.String(AwsRegion), Credentials: cred}), nil
}
func (creds *AwsCreds) getCredsForAwsConfig(sess *session.Session) *credentials.Credentials {
if len(creds.ArnRole) > 0 {
// if the user has provided a role, we will prioritize this method
return stscreds.NewCredentials(sess, creds.ArnRole)
}
return credentials.NewStaticCredentials(creds.AccessKeyId, creds.SecretAccessKey, creds.SessionToken)
}
func (creds *AwsCreds) newIAM() (*iam.IAM, error) {
sess := session.Must(session.NewSession())
if len(creds.AccessKeyId) == 0 && len(creds.ArnRole) == 0 && len(creds.SecretAccessKey) == 0 {
// users running automate in aws connect with no credentials!
return iam.New(sess), nil
}
cred := creds.getCredsForAwsConfig(sess)
return iam.New(sess, &aws.Config{Credentials: cred}), nil
}
func (creds *AwsCreds) GetAccountAlias(ctx context.Context) (string, error) {
svc, err := creds.newIAM()
if err != nil {
return "", err
}
input := &iam.ListAccountAliasesInput{}
result, err := svc.ListAccountAliasesWithContext(ctx, input)
if err != nil {
return "", err
}
if len(result.AccountAliases) > 0 {
return *result.AccountAliases[0], nil
}
return "", nil
}
func (creds *AwsCreds) GetAccountID(ctx context.Context) (string, error) {
client, err := creds.newSTS(creds.Region)
if err != nil {
return "", errors.Wrap(err, "GetAccountID unable to create a session connection to STS")
}
params := sts.GetCallerIdentityInput{}
resp, err := client.GetCallerIdentityWithContext(ctx, ¶ms)
if err != nil {
return "", errors.Wrap(&utils.InvalidError{Msg: err.Error()}, "GetAccountID unable to call GetCallerIdentity API")
}
return *resp.Account, nil
}
// getRegions returns all regions available for this AWS account
func (creds *AwsCreds) getRegions(ctx context.Context, filters []*string) ([]string, error) {
client, err := creds.newClient(creds.Region)
if err != nil {
return nil, err
}
var result *ec2.DescribeRegionsOutput
regions := make([]string, 0)
if len(filters) > 0 {
// this allows us to send filters like "get all regions for eu* and us-west*"
filtersList := formatRegionFilters(filters)
result, err = client.DescribeRegionsWithContext(ctx, &ec2.DescribeRegionsInput{
Filters: filtersList,
})
} else {
result, err = client.DescribeRegionsWithContext(ctx, &ec2.DescribeRegionsInput{})
}
if err != nil {
return nil, err
}
for _, region := range result.Regions {
regions = append(regions, *region.RegionName)
}
return regions, nil
}
func formatRegionFilters(filters []*string) []*ec2.Filter {
return []*ec2.Filter{{Name: aws.String("endpoint"), Values: filters}}
}
// GetRegions returns a list of the names of AWS regions visible to this set of AWS credentials.
func (creds *AwsCreds) GetRegions(ctx context.Context) ([]string, error) {
client, err := creds.newClient(creds.Region)
if err != nil {
return nil, err
}
result, err := client.DescribeRegionsWithContext(ctx, &ec2.DescribeRegionsInput{})
if err != nil {
return nil, err
}
regions := make([]string, 0)
for _, region := range result.Regions {
regions = append(regions, *region.RegionName)
}
return regions, nil
}
// QueryField returns node fields(tags, tags:environment, regions)
func (client *AwsCreds) QueryField(ctx context.Context, nodesFilters []*common.Filter, field string) ([]string, error) {
var err error
resultArray := make([]string, 0)
if field == "regions" {
regionsFilters := make([]*string, 0)
// if the field is regions, we're requesting all the regions, hence the empty regionsFilters array
resultArray, err = client.getRegions(ctx, regionsFilters)
if err != nil {
return nil, err
}
return resultArray, nil
}
nodes, err := client.QueryNodes(ctx, nodesFilters, false)
if err != nil {
return nil, err
}
uniqueMap, err := handleFieldFilterTags(nodes, field)
if err != nil {
return nil, err
}
for k := range uniqueMap {
resultArray = append(resultArray, k)
}
sort.Strings(resultArray)
return resultArray, nil
}
func handleFieldFilterTags(nodesMap map[string][]*manager.ManagerNode, field string) (map[string]interface{}, error) {
uniqueMap := make(map[string]interface{}, 0)
for _, nodes := range nodesMap {
if field == "tags" {
for _, node := range nodes {
for _, tag := range node.Tags {
uniqueMap[tag.Key] = nil
}
}
} else if strings.HasPrefix(field, "tags:") {
tagKey := field[5:]
for _, node := range nodes {
for _, tag := range node.Tags {
if tag.Key == tagKey {
uniqueMap[tag.Value] = nil
}
}
}
} else {
return nil, fmt.Errorf("Invalid field filter")
}
}
return uniqueMap, nil
}
func handleRegionFilters(regions []*string, excRegions []*string) ([]*string, []*string) {
searchRegions := make([]*string, 0)
excludedRegions := make([]*string, 0)
// to do a search on a region, we search against the endpoint value,
// so we need to prepend the search value with an asterisk to account for the text
// before the region part of the full endpoint string, and append with an asterisk if the user
// did not do so already
var searchRegion *string
for _, region := range regions {
wildSearch := *region
if !strings.HasPrefix(wildSearch, "*") {
wildSearch = "*" + wildSearch
}
if strings.HasSuffix(*region, "*") {
searchRegion = &wildSearch
} else {
valCopy := wildSearch + "*"
searchRegion = &valCopy
}
searchRegions = append(searchRegions, searchRegion)
}
// we manually remove the excluded regions from the list of regions
// to search against, so here we just need to remove any trailing * the user may have attached
for _, region := range excRegions {
excRegion := strings.TrimSuffix(*region, "*")
excludedRegions = append(excludedRegions, &excRegion)
}
return searchRegions, excludedRegions
}
// QueryNodes returns nodes based on a map of filters
func (client *AwsCreds) QueryNodes(ctx context.Context, nodesFilters []*common.Filter, ssm bool) (map[string][]*manager.ManagerNode, error) {
nodes := make(map[string][]*manager.ManagerNode, 0)
excludedNodes := make([]*manager.ManagerNode, 0)
ec2Filters, excludedEc2Filters, regions, excludedRegions := nodesFiltersToEc2Filters(nodesFilters)
searchRegions, excludedRegions := handleRegionFilters(regions, excludedRegions)
reg, err := client.getRegions(ctx, searchRegions)
if err != nil {
return nil, err
}
// remove exclude values from list of regions
for _, region := range excludedRegions {
reg = stringutils.SliceFilter(reg, func(s string) bool {
return !strings.Contains(s, *region)
})
}
regions = aws.StringSlice(reg)
startTime := time.Now()
poolOfTasks := pool.NewPool(client.getNodeInstancePoolTasks(ctx, regions, ec2Filters, ssm), len(regions))
poolOfTasks.Run()
logrus.Debugf("QueryNodes time to run in parallel: %s", time.Since(startTime))
for _, task := range poolOfTasks.Tasks {
		// Type-assert that task.Result holds a nodesTaskResult; if so, taskNodes receives that value.
taskNodes := task.Result.(nodesTaskResult)
if len(taskNodes.Nodes) > 0 {
nodes[taskNodes.Region] = taskNodes.Nodes
}
}
if len(excludedEc2Filters) > 1 {
// b/c aws api does not support exclusion, we are doing it in the code here
// if len(excludedEc2Filters) > 1 (accounting for the running-instance-state filter), then we know that the value of nodes is all nodes across the specified regions,
// with no tag filters. now we fetch the list of nodes based on the non-negated excludedEc2Filters, so we can
// compare the arrays and remove from nodes the list of instances that comes back from the filtered search
poolOfTasks := pool.NewPool(client.getNodeInstancePoolTasks(ctx, regions, excludedEc2Filters, ssm), len(regions))
poolOfTasks.Run()
logrus.Debugf("QueryNodes time to run in parallel for excluded filters search: %s", time.Since(startTime))
for _, task := range poolOfTasks.Tasks {
taskNodes := task.Result.(nodesTaskResult)
if len(taskNodes.Nodes) > 0 {
excludedNodes = append(excludedNodes, taskNodes.Nodes...)
}
}
nodes = handleExcludedNodes(nodes, excludedNodes)
}
if !poolOfTasks.HasErrors() {
logrus.Debugf("We had NO errors while retrieving nodes from aws!!")
} else {
logrus.Errorf("We had errors retrieving nodes from aws: %+v", poolOfTasks.GetErrors())
}
return nodes, nil
}
func handleExcludedNodes(nodes map[string][]*manager.ManagerNode, excludedNodes []*manager.ManagerNode) map[string][]*manager.ManagerNode {
for _, excludedNode := range excludedNodes {
nodes[excludedNode.Region] = removeMatchingNodeMngr(nodes[excludedNode.Region], excludedNode)
}
return nodes
}
func removeMatchingNodeMngr(arr []*manager.ManagerNode, matcher *manager.ManagerNode) []*manager.ManagerNode {
newNodesArr := make([]*manager.ManagerNode, 0)
for _, arrNode := range arr {
if arrNode.Id != matcher.Id {
newNodesArr = append(newNodesArr, arrNode)
}
}
return newNodesArr
}
// nodesFiltersToEc2Filters returns nodes based on a map of filters
func nodesFiltersToEc2Filters(nodesFilters []*common.Filter) ([]*ec2.Filter, []*ec2.Filter, []*string, []*string) {
ec2Filters := make([]*ec2.Filter, 0)
excludedEc2Filters := make([]*ec2.Filter, 0)
regions := make([]*string, 0)
excludedRegions := make([]*string, 0)
for _, filter := range nodesFilters {
if filter == nil {
logrus.Warn("nodesFiltersToEc2Filters: received a node filter == nil")
continue
}
zaMap := kvArrayToMap(filter)
for key, values := range zaMap {
if key == "region" {
if filter.Exclude {
excludedRegions = values
} else {
regions = values
}
} else {
if filter.Exclude {
excludedEc2Filters = append(excludedEc2Filters, &ec2.Filter{
Name: aws.String(fmt.Sprintf("tag:%s", key)),
Values: values,
})
} else {
ec2Filters = append(ec2Filters, &ec2.Filter{
Name: aws.String(fmt.Sprintf("tag:%s", key)),
Values: values,
})
}
}
}
}
// Hardcoding this filter to ensure that we only get running instances
ec2Filters = append(ec2Filters, &ec2.Filter{
Name: aws.String("instance-state-name"),
Values: []*string{aws.String("running")},
})
excludedEc2Filters = append(excludedEc2Filters, &ec2.Filter{
Name: aws.String("instance-state-name"),
Values: []*string{aws.String("running")},
})
return ec2Filters, excludedEc2Filters, regions, excludedRegions
}
func getSSMStatusForInstances(ctx context.Context, region string) (map[string]string, error) {
ssmInstances := make(map[string]string, 0)
// call out to node with ssm api to see if it supports ssm
sess := session.Must(session.NewSession(&aws.Config{Region: aws.String(region)}))
svc := ssm.New(sess)
// not specifying any instance ids will result in getting the instance information for all instances
nextToken := aws.String("no_token_to_start_with")
params := &ssm.DescribeInstanceInformationInput{}
for nextToken != nil {
instances, err := svc.DescribeInstanceInformationWithContext(ctx, params)
nextToken = instances.NextToken
if instances.NextToken != nil {
logrus.Debugf("NextToken received, len(instances): %d", len(instances.InstanceInformationList))
params.NextToken = nextToken
}
if err != nil {
return ssmInstances, errors.Wrap(err, "getSSMStatusForInstances unable to call DescribeInstanceInformation")
}
for _, inst := range instances.InstanceInformationList {
ssmInstances[*inst.InstanceId] = *inst.PingStatus
}
}
return ssmInstances, nil
}
func (client *AwsCreds) getNodeInstancePoolTasks(ctx context.Context, regions []*string, filters []*ec2.Filter, ssmBool bool) []*pool.Task {
var tasks = make([]*pool.Task, 0)
for _, region := range regions {
ec2Client, err := client.newClient(*region)
if err != nil {
logrus.Errorf("Could not connect to ec2 for region: %s", *region)
continue
}
logrus.Debugf("Getting nodes for AWS region: %s", *ec2Client.Config.Region)
f := func() (pool.TaskResult, error) {
var ntr = nodesTaskResult{}
nodes := make([]*manager.ManagerNode, 0)
ssmInstances := make(map[string]string, 0)
var ssmErr error
if ssmBool {
ssmInstances, ssmErr = getSSMStatusForInstances(ctx, *ec2Client.Config.Region)
if ssmErr != nil {
// only logging the error here, we will not always have access to ssm status for instances,
// this is reserved for users running in aws ec2
logrus.Warnf("getNodeInstancePoolTasks unable to get ssm status for instances, %s", ssmErr.Error())
}
}
nextToken := aws.String("no_token_to_start_with")
// Can't use MaxResults param for DescribeInstancesInput as Filters might contain tags and this is not supported by the AWS API
params := &ec2.DescribeInstancesInput{Filters: filters}
for nextToken != nil {
instances, err := ec2Client.DescribeInstancesWithContext(ctx, params)
nextToken = instances.NextToken
if instances.NextToken != nil {
logrus.Debugf("NextToken received, len(nodes): %d", len(nodes))
params.NextToken = nextToken
}
if err != nil {
logrus.Errorf("getNodeInstancePoolTasks unable to describe instances")
ntr.Nodes = nodes
ntr.Region = *region
return pool.TaskResult(ntr), err
}
for idx := range instances.Reservations {
for _, inst := range instances.Reservations[idx].Instances {
nodes = append(nodes, parseInstanceInfo(inst, *ec2Client.Config.Region, ssmInstances))
}
}
}
ntr.Nodes = nodes
ntr.Region = *ec2Client.Config.Region
//Here we convert type (cast) from nodesTaskResult to pool.TaskResult
return pool.TaskResult(ntr), nil
}
tasks = append(tasks, pool.NewTask(f))
}
return tasks
}
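// parseInstanceInfo converts an EC2 instance description into a ManagerNode, preferring the public IP
// address and the "Name" tag, and falling back to the private IP and public DNS name when those are missing.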
func parseInstanceInfo(inst *ec2.Instance, region string, ssmInstances map[string]string) *manager.ManagerNode {
var platform, ip, name string
if inst.PublicIpAddress != nil {
ip = *inst.PublicIpAddress
}
if len(ip) == 0 && inst.PrivateIpAddress != nil {
logrus.Infof("public ip address not found for node %s; getting private ip address", *inst.InstanceId)
ip = *inst.PrivateIpAddress
}
instanceTags := make([]*common.Kv, 0)
for _, tag := range inst.Tags {
instanceTags = append(instanceTags, &common.Kv{Key: *tag.Key, Value: *tag.Value})
if *tag.Key == "Name" {
name = *tag.Value
}
}
if inst.Platform != nil {
platform = *inst.Platform
}
if len(name) == 0 {
name = *inst.PublicDnsName
}
logrus.Infof("Registering %s node %s from aws account with ip %s", platform, name, ip)
ssmConnectionStatus := ssmInstances[*inst.InstanceId]
return &manager.ManagerNode{
Name: name,
Id: *inst.InstanceId,
Host: *inst.PublicDnsName,
PublicIp: ip,
Tags: instanceTags,
Platform: platform,
Region: region,
Ssm: ssmConnectionStatus,
}
}
func kvArrayToMap(filter *common.Filter) map[string][]*string {
zaMap := make(map[string][]*string, 0)
for _, val := range filter.Values {
		// without this value copy, every appended pointer would reference the same loop variable (Go reuses it across iterations)
valCopy := val
zaMap[filter.Key] = append(zaMap[filter.Key], &valCopy)
}
return zaMap
}
// TestConnectivity tests if we can reach AWS and can get regions and nodes
func (creds *AwsCreds) TestConnectivity(ctx context.Context) error {
client, err := creds.newClient(creds.Region)
if err != nil {
return err
}
_, err = client.DescribeRegionsWithContext(ctx, &ec2.DescribeRegionsInput{DryRun: aws.Bool(true)})
if err != nil {
awsErr, ok := err.(awserr.Error)
if !ok || awsErr.Code() != "DryRunOperation" {
return utils.ProcessUnauthenticated(awsErr, "Unsuccessful DescribeRegions check")
}
}
_, err = client.DescribeInstancesWithContext(ctx, &ec2.DescribeInstancesInput{DryRun: aws.Bool(true)})
if err != nil {
awsErr, ok := err.(awserr.Error)
if !ok || awsErr.Code() != "DryRunOperation" {
return utils.ProcessUnauthenticated(awsErr, "Unsuccessful DescribeInstances check")
}
}
_, err = client.DescribeInstanceStatusWithContext(ctx, &ec2.DescribeInstanceStatusInput{DryRun: aws.Bool(true)})
if err != nil {
awsErr, ok := err.(awserr.Error)
if !ok || awsErr.Code() != "DryRunOperation" {
return utils.ProcessUnauthenticated(awsErr, "Unsuccessful DescribeInstanceStatus check")
}
}
return nil
}
type statesTaskResult struct {
States []pgdb.InstanceState
}
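// getInstanceStatusPoolTasks builds one pool task per region; each task pages through DescribeInstanceStatus
// (including non-running instances) and collects the resulting instance states.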
func (client *AwsCreds) getInstanceStatusPoolTasks(ctx context.Context, regions []string) []*pool.Task {
var tasks = make([]*pool.Task, 0)
for _, region := range regions {
ec2Client, err := client.newClient(region)
if err != nil {
logrus.Errorf("Could not connect to ec2 for region: %s", region)
continue
}
logrus.Debugf("Getting nodes for AWS region: %s", *ec2Client.Config.Region)
regionVal := region
f := func() (pool.TaskResult, error) {
var str = statesTaskResult{}
nextToken := aws.String("no_token_to_start_with")
instanceStates := make([]pgdb.InstanceState, 0)
t := true
params := &ec2.DescribeInstanceStatusInput{IncludeAllInstances: &t}
for nextToken != nil {
instances, err := ec2Client.DescribeInstanceStatusWithContext(ctx, params)
nextToken = instances.NextToken
if instances.NextToken != nil {
logrus.Debugf("NextToken received, len(instanceStates): %d", len(instanceStates))
params.NextToken = nextToken
}
if err != nil {
str.States = instanceStates
return pool.TaskResult(str), err
}
for idx := range instances.InstanceStatuses {
instanceStates = append(instanceStates, pgdb.InstanceState{
ID: *instances.InstanceStatuses[idx].InstanceId,
State: *instances.InstanceStatuses[idx].InstanceState.Name,
Region: regionVal,
})
}
}
str.States = instanceStates
return pool.TaskResult(str), nil
}
tasks = append(tasks, pool.NewTask(f))
}
return tasks
}
// QueryStatus returns an array of instanceState based on regions and instanceIds given to it
func (client *AwsCreds) QueryStatus(ctx context.Context, regions []string) ([]pgdb.InstanceState, error) {
var err error
instanceStates := make([]pgdb.InstanceState, 0)
startTime := time.Now()
if len(regions) == 0 {
regions, err = client.getRegions(ctx, []*string{})
if err != nil {
logrus.Errorf("getInstanceStatusPoolTasks unable to get regions")
}
}
poolOfTasks := pool.NewPool(client.getInstanceStatusPoolTasks(ctx, regions), len(regions))
poolOfTasks.Run()
logrus.Debugf("QueryStatus time to run in parallel: %s", time.Since(startTime))
for _, task := range poolOfTasks.Tasks {
		// Type-assert that task.Result holds a statesTaskResult; if so, taskStates receives that value.
taskStates := task.Result.(statesTaskResult)
if len(taskStates.States) > 0 {
instanceStates = append(instanceStates, taskStates.States...)
}
}
logrus.Debugf("Got %d instances back for all regions.", len(instanceStates))
if !poolOfTasks.HasErrors() {
logrus.Debugf("We had NO errors while retrieving nodes from aws!!")
} else {
// TODO (@vj): do we return the error (instead of only logging), if only one of the tasks
// had an error? we were previously completely ignoring the error. I will revisit this
// error handling in the next cleanup, and either log with comment why or return
logrus.Errorf("We had errors retrieving nodes from aws: %s", poolOfTasks.GetErrors())
}
return instanceStates, nil
}
type SSM struct {
}
func NewSSM() *SSM {
return new(SSM)
}
func (s *SSM) SendSSMJob(ctx context.Context, job *types.InspecJob, script string, scriptType string) error {
*job.NodeStatus = types.StatusRunning
output, err := s.Send(ctx, job, script, scriptType)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == ssm.ErrCodeInvalidInstanceId {
// we don't want to return the error here,
// we want to mark the node as failed, and return the awserr
logrus.Errorf("unreachable node detected: %s", job.SourceID)
} else {
logrus.Errorf("unable to run scan: %s %s", job.SourceID, err.Error())
}
*job.NodeStatus = types.StatusFailed
return awsErr
}
}
if output != nil && output.Command != nil {
commandID := *output.Command.CommandId
status := *output.Command.Status
statusDetails := *output.Command.StatusDetails
// call ssm api to find out if job done
// https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_Command.html
// Pending | InProgress | Success | Cancelled | Failed | TimedOut | Cancelling
for status == "Pending" || status == "InProgress" || status == "" { // we include the empty string check to ensure we keep going on throttle errors
// keep polling for status
			time.Sleep(3 * time.Second) // sleep a little to avoid excessive throttling errors
status, statusDetails, err = s.GetCommandStatus(ctx, commandID, job.TargetConfig.Region)
if err != nil {
// return any error except throttling. we'll re-loop for those
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() != "ThrottlingException" { // not sure why i can't find this one defined somewhere..
logrus.Errorf("SendSSMJob unable to get command status: %s", err.Error())
*job.NodeStatus = types.StatusFailed
return awsErr
}
}
}
}
// the ssm command will return the exit code of the command script, which we are modifying to accommodate our needs
// so success should be successful and failure should be an inspec execution failure (not control failures)
if status == "Success" {
*job.NodeStatus = types.StatusCompleted
} else {
*job.NodeStatus = types.StatusFailed
return fmt.Errorf("aws ssm job id %s for node %s failed with status code %s - details %s", commandID, job.NodeName, status, statusDetails)
}
}
return nil
}
const (
SSMLinuxShellScript = "AWS-RunShellScript"
SSMPowerShellScript = "AWS-RunPowerShellScript"
)
func (s *SSM) Send(ctx context.Context, job *types.InspecJob, script string, scriptType string) (*ssm.SendCommandOutput, error) {
conf := &aws.Config{Region: aws.String(job.TargetConfig.Region)}
sess := session.Must(session.NewSession(conf))
svc := ssm.New(sess)
input := new(ssm.SendCommandInput)
input.SetComment("run script to execute inspec and report to automate")
scriptTypeAWS := SSMLinuxShellScript
if scriptType == inspec.PowershellScript {
scriptTypeAWS = SSMPowerShellScript
}
input.SetDocumentName(scriptTypeAWS)
// this is left here, commented out, intentionally
// if ever we have trouble with this, we can uncomment this line to see the
// output of all the jobs
// input.SetOutputS3BucketName("vj-test-ssm")
// set instance id of node
input.SetInstanceIds([]*string{aws.String(job.SourceID)})
params := make(map[string][]*string)
// convert it to a string pointer for AWS
ssmScript := []*string{}
ssmScript = append(ssmScript, &script)
params["commands"] = ssmScript
input.SetParameters(params)
err := input.Validate()
if err != nil {
return nil, errors.Wrap(err, "Send (ssm job) unable to validate input")
}
// and send the command
return svc.SendCommandWithContext(ctx, input)
}
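// GetCommandStatus looks up an SSM command by its ID via ListCommands and returns the command's status and status details.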
func (s *SSM) GetCommandStatus(ctx context.Context, commandID string, region string) (string, string, error) {
var status string
var statusDetails string
conf := &aws.Config{Region: aws.String(region)}
sess := session.Must(session.NewSession(conf))
svc := ssm.New(sess)
output, err := svc.ListCommandsWithContext(ctx, &ssm.ListCommandsInput{CommandId: aws.String(commandID)})
if err != nil {
return status, statusDetails, err
}
for _, cmd := range output.Commands {
if *cmd.CommandId == commandID {
return *cmd.Status, *cmd.StatusDetails, nil
}
}
return status, statusDetails, nil
}
| [
"\"AWS_REGION\""
]
| []
| [
"AWS_REGION"
]
| [] | ["AWS_REGION"] | go | 1 | 0 | |
AIC21_Backend/asgi.py | """
ASGI config for AIC21_Backend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AIC21_Backend.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
e2e/spaces_test.go | //go:build integration
// +build integration
/*
Copyright 2020 DigitalOcean
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os"
minio "github.com/minio/minio-go"
)
type s3Client struct {
*minio.Client
}
func createS3Client() (*s3Client, error) {
cl, err := minio.New(os.Getenv("S3_ENDPOINT"), os.Getenv("S3_ACCESS_KEY_ID"), os.Getenv("S3_SECRET_ACCESS_KEY"), true)
if err != nil {
return nil, fmt.Errorf("failed to create S3 client: %s", err)
}
return &s3Client{
Client: cl,
}, nil
}
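// ensureSpace creates the named Space (bucket) in us-east-1 if it does not already exist.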
func (cl *s3Client) ensureSpace(name string) error {
found, err := cl.BucketExists(name)
if err != nil {
return fmt.Errorf("failed to check for existance of bucket %q: %s", name, err)
}
if !found {
if err := cl.MakeBucket(name, "us-east-1"); err != nil {
return fmt.Errorf("failed to create bucket %q: %s", name, err)
}
} else {
fmt.Printf("Space %q exists already\n", name)
}
return nil
}
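// deleteSpace removes all objects from the named Space and then deletes the Space itself, if it exists.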
func (cl *s3Client) deleteSpace(name string) error {
found, err := cl.BucketExists(name)
if err != nil {
return fmt.Errorf("failed to check for existance of bucket %q: %s", name, err)
}
if found {
// Delete all bucket objects.
listCh := make(chan string)
errCh := make(chan error)
go func() {
defer close(listCh)
for object := range cl.ListObjects(name, "", true, nil) {
if object.Err != nil {
errCh <- object.Err
return
}
listCh <- object.Key
}
}()
remCh := cl.RemoveObjects(name, listCh)
select {
case err := <-errCh:
return fmt.Errorf("failed to list objects: %s", err)
case err, ok := <-remCh:
if ok {
return fmt.Errorf("failed to delete all objects: %s", err)
}
}
if err := cl.RemoveBucket(name); err != nil {
return fmt.Errorf("failed to remove bucket %q: %s", name, err)
}
}
return nil
}
| [
"\"S3_ENDPOINT\"",
"\"S3_ACCESS_KEY_ID\"",
"\"S3_SECRET_ACCESS_KEY\""
]
| []
| [
"S3_SECRET_ACCESS_KEY",
"S3_ENDPOINT",
"S3_ACCESS_KEY_ID"
]
| [] | ["S3_SECRET_ACCESS_KEY", "S3_ENDPOINT", "S3_ACCESS_KEY_ID"] | go | 3 | 0 | |
playlist/authorization.py | import webbrowser
import os
from spotipy.oauth2 import SpotifyOAuth
CLIENT_ID = os.environ["SPOTIFY_APP_ID"]
APP_SECRET = os.environ["SPOTIFY_APP_SECRET"]
REDIRECT_URI = os.environ["REDIRECT_URI"]
CACHE_PATH = os.path.join(os.path.split(__file__)[0], "cached_data/auth_token.json")
SCOPES = " ".join(["user-read-recently-played",
"user-top-read",
"user-library-modify",
"user-library-read",
"playlist-read-private",
"playlist-modify-private", ])
def authorize_application(token_path, scopes):
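    """Open the Spotify authorization page in a browser and exchange the pasted callback URL for an access token."""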
cached_data_path = os.path.join(os.path.split(__file__)[0], 'cached_data')
if not os.path.isdir(cached_data_path):
os.mkdir(cached_data_path)
oauth = SpotifyOAuth(CLIENT_ID, APP_SECRET, REDIRECT_URI,
cache_path=token_path, scope=scopes)
authorization_url = oauth.get_authorize_url()
webbrowser.open(authorization_url)
authorization_response = input("Enter the full callback URL: ")
code = oauth.parse_response_code(authorization_response)
token = oauth.get_access_token(code)
print("Authorization was successful!")
return token
def get_token(token_path=CACHE_PATH):
    oauth = SpotifyOAuth(CLIENT_ID, APP_SECRET, REDIRECT_URI,
                         cache_path=token_path, scope=SCOPES)
    token = oauth.get_cached_token()
    if not token:
        token = authorize_application(token_path, SCOPES)
return token
if __name__ == "__main__":
authorize_application(CACHE_PATH, SCOPES)
| []
| []
| [
"REDIRECT_URI",
"SPOTIFY_APP_ID",
"SPOTIFY_APP_SECRET"
]
| [] | ["REDIRECT_URI", "SPOTIFY_APP_ID", "SPOTIFY_APP_SECRET"] | python | 3 | 0 | |
server/database/util/insert.py | import os
import yaml
import pandas as pd
import mysql.connector
# End Imports--------------------------------------------------------------------------------------------------------------------------------------------------------
# Load Database Configuration
dbconf = yaml.load(open(os.environ["DATABASE_CONFIG"]), yaml.Loader)
# Establish Database Connection
cnx = mysql.connector.connect(user = dbconf['mysql_user'], password = dbconf['mysql_password'],
host = dbconf['mysql_host'], database = dbconf['mysql_db'])
# Create Database Cursor
cursor = cnx.cursor()
# End Database Connection--------------------------------------------------------------------------------------------------------------------------------------------
# Delete From All Tables
cursor.execute("DELETE FROM `Group_Transaction`;")
cursor.execute("DELETE FROM `User_Transaction`;")
cursor.execute("DELETE FROM `Transaction`;")
cursor.execute("DELETE FROM `Group_Stock`;")
cursor.execute("DELETE FROM `Group_Users`;")
cursor.execute("DELETE FROM `Group_Info`;")
cursor.execute("DELETE FROM `User_Stock`;")
cursor.execute("DELETE FROM `Watchlist`;")
cursor.execute("DELETE FROM `User`;")
cursor.execute("DELETE FROM `Stock_Update`;")
cursor.execute("DELETE FROM `Stock`;")
# Commit Data To Database
cnx.commit()
# Insert a user for testing purpose
insert_user = """
INSERT INTO User (user_id, balance, password)
VALUES (%s, %s, %s);
"""
# Insert User Into Database
random_user = (int(0), float(2500000), str("123"))
cursor.execute(insert_user, random_user)
cnx.commit()
# Read Stock Data As Pandas Dataframe
df = pd.read_csv(os.path.realpath("../stock.csv"))
# Dynamic SQL Query
stock_insert_query = """
INSERT INTO Stock (stock_id, name, price, share)
VALUES (%s, %s, %s, %s);
"""
# Iterate Over Rows In Dataframe
for label, row in (df.iterrows()):
# Load Stock Attributes Into Variable
stock_id = row['stock_id']
name = row['name']
price = row['price']
share = row['share']
# Format Stock Tuple
stock_data = (int(stock_id), str(name), float(price), int(share))
# Insert New Stock Tuple
cursor.execute(stock_insert_query, stock_data)
# Commit Data To Database
cnx.commit()
# End Stock Data Insertion--------------------------------------------------------------------------------------------------------------------------------------------
# Read History Data As Pandas Dataframe
df = pd.read_csv(os.path.realpath("../history.csv"))
# Dynamic SQL Query
history_insert_query = """
INSERT INTO Stock_Update (update_id, stock_id, price_change)
VALUES (%s, %s, %s);
"""
# Iterate Over Rows In Dataframe
for label, row in (df.iterrows()):
# Load Stock Attributes Into Variable
update_id = row['update_id']
stock_id = row['stock_id']
price_change = row['price_change']
# Format History Tuple
history_data = (int(update_id), int(stock_id), float(price_change))
# Insert New Stock Tuple
cursor.execute(history_insert_query, history_data)
# Commit Data To Database
cnx.commit()
# Close Database Cursor
cursor.close()
# Close Database Connection
cnx.close()
# End History Data Insertion------------------------------------------------------------------------------------------------------------------------------------------
| []
| []
| [
"DATABASE_CONFIG"
]
| [] | ["DATABASE_CONFIG"] | python | 1 | 0 | |
internal/install/execution/nerdstorage_status_reporter_integration_test.go | // +build integration
package execution
import (
"os"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/newrelic/newrelic-cli/internal/install/types"
"github.com/newrelic/newrelic-client-go/newrelic"
"github.com/newrelic/newrelic-client-go/pkg/config"
"github.com/newrelic/newrelic-client-go/pkg/nerdstorage"
"github.com/newrelic/newrelic-client-go/pkg/workloads"
)
func TestReportRecipeSucceeded_Basic(t *testing.T) {
apiKey := os.Getenv("NEW_RELIC_API_KEY")
accountID := os.Getenv("NEW_RELIC_ACCOUNT_ID")
if apiKey == "" || accountID == "" {
t.Skipf("NEW_RELIC_API_KEY and NEW_RELIC_ACCOUNT_ID are required to run this test")
}
cfg := config.Config{
PersonalAPIKey: apiKey,
}
c, err := newrelic.New(newrelic.ConfigPersonalAPIKey(cfg.PersonalAPIKey))
if err != nil {
t.Fatalf("error creating integration test client")
}
a, err := strconv.Atoi(accountID)
if err != nil {
t.Fatalf("error parsing account ID")
}
entityGUID := createEntity(t, a, c)
r := NewNerdStorageStatusReporter(&c.NerdStorage)
status := NewInstallStatus([]StatusSubscriber{r}, NewConcreteSuccessLinkGenerator())
status.withEntityGUID(entityGUID)
defer deleteUserStatusCollection(t, c.NerdStorage)
defer deleteEntityStatusCollection(t, entityGUID, c.NerdStorage)
defer deleteEntity(t, entityGUID, c)
rec := types.OpenInstallationRecipe{Name: "testName"}
evt := RecipeStatusEvent{
Recipe: rec,
EntityGUID: entityGUID,
}
err = r.RecipeInstalled(status, evt)
require.NoError(t, err)
time.Sleep(1 * time.Second)
s, err := getUserStatusCollection(t, c.NerdStorage)
require.NoError(t, err)
require.NotEmpty(t, s)
s, err = getEntityStatusCollection(t, entityGUID, c.NerdStorage)
require.NoError(t, err)
require.NotEmpty(t, s)
}
func TestReportRecipeSucceeded_UserScopeOnly(t *testing.T) {
apiKey := os.Getenv("NEW_RELIC_API_KEY")
accountID := os.Getenv("NEW_RELIC_ACCOUNT_ID")
if apiKey == "" || accountID == "" {
t.Skipf("NEW_RELIC_API_KEY and NEW_RELIC_ACCOUNT_ID are required to run this test")
}
cfg := config.Config{
PersonalAPIKey: apiKey,
}
c, err := newrelic.New(newrelic.ConfigPersonalAPIKey(cfg.PersonalAPIKey))
if err != nil {
t.Fatalf("error creating integration test client")
}
a, err := strconv.Atoi(accountID)
if err != nil {
t.Fatalf("error parsing account ID")
}
entityGUID := createEntity(t, a, c)
r := NewNerdStorageStatusReporter(&c.NerdStorage)
status := NewInstallStatus([]StatusSubscriber{r}, NewConcreteSuccessLinkGenerator())
defer deleteUserStatusCollection(t, c.NerdStorage)
defer deleteEntityStatusCollection(t, entityGUID, c.NerdStorage)
defer deleteEntity(t, entityGUID, c)
rec := types.OpenInstallationRecipe{Name: "testName"}
evt := RecipeStatusEvent{
Recipe: rec,
}
err = r.RecipeInstalled(status, evt)
require.NoError(t, err)
s, err := getUserStatusCollection(t, c.NerdStorage)
require.NoError(t, err)
require.NotEmpty(t, s)
s, err = getEntityStatusCollection(t, entityGUID, c.NerdStorage)
require.NoError(t, err)
require.Empty(t, s)
}
func getUserStatusCollection(t *testing.T, c nerdstorage.NerdStorage) ([]interface{}, error) {
getCollectionInput := nerdstorage.GetCollectionInput{
PackageID: packageID,
Collection: collectionID,
}
return c.GetCollectionWithUserScope(getCollectionInput)
}
func getEntityStatusCollection(t *testing.T, guid string, c nerdstorage.NerdStorage) ([]interface{}, error) {
getCollectionInput := nerdstorage.GetCollectionInput{
PackageID: packageID,
Collection: collectionID,
}
return c.GetCollectionWithEntityScope(guid, getCollectionInput)
}
func deleteUserStatusCollection(t *testing.T, c nerdstorage.NerdStorage) {
di := nerdstorage.DeleteCollectionInput{
Collection: collectionID,
PackageID: packageID,
}
ok, err := c.DeleteCollectionWithUserScope(di)
require.NoError(t, err)
require.True(t, ok)
}
func deleteEntityStatusCollection(t *testing.T, guid string, c nerdstorage.NerdStorage) {
di := nerdstorage.DeleteCollectionInput{
Collection: collectionID,
PackageID: packageID,
}
_, err := c.DeleteCollectionWithEntityScope(guid, di)
require.NoError(t, err)
}
func createEntity(t *testing.T, accountID int, c *newrelic.NewRelic) string {
i := workloads.CreateInput{
Name: "testEntity",
}
e, err := c.Workloads.CreateWorkload(accountID, i)
require.NoError(t, err)
return e.GUID
}
func deleteEntity(t *testing.T, guid string, c *newrelic.NewRelic) {
_, err := c.Workloads.DeleteWorkload(guid)
require.NoError(t, err)
}
| [
"\"NEW_RELIC_API_KEY\"",
"\"NEW_RELIC_ACCOUNT_ID\"",
"\"NEW_RELIC_API_KEY\"",
"\"NEW_RELIC_ACCOUNT_ID\""
]
| []
| [
"NEW_RELIC_ACCOUNT_ID",
"NEW_RELIC_API_KEY"
]
| [] | ["NEW_RELIC_ACCOUNT_ID", "NEW_RELIC_API_KEY"] | go | 2 | 0 | |
tfx/extensions/google_cloud_ai_platform/pusher/executor_test.py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.extensions.google_cloud_ai_platform.pusher.executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
from typing import Any, Dict, Text
# Standard Imports
import mock
import tensorflow as tf
from tfx.components.pusher import executor as tfx_pusher_executor
from tfx.dsl.io import fileio
from tfx.extensions.google_cloud_ai_platform.pusher import executor
from tfx.types import standard_artifacts
from tfx.utils import json_utils
from tfx.utils import telemetry_utils
class ExecutorTest(tf.test.TestCase):
def setUp(self):
super(ExecutorTest, self).setUp()
self._source_data_dir = os.path.join(
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),
'components', 'testdata')
self._output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
fileio.makedirs(self._output_data_dir)
self._model_export = standard_artifacts.Model()
self._model_export.uri = os.path.join(self._source_data_dir,
'trainer/current')
self._model_blessing = standard_artifacts.ModelBlessing()
self._input_dict = {
tfx_pusher_executor.MODEL_KEY: [self._model_export],
tfx_pusher_executor.MODEL_BLESSING_KEY: [self._model_blessing],
}
self._model_push = standard_artifacts.PushedModel()
self._model_push.uri = os.path.join(self._output_data_dir, 'model_push')
fileio.makedirs(self._model_push.uri)
self._output_dict = {
tfx_pusher_executor.PUSHED_MODEL_KEY: [self._model_push],
}
# Dict format of exec_properties. custom_config needs to be serialized
# before being passed into Do function.
self._exec_properties = {
'custom_config': {
executor.SERVING_ARGS_KEY: {
'model_name': 'model_name',
'project_id': 'project_id'
},
},
'push_destination': None,
}
self._executor = executor.Executor()
def _serialize_custom_config_under_test(self) -> Dict[Text, Any]:
"""Converts self._exec_properties['custom_config'] to string."""
result = copy.deepcopy(self._exec_properties)
result['custom_config'] = json_utils.dumps(result['custom_config'])
return result
def assertDirectoryEmpty(self, path):
self.assertEqual(len(fileio.listdir(path)), 0)
def assertDirectoryNotEmpty(self, path):
self.assertGreater(len(fileio.listdir(path)), 0)
def assertPushed(self):
self.assertDirectoryNotEmpty(self._model_push.uri)
self.assertEqual(1, self._model_push.get_int_custom_property('pushed'))
def assertNotPushed(self):
self.assertDirectoryEmpty(self._model_push.uri)
self.assertEqual(0, self._model_push.get_int_custom_property('pushed'))
@mock.patch(
'tfx.extensions.google_cloud_ai_platform.pusher.executor.discovery')
@mock.patch.object(executor, 'runner', autospec=True)
def testDoBlessed(self, mock_runner, _):
self._model_blessing.uri = os.path.join(self._source_data_dir,
'model_validator/blessed')
self._model_blessing.set_int_custom_property('blessed', 1)
mock_runner.get_service_name_and_api_version.return_value = ('ml', 'v1')
self._executor.Do(self._input_dict, self._output_dict,
self._serialize_custom_config_under_test())
executor_class_path = '%s.%s' % (self._executor.__class__.__module__,
self._executor.__class__.__name__)
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):
job_labels = telemetry_utils.get_labels_dict()
mock_runner.deploy_model_for_aip_prediction.assert_called_once_with(
mock.ANY,
self._model_push.uri,
mock.ANY,
mock.ANY,
job_labels,
)
self.assertPushed()
version = self._model_push.get_string_custom_property('pushed_version')
self.assertEqual(
self._model_push.get_string_custom_property('pushed_destination'),
'projects/project_id/models/model_name/versions/{}'.format(version))
@mock.patch(
'tfx.extensions.google_cloud_ai_platform.pusher.executor.discovery')
@mock.patch.object(executor, 'runner', autospec=True)
def testDoNotBlessed(self, mock_runner, _):
self._model_blessing.uri = os.path.join(self._source_data_dir,
'model_validator/not_blessed')
self._model_blessing.set_int_custom_property('blessed', 0)
mock_runner.get_service_name_and_api_version.return_value = ('ml', 'v1')
self._executor.Do(self._input_dict, self._output_dict,
self._serialize_custom_config_under_test())
self.assertNotPushed()
mock_runner.deploy_model_for_aip_prediction.assert_not_called()
if __name__ == '__main__':
tf.test.main()
| []
| []
| [
"TEST_UNDECLARED_OUTPUTS_DIR"
]
| [] | ["TEST_UNDECLARED_OUTPUTS_DIR"] | python | 1 | 0 | |
internal/analyser/filesystem_test.go | package analyser
import (
"context"
"fmt"
"os"
"testing"
)
func TestNewFileSystem_notExist(t *testing.T) {
memLimit := 512
base := "/does-not-exist"
_, err := NewFileSystem(base, memLimit)
if err == nil {
t.Errorf("expected error for path %v, got: %v", base, err)
}
}
func TestFileSystem(t *testing.T) {
memLimit := 512
fs, err := NewFileSystem(os.TempDir(), memLimit)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
ctx := context.Background()
exec, err := fs.NewExecuter(ctx, "github.com/gopherci/gopherci")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
gopath := exec.(*FileSystemExecuter).gopath
if !exists(gopath) {
t.Errorf("expected %q to exist", gopath)
}
out, err := exec.Execute(ctx, []string{"echo $GOPATH $PATH"})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if want := gopath + " " + os.Getenv("PATH") + "\n"; want != string(out) {
t.Errorf("\nwant %s\nhave %s", want, out)
}
out, err = exec.Execute(ctx, []string{"pwd"})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
// Ensure current working directory is project path
if want := gopath + "/src/github.com/gopherci/gopherci\n"; want != string(out) {
t.Errorf("\nwant %q\nhave %q", want, out)
}
// Ensure correct memory limit
out, err = exec.Execute(ctx, []string{"ulimit", "-v"})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if want := fmt.Sprintf("%d\n", memLimit*1024); want != string(out) {
t.Errorf("\nwant %q\nhave %q", want, out)
}
err = exec.Stop(ctx)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if exists(gopath) {
t.Errorf("expected %q to be removed", gopath)
}
}
func exists(path string) bool {
_, err := os.Stat(path)
return err == nil || !os.IsNotExist(err)
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
src/releaser/releaser.go | // Copyright 2017-present The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package releaser implements a set of utilities and a wrapper around Goreleaser
// to help automate the Hugo release process.
package releaser
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/strawberryssg/strawberry-v0/common/hexec"
"github.com/strawberryssg/strawberry-v0/common/hugo"
"github.com/pkg/errors"
)
const commitPrefix = "releaser:"
type releaseNotesState int
const (
releaseNotesNone = iota
releaseNotesCreated
releaseNotesReady
)
// ReleaseHandler provides functionality to release a new version of Hugo.
type ReleaseHandler struct {
cliVersion string
skipPublish bool
// Just simulate, no actual changes.
try bool
git func(args ...string) (string, error)
}
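// calculateVersions returns the version being released and the next development (-DEV) version the
// repository should be bumped to afterwards.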
func (r ReleaseHandler) calculateVersions() (hugo.Version, hugo.Version) {
newVersion := hugo.MustParseVersion(r.cliVersion)
finalVersion := newVersion.Next()
finalVersion.PatchLevel = 0
if newVersion.Suffix != "-test" {
newVersion.Suffix = ""
}
finalVersion.Suffix = "-DEV"
return newVersion, finalVersion
}
// New initialises a ReleaseHandler.
func New(version string, skipPublish, try bool) *ReleaseHandler {
// When triggered from CI release branch
version = strings.TrimPrefix(version, "release-")
version = strings.TrimPrefix(version, "v")
rh := &ReleaseHandler{cliVersion: version, skipPublish: skipPublish, try: try}
if try {
rh.git = func(args ...string) (string, error) {
fmt.Println("git", strings.Join(args, " "))
return "", nil
}
} else {
rh.git = git
}
return rh
}
// Run creates a new release.
func (r *ReleaseHandler) Run() error {
if os.Getenv("GITHUB_TOKEN") == "" {
return errors.New("GITHUB_TOKEN not set, create one here with the repo scope selected: https://github.com/settings/tokens/new")
}
newVersion, finalVersion := r.calculateVersions()
version := newVersion.String()
tag := "v" + version
isPatch := newVersion.PatchLevel > 0
mainVersion := newVersion
mainVersion.PatchLevel = 0
// Exit early if tag already exists
exists, err := tagExists(tag)
if err != nil {
return err
}
if exists {
return fmt.Errorf("tag %q already exists", tag)
}
var changeLogFromTag string
if newVersion.PatchLevel == 0 {
// There may have been patch releases between, so set the tag explicitly.
changeLogFromTag = "v" + newVersion.Prev().String()
exists, _ := tagExists(changeLogFromTag)
if !exists {
// fall back to one that exists.
changeLogFromTag = ""
}
}
var (
gitCommits gitInfos
gitCommitsDocs gitInfos
relNotesState releaseNotesState
)
relNotesState, err = r.releaseNotesState(version)
if err != nil {
return err
}
prepareReleaseNotes := isPatch || relNotesState == releaseNotesNone
shouldRelease := isPatch || relNotesState == releaseNotesReady
defer r.gitPush() // TODO(bep)
if prepareReleaseNotes || shouldRelease {
gitCommits, err = getGitInfos(changeLogFromTag, "gotham", "", !r.try)
if err != nil {
return err
}
// TODO(bep) explicit tag?
gitCommitsDocs, err = getGitInfos("", "hugoDocs", "../hugoDocs", !r.try)
if err != nil {
return err
}
}
if relNotesState == releaseNotesCreated {
fmt.Println("Release notes created, but not ready. Rename to *-ready.md to continue ...")
return nil
}
if prepareReleaseNotes {
releaseNotesFile, err := r.writeReleaseNotesToTemp(version, isPatch, gitCommits, gitCommitsDocs)
if err != nil {
return err
}
if _, err := r.git("add", releaseNotesFile); err != nil {
return err
}
commitMsg := fmt.Sprintf("%s Add release notes for %s", commitPrefix, newVersion)
if !isPatch {
commitMsg += "\n\nRename to *-ready.md to continue."
}
commitMsg += "\n[ci skip]"
if _, err := r.git("commit", "-m", commitMsg); err != nil {
return err
}
}
if !shouldRelease {
fmt.Printf("Skip release ... ")
return nil
}
// For docs, for now we assume that:
// The /docs subtree is up to date and ready to go.
// The hugoDocs/dev and hugoDocs/master must be merged manually after release.
// TODO(bep) improve this when we see how it works.
if err := r.bumpVersions(newVersion); err != nil {
return err
}
if _, err := r.git("commit", "-a", "-m", fmt.Sprintf("%s Bump versions for release of %s\n\n[ci skip]", commitPrefix, newVersion)); err != nil {
return err
}
releaseNotesFile := getReleaseNotesDocsTempFilename(version, true)
title, description := version, version
if isPatch {
title = "Hugo " + version + ": A couple of Bug Fixes"
description = "This version fixes a couple of bugs introduced in " + mainVersion.String() + "."
}
// Write the release notes to the docs site as well.
docFile, err := r.writeReleaseNotesToDocs(title, description, releaseNotesFile)
if err != nil {
return err
}
if _, err := r.git("add", docFile); err != nil {
return err
}
if _, err := r.git("commit", "-m", fmt.Sprintf("%s Add release notes to /docs for release of %s\n\n[ci skip]", commitPrefix, newVersion)); err != nil {
return err
}
if _, err := r.git("tag", "-a", tag, "-m", fmt.Sprintf("%s %s [ci skip]", commitPrefix, newVersion)); err != nil {
return err
}
if !r.skipPublish {
if _, err := r.git("push", "origin", tag); err != nil {
return err
}
}
if err := r.release(releaseNotesFile); err != nil {
return err
}
if err := r.bumpVersions(finalVersion); err != nil {
return err
}
if !r.try {
// No longer needed.
if err := os.Remove(releaseNotesFile); err != nil {
return err
}
}
if _, err := r.git("commit", "-a", "-m", fmt.Sprintf("%s Prepare repository for %s\n\n[ci skip]", commitPrefix, finalVersion)); err != nil {
return err
}
return nil
}
func (r *ReleaseHandler) gitPush() {
if r.skipPublish {
return
}
if _, err := r.git("push", "origin", "HEAD"); err != nil {
log.Fatal("push failed:", err)
}
}
func (r *ReleaseHandler) release(releaseNotesFile string) error {
if r.try {
fmt.Println("Skip goreleaser...")
return nil
}
args := []string{"--parallelism", "3", "--timeout", "120m", "--rm-dist", "--release-notes", releaseNotesFile}
if r.skipPublish {
args = append(args, "--skip-publish")
}
cmd, _ := hexec.SafeCommand("goreleaser", args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return errors.Wrap(err, "goreleaser failed")
}
return nil
}
func (r *ReleaseHandler) bumpVersions(ver hugo.Version) error {
toDev := ""
if ver.Suffix != "" {
toDev = ver.Suffix
}
if err := r.replaceInFile("common/hugo/version_current.go",
`Number:(\s{4,})(.*),`, fmt.Sprintf(`Number:${1}%.2f,`, ver.Number),
`PatchLevel:(\s*)(.*),`, fmt.Sprintf(`PatchLevel:${1}%d,`, ver.PatchLevel),
`Suffix:(\s{4,})".*",`, fmt.Sprintf(`Suffix:${1}"%s",`, toDev)); err != nil {
return err
}
snapcraftGrade := "stable"
if ver.Suffix != "" {
snapcraftGrade = "devel"
}
if err := r.replaceInFile("snap/snapcraft.yaml",
`version: "(.*)"`, fmt.Sprintf(`version: "%s"`, ver),
`grade: (.*) #`, fmt.Sprintf(`grade: %s #`, snapcraftGrade)); err != nil {
return err
}
var minVersion string
if ver.Suffix != "" {
// People use the DEV version in daily use, and we cannot create new themes
// with the next version before it is released.
minVersion = ver.Prev().String()
} else {
minVersion = ver.String()
}
if err := r.replaceInFile("commands/new.go",
`min_version = "(.*)"`, fmt.Sprintf(`min_version = "%s"`, minVersion)); err != nil {
return err
}
return nil
}
func (r *ReleaseHandler) replaceInFile(filename string, oldNew ...string) error {
fullFilename := hugoFilepath(filename)
fi, err := os.Stat(fullFilename)
if err != nil {
return err
}
if r.try {
fmt.Printf("Replace in %q: %q\n", filename, oldNew)
return nil
}
b, err := ioutil.ReadFile(fullFilename)
if err != nil {
return err
}
newContent := string(b)
for i := 0; i < len(oldNew); i += 2 {
re := regexp.MustCompile(oldNew[i])
newContent = re.ReplaceAllString(newContent, oldNew[i+1])
}
return ioutil.WriteFile(fullFilename, []byte(newContent), fi.Mode())
}
func hugoFilepath(filename string) string {
pwd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
return filepath.Join(pwd, filename)
}
func isCI() bool {
return os.Getenv("CI") != ""
}
| [
"\"GITHUB_TOKEN\"",
"\"CI\""
]
| []
| [
"GITHUB_TOKEN",
"CI"
]
| [] | ["GITHUB_TOKEN", "CI"] | go | 2 | 0 | |
src/main/java/com/milaboratory/mixcr/cli/Main.java | /*
* Copyright (c) 2014-2021, MiLaboratories Inc. All Rights Reserved
*
* BEFORE DOWNLOADING AND/OR USING THE SOFTWARE, WE STRONGLY ADVISE
* AND ASK YOU TO READ CAREFULLY LICENSE AGREEMENT AT:
*
* https://github.com/milaboratory/mixcr/blob/develop/LICENSE
*/
package com.milaboratory.mixcr.cli;
import com.milaboratory.cli.ValidationException;
import com.milaboratory.util.TempFileManager;
import com.milaboratory.util.VersionInfo;
import io.repseq.core.VDJCLibraryRegistry;
import io.repseq.seqbase.SequenceResolvers;
import picocli.CommandLine;
import picocli.CommandLine.Model.CommandSpec;
import picocli.CommandLine.ParameterException;
import picocli.CommandLine.ParseResult;
import picocli.CommandLine.RunLast;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
public final class Main {
private static boolean initialized = false;
public static void main(String... args) {
Thread.setDefaultUncaughtExceptionHandler((t, e) -> {
e.printStackTrace();
System.exit(2);
});
handleParseResult(parseArgs(args).getParseResult(), args);
}
public static void handleParseResult(ParseResult parseResult, String[] args) {
ExceptionHandler<Object> exHandler = new ExceptionHandler<>();
exHandler.andExit(1);
RunLast runLast = new RunLast() {
@Override
protected List<Object> handle(ParseResult parseResult) throws CommandLine.ExecutionException {
List<CommandLine> parsedCommands = parseResult.asCommandLineList();
CommandLine commandLine = parsedCommands.get(parsedCommands.size() - 1);
Object command = commandLine.getCommand();
try {
if (command instanceof CommandSpec && ((CommandSpec) command).userObject() instanceof Runnable) {
((Runnable) ((CommandSpec) command).userObject()).run();
return new ArrayList<>();
}
return super.handle(parseResult);
} catch (ParameterException ex) {
throw ex;
} catch (CommandLine.ExecutionException ex) {
throw ex;
} catch (Exception ex) {
throw new CommandLine.ExecutionException(commandLine,
"Error while running command (" + command + "): " + ex, ex);
}
}
};
try {
runLast.handleParseResult(parseResult);
} catch (ParameterException ex) {
exHandler.handleParseException(ex, args);
} catch (CommandLine.ExecutionException ex) {
exHandler.handleExecutionException(ex, parseResult);
}
}
public static CommandLine mkCmd() {
System.setProperty("picocli.usage.width", "100");
// Getting command string if executed from script
String command = System.getProperty("mixcr.command", "java -jar mixcr.jar");
if (!initialized) {
// Checking whether we are running a snapshot version
if (VersionInfo.getVersionInfoForArtifact("mixcr").getVersion().contains("SNAPSHOT"))
// If so, enable asserts
ClassLoader.getSystemClassLoader().setDefaultAssertionStatus(true);
TempFileManager.setPrefix("mixcr_");
Path cachePath = Paths.get(System.getProperty("user.home"), ".mixcr", "cache");
String repseqioCacheEnv = System.getenv("REPSEQIO_CACHE");
if (repseqioCacheEnv != null) {
cachePath = Paths.get(repseqioCacheEnv);
}
//if (System.getProperty("allow.http") != null || System.getenv("MIXCR_ALLOW_HTTP") != null)
//TODO add mechanism to deny http requests
SequenceResolvers.initDefaultResolver(cachePath);
Path libraries = Paths.get(System.getProperty("user.home"), ".mixcr", "libraries");
VDJCLibraryRegistry.getDefault().addPathResolverWithPartialSearch(".");
if (System.getProperty("mixcr.path") != null) {
Path bin = Paths.get(System.getProperty("mixcr.path"));
Path searchPath = bin.resolve("libraries");
if (Files.exists(searchPath))
VDJCLibraryRegistry.getDefault().addPathResolverWithPartialSearch(searchPath);
}
if (System.getProperty("library.path") != null)
VDJCLibraryRegistry.getDefault().addPathResolverWithPartialSearch(System.getProperty("library.path"));
if (System.getenv("MIXCR_LIBRARY_PATH") != null)
VDJCLibraryRegistry.getDefault().addPathResolverWithPartialSearch(System.getenv("MIXCR_LIBRARY_PATH"));
if (Files.exists(libraries))
VDJCLibraryRegistry.getDefault().addPathResolverWithPartialSearch(libraries);
initialized = true;
}
CommandLine cmd = new CommandLine(new CommandMain())
.setCommandName(command)
.addSubcommand("help", CommandLine.HelpCommand.class)
.addSubcommand("analyze", CommandAnalyze.CommandAnalyzeMain.class)
.addSubcommand("align", CommandAlign.class)
.addSubcommand("assemble", CommandAssemble.class)
.addSubcommand("assembleContigs", CommandAssembleContigs.class)
.addSubcommand("assemblePartial", CommandAssemblePartialAlignments.class)
.addSubcommand("extend", CommandExtend.class)
.addSubcommand("exportAlignments", CommandExport.mkAlignmentsSpec())
.addSubcommand("exportAlignmentsPretty", CommandExportAlignmentsPretty.class)
.addSubcommand("exportClones", CommandExport.mkClonesSpec())
.addSubcommand("exportClonesPretty", CommandExportClonesPretty.class)
.addSubcommand("exportReadsForClones", CommandExportReadsForClones.class)
.addSubcommand("exportAlignmentsForClones", CommandExportAlignmentsForClones.class)
.addSubcommand("exportReads", CommandExportReads.class)
.addSubcommand("mergeAlignments", CommandMergeAlignments.class)
.addSubcommand("filterAlignments", CommandFilterAlignments.class)
.addSubcommand("sortAlignments", CommandSortAlignments.class)
.addSubcommand("alignmentsDiff", CommandAlignmentsDiff.class)
.addSubcommand("clonesDiff", CommandClonesDiff.class)
.addSubcommand("alignmentsStat", CommandAlignmentsStats.class)
.addSubcommand("listLibraries", CommandListLibraries.class)
.addSubcommand("versionInfo", CommandVersionInfo.class)
.addSubcommand("pipelineInfo", CommandPipelineInfo.class)
.addSubcommand("slice", CommandSlice.class)
.addSubcommand("info", CommandInfo.class);
cmd.getSubcommands()
.get("analyze")
.addSubcommand("amplicon", CommandAnalyze.mkAmplicon())
.addSubcommand("shotgun", CommandAnalyze.mkShotgun());
cmd.setSeparator(" ");
return cmd;
}
public static CommandLine parseArgs(String... args) {
if (args.length == 0)
args = new String[]{"help"};
ExceptionHandler exHandler = new ExceptionHandler();
exHandler.andExit(1);
CommandLine cmd = mkCmd();
try {
cmd.parseArgs(args);
} catch (ParameterException ex) {
exHandler.handleParseException(ex, args);
}
return cmd;
}
public static class ExceptionHandler<R> extends CommandLine.DefaultExceptionHandler<R> {
@Override
public R handleParseException(ParameterException ex, String[] args) {
if (ex instanceof ValidationException && !((ValidationException) ex).printHelp) {
System.err.println(ex.getMessage());
return returnResultOrExit(null);
}
return super.handleParseException(ex, args);
}
}
}
| [
"\"REPSEQIO_CACHE\"",
"\"MIXCR_ALLOW_HTTP\"",
"\"MIXCR_LIBRARY_PATH\"",
"\"MIXCR_LIBRARY_PATH\""
]
| []
| [
"MIXCR_ALLOW_HTTP",
"MIXCR_LIBRARY_PATH",
"REPSEQIO_CACHE"
]
| [] | ["MIXCR_ALLOW_HTTP", "MIXCR_LIBRARY_PATH", "REPSEQIO_CACHE"] | java | 3 | 0 | |
pretrained-model/vocoder/mbmelgan/mbmelgan-female-generator.py | import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import tensorflow as tf
import numpy as np
from glob import glob
from itertools import cycle
mels = glob('../speech-bahasa/output-female-v2/mels/*.npy')
file_cycle = cycle(mels)
f = next(file_cycle)
import random
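# Infinite generator over the mel/audio pairs: each yielded example is cropped (or padded)
# so that audio has batch_max_steps samples and mel has batch_max_steps // hop_size frames,
# keeping the two aligned on the same random window.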
def generate(batch_max_steps=8192, hop_size=256):
while True:
f = next(file_cycle)
mel = np.load(f)
audio = np.load(f.replace('mels', 'audios'))
batch_max_frames = batch_max_steps // hop_size
if len(audio) < len(mel) * hop_size:
audio = np.pad(audio, [[0, len(mel) * hop_size - len(audio)]])
if len(mel) > batch_max_frames:
interval_start = 0
interval_end = len(mel) - batch_max_frames
start_frame = random.randint(interval_start, interval_end)
start_step = start_frame * hop_size
audio = audio[start_step: start_step + batch_max_steps]
mel = mel[start_frame: start_frame + batch_max_frames, :]
else:
audio = np.pad(audio, [[0, batch_max_steps - len(audio)]])
mel = np.pad(mel, [[0, batch_max_frames - len(mel)], [0, 0]])
yield {'mel': mel, 'audio': audio}
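# tf.data pipeline: wrap the generator, shuffle with a small buffer and pad each batch of 32
# examples to the longest audio/mel length within the batch.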
dataset = tf.data.Dataset.from_generator(
generate,
{'mel': tf.float32, 'audio': tf.float32},
output_shapes={
'mel': tf.TensorShape([None, 80]),
'audio': tf.TensorShape([None]),
},
)
dataset = dataset.shuffle(32)
dataset = dataset.padded_batch(
32,
padded_shapes={
'audio': tf.TensorShape([None]),
'mel': tf.TensorShape([None, 80]),
},
padding_values={
'audio': tf.constant(0, dtype=tf.float32),
'mel': tf.constant(0, dtype=tf.float32),
},
)
features = dataset.make_one_shot_iterator().get_next()
import malaya_speech
import malaya_speech.train
from malaya_speech.train.model import melgan, mb_melgan
from malaya_speech.train.model import stft
import malaya_speech.config
from malaya_speech.train.loss import calculate_2d_loss, calculate_3d_loss
mb_melgan_config = malaya_speech.config.mb_melgan_config
generator = melgan.Generator(
mb_melgan.GeneratorConfig(**mb_melgan_config['melgan_generator_params']),
name='mb_melgan-generator',
)
pqmf = mb_melgan.PQMF(
mb_melgan.GeneratorConfig(**mb_melgan_config['melgan_generator_params']),
dtype=tf.float32,
name='pqmf',
)
sub_band_stft_loss = stft.loss.MultiResolutionSTFT(
**mb_melgan_config['subband_stft_loss_params']
)
full_band_stft_loss = stft.loss.MultiResolutionSTFT(
**mb_melgan_config['stft_loss_params']
)
y_mb_hat = generator(features['mel'], training=True)
audios = features['audio']
y_hat = pqmf.synthesis(y_mb_hat)
y_mb = pqmf.analysis(tf.expand_dims(audios, -1))
y_mb = tf.transpose(y_mb, (0, 2, 1))
y_mb = tf.reshape(y_mb, (-1, tf.shape(y_mb)[-1]))
y_mb_hat = tf.transpose(y_mb_hat, (0, 2, 1))
y_mb_hat = tf.reshape(y_mb_hat, (-1, tf.shape(y_mb_hat)[-1]))
sub_sc_loss, sub_mag_loss = calculate_2d_loss(
y_mb, y_mb_hat, sub_band_stft_loss
)
sub_sc_loss = tf.reduce_mean(tf.reshape(sub_sc_loss, [-1, pqmf.subbands]), -1)
sub_mag_loss = tf.reduce_mean(tf.reshape(sub_mag_loss, [-1, pqmf.subbands]), -1)
full_sc_loss, full_mag_loss = calculate_2d_loss(
audios, tf.squeeze(y_hat, -1), full_band_stft_loss
)
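# Combine the sub-band and full-band multi-resolution STFT losses with equal weight,
# as in the Multi-band MelGAN pre-training objective (no discriminator in this script).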
generator_loss = 0.5 * (sub_sc_loss + sub_mag_loss) + 0.5 * (
full_sc_loss + full_mag_loss
)
generator_loss = tf.reduce_mean(generator_loss)
g_optimizer = tf.train.AdamOptimizer(0.0001, beta1=0.5, beta2=0.9).minimize(
generator_loss
)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
checkpoint = 10000
epoch = 200_000
path = 'mbmelgan-female'
ckpt_path = tf.train.latest_checkpoint(path)
if ckpt_path:
saver.restore(sess, ckpt_path)
print(f'restoring checkpoint from {ckpt_path}')
for i in range(0, epoch):
g_loss, _ = sess.run([generator_loss, g_optimizer])
if i % checkpoint == 0:
saver.save(sess, f'{path}/model.ckpt', global_step=i)
print(i, g_loss)
saver.save(sess, f'{path}/model.ckpt', global_step=epoch)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
slackbot_settings.py | import os
API_TOKEN = os.environ["SLACK_TOKEN"]
PLUGINS = ['plugins']
| []
| []
| [
"SLACK_TOKEN"
]
| [] | ["SLACK_TOKEN"] | python | 1 | 0 | |
base_wrapper.py | import boto3
from cloud_watch_logger import CloudWatchLogger
import os
region = os.environ['AWS_REGION']
def get_code_path():
return "/" + "/".join(__file__.split('/')[:-1])
class BaseWrapper(object):
"""
This class handles all anonymization for tabular data.
    It uses Glue, S3, IAM and STS to detect the data, put it into a table and then remove
the required information before passing the data to a new bucket.
"""
def __init__(self, aws_access_key_id, aws_secret_access_key, aws_session_token=None):
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.aws_session_token = aws_session_token
if aws_session_token is None:
self.s3_client = boto3.client('s3', region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
self.iam_client = boto3.client('iam', region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
self.sts_client = boto3.client('sts', region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
else:
self.s3_client = boto3.client('s3', region, aws_session_token=aws_session_token)
self.iam_client = boto3.client('iam', region, aws_session_token=aws_session_token)
self.sts_client = boto3.client('sts', region, aws_session_token=aws_session_token)
self.account_id = self.sts_client.get_caller_identity().get('Account')
self.logger = CloudWatchLogger(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
log_group='/aws/elastic-anonymization-service/jobs/output')
def _log_flush(self, base_name):
self.logger.flush(base_name)
def _log(self, message, response_dict=None):
self.logger.log(message=message, response_dict=response_dict)
print(message)
def create_bucket(self, bucket_name):
self._log("Creating bucket: " + bucket_name)
buckets = self.list_buckets()
if bucket_name in buckets:
self._log("Bucket exist: " + bucket_name)
return True
try:
response = self.s3_client.create_bucket(Bucket=bucket_name)
self._log("create_bucket response: ", response)
bucket_arn = response
except Exception as e:
self._log(str(e))
return False
return bucket_arn
def list_buckets(self):
response = self.s3_client.list_buckets()
buckets = []
for bucket in response['Buckets']:
buckets.append(bucket["Name"])
return buckets
def upload_to_s3(self, body, bucket, key):
self._log("Uploading to s3: " + bucket + "/" + key)
response = self.s3_client.put_object(
ACL='private',
Body=str.encode(body),
Bucket=bucket,
ContentEncoding='utf-8',
ContentType='application/x-python-code',
Key=key,
StorageClass='STANDARD'
)
self._log("put_object response: ", response) | []
| []
| [
"AWS_REGION"
]
| [] | ["AWS_REGION"] | python | 1 | 0 | |
cmd/common.go | package cmd
import (
"context"
"fmt"
"math"
"net/url"
"os"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
pqtazblob "github.com/xitongsys/parquet-go-source/azblob"
"github.com/xitongsys/parquet-go-source/gcs"
"github.com/xitongsys/parquet-go-source/http"
"github.com/xitongsys/parquet-go-source/local"
"github.com/xitongsys/parquet-go-source/s3"
"github.com/xitongsys/parquet-go/parquet"
"github.com/xitongsys/parquet-go/reader"
"github.com/xitongsys/parquet-go/source"
"github.com/xitongsys/parquet-go/types"
"github.com/xitongsys/parquet-go/writer"
)
// CommonOption represents common options across most commands
type CommonOption struct {
URI string `arg:"" predictor:"file" help:"URI of Parquet file, check https://github.com/hangxie/parquet-tools/blob/main/USAGE.md#parquet-file-location for more details."`
HttpMultipleConnection bool `help:"(HTTP endpoint only) use multiple HTTP connection." default:"false"`
HttpIgnoreTLSError bool `help:"(HTTP endpoint only) ignore TLS error." default:"false"`
HttpExtraHeaders map[string]string `mapsep:"," help:"(HTTP endpoint only) extra HTTP headers." default:""`
}
// Context represents command's context
type Context struct {
Version string
Build string
}
// ReinterpretField represents a field that needs to be re-interpretted before output
type ReinterpretField struct {
parquetType parquet.Type
convertedType parquet.ConvertedType
precision int
scale int
}
func parseURI(uri string) (*url.URL, error) {
u, err := url.Parse(uri)
if err != nil {
return nil, fmt.Errorf("unable to parse file location [%s]: %s", uri, err.Error())
}
if u.Scheme == "" {
u.Scheme = "file"
}
if u.Scheme == "file" {
u.Path = filepath.Join(u.Host, u.Path)
u.Host = ""
}
return u, nil
}
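// Illustrative examples of the normalization above (not part of the original behaviour docs):
//   "testdata/file.parquet"   -> scheme "file", Path "testdata/file.parquet"
//   "s3://my-bucket/key/file" -> scheme "s3", Host "my-bucket", Path "/key/file"
//   "file://relative/path"    -> scheme "file", Path "relative/path" (Host folded into Path)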
func getBucketRegion(bucket string) (string, error) {
// Get region of the S3 bucket
ctx := context.Background()
sess := session.Must(session.NewSession())
region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-east-1")
if err != nil {
if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
return "", fmt.Errorf("unable to find bucket %s's region", bucket)
}
return "", fmt.Errorf("AWS error: %s", err.Error())
}
return region, nil
}
func newParquetFileReader(option CommonOption) (*reader.ParquetReader, error) {
u, err := parseURI(option.URI)
if err != nil {
return nil, err
}
var fileReader source.ParquetFile
switch u.Scheme {
case "s3":
region, err := getBucketRegion(u.Host)
if err != nil {
return nil, err
}
fileReader, err = s3.NewS3FileReader(context.Background(), u.Host, strings.TrimLeft(u.Path, "/"), &aws.Config{Region: aws.String(region)})
if err != nil {
return nil, fmt.Errorf("failed to open S3 object [%s]: %s", option.URI, err.Error())
}
case "file":
fileReader, err = local.NewLocalFileReader(u.Path)
if err != nil {
return nil, fmt.Errorf("failed to open local file [%s]: %s", u.Path, err.Error())
}
case "gs":
fileReader, err = gcs.NewGcsFileReader(context.Background(), "", u.Host, strings.TrimLeft(u.Path, "/"))
if err != nil {
return nil, fmt.Errorf("failed to open GCS object [%s]: %s", option.URI, err.Error())
}
case "wasbs":
azURL, cred, err := azureAccessDetail(*u)
if err != nil {
return nil, err
}
fileReader, err = pqtazblob.NewAzBlobFileReader(context.Background(), azURL, cred, pqtazblob.ReaderOptions{})
if err != nil {
return nil, fmt.Errorf("failed to open Azure blob object [%s]: %s", option.URI, err.Error())
}
case "http", "https":
fileReader, err = http.NewHttpReader(option.URI, option.HttpMultipleConnection, option.HttpIgnoreTLSError, option.HttpExtraHeaders)
if err != nil {
return nil, fmt.Errorf("failed to open HTTP source [%s]: %s", option.URI, err.Error())
}
default:
return nil, fmt.Errorf("unknown location scheme [%s]", u.Scheme)
}
return reader.NewParquetReader(fileReader, nil, int64(runtime.NumCPU()))
}
func newFileWriter(option CommonOption) (source.ParquetFile, error) {
u, err := parseURI(option.URI)
if err != nil {
return nil, err
}
var fileWriter source.ParquetFile
switch u.Scheme {
case "s3":
region, err := getBucketRegion(u.Host)
if err != nil {
return nil, err
}
fileWriter, err = s3.NewS3FileWriter(context.Background(), u.Host, strings.TrimLeft(u.Path, "/"), "bucket-owner-full-control", nil, &aws.Config{Region: aws.String(region)})
if err != nil {
return nil, fmt.Errorf("failed to open S3 object [%s]: %s", option.URI, err.Error())
}
case "file":
fileWriter, err = local.NewLocalFileWriter(u.Path)
if err != nil {
return nil, fmt.Errorf("failed to open local file [%s]: %s", u.Path, err.Error())
}
case "gs":
fileWriter, err = gcs.NewGcsFileWriter(context.Background(), "", u.Host, strings.TrimLeft(u.Path, "/"))
if err != nil {
return nil, fmt.Errorf("failed to open GCS object [%s]: %s", option.URI, err.Error())
}
case "wasbs":
azURL, cred, err := azureAccessDetail(*u)
if err != nil {
return nil, err
}
fileWriter, err = pqtazblob.NewAzBlobFileWriter(context.Background(), azURL, cred, pqtazblob.WriterOptions{})
if err != nil {
return nil, fmt.Errorf("failed to open Azure blob object [%s]: %s", option.URI, err.Error())
}
case "http", "https":
return nil, fmt.Errorf("writing to %s endpoint is not currently supported", u.Scheme)
default:
return nil, fmt.Errorf("unknown location scheme [%s]", u.Scheme)
}
return fileWriter, nil
}
func newCSVWriter(option CommonOption, schema []string) (*writer.CSVWriter, error) {
fileWriter, err := newFileWriter(option)
if err != nil {
return nil, err
}
return writer.NewCSVWriter(schema, fileWriter, int64(runtime.NumCPU()))
}
func newJSONWriter(option CommonOption, schema string) (*writer.JSONWriter, error) {
fileWriter, err := newFileWriter(option)
if err != nil {
return nil, err
}
return writer.NewJSONWriter(schema, fileWriter, int64(runtime.NumCPU()))
}
func azureAccessDetail(azURL url.URL) (string, azblob.Credential, error) {
container := azURL.User.Username()
if azURL.Host == "" || container == "" || strings.HasSuffix(azURL.Path, "/") {
return "", nil, fmt.Errorf("azure blob URI format: wasbs://[email protected]/path/to/blob")
}
httpURL := fmt.Sprintf("https://%s/%s%s", azURL.Host, container, azURL.Path)
accessKey := os.Getenv("AZURE_STORAGE_ACCESS_KEY")
if accessKey == "" {
		// anonymous access
return httpURL, azblob.NewAnonymousCredential(), nil
}
credential, err := azblob.NewSharedKeyCredential(strings.Split(azURL.Host, ".")[0], accessKey)
if err != nil {
return "", nil, fmt.Errorf("failed to create Azure credential")
}
return httpURL, credential, nil
}
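// getReinterpretFields walks the schema tree and collects fields whose raw values need
// re-interpretation on output: INT96 timestamps and DECIMAL/INTERVAL encoded values.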
func getReinterpretFields(rootPath string, schemaRoot *schemaNode, noInterimLayer bool) map[string]ReinterpretField {
reinterpretFields := make(map[string]ReinterpretField)
for _, child := range schemaRoot.Children {
currentPath := rootPath + "." + child.Name
if child.Type == nil && child.ConvertedType == nil && child.NumChildren != nil {
// STRUCT
for k, v := range getReinterpretFields(currentPath, child, noInterimLayer) {
reinterpretFields[k] = v
}
continue
}
if child.Type != nil && *child.Type == parquet.Type_INT96 {
reinterpretFields[currentPath] = ReinterpretField{
parquetType: parquet.Type_INT96,
convertedType: parquet.ConvertedType_TIMESTAMP_MICROS,
precision: 0,
scale: 0,
}
continue
}
if child.ConvertedType != nil {
switch *child.ConvertedType {
case parquet.ConvertedType_MAP, parquet.ConvertedType_LIST:
if noInterimLayer {
child = child.Children[0]
}
fallthrough
case parquet.ConvertedType_MAP_KEY_VALUE:
for k, v := range getReinterpretFields(currentPath, child, noInterimLayer) {
reinterpretFields[k] = v
}
case parquet.ConvertedType_DECIMAL, parquet.ConvertedType_INTERVAL:
reinterpretFields[currentPath] = ReinterpretField{
parquetType: *child.Type,
convertedType: *child.ConvertedType,
precision: int(*child.Precision),
scale: int(*child.Scale),
}
}
}
}
return reinterpretFields
}
func decimalToFloat(fieldAttr ReinterpretField, iface interface{}) (*float64, error) {
if iface == nil {
return nil, nil
}
switch value := iface.(type) {
case int64:
f64 := float64(value) / math.Pow10(fieldAttr.scale)
return &f64, nil
case int32:
f64 := float64(value) / math.Pow10(fieldAttr.scale)
return &f64, nil
case string:
buf := stringToBytes(fieldAttr, value)
f64, err := strconv.ParseFloat(types.DECIMAL_BYTE_ARRAY_ToString(buf, fieldAttr.precision, fieldAttr.scale), 64)
if err != nil {
return nil, err
}
return &f64, nil
}
return nil, fmt.Errorf("unknown type: %s", reflect.TypeOf(iface))
}
func stringToBytes(fieldAttr ReinterpretField, value string) []byte {
buf := []byte(value)
if fieldAttr.convertedType == parquet.ConvertedType_INTERVAL {
for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
buf[i], buf[j] = buf[j], buf[i]
}
}
return buf
}
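// newSchemaTree rebuilds the nested schema tree from the flat, depth-first list of
// SchemaElements in the file metadata, using an explicit stack to attach children.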
func newSchemaTree(reader *reader.ParquetReader) *schemaNode {
schemas := reader.SchemaHandler.SchemaElements
stack := []*schemaNode{}
root := &schemaNode{
SchemaElement: *schemas[0],
Children: []*schemaNode{},
}
stack = append(stack, root)
pos := 1
for len(stack) > 0 {
node := stack[len(stack)-1]
if len(node.Children) < int(node.SchemaElement.GetNumChildren()) {
childNode := &schemaNode{
SchemaElement: *schemas[pos],
Children: []*schemaNode{},
}
node.Children = append(node.Children, childNode)
stack = append(stack, childNode)
pos++
} else {
stack = stack[:len(stack)-1]
if len(node.Children) == 0 {
node.Children = nil
}
}
}
return root
}
| [
"\"AZURE_STORAGE_ACCESS_KEY\""
]
| []
| [
"AZURE_STORAGE_ACCESS_KEY"
]
| [] | ["AZURE_STORAGE_ACCESS_KEY"] | go | 1 | 0 | |
awx/main/tasks.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from collections import OrderedDict, namedtuple
import errno
import functools
import importlib
import json
import logging
import os
import shutil
import stat
import tempfile
import time
import traceback
from distutils.dir_util import copy_tree
from distutils.version import LooseVersion as Version
import yaml
import fcntl
try:
import psutil
except Exception:
psutil = None
import urllib.parse as urlparse
# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now, timedelta
from django.utils.encoding import smart_str
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
# Django-CRUM
from crum import impersonate
# Runner
import ansible_runner
# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV
from awx.main.access import access_registry
from awx.main.models import (
Schedule, TowerScheduleState, Instance, InstanceGroup,
UnifiedJob, Notification,
Inventory, InventorySource, SmartInventoryMembership,
Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob,
JobEvent, ProjectUpdateEvent, InventoryUpdateEvent, AdHocCommandEvent, SystemJobEvent,
build_safe_env
)
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.isolated import manager as isolated_manager
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils import (get_ssh_version, update_scm_url,
get_licenser,
ignore_inventory_computed_fields,
ignore_inventory_group_removal, extract_ansible_vars, schedule_task_manager,
get_awx_version)
from awx.main.utils.common import _get_ansible_version, get_custom_venv_choices
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.conf.license import get_license
from rest_framework.exceptions import PermissionDenied
__all__ = ['RunJob', 'RunSystemJob', 'RunProjectUpdate', 'RunInventoryUpdate',
'RunAdHocCommand', 'handle_work_error', 'handle_work_success', 'apply_cluster_membership_policies',
'update_inventory_computed_fields', 'update_host_smart_inventory_memberships',
'send_notifications', 'run_administrative_checks', 'purge_old_stdout_files']
HIDDEN_PASSWORD = '**********'
OPENSSH_KEY_ERROR = u'''\
It looks like you're trying to use a private key in OpenSSH format, which \
isn't supported by the installed version of OpenSSH on this instance. \
Try upgrading OpenSSH or providing your private key in an different format. \
'''
logger = logging.getLogger('awx.main.tasks')
class InvalidVirtualenvError(Exception):
def __init__(self, message):
self.message = message
def dispatch_startup():
startup_logger = logging.getLogger('awx.main.tasks')
startup_logger.debug("Syncing Schedules")
for sch in Schedule.objects.all():
try:
sch.update_computed_fields()
except Exception:
logger.exception("Failed to rebuild schedule {}.".format(sch))
#
# When the dispatcher starts, if the instance cannot be found in the database,
# automatically register it. This is mostly useful for openshift-based
# deployments where:
#
    #   1. Two instances come online.
    #   2. Instance B encounters a network blip; Instance A notices and deprovisions it.
    #   3. Instance B's connectivity is restored, the dispatcher starts, and it re-registers itself.
#
# In traditional container-less deployments, instances don't get
# deprovisioned when they miss their heartbeat, so this code is mostly a
# no-op.
#
apply_cluster_membership_policies()
cluster_node_heartbeat()
if Instance.objects.me().is_controller():
awx_isolated_heartbeat()
def inform_cluster_of_shutdown():
try:
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
this_inst.capacity = 0 # No thank you to new jobs while shut down
this_inst.save(update_fields=['capacity', 'modified'])
try:
reaper.reap(this_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
logger.warning('Normal shutdown signal for instance {}, '
'removed self from capacity pool.'.format(this_inst.hostname))
except Exception:
logger.exception('Encountered problem with normal shutdown signal.')
@task()
def apply_cluster_membership_policies():
started_waiting = time.time()
with advisory_lock('cluster_policy_lock', wait=True):
lock_time = time.time() - started_waiting
if lock_time > 1.0:
to_log = logger.info
else:
to_log = logger.debug
to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
started_compute = time.time()
all_instances = list(Instance.objects.order_by('id'))
all_groups = list(InstanceGroup.objects.prefetch_related('instances'))
iso_hostnames = set([])
for ig in all_groups:
if ig.controller_id is not None:
iso_hostnames.update(ig.policy_instance_list)
considered_instances = [inst for inst in all_instances if inst.hostname not in iso_hostnames]
total_instances = len(considered_instances)
actual_groups = []
actual_instances = []
Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
Node = namedtuple('Instance', ['obj', 'groups'])
# Process policy instance list first, these will represent manually managed memberships
instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
for ig in all_groups:
group_actual = Group(obj=ig, instances=[], prior_instances=[
instance.pk for instance in ig.instances.all() # obtained in prefetch
])
for hostname in ig.policy_instance_list:
if hostname not in instance_hostnames_map:
logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
continue
inst = instance_hostnames_map[hostname]
group_actual.instances.append(inst.id)
# NOTE: arguable behavior: policy-list-group is not added to
# instance's group count for consideration in minimum-policy rules
if group_actual.instances:
logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))
if ig.controller_id is None:
actual_groups.append(group_actual)
else:
# For isolated groups, _only_ apply the policy_instance_list
# do not add to in-memory list, so minimum rules not applied
logger.debug('Committing instances to isolated group {}'.format(ig.name))
ig.instances.set(group_actual.instances)
# Process Instance minimum policies next, since it represents a concrete lower bound to the
# number of instances to make available to instance groups
actual_instances = [Node(obj=i, groups=[]) for i in considered_instances if i.managed_by_policy]
logger.debug("Total non-isolated instances:{} available for policy: {}".format(
total_instances, len(actual_instances)))
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
policy_min_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if len(g.instances) >= g.obj.policy_instance_minimum:
break
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via the policy list
continue
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_min_added.append(i.obj.id)
if policy_min_added:
logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))
# Finally, process instance policy percentages
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
policy_per_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via a minimum policy or policy list
continue
if 100 * float(len(g.instances)) / len(actual_instances) >= g.obj.policy_instance_percentage:
break
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_per_added.append(i.obj.id)
if policy_per_added:
logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))
# Determine if any changes need to be made
needs_change = False
for g in actual_groups:
if set(g.instances) != set(g.prior_instances):
needs_change = True
break
if not needs_change:
logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
return
# On a differential basis, apply instances to non-isolated groups
with transaction.atomic():
for g in actual_groups:
instances_to_add = set(g.instances) - set(g.prior_instances)
instances_to_remove = set(g.prior_instances) - set(g.instances)
if instances_to_add:
logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
g.obj.instances.add(*instances_to_add)
if instances_to_remove:
logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
g.obj.instances.remove(*instances_to_remove)
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task(queue='tower_broadcast_all', exchange_type='fanout')
def handle_setting_changes(setting_keys):
orig_len = len(setting_keys)
for i in range(orig_len):
for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
setting_keys.append(dependent_key)
cache_keys = set(setting_keys)
logger.debug('cache delete_many(%r)', cache_keys)
cache.delete_many(cache_keys)
@task(queue='tower_broadcast_all', exchange_type='fanout')
def delete_project_files(project_path):
# TODO: possibly implement some retry logic
lock_file = project_path + '.lock'
if os.path.exists(project_path):
try:
shutil.rmtree(project_path)
logger.debug('Success removing project files {}'.format(project_path))
except Exception:
logger.exception('Could not remove project directory {}'.format(project_path))
if os.path.exists(lock_file):
try:
os.remove(lock_file)
logger.debug('Success removing {}'.format(lock_file))
except Exception:
logger.exception('Could not remove lock file {}'.format(lock_file))
@task(queue='tower_broadcast_all', exchange_type='fanout')
def profile_sql(threshold=1, minutes=1):
if threshold == 0:
cache.delete('awx-profile-sql-threshold')
logger.error('SQL PROFILING DISABLED')
else:
cache.set(
'awx-profile-sql-threshold',
threshold,
timeout=minutes * 60
)
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
@task()
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
if job_id is not None:
job_actual = UnifiedJob.objects.get(id=job_id)
notifications = Notification.objects.filter(id__in=notification_list)
if job_id is not None:
job_actual.notifications.add(*notifications)
for notification in notifications:
update_fields = ['status', 'notifications_sent']
try:
sent = notification.notification_template.send(notification.subject, notification.body)
notification.status = "successful"
notification.notifications_sent = sent
except Exception as e:
logger.error("Send Notification Failed {}".format(e))
notification.status = "failed"
notification.error = smart_str(e)
update_fields.append('error')
finally:
try:
notification.save(update_fields=update_fields)
except Exception:
logger.exception('Error saving notification {} result.'.format(notification.id))
@task()
def gather_analytics():
if settings.PENDO_TRACKING_STATE == 'off':
return
    tgz = None
    try:
        tgz = analytics.gather()
        logger.debug('gathered analytics: {}'.format(tgz))
        analytics.ship(tgz)
    finally:
        # guard against gather() raising before tgz is assigned
        if tgz and os.path.exists(tgz):
            os.remove(tgz)
@task()
def run_administrative_checks():
logger.warn("Running administrative checks.")
if not settings.TOWER_ADMIN_ALERTS:
return
validation_info = get_licenser().validate()
if validation_info['license_type'] != 'open' and validation_info.get('instance_count', 0) < 1:
return
used_percentage = float(validation_info.get('current_instances', 0)) / float(validation_info.get('instance_count', 100))
tower_admin_emails = User.objects.filter(is_superuser=True).values_list('email', flat=True)
if (used_percentage * 100) > 90:
send_mail("Ansible Tower host usage over 90%",
_("Ansible Tower host usage over 90%"),
tower_admin_emails,
fail_silently=True)
if validation_info.get('date_warning', False):
send_mail("Ansible Tower license will expire soon",
_("Ansible Tower license will expire soon"),
tower_admin_emails,
fail_silently=True)
@task(queue=get_local_queuename)
def purge_old_stdout_files():
nowtime = time.time()
for f in os.listdir(settings.JOBOUTPUT_ROOT):
if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT,f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME:
os.unlink(os.path.join(settings.JOBOUTPUT_ROOT,f))
logger.debug("Removing {}".format(os.path.join(settings.JOBOUTPUT_ROOT,f)))
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
logger.debug("Cluster node heartbeat task.")
nowtime = now()
instance_list = list(Instance.objects.all_non_isolated())
this_inst = None
lost_instances = []
(changed, instance) = Instance.objects.get_or_register()
if changed:
logger.info("Registered tower node '{}'".format(instance.hostname))
for inst in list(instance_list):
if inst.hostname == settings.CLUSTER_HOST_ID:
this_inst = inst
instance_list.remove(inst)
elif inst.is_lost(ref_time=nowtime):
lost_instances.append(inst)
instance_list.remove(inst)
if this_inst:
startup_event = this_inst.is_lost(ref_time=nowtime)
this_inst.refresh_capacity()
if startup_event:
logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
return
else:
raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
# IFF any node has a greater version than we do, then we'll shutdown services
for other_inst in instance_list:
if other_inst.version == "":
continue
if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
logger.error("Host {} reports version {}, but this node {} is at {}, shutting down".format(
other_inst.hostname,
other_inst.version,
this_inst.hostname,
this_inst.version
))
# Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
# The heartbeat task will reset the capacity to the system capacity after upgrade.
stop_local_services(communicate=False)
raise RuntimeError("Shutting down.")
for other_inst in lost_instances:
try:
reaper.reap(other_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
try:
# Capacity could already be 0 because:
# * It's a new node and it never had a heartbeat
# * It was set to 0 by another tower node running this method
# * It was set to 0 by this node, but auto deprovisioning is off
#
            # If auto deprovisioning is on, don't bother setting the capacity to 0
# since we will delete the node anyway.
if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
other_inst.capacity = 0
other_inst.save(update_fields=['capacity'])
logger.error("Host {} last checked in at {}, marked as lost.".format(
other_inst.hostname, other_inst.modified))
elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
deprovision_hostname = other_inst.hostname
other_inst.delete()
logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
else:
logger.exception('Error marking {} as lost'.format(other_inst.hostname))
@task(queue=get_local_queuename)
def awx_isolated_heartbeat():
local_hostname = settings.CLUSTER_HOST_ID
logger.debug("Controlling node checking for any isolated management tasks.")
poll_interval = settings.AWX_ISOLATED_PERIODIC_CHECK
# Get isolated instances not checked since poll interval - some buffer
nowtime = now()
accept_before = nowtime - timedelta(seconds=(poll_interval - 10))
isolated_instance_qs = Instance.objects.filter(
rampart_groups__controller__instances__hostname=local_hostname,
)
isolated_instance_qs = isolated_instance_qs.filter(
last_isolated_check__lt=accept_before
) | isolated_instance_qs.filter(
last_isolated_check=None
)
# Fast pass of isolated instances, claiming the nodes to update
with transaction.atomic():
for isolated_instance in isolated_instance_qs:
isolated_instance.last_isolated_check = nowtime
# Prevent modified time from being changed, as in normal heartbeat
isolated_instance.save(update_fields=['last_isolated_check'])
# Slow pass looping over isolated IGs and their isolated instances
if len(isolated_instance_qs) > 0:
logger.debug("Managing isolated instances {}.".format(','.join([inst.hostname for inst in isolated_instance_qs])))
isolated_manager.IsolatedManager().health_check(isolated_instance_qs)
@task()
def awx_periodic_scheduler():
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
if acquired is False:
logger.debug("Not running periodic scheduler, another task holds lock")
return
logger.debug("Starting periodic scheduler")
run_now = now()
state = TowerScheduleState.get_solo()
last_run = state.schedule_last_run
logger.debug("Last scheduler run was: %s", last_run)
state.schedule_last_run = run_now
state.save()
old_schedules = Schedule.objects.enabled().before(last_run)
for schedule in old_schedules:
schedule.update_computed_fields()
schedules = Schedule.objects.enabled().between(last_run, run_now)
invalid_license = False
try:
access_registry[Job](None).check_license()
except PermissionDenied as e:
invalid_license = e
for schedule in schedules:
template = schedule.unified_job_template
schedule.update_computed_fields() # To update next_run timestamp.
if template.cache_timeout_blocked:
logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
continue
try:
job_kwargs = schedule.get_job_kwargs()
new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
logger.debug('Spawned {} from schedule {}-{}.'.format(
new_unified_job.log_format, schedule.name, schedule.pk))
if invalid_license:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = str(invalid_license)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
raise invalid_license
can_start = new_unified_job.signal_start()
except Exception:
logger.exception('Error spawning scheduled job.')
continue
if not can_start:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = "Scheduled job could not start because it was not in the right state or required manual credentials"
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
state.save()
@task()
def handle_work_success(task_actual):
try:
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
return
if not instance:
return
schedule_task_manager()
@task()
def handle_work_error(task_id, *args, **kwargs):
subtasks = kwargs.get('subtasks', None)
logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
first_instance = None
first_instance_type = ''
if subtasks is not None:
for each_task in subtasks:
try:
instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
if not instance:
# Unknown task type
logger.warn("Unknown task type: {}".format(each_task['type']))
continue
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
continue
if first_instance is None:
first_instance = instance
first_instance_type = each_task['type']
if instance.celery_task_id != task_id and not instance.cancel_flag:
instance.status = 'failed'
instance.failed = True
if not instance.job_explanation:
instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \
(first_instance_type, first_instance.name, first_instance.id)
instance.save()
instance.websocket_emit_status("failed")
# We only send 1 job complete message since all the job completion message
# handling does is trigger the scheduler. If we extend the functionality of
# what the job complete message handler does then we may want to send a
# completion event for each job here.
if first_instance:
schedule_task_manager()
pass
@task()
def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
'''
Signal handler and wrapper around inventory.update_computed_fields to
prevent unnecessary recursive calls.
'''
i = Inventory.objects.filter(id=inventory_id)
if not i.exists():
logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id))
return
i = i[0]
try:
i.update_computed_fields(update_hosts=should_update_hosts)
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
return
raise
@task()
def update_host_smart_inventory_memberships():
try:
with transaction.atomic():
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
SmartInventoryMembership.objects.all().delete()
memberships = []
changed_inventories = set([])
for smart_inventory in smart_inventories:
add_for_inventory = [
SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id[0])
for host_id in smart_inventory.hosts.values_list('id')
]
memberships.extend(add_for_inventory)
if add_for_inventory:
changed_inventories.add(smart_inventory)
SmartInventoryMembership.objects.bulk_create(memberships)
except IntegrityError as e:
logger.error("Update Host Smart Inventory Memberships failed due to an exception: {}".format(e))
return
# Update computed fields for changed inventories outside atomic action
for smart_inventory in changed_inventories:
smart_inventory.update_computed_fields(update_groups=False, update_hosts=False)
@task()
def delete_inventory(inventory_id, user_id, retries=5):
# Delete inventory as user
if user_id is None:
user = None
else:
try:
user = User.objects.get(id=user_id)
except Exception:
user = None
with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
try:
i = Inventory.objects.get(id=inventory_id)
for host in i.hosts.iterator():
host.job_events_as_primary_host.update(host=None)
i.delete()
emit_channel_notification(
'inventories-status_changed',
{'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'}
)
logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
except Inventory.DoesNotExist:
logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
return
except DatabaseError:
logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
if retries > 0:
time.sleep(10)
delete_inventory(inventory_id, user_id, retries=retries - 1)
def with_path_cleanup(f):
@functools.wraps(f)
def _wrapped(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
finally:
for p in self.cleanup_paths:
try:
if os.path.isdir(p):
shutil.rmtree(p, ignore_errors=True)
elif os.path.exists(p):
os.remove(p)
except OSError:
logger.exception("Failed to remove tmp file: {}".format(p))
self.cleanup_paths = []
return _wrapped
class BaseTask(object):
model = None
event_model = None
abstract = True
cleanup_paths = []
proot_show_paths = []
def update_model(self, pk, _attempt=0, **updates):
"""Reload the model instance from the database and update the
given fields.
"""
try:
with transaction.atomic():
# Retrieve the model instance.
instance = self.model.objects.get(pk=pk)
# Update the appropriate fields and save the model
# instance, then return the new instance.
if updates:
update_fields = ['modified']
for field, value in updates.items():
setattr(instance, field, value)
update_fields.append(field)
if field == 'status':
update_fields.append('failed')
instance.save(update_fields=update_fields)
return instance
except DatabaseError as e:
# Log out the error to the debug logger.
logger.debug('Database error updating %s, retrying in 5 '
'seconds (retry #%d): %s',
self.model._meta.object_name, _attempt + 1, e)
# Attempt to retry the update, assuming we haven't already
# tried too many times.
if _attempt < 5:
time.sleep(5)
return self.update_model(
pk,
_attempt=_attempt + 1,
**updates
)
else:
logger.error('Failed to update %s after %d retries.',
self.model._meta.object_name, _attempt)
def get_ansible_version(self, instance):
if not hasattr(self, '_ansible_version'):
self._ansible_version = _get_ansible_version(
ansible_path=self.get_path_to_ansible(instance, executable='ansible'))
return self._ansible_version
def get_path_to(self, *args):
'''
Return absolute path relative to this file.
'''
return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))
def get_path_to_ansible(self, instance, executable='ansible-playbook', **kwargs):
venv_path = getattr(instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH)
venv_exe = os.path.join(venv_path, 'bin', executable)
if os.path.exists(venv_exe):
return venv_exe
return shutil.which(executable)
def build_private_data(self, instance, private_data_dir):
'''
Return SSH private key data (only if stored in DB as ssh_key_data).
        Return structure is a dict of the form:
        {
            'credentials': {
                <awx.main.models.Credential>: <decrypted private key data>,
                ...
            },
            'certificates': {
                <awx.main.models.Credential>: <signed SSH certificate data>,
                ...
            }
        }
        '''
def build_private_data_dir(self, instance):
'''
Create a temporary directory for job-related files.
'''
path = tempfile.mkdtemp(prefix='awx_%s_' % instance.pk, dir=settings.AWX_PROOT_BASE_PATH)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
if settings.AWX_CLEANUP_PATHS:
self.cleanup_paths.append(path)
# Ansible Runner requires that this directory exists.
# Specifically, when using process isolation
os.mkdir(os.path.join(path, 'project'))
return path
def build_private_data_files(self, instance, private_data_dir):
'''
Creates temporary files containing the private data.
Returns a dictionary i.e.,
{
'credentials': {
<awx.main.models.Credential>: '/path/to/decrypted/data',
<awx.main.models.Credential>: '/path/to/decrypted/data',
...
},
'certificates': {
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
...
}
}
'''
private_data = self.build_private_data(instance, private_data_dir)
private_data_files = {'credentials': {}}
if private_data is not None:
ssh_ver = get_ssh_version()
ssh_too_old = True if ssh_ver == "unknown" else Version(ssh_ver) < Version("6.0")
openssh_keys_supported = ssh_ver != "unknown" and Version(ssh_ver) >= Version("6.5")
for credential, data in private_data.get('credentials', {}).items():
# Bail out now if a private key was provided in OpenSSH format
# and we're running an earlier version (<6.5).
if 'OPENSSH PRIVATE KEY' in data and not openssh_keys_supported:
raise RuntimeError(OPENSSH_KEY_ERROR)
# OpenSSH formatted keys must have a trailing newline to be
# accepted by ssh-add.
if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
data += '\n'
# For credentials used with ssh-add, write to a named pipe which
# will be read then closed, instead of leaving the SSH key on disk.
if credential and credential.kind in ('ssh', 'scm') and not ssh_too_old:
try:
os.mkdir(os.path.join(private_data_dir, 'env'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(private_data_dir, 'env', 'ssh_key')
ansible_runner.utils.open_fifo_write(path, data.encode())
private_data_files['credentials']['ssh'] = path
# Ansible network modules do not yet support ssh-agent.
# Instead, ssh private key file is explicitly passed via an
# env variable.
else:
handle, path = tempfile.mkstemp(dir=private_data_dir)
f = os.fdopen(handle, 'w')
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
private_data_files['credentials'][credential] = path
for credential, data in private_data.get('certificates', {}).items():
name = 'credential_%d-cert.pub' % credential.pk
path = os.path.join(private_data_dir, name)
with open(path, 'w') as f:
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
return private_data_files
def build_passwords(self, instance, runtime_passwords):
'''
Build a dictionary of passwords for responding to prompts.
'''
return {
'yes': 'yes',
'no': 'no',
'': '',
}
def build_extra_vars_file(self, instance, private_data_dir):
'''
        Build ansible yaml file filled with extra vars to be passed via -e@file.yml
'''
def build_params_process_isolation(self, instance, private_data_dir, cwd):
'''
Build ansible runner .run() parameters for process isolation.
'''
process_isolation_params = dict()
if self.should_use_proot(instance):
process_isolation_params = {
'process_isolation': True,
'process_isolation_path': settings.AWX_PROOT_BASE_PATH,
'process_isolation_show_paths': self.proot_show_paths + [private_data_dir, cwd] + settings.AWX_PROOT_SHOW_PATHS,
'process_isolation_hide_paths': [
settings.AWX_PROOT_BASE_PATH,
'/etc/tower',
'/etc/ssh',
'/var/lib/awx',
'/var/log',
settings.PROJECTS_ROOT,
settings.JOBOUTPUT_ROOT,
                ] + (getattr(settings, 'AWX_PROOT_HIDE_PATHS', None) or []),
'process_isolation_ro_paths': [settings.ANSIBLE_VENV_PATH, settings.AWX_VENV_PATH],
}
if getattr(instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH) != settings.ANSIBLE_VENV_PATH:
process_isolation_params['process_isolation_ro_paths'].append(instance.ansible_virtualenv_path)
return process_isolation_params
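    # Writes extra vars to <private_data_dir>/env/extravars; unless ALLOW_JINJA_IN_EXTRA_VARS is
    # 'always', values are serialized with safe_dump so untrusted strings are marked and not
    # rendered as Jinja templates.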
def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'extravars')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
f.write(yaml.safe_dump(vars))
else:
f.write(safe_dump(vars, safe_dict))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def add_ansible_venv(self, venv_path, env, isolated=False):
env['VIRTUAL_ENV'] = venv_path
env['PATH'] = os.path.join(venv_path, "bin") + ":" + env['PATH']
venv_libdir = os.path.join(venv_path, "lib")
if not isolated and (
not os.path.exists(venv_libdir) or
os.path.join(venv_path, '') not in get_custom_venv_choices()
):
raise InvalidVirtualenvError(_(
'Invalid virtual environment selected: {}'.format(venv_path)
))
isolated_manager.set_pythonpath(venv_libdir, env)
def add_awx_venv(self, env):
env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin") + ":" + env['PATH']
def build_env(self, instance, private_data_dir, isolated, private_data_files=None):
'''
Build environment dictionary for ansible-playbook.
'''
env = dict(os.environ.items())
# Add ANSIBLE_* settings to the subprocess environment.
for attr in dir(settings):
if attr == attr.upper() and attr.startswith('ANSIBLE_'):
env[attr] = str(getattr(settings, attr))
# Also set environment variables configured in AWX_TASK_ENV setting.
for key, value in settings.AWX_TASK_ENV.items():
env[key] = str(value)
# Set environment variables needed for inventory and job event
# callbacks to work.
# Update PYTHONPATH to use local site-packages.
# NOTE:
# Derived class should call add_ansible_venv() or add_awx_venv()
if self.should_use_proot(instance):
env['PROOT_TMP_DIR'] = settings.AWX_PROOT_BASE_PATH
env['AWX_PRIVATE_DATA_DIR'] = private_data_dir
return env
def should_use_proot(self, instance):
'''
Return whether this task should use proot.
'''
return False
def build_inventory(self, instance, private_data_dir):
script_params = dict(hostvars=True)
if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count
script_data = instance.inventory.get_script_data(**script_params)
json_data = json.dumps(script_data)
handle, path = tempfile.mkstemp(dir=private_data_dir)
f = os.fdopen(handle, 'w')
f.write('#! /usr/bin/env python\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)
return path
def build_args(self, instance, private_data_dir, passwords):
raise NotImplementedError
def write_args_file(self, private_data_dir, args):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'cmdline')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(ansible_runner.utils.args2cmdline(*args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_cwd(self, instance, private_data_dir):
raise NotImplementedError
def build_credentials_list(self, instance):
return []
def get_instance_timeout(self, instance):
global_timeout_setting_name = instance._global_timeout_setting()
if global_timeout_setting_name:
global_timeout = getattr(settings, global_timeout_setting_name, 0)
local_timeout = getattr(instance, 'timeout', 0)
job_timeout = global_timeout if local_timeout == 0 else local_timeout
job_timeout = 0 if local_timeout < 0 else job_timeout
else:
job_timeout = 0
return job_timeout
def get_password_prompts(self, passwords={}):
'''
Return a dictionary where keys are strings or regular expressions for
prompts, and values are password lookup keys (keys that are returned
from build_passwords).
'''
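        # Illustrative example of what a subclass might return (keys/values here are assumptions):
        #   OrderedDict([(r'Vault password:\s*?$', 'vault_password'), (r'SSH password:\s*?$', 'ssh_password')])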
return OrderedDict()
def create_expect_passwords_data_struct(self, password_prompts, passwords):
expect_passwords = {}
for k, v in password_prompts.items():
expect_passwords[k] = passwords.get(v, '') or ''
return expect_passwords
def pre_run_hook(self, instance):
'''
Hook for any steps to run before the job/task starts
'''
def post_run_hook(self, instance, status):
'''
Hook for any steps to run before job/task is marked as complete.
'''
def final_run_hook(self, instance, status, private_data_dir, fact_modification_times, isolated_manager_instance=None):
'''
Hook for any steps to run after job/task is marked as complete.
'''
def event_handler(self, event_data):
#
# ⚠️ D-D-D-DANGER ZONE ⚠️
# This method is called once for *every event* emitted by Ansible
# Runner as a playbook runs. That means that changes to the code in
# this method are _very_ likely to introduce performance regressions.
#
# Even if this function is made on average .05s slower, it can have
# devastating performance implications for playbooks that emit
# tens or hundreds of thousands of events.
#
# Proceed with caution!
#
'''
Ansible runner puts a parent_uuid on each event, no matter what the type.
AWX only saves the parent_uuid if the event is for a Job.
'''
if event_data.get(self.event_data_key, None):
if self.event_data_key != 'job_id':
event_data.pop('parent_uuid', None)
should_write_event = False
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
self.event_ct += 1
'''
Handle artifacts
'''
if event_data.get('event_data', {}).get('artifact_data', {}):
self.instance.artifacts = event_data['event_data']['artifact_data']
self.instance.save(update_fields=['artifacts'])
return should_write_event
def cancel_callback(self):
'''
Ansible runner callback to tell the job when/if it is canceled
'''
self.instance = self.update_model(self.instance.pk)
if self.instance.cancel_flag or self.instance.status == 'canceled':
cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
if cancel_wait > 5:
logger.warn('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
return True
return False
def finished_callback(self, runner_obj):
'''
Ansible runner callback triggered on finished run
'''
event_data = {
'event': 'EOF',
'final_counter': self.event_ct,
}
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
def status_handler(self, status_data, runner_config):
'''
Ansible runner callback triggered on status transition
'''
if status_data['status'] == 'starting':
job_env = dict(runner_config.env)
'''
Take the safe environment variables and overwrite
'''
for k, v in self.safe_env.items():
if k in job_env:
job_env[k] = v
self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command),
job_cwd=runner_config.cwd, job_env=job_env)
def check_handler(self, config):
'''
IsolatedManager callback triggered by the repeated checks of the isolated node
'''
job_env = build_safe_env(config['env'])
for k, v in self.safe_cred_env.items():
if k in job_env:
job_env[k] = v
self.instance = self.update_model(self.instance.pk,
job_args=json.dumps(config['command']),
job_cwd=config['cwd'],
job_env=job_env)
@with_path_cleanup
def run(self, pk, **kwargs):
'''
Run the job/task and capture its output.
'''
        # self.instance is used (rather than a local) because of the update_model pattern and because the callback handlers need it
self.instance = self.update_model(pk, status='running',
start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running")
status, rc = 'error', None
extra_update_fields = {}
fact_modification_times = {}
self.event_ct = 0
'''
Needs to be an object property because status_handler uses it in a callback context
'''
self.safe_env = {}
self.safe_cred_env = {}
private_data_dir = None
isolated_manager_instance = None
try:
isolated = self.instance.is_isolated()
self.pre_run_hook(self.instance)
if self.instance.cancel_flag:
self.instance = self.update_model(self.instance.pk, status='canceled')
if self.instance.status != 'running':
# Stop the task chain and prevent starting the job if it has
# already been canceled.
self.instance = self.update_model(pk)
status = self.instance.status
raise RuntimeError('not starting %s task' % self.instance.status)
if not os.path.exists(settings.AWX_PROOT_BASE_PATH):
raise RuntimeError('AWX_PROOT_BASE_PATH=%s does not exist' % settings.AWX_PROOT_BASE_PATH)
# store a record of the venv used at runtime
if hasattr(self.instance, 'custom_virtualenv'):
self.update_model(pk, custom_virtualenv=getattr(self.instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH))
private_data_dir = self.build_private_data_dir(self.instance)
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if getattr(self.instance, 'use_fact_cache', False):
self.instance.start_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
fact_modification_times,
)
# May have to serialize the value
private_data_files = self.build_private_data_files(self.instance, private_data_dir)
passwords = self.build_passwords(self.instance, kwargs)
self.build_extra_vars_file(self.instance, private_data_dir)
args = self.build_args(self.instance, private_data_dir, passwords)
cwd = self.build_cwd(self.instance, private_data_dir)
process_isolation_params = self.build_params_process_isolation(self.instance,
private_data_dir,
cwd)
env = self.build_env(self.instance, private_data_dir, isolated,
private_data_files=private_data_files)
self.safe_env = build_safe_env(env)
credentials = self.build_credentials_list(self.instance)
for credential in credentials:
if credential:
credential.credential_type.inject_credential(
credential, env, self.safe_cred_env, args, private_data_dir
)
self.safe_env.update(self.safe_cred_env)
self.write_args_file(private_data_dir, args)
password_prompts = self.get_password_prompts(passwords)
expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords)
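            # Assemble the full parameter set handed to ansible-runner.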
params = {
'ident': self.instance.id,
'private_data_dir': private_data_dir,
'project_dir': cwd,
'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
'inventory': self.build_inventory(self.instance, private_data_dir),
'passwords': expect_passwords,
'envvars': env,
'event_handler': self.event_handler,
'cancel_callback': self.cancel_callback,
'finished_callback': self.finished_callback,
'status_handler': self.status_handler,
'settings': {
'job_timeout': self.get_instance_timeout(self.instance),
'pexpect_timeout': getattr(settings, 'PEXPECT_TIMEOUT', 5),
'suppress_ansible_output': True,
**process_isolation_params,
},
}
if isinstance(self.instance, AdHocCommand):
params['module'] = self.build_module_name(self.instance)
params['module_args'] = self.build_module_args(self.instance)
if getattr(self.instance, 'use_fact_cache', False):
# Enable Ansible fact cache.
params['fact_cache_type'] = 'jsonfile'
else:
# Disable Ansible fact cache.
params['fact_cache_type'] = ''
'''
Delete parameters if the values are None or empty array
'''
for v in ['passwords', 'playbook', 'inventory']:
if not params[v]:
del params[v]
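            # Isolated execution: dump the params as runner artifacts and delegate
            # the run to the IsolatedManager on the isolated node.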
if self.instance.is_isolated() is True:
module_args = None
if 'module_args' in params:
# if it's adhoc, copy the module args
module_args = ansible_runner.utils.args2cmdline(
params.get('module_args'),
)
else:
# otherwise, it's a playbook, so copy the project dir
copy_tree(cwd, os.path.join(private_data_dir, 'project'))
shutil.move(
params.pop('inventory'),
os.path.join(private_data_dir, 'inventory')
)
ansible_runner.utils.dump_artifacts(params)
isolated_manager_instance = isolated_manager.IsolatedManager(
cancelled_callback=lambda: self.update_model(self.instance.pk).cancel_flag,
check_callback=self.check_handler,
)
status, rc = isolated_manager_instance.run(self.instance,
private_data_dir,
params.get('playbook'),
params.get('module'),
module_args,
event_data_key=self.event_data_key,
ident=str(self.instance.pk))
self.event_ct = len(isolated_manager_instance.handled_events)
else:
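                # Local (non-isolated) execution: invoke ansible-runner directly and
                # stream events through the callback queue dispatcher.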
self.dispatcher = CallbackQueueDispatcher()
res = ansible_runner.interface.run(**params)
status = res.status
rc = res.rc
if status == 'timeout':
self.instance.job_explanation = "Job terminated due to timeout"
status = 'failed'
extra_update_fields['job_explanation'] = self.instance.job_explanation
except InvalidVirtualenvError as e:
extra_update_fields['job_explanation'] = e.message
logger.error('{} {}'.format(self.instance.log_format, e.message))
except Exception:
# this could catch programming or file system errors
extra_update_fields['result_traceback'] = traceback.format_exc()
logger.exception('%s Exception occurred while running task', self.instance.log_format)
finally:
logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.event_ct)
try:
self.post_run_hook(self.instance, status)
except Exception:
logger.exception('{} Post run hook errored.'.format(self.instance.log_format))
self.instance = self.update_model(pk)
self.instance = self.update_model(pk, status=status,
emitted_events=self.event_ct,
**extra_update_fields)
try:
self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times, isolated_manager_instance=isolated_manager_instance)
except Exception:
logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
self.instance.websocket_emit_status(status)
if status != 'successful':
if status == 'canceled':
raise AwxTaskError.TaskCancel(self.instance, rc)
else:
raise AwxTaskError.TaskError(self.instance, rc)
@task()
class RunJob(BaseTask):
'''
Run a job using ansible-playbook.
'''
model = Job
event_model = JobEvent
event_data_key = 'job_id'
def build_private_data(self, job, private_data_dir):
'''
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
'''
private_data = {'credentials': {}}
for credential in job.credentials.prefetch_related('input_sources__source_credential').all():
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
if credential.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, job, runtime_passwords):
'''
Build a dictionary of passwords for SSH private key, SSH user, sudo/su
and ansible-vault.
'''
passwords = super(RunJob, self).build_passwords(job, runtime_passwords)
cred = job.get_deprecated_credential('ssh')
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
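        # Gather vault passwords; credentials with a vault_id are keyed as
        # vault_password.<vault_id> so multiple vaults can be prompted for.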
for cred in job.vault_credentials:
field = 'vault_password'
vault_id = cred.get_input('vault_id', default=None)
if vault_id:
field = 'vault_password.{}'.format(vault_id)
if field in passwords:
raise RuntimeError(
'multiple vault credentials were specified with --vault-id {}@prompt'.format(
vault_id
)
)
value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))
if value not in ('', 'ASK'):
passwords[field] = value
'''
Only 1 value can be provided for a unique prompt string. Prefer ssh
key unlock over network key unlock.
'''
if 'ssh_key_unlock' not in passwords:
for cred in job.network_credentials:
if cred.inputs.get('ssh_key_unlock'):
passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))
break
return passwords
def add_ansible_venv(self, venv_path, env, isolated=False):
super(RunJob, self).add_ansible_venv(venv_path, env, isolated=isolated)
# Add awx/lib to PYTHONPATH.
env['PYTHONPATH'] = env.get('PYTHONPATH', '') + self.get_path_to('..', 'lib') + ':'
def build_env(self, job, private_data_dir, isolated=False, private_data_files=None):
'''
Build environment dictionary for ansible-playbook.
'''
plugin_dir = self.get_path_to('..', 'plugins', 'callback')
plugin_dirs = [plugin_dir]
if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and \
settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
plugin_dirs.extend(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
plugin_path = ':'.join(plugin_dirs)
env = super(RunJob, self).build_env(job, private_data_dir,
isolated=isolated,
private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
self.add_ansible_venv(job.ansible_virtualenv_path, env, isolated=isolated)
# Set environment variables needed for inventory and job event
# callbacks to work.
env['JOB_ID'] = str(job.pk)
env['INVENTORY_ID'] = str(job.inventory.pk)
if job.use_fact_cache:
library_path = env.get('ANSIBLE_LIBRARY')
env['ANSIBLE_LIBRARY'] = ':'.join(
filter(None, [
library_path,
self.get_path_to('..', 'plugins', 'library')
])
)
if job.project:
env['PROJECT_REVISION'] = job.project.scm_revision
env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
if not isolated:
env['ANSIBLE_CALLBACK_PLUGINS'] = plugin_path
env['AWX_HOST'] = settings.TOWER_URL_BASE
# Create a directory for ControlPath sockets that is unique to each
# job and visible inside the proot environment (when enabled).
cp_dir = os.path.join(private_data_dir, 'cp')
if not os.path.exists(cp_dir):
os.mkdir(cp_dir, 0o700)
env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = cp_dir
# Set environment variables for cloud credentials.
cred_files = private_data_files.get('credentials', {})
for network_cred in job.network_credentials:
env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')
env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')
ssh_keyfile = cred_files.get(network_cred, '')
if ssh_keyfile:
env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile
authorize = network_cred.get_input('authorize', default=False)
env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
if authorize:
env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')
return env
def build_args(self, job, private_data_dir, passwords):
'''
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
'''
creds = job.get_deprecated_credential('ssh')
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible-playbook's default of using
# the current user.
ssh_username = ssh_username or 'root'
args = []
if job.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
if job.become_enabled:
args.append('--become')
if job.diff_mode:
args.append('--diff')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
# Support prompting for multiple vault passwords
for k, v in passwords.items():
if k.startswith('vault_password'):
if k == 'vault_password':
args.append('--ask-vault-pass')
else:
vault_id = k.split('.')[1]
args.append('--vault-id')
args.append('{}@prompt'.format(vault_id))
if job.forks: # FIXME: Max limit?
args.append('--forks=%d' % job.forks)
if job.force_handlers:
args.append('--force-handlers')
if job.limit:
args.extend(['-l', job.limit])
if job.verbosity:
args.append('-%s' % ('v' * min(5, job.verbosity)))
if job.job_tags:
args.extend(['-t', job.job_tags])
if job.skip_tags:
args.append('--skip-tags=%s' % job.skip_tags)
if job.start_at_task:
args.append('--start-at-task=%s' % job.start_at_task)
return args
def build_cwd(self, job, private_data_dir):
cwd = job.project.get_project_path()
if not cwd:
root = settings.PROJECTS_ROOT
raise RuntimeError('project local_path %s cannot be found in %s' %
(job.project.local_path, root))
return cwd
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return os.path.join(job.playbook)
def build_extra_vars_file(self, job, private_data_dir):
# Define special extra_vars for AWX, combine with job.extra_vars.
extra_vars = job.awx_meta_vars()
if job.extra_vars_dict:
extra_vars.update(json.loads(job.decrypted_extra_vars()))
# By default, all extra vars disallow Jinja2 template usage for
# security reasons; top level key-values defined in JT.extra_vars, however,
# are whitelisted as "safe" (because they can only be set by users with
        # higher levels of privilege - those that have the ability to create and
# edit Job Templates)
safe_dict = {}
if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template':
safe_dict = job.job_template.extra_vars_dict
return self._write_extra_vars_file(private_data_dir, extra_vars, safe_dict)
def build_credentials_list(self, job):
return job.credentials.prefetch_related('input_sources__source_credential').all()
def get_password_prompts(self, passwords={}):
d = super(RunJob, self).get_password_prompts(passwords)
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
d[r'Vault password:\s*?$'] = 'vault_password'
for k, v in passwords.items():
if k.startswith('vault_password.'):
vault_id = k.split('.')[1]
d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
return d
def should_use_proot(self, job):
'''
Return whether this task should use proot.
'''
return getattr(settings, 'AWX_PROOT_ENABLED', False)
def pre_run_hook(self, job):
if job.inventory is None:
error = _('Job could not start because it does not have a valid inventory.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
if job.project and job.project.scm_type:
pu_ig = job.instance_group
pu_en = job.execution_node
if job.is_isolated() is True:
pu_ig = pu_ig.controller
pu_en = settings.CLUSTER_HOST_ID
if job.project.status in ('error', 'failed'):
msg = _(
'The project revision for this job template is unknown due to a failed update.'
)
job = self.update_model(job.pk, status='failed', job_explanation=msg)
raise RuntimeError(msg)
local_project_sync = job.project.create_project_update(
_eager_fields=dict(
launch_type="sync",
job_type='run',
status='running',
                    instance_group=pu_ig,
execution_node=pu_en,
celery_task_id=job.celery_task_id))
# save the associated job before calling run() so that a
# cancel() call on the job can cancel the project update
job = self.update_model(job.pk, project_update=local_project_sync)
project_update_task = local_project_sync._get_task_class()
try:
project_update_task().run(local_project_sync.id)
job = self.update_model(job.pk, scm_revision=job.project.scm_revision)
except Exception:
local_project_sync.refresh_from_db()
if local_project_sync.status != 'canceled':
job = self.update_model(job.pk, status='failed',
job_explanation=('Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' %
('project_update', local_project_sync.name, local_project_sync.id)))
raise
def final_run_hook(self, job, status, private_data_dir, fact_modification_times, isolated_manager_instance=None):
super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
if not private_data_dir:
# If there's no private data dir, that means we didn't get into the
# actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method
return
if job.use_fact_cache:
job.finish_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
fact_modification_times,
)
if isolated_manager_instance:
isolated_manager_instance.cleanup()
try:
inventory = job.inventory
except Inventory.DoesNotExist:
pass
else:
update_inventory_computed_fields.delay(inventory.id, True)
@task()
class RunProjectUpdate(BaseTask):
model = ProjectUpdate
event_model = ProjectUpdateEvent
event_data_key = 'project_update_id'
@property
def proot_show_paths(self):
return [settings.PROJECTS_ROOT]
def build_private_data(self, project_update, private_data_dir):
'''
Return SSH private key data needed for this project update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
'''
handle, self.revision_path = tempfile.mkstemp(dir=settings.PROJECTS_ROOT)
if settings.AWX_CLEANUP_PATHS:
self.cleanup_paths.append(self.revision_path)
private_data = {'credentials': {}}
if project_update.credential:
credential = project_update.credential
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
return private_data
def build_passwords(self, project_update, runtime_passwords):
'''
Build a dictionary of passwords for SSH private key unlock and SCM
username/password.
'''
passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)
if project_update.credential:
passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')
passwords['scm_username'] = project_update.credential.get_input('username', default='')
passwords['scm_password'] = project_update.credential.get_input('password', default='')
return passwords
def build_env(self, project_update, private_data_dir, isolated=False, private_data_files=None):
'''
Build environment dictionary for ansible-playbook.
'''
env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir,
isolated=isolated,
private_data_files=private_data_files)
self.add_ansible_venv(settings.ANSIBLE_VENV_PATH, env)
env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
env['ANSIBLE_ASK_PASS'] = str(False)
env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
env['DISPLAY'] = '' # Prevent stupid password popup when running tests.
# give ansible a hint about the intended tmpdir to work around issues
# like https://github.com/ansible/ansible/issues/30064
env['TMP'] = settings.AWX_PROOT_BASE_PATH
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
env['ANSIBLE_CALLBACK_PLUGINS'] = self.get_path_to('..', 'plugins', 'callback')
return env
def _build_scm_url_extra_vars(self, project_update):
'''
Helper method to build SCM url and extra vars with parameters needed
for authentication.
'''
extra_vars = {}
if project_update.credential:
scm_username = project_update.credential.get_input('username', default='')
scm_password = project_update.credential.get_input('password', default='')
else:
scm_username = ''
scm_password = ''
scm_type = project_update.scm_type
scm_url = update_scm_url(scm_type, project_update.scm_url,
check_special_cases=False)
scm_url_parts = urlparse.urlsplit(scm_url)
# Prefer the username/password in the URL, if provided.
scm_username = scm_url_parts.username or scm_username
scm_password = scm_url_parts.password or scm_password
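        # How credentials are embedded depends on the SCM type; ssh-based URLs
        # rely on key auth, so the password is never placed in the URL.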
if scm_username:
if scm_type == 'svn':
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_password = False
if scm_url_parts.scheme != 'svn+ssh':
scm_username = False
elif scm_url_parts.scheme.endswith('ssh'):
scm_password = False
elif scm_type == 'insights':
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_url = update_scm_url(scm_type, scm_url, scm_username,
scm_password, scp_format=True)
else:
scm_url = update_scm_url(scm_type, scm_url, scp_format=True)
# Pass the extra accept_hostkey parameter to the git module.
if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):
extra_vars['scm_accept_hostkey'] = 'true'
return scm_url, extra_vars
def build_inventory(self, instance, private_data_dir):
return 'localhost,'
def build_args(self, project_update, private_data_dir, passwords):
'''
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
'''
args = []
if getattr(settings, 'PROJECT_UPDATE_VVV', False):
args.append('-vvv')
else:
args.append('-v')
return args
def build_extra_vars_file(self, project_update, private_data_dir):
extra_vars = {}
scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update)
extra_vars.update(extra_vars_new)
if project_update.project.scm_revision and project_update.job_type == 'run':
scm_branch = project_update.project.scm_revision
else:
scm_branch = project_update.scm_branch or {'hg': 'tip'}.get(project_update.scm_type, 'HEAD')
extra_vars.update({
'project_path': project_update.get_project_path(check_if_exists=False),
'insights_url': settings.INSIGHTS_URL_BASE,
'awx_license_type': get_license(show_key=False).get('license_type', 'UNLICENSED'),
'awx_version': get_awx_version(),
'scm_type': project_update.scm_type,
'scm_url': scm_url,
'scm_branch': scm_branch,
'scm_clean': project_update.scm_clean,
'scm_delete_on_update': project_update.scm_delete_on_update if project_update.job_type == 'check' else False,
'scm_full_checkout': True if project_update.job_type == 'run' else False,
'scm_revision_output': self.revision_path,
'scm_revision': project_update.project.scm_revision,
'roles_enabled': getattr(settings, 'AWX_ROLES_ENABLED', True)
})
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_cwd(self, project_update, private_data_dir):
return self.get_path_to('..', 'playbooks')
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
self.build_cwd(project_update, private_data_dir)
return os.path.join('project_update.yml')
def get_password_prompts(self, passwords={}):
d = super(RunProjectUpdate, self).get_password_prompts(passwords)
d[r'Username for.*:\s*?$'] = 'scm_username'
d[r'Password for.*:\s*?$'] = 'scm_password'
        d[r'Password:\s*?$'] = 'scm_password'
d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
# FIXME: Configure whether we should auto accept host keys?
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
return d
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
scm_revision = project_update.project.scm_revision
inv_update_class = InventoryUpdate._get_task_class()
for inv_src in dependent_inventory_sources:
if not inv_src.update_on_project_update:
continue
if inv_src.scm_last_revision == scm_revision:
logger.debug('Skipping SCM inventory update for `{}` because '
'project has not changed.'.format(inv_src.name))
continue
logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
with transaction.atomic():
if InventoryUpdate.objects.filter(inventory_source=inv_src,
status__in=ACTIVE_STATES).exists():
logger.debug('Skipping SCM inventory update for `{}` because '
'another update is already active.'.format(inv_src.name))
continue
local_inv_update = inv_src.create_inventory_update(
_eager_fields=dict(
launch_type='scm',
status='running',
instance_group=project_update.instance_group,
execution_node=project_update.execution_node,
source_project_update=project_update,
celery_task_id=project_update.celery_task_id))
try:
inv_update_class().run(local_inv_update.id)
except Exception:
logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(
project_update.log_format
))
try:
project_update.refresh_from_db()
except ProjectUpdate.DoesNotExist:
logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
break
try:
local_inv_update.refresh_from_db()
except InventoryUpdate.DoesNotExist:
logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
continue
if project_update.cancel_flag:
logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
break
if local_inv_update.cancel_flag:
logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
if local_inv_update.status == 'successful':
inv_src.scm_last_revision = scm_revision
inv_src.save(update_fields=['scm_last_revision'])
def release_lock(self, instance):
try:
fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
except IOError as e:
logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
os.close(self.lock_fd)
raise
os.close(self.lock_fd)
self.lock_fd = None
    def acquire_lock(self, instance, blocking=True):
        '''
        Note: We don't support blocking=False
        '''
        lock_path = instance.get_lock_file()
if lock_path is None:
raise RuntimeError(u'Invalid lock file path')
try:
self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
except OSError as e:
logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
start_time = time.time()
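        # Poll with non-blocking lock attempts so the cancel_flag can interrupt the wait.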
while True:
try:
instance.refresh_from_db(fields=['cancel_flag'])
if instance.cancel_flag:
logger.debug("ProjectUpdate({0}) was cancelled".format(instance.pk))
return
fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as e:
if e.errno not in (errno.EAGAIN, errno.EACCES):
os.close(self.lock_fd)
                    logger.error("I/O error({0}) while trying to acquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
else:
time.sleep(1.0)
waiting_time = time.time() - start_time
if waiting_time > 1.0:
logger.info(
'{} spent {} waiting to acquire lock for local source tree '
'for path {}.'.format(instance.log_format, waiting_time, lock_path))
def pre_run_hook(self, instance):
# re-create root project folder if a natural disaster has destroyed it
if not os.path.exists(settings.PROJECTS_ROOT):
os.mkdir(settings.PROJECTS_ROOT)
self.acquire_lock(instance)
def post_run_hook(self, instance, status):
self.release_lock(instance)
p = instance.project
if instance.job_type == 'check' and status not in ('failed', 'canceled',):
            with open(self.revision_path, 'r') as fd:
                lines = fd.readlines()
if lines:
p.scm_revision = lines[0].strip()
else:
logger.info("{} Could not find scm revision in check".format(instance.log_format))
p.playbook_files = p.playbooks
p.inventory_files = p.inventories
p.save()
# Update any inventories that depend on this project
dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
if len(dependent_inventory_sources) > 0:
if status == 'successful' and instance.launch_type != 'sync':
self._update_dependent_inventories(instance, dependent_inventory_sources)
def should_use_proot(self, project_update):
'''
Return whether this task should use proot.
'''
return getattr(settings, 'AWX_PROOT_ENABLED', False)
@task()
class RunInventoryUpdate(BaseTask):
model = InventoryUpdate
event_model = InventoryUpdateEvent
event_data_key = 'inventory_update_id'
@property
def proot_show_paths(self):
return [self.get_path_to('..', 'plugins', 'inventory')]
def build_private_data(self, inventory_update, private_data_dir):
"""
Return private data needed for inventory update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
If no private data is needed, return None.
"""
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source](self.get_ansible_version(inventory_update))
return injector.build_private_data(inventory_update, private_data_dir)
def build_env(self, inventory_update, private_data_dir, isolated, private_data_files=None):
"""Build environment dictionary for inventory import.
This used to be the mechanism by which any data that needs to be passed
to the inventory update script is set up. In particular, this is how
inventory update is aware of its proper credentials.
Most environment injection is now accomplished by the credential
        injectors. The primary purpose this method still serves is to point
        to the inventory update INI or config file.
"""
env = super(RunInventoryUpdate, self).build_env(inventory_update,
private_data_dir,
isolated,
private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
self.add_awx_venv(env)
# Pass inventory source ID to inventory script.
env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
env.update(STANDARD_INVENTORY_UPDATE_ENV)
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source](self.get_ansible_version(inventory_update))
if injector is not None:
env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
            # All CLOUD_PROVIDERS sources are implemented as either a script or an auto inventory plugin
if injector.should_use_plugin():
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
else:
env['ANSIBLE_INVENTORY_ENABLED'] = 'script'
if inventory_update.source in ['scm', 'custom']:
for env_k in inventory_update.source_vars_dict:
if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLACKLIST:
env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
elif inventory_update.source == 'file':
raise NotImplementedError('Cannot update file sources through the task system.')
return env
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_args(self, inventory_update, private_data_dir, passwords):
"""Build the command line argument list for running an inventory
import.
"""
# Get the inventory source and inventory.
inventory_source = inventory_update.inventory_source
inventory = inventory_source.inventory
if inventory is None:
raise RuntimeError('Inventory Source is not associated with an Inventory.')
        # Piece together the initial command to run via the shell.
args = ['awx-manage', 'inventory_import']
args.extend(['--inventory-id', str(inventory.pk)])
# Add appropriate arguments for overwrite if the inventory_update
# object calls for it.
if inventory_update.overwrite:
args.append('--overwrite')
if inventory_update.overwrite_vars:
args.append('--overwrite-vars')
# Declare the virtualenv the management command should activate
# as it calls ansible-inventory
args.extend(['--venv', inventory_update.ansible_virtualenv_path])
src = inventory_update.source
# Add several options to the shell arguments based on the
# inventory-source-specific setting in the AWX configuration.
# These settings are "per-source"; it's entirely possible that
# they will be different between cloud providers if an AWX user
# actively uses more than one.
if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False):
args.extend(['--enabled-var',
getattr(settings, '%s_ENABLED_VAR' % src.upper())])
if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False):
args.extend(['--enabled-value',
getattr(settings, '%s_ENABLED_VALUE' % src.upper())])
if getattr(settings, '%s_GROUP_FILTER' % src.upper(), False):
args.extend(['--group-filter',
getattr(settings, '%s_GROUP_FILTER' % src.upper())])
if getattr(settings, '%s_HOST_FILTER' % src.upper(), False):
args.extend(['--host-filter',
getattr(settings, '%s_HOST_FILTER' % src.upper())])
        if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper(), False):
args.append('--exclude-empty-groups')
if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
args.extend(['--instance-id-var',
getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper()),])
# Add arguments for the source inventory script
args.append('--source')
args.append(self.psuedo_build_inventory(inventory_update, private_data_dir))
if src == 'custom':
args.append("--custom")
args.append('-v%d' % inventory_update.verbosity)
if settings.DEBUG:
args.append('--traceback')
return args
def build_inventory(self, inventory_update, private_data_dir):
return None # what runner expects in order to not deal with inventory
def psuedo_build_inventory(self, inventory_update, private_data_dir):
"""Inventory imports are ran through a management command
we pass the inventory in args to that command, so this is not considered
to be "Ansible" inventory (by runner) even though it is
Eventually, we would like to cut out the management command,
and thus use this as the real inventory
"""
src = inventory_update.source
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[src](self.get_ansible_version(inventory_update))
if injector is not None:
if injector.should_use_plugin():
content = injector.inventory_contents(inventory_update, private_data_dir)
# must be a statically named file
inventory_path = os.path.join(private_data_dir, injector.filename)
with open(inventory_path, 'w') as f:
f.write(content)
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
else:
# Use the vendored script path
inventory_path = self.get_path_to('..', 'plugins', 'inventory', injector.script_name)
elif src == 'scm':
inventory_path = inventory_update.get_actual_source_path()
elif src == 'custom':
handle, inventory_path = tempfile.mkstemp(dir=private_data_dir)
f = os.fdopen(handle, 'w')
if inventory_update.source_script is None:
raise RuntimeError('Inventory Script does not exist')
f.write(inventory_update.source_script.script)
f.close()
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
return inventory_path
def build_cwd(self, inventory_update, private_data_dir):
'''
There are two cases where the inventory "source" is in a different
location from the private data:
- deprecated vendored inventory scripts in awx/plugins/inventory
- SCM, where source needs to live in the project folder
in these cases, the inventory does not exist in the standard tempdir
'''
src = inventory_update.source
if src == 'scm' and inventory_update.source_project_update:
return inventory_update.source_project_update.get_project_path(check_if_exists=False)
if src in CLOUD_PROVIDERS:
injector = None
if src in InventorySource.injectors:
injector = InventorySource.injectors[src](self.get_ansible_version(inventory_update))
if (not injector) or (not injector.should_use_plugin()):
return self.get_path_to('..', 'plugins', 'inventory')
return private_data_dir
def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
return None
def build_credentials_list(self, inventory_update):
# All credentials not used by inventory source injector
return inventory_update.get_extra_credentials()
def pre_run_hook(self, inventory_update):
source_project = None
if inventory_update.inventory_source:
source_project = inventory_update.inventory_source.source_project
        if inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project:
local_project_sync = source_project.create_project_update(
_eager_fields=dict(
launch_type="sync",
job_type='run',
status='running',
execution_node=inventory_update.execution_node,
                    instance_group=inventory_update.instance_group,
celery_task_id=inventory_update.celery_task_id))
# associate the inventory update before calling run() so that a
# cancel() call on the inventory update can cancel the project update
local_project_sync.scm_inventory_updates.add(inventory_update)
project_update_task = local_project_sync._get_task_class()
try:
project_update_task().run(local_project_sync.id)
inventory_update.inventory_source.scm_last_revision = local_project_sync.project.scm_revision
inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
except Exception:
inventory_update = self.update_model(
inventory_update.pk, status='failed',
job_explanation=('Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' %
('project_update', local_project_sync.name, local_project_sync.id)))
raise
@task()
class RunAdHocCommand(BaseTask):
'''
Run an ad hoc command using ansible.
'''
model = AdHocCommand
event_model = AdHocCommandEvent
event_data_key = 'ad_hoc_command_id'
def build_private_data(self, ad_hoc_command, private_data_dir):
'''
Return SSH private key data needed for this ad hoc command (only if
stored in DB as ssh_key_data).
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
'''
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
creds = ad_hoc_command.credential
private_data = {'credentials': {}}
if creds and creds.has_input('ssh_key_data'):
private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')
if creds and creds.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, ad_hoc_command, runtime_passwords):
'''
Build a dictionary of passwords for SSH private key, SSH user and
sudo/su.
'''
passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)
cred = ad_hoc_command.credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
return passwords
def build_env(self, ad_hoc_command, private_data_dir, isolated=False, private_data_files=None):
'''
Build environment dictionary for ansible.
'''
plugin_dir = self.get_path_to('..', 'plugins', 'callback')
env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir,
isolated=isolated,
private_data_files=private_data_files)
self.add_ansible_venv(settings.ANSIBLE_VENV_PATH, env)
# Set environment variables needed for inventory and ad hoc event
# callbacks to work.
env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)
env['INVENTORY_HOSTVARS'] = str(True)
env['ANSIBLE_CALLBACK_PLUGINS'] = plugin_dir
env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
# Specify empty SSH args (should disable ControlPersist entirely for
# ad hoc commands).
env.setdefault('ANSIBLE_SSH_ARGS', '')
return env
def build_args(self, ad_hoc_command, private_data_dir, passwords):
'''
Build command line argument list for running ansible, optionally using
ssh-agent for public/private key authentication.
'''
creds = ad_hoc_command.credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.username
become_method = creds.become_method
become_username = creds.become_username
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible's default of using the
# current user.
ssh_username = ssh_username or 'root'
args = []
if ad_hoc_command.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
# We only specify sudo/su user and password if explicitly given by the
# credential. Credential should never specify both sudo and su.
if ad_hoc_command.become_enabled:
args.append('--become')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
if ad_hoc_command.forks: # FIXME: Max limit?
args.append('--forks=%d' % ad_hoc_command.forks)
if ad_hoc_command.diff_mode:
args.append('--diff')
if ad_hoc_command.verbosity:
args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_(
"{} are prohibited from use in ad hoc commands."
).format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
if ad_hoc_command.limit:
args.append(ad_hoc_command.limit)
else:
args.append('all')
return args
def build_extra_vars_file(self, ad_hoc_command, private_data_dir):
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_(
"{} are prohibited from use in ad hoc commands."
).format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_module_name(self, ad_hoc_command):
return ad_hoc_command.module_name
def build_module_args(self, ad_hoc_command):
module_args = ad_hoc_command.module_args
if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
module_args = sanitize_jinja(module_args)
return module_args
def build_cwd(self, ad_hoc_command, private_data_dir):
return private_data_dir
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def get_password_prompts(self, passwords={}):
d = super(RunAdHocCommand, self).get_password_prompts()
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
return d
def should_use_proot(self, ad_hoc_command):
'''
Return whether this task should use proot.
'''
return getattr(settings, 'AWX_PROOT_ENABLED', False)
def final_run_hook(self, adhoc_job, status, private_data_dir, fact_modification_times, isolated_manager_instance=None):
super(RunAdHocCommand, self).final_run_hook(adhoc_job, status, private_data_dir, fact_modification_times)
if isolated_manager_instance:
isolated_manager_instance.cleanup()
@task()
class RunSystemJob(BaseTask):
model = SystemJob
event_model = SystemJobEvent
event_data_key = 'system_job_id'
def build_args(self, system_job, private_data_dir, passwords):
args = ['awx-manage', system_job.job_type]
try:
# System Job extra_vars can be blank, must be JSON if not blank
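            # e.g. extra_vars of '{"days": 30, "dry_run": true}'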
if system_job.extra_vars == '':
json_vars = {}
else:
json_vars = json.loads(system_job.extra_vars)
if 'days' in json_vars:
args.extend(['--days', str(json_vars.get('days', 60))])
if 'dry_run' in json_vars and json_vars['dry_run']:
args.extend(['--dry-run'])
if system_job.job_type == 'cleanup_jobs':
args.extend(['--jobs', '--project-updates', '--inventory-updates',
'--management-jobs', '--ad-hoc-commands', '--workflow-jobs',
'--notifications'])
except Exception:
logger.exception("{} Failed to parse system job".format(system_job.log_format))
return args
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_env(self, instance, private_data_dir, isolated=False, private_data_files=None):
env = super(RunSystemJob, self).build_env(instance, private_data_dir,
isolated=isolated,
private_data_files=private_data_files)
self.add_awx_venv(env)
return env
def build_cwd(self, instance, private_data_dir):
return settings.BASE_DIR
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def build_inventory(self, instance, private_data_dir):
return None
def _reconstruct_relationships(copy_mapping):
for old_obj, new_obj in copy_mapping.items():
model = type(old_obj)
for field_name in getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []):
field = model._meta.get_field(field_name)
if isinstance(field, ForeignKey):
if getattr(new_obj, field_name, None):
continue
related_obj = getattr(old_obj, field_name)
related_obj = copy_mapping.get(related_obj, related_obj)
setattr(new_obj, field_name, related_obj)
elif field.many_to_many:
for related_obj in getattr(old_obj, field_name).all():
logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(
related_obj, new_obj, model, field_name
))
getattr(new_obj, field_name).add(copy_mapping.get(related_obj, related_obj))
new_obj.save()
@task()
def deep_copy_model_obj(
model_module, model_name, obj_pk, new_obj_pk,
user_pk, sub_obj_list, permission_check_func=None
):
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
from awx.api.generics import CopyAPIView
from awx.main.signals import disable_activity_stream
model = getattr(importlib.import_module(model_module), model_name, None)
if model is None:
return
try:
obj = model.objects.get(pk=obj_pk)
new_obj = model.objects.get(pk=new_obj_pk)
creater = User.objects.get(pk=user_pk)
except ObjectDoesNotExist:
logger.warning("Object or user no longer exists.")
return
with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
copy_mapping = {}
for sub_obj_setup in sub_obj_list:
sub_model = getattr(importlib.import_module(sub_obj_setup[0]),
sub_obj_setup[1], None)
if sub_model is None:
continue
try:
sub_obj = sub_model.objects.get(pk=sub_obj_setup[2])
except ObjectDoesNotExist:
continue
copy_mapping.update(CopyAPIView.copy_model_obj(
obj, new_obj, sub_model, sub_obj, creater
))
_reconstruct_relationships(copy_mapping)
if permission_check_func:
permission_check_func = getattr(getattr(
importlib.import_module(permission_check_func[0]), permission_check_func[1]
), permission_check_func[2])
permission_check_func(creater, copy_mapping.values())
if isinstance(new_obj, Inventory):
update_inventory_computed_fields.delay(new_obj.id, True)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/test_cinefiles.py | import pytest, os, shutil
import glob
from pprint import pprint
from lxml import html
import cinefiles.cinefiles as cf
def test_import():
import cinefiles
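# Directory names used to build the fake movie library in the fixtures below.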
movies = [ '5th Element','Amour','Astronaut Farmer',
'Down Periscope','Grand Budapest Hotel, The (2014)',
'Interstellar (2014)','Invisible War, The',
'Men Who Stare at Goats, The','Mulan (1998)',
'Soylent Green (1973)','Thin Red Line']
@pytest.fixture(scope='module')
def directoryA(tmpdir_factory):
testbed = tmpdir_factory.mktemp('testA')
for m in movies:
tempmovie = testbed.mkdir('/'+m).join('/movie.mp4')
tempmovie.write('movie code')
return testbed
def test_directoryA(directoryA):
assert os.path.exists(str(directoryA)+'/Thin Red Line/movie.mp4')
@pytest.fixture(scope='module')
def examples(tmpdir_factory):
safe_examples = tmpdir_factory.mktemp('safe_examples')
shutil.copytree('examples',str(safe_examples)+'/examples')
return safe_examples.join('examples')
def test_safe_examples_dir(examples):
assert os.path.exists(str(examples)+'/run_cf.py')
@pytest.mark.skipif(os.environ['LOGNAME'] == 'holland',
reason="Don't run on home computer")
def test_examplerunA(directoryA, examples, monkeypatch):
monkeypatch.chdir(examples)
import cinefiles.cinefiles as cf
import logging
search = cf.Cinefiles(configfile=str(examples)+'/cinefiles.ini')
#we must change searchfolder to temporary directory
search.configdict.update({'searchfolder':str(directoryA)})
search.run()
#check basic structure
for item in directoryA.listdir():
if(item.isdir()):
foldername = str(item).split('/')[-1]
print(foldername)
if(foldername != 'cinefiles' and foldername != '.cinefiles'):
index = item.join('/index.htm')
assert index.exists()
@pytest.fixture(scope='session')
def dirA_complete(tmpdir_factory):
testbed = tmpdir_factory.mktemp('testA_complete')
for m in movies:
tempmovie = testbed.mkdir('/'+m).join('/movie.mp4')
tempmovie.write('movie code')
search = cf.Cinefiles(searchfolder=str(testbed))
search.run()
return testbed
@pytest.mark.skipif(os.environ['LOGNAME'] == 'holland',
reason="Don't run on home computer")
def test_checkarchive(dirA_complete, monkeypatch):
monkeypatch.chdir(dirA_complete)
assert dirA_complete.join('/5th Element/index.htm').exists()
newsearch = cf.Cinefiles(searchfolder=str(dirA_complete))
# newsearch.run()
it = os.scandir(str(dirA_complete))
for entry in it:
if entry.is_dir():
subit = os.scandir(entry.path)
for subentry in subit:
if(subentry.name == 'archive.log' or subentry.name == '.archive.log'):
assert newsearch.checkarchive(subentry)
#all of these movies have all 3 reviews
moviesB = [ '5th Element','Grand Budapest Hotel, The (2014)',
'Interstellar (2014)','Thin Red Line']
@pytest.fixture(scope='session')
def directoryB(tmpdir_factory):
testbed = tmpdir_factory.mktemp('testB')
for m in moviesB:
tempmovie = testbed.mkdir('/'+m).join('/movie.mp4')
tempmovie.write('movie code')
search = cf.Cinefiles(searchfolder=str(testbed))
search.run()
return testbed
@pytest.mark.skipif(os.environ['LOGNAME'] == 'holland',
reason="Don't run on home computer")
def test_metadata(directoryB):
newsearch = cf.Cinefiles(searchfolder=str(directoryB))
for m in moviesB:
pathobj = directoryB.join('/'+m)
resultdict = newsearch.getattrfrommetadata(str(pathobj))
print(str(pathobj))
for key in resultdict:
if(key != 'indexfile'):
#indexfile is set later
print(key)
assert resultdict[key] != ''
@pytest.mark.skipif(os.environ['LOGNAME'] == 'holland',
reason="Don't run on home computer")
def test_masterindex_imdb(directoryB):
masterindex = directoryB.join('/index.htm')
htmlstring = ''
for line in masterindex.readlines():
htmlstring += line
tree = html.fromstring(htmlstring)
results = tree.xpath('//td[@class="rowimdb"]')
for r in results:
        assert r.text_content() != ''
@pytest.mark.skipif(os.environ['LOGNAME'] == 'holland',
reason="Don't run on home computer")
def test_masterindex_meta(directoryB):
masterindex = directoryB.join('/index.htm')
htmlstring = ''
for line in masterindex.readlines():
htmlstring += line
tree = html.fromstring(htmlstring)
results = tree.xpath('//td[@class="rowmeta"]')
for r in results:
        assert r.text_content() != ''
@pytest.mark.skipif(os.environ['LOGNAME'] == 'holland',
reason="Don't run on home computer")
def test_masterindex_roger(directoryB):
masterindex = directoryB.join('/index.htm')
htmlstring = ''
for line in masterindex.readlines():
htmlstring += line
tree = html.fromstring(htmlstring)
results = tree.xpath('//td[@class="rowroger"]')
for r in results:
        assert r.text_content() != ''
@pytest.fixture(scope='function')
def min_ini(tmpdir_factory):
minimal = tmpdir_factory.mktemp('minimal')
config = minimal.join('/cinefiles.ini')
config.write('[cinefiles]\n searchfolder=none\n')
return minimal
def test_no_args(min_ini,monkeypatch):
monkeypatch.chdir(min_ini)
tc = cf.Cinefiles()
assert tc.configdict['searchfolder'] == 'none'
@pytest.fixture(scope='function')
def blank_folder(tmpdir_factory):
return tmpdir_factory.mktemp('blank')
def test_no_conf(blank_folder,monkeypatch):
monkeypatch.chdir(blank_folder)
with pytest.raises(IOError) as err:
tc = cf.Cinefiles()
@pytest.fixture(scope='function')
def broken_ini(tmpdir_factory):
broken = tmpdir_factory.mktemp('minimal')
config = broken.join('/cinefiles.ini')
config.write('\n')
return broken
def test_broken_conf(broken_ini,monkeypatch):
monkeypatch.chdir(broken_ini)
with pytest.raises(ValueError) as err:
tc = cf.Cinefiles()
def test_onwindows():
assert not cf.running_on_windows()
def test_main(script_runner):
ret = script_runner.run('./cinefiles')
def test_fullsetup():
full = cf.Cinefiles(guess=False,skip=False,test=False,destroy=False,
debugnum=3,localresources=False,searchfolder=False)
for key in {'guess','skip','test','destroy','localresources','searchfolder'}:
assert full.configdict[key] == False
assert full.configdict['debugnum'] == 3
def recurseprint(directoryobj,tabnum=0):
for item in directoryobj.listdir():
print('\t'*tabnum+item.basename, end='')
if(item.isdir()):
print('/')
recurseprint(item,tabnum+1)
else:
print('') | []
| []
| [
"LOGNAME"
]
| [] | ["LOGNAME"] | python | 1 | 0 | |
src/codplayer/test/test_player.py | # codplayer - test the player core, primarily the Transport class
#
# Copyright 2013 Peter Liljenberg <[email protected]>
#
# Distributed under an MIT license, please see LICENSE in the top dir.
import unittest
import threading
import time
import sys
import traceback
import os
from .. import player
from .. import state
from .. import source
from .. import sink
from .. import model
from .. import audio
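# Set DEBUG_TEST to a substring of a test id to enable verbose logging for matching tests.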
debug = os.getenv('DEBUG_TEST', 'fake-string-to-disable-logging')
#
# Transport test and helper classes
#
class TestPublisher(object):
"""Some synchronisation to let the test cases detect when the
Transport has updated the state.
"""
def __init__(self, test):
super(TestPublisher, self).__init__()
self.test_id = test.id()
self.updated = threading.Event()
def clear(self):
self.updated.clear()
def wait(self, timeout):
return self.updated.wait(timeout)
def update_state(self, state):
if debug in self.test_id:
sys.stderr.write('{0.test_id}: {1}\n'.format(self, state))
self.updated.set()
class DummySource(source.Source):
"""Packet source generating dummy packets, each a second long.
"""
TRACK_LENGTH_SECS = 1000
TRACK_LENGTH_FRAMES = TRACK_LENGTH_SECS * model.PCM.rate
def __init__(self, disc_id, num_tracks, num_packets = None,
pause_after_track_number = None):
disc = model.DbDisc()
disc.disc_id = disc_id
disc.audio_format = model.PCM
for i in range(num_tracks):
track = model.DbTrack()
track.number = i + 1
track.length = self.TRACK_LENGTH_FRAMES
if pause_after_track_number == track.number:
track.pause_after = True
disc.tracks.append(track)
super(DummySource, self).__init__(disc)
        # "Infinite" isn't really infinite here, so we know the test eventually stops
self.num_packets = num_packets or self.TRACK_LENGTH_SECS
def iter_packets(self, track_number, packet_rate):
while track_number < len(self.disc.tracks):
track = self.disc.tracks[track_number]
for i in xrange(self.num_packets):
if track.pause_after and i + 1 == self.num_packets:
flags = audio.AudioPacket.PAUSE_AFTER
else:
flags = 0
packet = audio.AudioPacket(self.disc, track, track_number,
i * model.PCM.rate, 1, flags)
packet.data = '0123456789abcdef'
yield packet
track_number += 1
class DummySink(sink.Sink):
def __init__(self, test, *expect):
self.test = test
self.id = test.id()
self.expect = list(expect)
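        # on_call() pops from the end of the list, so reverse to consume
        # expectations in the order they were given.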
self.expect.reverse()
def on_call(self, func, *args):
if debug in self.id:
sys.stderr.write('{0}: {1}{2}\n'.format(self.id, func, args))
if not self.expect:
self.test.fail('unexpected additional call {0}{1}'.format(func, args))
e = self.expect.pop()
self.test.assertEqual(e.func, func, e.msg)
if e.checks:
try:
e.checks(*args)
except:
self.test.fail(traceback.format_exc())
if e.ret:
try:
return e.ret(*args)
except:
self.test.fail(traceback.format_exc())
def done(self):
if self.expect:
self.test.fail('test finished unexpectedly, {0} events remaining'.format(len(self.expect)))
def pause(self):
return self.on_call('pause')
def resume(self):
self.on_call('resume')
def stop(self):
self.on_call('stop')
def start(self, format):
self.on_call('start', format)
def add_packet(self, packet, offset):
return self.on_call('add_packet', packet, offset)
def drain(self):
return self.on_call('drain')
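# Describes one expected sink call: the method name, an assertion message,
# optional extra checks on the arguments and an optional return value factory.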
class Expect(object):
def __init__(self, func, msg = None, checks = None, ret = None):
self.func = func
self.msg = msg
self.checks = checks
self.ret = ret
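# Minimal stand-in for the real player object, providing only the logging and
# state publishing hooks that the Transport uses.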
class DummyPlayer:
def __init__(self, test, publisher):
self._id = test.id()
self._publisher = publisher
def log(self, msg, *args, **kwargs):
if debug in self._id:
sys.stderr.write('{0}: {1}: {2}\n'.format(
self._id, threading.current_thread().name,
msg.format(*args, **kwargs)))
def publish_state(self, state):
self._publisher.update_state(state)
def publish_disc(self, disc):
pass
debug = log
cfg = None
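# Wire a Transport to a DummyPlayer and the given sink; returns the transport
# together with the publisher used to wait for state updates.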
def create_transport(test, sink):
publisher = TestPublisher(test)
return player.Transport(DummyPlayer(test, publisher), sink), publisher
# Actual test cases follow
class TestTransport(unittest.TestCase):
longMessage = True
def test_working_play_stop_at_end(self):
# Single track with three packets
src = DummySource('disc1', 1, 3)
# Delay one packet at a time in a dummy buffer
buf = []
# Wait for test to finish on an event
done = threading.Event()
expects = DummySink(
self,
Expect('start', 'should call start on new disc',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
self.assertEqual(t.state.track, 1, 'should start playing first track'),
),
),
Expect('add_packet', 'should add first packet',
checks = lambda packet, offset: (
self.assertEqual(packet.track_number, 0, 'should be first track record'),
self.assertEqual(packet.track.number, 1, 'should be first track number'),
self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
self.assertEqual(offset, 0),
self.assertIs(t.state.state, player.State.PLAY,
                              'state should be set by Transport before getting update from sink'),
self.assertEqual(t.state.disc_id, 'disc1'),
self.assertEqual(t.state.no_tracks, 1),
self.assertEqual(t.state.length, src.TRACK_LENGTH_SECS),
self.assertEqual(t.state.track, 1),
self.assertEqual(t.state.position, 0),
# buffer the packet
buf.append(packet),
),
ret = lambda packet, offset: (len(packet.data), None, None),
),
Expect('add_packet', 'should add second packet',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 1 * model.PCM.rate, 'should be second packet'),
self.assertEqual(offset, 0),
self.assertIs(t.state.state, player.State.PLAY),
self.assertEqual(t.state.position, 0,
'state should not have been updated yet'),
# buffer the packet
buf.append(packet),
),
# Return first packet as being played
ret = lambda packet, offset: (len(packet.data), buf.pop(0), None),
),
Expect('add_packet', 'should add third packet',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 2 * model.PCM.rate, 'should be third packet'),
self.assertEqual(offset, 0),
self.assertIs(t.state.state, player.State.PLAY),
self.assertEqual(t.state.position, 0,
'state should show first packet'),
# buffer the packet
buf.append(packet),
),
# Return second packet as being played
ret = lambda packet, offset: (len(packet.data), buf.pop(0), None),
),
Expect('drain', 'should be draining buffered packet',
checks = lambda: (
self.assertIs(t.state.state, player.State.PLAY),
self.assertEqual(t.state.position, 1,
'state should show second packet'),
),
# Return third packet as being played, but keep in buffer
ret = lambda: (buf[0], None),
),
Expect('drain', 'should be draining still buffered packet',
checks = lambda: (
self.assertIs(t.state.state, player.State.PLAY),
self.assertEqual(t.state.position, 2,
'state should show third packet'),
),
# Return third packet as being played and empty buffer
ret = lambda: (buf.pop(0), None),
),
Expect('drain', 'final call to be notified that draining is done',
checks = lambda: (
self.assertIs(t.state.state, player.State.PLAY),
self.assertEqual(t.state.position, 2,
'state should show third packet'),
# Allow test to detect that state has updated
p.clear(),
),
# Tell transport that buffer is empty
ret = lambda: None,
),
Expect('stop', 'should call stop at end of disc',
checks = lambda: (
# Allow test case to sync the end of the test
done.set(),
),
),
)
# Kick off test and wait for it
t, p = create_transport(self, expects)
t.new_source(src)
self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
self.assertTrue(p.wait(5), 'timeout waiting for state to update')
# Check final state
expects.done()
self.assertEqual(t.state.state, player.State.STOP,
'transport should stop at end of disc')
self.assertEqual(t.state.length, 0)
self.assertEqual(t.state.track, 0)
self.assertEqual(t.state.position, 0)
def test_writing_partial_packet(self):
# Single track with single packet
src = DummySource('disc1', 1, 1)
# Wait for test to finish on an event
done = threading.Event()
expects = DummySink(
self,
Expect('start', 'should call start on new disc',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
),
),
Expect('add_packet', 'should add first packet',
checks = lambda packet, offset: (
self.assertEqual(offset, 0),
),
ret = lambda packet, offset: (4, packet, None),
),
            Expect('add_packet', 'should add remaining bytes of first packet',
checks = lambda packet, offset: (
self.assertEqual(offset, 4),
),
ret = lambda packet, offset: (len(packet.data) - 4, packet, None),
),
Expect('drain', 'final call to be notified that draining is done',
checks = lambda: (
# Allow test to detect that state has updated
p.clear(),
),
# Tell transport that buffer is empty
ret = lambda: None,
),
Expect('stop', 'should call stop at end of disc',
checks = lambda: (
# Allow test case to sync the end of the test
done.set(),
),
),
)
# Kick off test and wait for it
t, p = create_transport(self, expects)
t.new_source(src)
self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
self.assertTrue(p.wait(5), 'timeout waiting for state to update')
# Check final state
expects.done()
self.assertEqual(t.state.state, player.State.STOP,
'transport should stop at end of disc')
def test_stopping(self):
# Single track with lots of packets
src = DummySource('disc1', 1)
# Wait for test to finish on an event
done = threading.Event()
expects = DummySink(
self,
Expect('start', 'should call start on new disc',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
),
),
Expect('add_packet', 'should add first packet',
checks = lambda packet, offset: (
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we stop()'),
# Allow test to detect that state has updated
p.clear(),
# Tell the transport to stop
t.stop(),
self.assertIs(t.state.state, player.State.STOP,
'state should be STOP immediately, since this is a disruptive change'),
self.assertEqual(t.state.length, 0),
self.assertEqual(t.state.track, 0),
self.assertEqual(t.state.position, 0),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('stop', 'should be told to stop by transport',
checks = lambda: (
# Allow test case to sync the end of the test
done.set(),
),
),
)
# Kick off test and wait for it
t, p = create_transport(self, expects)
t.new_source(src)
self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
# Check final state
expects.done()
self.assertEqual(t.state.state, player.State.STOP)
def test_eject(self):
# Single track with lots of packets
src = DummySource('disc1', 1)
# Wait for test to finish on an event
done = threading.Event()
expects = DummySink(
self,
Expect('start', 'should call start on new disc',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
),
),
Expect('add_packet', 'should add first packet',
checks = lambda packet, offset: (
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we stop()'),
# Allow test to detect that state has updated
p.clear(),
# Tell the transport to eject the disc
t.eject(),
self.assertIs(t.state.state, player.State.NO_DISC,
'state should be NO_DISC immediately, since this is a disruptive change'),
self.assertEqual(t.state.disc_id, None),
self.assertEqual(t.state.no_tracks, 0),
self.assertEqual(t.state.length, 0),
self.assertEqual(t.state.track, 0),
self.assertEqual(t.state.position, 0),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('stop', 'should be told to stop by transport',
checks = lambda: (
# Allow test case to sync the end of the test
done.set(),
),
),
)
# Kick off test and wait for it
t, p = create_transport(self, expects)
t.new_source(src)
self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
# Check final state
expects.done()
self.assertEqual(t.state.state, player.State.NO_DISC)
def test_stop_at_end_and_play_again(self):
# Single track with single packet
src = DummySource('disc1', 1, 1)
# Wait for test to finish on an event
done = threading.Event()
expects = DummySink(
self,
Expect('start', 'should call start on new disc',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
),
),
Expect('add_packet', 'should add only packet',
checks = lambda packet, offset: (
self.assertEqual(offset, 0),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('drain', 'final call to be notified that draining is done',
checks = lambda: (
# Allow test to detect that state has updated
p.clear(),
),
# Tell transport that buffer is empty
ret = lambda: None,
),
Expect('stop', 'should call stop at end of disc',
checks = lambda: (
# Allow test case to sync the middle of the test
done.set(),
),
),
Expect('start', 'should call start on play',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
self.assertEqual(t.state.track, 1, 'should start playing first track'),
),
),
Expect('add_packet', 'should add only packet',
checks = lambda packet, offset: (
self.assertEqual(offset, 0),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('drain', 'final call to be notified that draining is done',
checks = lambda: (
# Allow test to detect that state has updated
p.clear(),
),
# Tell transport that buffer is empty
ret = lambda: None,
),
Expect('stop', 'should call stop at end of disc',
checks = lambda: (
# Allow test case to sync the end of the test
done.set(),
),
),
)
# Kick off test and wait for it
t, p = create_transport(self, expects)
t.new_source(src)
self.assertTrue(done.wait(5), 'timeout waiting for first run to finish')
self.assertTrue(p.wait(5), 'timeout waiting for first run state to update')
self.assertEqual(t.state.state, player.State.STOP,
'transport should stop at end of disc')
# Now play it again
done.clear()
t.play()
# Wait for second run to finish
self.assertTrue(done.wait(5), 'timeout waiting for second run to finish')
self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
# Check final state
expects.done()
self.assertEqual(t.state.state, player.State.STOP,
'transport should stop at end of disc')
def test_stopping_and_play_again(self):
# Single track with lots of packets
src = DummySource('disc1', 1)
# Wait for test to finish on an event
done = threading.Event()
expects = DummySink(
self,
Expect('start', 'should call start on new disc',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
),
),
Expect('add_packet', 'should add first packet',
checks = lambda packet, offset: (
self.assertEqual(packet.track_number, 0, 'should be first track record'),
self.assertEqual(packet.track.number, 1, 'should be first track number'),
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we stop()'),
# Tell the transport to stop
t.stop(),
self.assertIs(t.state.state, player.State.STOP,
'state should be STOP immediately, since this is a disruptive change'),
self.assertEqual(t.state.length, 0),
self.assertEqual(t.state.track, 0),
self.assertEqual(t.state.position, 0),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('stop', 'should be told to stop by transport',
checks = lambda: (
# Allow test case to sync the end of the test
done.set(),
),
),
Expect('start', 'should call start on playing disc again',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
self.assertEqual(t.state.track, 1, 'should start playing first track'),
),
),
Expect('add_packet', 'should add first packet',
checks = lambda packet, offset: (
self.assertEqual(packet.track_number, 0, 'should be first track record'),
self.assertEqual(packet.track.number, 1, 'should be first track number'),
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we stop()'),
# Allow test to detect that state has updated
p.clear(),
# Tell the transport to stop
t.stop(),
self.assertIs(t.state.state, player.State.STOP,
'state should be STOP immediately, since this is a disruptive change'),
self.assertEqual(t.state.length, 0),
self.assertEqual(t.state.track, 0),
self.assertEqual(t.state.position, 0),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('stop', 'should be told to stop by transport',
checks = lambda: (
# Allow test case to sync the end of the test
done.set(),
),
),
)
# Kick off test and wait for it
t, p = create_transport(self, expects)
t.new_source(src)
self.assertTrue(done.wait(5), 'timeout waiting for first run to finish')
self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
# Now play it again
done.clear()
t.play()
# Wait for second run to finish
self.assertTrue(done.wait(5), 'timeout waiting for second run to finish')
self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
# Check final state
expects.done()
self.assertEqual(t.state.state, player.State.STOP,
'transport should stop at end of disc')
def test_new_source_while_playing(self):
# Single track with lots of packets
src1 = DummySource('disc1', 1)
# Single track with one packet
src2 = DummySource('disc2', 1, 1)
# Wait for test to finish on an event
done = threading.Event()
expects = DummySink(
self,
Expect('start', 'should call start on first disc',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
self.assertEqual(t.state.disc_id, 'disc1')
),
),
Expect('add_packet', 'should add first packet',
checks = lambda packet, offset: (
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we change the disc'),
# Tell the transport to switch to the next source
t.new_source(src2),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING immediately, since this is a disruptive change'),
self.assertEqual(t.state.disc_id, 'disc2'),
self.assertEqual(t.state.no_tracks, 1),
self.assertEqual(t.state.track, 1, 'should start playing first track'),
self.assertEqual(t.state.position, 0),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('stop', 'should be told to stop by transport on changing disc'),
Expect('start', 'should call start on second disc',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
self.assertEqual(t.state.track, 1, 'should start playing first track'),
self.assertEqual(t.state.disc_id, 'disc2')
),
),
Expect('add_packet', 'should add only packet',
checks = lambda packet, offset: (
self.assertEqual(offset, 0),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('drain', 'final call to be notified that draining is done',
checks = lambda: (
# Allow test to detect that state has updated
p.clear(),
),
# Tell transport that buffer is empty
ret = lambda: None,
),
Expect('stop', 'should call stop at end of disc',
checks = lambda: (
# Allow test case to sync the middle of the test
done.set(),
),
),
)
# Kick off test and wait for it
t, p = create_transport(self, expects)
t.new_source(src1)
self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
# Check final state
expects.done()
self.assertEqual(t.state.state, player.State.STOP)
self.assertEqual(t.state.disc_id, 'disc2')
def test_next_track(self):
# Two tracks with two packets each
src = DummySource('disc1', 2, 2)
# Wait for test to finish on an event
done = threading.Event()
expects = DummySink(
self,
Expect('start', 'should call start on new disc',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
self.assertEqual(t.state.track, 1, 'should start playing first track'),
),
),
Expect('add_packet', 'should add first packet of first track',
checks = lambda packet, offset: (
self.assertEqual(packet.track_number, 0, 'should be first track record'),
self.assertEqual(packet.track.number, 1, 'should be first track number'),
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we next()'),
# Tell the transport to move to the next track
t.next(),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING while waiting for next track to start'),
self.assertEqual(t.state.track, 2, 'track should be updated'),
self.assertEqual(t.state.position, 0),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('stop', 'should be told to stop by transport on switching track',
checks = lambda: (
self.assertIs(t.state.state, player.State.PLAY,
'state should still be PLAY, since this is called within next()'),
self.assertEqual(t.state.track, 1, 'track should still be the first track'),
self.assertEqual(t.state.position, 0),
),
),
Expect('start', 'should call start for new track',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should still be WORKING while waiting for next track to start'),
self.assertEqual(t.state.track, 2, 'track should still be the pending track'),
self.assertEqual(t.state.position, 0),
),
),
Expect('add_packet', 'should add first packet of second track',
checks = lambda packet, offset: (
self.assertEqual(packet.track_number, 1, 'should be second track record'),
self.assertEqual(packet.track.number, 2, 'should be second track number'),
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we next()'),
# Allow test to detect that state has updated
p.clear(),
# Tell the transport to move to the next track (which will stop)
t.next(),
self.assertIs(t.state.state, player.State.STOP,
'state should be STOP since there are no more tracks'),
self.assertEqual(t.state.track, 0, 'track should be updated'),
self.assertEqual(t.state.position, 0),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('stop', 'should call stop at end of disc',
checks = lambda: (
# Allow test case to sync the middle of the test
done.set(),
),
),
)
# Kick off test and wait for it
t, p = create_transport(self, expects)
t.new_source(src)
self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
# Check final state
expects.done()
self.assertEqual(t.state.state, player.State.STOP)
def test_prev_track(self):
# Two tracks with four packets each, to be able to test restarting track
src = DummySource('disc1', 2, 4)
# Wait for test to finish on an event
done = threading.Event()
expects = DummySink(
self,
Expect('start', 'should call start on new disc',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
self.assertEqual(t.state.track, 2, 'should start playing second track'),
),
),
Expect('add_packet', 'should add first packet of second track',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
self.assertEqual(packet.track_number, 1, 'should be second track record'),
self.assertEqual(packet.track.number, 2, 'should be second track number'),
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when starting to play track'),
self.assertEqual(t.state.position, 0, 'should start playing from start of track'),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('add_packet', 'should add second packet of second track',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 1 * model.PCM.rate, 'should be second packet'),
self.assertEqual(t.state.position, 0, 'position should still be first packet'),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('add_packet', 'should add third packet of second track',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 2 * model.PCM.rate, 'should be third packet'),
self.assertEqual(t.state.position, 1, 'position should be second packet'),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('add_packet', 'should add fourth packet of second track',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 3 * model.PCM.rate, 'should be fourth packet'),
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we prev()'),
self.assertEqual(t.state.position, 2, 'position should be third packet when we prev()'),
# Tell transport to restart from start of the second track
t.prev(),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING while waiting for track to restart'),
self.assertEqual(t.state.track, 2, 'should still be the second track'),
self.assertEqual(t.state.position, 0, 'position should be start of track'),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('stop', 'should be told to stop by transport on switching track',
checks = lambda: (
self.assertIs(t.state.state, player.State.PLAY,
'state should still be PLAY, since this is called within prev()'),
self.assertEqual(t.state.track, 2, 'track should still be the second track'),
self.assertEqual(t.state.position, 2, 'position should still be third packet'),
),
),
Expect('start', 'should call start on restart of track',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should still be WORKING while waiting for track to restart'),
self.assertEqual(t.state.track, 2, 'track should still be the second track'),
self.assertEqual(t.state.position, 0, 'position should still be start of track'),
),
),
Expect('add_packet', 'should add first packet of second track',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
self.assertEqual(packet.track_number, 1, 'should be second track record'),
self.assertEqual(packet.track.number, 2, 'should be second track number'),
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we prev()'),
self.assertEqual(t.state.track, 2, 'track should be the second track when we prev()'),
# Tell the transport to move to the previous track
t.prev(),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING while waiting for prev track to start'),
self.assertEqual(t.state.track, 1, 'should be the first track'),
self.assertEqual(t.state.position, 0, 'position should be start of track'),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('stop', 'should be told to stop by transport on switching track',
checks = lambda: (
self.assertIs(t.state.state, player.State.PLAY,
'state should still be PLAY, since this is called within prev()'),
self.assertEqual(t.state.track, 2, 'track should still be the second track'),
self.assertEqual(t.state.position, 0, 'position should still be first packet'),
),
),
Expect('start', 'should call start on restart of track',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should still be WORKING while waiting for track to restart'),
self.assertEqual(t.state.track, 1, 'track should still be the first track'),
self.assertEqual(t.state.position, 0, 'position should still be start of track'),
),
),
Expect('add_packet', 'should add first packet of first track',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
self.assertEqual(packet.track_number, 0, 'should be first track record'),
self.assertEqual(packet.track.number, 1, 'should be first track number'),
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we prev()'),
self.assertEqual(t.state.track, 1, 'track should be the first track when we prev()'),
# Allow test to detect that state has updated
p.clear(),
# Tell the transport to move to the previous track, which will stop on start of disc
t.prev(),
self.assertIs(t.state.state, player.State.STOP,
'state should be STOP since we prev() at start of disc'),
self.assertEqual(t.state.track, 0, 'track should be updated'),
self.assertEqual(t.state.position, 0),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('stop', 'should call stop when prev() at start of disc',
checks = lambda: (
# Allow test case to sync the middle of the test
done.set(),
),
),
)
# Kick off test on second track and wait for it
t, p = create_transport(self, expects)
t.new_source(src, 1)
self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
# Check final state
expects.done()
self.assertEqual(t.state.state, player.State.STOP)
def test_pause_and_resume(self):
# Single track with lots of packets
src = DummySource('disc1', 1)
# Wait for test to finish on an event
done = threading.Event()
expects = DummySink(
self,
Expect('start', 'should call start on new disc',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
),
),
Expect('add_packet', 'should add first packet',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we pause()'),
# Tell the transport to pause
t.pause(),
self.assertIs(t.state.state, player.State.PAUSE,
'state should be PAUSE immediately, since the sink "paused" itself'),
self.assertEqual(t.state.position, 0, 'should be paused on first packet'),
),
# Accept packet despite pause - let's pretend it's buffered
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('pause', 'should be told to pause by transport',
checks = lambda: (
self.assertIs(t.state.state, player.State.PLAY,
'state should still be PLAY, since this is called within pause()'),
),
# Tell transport that we are "paused"
ret = lambda: True
),
Expect('add_packet', 'should add second packet while paused',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 1 * model.PCM.rate, 'should be second packet'),
self.assertIs(t.state.state, player.State.PAUSE),
# Tell transport to resume again
t.play(),
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY immediately'),
self.assertEqual(t.state.position, 0, 'position should still be first packet'),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('resume', 'should be told to resume by transport',
checks = lambda: (
self.assertIs(t.state.state, player.State.PAUSE,
'state should still be PAUSE, since this is called within play()'),
),
),
Expect('add_packet', 'should add third packet after resume',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 2 * model.PCM.rate, 'should be third packet'),
self.assertIs(t.state.state, player.State.PLAY),
# Allow test to detect that state has updated
p.clear(),
# Tell transport to stop the test
t.stop(),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('stop', 'should be told to stop by transport',
checks = lambda: (
# Allow test case to sync the end of the test
done.set(),
),
),
)
# Kick off test and wait for it
t, p = create_transport(self, expects)
t.new_source(src)
self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
# Check final state
expects.done()
self.assertEqual(t.state.state, player.State.STOP)
def test_play_pause_command(self):
# Single track with lots of packets
src = DummySource('disc1', 1)
# Wait for test to finish on an event
done = threading.Event()
expects = DummySink(
self,
Expect('start', 'should call start on new disc',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
),
),
Expect('add_packet', 'should add first packet',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we play_pause()'),
# Tell the transport to toggle into pause
t.play_pause(),
self.assertIs(t.state.state, player.State.PAUSE,
'state should be PAUSE immediately, since the sink "paused" itself'),
self.assertEqual(t.state.position, 0, 'should be paused on first packet'),
),
# Accept packet despite pause - let's pretend it's buffered
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('pause', 'should be told to pause by transport',
checks = lambda: (
self.assertIs(t.state.state, player.State.PLAY,
'state should still be PLAY, since this is called within play_pause()'),
),
# Tell transport that we are "paused"
ret = lambda: True
),
Expect('add_packet', 'should add second packet while paused',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 1 * model.PCM.rate, 'should be second packet'),
self.assertIs(t.state.state, player.State.PAUSE),
# Tell transport to resume again
t.play_pause(),
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY immediately'),
self.assertEqual(t.state.position, 0, 'position should still be first packet'),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('resume', 'should be told to resume by transport',
checks = lambda: (
self.assertIs(t.state.state, player.State.PAUSE,
'state should still be PAUSE, since this is called within play_pause()'),
),
),
Expect('add_packet', 'should add third packet after resume',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 2 * model.PCM.rate, 'should be third packet'),
self.assertIs(t.state.state, player.State.PLAY),
# Allow test to detect that state has updated
p.clear(),
# Tell transport to stop the test
t.stop(),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('stop', 'should be told to stop by transport',
checks = lambda: (
# Allow test case to sync the end of the test
done.set(),
),
),
)
# Kick off test and wait for it
t, p = create_transport(self, expects)
t.new_source(src)
self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
# Check final state
expects.done()
self.assertEqual(t.state.state, player.State.STOP)
def test_pause_after_track(self):
# Two tracks of two packets each, pause after the first one
src = DummySource('disc1', 2, 2, 1)
# Wait for test to finish on an event
done = threading.Event()
expects = DummySink(
self,
Expect('start', 'should call start on new disc',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
),
),
Expect('add_packet', 'should add first packet',
checks = lambda packet, offset: (
self.assertEqual(offset, 0),
self.assertIs(t.state.state, player.State.PLAY),
self.assertEqual(t.state.track, 1),
self.assertEqual(t.state.position, 0),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('add_packet', 'should add second packet',
checks = lambda packet, offset: (
self.assertEqual(offset, 0),
self.assertEqual(packet.abs_pos, 1 * model.PCM.rate, 'should be second packet'),
self.assertEqual(packet.flags, packet.PAUSE_AFTER,
'packet should tell transport to pause'),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('drain', 'drain should be called when pausing after track',
checks = lambda: (
self.assertIs(t.state.state, player.State.PLAY),
self.assertEqual(t.state.track, 1),
self.assertEqual(t.state.position, 1, "position should be second packet"),
# Allow test to detect that state has updated
p.clear(),
),
# Tell transport that buffer is empty
ret = lambda: None,
),
Expect('stop', 'should call stop when pausing after track',
checks = lambda: (
# Allow test case to sync the middle of the test
done.set(),
),
),
Expect('start', 'should call start on play',
checks = lambda format: (
self.assertIs(format, model.PCM),
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
self.assertEqual(t.state.track, 2, 'should start playing second track'),
),
),
Expect('add_packet', 'should add first packet in second track',
checks = lambda packet, offset: (
self.assertEqual(offset, 0),
# State should have been updated
self.assertIs(t.state.state, player.State.PLAY),
self.assertEqual(t.state.track, 2),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('add_packet', 'should add second packet in second track',
checks = lambda packet, offset: (
self.assertEqual(offset, 0),
self.assertEqual(packet.abs_pos, 1 * model.PCM.rate, 'should be second packet'),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('drain', 'drain final track',
checks = lambda: (
# Allow test to detect that state has updated
p.clear(),
),
# Tell transport that buffer is empty
ret = lambda: None,
),
Expect('stop', 'should call stop at end of disc',
checks = lambda: (
# Allow test case to sync the end of the test
done.set(),
),
),
)
# Kick off test and wait for it
t, p = create_transport(self, expects)
t.new_source(src)
self.assertTrue(done.wait(5), 'timeout waiting for first run to finish')
self.assertTrue(p.wait(5), 'timeout waiting for first run state to update')
self.assertEqual(t.state.state, player.State.PAUSE,
'transport should be paused at end of first track')
        self.assertEqual(t.state.track, 1)
        self.assertEqual(t.state.position, 1, "position should be second packet")
# Now hit play to keep playing second track
done.clear()
t.play()
# Wait for second run to finish
self.assertTrue(done.wait(5), 'timeout waiting for second run to finish')
self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
# Check final state
expects.done()
self.assertEqual(t.state.state, player.State.STOP,
'transport should stop at end of disc')
def test_device_error_without_packet(self):
# Single track with lots of packets
src = DummySource('disc1', 1)
# Wait for test to finish on an event
done = threading.Event()
expects = DummySink(
self,
Expect('start', 'should call start on new disc',
checks = lambda format: (
self.assertIs(t.state.state, player.State.WORKING,
'state should be WORKING before any packets have been read'),
),
),
Expect('add_packet', 'should add first packet, and get error in response',
checks = lambda packet, offset: (
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we stop()'),
),
ret = lambda packet, offset: (0, None, 'foobar'),
),
Expect('add_packet', 'should retry first packet after error',
checks = lambda packet, offset: (
self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
self.assertEqual(offset, 0),
self.assertEqual(t.state.error, 'Audio sink error: foobar',
'state.error should be set'),
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we stop()'),
# Now stop, so test doesn't run away
self.assertIs(t.state.state, player.State.PLAY,
'state should be PLAY when we stop()'),
# Allow test to detect that state has updated
p.clear(),
# Tell the transport to stop
t.stop(),
self.assertIs(t.state.state, player.State.STOP,
'state should be STOP immediately, since this is a disruptive change'),
),
ret = lambda packet, offset: (len(packet.data), packet, None),
),
Expect('stop', 'should be told to stop by transport',
checks = lambda: (
# Allow test case to sync the end of the test
done.set(),
),
),
)
# Kick off test and wait for it
t, p = create_transport(self, expects)
t.new_source(src)
self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
# Check final state
expects.done()
self.assertEqual(t.state.state, player.State.STOP)
| []
| []
| [
"DEBUG_TEST"
]
| [] | ["DEBUG_TEST"] | python | 1 | 0 | |
mongo_test.go | package ginsessionmongodb
import (
"context"
"os"
"testing"
"time"
"github.com/gin-contrib/sessions"
"github.com/gin-contrib/sessions/tester"
_ "github.com/joho/godotenv/autoload"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
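// newStore builds a session store backed by the test.sessions collection of
// the MongoDB instance named by the URI environment variable (loaded via
// godotenv autoload).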
var newStore = func(_ *testing.T) sessions.Store {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
connect := options.Client().ApplyURI(os.Getenv("URI"))
client, err := mongo.Connect(ctx, connect)
if err != nil {
panic(err)
}
coll := client.Database("test").Collection("sessions")
return NewStore(coll, 3600, true, []byte("secret"))
}
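// The tests below run the shared gin-contrib/sessions tester suite against
// the Mongo-backed store.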
func TestMongo_SessionGetSet(t *testing.T) {
tester.GetSet(t, newStore)
}
func TestMongo_SessionDeleteKey(t *testing.T) {
tester.DeleteKey(t, newStore)
}
func TestMongo_SessionFlashes(t *testing.T) {
tester.Flashes(t, newStore)
}
func TestMongo_SessionClear(t *testing.T) {
tester.Clear(t, newStore)
}
func TestMongo_SessionOptions(t *testing.T) {
tester.Options(t, newStore)
}
| [
"\"URI\""
]
| []
| [
"URI"
]
| [] | ["URI"] | go | 1 | 0 | |
tests/validation/tests/v3_api/test_import_k3s_cluster.py | import os
from .common import * # NOQA
from lib.aws import AmazonWebServices
from python_terraform import *
DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
RANCHER_REGION = os.environ.get("AWS_REGION")
RANCHER_VPC_ID = os.environ.get("AWS_VPC")
RANCHER_SUBNETS = os.environ.get("AWS_SUBNET")
RANCHER_AWS_AMI = os.environ.get("AWS_AMI", "")
RANCHER_AWS_USER = os.environ.get("AWS_USER", "ubuntu")
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME", "")
HOST_NAME = os.environ.get('RANCHER_HOST_NAME', "sa")
RANCHER_RESOURCE_NAME = os.environ.get("RANCHER_RESOURCE_NAME", "")
RANCHER_K3S_VERSION = os.environ.get("RANCHER_K3S_VERSION", "")
RANCHER_K3S_NO_OF_SERVER_NODES = \
os.environ.get("RANCHER_K3S_NO_OF_SERVER_NODES", 2)
RANCHER_K3S_NO_OF_WORKER_NODES = \
os.environ.get("RANCHER_K3S_NO_OF_WORKER_NODES", 0)
RANCHER_K3S_SERVER_FLAGS = os.environ.get("RANCHER_K3S_SERVER_FLAGS", "server")
RANCHER_K3S_WORKER_FLAGS = os.environ.get("RANCHER_K3S_WORKER_FLAGS", "agent")
RANCHER_QA_SPACE = os.environ.get("RANCHER_QA_SPACE", "")
RANCHER_EC2_INSTANCE_CLASS = os.environ.get("RANCHER_EC2_INSTANCE_CLASS", "t2.medium")
RANCHER_EXTERNAL_DB = os.environ.get("RANCHER_EXTERNAL_DB", "mysql")
RANCHER_EXTERNAL_DB_VERSION = os.environ.get("RANCHER_EXTERNAL_DB_VERSION", "5.7")
RANCHER_INSTANCE_CLASS = os.environ.get("RANCHER_INSTANCE_CLASS", "db.t2.micro")
RANCHER_DB_GROUP_NAME = os.environ.get("RANCHER_DB_GROUP_NAME", "default.mysql5.7")
RANCHER_DB_USERNAME = os.environ.get("RANCHER_DB_USERNAME", "")
RANCHER_DB_PASSWORD = os.environ.get("RANCHER_DB_PASSWORD", "")
RANCHER_K3S_KUBECONFIG_PATH = DATA_SUBDIR + "/k3s_kubeconfig.yaml"
def test_create_k3s_single_control_cluster():
aws_nodes, client, k3s_clusterfilepath = create_single_control_cluster()
def test_create_k3s_multiple_control_cluster():
k3s_clusterfilepath = create_multiple_control_cluster()
def test_import_k3s_single_control_cluster():
aws_nodes, client, k3s_clusterfilepath = create_single_control_cluster()
cluster = create_rancher_cluster(client, k3s_clusterfilepath)
cluster_cleanup(client, cluster, aws_nodes)
def test_import_k3s_multiple_control_cluster():
client = get_user_client()
k3s_clusterfilepath = create_multiple_control_cluster()
cluster = create_rancher_cluster(client, k3s_clusterfilepath)
def create_single_control_cluster():
# Get URL and User_Token
client = get_user_client()
# Create nodes in AWS
aws_nodes = create_nodes()
# Install k3s on master node
kubeconfig, node_token = install_k3s_master_node(aws_nodes[0])
# Join worker nodes
join_k3s_worker_nodes(aws_nodes[0], aws_nodes[1:], node_token)
# Verify cluster health
verify_cluster_health(aws_nodes[0])
# Update master node IP in kubeconfig file
localhost = "127.0.0.1"
kubeconfig = kubeconfig.replace(localhost, aws_nodes[0].public_ip_address)
k3s_kubeconfig_file = "k3s_kubeconfig.yaml"
k3s_clusterfilepath = create_kube_config_file(kubeconfig, k3s_kubeconfig_file)
print(k3s_clusterfilepath)
k3s_kubeconfig_file = "k3s_kubeconfig.yaml"
k3s_clusterfilepath = DATA_SUBDIR + "/" + k3s_kubeconfig_file
is_file = os.path.isfile(k3s_clusterfilepath)
assert is_file
with open(k3s_clusterfilepath, 'r') as f:
print(f.read())
return aws_nodes, client, k3s_clusterfilepath
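# Provision a multi-server k3s cluster with an external database via the
# Terraform configs under resource/terraform, then copy the resulting
# kubeconfig into DATA_SUBDIR.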
def create_multiple_control_cluster():
k3s_kubeconfig_file = "k3s_kubeconfig.yaml"
k3s_clusterfilepath = DATA_SUBDIR + "/" + k3s_kubeconfig_file
tf_dir = DATA_SUBDIR + "/" + "terraform/master"
keyPath = os.path.abspath('.') + '/.ssh/' + AWS_SSH_KEY_NAME
os.chmod(keyPath, 0o400)
no_of_servers = int(RANCHER_K3S_NO_OF_SERVER_NODES)
no_of_servers = no_of_servers - 1
tf = Terraform(working_dir=tf_dir,
variables={'region': RANCHER_REGION,
'vpc_id': RANCHER_VPC_ID,
'subnets': RANCHER_SUBNETS,
'aws_ami': RANCHER_AWS_AMI,
'aws_user': RANCHER_AWS_USER,
'resource_name': RANCHER_RESOURCE_NAME,
'access_key': keyPath,
'external_db': RANCHER_EXTERNAL_DB,
'external_db_version': RANCHER_EXTERNAL_DB_VERSION,
'db_group_name': RANCHER_DB_GROUP_NAME,
'instance_class': RANCHER_INSTANCE_CLASS,
'ec2_instance_class': RANCHER_EC2_INSTANCE_CLASS,
'username': RANCHER_DB_USERNAME,
'password': RANCHER_DB_PASSWORD,
'k3s_version': RANCHER_K3S_VERSION,
'no_of_server_nodes': no_of_servers,
'server_flags': RANCHER_K3S_SERVER_FLAGS,
'qa_space': RANCHER_QA_SPACE})
print("Creating cluster")
tf.init()
print(tf.plan(out="plan_server.out"))
print("\n\n")
print(tf.apply("--auto-approve"))
print("\n\n")
tf_dir = DATA_SUBDIR + "/" + "terraform/worker"
tf = Terraform(working_dir=tf_dir,
variables={'region': RANCHER_REGION,
'vpc_id': RANCHER_VPC_ID,
'subnets': RANCHER_SUBNETS,
'aws_ami': RANCHER_AWS_AMI,
'aws_user': RANCHER_AWS_USER,
'ec2_instance_class': RANCHER_EC2_INSTANCE_CLASS,
'resource_name': RANCHER_RESOURCE_NAME,
'access_key': keyPath,
'k3s_version': RANCHER_K3S_VERSION,
'no_of_worker_nodes': int(RANCHER_K3S_NO_OF_WORKER_NODES),
'worker_flags': RANCHER_K3S_WORKER_FLAGS})
print("Joining worker nodes")
tf.init()
print(tf.plan(out="plan_worker.out"))
print("\n\n")
print(tf.apply("--auto-approve"))
print("\n\n")
cmd = "cp /tmp/multinode_kubeconfig1 " + k3s_clusterfilepath
os.system(cmd)
is_file = os.path.isfile(k3s_clusterfilepath)
assert is_file
print(k3s_clusterfilepath)
with open(k3s_clusterfilepath, 'r') as f:
print(f.read())
print("K3s Cluster Created")
return k3s_clusterfilepath
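# Import the existing k3s cluster into Rancher: create a cluster object, run
# the generated registration command against the k3s kubeconfig and validate
# the imported cluster.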
def create_rancher_cluster(client, k3s_clusterfilepath):
clustername = random_test_name("testcustom-k3s")
cluster = client.create_cluster(name=clustername)
cluster_token = create_custom_host_registration_token(client, cluster)
command = cluster_token.insecureCommand
finalimportcommand = command + " --kubeconfig " + k3s_clusterfilepath
print(finalimportcommand)
result = run_command(finalimportcommand)
clusters = client.list_cluster(name=clustername).data
assert len(clusters) > 0
print("Cluster is")
print(clusters[0])
# Validate the cluster
cluster = validate_cluster(client, clusters[0],
check_intermediate_state=False)
return cluster
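# Launch the AWS EC2 instances that will host the k3s nodes.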
def create_nodes():
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
int(RANCHER_K3S_NO_OF_WORKER_NODES),
random_test_name("testcustom-k3s"+"-"+HOST_NAME))
assert len(aws_nodes) == int(RANCHER_K3S_NO_OF_WORKER_NODES)
for aws_node in aws_nodes:
print("AWS NODE PUBLIC IP {}".format(aws_node.public_ip_address))
return aws_nodes
def install_k3s_master_node(master):
# Connect to the node and install k3s on master
print("K3s VERSION {}".format(RANCHER_K3S_VERSION))
cmd = "curl -sfL https://get.k3s.io | \
{} sh -s - server --node-external-ip {}".\
format("INSTALL_K3S_VERSION={}".format(RANCHER_K3S_VERSION) if RANCHER_K3S_VERSION else "", master.public_ip_address)
print("Master Install {}".format(cmd))
install_result = master.execute_command(cmd)
print(install_result)
# Get node token from master
cmd = "sudo cat /var/lib/rancher/k3s/server/node-token"
print(cmd)
node_token = master.execute_command(cmd)
print(node_token)
# Get kube_config from master
cmd = "sudo cat /etc/rancher/k3s/k3s.yaml"
kubeconfig = master.execute_command(cmd)
print(kubeconfig)
print("NO OF WORKER NODES: {}".format(RANCHER_K3S_NO_OF_WORKER_NODES))
print("NODE TOKEN: \n{}".format(node_token))
print("KUBECONFIG: \n{}".format(kubeconfig))
return kubeconfig[0].strip("\n"), node_token[0].strip("\n")
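# Join each worker to the master through the k3s install script, using the
# master's public IP and node token.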
def join_k3s_worker_nodes(master, workers, node_token):
for worker in workers:
cmd = "curl -sfL https://get.k3s.io | \
{} K3S_URL=https://{}:6443 K3S_TOKEN={} sh -s - ". \
format("INSTALL_K3S_VERSION={}".format(RANCHER_K3S_VERSION) \
if RANCHER_K3S_VERSION else "", master.public_ip_address, node_token)
cmd = cmd + " {} {}".format("--node-external-ip", worker.public_ip_address)
print("Joining k3s master")
print(cmd)
install_result = worker.execute_command(cmd)
print(install_result)
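# Sanity check: list the cluster nodes as seen by the k3s master.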
def verify_cluster_health(master):
cmd = "sudo k3s kubectl get nodes"
install_result = master.execute_command(cmd)
print(install_result)
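# Write the retrieved kubeconfig to DATA_SUBDIR and return its path.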
def create_kube_config_file(kubeconfig, k3s_kubeconfig_file):
k3s_clusterfilepath = DATA_SUBDIR + "/" + k3s_kubeconfig_file
f = open(k3s_clusterfilepath, "w")
f.write(kubeconfig)
f.close()
return k3s_clusterfilepath
| []
| []
| [
"AWS_SUBNET",
"RANCHER_DB_GROUP_NAME",
"RANCHER_K3S_SERVER_FLAGS",
"AWS_REGION",
"RANCHER_RESOURCE_NAME",
"AWS_AMI",
"AWS_USER",
"RANCHER_HOST_NAME",
"RANCHER_K3S_NO_OF_SERVER_NODES",
"RANCHER_K3S_NO_OF_WORKER_NODES",
"RANCHER_EC2_INSTANCE_CLASS",
"AWS_VPC",
"RANCHER_INSTANCE_CLASS",
"AWS_SSH_KEY_NAME",
"RANCHER_QA_SPACE",
"RANCHER_EXTERNAL_DB",
"RANCHER_K3S_VERSION",
"RANCHER_DB_PASSWORD",
"RANCHER_K3S_WORKER_FLAGS",
"RANCHER_DB_USERNAME",
"RANCHER_EXTERNAL_DB_VERSION"
]
| [] | ["AWS_SUBNET", "RANCHER_DB_GROUP_NAME", "RANCHER_K3S_SERVER_FLAGS", "AWS_REGION", "RANCHER_RESOURCE_NAME", "AWS_AMI", "AWS_USER", "RANCHER_HOST_NAME", "RANCHER_K3S_NO_OF_SERVER_NODES", "RANCHER_K3S_NO_OF_WORKER_NODES", "RANCHER_EC2_INSTANCE_CLASS", "AWS_VPC", "RANCHER_INSTANCE_CLASS", "AWS_SSH_KEY_NAME", "RANCHER_QA_SPACE", "RANCHER_EXTERNAL_DB", "RANCHER_K3S_VERSION", "RANCHER_DB_PASSWORD", "RANCHER_K3S_WORKER_FLAGS", "RANCHER_DB_USERNAME", "RANCHER_EXTERNAL_DB_VERSION"] | python | 21 | 0 | |
VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/TargetTool/TargetTool.py | #
# Copyright (c) 2007 - 2010, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
import os
import sys
import traceback
from optparse import OptionParser
import Common.EdkLogger as EdkLogger
import Common.BuildToolError as BuildToolError
from Common.DataType import *
from Common.BuildVersion import gBUILD_VERSION
# To Do 1.set clean, 2. add item, if the line is disabled.
class TargetTool():
def __init__(self, opt, args):
self.WorkSpace = os.path.normpath(os.getenv('WORKSPACE'))
self.Opt = opt
self.Arg = args[0]
self.FileName = os.path.normpath(os.path.join(self.WorkSpace, 'Conf', 'target.txt'))
# VBox - begin
if 'VBOX_TARGET_CONF' in os.environ:
self.FileName = os.path.abspath(os.environ['VBOX_TARGET_CONF']);
# VBox - end
if os.path.isfile(self.FileName) == False:
print "%s does not exist." % self.FileName
sys.exit(1)
self.TargetTxtDictionary = {
TAB_TAT_DEFINES_ACTIVE_PLATFORM : None,
TAB_TAT_DEFINES_TOOL_CHAIN_CONF : None,
TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER : None,
TAB_TAT_DEFINES_TARGET : None,
TAB_TAT_DEFINES_TOOL_CHAIN_TAG : None,
TAB_TAT_DEFINES_TARGET_ARCH : None,
TAB_TAT_DEFINES_BUILD_RULE_CONF : None,
}
self.LoadTargetTxtFile(self.FileName)
def LoadTargetTxtFile(self, filename):
if os.path.exists(filename) and os.path.isfile(filename):
return self.ConvertTextFileToDict(filename, '#', '=')
else:
raise ParseError('LoadTargetTxtFile() : No Target.txt file exists.')
return 1
#
# Convert a text file to a dictionary
#
def ConvertTextFileToDict(self, FileName, CommentCharacter, KeySplitCharacter):
"""Convert a text file to a dictionary of (name:value) pairs."""
try:
f = open(FileName,'r')
for Line in f:
if Line.startswith(CommentCharacter) or Line.strip() == '':
continue
LineList = Line.split(KeySplitCharacter,1)
if len(LineList) >= 2:
Key = LineList[0].strip()
if Key.startswith(CommentCharacter) == False and Key in self.TargetTxtDictionary.keys():
if Key == TAB_TAT_DEFINES_ACTIVE_PLATFORM or Key == TAB_TAT_DEFINES_TOOL_CHAIN_CONF \
or Key == TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER \
or Key == TAB_TAT_DEFINES_ACTIVE_MODULE:
self.TargetTxtDictionary[Key] = LineList[1].replace('\\', '/').strip()
elif Key == TAB_TAT_DEFINES_TARGET or Key == TAB_TAT_DEFINES_TARGET_ARCH \
or Key == TAB_TAT_DEFINES_TOOL_CHAIN_TAG or Key == TAB_TAT_DEFINES_BUILD_RULE_CONF:
self.TargetTxtDictionary[Key] = LineList[1].split()
f.close()
return 0
except:
last_type, last_value, last_tb = sys.exc_info()
traceback.print_exception(last_type, last_value, last_tb)
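    # Print the current target.txt settings and warn about keys that have no
    # value yet.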
def Print(self):
KeyList = self.TargetTxtDictionary.keys()
errMsg = ''
for Key in KeyList:
if type(self.TargetTxtDictionary[Key]) == type([]):
print "%-30s = %s" % (Key, ''.join(elem + ' ' for elem in self.TargetTxtDictionary[Key]))
elif self.TargetTxtDictionary[Key] == None:
errMsg += " Missing %s configuration information, please use TargetTool to set value!" % Key + os.linesep
else:
print "%-30s = %s" % (Key, self.TargetTxtDictionary[Key])
if errMsg != '':
print os.linesep + 'Warning:' + os.linesep + errMsg
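    # Rewrite target.txt in place: Num == 0 clears every known key, otherwise
    # each key is replaced with the value derived from the command line options.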
def RWFile(self, CommentCharacter, KeySplitCharacter, Num):
try:
fr = open(self.FileName, 'r')
fw = open(os.path.normpath(os.path.join(self.WorkSpace, 'Conf\\targetnew.txt')), 'w')
existKeys = []
for Line in fr:
if Line.startswith(CommentCharacter) or Line.strip() == '':
fw.write(Line)
else:
LineList = Line.split(KeySplitCharacter,1)
if len(LineList) >= 2:
Key = LineList[0].strip()
if Key.startswith(CommentCharacter) == False and Key in self.TargetTxtDictionary.keys():
if Key not in existKeys:
existKeys.append(Key)
else:
print "Warning: Found duplicate key item in original configuration files!"
if Num == 0:
Line = "%-30s = \n" % Key
else:
ret = GetConfigureKeyValue(self, Key)
if ret != None:
Line = ret
fw.write(Line)
for key in self.TargetTxtDictionary.keys():
if key not in existKeys:
print "Warning: %s does not exist in original configuration file" % key
Line = GetConfigureKeyValue(self, key)
if Line == None:
Line = "%-30s = " % key
fw.write(Line)
fr.close()
fw.close()
os.remove(self.FileName)
os.rename(os.path.normpath(os.path.join(self.WorkSpace, 'Conf\\targetnew.txt')), self.FileName)
except:
last_type, last_value, last_tb = sys.exc_info()
traceback.print_exception(last_type, last_value, last_tb)
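# Map a target.txt key to its new "KEY = value" line based on the parsed
# command line options; returns None when the key is left unchanged.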
def GetConfigureKeyValue(self, Key):
Line = None
if Key == TAB_TAT_DEFINES_ACTIVE_PLATFORM and self.Opt.DSCFILE != None:
dscFullPath = os.path.join(self.WorkSpace, self.Opt.DSCFILE)
if os.path.exists(dscFullPath):
Line = "%-30s = %s\n" % (Key, self.Opt.DSCFILE)
else:
EdkLogger.error("TagetTool", BuildToolError.FILE_NOT_FOUND,
"DSC file %s does not exist!" % self.Opt.DSCFILE, RaiseError=False)
elif Key == TAB_TAT_DEFINES_TOOL_CHAIN_CONF and self.Opt.TOOL_DEFINITION_FILE != None:
tooldefFullPath = os.path.join(self.WorkSpace, self.Opt.TOOL_DEFINITION_FILE)
if os.path.exists(tooldefFullPath):
Line = "%-30s = %s\n" % (Key, self.Opt.TOOL_DEFINITION_FILE)
else:
EdkLogger.error("TagetTool", BuildToolError.FILE_NOT_FOUND,
"Tooldef file %s does not exist!" % self.Opt.TOOL_DEFINITION_FILE, RaiseError=False)
elif self.Opt.NUM >= 2:
Line = "%-30s = %s\n" % (Key, 'Enable')
elif self.Opt.NUM <= 1:
Line = "%-30s = %s\n" % (Key, 'Disable')
elif Key == TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER and self.Opt.NUM != None:
Line = "%-30s = %s\n" % (Key, str(self.Opt.NUM))
elif Key == TAB_TAT_DEFINES_TARGET and self.Opt.TARGET != None:
Line = "%-30s = %s\n" % (Key, ''.join(elem + ' ' for elem in self.Opt.TARGET))
elif Key == TAB_TAT_DEFINES_TARGET_ARCH and self.Opt.TARGET_ARCH != None:
Line = "%-30s = %s\n" % (Key, ''.join(elem + ' ' for elem in self.Opt.TARGET_ARCH))
elif Key == TAB_TAT_DEFINES_TOOL_CHAIN_TAG and self.Opt.TOOL_CHAIN_TAG != None:
Line = "%-30s = %s\n" % (Key, self.Opt.TOOL_CHAIN_TAG)
elif Key == TAB_TAT_DEFINES_BUILD_RULE_CONF and self.Opt.BUILD_RULE_FILE != None:
buildruleFullPath = os.path.join(self.WorkSpace, self.Opt.BUILD_RULE_FILE)
if os.path.exists(buildruleFullPath):
Line = "%-30s = %s\n" % (Key, self.Opt.BUILD_RULE_FILE)
else:
EdkLogger.error("TagetTool", BuildToolError.FILE_NOT_FOUND,
"Build rule file %s does not exist!" % self.Opt.BUILD_RULE_FILE, RaiseError=False)
return Line
VersionNumber = ("0.01" + " " + gBUILD_VERSION)
__version__ = "%prog Version " + VersionNumber
__copyright__ = "Copyright (c) 2007 - 2010, Intel Corporation All rights reserved."
__usage__ = "%prog [options] {args} \
\nArgs: \
\n Clean clean the all default configuration of target.txt. \
\n Print print the all default configuration of target.txt. \
\n Set replace the default configuration with expected value specified by option."
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
if option not in gParamCheck:
setattr(parser.values, option.dest, value)
gParamCheck.append(option)
else:
parser.error("Option %s only allows one instance in command line!" % option)
def RangeCheckCallback(option, opt_str, value, parser):
if option not in gParamCheck:
gParamCheck.append(option)
if value < 1 or value > 8:
parser.error("The count of multi-thread is not in valid range of 1 ~ 8.")
else:
setattr(parser.values, option.dest, value)
else:
parser.error("Option %s only allows one instance in command line!" % option)
def MyOptionParser():
parser = OptionParser(version=__version__,prog="TargetTool.exe",usage=__usage__,description=__copyright__)
parser.add_option("-a", "--arch", action="append", type="choice", choices=['IA32','X64','IPF','EBC', 'ARM','0'], dest="TARGET_ARCH",
help="ARCHS is one of list: IA32, X64, IPF, ARM or EBC, which replaces target.txt's TARGET_ARCH definition. To specify more archs, please repeat this option. 0 will clear this setting in target.txt and can't combine with other value.")
parser.add_option("-p", "--platform", action="callback", type="string", dest="DSCFILE", callback=SingleCheckCallback,
help="Specify a DSC file, which replace target.txt's ACTIVE_PLATFORM definition. 0 will clear this setting in target.txt and can't combine with other value.")
parser.add_option("-c", "--tooldef", action="callback", type="string", dest="TOOL_DEFINITION_FILE", callback=SingleCheckCallback,
help="Specify the WORKSPACE relative path of tool_def.txt file, which replace target.txt's TOOL_CHAIN_CONF definition. 0 will clear this setting in target.txt and can't combine with other value.")
parser.add_option("-t", "--target", action="append", type="choice", choices=['DEBUG','RELEASE','0'], dest="TARGET",
help="TARGET is one of list: DEBUG, RELEASE, which replaces target.txt's TARGET definition. To specify more TARGET, please repeat this option. 0 will clear this setting in target.txt and can't combine with other value.")
parser.add_option("-n", "--tagname", action="callback", type="string", dest="TOOL_CHAIN_TAG", callback=SingleCheckCallback,
help="Specify the Tool Chain Tagname, which replaces target.txt's TOOL_CHAIN_TAG definition. 0 will clear this setting in target.txt and can't combine with other value.")
parser.add_option("-r", "--buildrule", action="callback", type="string", dest="BUILD_RULE_FILE", callback=SingleCheckCallback,
help="Specify the build rule configure file, which replaces target.txt's BUILD_RULE_CONF definition. If not specified, the default value Conf/build_rule.txt will be set.")
parser.add_option("-m", "--multithreadnum", action="callback", type="int", dest="NUM", callback=RangeCheckCallback,
help="Specify the multi-thread number which replace target.txt's MAX_CONCURRENT_THREAD_NUMBER. If the value is less than 2, MULTIPLE_THREAD will be disabled. If the value is larger than 1, MULTIPLE_THREAD will be enabled.")
(opt, args)=parser.parse_args()
return (opt, args)
if __name__ == '__main__':
    EdkLogger.Initialize()
    EdkLogger.SetLevel(EdkLogger.QUIET)
    if os.getenv('WORKSPACE') is None:
        print("ERROR: WORKSPACE should be specified or the edksetup script should be executed before running TargetTool")
        sys.exit(1)
    (opt, args) = MyOptionParser()
    if len(args) != 1 or args[0].lower() not in ('print', 'clean', 'set'):
        print("Exactly one of the args 'print', 'clean' or 'set' must be given.")
        sys.exit(1)
    if opt.NUM is not None and opt.NUM < 1:
        print("The MAX_CONCURRENT_THREAD_NUMBER must be larger than 0.")
        sys.exit(1)
    if opt.TARGET is not None and len(opt.TARGET) > 1:
        for elem in opt.TARGET:
            if elem == '0':
                print("0 will clear the TARGET setting in target.txt and can't be combined with any other value.")
                sys.exit(1)
    if opt.TARGET_ARCH is not None and len(opt.TARGET_ARCH) > 1:
        for elem in opt.TARGET_ARCH:
            if elem == '0':
                print("0 will clear the TARGET_ARCH setting in target.txt and can't be combined with any other value.")
                sys.exit(1)
    try:
        FileHandle = TargetTool(opt, args)
        if FileHandle.Arg.lower() == 'print':
            FileHandle.Print()
            sys.exit(0)
        elif FileHandle.Arg.lower() == 'clean':
            FileHandle.RWFile('#', '=', 0)
        else:
            FileHandle.RWFile('#', '=', 1)
    except Exception as e:
        last_type, last_value, last_tb = sys.exc_info()
        traceback.print_exception(last_type, last_value, last_tb)
| []
| []
| [
"WORKSPACE",
"VBOX_TARGET_CONF"
]
| [] | ["WORKSPACE", "VBOX_TARGET_CONF"] | python | 2 | 0 | |
crd/client.go | /*
Copyright 2016 The Fission Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package crd
import (
"errors"
"os"
"time"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
type (
FissionClient struct {
crdClient *rest.RESTClient
}
)
// GetKubernetesClient returns a REST config plus Kubernetes and
// apiextensions clientsets, using the kubeconfig file named by the
// $KUBECONFIG environment variable, or an in-cluster config if that's
// undefined.
func GetKubernetesClient() (*rest.Config, *kubernetes.Clientset, *apiextensionsclient.Clientset, error) {
var config *rest.Config
var err error
// get the config, either from kubeconfig or using our
// in-cluster service account
kubeConfig := os.Getenv("KUBECONFIG")
if len(kubeConfig) != 0 {
config, err = clientcmd.BuildConfigFromFlags("", kubeConfig)
if err != nil {
return nil, nil, nil, err
}
} else {
config, err = rest.InClusterConfig()
if err != nil {
return nil, nil, nil, err
}
}
// creates the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, nil, nil, err
}
apiExtClientset, err := apiextensionsclient.NewForConfig(config)
if err != nil {
return nil, nil, nil, err
}
return config, clientset, apiExtClientset, nil
}
// GetCrdClient returns a REST client for Fission CRD types, built from the given config
func GetCrdClient(config *rest.Config) (*rest.RESTClient, error) {
// mutate config to add our types
configureClient(config)
// make a REST client with that config
return rest.RESTClientFor(config)
}
// configureClient sets up a REST client for Fission CRD types.
//
// This is adapted from the client-go CRD example. It registers our
// types with the global API scheme (scheme.Scheme), which keeps a
// directory of known types so the string in an object's Kind field can
// be mapped back to a Go type. It also places the Fission CRD types
// under the "fission.io/v1" group version that we create for them below.
func configureClient(config *rest.Config) {
groupversion := schema.GroupVersion{
Group: "fission.io",
Version: "v1",
}
config.GroupVersion = &groupversion
config.APIPath = "/apis"
config.ContentType = runtime.ContentTypeJSON
config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
schemeBuilder := runtime.NewSchemeBuilder(
func(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(
groupversion,
&Function{},
&FunctionList{},
&metav1.ListOptions{},
&metav1.DeleteOptions{},
)
scheme.AddKnownTypes(
groupversion,
&Environment{},
&EnvironmentList{},
&metav1.ListOptions{},
&metav1.DeleteOptions{},
)
scheme.AddKnownTypes(
groupversion,
&HTTPTrigger{},
&HTTPTriggerList{},
&metav1.ListOptions{},
&metav1.DeleteOptions{},
)
scheme.AddKnownTypes(
groupversion,
&KubernetesWatchTrigger{},
&KubernetesWatchTriggerList{},
&metav1.ListOptions{},
&metav1.DeleteOptions{},
)
scheme.AddKnownTypes(
groupversion,
&TimeTrigger{},
&TimeTriggerList{},
&metav1.ListOptions{},
&metav1.DeleteOptions{},
)
scheme.AddKnownTypes(
groupversion,
&MessageQueueTrigger{},
&MessageQueueTriggerList{},
&metav1.ListOptions{},
&metav1.DeleteOptions{},
)
scheme.AddKnownTypes(
groupversion,
&Package{},
&PackageList{},
&metav1.ListOptions{},
&metav1.DeleteOptions{},
)
scheme.AddKnownTypes(
groupversion,
&Recorder{},
&RecorderList{},
&metav1.ListOptions{},
&metav1.DeleteOptions{},
)
scheme.AddKnownTypes(
groupversion,
&CanaryConfig{},
&CanaryConfigList{},
&metav1.ListOptions{},
&metav1.DeleteOptions{},
)
return nil
})
schemeBuilder.AddToScheme(scheme.Scheme)
}
func waitForCRDs(crdClient *rest.RESTClient) error {
start := time.Now()
for {
fi := MakeFunctionInterface(crdClient, metav1.NamespaceDefault)
_, err := fi.List(metav1.ListOptions{})
if err != nil {
time.Sleep(100 * time.Millisecond)
} else {
return nil
}
if time.Since(start) > 30*time.Second {
return errors.New("timeout waiting for CRDs")
}
}
}
func MakeFissionClient() (*FissionClient, *kubernetes.Clientset, *apiextensionsclient.Clientset, error) {
config, kubeClient, apiExtClient, err := GetKubernetesClient()
if err != nil {
return nil, nil, nil, err
}
crdClient, err := GetCrdClient(config)
if err != nil {
return nil, nil, nil, err
}
fc := &FissionClient{
crdClient: crdClient,
}
return fc, kubeClient, apiExtClient, nil
}
func (fc *FissionClient) Functions(ns string) FunctionInterface {
return MakeFunctionInterface(fc.crdClient, ns)
}
func (fc *FissionClient) Environments(ns string) EnvironmentInterface {
return MakeEnvironmentInterface(fc.crdClient, ns)
}
func (fc *FissionClient) HTTPTriggers(ns string) HTTPTriggerInterface {
return MakeHTTPTriggerInterface(fc.crdClient, ns)
}
func (fc *FissionClient) KubernetesWatchTriggers(ns string) KubernetesWatchTriggerInterface {
return MakeKubernetesWatchTriggerInterface(fc.crdClient, ns)
}
func (fc *FissionClient) TimeTriggers(ns string) TimeTriggerInterface {
return MakeTimeTriggerInterface(fc.crdClient, ns)
}
func (fc *FissionClient) MessageQueueTriggers(ns string) MessageQueueTriggerInterface {
return MakeMessageQueueTriggerInterface(fc.crdClient, ns)
}
func (fc *FissionClient) Recorders(ns string) RecorderInterface {
return MakeRecorderInterface(fc.crdClient, ns)
}
func (fc *FissionClient) Packages(ns string) PackageInterface {
return MakePackageInterface(fc.crdClient, ns)
}
func (fc *FissionClient) CanaryConfigs(ns string) CanaryConfigInterface {
return MakeCanaryConfigInterface(fc.crdClient, ns)
}
func (fc *FissionClient) WaitForCRDs() error {
return waitForCRDs(fc.crdClient)
}
func (fc *FissionClient) GetCrdClient() *rest.RESTClient {
return fc.crdClient
}
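
// A minimal usage sketch from a consuming package (assumes a reachable
// cluster with the Fission CRDs installed; "fission-function" is only an
// example namespace, not something this package defines):
//
//	fc, _, _, err := crd.MakeFissionClient()
//	if err != nil {
//		// handle error
//	}
//	if err := fc.WaitForCRDs(); err != nil {
//		// handle error
//	}
//	fns, err := fc.Functions("fission-function").List(metav1.ListOptions{})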
| [
"\"KUBECONFIG\""
]
| []
| [
"KUBECONFIG"
]
| [] | ["KUBECONFIG"] | go | 1 | 0 | |
examples/siha/very_active_minutes_dataset.py | """Example on how to read sleep data from SIHA
"""
import os
from tasrif.data_readers.siha_dataset import SihaDataset
from tasrif.processing_pipeline import SequenceOperator
from tasrif.processing_pipeline.custom import JqOperator
from tasrif.processing_pipeline.pandas import (
AsTypeOperator,
ConvertToDatetimeOperator,
JsonNormalizeOperator,
SetIndexOperator,
)
siha_folder_path = (
os.environ.get("SIHA_PATH")
or "/mnt/datafabric/qcri-hmc__profast__2020-2021-03-17T13:00:44"
)
pipeline = SequenceOperator(
[
SihaDataset(siha_folder_path, table_name="VeryActiveMinutes"),
JqOperator(
"map({patientID} + .data.activities_tracker_minutesVeryActive[].data."
+ '"activities-tracker-minutesVeryActive"[0])'
),
JsonNormalizeOperator(),
ConvertToDatetimeOperator(
feature_names=["dateTime"], infer_datetime_format=True
),
SetIndexOperator("dateTime"),
AsTypeOperator({"value": "float32"}),
]
)
df = pipeline.process()
print(df)
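
# A possible follow-up (illustrative sketch; assumes the pipeline above ran and
# produced a DataFrame indexed by dateTime with "patientID" and "value" columns):
#
#   weekly = df.groupby("patientID")["value"].resample("W").sum()
#   print(weekly.head())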
| []
| []
| [
"SIHA_PATH"
]
| [] | ["SIHA_PATH"] | python | 1 | 0 | |
src/runtime/crash_test.go | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"bytes"
"flag"
"fmt"
"internal/testenv"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
)
var toRemove []string
func TestMain(m *testing.M) {
status := m.Run()
for _, file := range toRemove {
os.RemoveAll(file)
}
os.Exit(status)
}
var testprog struct {
sync.Mutex
dir string
target map[string]buildexe
}
type buildexe struct {
exe string
err error
}
func runTestProg(t *testing.T, binary, name string, env ...string) string {
if *flagQuick {
t.Skip("-quick")
}
testenv.MustHaveGoBuild(t)
exe, err := buildTestProg(t, binary)
if err != nil {
t.Fatal(err)
}
cmd := testenv.CleanCmdEnv(exec.Command(exe, name))
cmd.Env = append(cmd.Env, env...)
if testing.Short() {
cmd.Env = append(cmd.Env, "RUNTIME_TEST_SHORT=1")
}
var b bytes.Buffer
cmd.Stdout = &b
cmd.Stderr = &b
if err := cmd.Start(); err != nil {
t.Fatalf("starting %s %s: %v", binary, name, err)
}
// If the process doesn't complete within 1 minute,
// assume it is hanging and kill it to get a stack trace.
p := cmd.Process
done := make(chan bool)
go func() {
scale := 1
// This GOARCH/GOOS test is copied from cmd/dist/test.go.
// TODO(iant): Have cmd/dist update the environment variable.
if runtime.GOARCH == "arm" || runtime.GOOS == "windows" {
scale = 2
}
if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" {
if sc, err := strconv.Atoi(s); err == nil {
scale = sc
}
}
select {
case <-done:
case <-time.After(time.Duration(scale) * time.Minute):
p.Signal(sigquit)
}
}()
if err := cmd.Wait(); err != nil {
t.Logf("%s %s exit status: %v", binary, name, err)
}
close(done)
return b.String()
}
func buildTestProg(t *testing.T, binary string, flags ...string) (string, error) {
if *flagQuick {
t.Skip("-quick")
}
checkStaleRuntime(t)
testprog.Lock()
defer testprog.Unlock()
if testprog.dir == "" {
dir, err := ioutil.TempDir("", "go-build")
if err != nil {
t.Fatalf("failed to create temp directory: %v", err)
}
testprog.dir = dir
toRemove = append(toRemove, dir)
}
if testprog.target == nil {
testprog.target = make(map[string]buildexe)
}
name := binary
if len(flags) > 0 {
name += "_" + strings.Join(flags, "_")
}
target, ok := testprog.target[name]
if ok {
return target.exe, target.err
}
exe := filepath.Join(testprog.dir, name+".exe")
cmd := exec.Command(testenv.GoToolPath(t), append([]string{"build", "-o", exe}, flags...)...)
cmd.Dir = "testdata/" + binary
out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
if err != nil {
target.err = fmt.Errorf("building %s %v: %v\n%s", binary, flags, err, out)
testprog.target[name] = target
return "", target.err
}
target.exe = exe
testprog.target[name] = target
return exe, nil
}
var (
staleRuntimeOnce sync.Once // guards init of staleRuntimeErr
staleRuntimeErr error
)
func checkStaleRuntime(t *testing.T) {
staleRuntimeOnce.Do(func() {
// 'go run' uses the installed copy of runtime.a, which may be out of date.
out, err := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "list", "-gcflags=all="+os.Getenv("GO_GCFLAGS"), "-f", "{{.Stale}}", "runtime")).CombinedOutput()
if err != nil {
staleRuntimeErr = fmt.Errorf("failed to execute 'go list': %v\n%v", err, string(out))
return
}
if string(out) != "false\n" {
t.Logf("go list -f {{.Stale}} runtime:\n%s", out)
out, err := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "list", "-gcflags=all="+os.Getenv("GO_GCFLAGS"), "-f", "{{.StaleReason}}", "runtime")).CombinedOutput()
if err != nil {
t.Logf("go list -f {{.StaleReason}} failed: %v", err)
}
t.Logf("go list -f {{.StaleReason}} runtime:\n%s", out)
staleRuntimeErr = fmt.Errorf("Stale runtime.a. Run 'go install runtime'.")
}
})
if staleRuntimeErr != nil {
t.Fatal(staleRuntimeErr)
}
}
func testCrashHandler(t *testing.T, cgo bool) {
type crashTest struct {
Cgo bool
}
var output string
if cgo {
output = runTestProg(t, "testprogcgo", "Crash")
} else {
output = runTestProg(t, "testprog", "Crash")
}
want := "main: recovered done\nnew-thread: recovered done\nsecond-new-thread: recovered done\nmain-again: recovered done\n"
if output != want {
t.Fatalf("output:\n%s\n\nwanted:\n%s", output, want)
}
}
func TestCrashHandler(t *testing.T) {
testCrashHandler(t, false)
}
func testDeadlock(t *testing.T, name string) {
output := runTestProg(t, "testprog", name)
want := "fatal error: all goroutines are asleep - deadlock!\n"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
func TestSimpleDeadlock(t *testing.T) {
testDeadlock(t, "SimpleDeadlock")
}
func TestInitDeadlock(t *testing.T) {
testDeadlock(t, "InitDeadlock")
}
func TestLockedDeadlock(t *testing.T) {
testDeadlock(t, "LockedDeadlock")
}
func TestLockedDeadlock2(t *testing.T) {
testDeadlock(t, "LockedDeadlock2")
}
func TestGoexitDeadlock(t *testing.T) {
output := runTestProg(t, "testprog", "GoexitDeadlock")
want := "no goroutines (main called runtime.Goexit) - deadlock!"
if !strings.Contains(output, want) {
t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
}
}
func TestStackOverflow(t *testing.T) {
output := runTestProg(t, "testprog", "StackOverflow")
want := "runtime: goroutine stack exceeds 1474560-byte limit\nfatal error: stack overflow"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
func TestThreadExhaustion(t *testing.T) {
output := runTestProg(t, "testprog", "ThreadExhaustion")
want := "runtime: program exceeds 10-thread limit\nfatal error: thread exhaustion"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
func TestRecursivePanic(t *testing.T) {
output := runTestProg(t, "testprog", "RecursivePanic")
want := `wrap: bad
panic: again
`
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
func TestGoexitCrash(t *testing.T) {
output := runTestProg(t, "testprog", "GoexitExit")
want := "no goroutines (main called runtime.Goexit) - deadlock!"
if !strings.Contains(output, want) {
t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
}
}
func TestGoexitDefer(t *testing.T) {
c := make(chan struct{})
go func() {
defer func() {
r := recover()
if r != nil {
t.Errorf("non-nil recover during Goexit")
}
c <- struct{}{}
}()
runtime.Goexit()
}()
// Note: if the defer fails to run, we will get a deadlock here
<-c
}
func TestGoNil(t *testing.T) {
output := runTestProg(t, "testprog", "GoNil")
want := "go of nil func value"
if !strings.Contains(output, want) {
t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
}
}
func TestMainGoroutineID(t *testing.T) {
output := runTestProg(t, "testprog", "MainGoroutineID")
want := "panic: test\n\ngoroutine 1 [running]:\n"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
func TestNoHelperGoroutines(t *testing.T) {
output := runTestProg(t, "testprog", "NoHelperGoroutines")
matches := regexp.MustCompile(`goroutine [0-9]+ \[`).FindAllStringSubmatch(output, -1)
if len(matches) != 1 || matches[0][0] != "goroutine 1 [" {
t.Fatalf("want to see only goroutine 1, see:\n%s", output)
}
}
func TestBreakpoint(t *testing.T) {
output := runTestProg(t, "testprog", "Breakpoint")
// If runtime.Breakpoint() is inlined, then the stack trace prints
// "runtime.Breakpoint(...)" instead of "runtime.Breakpoint()".
want := "runtime.Breakpoint("
if !strings.Contains(output, want) {
t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
}
}
func TestGoexitInPanic(t *testing.T) {
// see issue 8774: this code used to trigger an infinite recursion
output := runTestProg(t, "testprog", "GoexitInPanic")
want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
// Issue 14965: Runtime panics should be of type runtime.Error
func TestRuntimePanicWithRuntimeError(t *testing.T) {
testCases := [...]func(){
0: func() {
var m map[uint64]bool
m[1234] = true
},
1: func() {
ch := make(chan struct{})
close(ch)
close(ch)
},
2: func() {
var ch = make(chan struct{})
close(ch)
ch <- struct{}{}
},
3: func() {
var s = make([]int, 2)
_ = s[2]
},
4: func() {
n := -1
_ = make(chan bool, n)
},
5: func() {
close((chan bool)(nil))
},
}
for i, fn := range testCases {
got := panicValue(fn)
if _, ok := got.(runtime.Error); !ok {
t.Errorf("test #%d: recovered value %v(type %T) does not implement runtime.Error", i, got, got)
}
}
}
func panicValue(fn func()) (recovered interface{}) {
defer func() {
recovered = recover()
}()
fn()
return
}
func TestPanicAfterGoexit(t *testing.T) {
// an uncaught panic should still work after goexit
output := runTestProg(t, "testprog", "PanicAfterGoexit")
want := "panic: hello"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
func TestRecoveredPanicAfterGoexit(t *testing.T) {
output := runTestProg(t, "testprog", "RecoveredPanicAfterGoexit")
want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
func TestRecoverBeforePanicAfterGoexit(t *testing.T) {
// 1. defer a function that recovers
// 2. defer a function that panics
// 3. call goexit
// Goexit should run the #2 defer. Its panic
// should be caught by the #1 defer, and execution
// should resume in the caller. Like the Goexit
// never happened!
defer func() {
r := recover()
if r == nil {
panic("bad recover")
}
}()
defer func() {
panic("hello")
}()
runtime.Goexit()
}
func TestNetpollDeadlock(t *testing.T) {
t.Parallel()
output := runTestProg(t, "testprognet", "NetpollDeadlock")
want := "done\n"
if !strings.HasSuffix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
func TestPanicTraceback(t *testing.T) {
t.Parallel()
output := runTestProg(t, "testprog", "PanicTraceback")
want := "panic: hello"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
// Check functions in the traceback.
fns := []string{"main.pt1.func1", "panic", "main.pt2.func1", "panic", "main.pt2", "main.pt1"}
for _, fn := range fns {
re := regexp.MustCompile(`(?m)^` + regexp.QuoteMeta(fn) + `\(.*\n`)
idx := re.FindStringIndex(output)
if idx == nil {
t.Fatalf("expected %q function in traceback:\n%s", fn, output)
}
output = output[idx[1]:]
}
}
func testPanicDeadlock(t *testing.T, name string, want string) {
// test issue 14432
output := runTestProg(t, "testprog", name)
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
func TestPanicDeadlockGosched(t *testing.T) {
testPanicDeadlock(t, "GoschedInPanic", "panic: errorThatGosched\n\n")
}
func TestPanicDeadlockSyscall(t *testing.T) {
testPanicDeadlock(t, "SyscallInPanic", "1\n2\npanic: 3\n\n")
}
func TestPanicLoop(t *testing.T) {
output := runTestProg(t, "testprog", "PanicLoop")
if want := "panic while printing panic value"; !strings.Contains(output, want) {
t.Errorf("output does not contain %q:\n%s", want, output)
}
}
func TestMemPprof(t *testing.T) {
testenv.MustHaveGoRun(t)
exe, err := buildTestProg(t, "testprog")
if err != nil {
t.Fatal(err)
}
got, err := testenv.CleanCmdEnv(exec.Command(exe, "MemProf")).CombinedOutput()
if err != nil {
t.Fatal(err)
}
fn := strings.TrimSpace(string(got))
defer os.Remove(fn)
for try := 0; try < 2; try++ {
cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-alloc_space", "-top"))
// Check that pprof works both with and without explicit executable on command line.
if try == 0 {
cmd.Args = append(cmd.Args, exe, fn)
} else {
cmd.Args = append(cmd.Args, fn)
}
found := false
for i, e := range cmd.Env {
if strings.HasPrefix(e, "PPROF_TMPDIR=") {
cmd.Env[i] = "PPROF_TMPDIR=" + os.TempDir()
found = true
break
}
}
if !found {
cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir())
}
top, err := cmd.CombinedOutput()
t.Logf("%s:\n%s", cmd.Args, top)
if err != nil {
t.Error(err)
} else if !bytes.Contains(top, []byte("MemProf")) {
t.Error("missing MemProf in pprof output")
}
}
}
var concurrentMapTest = flag.Bool("run_concurrent_map_tests", false, "also run flaky concurrent map tests")
func TestConcurrentMapWrites(t *testing.T) {
if !*concurrentMapTest {
t.Skip("skipping without -run_concurrent_map_tests")
}
testenv.MustHaveGoRun(t)
output := runTestProg(t, "testprog", "concurrentMapWrites")
want := "fatal error: concurrent map writes"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
func TestConcurrentMapReadWrite(t *testing.T) {
if !*concurrentMapTest {
t.Skip("skipping without -run_concurrent_map_tests")
}
testenv.MustHaveGoRun(t)
output := runTestProg(t, "testprog", "concurrentMapReadWrite")
want := "fatal error: concurrent map read and map write"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
func TestConcurrentMapIterateWrite(t *testing.T) {
if !*concurrentMapTest {
t.Skip("skipping without -run_concurrent_map_tests")
}
testenv.MustHaveGoRun(t)
output := runTestProg(t, "testprog", "concurrentMapIterateWrite")
want := "fatal error: concurrent map iteration and map write"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
type point struct {
x, y *int
}
func (p *point) negate() {
*p.x = *p.x * -1
*p.y = *p.y * -1
}
// Test for issue #10152.
func TestPanicInlined(t *testing.T) {
defer func() {
r := recover()
if r == nil {
t.Fatalf("recover failed")
}
buf := make([]byte, 2048)
n := runtime.Stack(buf, false)
buf = buf[:n]
if !bytes.Contains(buf, []byte("(*point).negate(")) {
t.Fatalf("expecting stack trace to contain call to (*point).negate()")
}
}()
pt := new(point)
pt.negate()
}
// Test for issues #3934 and #20018.
// We want to delay exiting until a panic print is complete.
func TestPanicRace(t *testing.T) {
testenv.MustHaveGoRun(t)
exe, err := buildTestProg(t, "testprog")
if err != nil {
t.Fatal(err)
}
// The test is intentionally racy, and in my testing does not
// produce the expected output about 0.05% of the time.
// So run the program in a loop and only fail the test if we
// get the wrong output ten times in a row.
const tries = 10
retry:
for i := 0; i < tries; i++ {
got, err := testenv.CleanCmdEnv(exec.Command(exe, "PanicRace")).CombinedOutput()
if err == nil {
t.Logf("try %d: program exited successfully, should have failed", i+1)
continue
}
if i > 0 {
t.Logf("try %d:\n", i+1)
}
t.Logf("%s\n", got)
wants := []string{
"panic: crash",
"PanicRace",
"created by ",
}
for _, want := range wants {
if !bytes.Contains(got, []byte(want)) {
t.Logf("did not find expected string %q", want)
continue retry
}
}
		// This run produced the expected output; the test passes.
return
}
t.Errorf("test ran %d times without producing expected output", tries)
}
func TestBadTraceback(t *testing.T) {
output := runTestProg(t, "testprog", "BadTraceback")
for _, want := range []string{
"runtime: unexpected return pc",
"called from 0xbad",
"00000bad", // Smashed LR in hex dump
"<main.badLR", // Symbolization in hex dump (badLR1 or badLR2)
} {
if !strings.Contains(output, want) {
t.Errorf("output does not contain %q:\n%s", want, output)
}
}
}
func TestTimePprof(t *testing.T) {
if runtime.GOOS == "aix" {
t.Skip("pprof not yet available on AIX (see golang.org/issue/28555)")
}
fn := runTestProg(t, "testprog", "TimeProf")
fn = strings.TrimSpace(fn)
defer os.Remove(fn)
cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-top", "-nodecount=1", fn))
cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir())
top, err := cmd.CombinedOutput()
t.Logf("%s", top)
if err != nil {
t.Error(err)
} else if bytes.Contains(top, []byte("ExternalCode")) {
t.Error("profiler refers to ExternalCode")
}
}
// Test that runtime.abort actually aborts the process.
func TestAbort(t *testing.T) {
// Pass GOTRACEBACK to ensure we get runtime frames.
output := runTestProg(t, "testprog", "Abort", "GOTRACEBACK=system")
if want := "runtime.abort"; !strings.Contains(output, want) {
t.Errorf("output does not contain %q:\n%s", want, output)
}
if strings.Contains(output, "BAD") {
t.Errorf("output contains BAD:\n%s", output)
}
// Check that it's a signal traceback.
want := "PC="
// For systems that use a breakpoint, check specifically for that.
switch runtime.GOARCH {
case "386", "amd64":
switch runtime.GOOS {
case "plan9":
want = "sys: breakpoint"
case "windows":
want = "Exception 0x80000003"
default:
want = "SIGTRAP"
}
}
if !strings.Contains(output, want) {
t.Errorf("output does not contain %q:\n%s", want, output)
}
}
// For TestRuntimePanic: test a panic in the runtime package without
// involving the testing harness.
func init() {
if os.Getenv("GO_TEST_RUNTIME_PANIC") == "1" {
defer func() {
if r := recover(); r != nil {
				// The panic should crash the process before this recover runs;
				// if we do get here, exit 0 so the parent test sees a clean
				// exit and reports the failure.
os.Exit(0)
}
}()
runtime.PanicForTesting(nil, 1)
		// We expect to crash before reaching this point; exiting 0 here makes the parent test report a failure.
os.Exit(0)
}
}
func TestRuntimePanic(t *testing.T) {
testenv.MustHaveExec(t)
cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestRuntimePanic"))
cmd.Env = append(cmd.Env, "GO_TEST_RUNTIME_PANIC=1")
out, err := cmd.CombinedOutput()
t.Logf("%s", out)
if err == nil {
t.Error("child process did not fail")
} else if want := "runtime.unexportedPanicForTesting"; !bytes.Contains(out, []byte(want)) {
t.Errorf("output did not contain expected string %q", want)
}
}
// Test that g0 stack overflows are handled gracefully.
func TestG0StackOverflow(t *testing.T) {
testenv.MustHaveExec(t)
switch runtime.GOOS {
case "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "android":
t.Skipf("g0 stack is wrong on pthread platforms (see golang.org/issue/26061)")
}
if os.Getenv("TEST_G0_STACK_OVERFLOW") != "1" {
cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestG0StackOverflow", "-test.v"))
cmd.Env = append(cmd.Env, "TEST_G0_STACK_OVERFLOW=1")
out, err := cmd.CombinedOutput()
// Don't check err since it's expected to crash.
if n := strings.Count(string(out), "morestack on g0\n"); n != 1 {
t.Fatalf("%s\n(exit status %v)", out, err)
}
// Check that it's a signal-style traceback.
if runtime.GOOS != "windows" {
if want := "PC="; !strings.Contains(string(out), want) {
t.Errorf("output does not contain %q:\n%s", want, out)
}
}
return
}
runtime.G0StackOverflow()
}
// Test that panic message is not clobbered.
// See issue 30150.
func TestDoublePanic(t *testing.T) {
output := runTestProg(t, "testprog", "DoublePanic", "GODEBUG=clobberfree=1")
wants := []string{"panic: XXX", "panic: YYY"}
for _, want := range wants {
if !strings.Contains(output, want) {
t.Errorf("output:\n%s\n\nwant output containing: %s", output, want)
}
}
}
| [
"\"GO_TEST_TIMEOUT_SCALE\"",
"\"GO_GCFLAGS\"",
"\"GO_GCFLAGS\"",
"\"GO_TEST_RUNTIME_PANIC\"",
"\"TEST_G0_STACK_OVERFLOW\""
]
| []
| [
"GO_GCFLAGS",
"GO_TEST_TIMEOUT_SCALE",
"GO_TEST_RUNTIME_PANIC",
"TEST_G0_STACK_OVERFLOW"
]
| [] | ["GO_GCFLAGS", "GO_TEST_TIMEOUT_SCALE", "GO_TEST_RUNTIME_PANIC", "TEST_G0_STACK_OVERFLOW"] | go | 4 | 0 | |
acceptance/acceptance_test.go | //go:build acceptance
// +build acceptance
package acceptance
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
dockertypes "github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/ghodss/yaml"
"github.com/google/go-containerregistry/pkg/name"
"github.com/pelletier/go-toml"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/buildpacks/pack/acceptance/assertions"
"github.com/buildpacks/pack/acceptance/buildpacks"
"github.com/buildpacks/pack/acceptance/config"
"github.com/buildpacks/pack/acceptance/invoke"
"github.com/buildpacks/pack/acceptance/managers"
"github.com/buildpacks/pack/internal/cache"
"github.com/buildpacks/pack/internal/style"
"github.com/buildpacks/pack/pkg/archive"
h "github.com/buildpacks/pack/testhelpers"
)
const (
runImage = "pack-test/run"
buildImage = "pack-test/build"
)
var (
dockerCli client.CommonAPIClient
registryConfig *h.TestRegistryConfig
suiteManager *SuiteManager
imageManager managers.ImageManager
assertImage assertions.ImageAssertionManager
)
func TestAcceptance(t *testing.T) {
var err error
h.RequireDocker(t)
rand.Seed(time.Now().UTC().UnixNano())
assert := h.NewAssertionManager(t)
dockerCli, err = client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.38"))
assert.Nil(err)
imageManager = managers.NewImageManager(t, dockerCli)
registryConfig = h.RunRegistry(t)
defer registryConfig.RmRegistry(t)
assertImage = assertions.NewImageAssertionManager(t, imageManager, registryConfig)
inputConfigManager, err := config.NewInputConfigurationManager()
assert.Nil(err)
assetsConfig := config.ConvergedAssetManager(t, assert, inputConfigManager)
suiteManager = &SuiteManager{out: t.Logf}
suite := spec.New("acceptance suite", spec.Report(report.Terminal{}))
if inputConfigManager.Combinations().IncludesCurrentSubjectPack() {
suite("p_current", func(t *testing.T, when spec.G, it spec.S) {
testWithoutSpecificBuilderRequirement(
t,
when,
it,
assetsConfig.NewPackAsset(config.Current),
)
}, spec.Report(report.Terminal{}))
}
for _, combo := range inputConfigManager.Combinations() {
// see https://github.com/golang/go/wiki/CommonMistakes#using-reference-to-loop-iterator-variable
combo := combo
t.Logf(`setting up run combination %s: %s`,
style.Symbol(combo.String()),
combo.Describe(assetsConfig),
)
suite(combo.String(), func(t *testing.T, when spec.G, it spec.S) {
testAcceptance(
t,
when,
it,
assetsConfig.NewPackAsset(combo.Pack),
assetsConfig.NewPackAsset(combo.PackCreateBuilder),
assetsConfig.NewLifecycleAsset(combo.Lifecycle),
)
}, spec.Report(report.Terminal{}))
}
suite.Run(t)
assert.Nil(suiteManager.CleanUp())
}
// These tests either (a) do not require a builder or (b) do not require a specific builder to be provided
// in order to test compatibility.
// They should only be run against the "current" (i.e., main) version of pack.
func testWithoutSpecificBuilderRequirement(
t *testing.T,
when spec.G,
it spec.S,
packConfig config.PackAsset,
) {
var (
pack *invoke.PackInvoker
assert = h.NewAssertionManager(t)
buildpackManager buildpacks.BuildpackManager
)
it.Before(func() {
pack = invoke.NewPackInvoker(t, assert, packConfig, registryConfig.DockerConfigDir)
pack.EnableExperimental()
buildpackManager = buildpacks.NewBuildpackManager(t, assert)
})
it.After(func() {
pack.Cleanup()
})
when("invalid subcommand", func() {
it("prints usage", func() {
output, err := pack.Run("some-bad-command")
assert.NotNil(err)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsCommandUnknown("some-bad-command")
assertOutput.IncludesUsagePrompt()
})
})
when("build with default builders not set", func() {
it("informs the user", func() {
output, err := pack.Run(
"build", "some/image",
"-p", filepath.Join("testdata", "mock_app"),
)
assert.NotNil(err)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.IncludesMessageToSetDefaultBuilder()
assertOutput.IncludesPrefixedGoogleBuilder()
assertOutput.IncludesPrefixedHerokuBuilders()
assertOutput.IncludesPrefixedPaketoBuilders()
})
})
when("buildpack", func() {
when("package", func() {
var (
tmpDir string
buildpackManager buildpacks.BuildpackManager
simplePackageConfigFixtureName = "package.toml"
)
it.Before(func() {
var err error
tmpDir, err = ioutil.TempDir("", "buildpack-package-tests")
assert.Nil(err)
buildpackManager = buildpacks.NewBuildpackManager(t, assert)
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.SimpleLayersParent, buildpacks.SimpleLayers)
})
it.After(func() {
assert.Nil(os.RemoveAll(tmpDir))
})
generateAggregatePackageToml := func(buildpackURI, nestedPackageName, os string) string {
t.Helper()
packageTomlFile, err := ioutil.TempFile(tmpDir, "package_aggregate-*.toml")
assert.Nil(err)
pack.FixtureManager().TemplateFixtureToFile(
"package_aggregate.toml",
packageTomlFile,
map[string]interface{}{
"BuildpackURI": buildpackURI,
"PackageName": nestedPackageName,
"OS": os,
},
)
assert.Nil(packageTomlFile.Close())
return packageTomlFile.Name()
}
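			// For orientation, the templated aggregate config is conceptually a
			// package.toml of this shape (a sketch only; the real fixture lives in
			// pack's test fixtures and may differ in detail):
			//
			//	[buildpack]
			//	uri = "<BuildpackURI>"
			//
			//	[[dependencies]]
			//	uri = "<PackageName>"
			//
			//	[platform]
			//	os = "<OS>"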
when("no --format is provided", func() {
it("creates the package as image", func() {
packageName := "test/package-" + h.RandString(10)
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
output := pack.RunSuccessfully("buildpack", "package", packageName, "-c", packageTomlPath)
assertions.NewOutputAssertionManager(t, output).ReportsPackageCreation(packageName)
defer imageManager.CleanupImages(packageName)
assertImage.ExistsLocally(packageName)
})
})
when("--format image", func() {
it("creates the package", func() {
t.Log("package w/ only buildpacks")
nestedPackageName := "test/package-" + h.RandString(10)
packageName := "test/package-" + h.RandString(10)
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, imageManager.HostOS())
packageBuildpack := buildpacks.NewPackageImage(
t,
pack,
packageName,
aggregatePackageToml,
buildpacks.WithRequiredBuildpacks(
buildpacks.SimpleLayersParent,
buildpacks.NewPackageImage(
t,
pack,
nestedPackageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
),
),
)
buildpackManager.PrepareBuildpacks(tmpDir, packageBuildpack)
defer imageManager.CleanupImages(nestedPackageName, packageName)
assertImage.ExistsLocally(nestedPackageName)
assertImage.ExistsLocally(packageName)
})
when("--publish", func() {
it("publishes image to registry", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
nestedPackageName := registryConfig.RepoName("test/package-" + h.RandString(10))
nestedPackage := buildpacks.NewPackageImage(
t,
pack,
nestedPackageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
buildpacks.WithPublish(),
)
buildpackManager.PrepareBuildpacks(tmpDir, nestedPackage)
aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, imageManager.HostOS())
packageName := registryConfig.RepoName("test/package-" + h.RandString(10))
output := pack.RunSuccessfully(
"buildpack", "package", packageName,
"-c", aggregatePackageToml,
"--publish",
)
defer imageManager.CleanupImages(packageName)
assertions.NewOutputAssertionManager(t, output).ReportsPackagePublished(packageName)
assertImage.NotExistsLocally(packageName)
assertImage.CanBePulledFromRegistry(packageName)
})
})
when("--pull-policy=never", func() {
it("should use local image", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
nestedPackageName := "test/package-" + h.RandString(10)
nestedPackage := buildpacks.NewPackageImage(
t,
pack,
nestedPackageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
)
buildpackManager.PrepareBuildpacks(tmpDir, nestedPackage)
defer imageManager.CleanupImages(nestedPackageName)
aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, imageManager.HostOS())
packageName := registryConfig.RepoName("test/package-" + h.RandString(10))
defer imageManager.CleanupImages(packageName)
pack.JustRunSuccessfully(
"buildpack", "package", packageName,
"-c", aggregatePackageToml,
"--pull-policy", "never")
assertImage.ExistsLocally(packageName)
})
it("should not pull image from registry", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
nestedPackageName := registryConfig.RepoName("test/package-" + h.RandString(10))
nestedPackage := buildpacks.NewPackageImage(
t,
pack,
nestedPackageName,
packageTomlPath,
buildpacks.WithPublish(),
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
)
buildpackManager.PrepareBuildpacks(tmpDir, nestedPackage)
aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, imageManager.HostOS())
packageName := registryConfig.RepoName("test/package-" + h.RandString(10))
output, err := pack.Run(
"buildpack", "package", packageName,
"-c", aggregatePackageToml,
"--pull-policy", "never",
)
assert.NotNil(err)
assertions.NewOutputAssertionManager(t, output).ReportsImageNotExistingOnDaemon(nestedPackageName)
})
})
})
when("--format file", func() {
when("the file extension is .cnb", func() {
it("creates the package with the same extension", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
destinationFile := filepath.Join(tmpDir, "package.cnb")
output := pack.RunSuccessfully(
"buildpack", "package", destinationFile,
"--format", "file",
"-c", packageTomlPath,
)
assertions.NewOutputAssertionManager(t, output).ReportsPackageCreation(destinationFile)
h.AssertTarball(t, destinationFile)
})
})
when("the file extension is empty", func() {
it("creates the package with a .cnb extension", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
destinationFile := filepath.Join(tmpDir, "package")
expectedFile := filepath.Join(tmpDir, "package.cnb")
output := pack.RunSuccessfully(
"buildpack", "package", destinationFile,
"--format", "file",
"-c", packageTomlPath,
)
assertions.NewOutputAssertionManager(t, output).ReportsPackageCreation(expectedFile)
h.AssertTarball(t, expectedFile)
})
})
when("the file extension is not .cnb", func() {
it("creates the package with the given extension but shows a warning", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
destinationFile := filepath.Join(tmpDir, "package.tar.gz")
output := pack.RunSuccessfully(
"buildpack", "package", destinationFile,
"--format", "file",
"-c", packageTomlPath,
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsPackageCreation(destinationFile)
assertOutput.ReportsInvalidExtension(".gz")
h.AssertTarball(t, destinationFile)
})
})
})
when("package.toml is invalid", func() {
it("displays an error", func() {
output, err := pack.Run(
"buildpack", "package", "some-package",
"-c", pack.FixtureManager().FixtureLocation("invalid_package.toml"),
)
assert.NotNil(err)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsReadingConfig()
})
})
})
when("inspect", func() {
var tmpDir string
it.Before(func() {
var err error
tmpDir, err = ioutil.TempDir("", "buildpack-inspect-tests")
assert.Nil(err)
})
it.After(func() {
assert.Succeeds(os.RemoveAll(tmpDir))
})
when("buildpack archive", func() {
it("succeeds", func() {
packageFileLocation := filepath.Join(
tmpDir,
fmt.Sprintf("buildpack-%s.cnb", h.RandString(8)),
)
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", imageManager.HostOS())
packageFile := buildpacks.NewPackageFile(
t,
pack,
packageFileLocation,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(
buildpacks.FolderSimpleLayersParent,
buildpacks.FolderSimpleLayers,
),
)
buildpackManager.PrepareBuildpacks(tmpDir, packageFile)
expectedOutput := pack.FixtureManager().TemplateFixture(
"inspect_buildpack_output.txt",
map[string]interface{}{
"buildpack_source": "LOCAL ARCHIVE",
"buildpack_name": packageFileLocation,
},
)
output := pack.RunSuccessfully("buildpack", "inspect", packageFileLocation)
assert.TrimmedEq(output, expectedOutput)
})
})
when("buildpack image", func() {
when("inspect", func() {
it("succeeds", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", imageManager.HostOS())
packageImageName := registryConfig.RepoName("buildpack-" + h.RandString(8))
packageImage := buildpacks.NewPackageImage(
t,
pack,
packageImageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(
buildpacks.FolderSimpleLayersParent,
buildpacks.FolderSimpleLayers,
),
)
defer imageManager.CleanupImages(packageImageName)
buildpackManager.PrepareBuildpacks(tmpDir, packageImage)
expectedOutput := pack.FixtureManager().TemplateFixture(
"inspect_buildpack_output.txt",
map[string]interface{}{
"buildpack_source": "LOCAL IMAGE",
"buildpack_name": packageImageName,
},
)
output := pack.RunSuccessfully("buildpack", "inspect", packageImageName)
assert.TrimmedEq(output, expectedOutput)
})
})
})
})
})
when("builder", func() {
when("suggest", func() {
it("displays suggested builders", func() {
output := pack.RunSuccessfully("builder", "suggest")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.IncludesSuggestedBuildersHeading()
assertOutput.IncludesPrefixedGoogleBuilder()
assertOutput.IncludesPrefixedHerokuBuilders()
assertOutput.IncludesPrefixedPaketoBuilders()
})
})
})
when("config", func() {
when("default-builder", func() {
it("sets the default builder in ~/.pack/config.toml", func() {
builderName := "paketobuildpacks/builder:base"
output := pack.RunSuccessfully("config", "default-builder", builderName)
assertions.NewOutputAssertionManager(t, output).ReportsSettingDefaultBuilder(builderName)
})
})
when("trusted-builders", func() {
it("prints list of trusted builders", func() {
output := pack.RunSuccessfully("config", "trusted-builders")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.IncludesTrustedBuildersHeading()
assertOutput.IncludesHerokuBuilders()
assertOutput.IncludesGoogleBuilder()
assertOutput.IncludesPaketoBuilders()
})
when("add", func() {
it("sets the builder as trusted in ~/.pack/config.toml", func() {
builderName := "some-builder" + h.RandString(10)
pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName)
assert.Contains(pack.ConfigFileContents(), builderName)
})
})
when("remove", func() {
it("removes the previously trusted builder from ~/${PACK_HOME}/config.toml", func() {
builderName := "some-builder" + h.RandString(10)
pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName)
assert.Contains(pack.ConfigFileContents(), builderName)
pack.JustRunSuccessfully("config", "trusted-builders", "remove", builderName)
assert.NotContains(pack.ConfigFileContents(), builderName)
})
})
when("list", func() {
it("prints list of trusted builders", func() {
output := pack.RunSuccessfully("config", "trusted-builders", "list")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.IncludesTrustedBuildersHeading()
assertOutput.IncludesHerokuBuilders()
assertOutput.IncludesGoogleBuilder()
assertOutput.IncludesPaketoBuilders()
})
it("shows a builder trusted by pack config trusted-builders add", func() {
builderName := "some-builder" + h.RandString(10)
pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName)
output := pack.RunSuccessfully("config", "trusted-builders", "list")
assert.Contains(output, builderName)
})
})
})
})
when("stack", func() {
when("suggest", func() {
it("displays suggested stacks", func() {
output, err := pack.Run("stack", "suggest")
assert.Nil(err)
assertions.NewOutputAssertionManager(t, output).IncludesSuggestedStacksHeading()
})
})
})
when("report", func() {
when("default builder is set", func() {
it("redacts default builder", func() {
pack.RunSuccessfully("config", "default-builder", "paketobuildpacks/builder:base")
output := pack.RunSuccessfully("report")
version := pack.Version()
expectedOutput := pack.FixtureManager().TemplateFixture(
"report_output.txt",
map[string]interface{}{
"DefaultBuilder": "[REDACTED]",
"Version": version,
"OS": runtime.GOOS,
"Arch": runtime.GOARCH,
},
)
assert.Equal(output, expectedOutput)
})
it("explicit mode doesn't redact", func() {
pack.RunSuccessfully("config", "default-builder", "paketobuildpacks/builder:base")
output := pack.RunSuccessfully("report", "--explicit")
version := pack.Version()
expectedOutput := pack.FixtureManager().TemplateFixture(
"report_output.txt",
map[string]interface{}{
"DefaultBuilder": "paketobuildpacks/builder:base",
"Version": version,
"OS": runtime.GOOS,
"Arch": runtime.GOARCH,
},
)
assert.Equal(output, expectedOutput)
})
})
})
}
func testAcceptance(
t *testing.T,
when spec.G,
it spec.S,
subjectPackConfig, createBuilderPackConfig config.PackAsset,
lifecycle config.LifecycleAsset,
) {
var (
pack, createBuilderPack *invoke.PackInvoker
buildpackManager buildpacks.BuildpackManager
bpDir = buildpacksDir(lifecycle.EarliestBuildpackAPIVersion())
assert = h.NewAssertionManager(t)
)
it.Before(func() {
pack = invoke.NewPackInvoker(t, assert, subjectPackConfig, registryConfig.DockerConfigDir)
pack.EnableExperimental()
createBuilderPack = invoke.NewPackInvoker(t, assert, createBuilderPackConfig, registryConfig.DockerConfigDir)
createBuilderPack.EnableExperimental()
buildpackManager = buildpacks.NewBuildpackManager(
t,
assert,
buildpacks.WithBuildpackAPIVersion(lifecycle.EarliestBuildpackAPIVersion()),
)
})
it.After(func() {
pack.Cleanup()
createBuilderPack.Cleanup()
})
when("stack is created", func() {
var (
runImageMirror string
stackBaseImages = map[string][]string{
"linux": {"ubuntu:bionic"},
"windows": {"mcr.microsoft.com/windows/nanoserver:1809", "golang:1.17-nanoserver-1809"},
}
)
it.Before(func() {
value, err := suiteManager.RunTaskOnceString("create-stack",
func() (string, error) {
runImageMirror := registryConfig.RepoName(runImage)
err := createStack(t, dockerCli, runImageMirror)
if err != nil {
return "", err
}
return runImageMirror, nil
})
assert.Nil(err)
baseStackNames := stackBaseImages[imageManager.HostOS()]
suiteManager.RegisterCleanUp("remove-stack-images", func() error {
imageManager.CleanupImages(baseStackNames...)
imageManager.CleanupImages(runImage, buildImage, value)
return nil
})
runImageMirror = value
})
when("builder is created", func() {
var builderName string
it.Before(func() {
key := taskKey(
"create-builder",
append(
[]string{runImageMirror, createBuilderPackConfig.Path(), lifecycle.Identifier()},
createBuilderPackConfig.FixturePaths()...,
)...,
)
value, err := suiteManager.RunTaskOnceString(key, func() (string, error) {
return createBuilder(t, assert, createBuilderPack, lifecycle, buildpackManager, runImageMirror)
})
assert.Nil(err)
suiteManager.RegisterCleanUp("clean-"+key, func() error {
imageManager.CleanupImages(value)
return nil
})
builderName = value
})
when("complex builder", func() {
it.Before(func() {
// create our nested builder
h.SkipIf(t, imageManager.HostOS() == "windows", "These tests are not yet compatible with Windows-based containers")
h.SkipIf(t, !createBuilderPack.SupportsFeature(invoke.BuilderNoDuplicateLayers), "bug fixed in 0.18.0")
					// Create a task key for the suite's task manager, which executes
					// our pack commands during tests; RunTaskOnceString de-duplicates
					// by key so the complex builder is only created once per suite.
key := taskKey(
"create-complex-builder",
append(
[]string{runImageMirror, createBuilderPackConfig.Path(), lifecycle.Identifier()},
createBuilderPackConfig.FixturePaths()...,
)...,
)
value, err := suiteManager.RunTaskOnceString(key, func() (string, error) {
return createComplexBuilder(
t,
assert,
createBuilderPack,
lifecycle,
buildpackManager,
runImageMirror,
)
})
assert.Nil(err)
// register task to be run to 'clean up' a task
suiteManager.RegisterCleanUp("clean-"+key, func() error {
imageManager.CleanupImages(value)
return nil
})
builderName = value
output := pack.RunSuccessfully(
"config", "run-image-mirrors", "add", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccesfulRunImageMirrorsAdd("pack-test/run", "some-registry.com/pack-test/run1")
})
when("builder has duplicate buildpacks", func() {
it("buildpack layers have no duplication", func() {
assertImage.DoesNotHaveDuplicateLayers(builderName)
})
})
})
when("builder.toml is invalid", func() {
it("displays an error", func() {
builderConfigPath := createBuilderPack.FixtureManager().FixtureLocation("invalid_builder.toml")
output, err := createBuilderPack.Run(
"builder", "create", "some-builder:build",
"--config", builderConfigPath,
)
assert.NotNil(err)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsInvalidBuilderToml()
})
})
when("build", func() {
var repo, repoName string
it.Before(func() {
repo = "some-org/" + h.RandString(10)
repoName = registryConfig.RepoName(repo)
pack.JustRunSuccessfully("config", "lifecycle-image", lifecycle.Image())
})
it.After(func() {
imageManager.CleanupImages(repoName)
ref, err := name.ParseReference(repoName, name.WeakValidation)
assert.Nil(err)
cacheImage := cache.NewImageCache(ref, dockerCli)
buildCacheVolume := cache.NewVolumeCache(ref, "build", dockerCli)
launchCacheVolume := cache.NewVolumeCache(ref, "launch", dockerCli)
cacheImage.Clear(context.TODO())
buildCacheVolume.Clear(context.TODO())
launchCacheVolume.Clear(context.TODO())
})
when("builder is untrusted", func() {
var untrustedBuilderName string
it.Before(func() {
var err error
untrustedBuilderName, err = createBuilder(
t,
assert,
createBuilderPack,
lifecycle,
buildpackManager,
runImageMirror,
)
assert.Nil(err)
suiteManager.RegisterCleanUp("remove-lifecycle-"+lifecycle.Image(), func() error {
img := imageManager.GetImageID(lifecycle.Image())
imageManager.CleanupImages(img)
return nil
})
})
it.After(func() {
imageManager.CleanupImages(untrustedBuilderName)
})
when("daemon", func() {
it("uses the 5 phases", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"-B", untrustedBuilderName,
)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
assertOutput := assertions.NewLifecycleOutputAssertionManager(t, output)
assertOutput.IncludesLifecycleImageTag(lifecycle.Image())
assertOutput.IncludesSeparatePhases()
})
})
when("--publish", func() {
it("uses the 5 phases", func() {
buildArgs := []string{
repoName,
"-p", filepath.Join("testdata", "mock_app"),
"-B", untrustedBuilderName,
"--publish",
}
if imageManager.HostOS() != "windows" {
buildArgs = append(buildArgs, "--network", "host")
}
output := pack.RunSuccessfully("build", buildArgs...)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
assertOutput := assertions.NewLifecycleOutputAssertionManager(t, output)
assertOutput.IncludesLifecycleImageTag(lifecycle.Image())
assertOutput.IncludesSeparatePhases()
})
})
when("additional tags", func() {
var additionalRepoName string
it.Before(func() {
additionalRepoName = fmt.Sprintf("%s_additional", repoName)
})
it.After(func() {
imageManager.CleanupImages(additionalRepoName)
})
it("pushes image to additional tags", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"-B", untrustedBuilderName,
"--tag", additionalRepoName,
)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
assert.Contains(output, additionalRepoName)
})
})
})
when("default builder is set", func() {
it.Before(func() {
pack.RunSuccessfully("config", "default-builder", builderName)
pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName)
})
it("creates a runnable, rebuildable image on daemon from app dir", func() {
appPath := filepath.Join("testdata", "mock_app")
output := pack.RunSuccessfully(
"build", repoName,
"-p", appPath,
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertOutput.ReportsUsingBuildCacheVolume()
assertOutput.ReportsSelectingRunImageMirror(runImageMirror)
t.Log("app is runnable")
assertImage.RunsWithOutput(repoName, "Launch Dep Contents", "Cached Dep Contents")
t.Log("it uses the run image as a base image")
assertImage.HasBaseImage(repoName, runImage)
t.Log("sets the run image metadata")
assertImage.HasLabelWithData(repoName, "io.buildpacks.lifecycle.metadata", fmt.Sprintf(`"stack":{"runImage":{"image":"%s","mirrors":["%s"]}}}`, runImage, runImageMirror))
t.Log("sets the source metadata")
if pack.SupportsFeature(invoke.SourceMetadataFromProjectTOML) {
assertImage.HasLabelWithData(repoName, "io.buildpacks.project.metadata", (`{"source":{"type":"project","version":{"declared":"1.0.2"},"metadata":{"url":"https://github.com/buildpacks/pack"}}}`))
}
t.Log("registry is empty")
assertImage.NotExistsInRegistry(repo)
t.Log("add a local mirror")
localRunImageMirror := registryConfig.RepoName("pack-test/run-mirror")
imageManager.TagImage(runImage, localRunImageMirror)
defer imageManager.CleanupImages(localRunImageMirror)
pack.JustRunSuccessfully("config", "run-image-mirrors", "add", runImage, "-m", localRunImageMirror)
t.Log("rebuild")
output = pack.RunSuccessfully(
"build", repoName,
"-p", appPath,
)
assertOutput = assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertOutput.ReportsSelectingRunImageMirrorFromLocalConfig(localRunImageMirror)
cachedLaunchLayer := "simple/layers:cached-launch-layer"
assertLifecycleOutput := assertions.NewLifecycleOutputAssertionManager(t, output)
assertLifecycleOutput.ReportsRestoresCachedLayer(cachedLaunchLayer)
assertLifecycleOutput.ReportsExporterReusingUnchangedLayer(cachedLaunchLayer)
assertLifecycleOutput.ReportsCacheReuse(cachedLaunchLayer)
t.Log("app is runnable")
assertImage.RunsWithOutput(repoName, "Launch Dep Contents", "Cached Dep Contents")
t.Log("rebuild with --clear-cache")
output = pack.RunSuccessfully("build", repoName, "-p", appPath, "--clear-cache")
assertOutput = assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertLifecycleOutput = assertions.NewLifecycleOutputAssertionManager(t, output)
assertLifecycleOutput.ReportsSkippingBuildpackLayerAnalysis()
assertLifecycleOutput.ReportsExporterReusingUnchangedLayer(cachedLaunchLayer)
assertLifecycleOutput.ReportsCacheCreation(cachedLaunchLayer)
t.Log("cacher adds layers")
assert.Matches(output, regexp.MustCompile(`(?i)Adding cache layer 'simple/layers:cached-launch-layer'`))
t.Log("inspecting image")
inspectCmd := "inspect"
if !pack.Supports("inspect") {
inspectCmd = "inspect-image"
}
var (
webCommand string
helloCommand string
helloArgs []string
helloArgsPrefix string
)
if imageManager.HostOS() == "windows" {
webCommand = ".\\run"
helloCommand = "cmd"
helloArgs = []string{"/c", "echo hello world"}
helloArgsPrefix = " "
} else {
webCommand = "./run"
helloCommand = "echo"
helloArgs = []string{"hello", "world"}
helloArgsPrefix = ""
}
formats := []compareFormat{
{
extension: "txt",
compareFunc: assert.TrimmedEq,
outputArg: "human-readable",
},
{
extension: "json",
compareFunc: assert.EqualJSON,
outputArg: "json",
},
{
extension: "yaml",
compareFunc: assert.EqualYAML,
outputArg: "yaml",
},
{
extension: "toml",
compareFunc: assert.EqualTOML,
outputArg: "toml",
},
}
for _, format := range formats {
t.Logf("inspecting image %s format", format.outputArg)
output = pack.RunSuccessfully(inspectCmd, repoName, "--output", format.outputArg)
expectedOutput := pack.FixtureManager().TemplateFixture(
fmt.Sprintf("inspect_image_local_output.%s", format.extension),
map[string]interface{}{
"image_name": repoName,
"base_image_id": h.ImageID(t, runImageMirror),
"base_image_top_layer": h.TopLayerDiffID(t, runImageMirror),
"run_image_local_mirror": localRunImageMirror,
"run_image_mirror": runImageMirror,
"web_command": webCommand,
"hello_command": helloCommand,
"hello_args": helloArgs,
"hello_args_prefix": helloArgsPrefix,
},
)
format.compareFunc(output, expectedOutput)
}
})
when("--no-color", func() {
it("doesn't have color", func() {
appPath := filepath.Join("testdata", "mock_app")
// --no-color is set as a default option in our tests, and doesn't need to be explicitly provided
output := pack.RunSuccessfully("build", repoName, "-p", appPath)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertOutput.WithoutColors()
})
})
when("--quiet", func() {
it("only logs app name and sha", func() {
appPath := filepath.Join("testdata", "mock_app")
pack.SetVerbose(false)
defer pack.SetVerbose(true)
output := pack.RunSuccessfully("build", repoName, "-p", appPath, "--quiet")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportSuccessfulQuietBuild(repoName)
})
})
it("supports building app from a zip file", func() {
appPath := filepath.Join("testdata", "mock_app.zip")
output := pack.RunSuccessfully("build", repoName, "-p", appPath)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
})
when("--network", func() {
var tmpDir string
it.Before(func() {
h.SkipIf(t, imageManager.HostOS() == "windows", "temporarily disabled on WCOW due to CI flakiness")
var err error
tmpDir, err = ioutil.TempDir("", "archive-buildpacks-")
assert.Nil(err)
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.InternetCapable)
})
it.After(func() {
h.SkipIf(t, imageManager.HostOS() == "windows", "temporarily disabled on WCOW due to CI flakiness")
assert.Succeeds(os.RemoveAll(tmpDir))
})
when("the network mode is not provided", func() {
it("reports buildpack access to internet", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.InternetCapable.FullPathIn(tmpDir),
)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsConnectedToInternet()
})
})
when("the network mode is set to default", func() {
it("reports buildpack access to internet", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.InternetCapable.FullPathIn(tmpDir),
"--network", "default",
)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsConnectedToInternet()
})
})
when("the network mode is set to none", func() {
it("reports buildpack disconnected from internet", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.InternetCapable.FullPathIn(tmpDir),
"--network", "none",
)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsDisconnectedFromInternet()
})
})
})
when("--volume", func() {
var (
volumeRoot = "/"
slash = "/"
tmpDir string
tmpVolumeSrc string
)
it.Before(func() {
h.SkipIf(t, os.Getenv("DOCKER_HOST") != "", "cannot mount volume when DOCKER_HOST is set")
if imageManager.HostOS() == "windows" {
volumeRoot = `c:\`
slash = `\`
}
var err error
tmpDir, err = ioutil.TempDir("", "volume-buildpack-tests-")
assert.Nil(err)
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.ReadVolume, buildpacks.ReadWriteVolume)
tmpVolumeSrc, err = ioutil.TempDir("", "volume-mount-source")
assert.Nil(err)
assert.Succeeds(os.Chmod(tmpVolumeSrc, 0777)) // Override umask
// Some OSes (like macOS) use symlinks for the standard temp dir.
// Resolve it so it can be properly mounted by the Docker daemon.
tmpVolumeSrc, err = filepath.EvalSymlinks(tmpVolumeSrc)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tmpVolumeSrc, "some-file"), []byte("some-content\n"), 0777)
assert.Nil(err)
})
it.After(func() {
_ = os.RemoveAll(tmpDir)
_ = os.RemoveAll(tmpVolumeSrc)
})
when("volume is read-only", func() {
it("mounts the provided volume in the detect and build phases", func() {
volumeDest := volumeRoot + "platform" + slash + "volume-mount-target"
testFilePath := volumeDest + slash + "some-file"
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--volume", fmt.Sprintf("%s:%s", tmpVolumeSrc, volumeDest),
"--buildpack", buildpacks.ReadVolume.FullPathIn(tmpDir),
"--env", "TEST_FILE_PATH="+testFilePath,
)
bpOutputAsserts := assertions.NewTestBuildpackOutputAssertionManager(t, output)
bpOutputAsserts.ReportsReadingFileContents("Detect", testFilePath, "some-content")
bpOutputAsserts.ReportsReadingFileContents("Build", testFilePath, "some-content")
})
it("should fail to write", func() {
volumeDest := volumeRoot + "platform" + slash + "volume-mount-target"
testDetectFilePath := volumeDest + slash + "detect-file"
testBuildFilePath := volumeDest + slash + "build-file"
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--volume", fmt.Sprintf("%s:%s", tmpVolumeSrc, volumeDest),
"--buildpack", buildpacks.ReadWriteVolume.FullPathIn(tmpDir),
"--env", "DETECT_TEST_FILE_PATH="+testDetectFilePath,
"--env", "BUILD_TEST_FILE_PATH="+testBuildFilePath,
)
bpOutputAsserts := assertions.NewTestBuildpackOutputAssertionManager(t, output)
bpOutputAsserts.ReportsFailingToWriteFileContents("Detect", testDetectFilePath)
bpOutputAsserts.ReportsFailingToWriteFileContents("Build", testBuildFilePath)
})
})
when("volume is read-write", func() {
it("can be written to", func() {
volumeDest := volumeRoot + "volume-mount-target"
testDetectFilePath := volumeDest + slash + "detect-file"
testBuildFilePath := volumeDest + slash + "build-file"
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--volume", fmt.Sprintf("%s:%s:rw", tmpVolumeSrc, volumeDest),
"--buildpack", buildpacks.ReadWriteVolume.FullPathIn(tmpDir),
"--env", "DETECT_TEST_FILE_PATH="+testDetectFilePath,
"--env", "BUILD_TEST_FILE_PATH="+testBuildFilePath,
)
bpOutputAsserts := assertions.NewTestBuildpackOutputAssertionManager(t, output)
bpOutputAsserts.ReportsWritingFileContents("Detect", testDetectFilePath)
bpOutputAsserts.ReportsReadingFileContents("Detect", testDetectFilePath, "some-content")
bpOutputAsserts.ReportsWritingFileContents("Build", testBuildFilePath)
bpOutputAsserts.ReportsReadingFileContents("Build", testBuildFilePath, "some-content")
})
})
})
when("--default-process", func() {
it("sets the default process from those in the process list", func() {
pack.RunSuccessfully(
"build", repoName,
"--default-process", "hello",
"-p", filepath.Join("testdata", "mock_app"),
)
assertImage.RunsWithLogs(repoName, "hello world")
})
})
when("--buildpack", func() {
when("the argument is an ID", func() {
it("adds the buildpacks to the builder if necessary and runs them", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", "simple/layers", // can omit version if only one
"--buildpack", "[email protected]",
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertTestAppOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertTestAppOutput.ReportsBuildStep("Simple Layers Buildpack")
assertTestAppOutput.ReportsBuildStep("NOOP Buildpack")
assertOutput.ReportsSuccessfulImageBuild(repoName)
t.Log("app is runnable")
assertImage.RunsWithOutput(
repoName,
"Launch Dep Contents",
"Cached Dep Contents",
)
})
})
when("the argument is an archive", func() {
var tmpDir string
it.Before(func() {
var err error
tmpDir, err = ioutil.TempDir("", "archive-buildpack-tests-")
assert.Nil(err)
})
it.After(func() {
assert.Succeeds(os.RemoveAll(tmpDir))
})
it("adds the buildpack to the builder and runs it", func() {
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.ArchiveNotInBuilder)
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.ArchiveNotInBuilder.FullPathIn(tmpDir),
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsAddingBuildpack("local/bp", "local-bp-version")
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsBuildStep("Local Buildpack")
})
})
when("the argument is directory", func() {
var tmpDir string
it.Before(func() {
var err error
tmpDir, err = ioutil.TempDir("", "folder-buildpack-tests-")
assert.Nil(err)
})
it.After(func() {
_ = os.RemoveAll(tmpDir)
})
it("adds the buildpacks to the builder and runs it", func() {
h.SkipIf(t, runtime.GOOS == "windows", "buildpack directories not supported on windows")
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.FolderNotInBuilder)
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.FolderNotInBuilder.FullPathIn(tmpDir),
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsAddingBuildpack("local/bp", "local-bp-version")
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsBuildStep("Local Buildpack")
})
})
when("the argument is a buildpackage image", func() {
var (
tmpDir string
packageImageName string
)
it.After(func() {
imageManager.CleanupImages(packageImageName)
_ = os.RemoveAll(tmpDir)
})
it("adds the buildpacks to the builder and runs them", func() {
packageImageName = registryConfig.RepoName("buildpack-" + h.RandString(8))
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", imageManager.HostOS())
packageImage := buildpacks.NewPackageImage(
t,
pack,
packageImageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(
buildpacks.FolderSimpleLayersParent,
buildpacks.FolderSimpleLayers,
),
)
buildpackManager.PrepareBuildpacks(tmpDir, packageImage)
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", packageImageName,
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsAddingBuildpack(
"simple/layers/parent",
"simple-layers-parent-version",
)
assertOutput.ReportsAddingBuildpack("simple/layers", "simple-layers-version")
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsBuildStep("Simple Layers Buildpack")
})
})
when("the argument is a buildpackage file", func() {
var tmpDir string
it.Before(func() {
var err error
tmpDir, err = ioutil.TempDir("", "package-file")
assert.Nil(err)
})
it.After(func() {
assert.Succeeds(os.RemoveAll(tmpDir))
})
it("adds the buildpacks to the builder and runs them", func() {
packageFileLocation := filepath.Join(
tmpDir,
fmt.Sprintf("buildpack-%s.cnb", h.RandString(8)),
)
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", imageManager.HostOS())
packageFile := buildpacks.NewPackageFile(
t,
pack,
packageFileLocation,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(
buildpacks.FolderSimpleLayersParent,
buildpacks.FolderSimpleLayers,
),
)
buildpackManager.PrepareBuildpacks(tmpDir, packageFile)
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", packageFileLocation,
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsAddingBuildpack(
"simple/layers/parent",
"simple-layers-parent-version",
)
assertOutput.ReportsAddingBuildpack("simple/layers", "simple-layers-version")
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsBuildStep("Simple Layers Buildpack")
})
})
when("the buildpack stack doesn't match the builder", func() {
var otherStackBuilderTgz string
it.Before(func() {
otherStackBuilderTgz = h.CreateTGZ(t, filepath.Join(bpDir, "other-stack-buildpack"), "./", 0755)
})
it.After(func() {
assert.Succeeds(os.Remove(otherStackBuilderTgz))
})
it("errors", func() {
output, err := pack.Run(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", otherStackBuilderTgz,
)
assert.NotNil(err)
assert.Contains(output, "other/stack/bp")
assert.Contains(output, "other-stack-version")
assert.Contains(output, "does not support stack 'pack.test.stack'")
})
})
})
when("--env-file", func() {
var envPath string
it.Before(func() {
envfile, err := ioutil.TempFile("", "envfile")
assert.Nil(err)
defer envfile.Close()
err = os.Setenv("ENV2_CONTENTS", "Env2 Layer Contents From Environment")
assert.Nil(err)
envfile.WriteString(`
DETECT_ENV_BUILDPACK=true
ENV1_CONTENTS=Env1 Layer Contents From File
ENV2_CONTENTS
`)
envPath = envfile.Name()
})
it.After(func() {
assert.Succeeds(os.Unsetenv("ENV2_CONTENTS"))
assert.Succeeds(os.RemoveAll(envPath))
})
it("provides the env vars to the build and detect steps", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--env-file", envPath,
)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
assertImage.RunsWithOutput(
repoName,
"Env2 Layer Contents From Environment",
"Env1 Layer Contents From File",
)
})
})
when("--env", func() {
it.Before(func() {
assert.Succeeds(os.Setenv("ENV2_CONTENTS", "Env2 Layer Contents From Environment"))
})
it.After(func() {
assert.Succeeds(os.Unsetenv("ENV2_CONTENTS"))
})
it("provides the env vars to the build and detect steps", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--env", "DETECT_ENV_BUILDPACK=true",
"--env", `ENV1_CONTENTS="Env1 Layer Contents From Command Line"`,
"--env", "ENV2_CONTENTS",
)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
assertImage.RunsWithOutput(
repoName,
"Env2 Layer Contents From Environment",
"Env1 Layer Contents From Command Line",
)
})
})
when("--run-image", func() {
var runImageName string
when("the run-image has the correct stack ID", func() {
it.Before(func() {
user := func() string {
if imageManager.HostOS() == "windows" {
return "ContainerAdministrator"
}
return "root"
}
runImageName = h.CreateImageOnRemote(t, dockerCli, registryConfig, "custom-run-image"+h.RandString(10), fmt.Sprintf(`
FROM %s
USER %s
RUN echo "custom-run" > /custom-run.txt
USER pack
`, runImage, user()))
})
it.After(func() {
imageManager.CleanupImages(runImageName)
})
it("uses the run image as the base image", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--run-image", runImageName,
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertOutput.ReportsPullingImage(runImageName)
t.Log("app is runnable")
assertImage.RunsWithOutput(
repoName,
"Launch Dep Contents",
"Cached Dep Contents",
)
t.Log("uses the run image as the base image")
assertImage.HasBaseImage(repoName, runImageName)
})
})
when("the run image has the wrong stack ID", func() {
it.Before(func() {
runImageName = h.CreateImageOnRemote(t, dockerCli, registryConfig, "custom-run-image"+h.RandString(10), fmt.Sprintf(`
FROM %s
LABEL io.buildpacks.stack.id=other.stack.id
USER pack
`, runImage))
})
it.After(func() {
imageManager.CleanupImages(runImageName)
})
it("fails with a message", func() {
output, err := pack.Run(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--run-image", runImageName,
)
assert.NotNil(err)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsRunImageStackNotMatchingBuilder(
"other.stack.id",
"pack.test.stack",
)
})
})
})
when("--publish", func() {
it("creates image on the registry", func() {
buildArgs := []string{
repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--publish",
}
if imageManager.HostOS() != "windows" {
buildArgs = append(buildArgs, "--network", "host")
}
output := pack.RunSuccessfully("build", buildArgs...)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
t.Log("checking that registry has contents")
assertImage.ExistsInRegistryCatalog(repo)
// TODO: remove this if block after pack 0.18.0 is released
if !pack.SupportsFeature(invoke.InspectRemoteImage) {
imageManager.PullImage(repoName, registryConfig.RegistryAuth())
}
cmdName := "inspect"
if !pack.Supports("inspect") {
cmdName = "inspect-image"
}
t.Log("inspect-image")
var (
webCommand string
helloCommand string
helloArgs []string
helloArgsPrefix string
)
if imageManager.HostOS() == "windows" {
webCommand = ".\\run"
helloCommand = "cmd"
helloArgs = []string{"/c", "echo hello world"}
helloArgsPrefix = " "
} else {
webCommand = "./run"
helloCommand = "echo"
helloArgs = []string{"hello", "world"}
helloArgsPrefix = ""
}
formats := []compareFormat{
{
extension: "txt",
compareFunc: assert.TrimmedEq,
outputArg: "human-readable",
},
{
extension: "json",
compareFunc: assert.EqualJSON,
outputArg: "json",
},
{
extension: "yaml",
compareFunc: assert.EqualYAML,
outputArg: "yaml",
},
{
extension: "toml",
compareFunc: assert.EqualTOML,
outputArg: "toml",
},
}
for _, format := range formats {
t.Logf("inspecting image %s format", format.outputArg)
output = pack.RunSuccessfully(cmdName, repoName, "--output", format.outputArg)
expectedOutput := pack.FixtureManager().TemplateFixture(
fmt.Sprintf("inspect_image_published_output.%s", format.extension),
map[string]interface{}{
"image_name": repoName,
"base_image_ref": strings.Join([]string{runImageMirror, h.Digest(t, runImageMirror)}, "@"),
"base_image_top_layer": h.TopLayerDiffID(t, runImageMirror),
"run_image_mirror": runImageMirror,
"web_command": webCommand,
"hello_command": helloCommand,
"hello_args": helloArgs,
"hello_args_prefix": helloArgsPrefix,
},
)
format.compareFunc(output, expectedOutput)
}
// TODO: remove this if block after pack 0.18.0 is released
if pack.SupportsFeature(invoke.InspectRemoteImage) {
imageManager.PullImage(repoName, registryConfig.RegistryAuth())
}
t.Log("app is runnable")
assertImage.RunsWithOutput(
repoName,
"Launch Dep Contents",
"Cached Dep Contents",
)
})
when("additional tags are specified with --tag", func() {
var additionalRepo string
var additionalRepoName string
it.Before(func() {
additionalRepo = fmt.Sprintf("%s_additional", repo)
additionalRepoName = fmt.Sprintf("%s_additional", repoName)
})
it.After(func() {
imageManager.CleanupImages(additionalRepoName)
})
it("creates additional tags on the registry", func() {
buildArgs := []string{
repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--publish",
"--tag", additionalRepoName,
}
if imageManager.HostOS() != "windows" {
buildArgs = append(buildArgs, "--network", "host")
}
output := pack.RunSuccessfully("build", buildArgs...)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
t.Log("checking that registry has contents")
assertImage.ExistsInRegistryCatalog(repo)
assertImage.ExistsInRegistryCatalog(additionalRepo)
imageManager.PullImage(repoName, registryConfig.RegistryAuth())
imageManager.PullImage(additionalRepoName, registryConfig.RegistryAuth())
t.Log("additional app is runnable")
assertImage.RunsWithOutput(
additionalRepoName,
"Launch Dep Contents",
"Cached Dep Contents",
)
imageDigest := h.Digest(t, repoName)
additionalDigest := h.Digest(t, additionalRepoName)
assert.Equal(imageDigest, additionalDigest)
})
})
})
when("--cache-image", func() {
var cacheImageName string
it.Before(func() {
cacheImageName = fmt.Sprintf("%s-cache", repoName)
})
it("creates image and cache image on the registry", func() {
buildArgs := []string{
repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--publish",
"--cache-image",
cacheImageName,
}
if imageManager.HostOS() != "windows" {
buildArgs = append(buildArgs, "--network", "host")
}
output := pack.RunSuccessfully("build", buildArgs...)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
cacheImageRef, err := name.ParseReference(cacheImageName, name.WeakValidation)
assert.Nil(err)
t.Log("checking that registry has contents")
assertImage.CanBePulledFromRegistry(repoName)
if imageManager.HostOS() == "windows" {
// Cache images are automatically Linux container images, and therefore can't be pulled
// and inspected correctly on WCOW systems
// https://github.com/buildpacks/lifecycle/issues/529
imageManager.PullImage(cacheImageRef.Name(), registryConfig.RegistryAuth())
} else {
assertImage.CanBePulledFromRegistry(cacheImageRef.Name())
}
defer imageManager.CleanupImages(cacheImageRef.Name())
})
})
when("ctrl+c", func() {
it("stops the execution", func() {
var buf = new(bytes.Buffer)
command := pack.StartWithWriter(
buf,
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
)
go command.TerminateAtStep("DETECTING")
err := command.Wait()
assert.NotNil(err)
assert.NotContains(buf.String(), "Successfully built image")
})
})
when("--descriptor", func() {
when("using a included buildpack", func() {
var tempAppDir, tempWorkingDir, origWorkingDir string
it.Before(func() {
h.SkipIf(t, runtime.GOOS == "windows", "buildpack directories not supported on windows")
var err error
tempAppDir, err = ioutil.TempDir("", "descriptor-app")
assert.Nil(err)
tempWorkingDir, err = ioutil.TempDir("", "descriptor-app")
assert.Nil(err)
origWorkingDir, err = os.Getwd()
assert.Nil(err)
// Create test directories and files:
//
// ├── cookie.jar
// ├── descriptor-buildpack/...
// ├── media
// │ ├── mountain.jpg
// │ └── person.png
// └── test.sh
assert.Succeeds(os.Mkdir(filepath.Join(tempAppDir, "descriptor-buildpack"), os.ModePerm))
h.RecursiveCopy(t, filepath.Join(bpDir, "descriptor-buildpack"), filepath.Join(tempAppDir, "descriptor-buildpack"))
err = os.Mkdir(filepath.Join(tempAppDir, "media"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "mountain.jpg"), []byte("fake image bytes"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "person.png"), []byte("fake image bytes"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "cookie.jar"), []byte("chocolate chip"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "test.sh"), []byte("echo test"), 0755)
assert.Nil(err)
projectToml := `
[project]
name = "exclude test"
[[project.licenses]]
type = "MIT"
[build]
exclude = [ "*.sh", "media/person.png", "descriptor-buildpack" ]
[[build.buildpacks]]
uri = "descriptor-buildpack"
`
excludeDescriptorPath := filepath.Join(tempAppDir, "project.toml")
err = ioutil.WriteFile(excludeDescriptorPath, []byte(projectToml), 0755)
assert.Nil(err)
// set working dir to be outside of the app we are building
assert.Succeeds(os.Chdir(tempWorkingDir))
})
it.After(func() {
os.RemoveAll(tempAppDir)
if origWorkingDir != "" {
assert.Succeeds(os.Chdir(origWorkingDir))
}
})
it("uses buildpack specified by descriptor", func() {
output := pack.RunSuccessfully(
"build",
repoName,
"-p", tempAppDir,
)
assert.NotContains(output, "person.png")
assert.NotContains(output, "test.sh")
})
})
when("exclude and include", func() {
var buildpackTgz, tempAppDir string
it.Before(func() {
buildpackTgz = h.CreateTGZ(t, filepath.Join(bpDir, "descriptor-buildpack"), "./", 0755)
var err error
tempAppDir, err = ioutil.TempDir("", "descriptor-app")
assert.Nil(err)
// Create test directories and files:
//
// ├── cookie.jar
// ├── other-cookie.jar
// ├── nested-cookie.jar
// ├── nested
// │ └── nested-cookie.jar
// ├── secrets
// │ ├── api_keys.json
// │   └── user_token
// ├── media
// │ ├── mountain.jpg
// │ └── person.png
// └── test.sh
err = os.Mkdir(filepath.Join(tempAppDir, "secrets"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "secrets", "api_keys.json"), []byte("{}"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "secrets", "user_token"), []byte("token"), 0755)
assert.Nil(err)
err = os.Mkdir(filepath.Join(tempAppDir, "nested"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "nested", "nested-cookie.jar"), []byte("chocolate chip"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "other-cookie.jar"), []byte("chocolate chip"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "nested-cookie.jar"), []byte("chocolate chip"), 0755)
assert.Nil(err)
err = os.Mkdir(filepath.Join(tempAppDir, "media"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "mountain.jpg"), []byte("fake image bytes"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "person.png"), []byte("fake image bytes"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "cookie.jar"), []byte("chocolate chip"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "test.sh"), []byte("echo test"), 0755)
assert.Nil(err)
})
it.After(func() {
assert.Succeeds(os.RemoveAll(tempAppDir))
})
it("should exclude ALL specified files and directories", func() {
projectToml := `
[project]
name = "exclude test"
[[project.licenses]]
type = "MIT"
[build]
exclude = [ "*.sh", "secrets/", "media/metadata", "/other-cookie.jar" ,"/nested-cookie.jar"]
`
excludeDescriptorPath := filepath.Join(tempAppDir, "exclude.toml")
err := ioutil.WriteFile(excludeDescriptorPath, []byte(projectToml), 0755)
assert.Nil(err)
output := pack.RunSuccessfully(
"build",
repoName,
"-p", tempAppDir,
"--buildpack", buildpackTgz,
"--descriptor", excludeDescriptorPath,
)
assert.NotContains(output, "api_keys.json")
assert.NotContains(output, "user_token")
assert.NotContains(output, "test.sh")
assert.NotContains(output, "other-cookie.jar")
assert.Contains(output, "cookie.jar")
assert.Contains(output, "nested-cookie.jar")
assert.Contains(output, "mountain.jpg")
assert.Contains(output, "person.png")
})
it("should ONLY include specified files and directories", func() {
projectToml := `
[project]
name = "include test"
[[project.licenses]]
type = "MIT"
[build]
include = [ "*.jar", "media/mountain.jpg", "/media/person.png", ]
`
includeDescriptorPath := filepath.Join(tempAppDir, "include.toml")
err := ioutil.WriteFile(includeDescriptorPath, []byte(projectToml), 0755)
assert.Nil(err)
output := pack.RunSuccessfully(
"build",
repoName,
"-p", tempAppDir,
"--buildpack", buildpackTgz,
"--descriptor", includeDescriptorPath,
)
assert.NotContains(output, "api_keys.json")
assert.NotContains(output, "user_token")
assert.NotContains(output, "test.sh")
assert.Contains(output, "cookie.jar")
assert.Contains(output, "mountain.jpg")
assert.Contains(output, "person.png")
})
})
})
})
})
when("inspecting builder", func() {
when("inspecting a nested builder", func() {
it.Before(func() {
// create our nested builder
h.SkipIf(t, imageManager.HostOS() == "windows", "These tests are not yet compatible with Windows-based containers")
h.SkipIf(t, !pack.SupportsFeature(invoke.BuilderNoDuplicateLayers), "bug fixed in 0.18.0")
// create the builder through the suite's task manager, which de-duplicates
// tasks so the complex builder is only built once per suite run
key := taskKey(
"create-complex-builder",
append(
[]string{runImageMirror, createBuilderPackConfig.Path(), lifecycle.Identifier()},
createBuilderPackConfig.FixturePaths()...,
)...,
)
// run the task once and cache its output for any later calls with the same key;
// builder-creation changes should go through createBuilderPack
value, err := suiteManager.RunTaskOnceString(key, func() (string, error) {
return createComplexBuilder(
t,
assert,
createBuilderPack,
lifecycle,
buildpackManager,
runImageMirror,
)
})
assert.Nil(err)
// register a cleanup task that removes the builder image when the suite finishes
suiteManager.RegisterCleanUp("clean-"+key, func() error {
imageManager.CleanupImages(value)
return nil
})
builderName = value
output := pack.RunSuccessfully(
"config", "run-image-mirrors", "add", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccesfulRunImageMirrorsAdd("pack-test/run", "some-registry.com/pack-test/run1")
})
it("displays nested Detection Order groups", func() {
var output string
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", builderName)
} else {
output = pack.RunSuccessfully("inspect-builder", builderName)
}
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.OutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_output.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_nested_output.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
"trusted": "No",
// set previous pack template fields
"buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(),
"platform_api_version": lifecycle.EarliestPlatformAPIVersion(),
},
)
assert.TrimmedEq(output, expectedOutput)
})
it("provides nested detection output up to depth", func() {
depth := "1"
var output string
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", "--depth", depth, builderName)
} else {
output = pack.RunSuccessfully("inspect-builder", "--depth", depth, builderName)
}
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.OutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_depth_2_output.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_nested_depth_2_output.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
"trusted": "No",
// set previous pack template fields
"buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(),
"platform_api_version": lifecycle.EarliestPlatformAPIVersion(),
},
)
assert.TrimmedEq(output, expectedOutput)
})
when("output format is toml", func() {
it("prints builder information in toml format", func() {
var output string
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", builderName, "--output", "toml")
} else {
output = pack.RunSuccessfully("inspect-builder", builderName, "--output", "toml")
}
err := toml.NewDecoder(strings.NewReader(string(output))).Decode(&struct{}{})
assert.Nil(err)
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.TOMLOutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_output_toml.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_nested_output_toml.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
},
)
assert.TrimmedEq(string(output), expectedOutput)
})
})
when("output format is yaml", func() {
it("prints builder information in yaml format", func() {
var output string
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", builderName, "--output", "yaml")
} else {
output = pack.RunSuccessfully("inspect-builder", builderName, "--output", "yaml")
}
err := yaml.Unmarshal([]byte(output), &struct{}{})
assert.Nil(err)
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.YAMLOutputForAPIs(14)
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_output_yaml.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_nested_output_yaml.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
},
)
assert.TrimmedEq(string(output), expectedOutput)
})
})
when("output format is json", func() {
it("prints builder information in json format", func() {
var output string
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", builderName, "--output", "json")
} else {
output = pack.RunSuccessfully("inspect-builder", builderName, "--output", "json")
}
err := json.Unmarshal([]byte(output), &struct{}{})
assert.Nil(err)
var prettifiedOutput bytes.Buffer
err = json.Indent(&prettifiedOutput, []byte(output), "", " ")
assert.Nil(err)
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.JSONOutputForAPIs(8)
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_output_json.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_nested_output_json.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
},
)
assert.Equal(prettifiedOutput.String(), expectedOutput)
})
})
})
it("displays configuration for a builder (local and remote)", func() {
output := pack.RunSuccessfully(
"config", "run-image-mirrors", "add", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1",
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccesfulRunImageMirrorsAdd("pack-test/run", "some-registry.com/pack-test/run1")
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", builderName)
} else {
output = pack.RunSuccessfully("inspect-builder", builderName)
}
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.OutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_output.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_output.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
"trusted": "No",
// set previous pack template fields
"buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(),
"platform_api_version": lifecycle.EarliestPlatformAPIVersion(),
},
)
assert.TrimmedEq(output, expectedOutput)
})
it("indicates builder is trusted", func() {
pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName)
pack.JustRunSuccessfully("config", "run-image-mirrors", "add", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1")
var output string
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", builderName)
} else {
output = pack.RunSuccessfully("inspect-builder", builderName)
}
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.OutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_output.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_output.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
"trusted": "Yes",
// set previous pack template fields
"buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(),
"platform_api_version": lifecycle.EarliestPlatformAPIVersion(),
},
)
assert.TrimmedEq(output, expectedOutput)
})
})
when("rebase", func() {
var repoName, runBefore, origID string
var buildRunImage func(string, string, string)
it.Before(func() {
pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName)
repoName = registryConfig.RepoName("some-org/" + h.RandString(10))
runBefore = registryConfig.RepoName("run-before/" + h.RandString(10))
buildRunImage = func(newRunImage, contents1, contents2 string) {
user := func() string {
if imageManager.HostOS() == "windows" {
return "ContainerAdministrator"
}
return "root"
}
h.CreateImage(t, dockerCli, newRunImage, fmt.Sprintf(`
FROM %s
USER %s
RUN echo %s > /contents1.txt
RUN echo %s > /contents2.txt
USER pack
`, runImage, user(), contents1, contents2))
}
buildRunImage(runBefore, "contents-before-1", "contents-before-2")
pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--builder", builderName,
"--run-image", runBefore,
"--pull-policy", "never",
)
origID = h.ImageID(t, repoName)
assertImage.RunsWithOutput(
repoName,
"contents-before-1",
"contents-before-2",
)
})
it.After(func() {
imageManager.CleanupImages(origID, repoName, runBefore)
ref, err := name.ParseReference(repoName, name.WeakValidation)
assert.Nil(err)
buildCacheVolume := cache.NewVolumeCache(ref, "build", dockerCli)
launchCacheVolume := cache.NewVolumeCache(ref, "launch", dockerCli)
assert.Succeeds(buildCacheVolume.Clear(context.TODO()))
assert.Succeeds(launchCacheVolume.Clear(context.TODO()))
})
when("daemon", func() {
when("--run-image", func() {
var runAfter string
it.Before(func() {
runAfter = registryConfig.RepoName("run-after/" + h.RandString(10))
buildRunImage(runAfter, "contents-after-1", "contents-after-2")
})
it.After(func() {
imageManager.CleanupImages(runAfter)
})
it("uses provided run image", func() {
output := pack.RunSuccessfully(
"rebase", repoName,
"--run-image", runAfter,
"--pull-policy", "never",
)
assert.Contains(output, fmt.Sprintf("Successfully rebased image '%s'", repoName))
assertImage.RunsWithOutput(
repoName,
"contents-after-1",
"contents-after-2",
)
})
})
when("local config has a mirror", func() {
var localRunImageMirror string
it.Before(func() {
localRunImageMirror = registryConfig.RepoName("run-after/" + h.RandString(10))
buildRunImage(localRunImageMirror, "local-mirror-after-1", "local-mirror-after-2")
pack.JustRunSuccessfully("config", "run-image-mirrors", "add", runImage, "-m", localRunImageMirror)
})
it.After(func() {
imageManager.CleanupImages(localRunImageMirror)
})
it("prefers the local mirror", func() {
output := pack.RunSuccessfully("rebase", repoName, "--pull-policy", "never")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSelectingRunImageMirrorFromLocalConfig(localRunImageMirror)
assertOutput.ReportsSuccessfulRebase(repoName)
assertImage.RunsWithOutput(
repoName,
"local-mirror-after-1",
"local-mirror-after-2",
)
})
})
when("image metadata has a mirror", func() {
it.Before(func() {
// clean up existing mirror first to avoid leaking images
imageManager.CleanupImages(runImageMirror)
buildRunImage(runImageMirror, "mirror-after-1", "mirror-after-2")
})
it("selects the best mirror", func() {
output := pack.RunSuccessfully("rebase", repoName, "--pull-policy", "never")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSelectingRunImageMirror(runImageMirror)
assertOutput.ReportsSuccessfulRebase(repoName)
assertImage.RunsWithOutput(
repoName,
"mirror-after-1",
"mirror-after-2",
)
})
})
})
when("--publish", func() {
it.Before(func() {
assert.Succeeds(h.PushImage(dockerCli, repoName, registryConfig))
})
when("--run-image", func() {
var runAfter string
it.Before(func() {
runAfter = registryConfig.RepoName("run-after/" + h.RandString(10))
buildRunImage(runAfter, "contents-after-1", "contents-after-2")
assert.Succeeds(h.PushImage(dockerCli, runAfter, registryConfig))
})
it.After(func() {
imageManager.CleanupImages(runAfter)
})
it("uses provided run image", func() {
output := pack.RunSuccessfully("rebase", repoName, "--publish", "--run-image", runAfter)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulRebase(repoName)
assertImage.CanBePulledFromRegistry(repoName)
assertImage.RunsWithOutput(
repoName,
"contents-after-1",
"contents-after-2",
)
})
})
})
})
})
})
}
func buildpacksDir(bpAPIVersion string) string {
return filepath.Join("testdata", "mock_buildpacks", bpAPIVersion)
}
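// createComplexBuilder packages a nested buildpack hierarchy (nested-level-1 ->
// nested-level-2 -> simple-layers) as buildpackage images, renders
// nested_builder.toml from the fixtures, creates the builder with
// `pack builder create`, pushes it to the test registry, and returns its name.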
func createComplexBuilder(t *testing.T,
assert h.AssertionManager,
pack *invoke.PackInvoker,
lifecycle config.LifecycleAsset,
buildpackManager buildpacks.BuildpackManager,
runImageMirror string,
) (string, error) {
t.Log("creating complex builder image...")
// CREATE TEMP WORKING DIR
tmpDir, err := ioutil.TempDir("", "create-complex-test-builder")
if err != nil {
return "", err
}
defer os.RemoveAll(tmpDir)
// ARCHIVE BUILDPACKS
builderBuildpacks := []buildpacks.TestBuildpack{
buildpacks.Noop,
buildpacks.Noop2,
buildpacks.OtherStack,
buildpacks.ReadEnv,
}
templateMapping := map[string]interface{}{
"run_image_mirror": runImageMirror,
}
packageImageName := registryConfig.RepoName("nested-level-1-buildpack-" + h.RandString(8))
nestedLevelTwoBuildpackName := registryConfig.RepoName("nested-level-2-buildpack-" + h.RandString(8))
simpleLayersBuildpackName := registryConfig.RepoName("simple-layers-buildpack-" + h.RandString(8))
simpleLayersBuildpackDifferentShaName := registryConfig.RepoName("simple-layers-buildpack-different-name-" + h.RandString(8))
templateMapping["package_id"] = "simple/nested-level-1"
templateMapping["package_image_name"] = packageImageName
templateMapping["nested_level_1_buildpack"] = packageImageName
templateMapping["nested_level_2_buildpack"] = nestedLevelTwoBuildpackName
templateMapping["simple_layers_buildpack"] = simpleLayersBuildpackName
templateMapping["simple_layers_buildpack_different_sha"] = simpleLayersBuildpackDifferentShaName
fixtureManager := pack.FixtureManager()
nestedLevelOneConfigFile, err := ioutil.TempFile(tmpDir, "nested-level-1-package.toml")
assert.Nil(err)
fixtureManager.TemplateFixtureToFile(
"nested-level-1-buildpack_package.toml",
nestedLevelOneConfigFile,
templateMapping,
)
err = nestedLevelOneConfigFile.Close()
assert.Nil(err)
nestedLevelTwoConfigFile, err := ioutil.TempFile(tmpDir, "nested-level-2-package.toml")
assert.Nil(err)
fixtureManager.TemplateFixtureToFile(
"nested-level-2-buildpack_package.toml",
nestedLevelTwoConfigFile,
templateMapping,
)
err = nestedLevelTwoConfigFile.Close()
assert.Nil(err)
packageImageBuildpack := buildpacks.NewPackageImage(
t,
pack,
packageImageName,
nestedLevelOneConfigFile.Name(),
buildpacks.WithRequiredBuildpacks(
buildpacks.NestedLevelOne,
buildpacks.NewPackageImage(
t,
pack,
nestedLevelTwoBuildpackName,
nestedLevelTwoConfigFile.Name(),
buildpacks.WithRequiredBuildpacks(
buildpacks.NestedLevelTwo,
buildpacks.NewPackageImage(
t,
pack,
simpleLayersBuildpackName,
fixtureManager.FixtureLocation("simple-layers-buildpack_package.toml"),
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
),
),
),
),
)
simpleLayersDifferentShaBuildpack := buildpacks.NewPackageImage(
t,
pack,
simpleLayersBuildpackDifferentShaName,
fixtureManager.FixtureLocation("simple-layers-buildpack-different-sha_package.toml"),
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayersDifferentSha),
)
defer imageManager.CleanupImages(packageImageName, nestedLevelTwoBuildpackName, simpleLayersBuildpackName, simpleLayersBuildpackDifferentShaName)
builderBuildpacks = append(
builderBuildpacks,
packageImageBuildpack,
simpleLayersDifferentShaBuildpack,
)
buildpackManager.PrepareBuildpacks(tmpDir, builderBuildpacks...)
// ADD lifecycle
if lifecycle.HasLocation() {
lifecycleURI := lifecycle.EscapedPath()
t.Logf("adding lifecycle path '%s' to builder config", lifecycleURI)
templateMapping["lifecycle_uri"] = lifecycleURI
} else {
lifecycleVersion := lifecycle.Version()
t.Logf("adding lifecycle version '%s' to builder config", lifecycleVersion)
templateMapping["lifecycle_version"] = lifecycleVersion
}
// RENDER builder.toml
builderConfigFile, err := ioutil.TempFile(tmpDir, "nested_builder.toml")
if err != nil {
return "", err
}
pack.FixtureManager().TemplateFixtureToFile("nested_builder.toml", builderConfigFile, templateMapping)
err = builderConfigFile.Close()
if err != nil {
return "", err
}
// NAME BUILDER
bldr := registryConfig.RepoName("test/builder-" + h.RandString(10))
// CREATE BUILDER
output := pack.RunSuccessfully(
"builder", "create", bldr,
"-c", builderConfigFile.Name(),
"--no-color",
)
assert.Contains(output, fmt.Sprintf("Successfully created builder image '%s'", bldr))
assert.Succeeds(h.PushImage(dockerCli, bldr, registryConfig))
return bldr, nil
}
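// createBuilder packages the simple-layers buildpack as a buildpackage image,
// renders builder.toml from the fixtures, creates the builder with
// `pack builder create`, pushes it to the test registry, and returns its name.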
func createBuilder(
t *testing.T,
assert h.AssertionManager,
pack *invoke.PackInvoker,
lifecycle config.LifecycleAsset,
buildpackManager buildpacks.BuildpackManager,
runImageMirror string,
) (string, error) {
t.Log("creating builder image...")
// CREATE TEMP WORKING DIR
tmpDir, err := ioutil.TempDir("", "create-test-builder")
assert.Nil(err)
defer os.RemoveAll(tmpDir)
templateMapping := map[string]interface{}{
"run_image_mirror": runImageMirror,
}
// ARCHIVE BUILDPACKS
builderBuildpacks := []buildpacks.TestBuildpack{
buildpacks.Noop,
buildpacks.Noop2,
buildpacks.OtherStack,
buildpacks.ReadEnv,
}
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package.toml", imageManager.HostOS())
packageImageName := registryConfig.RepoName("simple-layers-package-image-buildpack-" + h.RandString(8))
packageImageBuildpack := buildpacks.NewPackageImage(
t,
pack,
packageImageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
)
defer imageManager.CleanupImages(packageImageName)
builderBuildpacks = append(builderBuildpacks, packageImageBuildpack)
templateMapping["package_image_name"] = packageImageName
templateMapping["package_id"] = "simple/layers"
buildpackManager.PrepareBuildpacks(tmpDir, builderBuildpacks...)
// ADD lifecycle
var lifecycleURI string
var lifecycleVersion string
if lifecycle.HasLocation() {
lifecycleURI = lifecycle.EscapedPath()
t.Logf("adding lifecycle path '%s' to builder config", lifecycleURI)
templateMapping["lifecycle_uri"] = lifecycleURI
} else {
lifecycleVersion = lifecycle.Version()
t.Logf("adding lifecycle version '%s' to builder config", lifecycleVersion)
templateMapping["lifecycle_version"] = lifecycleVersion
}
// RENDER builder.toml
configFileName := "builder.toml"
builderConfigFile, err := ioutil.TempFile(tmpDir, "builder.toml")
assert.Nil(err)
pack.FixtureManager().TemplateFixtureToFile(
configFileName,
builderConfigFile,
templateMapping,
)
err = builderConfigFile.Close()
assert.Nil(err)
// NAME BUILDER
bldr := registryConfig.RepoName("test/builder-" + h.RandString(10))
// CREATE BUILDER
output := pack.RunSuccessfully(
"builder", "create", bldr,
"-c", builderConfigFile.Name(),
"--no-color",
)
assert.Contains(output, fmt.Sprintf("Successfully created builder image '%s'", bldr))
assert.Succeeds(h.PushImage(dockerCli, bldr, registryConfig))
return bldr, nil
}
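// generatePackageTomlWithOS renders the named package.toml fixture for the
// given host OS into tmpDir and returns the path of the generated file.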
func generatePackageTomlWithOS(
t *testing.T,
assert h.AssertionManager,
pack *invoke.PackInvoker,
tmpDir string,
fixtureName string,
platform_os string,
) string {
t.Helper()
packageTomlFile, err := ioutil.TempFile(tmpDir, "package-*.toml")
assert.Nil(err)
pack.FixtureManager().TemplateFixtureToFile(
fixtureName,
packageTomlFile,
map[string]interface{}{
"OS": platform_os,
},
)
assert.Nil(packageTomlFile.Close())
return packageTomlFile.Name()
}
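// createStack builds the mock run and build stack images for the host OS,
// tags the run image as runImageMirror, and pushes the mirror to the test registry.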
func createStack(t *testing.T, dockerCli client.CommonAPIClient, runImageMirror string) error {
t.Helper()
t.Log("creating stack images...")
stackBaseDir := filepath.Join("testdata", "mock_stack", imageManager.HostOS())
if err := createStackImage(dockerCli, runImage, filepath.Join(stackBaseDir, "run")); err != nil {
return err
}
if err := createStackImage(dockerCli, buildImage, filepath.Join(stackBaseDir, "build")); err != nil {
return err
}
imageManager.TagImage(runImage, runImageMirror)
if err := h.PushImage(dockerCli, runImageMirror, registryConfig); err != nil {
return err
}
return nil
}
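// createStackImage builds a stack image from dir by streaming it to the Docker
// daemon as a tar build context.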
func createStackImage(dockerCli client.CommonAPIClient, repoName string, dir string) error {
defaultFilterFunc := func(file string) bool { return true }
ctx := context.Background()
buildContext := archive.ReadDirAsTar(dir, "/", 0, 0, -1, true, false, defaultFilterFunc)
return h.CheckImageBuildResult(dockerCli.ImageBuild(ctx, buildContext, dockertypes.ImageBuildOptions{
Tags: []string{repoName},
Remove: true,
ForceRemove: true,
}))
}
// taskKey derives a unique key by combining the prefix with a SHA-256 hash of all arguments
func taskKey(prefix string, args ...string) string {
hash := sha256.New()
for _, v := range args {
hash.Write([]byte(v))
}
return fmt.Sprintf("%s-%s", prefix, hex.EncodeToString(hash.Sum(nil)))
}
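// compareFormat pairs an inspect --output format with the fixture extension and
// the assertion used to compare pack's output against that fixture.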
type compareFormat struct {
extension string
compareFunc func(string, string)
outputArg string
}
| ["\"DOCKER_HOST\""] | [] | ["DOCKER_HOST"] | [] | ["DOCKER_HOST"] | go | 1 | 0 |
sts/client.go | package sts
import (
"os"
"github.com/dbdd4us/qcloudapi-sdk-go/common"
)
const (
StsHost = "sts.api.qcloud.com"
StsPath = "/v2/index.php"
)
type Client struct {
*common.Client
}
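// NewClient wraps common.NewClient, defaulting Host and Path to the STS API
// endpoint when they are not set in opts.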
func NewClient(credential common.CredentialInterface, opts common.Opts) (*Client, error) {
if opts.Host == "" {
opts.Host = StsHost
}
if opts.Path == "" {
opts.Path = StsPath
}
client, err := common.NewClient(credential, opts)
if err != nil {
return &Client{}, err
}
return &Client{client}, nil
}
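// NewClientFromEnv builds a Client from the QCloudSecretId, QCloudSecretKey,
// QCloudStsAPIRegion, QCloudStsAPIHost and QCloudStsAPIPath environment
// variables; unset host and path values fall back to the defaults in NewClient.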
func NewClientFromEnv() (*Client, error) {
secretId := os.Getenv("QCloudSecretId")
secretKey := os.Getenv("QCloudSecretKey")
region := os.Getenv("QCloudStsAPIRegion")
host := os.Getenv("QCloudStsAPIHost")
path := os.Getenv("QCloudStsAPIPath")
return NewClient(
common.Credential{
secretId,
secretKey,
},
common.Opts{
Region: region,
Host: host,
Path: path,
},
)
}
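// ExampleNewClientFromEnv is a minimal usage sketch, not part of the original
// SDK: it assumes QCloudSecretId and QCloudSecretKey are exported in the
// environment (region, host and path may be left empty to use the defaults).
func ExampleNewClientFromEnv() {
client, err := NewClientFromEnv()
if err != nil {
panic(err)
}
_ = client // STS calls are issued through the embedded *common.Client
}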
| ["\"QCloudSecretId\"", "\"QCloudSecretKey\"", "\"QCloudStsAPIRegion\"", "\"QCloudStsAPIHost\"", "\"QCloudStsAPIPath\""] | [] | ["QCloudStsAPIHost", "QCloudStsAPIPath", "QCloudSecretId", "QCloudSecretKey", "QCloudStsAPIRegion"] | [] | ["QCloudStsAPIHost", "QCloudStsAPIPath", "QCloudSecretId", "QCloudSecretKey", "QCloudStsAPIRegion"] | go | 5 | 0 |
xdet_v3_resnet_train.py | # Copyright 2018 Changan Wang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
#from scipy.misc import imread, imsave, imshow, imresize
import tensorflow as tf
from net import xdet_body_v3
from utility import train_helper
from dataset import dataset_factory
from preprocessing import preprocessing_factory
from preprocessing import anchor_manipulator
# hardware related configuration
tf.app.flags.DEFINE_integer(
'num_readers', 16,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 48,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'num_cpu_threads', 0,
'The number of cpu cores used to train.')
tf.app.flags.DEFINE_float(
'gpu_memory_fraction', 1., 'GPU memory fraction to use.')
# scaffold related configuration
tf.app.flags.DEFINE_string(
'data_dir', '../PASCAL/VOC_TF/VOC0712TF/',
'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_string(
'dataset_name', 'pascalvoc_0712', 'The name of the dataset to load.')
tf.app.flags.DEFINE_integer(
'num_classes', 21, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'model_dir', './logs_v3/',
'The directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
'save_summary_steps', 500,
'The frequency with which summaries are saved, in steps.')
tf.app.flags.DEFINE_integer(
'save_checkpoints_secs', 7200,
'The frequency with which the model is saved, in seconds.')
# model related configuration
tf.app.flags.DEFINE_integer(
'train_image_size', 352,
'The size of the input image for the model to use.')
tf.app.flags.DEFINE_integer(
'resnet_size', 50,
'The size of the ResNet model to use.')
tf.app.flags.DEFINE_integer(
'train_epochs', None,
'The number of epochs to use for training.')
tf.app.flags.DEFINE_integer(
'batch_size', 12,
'Batch size for training and evaluation.')
tf.app.flags.DEFINE_string(
'data_format', 'channels_first', # 'channels_first' or 'channels_last'
'A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
tf.app.flags.DEFINE_float(
'negative_ratio', 3., 'Negative ratio in the loss function.')
tf.app.flags.DEFINE_float(
'match_threshold', 0.56, 'Matching threshold in the loss function.')
tf.app.flags.DEFINE_float(
'neg_threshold', 0.4, 'Matching threshold for the negative examples in the loss function.')
# optimizer related configuration
tf.app.flags.DEFINE_float(
'weight_decay', 0.0005, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.00005,
'The minimal end learning rate used by a polynomial decay learning rate.')
# for learning rate exponential_decay
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.96, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'decay_steps', 1000,
'Number of steps after which the learning rate decays.')
# for learning rate piecewise_constant decay
tf.app.flags.DEFINE_string(
'decay_boundaries', '60000, 800000',
'Learning rate decay boundaries by global_step (comma-separated list).')
tf.app.flags.DEFINE_string(
'lr_decay_factors', '1, 0.6, 0.1',
'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
# checkpoint related configuration
tf.app.flags.DEFINE_string(
'checkpoint_path', './model/resnet50',#None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_model_scope', '',
'Model scope in the checkpoint. None if the same as the trained model.')
tf.app.flags.DEFINE_string(
'model_scope', 'xdet_resnet',
'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', 'xdet_resnet/xdet_head, xdet_resnet/xdet_multi_path, xdet_resnet/xdet_additional_conv',#None
'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', True,
    'Whether to ignore missing variables when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'run_on_cloud', True,
    'Whether we will train on the cloud (the pre-trained model will be placed in the "data_dir/cloud_checkpoint_path").')
tf.app.flags.DEFINE_string(
'cloud_checkpoint_path', 'resnet50/model.ckpt',
'The path to a checkpoint from which to fine-tune.')
FLAGS = tf.app.flags.FLAGS
def input_pipeline():
image_preprocessing_fn = lambda image_, shape_, glabels_, gbboxes_ : preprocessing_factory.get_preprocessing(
'xdet_resnet', is_training=True)(image_, glabels_, gbboxes_, out_shape=[FLAGS.train_image_size] * 2, data_format=('NCHW' if FLAGS.data_format=='channels_first' else 'NHWC'))
anchor_creator = anchor_manipulator.AnchorCreator([FLAGS.train_image_size] * 2,
layers_shapes = [(22, 22)],
anchor_scales = [[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]],
extra_anchor_scales = [[0.1]],
anchor_ratios = [[1., 2., 3., .5, 0.3333]],
layer_steps = [16])
def input_fn():
all_anchors, num_anchors_list = anchor_creator.get_all_anchors()
anchor_encoder_decoder = anchor_manipulator.AnchorEncoder(all_anchors,
num_classes = FLAGS.num_classes,
allowed_borders = [0.05],
positive_threshold = FLAGS.match_threshold,
ignore_threshold = FLAGS.neg_threshold,
prior_scaling=[0.1, 0.1, 0.2, 0.2])
list_from_batch, _ = dataset_factory.get_dataset(FLAGS.dataset_name,
FLAGS.dataset_split_name,
FLAGS.data_dir,
image_preprocessing_fn,
file_pattern = None,
reader = None,
batch_size = FLAGS.batch_size,
num_readers = FLAGS.num_readers,
num_preprocessing_threads = FLAGS.num_preprocessing_threads,
num_epochs = FLAGS.train_epochs,
anchor_encoder = anchor_encoder_decoder.encode_all_anchors)
return list_from_batch[-1], {'targets': list_from_batch[:-1],
'decode_fn': lambda pred : anchor_encoder_decoder.decode_all_anchors([pred])[0],
'num_anchors_list': num_anchors_list}
return input_fn
def modified_smooth_l1(bbox_pred, bbox_targets, bbox_inside_weights = 1., bbox_outside_weights = 1., sigma = 1.):
"""
ResultLoss = outside_weights * SmoothL1(inside_weights * (bbox_pred - bbox_targets))
SmoothL1(x) = 0.5 * (sigma * x)^2, if |x| < 1 / sigma^2
|x| - 0.5 / sigma^2, otherwise
"""
sigma2 = sigma * sigma
inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))
smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)
smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)
smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)
smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),
tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))
outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)
return outside_mul
def xdet_model_fn(features, labels, mode, params):
"""Our model_fn for ResNet to be used with our Estimator."""
num_anchors_list = labels['num_anchors_list']
num_feature_layers = len(num_anchors_list)
shape = labels['targets'][-1]
glabels = labels['targets'][:num_feature_layers][0]
gtargets = labels['targets'][num_feature_layers : 2 * num_feature_layers][0]
gscores = labels['targets'][2 * num_feature_layers : 3 * num_feature_layers][0]
with tf.variable_scope(params['model_scope'], default_name = None, values = [features], reuse=tf.AUTO_REUSE):
backbone = xdet_body_v3.xdet_resnet_v3(params['resnet_size'], params['data_format'])
body_cls_output, body_regress_output = backbone(inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
cls_pred, location_pred = xdet_body_v3.xdet_head(body_cls_output, body_regress_output, params['num_classes'], num_anchors_list[0], (mode == tf.estimator.ModeKeys.TRAIN), data_format=params['data_format'])
if params['data_format'] == 'channels_first':
cls_pred = tf.transpose(cls_pred, [0, 2, 3, 1])
location_pred = tf.transpose(location_pred, [0, 2, 3, 1])
bboxes_pred = labels['decode_fn'](location_pred)#(tf.reshape(location_pred, tf.shape(location_pred).as_list()[0:-1] + [-1, 4]))
cls_pred = tf.reshape(cls_pred, [-1, params['num_classes']])
location_pred = tf.reshape(location_pred, [-1, 4])
glabels = tf.reshape(glabels, [-1])
gscores = tf.reshape(gscores, [-1])
gtargets = tf.reshape(gtargets, [-1, 4])
    # raw mask for positive > 0.5, and for negative < 0.3
# each positive examples has one label
positive_mask = glabels > 0#tf.logical_and(glabels > 0, gscores > params['match_threshold'])
fpositive_mask = tf.cast(positive_mask, tf.float32)
n_positives = tf.reduce_sum(fpositive_mask)
    # negative examples are those whose max overlap is still lower than neg_threshold; note that some positives may also have a lower jaccard
    # note: anchors whose gscores is 0 were either ignored during anchor encoding or have 0 overlap with all ground truth
#negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(tf.logical_or(positive_mask, glabels < 0)), gscores < params['neg_threshold']), gscores > 0.)
negtive_mask = tf.logical_and(tf.equal(glabels, 0), gscores > 0.)
#negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(positive_mask), gscores < params['neg_threshold']), gscores > 0.)
#negtive_mask = tf.logical_and(gscores < params['neg_threshold'], tf.logical_not(positive_mask))
fnegtive_mask = tf.cast(negtive_mask, tf.float32)
n_negtives = tf.reduce_sum(fnegtive_mask)
n_neg_to_select = tf.cast(params['negative_ratio'] * n_positives, tf.int32)
n_neg_to_select = tf.minimum(n_neg_to_select, tf.cast(n_negtives, tf.int32))
# hard negative mining for classification
predictions_for_bg = tf.nn.softmax(cls_pred)[:, 0]
prob_for_negtives = tf.where(negtive_mask,
0. - predictions_for_bg,
# ignore all the positives
0. - tf.ones_like(predictions_for_bg))
topk_prob_for_bg, _ = tf.nn.top_k(prob_for_negtives, k=n_neg_to_select)
selected_neg_mask = prob_for_negtives > topk_prob_for_bg[-1]
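    # Hard negative mining, as implemented above: negatives are ranked by their background
    # confidence (a lower background score means a harder negative) and only the n_neg_to_select
    # hardest ones survive the top_k cut; everything else is masked out below.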
    # # randomly select negative examples for classification
# selected_neg_mask = tf.random_uniform(tf.shape(gscores), minval=0, maxval=1.) < tf.where(
# tf.greater(n_negtives, 0),
# tf.divide(tf.cast(n_neg_to_select, tf.float32), n_negtives),
# tf.zeros_like(tf.cast(n_neg_to_select, tf.float32)),
# name='rand_select_negtive')
    # include both selected negative and all positive examples
final_mask = tf.stop_gradient(tf.logical_or(tf.logical_and(negtive_mask, selected_neg_mask), positive_mask))
total_examples = tf.reduce_sum(tf.cast(final_mask, tf.float32))
# add mask for glabels and cls_pred here
glabels = tf.boolean_mask(tf.clip_by_value(glabels, 0, FLAGS.num_classes), tf.stop_gradient(final_mask))
cls_pred = tf.boolean_mask(cls_pred, tf.stop_gradient(final_mask))
location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
gtargets = tf.boolean_mask(gtargets, tf.stop_gradient(positive_mask))
predictions = {
'classes': tf.argmax(cls_pred, axis=-1),
'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1),
'bboxes_predict': tf.reshape(bboxes_pred, [-1, 4]) }
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate loss, which includes softmax cross entropy and L2 regularization.
cross_entropy = tf.cond(n_positives > 0., lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred), lambda: 0.)
#cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy_loss')
tf.summary.scalar('cross_entropy_loss', cross_entropy)
loc_loss = tf.cond(n_positives > 0., lambda: modified_smooth_l1(location_pred, tf.stop_gradient(gtargets), sigma=1.), lambda: tf.zeros_like(location_pred))
#loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets))
loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1))
loc_loss = tf.identity(loc_loss, name='location_loss')
tf.summary.scalar('location_loss', loc_loss)
tf.losses.add_loss(loc_loss)
# Add weight decay to the loss. We exclude the batch norm variables because
# doing so leads to a small improvement in accuracy.
loss = cross_entropy + loc_loss + params['weight_decay'] * tf.add_n(
[tf.nn.l2_loss(v) for v in tf.trainable_variables()
if 'batch_normalization' not in v.name])
total_loss = tf.identity(loss, name='total_loss')
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
lr_values = [params['learning_rate'] * decay for decay in params['lr_decay_factors']]
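        # Piecewise-constant schedule: boundaries come from --decay_boundaries and each segment uses
        # learning_rate scaled by the matching --lr_decay_factors entry; the result is floored at
        # --end_learning_rate just below so the rate never decays past that value.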
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
[int(_) for _ in params['decay_boundaries']],
lr_values)
truncated_learning_rate = tf.maximum(learning_rate, tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype))
# Create a tensor named learning_rate for logging purposes.
tf.identity(truncated_learning_rate, name='learning_rate')
tf.summary.scalar('learning_rate', truncated_learning_rate)
optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate,
momentum=params['momentum'])
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step)
else:
train_op = None
cls_accuracy = tf.metrics.accuracy(glabels, predictions['classes'])
metrics = {'cls_accuracy': cls_accuracy}
# Create a tensor named train_accuracy for logging purposes.
tf.identity(cls_accuracy[1], name='cls_accuracy')
tf.summary.scalar('cls_accuracy', cls_accuracy[1])
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics,
scaffold = tf.train.Scaffold(init_fn=train_helper.get_init_fn_for_scaffold(FLAGS)))
def parse_comma_list(args):
return [float(s.strip()) for s in args.split(',')]
def main(_):
# Using the Winograd non-fused algorithms provides a small performance boost.
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction)
config = tf.ConfigProto(allow_soft_placement = True, log_device_placement = False, intra_op_parallelism_threads = FLAGS.num_cpu_threads, inter_op_parallelism_threads = FLAGS.num_cpu_threads, gpu_options = gpu_options)
# Set up a RunConfig to only save checkpoints once per training cycle.
run_config = tf.estimator.RunConfig().replace(
save_checkpoints_secs=FLAGS.save_checkpoints_secs).replace(
save_checkpoints_steps=None).replace(
save_summary_steps=FLAGS.save_summary_steps).replace(
keep_checkpoint_max=5).replace(
log_step_count_steps=FLAGS.log_every_n_steps).replace(
session_config=config)
xdetector = tf.estimator.Estimator(
model_fn=xdet_model_fn, model_dir=FLAGS.model_dir, config=run_config,
params={
'resnet_size': FLAGS.resnet_size,
'data_format': FLAGS.data_format,
'model_scope': FLAGS.model_scope,
'num_classes': FLAGS.num_classes,
'negative_ratio': FLAGS.negative_ratio,
'match_threshold': FLAGS.match_threshold,
'neg_threshold': FLAGS.neg_threshold,
'weight_decay': FLAGS.weight_decay,
'momentum': FLAGS.momentum,
'learning_rate': FLAGS.learning_rate,
'end_learning_rate': FLAGS.end_learning_rate,
'learning_rate_decay_factor': FLAGS.learning_rate_decay_factor,
'decay_steps': FLAGS.decay_steps,
'decay_boundaries': parse_comma_list(FLAGS.decay_boundaries),
'lr_decay_factors': parse_comma_list(FLAGS.lr_decay_factors),
})
tensors_to_log = {
'lr': 'learning_rate',
'ce_loss': 'cross_entropy_loss',
'loc_loss': 'location_loss',
'total_loss': 'total_loss',
'cls_acc': 'cls_accuracy',
}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=FLAGS.log_every_n_steps)
print('Starting a training cycle.')
xdetector.train(input_fn=input_pipeline(), hooks=[logging_hook])
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| []
| []
| [
"TF_ENABLE_WINOGRAD_NONFUSED"
]
| [] | ["TF_ENABLE_WINOGRAD_NONFUSED"] | python | 1 | 0 | |
rubrik_oracle_backup_clone.py |
import rbs_oracle_common
import click
import logging
import sys
import os
import platform
from datetime import datetime
import configparser
@click.command()
@click.option('--source_host_db', '-s', type=str, required=True, help='The source <host or RAC cluster>:<database>')
@click.option('--mount_path', '-m', type=str, required=True, help='The path used to mount the backup files')
@click.option('--new_oracle_name', '-n', type=str, required=True, help='Name for the cloned live mounted database')
@click.option('--configuration_file', '-f', type=str, help='Oracle duplicate configuration file, can be used for all optional parameters. Overrides any set as script options')
@click.option('--time_restore', '-t', type=str, help='The point in time for the database clone in iso 8601 format (2019-04-30T18:23:21)')
@click.option('--oracle_home', '-o', type=str, help='ORACLE_HOME path for this database clone')
@click.option('--parallelism', '-p', default=4, type=str, help='The degree of parallelism to use for the RMAN duplicate')
@click.option('--no_spfile', is_flag=True, help='Do not restore the SPFILE. When omitted, the SPFILE is restored and instance specific parameters are replaced with the new DB name')
@click.option('--no_file_name_check', is_flag=True, help='Do not check for existing files and overwrite existing files. Potentially destructive use with caution')
@click.option('--refresh_db', is_flag=True, help='Refresh an existing database, overwriting the existing database. Requires no_file_name_check.')
@click.option('--control_files', type=str, help='Locations for control files. Using full paths in single quotes separated by commas')
@click.option('--db_file_name_convert', type=str, help='Remap the datafile locations. Using full paths in single quotes separated by commas in pairs of \'from location\',\'to location\'')
@click.option('--log_file_name_convert', type=str, help='Remap the redo log locations. Using full paths in single quotes separated by commas in pairs of \'from location\',\'to location\'')
@click.option('--audit_file_dest', type=str, help='Set the path for the audit files. This path must exist on the target host')
@click.option('--core_dump_dest', type=str, help='Set the path for the core dump files. This path must exist on the target host')
@click.option('--log_path', '-l', type=str, help='Log directory, if not specified the mount_path with be used.')
@click.option('--debug_level', '-d', type=str, default='WARNING', help='Logging level: DEBUG, INFO, WARNING or CRITICAL.')
def cli(source_host_db, mount_path, new_oracle_name, configuration_file, time_restore, oracle_home, parallelism,
no_spfile, no_file_name_check, refresh_db, control_files, db_file_name_convert, log_file_name_convert,
audit_file_dest, core_dump_dest, log_path, debug_level):
"""
This will use the Rubrik RMAN backups to do a duplicate (or refresh) of an Oracle Database.
\b
The source database is specified in a host:db format. The backup mount path and the new Oracle DB name are required.
If the restore time is not provided the most recent recoverable time will be used. Command line Oracle path
parameters must be enclosed in both double quotes and each path within enclosed with single quotes. All the optional
    parameters can be provided in a configuration file. All the flag options must be entered as true or false in the
configuration file. If the Oracle Home is not specified the ORACLE_HOME path from the source database will be used.
If a log directory is not specified, no log will be created.
\b
Example:
rubrik_oracle_backup_clone -s jz-sourcehost-1:ora1db -m /u02/oradata/restore -n oracln -t 2020-11-06T00:06:00 -p 8
-l /home/oracle/clone_logs --no_file_name_check --refresh_db
--db_file_name_convert "'/u02/oradata/ora1db/','/u02/oradata/oracln/'"
--control_files "'/u02/oradata/oracln/control01.ctl','/u02/oradata/oracln/control02.ctl'"
--log_file_name_convert "'/u02/oradata/ora1db/','u02/oradata/oracln/'"
--audit_file_dest "'/u01/app/oracle/admin/clonedb/adump'"
--core_dump_dest "'/u01/app/oracle/admin/clonedb/cdump'"
\b
Example Configuration File:
### The following line is required:
[parameters]
### All parameters are optional. Command line flags are boolean (true/false)
### The degree of parallelism to use for the RMAN duplicate (default is 4)
# parallelism = 4
### Do not restore the spfile renaming the parameters with the new db name.
# no_spfile = true
    ### Point in time for duplicate
# time_restore = 2020-11-08T00:06:00
### ORACLE_HOME if different than source db
# oracle_home = /u01/app/oracle/product/12.2.0/dbhome_1
### Do not check for existing files
# no_file_name_check = true
    ### Refresh an existing database. The database will be shut down and the existing files will be overwritten.
### Requires no_file_name_check = True
# refresh_db = True
### Control File locations
# control_files = '/u02/oradata/clonedb/control01.ctl','/u02/oradata/clonedb/control02.ctl'
### Remap the database files
# db_file_name_convert = '/u02/oradata/ora1db/','/u02/oradata/clonedb/'
### Remap the redo log locations
# log_file_name_convert = '/u02/oradata/ora1db/','u02/oradata/clonedb/'
### Set the audit file destination path
# audit_file_dest = '/u01/app/oracle/admin/clonedb/adump'
### Set the core dump destination path
# core_dump_dest = '/u01/app/oracle/admin/clonedb/cdump'
    ### Directory where logs will be created. If not provided, no logs will be created
# log_path = /home/oracle/clone_logs
\b
Example:
rubrik_oracle_backup_clone -s jz-sourcehost-1:ora1db -m /u02/oradata/restore -n oracln -f /home/oracle/clone_config.txt
"""
numeric_level = getattr(logging, debug_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: {}'.format(debug_level))
logger = logging.getLogger()
logger.setLevel(logging.NOTSET)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(numeric_level)
console_formatter = logging.Formatter('%(asctime)s: %(message)s')
ch.setFormatter(console_formatter)
logger.addHandler(ch)
# Read in the configuration
if configuration_file:
configuration = configparser.ConfigParser()
configuration.read(configuration_file)
if 'parallelism' in configuration['parameters'].keys():
parallelism = configuration['parameters']['parallelism']
if 'no_spfile' in configuration['parameters'].keys():
no_spfile = configuration['parameters'].getboolean('no_spfile')
if 'no_file_name_check' in configuration['parameters'].keys():
no_file_name_check = configuration['parameters'].getboolean('no_file_name_check')
if 'refresh_db' in configuration['parameters'].keys():
refresh_db = configuration['parameters'].getboolean('refresh_db')
if 'control_files' in configuration['parameters'].keys():
control_files = configuration['parameters']['control_files']
if 'db_file_name_convert' in configuration['parameters'].keys():
db_file_name_convert = configuration['parameters']['db_file_name_convert']
if 'log_file_name_convert' in configuration['parameters'].keys():
log_file_name_convert = configuration['parameters']['log_file_name_convert']
if 'log_path' in configuration['parameters'].keys():
log_path = configuration['parameters']['log_path']
if 'time_restore' in configuration['parameters'].keys():
time_restore = configuration['parameters']['time_restore']
if 'audit_file_dest' in configuration['parameters'].keys():
audit_file_dest = configuration['parameters']['audit_file_dest']
if 'core_dump_dest' in configuration['parameters'].keys():
            core_dump_dest = configuration['parameters']['core_dump_dest']
        if 'oracle_home' in configuration['parameters'].keys():
            oracle_home = configuration['parameters']['oracle_home']
logger.debug("Parameters for duplicate loaded from file: {}.".format(configuration))
# Set up the file logging
if log_path:
os.makedirs(log_path, exist_ok=True)
logfile = os.path.join(log_path, "{}_Clone_{}.log".format(new_oracle_name, datetime.now().strftime("%Y%m%d-%H%M%S")))
fh = logging.FileHandler(logfile, mode='w')
fh.setLevel(logging.DEBUG)
file_formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s')
fh.setFormatter(file_formatter)
logger.addHandler(fh)
source_host_db = source_host_db.split(":")
# Get the target host name which is the host running the command
host_target = platform.uname()[1].split('.')[0]
logger.debug("The hostname used for the target host is {}".format(host_target))
if len(new_oracle_name) > 8:
logger.debug("The new oracle name: {} is too long. Oracle names must be 8 characters or less. Aborting clone".format(new_oracle_name))
raise RubrikOracleBackupMountCloneError("The new oracle name: {} is too long. Oracle names must be 8 characters or less.".format(new_oracle_name))
if new_oracle_name == source_host_db[1]:
logger.debug("The new oracle db name {} cannot be the same as the source db name {} ".format(new_oracle_name, source_host_db[1]))
raise RubrikOracleBackupMountCloneError("The new oracle db name {} cannot be the same as the source db name {} ".format(new_oracle_name, source_host_db[1]))
rubrik = rbs_oracle_common.RubrikConnection()
database = rbs_oracle_common.RubrikRbsOracleDatabase(rubrik, source_host_db[1], source_host_db[0])
oracle_db_info = database.get_oracle_db_info()
# If the source database is on a RAC cluster the target must be a RAC cluster otherwise it will be an Oracle Host
if 'racName' in oracle_db_info.keys():
if oracle_db_info['racName']:
host_id = database.get_rac_id(rubrik.cluster_id, host_target)
else:
host_id = database.get_host_id(rubrik.cluster_id, host_target)
    # Use the provided time or, if no time has been provided, use the most recent recovery point
if time_restore:
time_ms = database.epoch_time(time_restore, rubrik.timezone)
logger.warning("Materializing backup set from time {} for mount.". format(time_restore))
else:
logger.warning("Using most recent recovery point for mount.")
time_ms = database.epoch_time(oracle_db_info['latestRecoveryPoint'], rubrik.timezone)
    # Check ORACLE_HOME and set to the source ORACLE_HOME if not provided
if not oracle_home:
oracle_home = oracle_db_info['oracleHome']
if not os.path.exists(oracle_home):
logger.debug("The ORACLE_HOME: {} does not exist on the target host: {}".format(oracle_home, host_target))
raise RubrikOracleBackupMountCloneError("The ORACLE_HOME: {} does not exist on the target host: {}".format(oracle_home, host_target))
# Get directories in path to allow us to find the new directory after the mount
live_mount_directories = os.listdir(mount_path)
logger.warning("Starting the mount of the requested {} backup pieces on {}.".format(source_host_db[1], host_target))
live_mount_info = database.live_mount(host_id, time_ms, files_only=True, mount_path=mount_path)
live_mount_info = database.async_requests_wait(live_mount_info['id'], 20)
logger.debug("Backup Live Mount Asyc Request: {}".format(live_mount_info))
logger.info("Async request completed with status: {}".format(live_mount_info['status']))
if live_mount_info['status'] != "SUCCEEDED":
logger.debug("Mount of backup files did not complete successfully. Mount ended with status {}".format(live_mount_info['status']))
raise RubrikOracleBackupMountCloneError("Mount of backup files did not complete successfully. Mount ended with status {}".format(live_mount_info['status']))
logger.warning("Live mount of the backup files completed.")
# Now determine the new live mount directory
new_live_mount_directories = os.listdir(mount_path)
live_mount_directory = list(set(new_live_mount_directories) - set(live_mount_directories))
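    # The freshly mounted backup directory is identified by diffing the directory listing taken
    # before the mount against the one taken after it; exactly one new entry is expected.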
if len(live_mount_directory) == 1:
backup_path = os.path.join(mount_path, live_mount_directory[0])
else:
logger.debug("Multiple directories were created in {} during this operation. Live mount directory cannot be determined".format(mount_path))
raise RubrikOracleBackupMountCloneError("Multiple directories were created in {} during this operation. Live mount directory cannot be determined".format(mount_path))
logger.info("Using the live mount path: {}".format(backup_path))
live_mount_id = live_mount_directory[0].split('_')[1]
logger.debug("Live mount ID is {}".format(live_mount_id))
os.environ["ORACLE_HOME"] = oracle_home
os.environ["ORACLE_SID"] = new_oracle_name
logger.debug("Setting env variable ORACLE_HOME={}, ORACLE_SID={}.".format(oracle_home, new_oracle_name))
if refresh_db:
logger.warning("Shutting down {} database for refresh".format(new_oracle_name))
logger.info(database.sqlplus_sysdba(oracle_home, "shutdown immediate;"))
if no_spfile:
logger.warning("Starting auxiliary instance")
sql_return = database.sqlplus_sysdba(oracle_home, "startup nomount")
logger.info(sql_return)
else:
logger.warning("Creating minimal init file to start instance")
init_file = os.path.join(oracle_home, 'dbs', 'init{}.ora'.format(new_oracle_name))
logger.debug("Creating new temporary init file {}".format(init_file))
with open(init_file, 'w') as file:
file.write('db_name={}\n'.format(new_oracle_name))
logger.warning("Starting auxiliary instance")
sql_return = database.sqlplus_sysdba(oracle_home, "startup nomount pfile='{}'".format(init_file))
logger.info(sql_return)
if "ORA-01081: cannot start already-running ORACLE" in sql_return:
logger.debug("There is an instance of {} all ready running on this host. Aborting clone".format(new_oracle_name))
raise RubrikOracleBackupMountCloneError("There is an instance of {} all ready running on this host or refreshed DB did not start cleanly. Aborting clone".format(new_oracle_name))
sql_return = database.sqlplus_sysdba(oracle_home, "select instance_name from v$instance;")
logger.info(sql_return)
if new_oracle_name not in sql_return:
logger.debug("DB Instance check failed. Instance name is not {}. Aborting clone".format(new_oracle_name))
raise RubrikOracleBackupMountCloneError("DB Instance check failed. Instance name is not {}. Aborting clone".format(new_oracle_name))
logger.warning("Beginning duplicate of {} to {} on host {}.".format(source_host_db[1], new_oracle_name, source_host_db[0]))
duplicate_commands = "run { "
for x in range(int(parallelism)):
channel = x + 1
duplicate_commands = duplicate_commands + "allocate auxiliary channel aux{} device type disk; ".format(channel)
duplicate_commands = duplicate_commands + "duplicate database to '{}' ".format(new_oracle_name)
if time_restore:
time_restore = time_restore.replace("T", "")
duplicate_commands = duplicate_commands + """until time "TO_DATE('{}','YYYY-MM-DD HH24:MI:SS')" """.format(time_restore)
if not no_spfile:
duplicate_commands = duplicate_commands + "SPFILE parameter_value_convert ('{}','{}') ".format(source_host_db[1], new_oracle_name)
if control_files:
duplicate_commands = duplicate_commands + "set control_files = {} ".format(control_files)
if db_file_name_convert:
duplicate_commands = duplicate_commands + "set db_file_name_convert = {} ".format(db_file_name_convert)
if log_file_name_convert:
duplicate_commands = duplicate_commands + "set log_file_name_convert = {} ".format(log_file_name_convert)
if audit_file_dest:
duplicate_commands = duplicate_commands + "set audit_file_dest = {} ".format(audit_file_dest)
if core_dump_dest:
duplicate_commands = duplicate_commands + "set core_dump_dest = {} ".format(core_dump_dest)
duplicate_commands = duplicate_commands + "BACKUP LOCATION '{}' ".format(mount_path)
if no_file_name_check:
duplicate_commands = duplicate_commands + "NOFILENAMECHECK; }"
else:
duplicate_commands = duplicate_commands + "; }"
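    # Illustrative only (names and paths below are hypothetical): the assembled script ends up as
    # run { allocate auxiliary channel aux1 device type disk; ... duplicate database to 'oracln'
    #       until time "TO_DATE('...','YYYY-MM-DD HH24:MI:SS')" SPFILE parameter_value_convert ('ora1db','oracln')
    #       set control_files = ... BACKUP LOCATION '/u02/oradata/restore' NOFILENAMECHECK; }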
logger.debug("Duplicate script: "
"{}".format(duplicate_commands))
logger.info(database.rman(oracle_home, duplicate_commands, "auxiliary"))
logger.warning("Duplicate of {} database complete.".format(new_oracle_name))
mount = rbs_oracle_common.RubrikRbsOracleMount(rubrik, source_host_db[1], source_host_db[0], host_target)
logger.warning("Unmounting backups.")
delete_request = mount.live_mount_delete(live_mount_id)
delete_request = mount.async_requests_wait(delete_request['id'], 12)
logger.info("Async request completed with status: {}".format(delete_request['status']))
logger.debug(delete_request)
if delete_request['status'] != "SUCCEEDED":
logger.warning("Unmount of backup files failed with status: {}".format(delete_request['status']))
else:
logger.info("Live mount of backup data files with id: {} has been unmounted.".format(live_mount_id))
logger.warning("Backups unmounted")
logger.warning("Database clone complete")
return
class RubrikOracleBackupMountCloneError(rbs_oracle_common.NoTraceBackWithLineNumber):
"""
Renames object so error is named with calling script
"""
pass
if __name__ == "__main__":
cli()
| []
| []
| [
"ORACLE_SID",
"ORACLE_HOME"
]
| [] | ["ORACLE_SID", "ORACLE_HOME"] | python | 2 | 0 | |
test/integration_tests/test.go | package integrationtests
import (
"fmt"
"os"
"os/exec"
"strconv"
"testing"
"time"
"github.com/bunsenapp/go-selenium"
"github.com/pkg/errors"
)
func setUp() {
command := exec.Command("docker", "run", "-d", "--name=goselenium-tests", "-p=4444:4444", "selenium/standalone-firefox")
command.CombinedOutput()
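	// Crude readiness wait: give the standalone Firefox container a moment to start accepting
	// connections before the tests run (a fixed sleep rather than polling the hub endpoint).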
time.Sleep(2000 * time.Millisecond)
}
func tearDown() {
command := exec.Command("docker", "rm", "goselenium-tests", "-f")
command.CombinedOutput()
}
func errorAndWrap(t *testing.T, message string, oldError error) {
if oldError == nil {
t.Errorf(errors.New(message).Error())
} else {
err := errors.Wrap(oldError, message)
t.Errorf(err.Error())
}
}
func printObjectResult(obj interface{}) {
envResult := os.Getenv("GOSELENIUM_TEST_DETAIL")
shouldShowDetailedResults, err := strconv.ParseBool(envResult)
if shouldShowDetailedResults && err == nil {
fmt.Println(fmt.Sprintf("Object returned: %+v", obj))
}
}
func createDriver(t *testing.T) goselenium.WebDriver {
caps := goselenium.Capabilities{}
caps.SetBrowser(goselenium.FirefoxBrowser())
driver, err := goselenium.NewSeleniumWebDriver("http://localhost:4444/wd/hub/", caps)
if err != nil {
t.Errorf("Driver creation threw an error.")
}
return driver
}
| [
"\"GOSELENIUM_TEST_DETAIL\""
]
| []
| [
"GOSELENIUM_TEST_DETAIL"
]
| [] | ["GOSELENIUM_TEST_DETAIL"] | go | 1 | 0 | |
detectron2/modeling/backbone/fpn.py | # Copyright (c) Facebook, Inc. and its affiliates.
import math
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from .backbone import Backbone
from .build import BACKBONE_REGISTRY
from .resnet import build_resnet_backbone
import os
import logging
__all__ = ["build_resnet_fpn_backbone", "build_retinanet_resnet_fpn_backbone", "FPN"]
class FPN(Backbone):
"""
This module implements :paper:`FPN`.
It creates pyramid features built on top of some input feature maps.
"""
_fuse_type: torch.jit.Final[str]
def __init__(
self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum"
):
"""
Args:
bottom_up (Backbone): module representing the bottom up subnetwork.
Must be a subclass of :class:`Backbone`. The multi-scale feature
maps generated by the bottom up network, and listed in `in_features`,
are used to generate FPN levels.
in_features (list[str]): names of the input feature maps coming
from the backbone to which FPN is attached. For example, if the
backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
of these may be used; order must be from high to low resolution.
out_channels (int): number of channels in the output feature maps.
norm (str): the normalization to use.
top_block (nn.Module or None): if provided, an extra operation will
be performed on the output of the last (smallest resolution)
FPN output, and the result will extend the result list. The top_block
further downsamples the feature map. It must have an attribute
"num_levels", meaning the number of extra FPN levels added by
this block, and "in_feature", which is a string representing
its input feature (e.g., p5).
fuse_type (str): types for fusing the top down features and the lateral
ones. It can be "sum" (default), which sums up element-wise; or "avg",
which takes the element-wise mean of the two.
"""
super(FPN, self).__init__()
assert isinstance(bottom_up, Backbone)
assert in_features, in_features
# Feature map strides and channels from the bottom up network (e.g. ResNet)
input_shapes = bottom_up.output_shape()
strides = [input_shapes[f].stride for f in in_features]
in_channels_per_feature = [input_shapes[f].channels for f in in_features]
_assert_strides_are_log2_contiguous(strides)
lateral_convs = []
output_convs = []
use_bias = norm == ""
for idx, in_channels in enumerate(in_channels_per_feature):
lateral_norm = get_norm(norm, out_channels)
output_norm = get_norm(norm, out_channels)
lateral_conv = Conv2d(
in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm
)
output_conv = Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
)
weight_init.c2_xavier_fill(lateral_conv)
weight_init.c2_xavier_fill(output_conv)
stage = int(math.log2(strides[idx]))
self.add_module("fpn_lateral{}".format(stage), lateral_conv)
self.add_module("fpn_output{}".format(stage), output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
self.top_block = top_block
self.in_features = tuple(in_features)
self.bottom_up = bottom_up
# Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides}
# top block output feature maps.
if self.top_block is not None:
for s in range(stage, stage + self.top_block.num_levels):
self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1)
self._out_features = list(self._out_feature_strides.keys())
self._out_feature_channels = {k: out_channels for k in self._out_features}
self._size_divisibility = strides[-1]
assert fuse_type in {"avg", "sum"}
self._fuse_type = fuse_type
@property
def size_divisibility(self):
return self._size_divisibility
def forward(self, x):
"""
Args:
input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to
feature map tensor for each feature level in high to low resolution order.
Returns:
dict[str->Tensor]:
mapping from feature map name to FPN feature map tensor
in high to low resolution order. Returned feature names follow the FPN
paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
["p2", "p3", ..., "p6"].
"""
bottom_up_features = self.bottom_up(x)
results = []
prev_features = self.lateral_convs[0](bottom_up_features[self.in_features[-1]])
results.append(self.output_convs[0](prev_features))
# Reverse feature maps into top-down order (from low to high resolution)
for idx, (lateral_conv, output_conv) in enumerate(
zip(self.lateral_convs, self.output_convs)
):
# Slicing of ModuleList is not supported https://github.com/pytorch/pytorch/issues/47336
# Therefore we loop over all modules but skip the first one
if idx > 0:
features = self.in_features[-idx - 1]
features = bottom_up_features[features]
top_down_features = F.interpolate(prev_features, scale_factor=2.0, mode="nearest")
lateral_features = lateral_conv(features)
prev_features = lateral_features + top_down_features
if self._fuse_type == "avg":
prev_features /= 2
results.insert(0, output_conv(prev_features))
if self.top_block is not None:
if self.top_block.in_feature in bottom_up_features:
top_block_in_feature = bottom_up_features[self.top_block.in_feature]
else:
top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)]
results.extend(self.top_block(top_block_in_feature))
assert len(self._out_features) == len(results)
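        # Optional noise injection (presumably for robustness experiments): when the ADDNOISE
        # environment variable is "True", every FPN output map is scaled by a single random factor
        # drawn uniformly from [0.8, 1.2).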
logging.info(f"need to add noise here: {os.environ['ADDNOISE']}")
if os.environ.get("ADDNOISE") == "True":
noise = ((1.2 - 0.8) * torch.rand(1) + 0.8).cuda()
return {f: res*noise for f, res in zip(self._out_features, results)}
return {f: res for f, res in zip(self._out_features, results)}
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
def _assert_strides_are_log2_contiguous(strides):
"""
Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2".
"""
for i, stride in enumerate(strides[1:], 1):
assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format(
stride, strides[i - 1]
)
class LastLevelMaxPool(nn.Module):
"""
This module is used in the original FPN to generate a downsampled
P6 feature from P5.
"""
def __init__(self):
super().__init__()
self.num_levels = 1
self.in_feature = "p5"
def forward(self, x):
return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)]
class LastLevelP6P7(nn.Module):
"""
This module is used in RetinaNet to generate extra layers, P6 and P7 from
C5 feature.
"""
def __init__(self, in_channels, out_channels, in_feature="res5"):
super().__init__()
self.num_levels = 2
self.in_feature = in_feature
self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
for module in [self.p6, self.p7]:
weight_init.c2_xavier_fill(module)
def forward(self, c5):
p6 = self.p6(c5)
p7 = self.p7(F.relu(p6))
return [p6, p7]
@BACKBONE_REGISTRY.register()
def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
@BACKBONE_REGISTRY.register()
def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
in_channels_p6p7 = bottom_up.output_shape()["res5"].channels
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelP6P7(in_channels_p6p7, out_channels),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
| []
| []
| [
"ADDNOISE"
]
| [] | ["ADDNOISE"] | python | 1 | 0 | |
blueapps/conf/database.py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import os
def get_default_database_config_dict(settings_module):
if os.getenv('GCS_MYSQL_NAME') and os.getenv('MYSQL_NAME'):
db_prefix = settings_module.get('DB_PREFIX', '')
if not db_prefix:
raise EnvironmentError('no DB_PREFIX config while multiple '
'databases found in environment')
elif os.getenv('GCS_MYSQL_NAME'):
db_prefix = 'GCS_MYSQL'
elif os.getenv('MYSQL_NAME'):
db_prefix = 'MYSQL'
else:
if settings_module.get('IS_LOCAL', False):
return {}
else:
raise EnvironmentError('no database[GCS_MYSQL or MYSQL] config')
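    # At this point db_prefix is either 'GCS_MYSQL' or 'MYSQL', so the variables read below are
    # e.g. GCS_MYSQL_NAME / GCS_MYSQL_USER / ... or MYSQL_NAME / MYSQL_USER / ... respectively.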
return {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ['%s_NAME' % db_prefix],
'USER': os.environ['%s_USER' % db_prefix],
'PASSWORD': os.environ['%s_PASSWORD' % db_prefix],
'HOST': os.environ['%s_HOST' % db_prefix],
'PORT': os.environ['%s_PORT' % db_prefix],
}
| []
| []
| [
"%s_PASSWORD' % db_prefi",
"GCS_MYSQL_NAME",
"%s_NAME' % db_prefi",
"MYSQL_NAME",
"%s_USER' % db_prefi",
"%s_HOST' % db_prefi",
"%s_PORT' % db_prefi"
]
| [] | ["%s_PASSWORD' % db_prefi", "GCS_MYSQL_NAME", "%s_NAME' % db_prefi", "MYSQL_NAME", "%s_USER' % db_prefi", "%s_HOST' % db_prefi", "%s_PORT' % db_prefi"] | python | 7 | 0 | |
pkg/archive/changes_posix_test.go | package archive // import "github.com/demonoid81/moby/pkg/archive"
import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"sort"
"testing"
)
func TestHardLinkOrder(t *testing.T) {
names := []string{"file1.txt", "file2.txt", "file3.txt"}
msg := []byte("Hey y'all")
// Create dir
src, err := ioutil.TempDir("", "docker-hardlink-test-src-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(src)
for _, name := range names {
func() {
fh, err := os.Create(path.Join(src, name))
if err != nil {
t.Fatal(err)
}
defer fh.Close()
if _, err = fh.Write(msg); err != nil {
t.Fatal(err)
}
}()
}
// Create dest, with changes that includes hardlinks
dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-")
if err != nil {
t.Fatal(err)
}
os.RemoveAll(dest) // we just want the name, at first
if err := copyDir(src, dest); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dest)
for _, name := range names {
for i := 0; i < 5; i++ {
if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil {
t.Fatal(err)
}
}
}
// get changes
changes, err := ChangesDirs(dest, src)
if err != nil {
t.Fatal(err)
}
// sort
sort.Sort(changesByPath(changes))
// ExportChanges
ar, err := ExportChanges(dest, changes, nil, nil)
if err != nil {
t.Fatal(err)
}
hdrs, err := walkHeaders(ar)
if err != nil {
t.Fatal(err)
}
// reverse sort
sort.Sort(sort.Reverse(changesByPath(changes)))
// ExportChanges
arRev, err := ExportChanges(dest, changes, nil, nil)
if err != nil {
t.Fatal(err)
}
hdrsRev, err := walkHeaders(arRev)
if err != nil {
t.Fatal(err)
}
// line up the two sets
sort.Sort(tarHeaders(hdrs))
sort.Sort(tarHeaders(hdrsRev))
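	// After sorting both header sets by name they should pair up one-to-one: exporting the changes
	// in reverse order must still yield the same file names, sizes, types and link targets.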
// compare Size and LinkName
for i := range hdrs {
if hdrs[i].Name != hdrsRev[i].Name {
t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name)
}
if hdrs[i].Size != hdrsRev[i].Size {
t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size)
}
if hdrs[i].Typeflag != hdrsRev[i].Typeflag {
t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag)
}
if hdrs[i].Linkname != hdrsRev[i].Linkname {
t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname)
}
}
}
type tarHeaders []tar.Header
func (th tarHeaders) Len() int { return len(th) }
func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] }
func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name }
func walkHeaders(r io.Reader) ([]tar.Header, error) {
t := tar.NewReader(r)
var headers []tar.Header
for {
hdr, err := t.Next()
if err != nil {
if err == io.EOF {
break
}
return headers, err
}
headers = append(headers, *hdr)
}
return headers, nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
trec2014/sbin/eval/rouge_sampled_eval_time.py | import numpy as np
from cuttsum.summarizer.filters import APFilteredSummary, \
APSalienceFilteredSummary, HACFilteredSummary, \
APSalTRankSalThreshFilteredSummary, RankedSalienceFilteredSummary
import os
from cuttsum.pipeline import jobs
import cuttsum.judgements as cj
import pandas as pd
import random
import subprocess
import re
import multiprocessing
from datetime import datetime
from collections import defaultdict
def random_summary(updates, max_length):
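    # Build a baseline summary by shuffling the candidate updates and concatenating them; the final
    # slice below truncates the text to max_length characters (the model summary's length budget).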
updates_cpy = list(updates)
random.shuffle(updates_cpy)
length = 0
summary = []
while len(updates_cpy) > 0 and length + len(summary) - 1 < max_length:
summary.append(updates_cpy.pop())
summary_text = u'\n'.join(summary)[:max_length]
return summary_text
def make_rank_summaries(
rank_sal_cutoff, rank_sim_cutoff, model_summaries, max_samples=1000):
data_dir = os.path.join(os.getenv("TREC_DATA", "."), "rouge", "rank")
rouge_dir = os.path.join(os.getenv("TREC_DATA", "."), "rouge")
config_path = os.path.join(
data_dir, "rank_sal_{}_sim_{}_config".format(
rank_sal_cutoff, rank_sim_cutoff))
config_paths = defaultdict(list)
for job in jobs.event_cross_validation_jobs("crossval"):
for event, corpus in job.eval_events():
model_summary = model_summaries[event.fs_name()]
model_path = os.path.join(
rouge_dir, "model_{}".format(event.fs_name()))
max_len = len(model_summary)
event_hours = event.list_event_hours()
df = RankedSalienceFilteredSummary().get_dataframe(
event,
rank_sal_cutoff, rank_sim_cutoff)
n_hours = len(event_hours)
for t, h in enumerate(xrange(12, n_hours, 12), 1):
print "\t",t, h
timestamp = int((event_hours[h] - \
datetime(1970,1,1)).total_seconds())
df_t = df[df['timestamp'] < timestamp]
updates = [update.decode('utf-8')
for update in df_t['text'].tolist()]
for n_sample in xrange(max_samples):
summary_text = random_summary(updates, max_len)
sum_path = os.path.join(
data_dir,
"rank_sal_{}_sim_{}_sample_{}_t{}_{}".format(
rank_sal_cutoff, rank_sim_cutoff, n_sample, t,
event.fs_name()))
with open(sum_path, 'w') as f:
f.write(summary_text.encode('utf-8'))
config_paths[t].append(
'{} {}'.format(sum_path, model_path))
all_config_paths = []
for t in sorted(config_paths.keys()):
config_path_t = config_path + "_t{}".format(t)
print config_path_t
with open(config_path_t, 'w') as f:
f.write('\n'.join(config_paths[t]))
all_config_paths.append(config_path_t)
return all_config_paths
def make_apsaltr_summaries(
apsal_sal_cutoff, apsal_sim_cutoff, model_summaries, max_samples=1000):
    data_dir = os.path.join(os.getenv("TREC_DATA", "."), "rouge", "apsaltr")
    rouge_dir = os.path.join(os.getenv("TREC_DATA", "."), "rouge")
config_path = os.path.join(
data_dir, "apsaltr_sal_{}_sim_{}_config".format(
apsal_sal_cutoff, apsal_sim_cutoff))
config_paths = defaultdict(list)
for job in jobs.event_cross_validation_jobs("crossval"):
for event, corpus in job.eval_events():
model_summary = model_summaries[event.fs_name()]
model_path = os.path.join(
rouge_dir, "model_{}".format(event.fs_name()))
max_len = len(model_summary)
event_hours = event.list_event_hours()
df = APSalTRankSalThreshFilteredSummary().get_dataframe(
event, job.key, job.feature_set,
apsal_sal_cutoff, apsal_sim_cutoff)
n_hours = len(event_hours)
for t, h in enumerate(xrange(12, n_hours, 12), 1):
print "\t",t, h
timestamp = int((event_hours[h] - \
datetime(1970,1,1)).total_seconds())
df_t = df[df['timestamp'] < timestamp]
updates = [update.decode('utf-8')
for update in df_t['text'].tolist()]
for n_sample in xrange(max_samples):
summary_text = random_summary(updates, max_len)
sum_path = os.path.join(
data_dir,
"apsaltr_sal_{}_sim_{}_sample_{}_t{}_{}".format(
apsal_sal_cutoff, apsal_sim_cutoff, n_sample, t,
event.fs_name()))
with open(sum_path, 'w') as f:
f.write(summary_text.encode('utf-8'))
config_paths[t].append(
'{} {}'.format(sum_path, model_path))
all_config_paths = []
for t in sorted(config_paths.keys()):
config_path_t = config_path + "_t{}".format(t)
print config_path_t
with open(config_path_t, 'w') as f:
f.write('\n'.join(config_paths[t]))
all_config_paths.append(config_path_t)
return all_config_paths
def make_apsal_summaries(
apsal_sal_cutoff, apsal_sim_cutoff, model_summaries, max_samples=1000):
data_dir = os.path.join(os.getenv("TREC_DATA", "."), "rouge", "apsal")
rouge_dir = os.path.join(os.getenv("TREC_DATA", "."), "rouge")
config_path = os.path.join(
data_dir, "apsal_sal_{}_sim_{}_config".format(
apsal_sal_cutoff, apsal_sim_cutoff))
config_paths = defaultdict(list)
for job in jobs.event_cross_validation_jobs("crossval"):
for event, corpus in job.eval_events():
model_summary = model_summaries[event.fs_name()]
model_path = os.path.join(
rouge_dir, "model_{}".format(event.fs_name()))
max_len = len(model_summary)
event_hours = event.list_event_hours()
df = APSalienceFilteredSummary().get_dataframe(
event, job.key, job.feature_set,
apsal_sal_cutoff, apsal_sim_cutoff)
n_hours = len(event_hours)
for t, h in enumerate(xrange(12, n_hours, 12), 1):
print "\t",t, h
timestamp = int((event_hours[h] - \
datetime(1970,1,1)).total_seconds())
df_t = df[df['timestamp'] < timestamp]
updates = [update.decode('utf-8')
for update in df_t['text'].tolist()]
for n_sample in xrange(max_samples):
summary_text = random_summary(updates, max_len)
sum_path = os.path.join(
data_dir,
"apsal_sal_{}_sim_{}_sample_{}_t{}_{}".format(
apsal_sal_cutoff, apsal_sim_cutoff, n_sample, t,
event.fs_name()))
with open(sum_path, 'w') as f:
f.write(summary_text.encode('utf-8'))
config_paths[t].append(
'{} {}'.format(sum_path, model_path))
all_config_paths = []
for t in sorted(config_paths.keys()):
config_path_t = config_path + "_t{}".format(t)
print config_path_t
with open(config_path_t, 'w') as f:
f.write('\n'.join(config_paths[t]))
all_config_paths.append(config_path_t)
return all_config_paths
def make_ap_summaries(ap_sim_cutoff, model_summaries, max_samples=1000):
data_dir = os.path.join(os.getenv("TREC_DATA", "."), "rouge", "ap")
rouge_dir = os.path.join(os.getenv("TREC_DATA", "."), "rouge")
config_path = os.path.join(
data_dir, "ap_sim_{}_config".format(ap_sim_cutoff))
config_paths = defaultdict(list)
for job in jobs.event_cross_validation_jobs("crossval"):
for event, corpus in job.eval_events():
model_summary = model_summaries[event.fs_name()]
model_path = os.path.join(
rouge_dir, "model_{}".format(event.fs_name()))
max_len = len(model_summary)
event_hours = event.list_event_hours()
df = APFilteredSummary().get_dataframe(
event, ap_sim_cutoff)
n_hours = len(event_hours)
for t, h in enumerate(xrange(12, n_hours, 12), 1):
print "\t",t, h
timestamp = int((event_hours[h] - \
datetime(1970,1,1)).total_seconds())
df_t = df[df['timestamp'] < timestamp]
updates = [update.decode('utf-8')
for update in df_t['text'].tolist()]
for n_sample in xrange(max_samples):
summary_text = random_summary(updates, max_len)
sum_path = os.path.join(
data_dir, "ap_sim_{}_sample_{}_t{}_{}".format(
ap_sim_cutoff, n_sample, t,
event.fs_name()))
with open(sum_path, 'w') as f:
f.write(summary_text.encode('utf-8'))
config_paths[t].append(
'{} {}'.format(sum_path, model_path))
all_config_paths = []
for t in sorted(config_paths.keys()):
config_path_t = config_path + "_t{}".format(t)
print config_path_t
with open(config_path_t, 'w') as f:
f.write('\n'.join(config_paths[t]))
all_config_paths.append(config_path_t)
return all_config_paths
def make_hac_summaries(hac_dist_cutoff, hac_sim_cutoff,
model_summaries, max_samples=1000):
data_dir = os.path.join(os.getenv("TREC_DATA", "."), "rouge", "hac")
rouge_dir = os.path.join(os.getenv("TREC_DATA", "."), "rouge")
config_path = os.path.join(
data_dir, "hac_dist{}_sim_{}_config".format(
hac_dist_cutoff, hac_sim_cutoff))
config_paths = defaultdict(list)
for job in jobs.event_cross_validation_jobs("crossval"):
for event, corpus in job.eval_events():
model_summary = model_summaries[event.fs_name()]
model_path = os.path.join(
rouge_dir, "model_{}".format(event.fs_name()))
max_len = len(model_summary)
event_hours = event.list_event_hours()
df = HACFilteredSummary().get_dataframe(
event, hac_dist_cutoff, hac_sim_cutoff)
n_hours = len(event_hours)
for t, h in enumerate(xrange(12, n_hours, 12), 1):
print "\t",t, h
timestamp = int((event_hours[h] - \
datetime(1970,1,1)).total_seconds())
df_t = df[df['timestamp'] < timestamp]
updates = [update.decode('utf-8')
for update in df_t['text'].tolist()]
for n_sample in xrange(max_samples):
summary_text = random_summary(updates, max_len)
sum_path = os.path.join(
data_dir, "hac_dist{}_sim_{}_sample_{}_t{}_{}".format(
hac_dist_cutoff, hac_sim_cutoff, n_sample, t,
event.fs_name()))
with open(sum_path, 'w') as f:
f.write(summary_text.encode('utf-8'))
config_paths[t].append(
'{} {}'.format(sum_path, model_path))
all_config_paths = []
for t in sorted(config_paths.keys()):
config_path_t = config_path + "_t{}".format(t)
print config_path_t
with open(config_path_t, 'w') as f:
f.write('\n'.join(config_paths[t]))
all_config_paths.append(config_path_t)
return all_config_paths
def rouge(args):
config_path = args
rpath = config_path.replace("config", "result")
print rpath
o = subprocess.check_output(
"cd RELEASE-1.5.5 ; " + \
"./ROUGE-1.5.5.pl -s -d -n 2 -x -a -f A -m -z SPL {}".format(config_path),
shell=True)
with open(rpath, 'w') as f:
f.write(o)
recall = float(re.search('X ROUGE-2 Average_R: ([^ ]+)', o).group(1))
prec = float(re.search('X ROUGE-2 Average_P: ([^ ]+)', o).group(1))
f1 = float(re.search('X ROUGE-2 Average_F: ([^ ]+)', o).group(1))
return config_path, recall, prec, f1
model_summaries = {}
nuggets = pd.concat((cj.get_2013_nuggets(),
cj.get_2014_nuggets()))
data_dir = os.path.join(os.getenv("TREC_DATA", "."), "rouge")
if not os.path.exists(data_dir):
os.makedirs(data_dir)
for job in jobs.event_cross_validation_jobs("crossval"):
for event, corpus in job.eval_events():
nugget_text = '\n'.join(
nuggets[nuggets['query id'] == event.query_id]['text'].tolist())
model_summaries[event.fs_name()] = nugget_text.decode('utf-8')
model_path = os.path.join(
data_dir, "model_{}".format(event.fs_name()))
with open(model_path, 'w') as f:
f.write(nugget_text)
from cuttsum.misc import ProgressBar
apsal_sal_cutoffs = [.6]
apsal_sim_cutoffs = [.65]
apsaltr_sal_cutoffs = [.6]
apsaltr_sim_cutoffs = [.7]
rank_sal_cutoffs = [1.8]
rank_sim_cutoffs = [.4]
ap_sim_cutoffs = [.7]
hac_dist_cutoffs = [1.7]
hac_sim_cutoffs = [.2]
rank_configs = []
for rank_sal_cutoff in rank_sal_cutoffs:
print rank_sal_cutoff
for rank_sim_cutoff in rank_sim_cutoffs:
print rank_sim_cutoff
c = make_rank_summaries(
rank_sal_cutoff, rank_sim_cutoff, model_summaries)
rank_configs.extend(c)
apsaltr_configs = []
for apsaltr_sal_cutoff in apsaltr_sal_cutoffs:
print apsaltr_sal_cutoff
for apsaltr_sim_cutoff in apsaltr_sim_cutoffs:
print apsaltr_sim_cutoff
c = make_apsaltr_summaries(
apsaltr_sal_cutoff, apsaltr_sim_cutoff, model_summaries)
apsaltr_configs.extend(c)
apsal_configs = []
for apsal_sal_cutoff in apsal_sal_cutoffs:
print apsal_sal_cutoff
for apsal_sim_cutoff in apsal_sim_cutoffs:
print apsal_sim_cutoff
c = make_apsal_summaries(
apsal_sal_cutoff, apsal_sim_cutoff, model_summaries)
apsal_configs.extend(c)
ap_configs = []
for ap_sim_cutoff in ap_sim_cutoffs:
print ap_sim_cutoff
c = make_ap_summaries(
ap_sim_cutoff, model_summaries)
ap_configs.extend(c)
hac_configs = []
for hac_dist_cutoff in hac_dist_cutoffs:
print hac_dist_cutoff
for hac_sim_cutoff in hac_sim_cutoffs:
print hac_sim_cutoff
c = make_hac_summaries(
hac_dist_cutoff, hac_sim_cutoff, model_summaries)
hac_configs.extend(c)
def print_results(configs):
n_jobs = len(configs)
pb = ProgressBar(n_jobs)
results = []
for result in multiprocessing.Pool(20).imap_unordered(rouge, configs):
pb.update()
results.append(result)
results.sort(key=lambda x: x[3], reverse=True)
for result in results[:10]:
print result
print "BEST RANK"
print_results(rank_configs + apsaltr_configs + apsal_configs + ap_configs \
+ hac_configs)
| []
| []
| [
"TREC_DATA"
]
| [] | ["TREC_DATA"] | python | 1 | 0 | |
kvs/main.go | package main
import (
"context"
"flag"
"fmt"
"github.com/shniu/gostuff/kvs/log"
"github.com/shniu/gostuff/kvs/options"
"github.com/shniu/gostuff/kvs/server"
"github.com/shniu/gostuff/kvs/storage"
"net/http"
"os"
"os/signal"
"strings"
"syscall"
"time"
)
var logger = log.Logger
func main() {
logger.Info("============ Start KVS ============")
home := flag.String("home", "/tmp/kvs", "Home directory for kvs db")
port := flag.String("port", "3000", "Server listen address")
flag.Parse()
logger.Info("Server config:")
opts := options.Options
if *home != "" && strings.Index(*home, "") >= 0 {
opts.Home = *home
}
opts.Print()
// Init and load kvs
kvs := openKvs()
// Start server
kvServer := serverSetup(kvs, port)
go func() {
logger.Infof("Listening on http://0.0.0.0%s\n", kvServer.Addr)
if e := kvServer.ListenAndServe(); e != http.ErrServerClosed {
logger.Fatalln(e)
}
}()
// Graceful shutdown
graceful(kvServer, kvs, 5*time.Second)
}
// Open Kvsdb
func openKvs() storage.Kvs {
kvs, err := storage.Open(options.Options)
if err != nil {
logger.Fatalf("Initialization of kvs failed!")
}
return kvs
}
func serverSetup(kvs storage.Kvs, port *string) *http.Server {
addr := ":" + os.Getenv("PORT")
if addr == ":" {
addr = fmt.Sprintf(":%s", *port)
}
return &http.Server{
Addr: addr,
Handler: server.New(server.Kvs(kvs)),
}
}
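// graceful blocks until SIGINT or SIGTERM arrives, shuts the HTTP server down
// within the given timeout, and then closes the key-value store.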
func graceful(hs *http.Server, kvs storage.Kvs, timeout time.Duration) {
// close
defer kvs.Close()
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
<-stop
ctx, cancelFunc := context.WithTimeout(context.Background(), timeout)
defer cancelFunc()
logger.Infof("Shutdown with timeout: %s\n", timeout)
if err := hs.Shutdown(ctx); err != nil {
logger.Infof("Error: %v\n", err)
} else {
logger.Info("============ Server graceful stopped, bye bye ============")
}
}
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 |
storm_analysis/sa_library/rebin.py | #!/usr/bin/env python
"""
Rebinning of arrays.
Hazen 2/15
"""
import numpy
import scipy
import scipy.fftpack
#
# This comes directly from here:
#
# http://scipy-cookbook.readthedocs.io/items/Rebinning.html
#
# Copyright 2015, Various authors. Revision 28f8936a.
#
def downSample(a, *args):
'''rebin ndarray data into a smaller ndarray of the same rank whose dimensions
are factors of the original dimensions. eg. An array with 6 columns and 4 rows
can be reduced to have 6,3,2 or 1 columns and 4,2 or 1 rows.
example usages:
>>> a=rand(6,4); b=rebin(a,3,2)
>>> a=rand(6); b=rebin(a,2)
'''
shape = a.shape
lenShape = len(shape)
factor = numpy.asarray(shape)/numpy.asarray(args)
evList = ['a.reshape('] + \
['args[%d],int(factor[%d]),'%(i,i) for i in range(lenShape)] + \
[')'] + ['.sum(%d)'%(i+1) for i in range(lenShape)] + \
['/factor[%d]'%i for i in range(lenShape)]
return eval(''.join(evList))
def upSampleFFT(image, factor):
"""
Upsample using a FFT (high frequencies are set to zero).
"""
xsize = image.shape[0]*factor
ysize = image.shape[1]*factor
    new_image_fft = numpy.zeros((xsize, ysize), dtype=complex)
    image_fft = scipy.fftpack.fft2(image)
    half_x = image.shape[0] // 2
    half_y = image.shape[1] // 2
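    # Copy the four low-frequency quadrants of the original spectrum into the
    # corners of the enlarged array; the untouched bins stay zero, so no new
    # high-frequency content is introduced.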
new_image_fft[:half_x,:half_y] = image_fft[:half_x,:half_y]
new_image_fft[-half_x:,:half_y] = image_fft[half_x:,:half_y]
new_image_fft[:half_x,-half_y:] = image_fft[:half_x,half_y:]
new_image_fft[-half_x:,-half_y:] = image_fft[half_x:,half_y:]
new_image = numpy.real(scipy.fftpack.ifft2(new_image_fft))
new_image[(new_image<0.0)] = 0.0
return new_image
#
# The MIT License
#
# Copyright (c) 2015 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| [] | [] | [] | [] | [] | python | null | null | null |
specs/master/cluster-init/files/host_provider/src/symphony.py | import json
import logging
import requests
from shutil import which
from urllib.parse import urlencode
class MachineStates:
building = "building"
active = "active"
error = "error"
deleting = "deleting"
deleted = "deleted"
class MachineResults:
succeed = "succeed"
executing = "executing"
failed = "fail"
# LSF: failed = "failed"
class RequestStates:
running = "running"
complete = "complete"
complete_with_error = "complete_with_error"
class SymphonyRestClient:
def __init__(self, config, logger=None):
self.config = config
self.logger = logger or logging.getLogger()
self.webserviceUrl = self.rest_url()
self.username = self.config.get('symphony.soam.user', 'Admin')
self.password = self.config.get('symphony.soam.password', 'Admin')
self.token = None
def rest_url(self):
import os
import subprocess
url = None
try:
            # First, try to get the url from ego (supports manual cluster config changes)
# egosh client view REST_HOST_FACTORY_URL | grep DESCRIPTION | sed 's/^DESCRIPTION\:\s//g' -
# add which or command -v
egosh_cmd = "egosh"
if "EGO_BINDIR" in os.environ:
egosh_cmd = os.path.join(os.environ["EGO_BINDIR"], egosh_cmd)
else:
# which returns nothing, so revert to the default
egosh_cmd = which(egosh_cmd) or egosh_cmd
client_view = subprocess.check_output([egosh_cmd, 'client', 'view', 'REST_HOST_FACTORY_URL']).decode()
            description = [x for x in client_view.split('\n') if 'DESCRIPTION' in x][0]
url = description.split()[1]
except:
# Fall back to generating the url from cluster config if we can't find it above
webserviceHostname = self.config.get('symphony.hostfactory.rest_address', '127.0.0.1')
webservicePort = self.config.get('symphony.hostfactory.HF_REST_LISTEN_PORT', '9080')
webserviceSsl = self.config.get('symphony.hostfactory.HF_REST_TRANSPORT', 'TCPIPv4').lower() == 'TCPIPv4SSL'.lower()
prefix = 'https' if webserviceSsl else 'http'
url = '%s://%s:%s/platform/rest/hostfactory' % (prefix, webserviceHostname, webservicePort)
return url.rstrip('/')
def _raise_on_error(self, r):
self.logger.info("Symphony REST API [%s] response (%s)", r.url, r.status_code)
if 400 <= r.status_code < 500:
if r.text:
raise Exception("Invalid Symphony REST call (%s): %s" % (r.status_code, r.text))
else:
raise Exception("Unspecified Symphony REST Error (%s)" % r.status_code)
r.raise_for_status()
def _login(self):
url = self.webserviceUrl + '/auth/login'
r = requests.get(url, auth=(self.username, self.password))
self._raise_on_error(r)
hfcsrftokenBody = r.json()
self.token = hfcsrftokenBody['hfcsrftoken']
return self.token
def update_hostfactory_templates(self, templates):
hfcsrftoken = self._login()
params = {'hfcsrftoken': hfcsrftoken}
url = self.webserviceUrl + '/provider/azurecc/templates'
r = requests.put(url, auth=(self.username, self.password), params=params, json=templates)
self._raise_on_error(r)
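# Illustrative use (sketch only; the config object and template payload shown
# here are placeholders, not values defined in this module):
#   client = SymphonyRestClient(provider_config, logger)
#   client.update_hostfactory_templates({"templates": [...]})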
| [] | [] | ["EGO_BINDIR"] | [] | ["EGO_BINDIR"] | python | 1 | 0 |
idb/cli/__init__.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from abc import ABCMeta, abstractmethod
from argparse import ArgumentParser, Namespace
from typing import AsyncContextManager, Optional, Tuple
from idb.common import plugin
from idb.common.command import Command
from idb.common.companion import Companion
from idb.common.logging import log_call
from idb.common.types import (
Address,
IdbClient,
IdbConnectionException,
IdbManagementClient,
)
from idb.grpc.client import IdbClient as IdbClientGrpc
from idb.grpc.management import IdbManagementClient as IdbManagementClientGrpc
from idb.utils.contextlib import asynccontextmanager
def _parse_companion_info(value: str) -> Tuple[str, int]:
(host, port) = value.rsplit(":", 1)
return (host, int(port))
def _get_management_client(
logger: logging.Logger, args: Namespace
) -> IdbManagementClient:
return IdbManagementClientGrpc(
companion_path=args.companion_path,
logger=logger,
prune_dead_companion=args.prune_dead_companion,
)
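# _get_client connects directly when an explicit --companion host:port is given;
# otherwise it resolves the target by UDID through the management client.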
@asynccontextmanager
async def _get_client(
args: Namespace, logger: logging.Logger
) -> AsyncContextManager[IdbClientGrpc]:
companion = vars(args).get("companion")
if companion is not None:
(host, port) = _parse_companion_info(companion)
async with IdbClientGrpc.build(
host=host, port=port, is_local=args.companion_local, logger=logger
) as client:
yield client
else:
async with IdbManagementClientGrpc(
logger=logger, companion_path=args.companion_path
).from_udid(udid=vars(args).get("udid")) as client:
yield client
class BaseCommand(Command, metaclass=ABCMeta):
def __init__(self) -> None:
super().__init__()
# Will inherit log levels when the log level is set on the base logger in run()
self.logger: logging.Logger = logging.getLogger(self.name)
def add_parser_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"--log",
dest="log_level_deprecated",
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
default=None,
help="Set the logging level. Deprecated: Please place --log before the command name",
)
parser.add_argument(
"--json",
action="store_true",
default=False,
help="Create json structured output",
)
async def run(self, args: Namespace) -> None:
# In order to keep the argparse compatible with old invocations
# We should use the --log after command if set, otherwise use the pre-command --log
logging.getLogger().setLevel(args.log_level_deprecated or args.log_level)
name = self.__class__.__name__
self.logger.debug(f"{name} command run with: {args}")
if args.log_level_deprecated is not None:
self.logger.warning(
f"Setting --log after the command is deprecated, please place it at the start of the invocation"
)
async with log_call(
name=name, metadata=plugin.resolve_metadata(logger=self.logger)
):
await self._run_impl(args)
@abstractmethod
async def _run_impl(self, args: Namespace) -> None:
raise Exception("subclass")
# A command that vends the IdbClient interface.
class ClientCommand(BaseCommand):
def add_parser_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"--udid",
help="Udid of target, can also be set with the IDB_UDID env var",
default=os.environ.get("IDB_UDID"),
)
super().add_parser_arguments(parser)
async def _run_impl(self, args: Namespace) -> None:
address: Optional[Address] = None
try:
async with _get_client(args=args, logger=self.logger) as client:
address = client.address
await self.run_with_client(args=args, client=client)
except IdbConnectionException as ex:
if not args.prune_dead_companion:
raise ex
if address is None:
raise ex
try:
await _get_management_client(logger=self.logger, args=args).disconnect(
destination=address
)
finally:
raise ex
@abstractmethod
async def run_with_client(self, args: Namespace, client: IdbClient) -> None:
pass
# A command that vends the IdbManagementClient interface
class ManagementCommand(BaseCommand):
async def _run_impl(self, args: Namespace) -> None:
await self.run_with_client(
args=args, client=_get_management_client(logger=self.logger, args=args)
)
@abstractmethod
async def run_with_client(
self, args: Namespace, client: IdbManagementClient
) -> None:
pass
# A command that vends the Companion interface
class CompanionCommand(BaseCommand):
async def _run_impl(self, args: Namespace) -> None:
await self.run_with_companion(
args=args,
companion=Companion(
companion_path=args.companion_path,
device_set_path=None,
logger=self.logger,
),
)
@abstractmethod
async def run_with_companion(self, args: Namespace, companion: Companion) -> None:
pass
| [] | [] | ["IDB_UDID"] | [] | ["IDB_UDID"] | python | 1 | 0 |
cmd/gradleExecuteBuild_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/validation"
"github.com/spf13/cobra"
)
type gradleExecuteBuildOptions struct {
Path string `json:"path,omitempty"`
Task string `json:"task,omitempty"`
}
// GradleExecuteBuildCommand This step runs a gradle build command with parameters provided to the step.
func GradleExecuteBuildCommand() *cobra.Command {
const STEP_NAME = "gradleExecuteBuild"
metadata := gradleExecuteBuildMetadata()
var stepConfig gradleExecuteBuildOptions
var startTime time.Time
var logCollector *log.CollectorHook
var splunkClient *splunk.Splunk
telemetryClient := &telemetry.Telemetry{}
var createGradleExecuteBuildCmd = &cobra.Command{
Use: STEP_NAME,
Short: "This step runs a gradle build command with parameters provided to the step.",
Long: `This step runs a gradle build command with parameters provided to the step.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient = &splunk.Splunk{}
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
validation, err := validation.New(validation.WithJSONNamesForStructFields(), validation.WithPredefinedErrorMessages())
if err != nil {
return err
}
if err = validation.ValidateStruct(stepConfig); err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
stepTelemetryData := telemetry.CustomData{}
stepTelemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
stepTelemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
stepTelemetryData.ErrorCategory = log.GetErrorCategory().String()
stepTelemetryData.PiperCommitHash = GitCommit
telemetryClient.SetData(&stepTelemetryData)
telemetryClient.Send()
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Send(telemetryClient.GetData(), logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetryClient.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
gradleExecuteBuild(stepConfig, &stepTelemetryData)
stepTelemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addGradleExecuteBuildFlags(createGradleExecuteBuildCmd, &stepConfig)
return createGradleExecuteBuildCmd
}
func addGradleExecuteBuildFlags(cmd *cobra.Command, stepConfig *gradleExecuteBuildOptions) {
cmd.Flags().StringVar(&stepConfig.Path, "path", os.Getenv("PIPER_path"), "Path to the folder with gradle.build file which should be executed.")
cmd.Flags().StringVar(&stepConfig.Task, "task", `build`, "Gradle task that should be executed.")
}
// retrieve step metadata
func gradleExecuteBuildMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "gradleExecuteBuild",
Aliases: []config.Alias{},
Description: "This step runs a gradle build command with parameters provided to the step.",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "path",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "buildGradlePath"}},
Default: os.Getenv("PIPER_path"),
},
{
Name: "task",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: `build`,
},
},
},
Containers: []config.Container{
{Name: "gradle", Image: "gradle:4.7.0-jdk8-alpine"},
},
},
}
return theMetaData
}
| ["\"PIPER_path\"", "\"PIPER_path\""] | [] | ["PIPER_path"] | [] | ["PIPER_path"] | go | 1 | 0 |
go/src/github.com/hashicorp/terraform/builtin/providers/chef/provider_test.go | package chef
import (
"os"
"testing"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
// To run these acceptance tests, you will need access to a Chef server.
// An easy way to get one is to sign up for a hosted Chef server account
// at https://manage.chef.io/signup , after which your base URL will
// be something like https://api.opscode.com/organizations/example/ .
// You will also need to create a "client" and write its private key to
// a file somewhere.
//
// You can then set the following environment variables to make these
// tests work:
// CHEF_SERVER_URL to the base URL as described above.
// CHEF_CLIENT_NAME to the name of the client object you created.
// CHEF_PRIVATE_KEY_FILE to the path to the private key file you created.
//
// You will probably need to edit the global permissions on your Chef
// Server account to allow this client (or all clients, if you're lazy)
// to have both List and Create access on all types of object:
// https://manage.chef.io/organizations/saymedia/global_permissions
//
// With all of that done, you can run like this:
// make testacc TEST=./builtin/providers/chef
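//
// For example (all values below are placeholders, not working credentials):
//
//  export CHEF_SERVER_URL=https://api.opscode.com/organizations/example/
//  export CHEF_CLIENT_NAME=example-client
//  export CHEF_PRIVATE_KEY_FILE=$HOME/.chef/example-client.pem
//  make testacc TEST=./builtin/providers/chef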
var testAccProviders map[string]terraform.ResourceProvider
var testAccProvider *schema.Provider
func init() {
testAccProvider = Provider().(*schema.Provider)
testAccProviders = map[string]terraform.ResourceProvider{
"chef": testAccProvider,
}
}
func TestProvider(t *testing.T) {
if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestProvider_impl(t *testing.T) {
var _ terraform.ResourceProvider = Provider()
}
func testAccPreCheck(t *testing.T) {
if v := os.Getenv("CHEF_SERVER_URL"); v == "" {
t.Fatal("CHEF_SERVER_URL must be set for acceptance tests")
}
if v := os.Getenv("CHEF_CLIENT_NAME"); v == "" {
t.Fatal("CHEF_CLIENT_NAME must be set for acceptance tests")
}
if v := os.Getenv("CHEF_PRIVATE_KEY_FILE"); v == "" {
t.Fatal("CHEF_PRIVATE_KEY_FILE must be set for acceptance tests")
}
}
| ["\"CHEF_SERVER_URL\"", "\"CHEF_CLIENT_NAME\"", "\"CHEF_PRIVATE_KEY_FILE\""] | [] | ["CHEF_CLIENT_NAME", "CHEF_PRIVATE_KEY_FILE", "CHEF_SERVER_URL"] | [] | ["CHEF_CLIENT_NAME", "CHEF_PRIVATE_KEY_FILE", "CHEF_SERVER_URL"] | go | 3 | 0 |
wisps/data_analysis/path_parser.py | # -*- coding: utf-8 -*-
"""
After the introduction of version 6.2, all wisp data and hst-3d are now on MAST
3D-HST has not added any new data nor changed their directory structure,
but that's not the case for WISP
Aim: parse new directories to make them compatible with v5.0
"""
import os
import glob
from ..utils import memoize_func
REMOTE_FOLDER=os.environ['WISP_SURVEY_DATA']
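# REMOTE_FOLDER is the root of the local WISP / 3D-HST data mirror; importing this
# module raises a KeyError if the WISP_SURVEY_DATA environment variable is unset.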
@memoize_func
def get_image_path(name, spectrum_path):
#print (name)
##returns the image path without going through the whole thing again
if name.lower().startswith('par') or name.startswith('hlsp'):
survey='wisps'
elif name.startswith('goo') or name.startswith('ud') or name.startswith('aeg') or name.startswith('cos'):
survey='hst3d'
if survey=='wisps':
folder=name.split('wfc3_')[-1].split('wfc3_')[-1].split('-')[0]
if '_wfc3' in name:
name=(name.split('wfc3_')[-1]).split('_g141')[0]
#print (name)
#print (REMOTE_FOLDER+'/wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'*/2dstamp/hlsp_wisp_hst_wfc3*'+name+'*stamp2d.fits')
stamp_image_path=glob.glob(REMOTE_FOLDER+'/wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'*/2dstamp/hlsp_wisp_hst_wfc3*'+name+'*stamp2d.fits')[0]
if survey=='hst3d':
#print (spectrum_path.split('/1D/ASCII/')[0]+'/2D/'+'FITS/'+name.split('1D')[0]+'*2D.fits')
stamp_image_path=glob.glob(spectrum_path.split('/1D/ASCII/')[0]+'/2D/'+'FITS/'+name.split('1D')[0]+'*2D.fits')[0]
#print ('stamp image',stamp_image_path )
#print (survey, spectrum_path, stamp_image_path)
return survey, stamp_image_path
@memoize_func
def parse_path(name, version):
"""
Parse a filename and retrieve all the survey info at once
"""
survey=None
spectrum_path=None
stamp_image_path=None
if name.startswith('Par') or name.startswith('par') or name.startswith('hlsp'):
survey='wisps'
elif name.startswith('goo') or name.startswith('ud') or name.startswith('aeg') or name.startswith('cos'):
survey='hst3d'
else:
survey=None
if survey=='wisps':
spectrum_path=_run_search(name)
folder=name.split('wfc3_')[-1].split('wfc3_')[-1].split('-')[0]
name=name.split('_wfc3_')[-1].split('a_g102')[0]
stamp_image_path=glob.glob(REMOTE_FOLDER+'/wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'*/2dstamp/hlsp_wisp_hst_wfc3*'+name+'*a_g141_v6.2_stamp2d.fits')[0]
if survey=='hst3d':
spectrum_path=_run_search(name)
s= spectrum_path.split('/1D/ASCII/')[0]+'/2D/'+'FITS/'+name.split('1D')[0]+'*2D.fits'
stamp_image_path=glob.glob(s.replace('g141', 'G141') )[0]
#print ('stamp image',stamp_image_path )
#print (survey, spectrum_path, stamp_image_path)
#blah
return survey, spectrum_path, stamp_image_path
@memoize_func
def _run_search(name):
#internal function used to search path given spectrum name
path=''
prefix= name[:3]
if name.startswith('Par') or name.startswith('par') or name.startswith('hlsp'):
#search version 6
if name.endswith('.dat'):
n=name.split('.dat')[0]
folder=name.split('wfc3_')[-1].split('wfc3_')[-1].split('-')[0]
else:
folder=name.split('-')[0]
n=name
path1=REMOTE_FOLDER+'wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'/1dspectra/*'+n+'*a_g141_*'
path2=REMOTE_FOLDER+'wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'/1dspectra/*'+n+'*a_g102-g141_*'
path=glob.glob(path1)[0]
if len(glob.glob(path2)) > 0:
path=glob.glob(path2)[0]
#except:
# #search version 5
# folder=name.split('_')[0]
# path=REMOTE_FOLDER+'wisps/'+folder+'*/Spectra/*'+name+'.dat'
# #print (path)
# path=glob.glob(path)[0]
if prefix in ['aeg', 'cos', 'uds', 'goo']:
syls= (name.split('-'))
str_= REMOTE_FOLDER+'*'+prefix+'*'+'/*'+prefix+ '*'+syls[1]+'*'+'/1D/ASCII/'+prefix+'*'+ syls[1]+ '*'+syls[2]+'*'
#print (str_)
path=glob.glob(str_.replace('g141', 'G141'))[0]
return path
@memoize_func
def return_path(name):
#print(name)wisps
if type(name) is list:
paths=[]
for p in name:
paths.append( _run_search(p))
return paths
if type(name) is str:
return _run_search(name)
@memoize_func
def return_spectrum_name(path):
""" returns name given path in the wisp folder"""
name=''
if path.endswith('.dat'):
name= path.split('.dat')[0].split('/')[-1]
else:
name=path.split('.ascii')[0].split('/')[-1].split('.')[0]
    return name
| [] | [] | ["WISP_SURVEY_DATA"] | [] | ["WISP_SURVEY_DATA"] | python | 1 | 0 |
components/api-controller/cmd/controller/main.go | package main
import (
"os"
"path/filepath"
"time"
istioAuthenticationClient "github.com/kyma-project/kyma/components/api-controller/pkg/clients/authentication.istio.io/clientset/versioned"
kyma "github.com/kyma-project/kyma/components/api-controller/pkg/clients/gateway.kyma.cx/clientset/versioned"
kymaInformers "github.com/kyma-project/kyma/components/api-controller/pkg/clients/gateway.kyma.cx/informers/externalversions"
istioNetworkingClient "github.com/kyma-project/kyma/components/api-controller/pkg/clients/networking.istio.io/clientset/versioned"
authenticationV2 "github.com/kyma-project/kyma/components/api-controller/pkg/controller/authentication/v2"
"github.com/kyma-project/kyma/components/api-controller/pkg/controller/crd"
istioNetworkingV1 "github.com/kyma-project/kyma/components/api-controller/pkg/controller/networking/v1"
serviceV1 "github.com/kyma-project/kyma/components/api-controller/pkg/controller/service/v1"
"github.com/kyma-project/kyma/components/api-controller/pkg/controller/v1alpha2"
log "github.com/sirupsen/logrus"
apiExtensionsClient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
k8sClient "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
func main() {
log.SetLevel(getLoggerLevel())
log.Info("Starting API controller application...")
stop := make(chan struct{})
jwtDefaultConfig := initJwtDefaultConfig()
istioGateway := getIstioGateway()
kubeConfig := initKubeConfig()
domainName := initDomainName()
apiExtensionsClientSet := apiExtensionsClient.NewForConfigOrDie(kubeConfig)
registerer := crd.NewRegistrar(apiExtensionsClientSet)
registerer.Register(v1alpha2.Crd(domainName))
k8sClientSet := k8sClient.NewForConfigOrDie(kubeConfig)
serviceV1Interface := serviceV1.New(k8sClientSet)
istioNetworkingClientSet := istioNetworkingClient.NewForConfigOrDie(kubeConfig)
istioNetworkingV1Interface := istioNetworkingV1.New(istioNetworkingClientSet, k8sClientSet, istioGateway)
istioAuthenticationClientSet := istioAuthenticationClient.NewForConfigOrDie(kubeConfig)
authenticationV2Interface := authenticationV2.New(istioAuthenticationClientSet, jwtDefaultConfig)
kymaClientSet := kyma.NewForConfigOrDie(kubeConfig)
internalInformerFactory := kymaInformers.NewSharedInformerFactory(kymaClientSet, time.Second*30)
go internalInformerFactory.Start(stop)
v1alpha2Controller := v1alpha2.NewController(kymaClientSet, istioNetworkingV1Interface, serviceV1Interface, authenticationV2Interface, internalInformerFactory, domainName)
v1alpha2Controller.Run(2, stop)
}
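// initKubeConfig prefers the kubeconfig file under $HOME/.kube/config and falls
// back to the in-cluster service-account configuration when that fails.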
func initKubeConfig() *rest.Config {
kubeConfigLocation := filepath.Join(os.Getenv("HOME"), ".kube", "config")
kubeConfig, err := clientcmd.BuildConfigFromFlags("", kubeConfigLocation)
if err != nil {
log.Warn("unable to build kube config from file. Trying in-cluster configuration")
kubeConfig, err = rest.InClusterConfig()
if err != nil {
log.Fatal("cannot find Service Account in pod to build in-cluster kube config")
}
}
return kubeConfig
}
func getLoggerLevel() log.Level {
logLevel := os.Getenv("API_CONTROLLER_LOG_LEVEL")
if logLevel != "" {
level, err := log.ParseLevel(logLevel)
if err != nil {
println("Error while setting log level: " + logLevel + ". Root cause: " + err.Error())
} else {
return level
}
}
return log.InfoLevel
}
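// getIstioGateway reads the required GATEWAY_FQDN environment variable and
// terminates the process if it is missing.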
func getIstioGateway() string {
gateway := os.Getenv("GATEWAY_FQDN")
if gateway == "" {
log.Fatal("gateway not provided. Please provide env variables GATEWAY_FQDN")
}
return gateway
}
func initJwtDefaultConfig() authenticationV2.JwtDefaultConfig {
issuer := os.Getenv("DEFAULT_ISSUER")
jwksURI := os.Getenv("DEFAULT_JWKS_URI")
if issuer == "" || jwksURI == "" {
log.Fatal("default issuer or jwksURI not provided. Please provide env variables DEFAULT_ISSUER and DEFAULT_JWKS_URI")
}
return authenticationV2.JwtDefaultConfig{
Issuer: issuer,
JwksUri: jwksURI,
}
}
func initDomainName() string {
domainName := os.Getenv("DOMAIN_NAME")
if domainName == "" {
log.Fatal("domain name not provided. Please provide env variable DOMAIN_NAME")
}
return domainName
}
| ["\"HOME\"", "\"API_CONTROLLER_LOG_LEVEL\"", "\"GATEWAY_FQDN\"", "\"DEFAULT_ISSUER\"", "\"DEFAULT_JWKS_URI\"", "\"DOMAIN_NAME\""] | [] | ["GATEWAY_FQDN", "DEFAULT_ISSUER", "DEFAULT_JWKS_URI", "DOMAIN_NAME", "HOME", "API_CONTROLLER_LOG_LEVEL"] | [] | ["GATEWAY_FQDN", "DEFAULT_ISSUER", "DEFAULT_JWKS_URI", "DOMAIN_NAME", "HOME", "API_CONTROLLER_LOG_LEVEL"] | go | 6 | 0 |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cooksite.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 |
examples/adxbuyer/v201506/basic_operations/update_placement.py | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates the bid of a placement.
To add a placement, run add_placements.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: AdGroupCriterionService.mutate
"""
__author__ = '[email protected] (Kevin Winter)'
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
CRITERION_ID = 'INSERT_PLACEMENT_CRITERION_ID_HERE'
def main(client, ad_group_id, criterion_id):
# Initialize appropriate service.
ad_group_criterion_service = client.GetService(
'AdGroupCriterionService', version='v201506')
# Construct operations and update bids.
operations = [{
'operator': 'SET',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Placement',
'id': criterion_id,
},
'biddingStrategyConfiguration': {
'bids': [
{
'xsi_type': 'CpmBid',
'bid': {
'microAmount': '1000000'
},
}
]
}
}
}]
ad_group_criteria = ad_group_criterion_service.mutate(operations)
# Display results.
if 'value' in ad_group_criteria:
for criterion in ad_group_criteria['value']:
if criterion['criterion']['Criterion.Type'] == 'Keyword':
print ('Ad group criterion with ad group id \'%s\' and criterion id '
'\'%s\' had its bid set to \'%s\'.'
% (criterion['adGroupId'], criterion['criterion']['id'],
criterion['bids']['maxCpc']['amount']['microAmount']))
else:
print 'No ad group criteria were updated.'
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID, CRITERION_ID)
| [] | [] | [] | [] | [] | python | null | null | null |
tests/e2e/test_ironman.py | # Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import json
import os
from pathlib import Path
from shutil import rmtree
import pytest
from flexmock import flexmock
from sandcastle import (
Sandcastle,
VolumeSpec,
SandcastleTimeoutReached,
MappedDir,
)
from sandcastle.exceptions import SandcastleCommandFailed
from sandcastle.utils import run_command, get_timestamp_now
from tests.conftest import (
SANDBOX_IMAGE,
NAMESPACE,
run_test_within_pod,
SANDCASTLE_MOUNTPOINT,
PACKIT_SRPM_CMD,
)
def purge_dir_content(di: Path):
"""remove everything in the dir but not the dir itself"""
dir_items = list(di.iterdir())
if dir_items:
print(f"Removing {di} content: {[i.name for i in dir_items]}")
for item in dir_items:
# symlink pointing to a dir is also a dir and a symlink
if item.is_file() or item.is_symlink():
item.unlink()
else:
rmtree(item)
def test_exec_env():
o = Sandcastle(image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE)
o.run()
try:
env_list = o.exec(
command=["bash", "-c", "env"], env={"A": None, "B": "", "C": "c", "D": 1}
)
assert "A=\n" in env_list
assert "B=\n" in env_list
assert "C=c\n" in env_list
assert "D=1\n" in env_list
finally:
o.delete_pod()
def test_run_failure():
o = Sandcastle(image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE)
try:
with pytest.raises(SandcastleCommandFailed) as ex:
o.run(command=["ls", "/hauskrecht"])
assert (
ex.value.output
== "ls: cannot access '/hauskrecht': No such file or directory\n"
)
assert isinstance(ex.value, SandcastleCommandFailed)
assert "'exit_code': 2" in ex.value.reason
assert "'reason': 'Error'" in ex.value.reason
assert ex.value.rc == 2
finally:
o.delete_pod()
@pytest.mark.parametrize(
"cmd,should_fail",
(
(["ls", "/hauskrecht"], True),
(["ls", "/haus*ht"], True),
(["ls", "/etc/*wd"], True),
(["ls", "/etc/passwd"], False),
(["bash", "-c", "ls /etc/passwd"], False),
(["bash", "-c", "ls /etc/*wd"], False),
),
)
def test_exec(cmd, should_fail):
o = Sandcastle(image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE)
o.run()
try:
if should_fail:
with pytest.raises(SandcastleCommandFailed) as ex:
o.exec(command=cmd)
assert "No such file or directory\n" in ex.value.output
assert "ls: cannot access " in ex.value.output
assert isinstance(ex.value, SandcastleCommandFailed)
assert "2" in ex.value.reason
assert "ExitCode" in ex.value.reason
assert "NonZeroExitCode" in ex.value.reason
assert ex.value.rc == 2
else:
assert o.exec(command=cmd)
finally:
o.delete_pod()
@pytest.mark.skipif(
"KUBERNETES_SERVICE_HOST" not in os.environ,
reason="Not running in a pod, skipping.",
)
def test_dir_sync(tmp_path):
p = Path("/asdqwe")
vs = VolumeSpec(path=p, pvc_from_env="SANDCASTLE_PVC")
o = Sandcastle(
image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE, volume_mounts=[vs]
)
o.run()
d = p.joinpath("dir")
d.mkdir()
d.joinpath("file").write_text("asd")
try:
o.exec(command=["bash", "-c", "ls -lha /asdqwe/dir/file"])
o.exec(command=["bash", "-c", "[[ 'asd' == $(cat /asdqwe/dir/file) ]]"])
o.exec(command=["bash", "-c", "mkdir /asdqwe/dir/d"])
o.exec(command=["bash", "-c", "touch /asdqwe/dir/f"])
assert Path("/asdqwe/dir/d").is_dir()
assert Path("/asdqwe/dir/f").is_file()
finally:
o.delete_pod()
@pytest.mark.skipif(
"KUBERNETES_SERVICE_HOST" not in os.environ,
reason="Not running in a pod, skipping.",
)
def test_pod_sa_not_in_sandbox(tmp_path):
o = Sandcastle(image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE)
sa_path = "/var/run/secrets/kubernetes.io/serviceaccount"
with pytest.raises(SandcastleCommandFailed) as e:
o.run(command=["ls", "-lha", sa_path])
try:
assert (
e.value.output.strip()
== f"ls: cannot access '{sa_path}': No such file or directory"
)
assert e.value.rc == 2
finally:
o.delete_pod()
def test_exec_succ_pod(tmp_path):
o = Sandcastle(image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE)
# we mimic here that the pod has finished and we are still running commands inside
o.run(command=["true"])
try:
with pytest.raises(SandcastleTimeoutReached) as e:
o.exec(command=["true"])
assert "timeout" in str(e.value)
finally:
o.delete_pod()
def test_timeout(tmp_path: Path):
"""
make sure exec runs are handled well when the pod times out
and we provide output of the command in the exception
"""
tmp_path.joinpath("test").write_text("test")
m_dir = MappedDir(tmp_path, SANDCASTLE_MOUNTPOINT, with_interim_pvc=True)
o = Sandcastle(
image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE, mapped_dir=m_dir
)
# we are going to trick sandcastle into thinking we are using the default command
# but we are not, b/c we don't want to wait 30 minutes for it time out in CI
o.set_pod_manifest(["sleep", "7"])
flexmock(Sandcastle).should_receive("set_pod_manifest").and_return(None).once()
o.run()
try:
# sadly, openshift does not tell us in any way that the container finished
# and that's why our exec got killed
with pytest.raises(SandcastleCommandFailed) as e:
# run a long running command and watch it get killed
o.exec(command=["bash", "-c", "while true; do date; sleep 1; done"])
assert "Command failed" in str(e.value)
assert e.value.rc == 137
assert e.value.output # we wanna be sure there was some output
finally:
o.delete_pod()
@pytest.mark.parametrize(
"cmd,should_fail",
(
(["ls", "/hauskrecht"], True),
(["ls", "/haus*ht"], True),
(["ls", "/etc/*wd"], True),
(["ls", "/etc/passwd"], False),
(["bash", "-c", "ls /etc/passwd"], False),
(["bash", "-c", "ls /etc/*wd"], False),
),
)
def test_md_exec(tmp_path, cmd, should_fail):
"""
make sure commands are exec'd properly in the sandbox with mapped dirs
this is what we use in p-s with RW vols
"""
# something needs to be inside
tmp_path.joinpath("dummy.file").write_text("something")
m_dir = MappedDir(tmp_path, SANDCASTLE_MOUNTPOINT, with_interim_pvc=True)
o = Sandcastle(
image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE, mapped_dir=m_dir
)
o.run()
try:
if should_fail:
with pytest.raises(SandcastleCommandFailed) as ex:
o.exec(command=cmd)
assert "No such file or directory\n" in ex.value.output
assert "ls: cannot access " in ex.value.output
assert isinstance(ex.value, SandcastleCommandFailed)
assert "2" in ex.value.reason
assert "ExitCode" in ex.value.reason
assert "NonZeroExitCode" in ex.value.reason
assert ex.value.rc == 2
else:
o.exec(command=cmd)
finally:
o.delete_pod()
def test_md_multiple_exec(tmp_path):
tmp_path.joinpath("stark").mkdir()
tmp_path.joinpath("qwe").write_text("Hello, Tony!")
m_dir = MappedDir(tmp_path, SANDCASTLE_MOUNTPOINT, with_interim_pvc=True)
o = Sandcastle(
image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE, mapped_dir=m_dir
)
o.run()
try:
out = o.exec(command=["ls", "./qwe"])
assert "qwe" in out
o.exec(command=["touch", "./stark/asd"])
assert tmp_path.joinpath("stark/asd").is_file()
o.exec(command=["touch", "./zxc"])
zxc = tmp_path.joinpath("zxc")
assert zxc.is_file()
zxc.write_text("vbnm")
assert "vbnm" == o.exec(command=["cat", "./zxc"])
assert o.exec(command=["pwd"], cwd="stark/").rstrip("\n").endswith("/stark")
finally:
o.delete_pod()
def test_file_got_changed(tmp_path):
m_dir = MappedDir(tmp_path, SANDCASTLE_MOUNTPOINT, with_interim_pvc=True)
p = m_dir.local_dir.joinpath("qwe")
p.write_text("Hello, Tony!")
o = Sandcastle(
image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE, mapped_dir=m_dir
)
o.run()
try:
o.exec(command=["bash", "-c", "echo '\nHello, Tony Stark!' >>./qwe"])
assert "Hello, Tony!\nHello, Tony Stark!\n" == p.read_text()
finally:
o.delete_pod()
def test_command_long_output(tmp_path):
o = Sandcastle(image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE)
o.run()
command = ["cat", "/etc/services"]
try:
out = o.exec(command=command)
finally:
o.delete_pod()
# random strings from /etc/services: beginning, middle, end
assert "ssh" in out
assert "7687/tcp" in out
assert "RADIX" in out
def test_user_is_set(tmp_path):
"""
verify that $HOME is writable and commands are executed
using a user which has an passwd entry
"""
o = Sandcastle(image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE)
o.run()
try:
assert o.exec(command=["getent", "passwd", "sandcastle"]).startswith(
"sandcastle:x:"
)
assert o.exec(command=["id", "-u", "-n"]).strip() == "sandcastle"
assert o.exec(
command=[
"bash",
"-c",
"touch ~/.i.want.to.write.to.home "
"&& ls -l /home/sandcastle/.i.want.to.write.to.home",
]
)
finally:
o.delete_pod()
@pytest.mark.parametrize(
"git_url,branch,command",
(
(
"https://github.com/packit/hello-world.git",
"main",
PACKIT_SRPM_CMD,
),
("https://github.com/packit/ogr.git", "main", PACKIT_SRPM_CMD),
(
"https://github.com/cockpit-project/cockpit-podman.git",
"master",
# this downloads megabytes of npm modules
# and verifies we can run npm in sandcastle
PACKIT_SRPM_CMD,
),
),
)
def test_md_e2e(tmp_path, git_url, branch, command):
# running in k8s
if "KUBERNETES_SERVICE_HOST" in os.environ:
t = Path(SANDCASTLE_MOUNTPOINT, f"clone-{get_timestamp_now()}")
else:
t = tmp_path
m_dir = MappedDir(t, SANDCASTLE_MOUNTPOINT, with_interim_pvc=True)
run_command(["git", "clone", "-b", branch, git_url, t])
o = Sandcastle(
image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE, mapped_dir=m_dir
)
o.run()
try:
output = o.exec(command=command)
print(output)
assert list(t.glob("*.src.rpm"))
o.exec(command=["packit", "--help"])
with pytest.raises(SandcastleCommandFailed) as ex:
o.exec(command=["bash", "-c", "echo 'I quit!'; exit 120"])
e = ex.value
assert "I quit!" in e.output
assert 120 == e.rc
assert "command terminated with non-zero exit code" in e.reason
finally:
o.delete_pod()
def test_md_new_namespace(tmp_path):
m_dir = MappedDir(tmp_path, SANDCASTLE_MOUNTPOINT, with_interim_pvc=True)
d = tmp_path.joinpath("dir")
d.mkdir()
d.joinpath("file").write_text("asd")
# running within openshift
namespace = os.getenv("SANDCASTLE_TESTS_NAMESPACE")
if not namespace:
# running on a host - you can't create new projects from inside a pod
namespace = f"sandcastle-tests-{get_timestamp_now()}"
c = ["oc", "new-project", namespace]
run_command(c)
try:
o = Sandcastle(
image_reference=SANDBOX_IMAGE,
k8s_namespace_name=namespace,
mapped_dir=m_dir,
)
o.run()
try:
o.exec(command=["ls", "-lha", "./dir/file"])
assert d.joinpath("file").read_text() == "asd"
cmd = [
"bash",
"-c",
"curl -skL https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/metrics",
]
out = o.exec(command=cmd)
j = json.loads(out)
# a small proof we are safe
assert j["reason"] == "Forbidden"
finally:
o.delete_pod()
finally:
if not os.getenv("SANDCASTLE_TESTS_NAMESPACE"):
run_command(["oc", "delete", "project", namespace])
run_command(["oc", "project", NAMESPACE])
# To verify this:
# tar: lost+found: Cannot utime: Operation not permitted
# tar: lost+found: Cannot change mode to rwxr-sr-x: Operation not permitted
# tar: Exiting with failure status due to previous errors
def test_lost_found_is_ignored(tmp_path):
tmp_path.joinpath("lost+found").mkdir()
tmp_path.joinpath("file").write_text("asd")
m_dir = MappedDir(tmp_path, SANDCASTLE_MOUNTPOINT)
o = Sandcastle(
image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE, mapped_dir=m_dir
)
o.run()
try:
o.exec(command=["ls", "-lha", "./"])
with pytest.raises(SandcastleCommandFailed) as ex:
o.exec(command=["ls", "./lost+found"])
assert "No such file or directory" in str(ex.value)
finally:
o.delete_pod()
def test_changing_mode(tmp_path):
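    # Verifies that permission bits set on the host side (0o777, 0o755, 0o775) are
    # reported identically by `stat` inside the sandbox after the directory sync.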
# running in k8s
if "KUBERNETES_SERVICE_HOST" in os.environ:
t = Path(SANDCASTLE_MOUNTPOINT)
else:
t = tmp_path
m_dir = MappedDir(t, SANDCASTLE_MOUNTPOINT)
fi = t.joinpath("file")
fi.write_text("asd")
fi.chmod(mode=0o777)
fi2 = t.joinpath("file2")
fi2.write_text("qwe")
fi2.chmod(mode=0o755)
di = t.joinpath("dir")
di.mkdir()
di.chmod(mode=0o775)
o = Sandcastle(
image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE, mapped_dir=m_dir
)
o.run()
try:
out = o.exec(command=["stat", "-c", "%a", "./file"]).strip()
assert "777" == out
stat_oct = oct(fi.stat().st_mode)[-3:]
assert stat_oct == "777"
out = o.exec(command=["stat", "-c", "%a", "./file2"]).strip()
assert "755" == out
stat_oct = oct(fi2.stat().st_mode)[-3:]
assert stat_oct == "755"
out = o.exec(command=["stat", "-c", "%a", "./dir"]).strip()
assert "775" == out
stat_oct = oct(di.stat().st_mode)[-3:]
assert stat_oct == "775"
finally:
purge_dir_content(t)
o.delete_pod()
@pytest.mark.parametrize(
"test_name,kwargs",
(
("test_exec", None),
("test_exec_env", None),
("test_md_exec", None),
("test_run_failure", None),
("test_dir_sync", {"with_pv_at": "/asdqwe"}),
("test_pod_sa_not_in_sandbox", None),
("test_exec_succ_pod", None),
("test_timeout", None),
("test_md_multiple_exec", None),
("test_file_got_changed", None),
("test_md_e2e", {"with_pv_at": SANDCASTLE_MOUNTPOINT}),
("test_lost_found_is_ignored", None),
("test_md_new_namespace", {"new_namespace": True}),
("test_changing_mode", {"with_pv_at": SANDCASTLE_MOUNTPOINT}),
("test_command_long_output", None),
("test_user_is_set", None),
),
)
def test_from_pod(test_name, kwargs):
"""initiate e2e: spawn a new openshift pod, from which every test case is being run"""
path = f"tests/e2e/test_ironman.py::{test_name}"
kwargs = kwargs or {}
run_test_within_pod(path, **kwargs)
| [] | [] | ["SANDCASTLE_TESTS_NAMESPACE"] | [] | ["SANDCASTLE_TESTS_NAMESPACE"] | python | 1 | 0 |
scripts/bert/dataset.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and DMLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT datasets."""
__all__ = ['MRPCDataset', 'ClassificationTransform', 'BERTTransform']
import os
import numpy as np
try:
from tokenizer import convert_to_unicode
except ImportError:
from .tokenizer import convert_to_unicode
from gluonnlp.data import TSVDataset
from gluonnlp.data.registry import register
@register(segment=['train', 'dev', 'test'])
class MRPCDataset(TSVDataset):
"""The Microsoft Research Paraphrase Corpus dataset.
Parameters
----------
segment : str or list of str, default 'train'
        Dataset segment. Options are 'train', 'dev', 'test'.
root : str, default '$GLUE_DIR/MRPC'
Path to the folder which stores the MRPC dataset.
        The dataset can be downloaded by the following script:
https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e
"""
def __init__(self, segment='train',
root=os.path.join(os.getenv('GLUE_DIR', 'glue_data'), 'MRPC')):
self._supported_segments = ['train', 'dev', 'test']
assert segment in self._supported_segments, 'Unsupported segment: %s' % segment
path = os.path.join(root, '%s.tsv' % segment)
A_IDX, B_IDX, LABEL_IDX = 3, 4, 0
fields = [A_IDX, B_IDX, LABEL_IDX]
super(MRPCDataset, self).__init__(path, num_discard_samples=1, field_indices=fields)
@staticmethod
def get_labels():
"""Get classification label ids of the dataset."""
return ['0', '1']
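# Illustrative MRPCDataset usage (assumes the GLUE MRPC TSV files are present
# under $GLUE_DIR/MRPC, as described in the class docstring):
#   dataset = MRPCDataset('dev')
#   text_a, text_b, label = dataset[0]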
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class BERTTransform(object):
"""BERT style data transformation.
Parameters
----------
tokenizer : BasicTokenizer or FullTokensizer.
Tokenizer for the sentences.
max_seq_length : int.
Maximum sequence length of the sentences.
pad : bool, default True
Whether to pad the sentences to maximum length.
pair : bool, default True
Whether to transform sentences or sentence pairs.
"""
def __init__(self, tokenizer, max_seq_length, pad=True, pair=True):
self._tokenizer = tokenizer
self._max_seq_length = max_seq_length
self._pad = pad
self._pair = pair
def __call__(self, line):
"""Perform transformation for sequence pairs or single sequences.
The transformation is processed in the following steps:
- tokenize the input sequences
- insert [CLS], [SEP] as necessary
- generate type ids to indicate whether a token belongs to the first
sequence or the second sequence.
- generate valid length
For sequence pairs, the input is a tuple of 2 strings:
text_a, text_b.
Inputs:
text_a: 'is this jacksonville ?'
text_b: 'no it is not'
Tokenization:
text_a: 'is this jack ##son ##ville ?'
text_b: 'no it is not .'
Processed:
tokens: '[CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]'
type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
valid_length: 14
For single sequences, the input is a tuple of single string: text_a.
Inputs:
text_a: 'the dog is hairy .'
Tokenization:
text_a: 'the dog is hairy .'
Processed:
text_a: '[CLS] the dog is hairy . [SEP]'
type_ids: 0 0 0 0 0 0 0
valid_length: 7
Parameters
----------
line: tuple of str
Input strings. For sequence pairs, the input is a tuple of 3 strings:
(text_a, text_b). For single sequences, the input is a tuple of single
string: (text_a,).
Returns
-------
np.array: input token ids in 'int32', shape (batch_size, seq_length)
np.array: valid length in 'int32', shape (batch_size,)
np.array: input token type ids in 'int32', shape (batch_size, seq_length)
"""
# convert to unicode
text_a = line[0]
text_a = convert_to_unicode(text_a)
if self._pair:
assert len(line) == 2
text_b = line[1]
text_b = convert_to_unicode(text_b)
tokens_a = self._tokenizer.tokenize(text_a)
tokens_b = None
if self._pair:
tokens_b = self._tokenizer.tokenize(text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, self._max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > self._max_seq_length - 2:
tokens_a = tokens_a[0:(self._max_seq_length - 2)]
# The embedding vectors for `type=0` and `type=1` were learned during
# pre-training and are added to the wordpiece embedding vector
# (and position vector). This is not *strictly* necessary since
# the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append(self._tokenizer.vocab.cls_token)
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append(self._tokenizer.vocab.sep_token)
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append(self._tokenizer.vocab.sep_token)
segment_ids.append(1)
input_ids = self._tokenizer.convert_tokens_to_ids(tokens)
# The valid length of sentences. Only real tokens are attended to.
valid_length = len(input_ids)
if self._pad:
# Zero-pad up to the sequence length.
padding_length = self._max_seq_length - valid_length
# use padding tokens for the input_ids and 0 for segment_ids
input_ids.extend(
[self._tokenizer.vocab[self._tokenizer.vocab.padding_token]] * padding_length)
segment_ids.extend([0] * padding_length)
return np.array(input_ids, dtype='int32'), np.array(valid_length, dtype='int32'),\
np.array(segment_ids, dtype='int32')
class ClassificationTransform(object):
"""Dataset Transformation for BERT-style Sentence Classification.
Parameters
----------
tokenizer : BasicTokenizer or FullTokensizer.
Tokenizer for the sentences.
labels : list of int.
List of all label ids for the classification task.
max_seq_length : int.
Maximum sequence length of the sentences.
pad : bool, default True
Whether to pad the sentences to maximum length.
pair : bool, default True
Whether to transform sentences or sentence pairs.
"""
def __init__(self, tokenizer, labels, max_seq_length, pad=True, pair=True):
self._label_map = {}
for (i, label) in enumerate(labels):
self._label_map[label] = i
self._bert_xform = BERTTransform(tokenizer, max_seq_length, pad=pad, pair=pair)
def __call__(self, line):
"""Perform transformation for sequence pairs or single sequences.
The transformation is processed in the following steps:
- tokenize the input sequences
- insert [CLS], [SEP] as necessary
- generate type ids to indicate whether a token belongs to the first
sequence or the second sequence.
- generate valid length
For sequence pairs, the input is a tuple of 3 strings:
text_a, text_b and label.
Inputs:
text_a: 'is this jacksonville ?'
text_b: 'no it is not'
label: '0'
Tokenization:
text_a: 'is this jack ##son ##ville ?'
text_b: 'no it is not .'
Processed:
tokens: '[CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]'
type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
valid_length: 14
label: 0
For single sequences, the input is a tuple of 2 strings: text_a and label.
Inputs:
text_a: 'the dog is hairy .'
label: '1'
Tokenization:
text_a: 'the dog is hairy .'
Processed:
text_a: '[CLS] the dog is hairy . [SEP]'
type_ids: 0 0 0 0 0 0 0
valid_length: 7
label: 1
Parameters
----------
line: tuple of str
Input strings. For sequence pairs, the input is a tuple of 3 strings:
(text_a, text_b, label). For single sequences, the input is a tuple
of 2 strings: (text_a, label).
Returns
-------
np.array: input token ids in 'int32', shape (batch_size, seq_length)
np.array: valid length in 'int32', shape (batch_size,)
np.array: input token type ids in 'int32', shape (batch_size, seq_length)
np.array: label id in 'int32', shape (batch_size, 1)
"""
label = line[-1]
label = convert_to_unicode(label)
label_id = self._label_map[label]
label_id = np.array([label_id], dtype='int32')
input_ids, valid_length, segment_ids = self._bert_xform(line[:-1])
return input_ids, valid_length, segment_ids, label_id
| [] | [] | ["GLUE_DIR"] | [] | ["GLUE_DIR"] | python | 1 | 0 |
pipenv/core.py | # -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function
import io
import json as simplejson
import logging
import os
import sys
import time
import warnings
import click
import six
import delegator
import dotenv
import pipfile
import vistir
from click_completion import init as init_completion
from . import environments, exceptions, pep508checker, progress
from ._compat import decode_for_output, fix_utf8
from .cmdparse import Script
from .environments import (
PIP_EXISTS_ACTION, PIPENV_CACHE_DIR, PIPENV_COLORBLIND,
PIPENV_DEFAULT_PYTHON_VERSION, PIPENV_DONT_USE_PYENV, PIPENV_DONT_USE_ASDF,
PIPENV_HIDE_EMOJIS, PIPENV_MAX_SUBPROCESS, PIPENV_PYUP_API_KEY,
PIPENV_RESOLVE_VCS, PIPENV_SHELL_FANCY, PIPENV_SKIP_VALIDATION, PIPENV_YES,
SESSION_IS_INTERACTIVE, is_type_checking
)
from .patched import crayons
from .project import Project
from .utils import (
convert_deps_to_pip, create_spinner, download_file,
escape_grouped_arguments, find_python, find_windows_executable,
get_canonical_names, get_source_list, interrupt_handled_subprocess,
is_pinned, is_python_command, is_required_version, is_star, is_valid_url,
parse_indexes, pep423_name, prepare_pip_source_args, proper_case,
python_version, run_command, venv_resolve_deps
)
if is_type_checking():
from typing import Dict, List, Optional, Union, Text
from pipenv.vendor.requirementslib.models.requirements import Requirement
TSourceDict = Dict[Text, Union[Text, bool]]
# Packages that should be ignored later.
BAD_PACKAGES = (
"distribute",
"packaging",
"pip",
"pkg-resources",
"setuptools",
"wheel",
)
FIRST_PACKAGES = ("cython",)
# Are we using the default Python?
USING_DEFAULT_PYTHON = True
if not PIPENV_HIDE_EMOJIS:
now = time.localtime()
# Halloween easter-egg.
if ((now.tm_mon == 10) and (now.tm_mday == 30)) or (
(now.tm_mon == 10) and (now.tm_mday == 31)
):
INSTALL_LABEL = "🎃 "
# Christmas easter-egg.
elif ((now.tm_mon == 12) and (now.tm_mday == 24)) or (
(now.tm_mon == 12) and (now.tm_mday == 25)
):
INSTALL_LABEL = "🎅 "
else:
INSTALL_LABEL = "🐍 "
INSTALL_LABEL2 = crayons.normal("☤ ", bold=True)
STARTING_LABEL = " "
else:
INSTALL_LABEL = " "
INSTALL_LABEL2 = " "
STARTING_LABEL = " "
# Enable shell completion.
init_completion()
# Disable colors, for the color blind and others who do not prefer colors.
if PIPENV_COLORBLIND:
crayons.disable()
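# which() resolves a command name to an executable path inside the project's
# virtualenv (Scripts/ on Windows, bin/ elsewhere), falling back to sys.executable
# or a system-wide lookup when allow_global is set or no local match exists.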
def which(command, location=None, allow_global=False):
if not allow_global and location is None:
if project.virtualenv_exists:
location = project.virtualenv_location
else:
location = os.environ.get("VIRTUAL_ENV", None)
if not (location and os.path.exists(location)) and not allow_global:
raise RuntimeError("location not created nor specified")
version_str = "python{0}".format(".".join([str(v) for v in sys.version_info[:2]]))
is_python = command in ("python", os.path.basename(sys.executable), version_str)
if not allow_global:
if os.name == "nt":
p = find_windows_executable(os.path.join(location, "Scripts"), command)
else:
p = os.path.join(location, "bin", command)
else:
if is_python:
p = sys.executable
if not os.path.exists(p):
if is_python:
p = sys.executable or system_which("python")
else:
p = system_which(command)
return p
project = Project(which=which)
def do_clear():
click.echo(crayons.white(fix_utf8("Clearing caches…"), bold=True))
try:
from pip._internal import locations
except ImportError: # pip 9.
from pip import locations
try:
vistir.path.rmtree(PIPENV_CACHE_DIR)
vistir.path.rmtree(locations.USER_CACHE_DIR)
except OSError as e:
# Ignore FileNotFoundError. This is needed for Python 2.7.
import errno
        if e.errno != errno.ENOENT:
            raise
def load_dot_env():
"""Loads .env file into sys.environ."""
if not environments.PIPENV_DONT_LOAD_ENV:
# If the project doesn't exist yet, check current directory for a .env file
project_directory = project.project_directory or "."
dotenv_file = environments.PIPENV_DOTENV_LOCATION or os.sep.join(
[project_directory, ".env"]
)
if os.path.isfile(dotenv_file):
click.echo(
crayons.normal(fix_utf8("Loading .env environment variables…"), bold=True),
err=True,
)
else:
if environments.PIPENV_DOTENV_LOCATION:
click.echo(
"{0}: file {1}={2} does not exist!!\n{3}".format(
crayons.red("Warning", bold=True),
crayons.normal("PIPENV_DOTENV_LOCATION", bold=True),
crayons.normal(environments.PIPENV_DOTENV_LOCATION, bold=True),
crayons.red("Not loading environment variables.", bold=True),
),
err=True,
)
dotenv.load_dotenv(dotenv_file, override=True)
def add_to_path(p):
"""Adds a given path to the PATH."""
if p not in os.environ["PATH"]:
os.environ["PATH"] = "{0}{1}{2}".format(p, os.pathsep, os.environ["PATH"])
def cleanup_virtualenv(bare=True):
"""Removes the virtualenv directory from the system."""
if not bare:
click.echo(crayons.red("Environment creation aborted."))
try:
# Delete the virtualenv.
vistir.path.rmtree(project.virtualenv_location)
except OSError as e:
click.echo(
"{0} An error occurred while removing {1}!".format(
crayons.red("Error: ", bold=True),
crayons.green(project.virtualenv_location),
),
err=True,
)
click.echo(crayons.blue(e), err=True)
def import_requirements(r=None, dev=False):
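    """Imports a pip requirements file into the project's Pipfile, including
    any extra indexes and trusted hosts declared in it."""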
from .patched.notpip._vendor import requests as pip_requests
from .vendor.pip_shims.shims import parse_requirements
# Parse requirements.txt file with Pip's parser.
# Pip requires a `PipSession` which is a subclass of requests.Session.
# Since we're not making any network calls, it's initialized to nothing.
if r:
assert os.path.isfile(r)
# Default path, if none is provided.
if r is None:
r = project.requirements_location
with open(r, "r") as f:
contents = f.read()
indexes = []
trusted_hosts = []
# Find and add extra indexes.
for line in contents.split("\n"):
line_indexes, _trusted_hosts, _ = parse_indexes(line.strip())
indexes.extend(line_indexes)
trusted_hosts.extend(_trusted_hosts)
indexes = sorted(set(indexes))
trusted_hosts = sorted(set(trusted_hosts))
    reqs = list(parse_requirements(r, session=pip_requests))
for package in reqs:
if package.name not in BAD_PACKAGES:
if package.link is not None:
package_string = (
"-e {0}".format(package.link)
if package.editable
else str(package.link)
)
project.add_package_to_pipfile(package_string, dev=dev)
else:
project.add_package_to_pipfile(str(package.req), dev=dev)
    for index in indexes:
        # Hosts passed via --trusted-host are installed without SSL verification.
        trusted = index in trusted_hosts
        project.add_index_to_pipfile(index, verify_ssl=not trusted)
project.recase_pipfile()
def ensure_environment():
# Skip this on Windows…
if os.name != "nt":
if "LANG" not in os.environ:
click.echo(
"{0}: the environment variable {1} is not set!"
"\nWe recommend setting this in {2} (or equivalent) for "
"proper expected behavior.".format(
crayons.red("Warning", bold=True),
crayons.normal("LANG", bold=True),
crayons.green("~/.profile"),
),
err=True,
)
def import_from_code(path="."):
from pipreqs import pipreqs
rs = []
try:
for r in pipreqs.get_all_imports(
path, encoding="utf-8", extra_ignore_dirs=[".venv"]
):
if r not in BAD_PACKAGES:
rs.append(r)
pkg_names = pipreqs.get_pkg_names(rs)
return [proper_case(r) for r in pkg_names]
except Exception:
return []
def ensure_pipfile(validate=True, skip_requirements=False, system=False):
"""Creates a Pipfile for the project, if it doesn't exist."""
from .environments import PIPENV_VIRTUALENV
# Assert Pipfile exists.
python = which("python") if not (USING_DEFAULT_PYTHON or system) else None
if project.pipfile_is_empty:
# Show an error message and exit if system is passed and no pipfile exists
if system and not PIPENV_VIRTUALENV:
raise exceptions.PipenvOptionsError(
"--system",
"--system is intended to be used for pre-existing Pipfile "
"installation, not installation of specific packages. Aborting."
)
# If there's a requirements file, but no Pipfile…
if project.requirements_exists and not skip_requirements:
click.echo(
crayons.normal(
fix_utf8("requirements.txt found, instead of Pipfile! Converting…"),
bold=True,
)
)
# Create a Pipfile…
project.create_pipfile(python=python)
with create_spinner("Importing requirements...") as sp:
# Import requirements.txt.
try:
import_requirements()
except Exception:
sp.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format("Failed..."))
else:
sp.ok(environments.PIPENV_SPINNER_OK_TEXT.format("Success!"))
# Warn the user of side-effects.
click.echo(
u"{0}: Your {1} now contains pinned versions, if your {2} did. \n"
"We recommend updating your {1} to specify the {3} version, instead."
"".format(
crayons.red("Warning", bold=True),
crayons.normal("Pipfile", bold=True),
crayons.normal("requirements.txt", bold=True),
crayons.normal('"*"', bold=True),
)
)
else:
click.echo(
crayons.normal(fix_utf8("Creating a Pipfile for this project…"), bold=True),
err=True,
)
# Create the pipfile if it doesn't exist.
project.create_pipfile(python=python)
# Validate the Pipfile's contents.
if validate and project.virtualenv_exists and not PIPENV_SKIP_VALIDATION:
# Ensure that Pipfile is using proper casing.
p = project.parsed_pipfile
changed = project.ensure_proper_casing()
# Write changes out to disk.
if changed:
click.echo(
crayons.normal(u"Fixing package names in Pipfile…", bold=True), err=True
)
project.write_toml(p)
def find_a_system_python(line):
"""Find a Python installation from a given line.
    This tries to parse the line in various ways:
* Looks like an absolute path? Use it directly.
* Looks like a py.exe call? Use py.exe to get the executable.
* Starts with "py" something? Looks like a python command. Try to find it
in PATH, and use it directly.
* Search for "python" and "pythonX.Y" executables in PATH to find a match.
* Nothing fits, return None.
"""
from .vendor.pythonfinder import Finder
finder = Finder(system=False, global_search=True)
if not line:
return next(iter(finder.find_all_python_versions()), None)
# Use the windows finder executable
if (line.startswith("py ") or line.startswith("py.exe ")) and os.name == "nt":
line = line.split(" ", 1)[1].lstrip("-")
python_entry = find_python(finder, line)
return python_entry
def ensure_python(three=None, python=None):
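    """Determines which Python interpreter to use for the project, offering to
    install a missing version via pyenv or asdf, and returns its path."""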
# Support for the PIPENV_PYTHON environment variable.
from .environments import PIPENV_PYTHON
if PIPENV_PYTHON and python is False and three is None:
python = PIPENV_PYTHON
def abort():
click.echo(
"You can specify specific versions of Python with:\n {0}".format(
crayons.red(
"$ pipenv --python {0}".format(
os.sep.join(("path", "to", "python"))
)
)
),
err=True,
)
sys.exit(1)
global USING_DEFAULT_PYTHON
USING_DEFAULT_PYTHON = three is None and not python
# Find out which python is desired.
if not python:
python = convert_three_to_python(three, python)
if not python:
python = project.required_python_version
if not python:
python = PIPENV_DEFAULT_PYTHON_VERSION
path_to_python = find_a_system_python(python)
if environments.is_verbose():
click.echo(u"Using python: {0}".format(python), err=True)
click.echo(u"Path to python: {0}".format(path_to_python), err=True)
if not path_to_python and python is not None:
# We need to install Python.
click.echo(
u"{0}: Python {1} {2}".format(
crayons.red("Warning", bold=True),
crayons.blue(python),
fix_utf8("was not found on your system…"),
),
err=True,
)
# check for python installers
from .vendor.pythonfinder.environment import PYENV_INSTALLED, ASDF_INSTALLED
from .installers import Pyenv, Asdf, InstallerError
        # Prefer pyenv when both pyenv and asdf are installed, since pyenv is
        # dedicated to Python installs and is therefore most likely the user's
        # preferred tool for installing new Python versions.
if PYENV_INSTALLED and not PIPENV_DONT_USE_PYENV:
installer = Pyenv("pyenv")
elif ASDF_INSTALLED and not PIPENV_DONT_USE_ASDF:
installer = Asdf("asdf")
else:
installer = None
if not installer:
abort()
else:
if SESSION_IS_INTERACTIVE or PIPENV_YES:
try:
version = installer.find_version_to_install(python)
except ValueError:
abort()
except InstallerError as e:
click.echo(fix_utf8("Something went wrong…"))
click.echo(crayons.blue(e.err), err=True)
abort()
s = "{0} {1} {2}".format(
"Would you like us to install",
crayons.green("CPython {0}".format(version)),
"with {0}?".format(installer),
)
# Prompt the user to continue…
if not (PIPENV_YES or click.confirm(s, default=True)):
abort()
else:
# Tell the user we're installing Python.
click.echo(
u"{0} {1} {2} {3}{4}".format(
crayons.normal(u"Installing", bold=True),
crayons.green(u"CPython {0}".format(version), bold=True),
crayons.normal(u"with {0}".format(installer), bold=True),
crayons.normal(u"(this may take a few minutes)"),
crayons.normal(fix_utf8("…"), bold=True),
)
)
with create_spinner("Installing python...") as sp:
try:
c = installer.install(version)
except InstallerError as e:
sp.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format(
"Failed...")
)
click.echo(fix_utf8("Something went wrong…"), err=True)
click.echo(crayons.blue(e.err), err=True)
else:
sp.ok(environments.PIPENV_SPINNER_OK_TEXT.format("Success!"))
# Print the results, in a beautiful blue…
click.echo(crayons.blue(c.out), err=True)
# Clear the pythonfinder caches
from .vendor.pythonfinder import Finder
finder = Finder(system=False, global_search=True)
finder.find_python_version.cache_clear()
finder.find_all_python_versions.cache_clear()
# Find the newly installed Python, hopefully.
version = str(version)
path_to_python = find_a_system_python(version)
try:
assert python_version(path_to_python) == version
except AssertionError:
click.echo(
"{0}: The Python you just installed is not available on your {1}, apparently."
"".format(
crayons.red("Warning", bold=True),
crayons.normal("PATH", bold=True),
),
err=True,
)
sys.exit(1)
return path_to_python
def ensure_virtualenv(three=None, python=None, site_packages=None, pypi_mirror=None):
"""Creates a virtualenv, if one doesn't exist."""
from .environments import PIPENV_USE_SYSTEM
def abort():
sys.exit(1)
global USING_DEFAULT_PYTHON
if not project.virtualenv_exists:
try:
# Ensure environment variables are set properly.
ensure_environment()
# Ensure Python is available.
python = ensure_python(three=three, python=python)
if python is not None and not isinstance(python, six.string_types):
python = python.path.as_posix()
# Create the virtualenv.
# Abort if --system (or running in a virtualenv).
if PIPENV_USE_SYSTEM:
click.echo(
                    crayons.red(
                        "You are attempting to re-create a virtualenv that "
"Pipenv did not create. Aborting."
)
)
sys.exit(1)
do_create_virtualenv(
python=python, site_packages=site_packages, pypi_mirror=pypi_mirror
)
except KeyboardInterrupt:
# If interrupted, cleanup the virtualenv.
cleanup_virtualenv(bare=False)
sys.exit(1)
# If --three, --two, or --python were passed…
elif (python) or (three is not None) or (site_packages is not None):
USING_DEFAULT_PYTHON = False
# Ensure python is installed before deleting existing virtual env
python = ensure_python(three=three, python=python)
if python is not None and not isinstance(python, six.string_types):
python = python.path.as_posix()
click.echo(crayons.red("Virtualenv already exists!"), err=True)
# If VIRTUAL_ENV is set, there is a possibility that we are
# going to remove the active virtualenv that the user cares
# about, so confirm first.
if "VIRTUAL_ENV" in os.environ:
if not (
PIPENV_YES or click.confirm("Remove existing virtualenv?", default=True)
):
abort()
click.echo(
crayons.normal(fix_utf8("Removing existing virtualenv…"), bold=True), err=True
)
# Remove the virtualenv.
cleanup_virtualenv(bare=True)
# Call this function again.
ensure_virtualenv(
three=three,
python=python,
site_packages=site_packages,
pypi_mirror=pypi_mirror,
)
def ensure_project(
three=None,
python=None,
validate=True,
system=False,
warn=True,
site_packages=None,
deploy=False,
skip_requirements=False,
pypi_mirror=None,
clear=False,
):
"""Ensures both Pipfile and virtualenv exist for the project."""
from .environments import PIPENV_USE_SYSTEM
# Clear the caches, if appropriate.
if clear:
print("clearing")
sys.exit(1)
# Automatically use an activated virtualenv.
if PIPENV_USE_SYSTEM:
system = True
if not project.pipfile_exists and deploy:
raise exceptions.PipfileNotFound
# Fail if working under /
if not project.name:
click.echo(
"{0}: Pipenv is not intended to work under the root directory, "
"please choose another path.".format(crayons.red("ERROR")),
err=True
)
sys.exit(1)
# Skip virtualenv creation when --system was used.
if not system:
ensure_virtualenv(
three=three,
python=python,
site_packages=site_packages,
pypi_mirror=pypi_mirror,
)
if warn:
# Warn users if they are using the wrong version of Python.
if project.required_python_version:
path_to_python = which("python") or which("py")
if path_to_python and project.required_python_version not in (
python_version(path_to_python) or ""
):
click.echo(
"{0}: Your Pipfile requires {1} {2}, "
"but you are using {3} ({4}).".format(
crayons.red("Warning", bold=True),
crayons.normal("python_version", bold=True),
crayons.blue(project.required_python_version),
crayons.blue(python_version(path_to_python) or "unknown"),
crayons.green(shorten_path(path_to_python)),
),
err=True,
)
click.echo(
" {0} and rebuilding the virtual environment "
"may resolve the issue.".format(crayons.green("$ pipenv --rm")),
err=True,
)
if not deploy:
click.echo(
" {0} will surely fail."
"".format(crayons.red("$ pipenv check")),
err=True,
)
else:
raise exceptions.DeployException
# Ensure the Pipfile exists.
ensure_pipfile(
validate=validate, skip_requirements=skip_requirements, system=system
)
def shorten_path(location, bold=False):
"""Returns a visually shorter representation of a given system path."""
original = location
short = os.sep.join(
[s[0] if len(s) > (len("2long4")) else s for s in location.split(os.sep)]
)
short = short.split(os.sep)
short[-1] = original.split(os.sep)[-1]
if bold:
short[-1] = str(crayons.normal(short[-1], bold=True))
return os.sep.join(short)
# return short
def do_where(virtualenv=False, bare=True):
"""Executes the where functionality."""
if not virtualenv:
if not project.pipfile_exists:
click.echo(
"No Pipfile present at project home. Consider running "
"{0} first to automatically generate a Pipfile for you."
"".format(crayons.green("`pipenv install`")),
err=True,
)
return
location = project.pipfile_location
# Shorten the virtual display of the path to the virtualenv.
if not bare:
location = shorten_path(location)
click.echo(
"Pipfile found at {0}.\n Considering this to be the project home."
"".format(crayons.green(location)),
err=True,
)
else:
click.echo(project.project_directory)
else:
location = project.virtualenv_location
if not bare:
click.echo(
"Virtualenv location: {0}".format(crayons.green(location)), err=True
)
else:
click.echo(location)
def _cleanup_procs(procs, failed_deps_queue, retry=True):
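    """Drains the queue of pip subprocesses, blocking on each result and either
    re-queueing failed dependencies for a retry pass or raising
    ``InstallError`` when retries are disabled."""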
while not procs.empty():
c = procs.get()
if not c.blocking:
c.block()
failed = False
if c.return_code != 0:
failed = True
if "Ignoring" in c.out:
click.echo(crayons.yellow(c.out.strip()))
elif environments.is_verbose():
click.echo(crayons.blue(c.out.strip() or c.err.strip()))
# The Installation failed…
if failed:
if "does not match installed location" in c.err:
project.environment.expand_egg_links()
click.echo("{0}".format(
crayons.yellow(
"Failed initial installation: Failed to overwrite existing "
"package, likely due to path aliasing. Expanding and trying "
"again!"
)
))
dep = c.dep.copy()
elif not retry:
# The Installation failed…
# We echo both c.out and c.err because pip returns error details on out.
err = c.err.strip().splitlines() if c.err else []
out = c.out.strip().splitlines() if c.out else []
err_lines = [line for message in [out, err] for line in message]
# Return the subprocess' return code.
raise exceptions.InstallError(c.dep.name, extra=err_lines)
else:
# Alert the user.
dep = c.dep.copy()
click.echo(
"{0} {1}! Will try again.".format(
crayons.red("An error occurred while installing"),
crayons.green(dep.as_line()),
), err=True
)
# Save the Failed Dependency for later.
failed_deps_queue.put(dep)
def batch_install(deps_list, procs, failed_deps_queue,
requirements_dir, no_deps=True, ignore_hashes=False,
allow_global=False, blocking=False, pypi_mirror=None,
retry=True, sequential_deps=None):
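    """Installs a batch of dependencies, farming each one out to a pip
    subprocess and collecting failures in ``failed_deps_queue``.

    Requirements listed in ``sequential_deps`` (typically editable or VCS
    dependencies) are installed in blocking mode, one at a time.
    """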
from .vendor.requirementslib.models.utils import strip_extras_markers_from_requirement
if sequential_deps is None:
sequential_deps = []
failed = (not retry)
install_deps = not no_deps
if not failed:
label = INSTALL_LABEL if not PIPENV_HIDE_EMOJIS else ""
else:
label = INSTALL_LABEL2
deps_to_install = deps_list[:]
deps_to_install.extend(sequential_deps)
sequential_dep_names = [d.name for d in sequential_deps]
deps_list_bar = progress.bar(
deps_to_install, width=32,
label=label
)
trusted_hosts = []
    # Install each dependency in the batch, updating the progress bar as we go.
for dep in deps_list_bar:
extra_indexes = []
if dep.req.req:
dep.req.req = strip_extras_markers_from_requirement(dep.req.req)
if dep.markers:
dep.markers = str(strip_extras_markers_from_requirement(dep.get_markers()))
# Install the module.
is_artifact = False
if dep.is_file_or_url and (dep.is_direct_url or any(
dep.req.uri.endswith(ext) for ext in ["zip", "tar.gz"]
)):
is_artifact = True
elif dep.is_vcs:
is_artifact = True
if not PIPENV_RESOLVE_VCS and is_artifact and not dep.editable:
install_deps = True
no_deps = False
with vistir.contextmanagers.temp_environ():
if not allow_global:
os.environ["PIP_USER"] = vistir.compat.fs_str("0")
if "PYTHONHOME" in os.environ:
del os.environ["PYTHONHOME"]
if "GIT_CONFIG" in os.environ and dep.is_vcs:
del os.environ["GIT_CONFIG"]
c = pip_install(
dep,
ignore_hashes=any([ignore_hashes, dep.editable, dep.is_vcs]),
allow_global=allow_global,
no_deps=not install_deps,
block=any([dep.editable, dep.is_vcs, blocking]),
index=dep.index,
requirements_dir=requirements_dir,
pypi_mirror=pypi_mirror,
trusted_hosts=trusted_hosts,
extra_indexes=extra_indexes,
use_pep517=not failed,
)
c.dep = dep
# if dep.is_vcs or dep.editable:
is_sequential = sequential_deps and dep.name in sequential_dep_names
if is_sequential:
c.block()
procs.put(c)
if procs.full() or procs.qsize() == len(deps_list) or is_sequential:
_cleanup_procs(procs, failed_deps_queue, retry=retry)
def do_install_dependencies(
dev=False,
only=False,
bare=False,
requirements=False,
allow_global=False,
ignore_hashes=False,
skip_lock=False,
concurrent=True,
requirements_dir=None,
pypi_mirror=False,
):
    """
Executes the install functionality.
If requirements is True, simply spits out a requirements format to stdout.
"""
from six.moves import queue
if requirements:
bare = True
    # Build the working lockfile from the Pipfile when the lock is skipped,
    # when only Pipfile dependencies are requested, or when no lockfile exists yet.
if skip_lock or only or not project.lockfile_exists:
if not bare:
click.echo(
crayons.normal(fix_utf8("Installing dependencies from Pipfile…"), bold=True)
)
# skip_lock should completely bypass the lockfile (broken in 4dac1676)
lockfile = project.get_or_create_lockfile(from_pipfile=True)
else:
lockfile = project.get_or_create_lockfile()
if not bare:
click.echo(
crayons.normal(
fix_utf8("Installing dependencies from Pipfile.lock ({0})…".format(
lockfile["_meta"].get("hash", {}).get("sha256")[-6:]
)),
bold=True,
)
)
# Allow pip to resolve dependencies when in skip-lock mode.
no_deps = not skip_lock # skip_lock true, no_deps False, pip resolves deps
deps_list = list(lockfile.get_requirements(dev=dev, only=requirements))
if requirements:
index_args = prepare_pip_source_args(project.sources)
index_args = " ".join(index_args).replace(" -", "\n-")
deps = [
req.as_line(sources=False, include_hashes=False) for req in deps_list
]
# Output only default dependencies
click.echo(index_args)
click.echo(
"\n".join(sorted(deps))
)
sys.exit(0)
if concurrent:
nprocs = PIPENV_MAX_SUBPROCESS
else:
nprocs = 1
procs = queue.Queue(maxsize=nprocs)
failed_deps_queue = queue.Queue()
if skip_lock:
ignore_hashes = True
editable_or_vcs_deps = [dep for dep in deps_list if (dep.editable or dep.vcs)]
normal_deps = [dep for dep in deps_list if not (dep.editable or dep.vcs)]
install_kwargs = {
"no_deps": no_deps, "ignore_hashes": ignore_hashes, "allow_global": allow_global,
"blocking": not concurrent, "pypi_mirror": pypi_mirror,
"sequential_deps": editable_or_vcs_deps
}
batch_install(
normal_deps, procs, failed_deps_queue, requirements_dir, **install_kwargs
)
if not procs.empty():
_cleanup_procs(procs, failed_deps_queue)
# click.echo(crayons.normal(
# decode_for_output("Installing editable and vcs dependencies…"), bold=True
# ))
# install_kwargs.update({"blocking": True})
# # XXX: All failed and editable/vcs deps should be installed in sequential mode!
# procs = queue.Queue(maxsize=1)
# batch_install(
# editable_or_vcs_deps, procs, failed_deps_queue, requirements_dir,
# **install_kwargs
# )
# Iterate over the hopefully-poorly-packaged dependencies…
if not failed_deps_queue.empty():
click.echo(
crayons.normal(fix_utf8("Installing initially failed dependencies…"), bold=True)
)
retry_list = []
while not failed_deps_queue.empty():
failed_dep = failed_deps_queue.get()
retry_list.append(failed_dep)
install_kwargs.update({"retry": False})
batch_install(
retry_list, procs, failed_deps_queue, requirements_dir, **install_kwargs
)
if not procs.empty():
_cleanup_procs(procs, failed_deps_queue, retry=False)
def convert_three_to_python(three, python):
    """Converts a Three flag into a Python flag, and raises custom warnings
in the process, if needed.
"""
if not python:
if three is False:
return "2"
elif three is True:
return "3"
else:
return python
def do_create_virtualenv(python=None, site_packages=None, pypi_mirror=None):
"""Creates a virtualenv."""
click.echo(
crayons.normal(fix_utf8("Creating a virtualenv for this project…"), bold=True), err=True
)
click.echo(
u"Pipfile: {0}".format(crayons.red(project.pipfile_location, bold=True)),
err=True,
)
# Default to using sys.executable, if Python wasn't provided.
using_string = u"Using"
if not python:
python = sys.executable
using_string = "Using default python from"
click.echo(
u"{0} {1} {3} {2}".format(
crayons.normal(using_string, bold=True),
crayons.red(python, bold=True),
crayons.normal(fix_utf8("to create virtualenv…"), bold=True),
crayons.green("({0})".format(python_version(python))),
),
err=True,
)
cmd = [
vistir.compat.Path(sys.executable).absolute().as_posix(),
"-m",
"virtualenv",
"--prompt=({0}) ".format(project.name),
"--python={0}".format(python),
project.get_location_for_virtualenv(),
]
# Pass site-packages flag to virtualenv, if desired…
if site_packages:
click.echo(
crayons.normal(fix_utf8("Making site-packages available…"), bold=True), err=True
)
cmd.append("--system-site-packages")
if pypi_mirror:
pip_config = {"PIP_INDEX_URL": vistir.misc.fs_str(pypi_mirror)}
else:
pip_config = {}
# Actually create the virtualenv.
error = None
with create_spinner(u"Creating virtual environment...") as sp:
with interrupt_handled_subprocess(cmd, combine_stderr=False, env=pip_config) as c:
click.echo(crayons.blue(u"{0}".format(c.out)), err=True)
if c.returncode != 0:
error = c.err if environments.is_verbose() else exceptions.prettify_exc(c.err)
sp.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format(u"Failed creating virtual environment"))
else:
sp.green.ok(environments.PIPENV_SPINNER_OK_TEXT.format(u"Successfully created virtual environment!"))
if error is not None:
raise exceptions.VirtualenvCreationException(
extra=crayons.red("{0}".format(error))
)
# Associate project directory with the environment.
# This mimics Pew's "setproject".
project_file_name = os.path.join(project.virtualenv_location, ".project")
with open(project_file_name, "w") as f:
f.write(vistir.misc.fs_str(project.project_directory))
from .environment import Environment
sources = project.pipfile_sources
project._environment = Environment(
prefix=project.get_location_for_virtualenv(),
is_venv=True,
sources=sources,
pipfile=project.parsed_pipfile,
project=project
)
project._environment.add_dist("pipenv")
# Say where the virtualenv is.
do_where(virtualenv=True, bare=False)
def parse_download_fname(fname, name):
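    """Extracts the version string from a downloaded archive or wheel filename
    belonging to the given package ``name``."""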
fname, fextension = os.path.splitext(fname)
if fextension == ".whl":
fname = "-".join(fname.split("-")[:-3])
if fname.endswith(".tar"):
fname, _ = os.path.splitext(fname)
# Substring out package name (plus dash) from file name to get version.
version = fname[len(name) + 1 :]
# Ignore implicit post releases in version number.
if "-" in version and version.split("-")[1].isdigit():
version = version.split("-")[0]
return version
def get_downloads_info(names_map, section):
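    """Returns name/version/hash information for each file in the download
    location whose version satisfies the given Pipfile ``section``."""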
from .vendor.requirementslib.models.requirements import Requirement
info = []
p = project.parsed_pipfile
for fname in os.listdir(project.download_location):
# Get name from filename mapping.
name = Requirement.from_line(names_map[fname]).name
# Get the version info from the filenames.
version = parse_download_fname(fname, name)
# Get the hash of each file.
cmd = '{0} hash "{1}"'.format(
escape_grouped_arguments(which_pip()),
os.sep.join([project.download_location, fname]),
)
c = delegator.run(cmd)
        file_hash = c.out.split("--hash=")[1].strip()
        # Verify we're adding the correct version from Pipfile
        # and not one from a dependency.
        specified_version = p[section].get(name, "")
        if is_required_version(version, specified_version):
            info.append(dict(name=name, version=version, hash=file_hash))
return info
def overwrite_dev(prod, dev):
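    """Makes packages that appear in both sections use the default ("prod")
    entry, so develop entries cannot diverge from default ones."""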
    dev_keys = set(dev.keys())
    prod_keys = set(prod.keys())
for pkg in dev_keys & prod_keys:
dev[pkg] = prod[pkg]
return dev
def do_lock(
ctx=None,
system=False,
clear=False,
pre=False,
keep_outdated=False,
write=True,
pypi_mirror=None,
):
"""Executes the freeze functionality."""
cached_lockfile = {}
if not pre:
pre = project.settings.get("allow_prereleases")
if keep_outdated:
if not project.lockfile_exists:
raise exceptions.PipenvOptionsError(
"--keep-outdated", ctx=ctx,
message="Pipfile.lock must exist to use --keep-outdated!"
)
cached_lockfile = project.lockfile_content
# Create the lockfile.
lockfile = project._lockfile
# Cleanup lockfile.
for section in ("default", "develop"):
for k, v in lockfile[section].copy().items():
if not hasattr(v, "keys"):
del lockfile[section][k]
# Ensure that develop inherits from default.
dev_packages = project.dev_packages.copy()
dev_packages = overwrite_dev(project.packages, dev_packages)
# Resolve dev-package dependencies, with pip-tools.
for is_dev in [True, False]:
pipfile_section = "dev-packages" if is_dev else "packages"
if project.pipfile_exists:
packages = project.parsed_pipfile.get(pipfile_section, {})
else:
packages = getattr(project, pipfile_section.replace("-", "_"))
if write:
# Alert the user of progress.
click.echo(
u"{0} {1} {2}".format(
crayons.normal(u"Locking"),
crayons.red(u"[{0}]".format(pipfile_section.replace("_", "-"))),
crayons.normal(fix_utf8("dependencies…")),
),
err=True,
)
# Mutates the lockfile
venv_resolve_deps(
packages,
which=which,
project=project,
dev=is_dev,
clear=clear,
pre=pre,
allow_global=system,
pypi_mirror=pypi_mirror,
pipfile=packages,
lockfile=lockfile,
keep_outdated=keep_outdated
)
# Support for --keep-outdated…
if keep_outdated:
from pipenv.vendor.packaging.utils import canonicalize_name
for section_name, section in (
("default", project.packages),
("develop", project.dev_packages),
):
for package_specified in section.keys():
if not is_pinned(section[package_specified]):
canonical_name = canonicalize_name(package_specified)
if canonical_name in cached_lockfile[section_name]:
lockfile[section_name][canonical_name] = cached_lockfile[
section_name
][canonical_name].copy()
for key in ["default", "develop"]:
packages = set(cached_lockfile[key].keys())
new_lockfile = set(lockfile[key].keys())
missing = packages - new_lockfile
for missing_pkg in missing:
lockfile[key][missing_pkg] = cached_lockfile[key][missing_pkg].copy()
# Overwrite any develop packages with default packages.
lockfile["develop"].update(overwrite_dev(lockfile.get("default", {}), lockfile["develop"]))
if write:
project.write_lockfile(lockfile)
click.echo(
"{0}".format(
crayons.normal(
"Updated Pipfile.lock ({0})!".format(
lockfile["_meta"].get("hash", {}).get("sha256")[-6:]
),
bold=True,
)
),
err=True,
)
else:
return lockfile
def do_purge(bare=False, downloads=False, allow_global=False):
"""Executes the purge functionality."""
if downloads:
if not bare:
click.echo(crayons.normal(fix_utf8("Clearing out downloads directory…"), bold=True))
vistir.path.rmtree(project.download_location)
return
    # Collect the canonical names of everything currently installed.
    installed = {
        pep423_name(pkg.project_name) for pkg in project.environment.get_installed_packages()
    }
    bad_pkgs = {pep423_name(pkg) for pkg in BAD_PACKAGES}
# Remove setuptools, pip, etc from targets for removal
to_remove = installed - bad_pkgs
    # Skip purging if there are no packages which need to be removed.
    if not to_remove:
        if not bare:
            click.echo("Found 0 installed packages, skipping purge.")
click.echo(crayons.green("Environment now purged and fresh!"))
return installed
if not bare:
click.echo(
fix_utf8("Found {0} installed package(s), purging…".format(len(to_remove)))
)
command = "{0} uninstall {1} -y".format(
escape_grouped_arguments(which_pip(allow_global=allow_global)),
" ".join(to_remove),
)
if environments.is_verbose():
click.echo("$ {0}".format(command))
c = delegator.run(command)
if c.return_code != 0:
raise exceptions.UninstallError(installed, command, c.out + c.err, c.return_code)
if not bare:
click.echo(crayons.blue(c.out))
click.echo(crayons.green("Environment now purged and fresh!"))
return installed
def do_init(
dev=False,
requirements=False,
allow_global=False,
ignore_pipfile=False,
skip_lock=False,
system=False,
concurrent=True,
deploy=False,
pre=False,
keep_outdated=False,
requirements_dir=None,
pypi_mirror=None,
):
"""Executes the init functionality."""
from .environments import (
PIPENV_VIRTUALENV, PIPENV_DEFAULT_PYTHON_VERSION, PIPENV_PYTHON, PIPENV_USE_SYSTEM
)
python = None
if PIPENV_PYTHON is not None:
python = PIPENV_PYTHON
elif PIPENV_DEFAULT_PYTHON_VERSION is not None:
python = PIPENV_DEFAULT_PYTHON_VERSION
if not system and not PIPENV_USE_SYSTEM:
if not project.virtualenv_exists:
try:
do_create_virtualenv(python=python, three=None, pypi_mirror=pypi_mirror)
except KeyboardInterrupt:
cleanup_virtualenv(bare=False)
sys.exit(1)
# Ensure the Pipfile exists.
if not deploy:
ensure_pipfile(system=system)
if not requirements_dir:
requirements_dir = vistir.path.create_tracked_tempdir(
suffix="-requirements", prefix="pipenv-"
)
# Write out the lockfile if it doesn't exist, but not if the Pipfile is being ignored
if (project.lockfile_exists and not ignore_pipfile) and not skip_lock:
old_hash = project.get_lockfile_hash()
new_hash = project.calculate_pipfile_hash()
if new_hash != old_hash:
if deploy:
click.echo(
crayons.red(
"Your Pipfile.lock ({0}) is out of date. Expected: ({1}).".format(
old_hash[-6:], new_hash[-6:]
)
)
)
raise exceptions.DeployException
sys.exit(1)
elif (system or allow_global) and not (PIPENV_VIRTUALENV):
click.echo(
crayons.red(fix_utf8(
"Pipfile.lock ({0}) out of date, but installation "
"uses {1}… re-building lockfile must happen in "
"isolation. Please rebuild lockfile in a virtualenv. "
"Continuing anyway…".format(
crayons.white(old_hash[-6:]), crayons.white("--system")
)),
bold=True,
),
err=True,
)
else:
if old_hash:
msg = fix_utf8("Pipfile.lock ({0}) out of date, updating to ({1})…")
else:
msg = fix_utf8("Pipfile.lock is corrupted, replaced with ({1})…")
click.echo(
crayons.red(msg.format(old_hash[-6:], new_hash[-6:]), bold=True),
err=True,
)
do_lock(
system=system,
pre=pre,
keep_outdated=keep_outdated,
write=True,
pypi_mirror=pypi_mirror,
)
# Write out the lockfile if it doesn't exist.
if not project.lockfile_exists and not skip_lock:
# Unless we're in a virtualenv not managed by pipenv, abort if we're
# using the system's python.
if (system or allow_global) and not (PIPENV_VIRTUALENV):
raise exceptions.PipenvOptionsError(
"--system",
"--system is intended to be used for Pipfile installation, "
"not installation of specific packages. Aborting.\n"
"See also: --deploy flag."
)
else:
click.echo(
crayons.normal(fix_utf8("Pipfile.lock not found, creating…"), bold=True),
err=True,
)
do_lock(
system=system,
pre=pre,
keep_outdated=keep_outdated,
write=True,
pypi_mirror=pypi_mirror,
)
do_install_dependencies(
dev=dev,
requirements=requirements,
allow_global=allow_global,
skip_lock=skip_lock,
concurrent=concurrent,
requirements_dir=requirements_dir,
pypi_mirror=pypi_mirror,
)
# Hint the user what to do to activate the virtualenv.
if not allow_global and not deploy and "PIPENV_ACTIVE" not in os.environ:
click.echo(
"To activate this project's virtualenv, run {0}.\n"
"Alternatively, run a command "
"inside the virtualenv with {1}.".format(
crayons.red("pipenv shell"), crayons.red("pipenv run")
)
)
def get_pip_args(
pre=False, # type: bool
    verbose=False,  # type: bool
    upgrade=False,  # type: bool
    require_hashes=False,  # type: bool
    no_build_isolation=False,  # type: bool
    no_use_pep517=False,  # type: bool
    no_deps=False,  # type: bool
selective_upgrade=False, # type: bool
src_dir=None, # type: Optional[str]
):
# type: (...) -> List[str]
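    """Translates the given keyword flags into the corresponding pip
    command-line arguments, accounting for the installed pip version when
    emitting the PEP 517 related flags."""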
from .vendor.packaging.version import parse as parse_version
arg_map = {
"pre": ["--pre"],
"verbose": ["--verbose"],
"upgrade": ["--upgrade"],
"require_hashes": ["--require-hashes"],
"no_build_isolation": ["--no-build-isolation"],
"no_use_pep517": [],
"no_deps": ["--no-deps"],
"selective_upgrade": [
"--upgrade-strategy=only-if-needed",
"--exists-action={0}".format(PIP_EXISTS_ACTION or "i")
],
"src_dir": src_dir,
}
if project.environment.pip_version >= parse_version("19.0"):
arg_map["no_use_pep517"].append("--no-use-pep517")
if project.environment.pip_version < parse_version("19.1"):
arg_map["no_use_pep517"].append("--no-build-isolation")
arg_set = []
for key in arg_map.keys():
if key in locals() and locals().get(key):
arg_set.extend(arg_map.get(key))
elif key == "selective_upgrade" and not locals().get(key):
arg_set.append("--exists-action=i")
return list(vistir.misc.dedup(arg_set))
def get_requirement_line(
requirement, # type: Requirement
src_dir=None, # type: Optional[str]
include_hashes=True, # type: bool
format_for_file=False, # type: bool
):
# type: (...) -> Union[List[str], str]
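    """Renders a requirement either as a list of pip CLI arguments or, when
    ``format_for_file`` is True, as a single requirements-file style line."""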
line = None
if requirement.vcs or requirement.is_file_or_url:
if src_dir and requirement.line_instance.wheel_kwargs:
requirement.line_instance._wheel_kwargs.update({
"src_dir": src_dir
})
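        # Accessing ``vcsrepo`` here appears to be done for its side effect of
        # preparing the VCS checkout before the requirement line is rendered.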
requirement.line_instance.vcsrepo
line = requirement.line_instance.line
if requirement.line_instance.markers:
line = '{0}; {1}'.format(line, requirement.line_instance.markers)
if not format_for_file:
line = '"{0}"'.format(line)
if requirement.editable:
if not format_for_file:
return ["-e", line]
return '-e {0}'.format(line)
if not format_for_file:
return [line]
return line
return requirement.as_line(include_hashes=include_hashes, as_list=not format_for_file)
def write_requirement_to_file(
requirement, # type: Requirement
requirements_dir=None, # type: Optional[str]
src_dir=None, # type: Optional[str]
include_hashes=True # type: bool
):
# type: (...) -> str
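    """Writes a single requirement line to a temporary requirements file and
    returns the path to that file."""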
if not requirements_dir:
requirements_dir = vistir.path.create_tracked_tempdir(
prefix="pipenv", suffix="requirements")
line = requirement.line_instance.get_line(
with_prefix=True, with_hashes=include_hashes, with_markers=True, as_list=False
)
f = vistir.compat.NamedTemporaryFile(
prefix="pipenv-", suffix="-requirement.txt", dir=requirements_dir,
delete=False
)
if environments.is_verbose():
click.echo(
"Writing supplied requirement line to temporary file: {0!r}".format(line),
err=True
)
f.write(vistir.misc.to_bytes(line))
r = f.name
f.close()
return r
def pip_install(
requirement=None,
r=None,
allow_global=False,
ignore_hashes=False,
no_deps=None,
block=True,
index=None,
pre=False,
selective_upgrade=False,
requirements_dir=None,
extra_indexes=None,
pypi_mirror=None,
trusted_hosts=None,
use_pep517=True
):
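    """Builds and runs a ``pip install`` command for a single requirement (or
    a requirements file ``r``) and returns the resulting subprocess object."""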
piplogger = logging.getLogger("pipenv.patched.notpip._internal.commands.install")
src_dir = None
if not trusted_hosts:
trusted_hosts = []
trusted_hosts.extend(os.environ.get("PIP_TRUSTED_HOSTS", []))
if not allow_global:
src_dir = os.getenv("PIP_SRC", os.getenv("PIP_SRC_DIR", project.virtualenv_src_location))
else:
src_dir = os.getenv("PIP_SRC", os.getenv("PIP_SRC_DIR"))
if requirement:
if requirement.editable or not requirement.hashes:
ignore_hashes = True
elif not (requirement.is_vcs or requirement.editable or requirement.vcs):
ignore_hashes = False
line = None
# Try installing for each source in project.sources.
if not index and requirement.index:
index = requirement.index
if index and not extra_indexes:
extra_indexes = list(project.sources)
if requirement and requirement.vcs or requirement.editable:
requirement.index = None
# Install dependencies when a package is a non-editable VCS dependency.
# Don't specify a source directory when using --system.
if not requirement.editable and no_deps is not True:
            # Leave this off because old lockfiles don't have all deps included
# TODO: When can it be turned back on?
no_deps = False
elif requirement.editable and no_deps is None:
no_deps = True
r = write_requirement_to_file(
requirement, requirements_dir=requirements_dir, src_dir=src_dir,
include_hashes=not ignore_hashes
)
sources = get_source_list(
index, extra_indexes=extra_indexes, trusted_hosts=trusted_hosts,
pypi_mirror=pypi_mirror
)
if r:
with io.open(r, "r") as fh:
if "--hash" not in fh.read():
ignore_hashes = True
if environments.is_verbose():
piplogger.setLevel(logging.WARN)
if requirement:
click.echo(
crayons.normal("Installing {0!r}".format(requirement.name), bold=True),
err=True,
)
pip_command = [which_pip(allow_global=allow_global), "install"]
pip_args = get_pip_args(
pre=pre, verbose=environments.is_verbose(), upgrade=True,
selective_upgrade=selective_upgrade, no_use_pep517=not use_pep517,
no_deps=no_deps, require_hashes=not ignore_hashes
)
pip_command.extend(pip_args)
if r:
pip_command.extend(["-r", vistir.path.normalize_path(r)])
elif line:
pip_command.extend(line)
pip_command.extend(prepare_pip_source_args(sources))
if environments.is_verbose():
click.echo("$ {0}".format(pip_command), err=True)
cache_dir = vistir.compat.Path(PIPENV_CACHE_DIR)
DEFAULT_EXISTS_ACTION = "w"
if selective_upgrade:
DEFAULT_EXISTS_ACTION = "i"
exists_action = vistir.misc.fs_str(PIP_EXISTS_ACTION or DEFAULT_EXISTS_ACTION)
pip_config = {
"PIP_CACHE_DIR": vistir.misc.fs_str(cache_dir.as_posix()),
"PIP_WHEEL_DIR": vistir.misc.fs_str(cache_dir.joinpath("wheels").as_posix()),
"PIP_DESTINATION_DIR": vistir.misc.fs_str(
cache_dir.joinpath("pkgs").as_posix()
),
"PIP_EXISTS_ACTION": exists_action,
"PATH": vistir.misc.fs_str(os.environ.get("PATH")),
}
if src_dir:
if environments.is_verbose():
click.echo("Using source directory: {0!r}".format(src_dir), err=True)
pip_config.update(
{"PIP_SRC": vistir.misc.fs_str(src_dir)}
)
cmd = Script.parse(pip_command)
pip_command = cmd.cmdify()
c = None
c = delegator.run(pip_command, block=block, env=pip_config)
c.env = pip_config
return c
def pip_download(package_name):
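    """Downloads a package into the project's download location, trying each
    configured source until one succeeds."""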
cache_dir = vistir.compat.Path(PIPENV_CACHE_DIR)
pip_config = {
"PIP_CACHE_DIR": vistir.misc.fs_str(cache_dir.as_posix()),
"PIP_WHEEL_DIR": vistir.misc.fs_str(cache_dir.joinpath("wheels").as_posix()),
"PIP_DESTINATION_DIR": vistir.misc.fs_str(
cache_dir.joinpath("pkgs").as_posix()
),
}
for source in project.sources:
cmd = '{0} download "{1}" -i {2} -d {3}'.format(
escape_grouped_arguments(which_pip()),
package_name,
source["url"],
project.download_location,
)
c = delegator.run(cmd, env=pip_config)
if c.return_code == 0:
break
return c
def fallback_which(command, location=None, allow_global=False, system=False):
"""
A fallback implementation of the `which` utility command that relies exclusively on
searching the path for commands.
:param str command: The command to search for, optional
:param str location: The search location to prioritize (prepend to path), defaults to None
:param bool allow_global: Whether to search the global path, defaults to False
:param bool system: Whether to use the system python instead of pipenv's python, defaults to False
:raises ValueError: Raised if no command is provided
:raises TypeError: Raised if the command provided is not a string
:return: A path to the discovered command location
:rtype: str
"""
from .vendor.pythonfinder import Finder
if not command:
raise ValueError("fallback_which: Must provide a command to search for...")
if not isinstance(command, six.string_types):
raise TypeError("Provided command must be a string, received {0!r}".format(command))
global_search = system or allow_global
if location is None:
global_search = True
finder = Finder(system=False, global_search=global_search, path=location)
if is_python_command(command):
result = find_python(finder, command)
if result:
return result
result = finder.which(command)
if result:
return result.path.as_posix()
return ""
def which_pip(allow_global=False):
"""Returns the location of virtualenv-installed pip."""
location = None
if "VIRTUAL_ENV" in os.environ:
location = os.environ["VIRTUAL_ENV"]
if allow_global:
if location:
pip = which("pip", location=location)
if pip:
return pip
for p in ("pip", "pip3", "pip2"):
where = system_which(p)
if where:
return where
pip = which("pip")
if not pip:
pip = fallback_which("pip", allow_global=allow_global, location=location)
return pip
def system_which(command, mult=False):
"""Emulates the system's which. Returns None if not found."""
_which = "which -a" if not os.name == "nt" else "where"
os.environ = {
vistir.compat.fs_str(k): vistir.compat.fs_str(val)
for k, val in os.environ.items()
}
result = None
try:
c = delegator.run("{0} {1}".format(_which, command))
try:
            # The `which`/`where` utility itself was not found…
if c.return_code == 127:
click.echo(
"{}: the {} system utility is required for Pipenv to find Python installations properly."
"\n Please install it.".format(
crayons.red("Warning", bold=True), crayons.red(_which)
),
err=True,
)
assert c.return_code == 0
except AssertionError:
result = fallback_which(command, allow_global=True)
except TypeError:
if not result:
result = fallback_which(command, allow_global=True)
else:
if not result:
result = next(iter([c.out, c.err]), "").split("\n")
result = next(iter(result)) if not mult else result
return result
if not result:
result = fallback_which(command, allow_global=True)
result = [result] if mult else result
return result
def format_help(help):
"""Formats the help string."""
help = help.replace("Options:", str(crayons.normal("Options:", bold=True)))
help = help.replace(
"Usage: pipenv", str("Usage: {0}".format(crayons.normal("pipenv", bold=True)))
)
help = help.replace(" check", str(crayons.red(" check", bold=True)))
help = help.replace(" clean", str(crayons.red(" clean", bold=True)))
help = help.replace(" graph", str(crayons.red(" graph", bold=True)))
help = help.replace(" install", str(crayons.magenta(" install", bold=True)))
help = help.replace(" lock", str(crayons.green(" lock", bold=True)))
help = help.replace(" open", str(crayons.red(" open", bold=True)))
help = help.replace(" run", str(crayons.yellow(" run", bold=True)))
help = help.replace(" shell", str(crayons.yellow(" shell", bold=True)))
help = help.replace(" sync", str(crayons.green(" sync", bold=True)))
help = help.replace(" uninstall", str(crayons.magenta(" uninstall", bold=True)))
help = help.replace(" update", str(crayons.green(" update", bold=True)))
additional_help = """
Usage Examples:
Create a new project using Python 3.7, specifically:
$ {1}
Remove project virtualenv (inferred from current directory):
$ {9}
Install all dependencies for a project (including dev):
$ {2}
Create a lockfile containing pre-releases:
$ {6}
Show a graph of your installed dependencies:
$ {4}
Check your installed dependencies for security vulnerabilities:
$ {7}
Install a local setup.py into your virtual environment/Pipfile:
$ {5}
Use a lower-level pip command:
$ {8}
Commands:""".format(
crayons.red("pipenv --three"),
crayons.red("pipenv --python 3.7"),
crayons.red("pipenv install --dev"),
crayons.red("pipenv lock"),
crayons.red("pipenv graph"),
crayons.red("pipenv install -e ."),
crayons.red("pipenv lock --pre"),
crayons.red("pipenv check"),
crayons.red("pipenv run pip freeze"),
crayons.red("pipenv --rm"),
)
help = help.replace("Commands:", additional_help)
return help
def format_pip_error(error):
error = error.replace("Expected", str(crayons.green("Expected", bold=True)))
error = error.replace("Got", str(crayons.red("Got", bold=True)))
error = error.replace(
"THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS FILE",
str(
crayons.red(
"THESE PACKAGES DO NOT MATCH THE HASHES FROM Pipfile.lock!", bold=True
)
),
)
error = error.replace(
"someone may have tampered with them",
str(crayons.red("someone may have tampered with them")),
)
error = error.replace("option to pip install", "option to 'pipenv install'")
return error
def format_pip_output(out, r=None):
def gen(out):
for line in out.split("\n"):
# Remove requirements file information from pip9 output.
if "(from -r" in line:
yield line[: line.index("(from -r")]
else:
yield line
    out = "\n".join(gen(out))
return out
def warn_in_virtualenv():
# Only warn if pipenv isn't already active.
if environments.is_in_virtualenv() and not environments.is_quiet():
click.echo(
"{0}: Pipenv found itself running within a virtual environment, "
"so it will automatically use that environment, instead of "
"creating its own for any project. You can set "
"{1} to force pipenv to ignore that environment and create "
"its own instead. You can set {2} to suppress this "
"warning.".format(
crayons.green("Courtesy Notice"),
crayons.normal("PIPENV_IGNORE_VIRTUALENVS=1", bold=True),
crayons.normal("PIPENV_VERBOSITY=-1", bold=True),
),
err=True,
)
def ensure_lockfile(keep_outdated=False, pypi_mirror=None):
"""Ensures that the lockfile is up-to-date."""
if not keep_outdated:
keep_outdated = project.settings.get("keep_outdated")
# Write out the lockfile if it doesn't exist, but not if the Pipfile is being ignored
if project.lockfile_exists:
old_hash = project.get_lockfile_hash()
new_hash = project.calculate_pipfile_hash()
if new_hash != old_hash:
click.echo(
crayons.red(
fix_utf8("Pipfile.lock ({0}) out of date, updating to ({1})…".format(
old_hash[-6:], new_hash[-6:]
)),
bold=True,
),
err=True,
)
do_lock(keep_outdated=keep_outdated, pypi_mirror=pypi_mirror)
else:
do_lock(keep_outdated=keep_outdated, pypi_mirror=pypi_mirror)
def do_py(system=False):
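    """Prints the path to the project's Python interpreter."""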
if not project.virtualenv_exists:
click.echo(
"{}({}){}".format(
crayons.red("No virtualenv has been created for this project "),
crayons.white(project.project_directory, bold=True),
crayons.red(" yet!")
),
err=True,
)
return
try:
click.echo(which("python", allow_global=system))
except AttributeError:
click.echo(crayons.red("No project found!"))
def do_outdated(pypi_mirror=None, pre=False, clear=False):
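    """Compares installed package versions against a freshly resolved (but
    unwritten) lockfile and reports anything that is out of date."""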
# TODO: Allow --skip-lock here?
from .vendor.requirementslib.models.requirements import Requirement
from .vendor.requirementslib.models.utils import get_version
from .vendor.packaging.utils import canonicalize_name
from .vendor.vistir.compat import Mapping
from collections import namedtuple
packages = {}
package_info = namedtuple("PackageInfo", ["name", "installed", "available"])
installed_packages = project.environment.get_installed_packages()
    outdated_packages = {
        canonicalize_name(pkg.project_name): package_info(
            pkg.project_name, pkg.parsed_version, pkg.latest_version
        )
        for pkg in project.environment.get_outdated_packages()
    }
reverse_deps = project.environment.reverse_dependencies()
for result in installed_packages:
dep = Requirement.from_line(str(result.as_requirement()))
packages.update(dep.as_pipfile())
updated_packages = {}
lockfile = do_lock(clear=clear, pre=pre, write=False, pypi_mirror=pypi_mirror)
for section in ("develop", "default"):
for package in lockfile[section]:
try:
updated_packages[package] = lockfile[section][package]["version"]
except KeyError:
pass
outdated = []
skipped = []
for package in packages:
norm_name = pep423_name(package)
if norm_name in updated_packages:
if updated_packages[norm_name] != packages[package]:
outdated.append(
package_info(package, updated_packages[norm_name], packages[package])
)
elif canonicalize_name(package) in outdated_packages:
skipped.append(outdated_packages[canonicalize_name(package)])
for package, old_version, new_version in skipped:
name_in_pipfile = project.get_package_name_in_pipfile(package)
pipfile_version_text = ""
required = ""
version = None
if name_in_pipfile:
version = get_version(project.packages[name_in_pipfile])
reverse_deps = reverse_deps.get(name_in_pipfile)
if isinstance(reverse_deps, Mapping) and "required" in reverse_deps:
required = " {0} required".format(reverse_deps["required"])
if version:
pipfile_version_text = " ({0} set in Pipfile)".format(version)
else:
pipfile_version_text = " (Unpinned in Pipfile)"
click.echo(
crayons.yellow(
"Skipped Update of Package {0!s}: {1!s} installed,{2!s}{3!s}, "
"{4!s} available.".format(
package, old_version, required, pipfile_version_text, new_version
)
), err=True
)
if not outdated:
click.echo(crayons.green("All packages are up to date!", bold=True))
sys.exit(0)
for package, new_version, old_version in outdated:
click.echo(
"Package {0!r} out-of-date: {1!r} installed, {2!r} available.".format(
package, old_version, new_version
)
)
sys.exit(bool(outdated))
def do_install(
packages=False,
editable_packages=False,
index_url=False,
extra_index_url=False,
dev=False,
three=False,
python=False,
pypi_mirror=None,
system=False,
lock=True,
ignore_pipfile=False,
skip_lock=False,
requirements=False,
sequential=False,
pre=False,
code=False,
deploy=False,
keep_outdated=False,
selective_upgrade=False,
site_packages=None,
):
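    """Executes the ``install`` functionality: ensures the project and
    virtualenv exist, imports any provided requirements file, installs the
    requested packages (or everything from the Pipfile when none are given)
    and records them in the Pipfile."""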
from .environments import PIPENV_VIRTUALENV, PIPENV_USE_SYSTEM
from .vendor.pip_shims.shims import PipError
requirements_directory = vistir.path.create_tracked_tempdir(
suffix="-requirements", prefix="pipenv-"
)
warnings.filterwarnings("default", category=vistir.compat.ResourceWarning)
if selective_upgrade:
keep_outdated = True
packages = packages if packages else []
editable_packages = editable_packages if editable_packages else []
package_args = [p for p in packages if p] + [p for p in editable_packages if p]
skip_requirements = False
# Don't search for requirements.txt files if the user provides one
if requirements or package_args or project.pipfile_exists:
skip_requirements = True
concurrent = not sequential
    # Ensure that a virtualenv and a Pipfile are available.
ensure_project(
three=three,
python=python,
system=system,
warn=True,
deploy=deploy,
skip_requirements=skip_requirements,
pypi_mirror=pypi_mirror,
site_packages=site_packages,
)
# Don't attempt to install develop and default packages if Pipfile is missing
if not project.pipfile_exists and not (package_args or dev) and not code:
if not (ignore_pipfile or deploy):
raise exceptions.PipfileNotFound(project.path_to("Pipfile"))
elif ((skip_lock and deploy) or ignore_pipfile) and not project.lockfile_exists:
raise exceptions.LockfileNotFound(project.path_to("Pipfile.lock"))
# Load the --pre settings from the Pipfile.
if not pre:
pre = project.settings.get("allow_prereleases")
if not keep_outdated:
keep_outdated = project.settings.get("keep_outdated")
remote = requirements and is_valid_url(requirements)
# Warn and exit if --system is used without a pipfile.
if (system and package_args) and not (PIPENV_VIRTUALENV):
raise exceptions.SystemUsageError
# Automatically use an activated virtualenv.
if PIPENV_USE_SYSTEM:
system = True
# Check if the file is remote or not
if remote:
click.echo(
crayons.normal(
fix_utf8("Remote requirements file provided! Downloading…"), bold=True
),
err=True,
)
fd = vistir.path.create_tracked_tempfile(
prefix="pipenv-", suffix="-requirement.txt", dir=requirements_directory
)
temp_reqs = fd.name
requirements_url = requirements
# Download requirements file
try:
download_file(requirements, temp_reqs)
except IOError:
fd.close()
os.unlink(temp_reqs)
click.echo(
crayons.red(
u"Unable to find requirements file at {0}.".format(
crayons.normal(requirements)
)
),
err=True,
)
sys.exit(1)
finally:
fd.close()
# Replace the url with the temporary requirements file
requirements = temp_reqs
remote = True
if requirements:
error, traceback = None, None
click.echo(
crayons.normal(
fix_utf8("Requirements file provided! Importing into Pipfile…"), bold=True
),
err=True,
)
try:
import_requirements(r=project.path_to(requirements), dev=dev)
except (UnicodeDecodeError, PipError) as e:
# Don't print the temp file path if remote since it will be deleted.
req_path = requirements_url if remote else project.path_to(requirements)
error = (
u"Unexpected syntax in {0}. Are you sure this is a "
"requirements.txt style file?".format(req_path)
)
traceback = e
except AssertionError as e:
error = (
u"Requirements file doesn't appear to exist. Please ensure the file exists in your "
"project directory or you provided the correct path."
)
traceback = e
finally:
# If requirements file was provided by remote url delete the temporary file
if remote:
fd.close() # Close for windows to allow file cleanup.
os.remove(temp_reqs)
if error and traceback:
click.echo(crayons.red(error))
click.echo(crayons.blue(str(traceback)), err=True)
sys.exit(1)
if code:
click.echo(
crayons.normal(fix_utf8("Discovering imports from local codebase…"), bold=True)
)
for req in import_from_code(code):
click.echo(" Found {0}!".format(crayons.green(req)))
project.add_package_to_pipfile(req)
# Allow more than one package to be provided.
package_args = [p for p in packages] + [
"-e {0}".format(pkg) for pkg in editable_packages
]
# Support for --selective-upgrade.
# We should do this part first to make sure that we actually do selectively upgrade
# the items specified
if selective_upgrade:
from .vendor.requirementslib.models.requirements import Requirement
for i, package in enumerate(package_args[:]):
section = project.packages if not dev else project.dev_packages
package = Requirement.from_line(package)
package__name, package__val = package.pipfile_entry
try:
if not is_star(section[package__name]) and is_star(package__val):
# Support for VCS dependencies.
package_args[i] = convert_deps_to_pip(
{package__name: section[package__name]}, project=project, r=False
)[0]
except KeyError:
pass
# Install all dependencies, if none was provided.
# This basically ensures that we have a pipfile and lockfile, then it locks and
# installs from the lockfile
if not packages and not editable_packages:
# Update project settings with pre preference.
if pre:
project.update_settings({"allow_prereleases": pre})
do_init(
dev=dev,
allow_global=system,
ignore_pipfile=ignore_pipfile,
system=system,
skip_lock=skip_lock,
concurrent=concurrent,
deploy=deploy,
pre=pre,
requirements_dir=requirements_directory,
pypi_mirror=pypi_mirror,
keep_outdated=keep_outdated
)
    # The user passed in specific packages: install each one and add it to the Pipfile.
else:
from .vendor.requirementslib.models.requirements import Requirement
# make a tuple of (display_name, entry)
pkg_list = packages + ['-e {0}'.format(pkg) for pkg in editable_packages]
if not system and not project.virtualenv_exists:
do_init(
dev=dev,
system=system,
allow_global=system,
concurrent=concurrent,
keep_outdated=keep_outdated,
requirements_dir=requirements_directory,
deploy=deploy,
pypi_mirror=pypi_mirror,
skip_lock=skip_lock,
)
pip_shims_module = os.environ.pop("PIP_SHIMS_BASE_MODULE", None)
for pkg_line in pkg_list:
click.echo(
crayons.normal(
fix_utf8("Installing {0}…".format(crayons.green(pkg_line, bold=True))),
bold=True,
)
)
# pip install:
with vistir.contextmanagers.temp_environ(), create_spinner("Installing...") as sp:
if not system:
os.environ["PIP_USER"] = vistir.compat.fs_str("0")
if "PYTHONHOME" in os.environ:
del os.environ["PYTHONHOME"]
sp.text = "Resolving {0}...".format(pkg_line)
try:
pkg_requirement = Requirement.from_line(pkg_line)
except ValueError as e:
sp.write_err(vistir.compat.fs_str("{0}: {1}".format(crayons.red("WARNING"), e)))
sp.red.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format("Installation Failed"))
sys.exit(1)
if index_url:
pkg_requirement.index = index_url
no_deps = False
sp.text = "Installing..."
try:
sp.text = "Installing {0}...".format(pkg_requirement.name)
if environments.is_verbose():
sp.hide_and_write("Installing package: {0}".format(pkg_requirement.as_line(include_hashes=False)))
c = pip_install(
pkg_requirement,
ignore_hashes=True,
allow_global=system,
selective_upgrade=selective_upgrade,
no_deps=no_deps,
pre=pre,
requirements_dir=requirements_directory,
index=index_url,
extra_indexes=extra_index_url,
pypi_mirror=pypi_mirror,
)
if not c.ok:
sp.write_err(
u"{0} An error occurred while installing {1}!".format(
crayons.red(u"Error: ", bold=True), crayons.green(pkg_line)
),
)
sp.write_err(
vistir.compat.fs_str(u"Error text: {0}".format(c.out))
)
sp.write_err(crayons.blue(vistir.compat.fs_str(format_pip_error(c.err))))
if environments.is_verbose():
sp.write_err(crayons.blue(vistir.compat.fs_str(format_pip_output(c.out))))
if "setup.py egg_info" in c.err:
sp.write_err(vistir.compat.fs_str(
"This is likely caused by a bug in {0}. "
"Report this to its maintainers.".format(
crayons.green(pkg_requirement.name)
)
))
sp.red.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format("Installation Failed"))
sys.exit(1)
except (ValueError, RuntimeError) as e:
sp.write_err(vistir.compat.fs_str(
"{0}: {1}".format(crayons.red("WARNING"), e),
))
sp.red.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format(
"Installation Failed",
))
sys.exit(1)
# Warn if --editable wasn't passed.
if pkg_requirement.is_vcs and not pkg_requirement.editable and not PIPENV_RESOLVE_VCS:
sp.write_err(
"{0}: You installed a VCS dependency in non-editable mode. "
"This will work fine, but sub-dependencies will not be resolved by {1}."
"\n To enable this sub-dependency functionality, specify that this dependency is editable."
"".format(
crayons.red("Warning", bold=True),
crayons.red("$ pipenv lock"),
)
)
sp.write(vistir.compat.fs_str(
u"{0} {1} {2} {3}{4}".format(
crayons.normal(u"Adding", bold=True),
crayons.green(u"{0}".format(pkg_requirement.name), bold=True),
crayons.normal(u"to Pipfile's", bold=True),
crayons.red(u"[dev-packages]" if dev else u"[packages]", bold=True),
crayons.normal(fix_utf8("…"), bold=True),
)
))
# Add the package to the Pipfile.
try:
project.add_package_to_pipfile(pkg_requirement, dev)
except ValueError:
import traceback
sp.write_err(
"{0} {1}".format(
crayons.red("Error:", bold=True), traceback.format_exc()
)
)
sp.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format(
"Failed adding package to Pipfile"
))
sp.ok(environments.PIPENV_SPINNER_OK_TEXT.format("Installation Succeeded"))
# Update project settings with pre preference.
if pre:
project.update_settings({"allow_prereleases": pre})
if pip_shims_module:
os.environ["PIP_SHIMS_BASE_MODULE"] = pip_shims_module
do_init(
dev=dev,
system=system,
allow_global=system,
concurrent=concurrent,
keep_outdated=keep_outdated,
requirements_dir=requirements_directory,
deploy=deploy,
pypi_mirror=pypi_mirror,
skip_lock=skip_lock,
)
sys.exit(0)
def do_uninstall(
packages=False,
editable_packages=False,
three=None,
python=False,
system=False,
lock=False,
all_dev=False,
all=False,
keep_outdated=False,
pypi_mirror=None,
ctx=None
):
from .environments import PIPENV_USE_SYSTEM
from .vendor.requirementslib.models.requirements import Requirement
from .vendor.packaging.utils import canonicalize_name
# Automatically use an activated virtualenv.
if PIPENV_USE_SYSTEM:
system = True
# Ensure that virtualenv is available.
# TODO: We probably shouldn't ensure a project exists if the outcome will be to just
# install things in order to remove them... maybe tell the user to install first?
ensure_project(three=three, python=python, pypi_mirror=pypi_mirror)
# Un-install all dependencies, if --all was provided.
if not any([packages, editable_packages, all_dev, all]):
raise exceptions.MissingParameter(
crayons.red("No package provided!"),
ctx=ctx, param_type="parameter",
)
editable_pkgs = [
Requirement.from_line("-e {0}".format(p)).name for p in editable_packages if p
]
packages = packages + editable_pkgs
package_names = [p for p in packages if p]
package_map = {
canonicalize_name(p): p for p in packages if p
}
installed_package_names = project.installed_package_names
# Intelligently detect if --dev should be used or not.
lockfile_packages = set()
if project.lockfile_exists:
project_pkg_names = project.lockfile_package_names
else:
project_pkg_names = project.pipfile_package_names
pipfile_remove = True
# Uninstall [dev-packages], if --dev was provided.
if all_dev:
if "dev-packages" not in project.parsed_pipfile and not project_pkg_names["dev"]:
click.echo(
crayons.normal(
"No {0} to uninstall.".format(crayons.red("[dev-packages]")),
bold=True,
)
)
return
click.echo(
crayons.normal(
fix_utf8("Un-installing {0}…".format(crayons.red("[dev-packages]"))), bold=True
)
)
package_names = project_pkg_names["dev"]
# Remove known "bad packages" from the list.
bad_pkgs = get_canonical_names(BAD_PACKAGES)
ignored_packages = bad_pkgs & set(list(package_map.keys()))
for ignored_pkg in ignored_packages:
if environments.is_verbose():
click.echo("Ignoring {0}.".format(ignored_pkg), err=True)
pkg_name_index = package_names.index(package_map[ignored_pkg])
del package_names[pkg_name_index]
used_packages = project_pkg_names["combined"] & installed_package_names
failure = False
packages_to_remove = set()
if all:
click.echo(
crayons.normal(
fix_utf8("Un-installing all {0} and {1}…".format(
crayons.red("[dev-packages]"),
crayons.red("[packages]"),
)), bold=True
)
)
do_purge(bare=False, allow_global=system)
sys.exit(0)
if all_dev:
package_names = project_pkg_names["dev"]
else:
package_names = set(package_names)
selected_pkg_map = {
canonicalize_name(p): p for p in package_names
}
packages_to_remove = [
p for normalized, p in selected_pkg_map.items()
if normalized in (used_packages - bad_pkgs)
]
pip_path = None
for normalized, package_name in selected_pkg_map.items():
click.echo(
crayons.white(
fix_utf8("Uninstalling {0}…".format(package_name)), bold=True
)
)
# Uninstall the package.
if package_name in packages_to_remove:
with project.environment.activated():
if pip_path is None:
pip_path = which_pip(allow_global=system)
cmd = [pip_path, "uninstall", package_name, "-y"]
c = run_command(cmd)
click.echo(crayons.blue(c.out))
if c.return_code != 0:
failure = True
if not failure and pipfile_remove:
in_packages = project.get_package_name_in_pipfile(package_name, dev=False)
in_dev_packages = project.get_package_name_in_pipfile(
package_name, dev=True
)
if normalized in lockfile_packages:
click.echo("{0} {1} {2} {3}".format(
crayons.blue("Removing"),
crayons.green(package_name),
crayons.blue("from"),
crayons.white(fix_utf8("Pipfile.lock…")))
)
lockfile = project.get_or_create_lockfile()
if normalized in lockfile.default:
del lockfile.default[normalized]
if normalized in lockfile.develop:
del lockfile.develop[normalized]
lockfile.write()
if not (in_dev_packages or in_packages):
if normalized in lockfile_packages:
continue
click.echo(
"No package {0} to remove from Pipfile.".format(
crayons.green(package_name)
)
)
continue
click.echo(
fix_utf8("Removing {0} from Pipfile…".format(crayons.green(package_name)))
)
# Remove package from both packages and dev-packages.
if in_dev_packages:
project.remove_package_from_pipfile(package_name, dev=True)
if in_packages:
project.remove_package_from_pipfile(package_name, dev=False)
if lock:
do_lock(system=system, keep_outdated=keep_outdated, pypi_mirror=pypi_mirror)
sys.exit(int(failure))
def do_shell(three=None, python=False, fancy=False, shell_args=None, pypi_mirror=None):
# Ensure that virtualenv is available.
ensure_project(
three=three, python=python, validate=False, pypi_mirror=pypi_mirror,
)
# Support shell compatibility mode.
if PIPENV_SHELL_FANCY:
fancy = True
from .shells import choose_shell
shell = choose_shell()
click.echo(fix_utf8("Launching subshell in virtual environment…"), err=True)
fork_args = (
project.virtualenv_location,
project.project_directory,
shell_args,
)
# Set an environment variable, so we know we're in the environment.
# Only set PIPENV_ACTIVE after finishing reading virtualenv_location
# otherwise its value will be changed
os.environ["PIPENV_ACTIVE"] = vistir.misc.fs_str("1")
os.environ.pop("PIP_SHIMS_BASE_MODULE", None)
if fancy:
shell.fork(*fork_args)
return
try:
shell.fork_compat(*fork_args)
except (AttributeError, ImportError):
click.echo(fix_utf8(
"Compatibility mode not supported. "
"Trying to continue as well-configured shell…"),
err=True,
)
shell.fork(*fork_args)
def _inline_activate_virtualenv():
try:
activate_this = which("activate_this.py")
if not activate_this or not os.path.exists(activate_this):
raise exceptions.VirtualenvActivationException()
with open(activate_this) as f:
code = compile(f.read(), activate_this, "exec")
exec(code, dict(__file__=activate_this))
# Catch all errors, just in case.
except Exception:
click.echo(
u"{0}: There was an unexpected error while activating your "
u"virtualenv. Continuing anyway...".format(
crayons.red("Warning", bold=True)
),
err=True,
)
def _inline_activate_venv():
"""Built-in venv doesn't have activate_this.py, but doesn't need it anyway.
As long as we find the correct executable, built-in venv sets up the
environment automatically.
See: https://bugs.python.org/issue21496#msg218455
"""
components = []
for name in ("bin", "Scripts"):
bindir = os.path.join(project.virtualenv_location, name)
if os.path.exists(bindir):
components.append(bindir)
if "PATH" in os.environ:
components.append(os.environ["PATH"])
os.environ["PATH"] = os.pathsep.join(components)
def inline_activate_virtual_environment():
root = project.virtualenv_location
if os.path.exists(os.path.join(root, "pyvenv.cfg")):
_inline_activate_venv()
else:
_inline_activate_virtualenv()
if "VIRTUAL_ENV" not in os.environ:
os.environ["VIRTUAL_ENV"] = vistir.misc.fs_str(root)
def _launch_windows_subprocess(script):
import subprocess
command = system_which(script.command)
options = {"universal_newlines": True}
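# universal_newlines=True makes the subprocess pipes operate in text mode.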
# Command not found, maybe this is a shell built-in?
if not command:
return subprocess.Popen(script.cmdify(), shell=True, **options)
# Try to use CreateProcess directly if possible. Specifically catch
# Windows error 193 "Command is not a valid Win32 application" to handle
# a "command" that is non-executable. See pypa/pipenv#2727.
try:
return subprocess.Popen([command] + script.args, **options)
except WindowsError as e:
if e.winerror != 193:
raise
# Try shell mode to use Windows's file association for file launch.
return subprocess.Popen(script.cmdify(), shell=True, **options)
def do_run_nt(script):
p = _launch_windows_subprocess(script)
p.communicate()
sys.exit(p.returncode)
def do_run_posix(script, command):
command_path = system_which(script.command)
if not command_path:
if project.has_script(command):
click.echo(
"{0}: the command {1} (from {2}) could not be found within {3}."
"".format(
crayons.red("Error", bold=True),
crayons.red(script.command),
crayons.normal(command, bold=True),
crayons.normal("PATH", bold=True),
),
err=True,
)
else:
click.echo(
"{0}: the command {1} could not be found within {2} or Pipfile's {3}."
"".format(
crayons.red("Error", bold=True),
crayons.red(command),
crayons.normal("PATH", bold=True),
crayons.normal("[scripts]", bold=True),
),
err=True,
)
sys.exit(1)
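# os.execl replaces the current process with the target command, so nothing after this call runs.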
os.execl(
command_path, command_path, *[os.path.expandvars(arg) for arg in script.args]
)
def do_run(command, args, three=None, python=False, pypi_mirror=None):
"""Attempt to run command either pulling from project or interpreting as executable.
Args are appended to the command in [scripts] section of project if found.
"""
from .cmdparse import ScriptEmptyError
# Ensure that virtualenv is available.
ensure_project(
three=three, python=python, validate=False, pypi_mirror=pypi_mirror,
)
load_dot_env()
previous_pip_shims_module = os.environ.pop("PIP_SHIMS_BASE_MODULE", None)
# Activate virtualenv under the current interpreter's environment
inline_activate_virtual_environment()
# Set an environment variable, so we know we're in the environment.
# Only set PIPENV_ACTIVE after finishing reading virtualenv_location
# such as in inline_activate_virtual_environment
# otherwise its value will be changed
previous_pipenv_active_value = os.environ.get("PIPENV_ACTIVE")
os.environ["PIPENV_ACTIVE"] = vistir.misc.fs_str("1")
os.environ.pop("PIP_SHIMS_BASE_MODULE", None)
try:
script = project.build_script(command, args)
cmd_string = ' '.join([script.command] + script.args)
if environments.is_verbose():
click.echo(crayons.normal("$ {0}".format(cmd_string)), err=True)
except ScriptEmptyError:
click.echo("Can't run script {0!r}-it's empty?", err=True)
run_args = [script]
run_kwargs = {}
if os.name == "nt":
run_fn = do_run_nt
else:
run_fn = do_run_posix
run_kwargs = {"command": command}
try:
run_fn(*run_args, **run_kwargs)
finally:
os.environ.pop("PIPENV_ACTIVE", None)
if previous_pipenv_active_value is not None:
os.environ["PIPENV_ACTIVE"] = previous_pipenv_active_value
if previous_pip_shims_module is not None:
os.environ["PIP_SHIMS_BASE_MODULE"] = previous_pip_shims_module
def do_check(
three=None,
python=False,
system=False,
unused=False,
ignore=None,
args=None,
pypi_mirror=None,
):
from pipenv.vendor.vistir.compat import JSONDecodeError
from pipenv.vendor.first import first
if not system:
# Ensure that virtualenv is available.
ensure_project(
three=three,
python=python,
validate=False,
warn=False,
pypi_mirror=pypi_mirror,
)
if not args:
args = []
if unused:
deps_required = [k.lower() for k in project.packages.keys()]
deps_needed = [k.lower() for k in import_from_code(unused)]
for dep in deps_needed:
try:
deps_required.remove(dep)
except ValueError:
pass
if deps_required:
click.echo(
crayons.normal(
"The following dependencies appear unused, and may be safe for removal:"
)
)
for dep in deps_required:
click.echo(" - {0}".format(crayons.green(dep)))
sys.exit(1)
else:
sys.exit(0)
click.echo(crayons.normal(decode_for_output("Checking PEP 508 requirements…"), bold=True))
pep508checker_path = pep508checker.__file__.rstrip("cdo")
safety_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "patched", "safety.zip"
)
if not system:
python = which("python")
else:
python = first(system_which(p) for p in ("python", "python3", "python2"))
if not python:
click.echo(crayons.red("The Python interpreter can't be found."), err=True)
sys.exit(1)
_cmd = [vistir.compat.Path(python).as_posix()]
# Run the PEP 508 checker in the virtualenv.
cmd = _cmd + [vistir.compat.Path(pep508checker_path).as_posix()]
c = run_command(cmd)
if c.return_code is not None:
try:
results = simplejson.loads(c.out.strip())
except JSONDecodeError:
click.echo("{0}\n{1}\n{2}".format(
crayons.white(decode_for_output("Failed parsing pep508 results: "), bold=True),
c.out.strip(),
c.err.strip()
))
sys.exit(1)
# Load the pipfile.
p = pipfile.Pipfile.load(project.pipfile_location)
failed = False
# Assert each specified requirement.
for marker, specifier in p.data["_meta"]["requires"].items():
if marker in results:
try:
assert results[marker] == specifier
except AssertionError:
failed = True
click.echo(
"Specifier {0} does not match {1} ({2})."
"".format(
crayons.green(marker),
crayons.blue(specifier),
crayons.red(results[marker]),
),
err=True,
)
if failed:
click.echo(crayons.red("Failed!"), err=True)
sys.exit(1)
else:
click.echo(crayons.green("Passed!"))
click.echo(crayons.normal(
decode_for_output("Checking installed package safety…"), bold=True)
)
if ignore:
if not isinstance(ignore, (tuple, list)):
ignore = [ignore]
ignored = [["--ignore", cve] for cve in ignore]
click.echo(
crayons.normal(
"Notice: Ignoring CVE(s) {0}".format(crayons.yellow(", ".join(ignore)))
),
err=True,
)
else:
ignored = ""
key = "--key={0}".format(PIPENV_PYUP_API_KEY)
cmd = _cmd + [safety_path, "check", "--json", key]
if ignored:
for cve in ignored:
cmd += cve
c = run_command(cmd, catch_exceptions=False)
try:
results = simplejson.loads(c.out)
except (ValueError, JSONDecodeError):
raise exceptions.JSONParseError(c.out, c.err)
except Exception:
raise exceptions.PipenvCmdError(c.cmd, c.out, c.err, c.return_code)
if c.ok:
click.echo(crayons.green("All good!"))
sys.exit(0)
for (package, resolved, installed, description, vuln) in results:
click.echo(
"{0}: {1} {2} resolved ({3} installed)!".format(
crayons.normal(vuln, bold=True),
crayons.green(package),
crayons.red(resolved, bold=False),
crayons.red(installed, bold=True),
)
)
click.echo("{0}".format(description))
click.echo()
else:
sys.exit(1)
def do_graph(bare=False, json=False, json_tree=False, reverse=False):
from pipenv.vendor.vistir.compat import JSONDecodeError
import pipdeptree
pipdeptree_path = pipdeptree.__file__.rstrip("cdo")
try:
python_path = which("python")
except AttributeError:
click.echo(
u"{0}: {1}".format(
crayons.red("Warning", bold=True),
u"Unable to display currently-installed dependency graph information here. "
u"Please run within a Pipenv project.",
),
err=True,
)
sys.exit(1)
except RuntimeError:
pass
else:
python_path = vistir.compat.Path(python_path).as_posix()
pipdeptree_path = vistir.compat.Path(pipdeptree_path).as_posix()
if reverse and json:
click.echo(
u"{0}: {1}".format(
crayons.red("Warning", bold=True),
u"Using both --reverse and --json together is not supported. "
u"Please select one of the two options.",
),
err=True,
)
sys.exit(1)
if reverse and json_tree:
click.echo(
u"{0}: {1}".format(
crayons.red("Warning", bold=True),
u"Using both --reverse and --json-tree together is not supported. "
u"Please select one of the two options.",
),
err=True,
)
sys.exit(1)
if json and json_tree:
click.echo(
u"{0}: {1}".format(
crayons.red("Warning", bold=True),
u"Using both --json and --json-tree together is not supported. "
u"Please select one of the two options.",
),
err=True,
)
sys.exit(1)
flag = ""
if json:
flag = "--json"
if json_tree:
flag = "--json-tree"
if reverse:
flag = "--reverse"
if not project.virtualenv_exists:
click.echo(
u"{0}: No virtualenv has been created for this project yet! Consider "
u"running {1} first to automatically generate one for you or see "
u"{2} for further instructions.".format(
crayons.red("Warning", bold=True),
crayons.green("`pipenv install`"),
crayons.green("`pipenv install --help`"),
),
err=True,
)
sys.exit(1)
cmd_args = [python_path, pipdeptree_path, flag, "-l"]
c = run_command(cmd_args)
# Run dep-tree.
if not bare:
if json:
data = []
try:
parsed = simplejson.loads(c.out.strip())
except JSONDecodeError:
raise exceptions.JSONParseError(c.out, c.err)
else:
for d in parsed:
if d["package"]["key"] not in BAD_PACKAGES:
data.append(d)
click.echo(simplejson.dumps(data, indent=4))
sys.exit(0)
elif json_tree:
def traverse(obj):
if isinstance(obj, list):
return [
traverse(package)
for package in obj
if package["key"] not in BAD_PACKAGES
]
else:
obj["dependencies"] = traverse(obj["dependencies"])
return obj
try:
parsed = simplejson.loads(c.out.strip())
except JSONDecodeError:
raise exceptions.JSONParseError(c.out, c.err)
else:
data = traverse(parsed)
click.echo(simplejson.dumps(data, indent=4))
sys.exit(0)
else:
for line in c.out.strip().split("\n"):
# Ignore bad packages as top level.
# TODO: This should probably check '"==" in line' and use line.partition("==") instead of split.
if line.split("==")[0] in BAD_PACKAGES and not reverse:
continue
# Bold top-level packages.
if not line.startswith(" "):
click.echo(crayons.normal(line, bold=True))
# Echo the rest.
else:
click.echo(crayons.normal(line, bold=False))
else:
click.echo(c.out)
if c.return_code != 0:
click.echo(
"{0} {1}".format(
crayons.red("ERROR: ", bold=True),
crayons.white("{0}".format(c.err, bold=True)),
),
err=True,
)
# Return its return code.
sys.exit(c.return_code)
def do_sync(
ctx,
dev=False,
three=None,
python=None,
bare=False,
dont_upgrade=False,
user=False,
clear=False,
unused=False,
sequential=False,
pypi_mirror=None,
system=False,
deploy=False,
):
# The lock file needs to exist because sync won't write to it.
if not project.lockfile_exists:
raise exceptions.LockfileNotFound("Pipfile.lock")
# Ensure that virtualenv is available if not system.
ensure_project(
three=three,
python=python,
validate=False,
deploy=deploy,
pypi_mirror=pypi_mirror,
)
# Install everything.
requirements_dir = vistir.path.create_tracked_tempdir(
suffix="-requirements", prefix="pipenv-"
)
do_init(
dev=dev,
concurrent=(not sequential),
requirements_dir=requirements_dir,
ignore_pipfile=True, # Don't check if Pipfile and lock match.
pypi_mirror=pypi_mirror,
deploy=deploy,
system=system,
)
if not bare:
click.echo(crayons.green("All dependencies are now up-to-date!"))
def do_clean(
ctx, three=None, python=None, dry_run=False, bare=False, pypi_mirror=None,
system=False
):
# Ensure that virtualenv is available.
from packaging.utils import canonicalize_name
ensure_project(three=three, python=python, validate=False, pypi_mirror=pypi_mirror)
ensure_lockfile(pypi_mirror=pypi_mirror)
# Make sure that the virtualenv's site packages are configured correctly
# otherwise we may end up removing from the global site packages directory
installed_package_names = project.installed_package_names.copy()
# Remove known "bad packages" from the list.
for bad_package in BAD_PACKAGES:
if canonicalize_name(bad_package) in installed_package_names:
if environments.is_verbose():
click.echo("Ignoring {0}.".format(bad_package), err=True)
installed_package_names.remove(canonicalize_name(bad_package))
# Intelligently detect if --dev should be used or not.
locked_packages = {
canonicalize_name(pkg) for pkg in project.lockfile_package_names["combined"]
}
for used_package in locked_packages:
if used_package in installed_package_names:
installed_package_names.remove(used_package)
failure = False
cmd = [which_pip(allow_global=system), "uninstall", "-y", "-qq"]
for apparent_bad_package in installed_package_names:
if dry_run and not bare:
click.echo(apparent_bad_package)
else:
if not bare:
click.echo(
crayons.white(
fix_utf8("Uninstalling {0}…".format(apparent_bad_package)), bold=True
)
)
# Uninstall the package.
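# Note: the pip command is rebuilt per package here, so the pre-built cmd with allow_global/-qq above is effectively unused.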
cmd = [which_pip(), "uninstall", apparent_bad_package, "-y"]
c = run_command(cmd)
if c.return_code != 0:
failure = True
sys.exit(int(failure))
| []
| []
| [
"PIP_SHIMS_BASE_MODULE",
"GIT_CONFIG",
"PIP_TRUSTED_HOSTS",
"PIPENV_ACTIVE",
"PIP_SRC",
"PIP_SRC_DIR",
"PYTHONHOME",
"VIRTUAL_ENV",
"PIP_USER",
"PATH"
]
| [] | ["PIP_SHIMS_BASE_MODULE", "GIT_CONFIG", "PIP_TRUSTED_HOSTS", "PIPENV_ACTIVE", "PIP_SRC", "PIP_SRC_DIR", "PYTHONHOME", "VIRTUAL_ENV", "PIP_USER", "PATH"] | python | 10 | 0 | |
run_multiple_alpha_experiment.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
One-player Alpha Zero
@author: Thomas Moerland, Delft University of Technology
"""
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import time
from utils.parser_setup import setup_parser, parse_game_params, parse_alg_name
plt.style.use('ggplot')
from agent import agent
#### Command line call, parsing and plotting ##
colors = ['r', 'b', 'g', 'orange', 'c', 'k', 'purple', 'y']
markers = ['o', 's', 'v', 'D', 'x', '*', '|', '+', '^', '2', '1', '3', '4']
budgets = [1000, 5000, 10000, 20000, 35000]
if __name__ == '__main__':
# Obtain the command_line arguments
args = setup_parser()
start_time = time.time()
time_str = str(start_time)
out_dir = 'logs/' + args.game + '/' + time_str + '/'
def pre_process():
from gym.envs.registration import register
try:
register(
id='Blackjack_pi-v0',
entry_point='envs.blackjack_pi:BlackjackEnv',
)
except:
print("Something wrong registering Blackjack environment")
# Disable GPU acceleration if not specifically requested
if not args.gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
fun_args = [args.game, args.n_ep, args.n_mcts, args.max_ep_len, args.lr, args.c, args.gamma,
args.data_size, args.batch_size, args.temp, args.n_hidden_layers, args.n_hidden_units,
True, args.eval_freq, args.eval_episodes, args.n_epochs]
exps = []
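# Note: fun_args and exps are assembled here but are not referenced later in this script.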
game_params = parse_game_params(args)
# Define the name of the agent to be stored in the dataframe
if args.stochastic:
agent_name = "dpw_"
elif args.particles > 0:
agent_name = str(args.particles) + "_pf_"
else:
agent_name = "classic_"
if args.mcts_only:
agent_name += "mcts_only"
else:
agent_name += "alphazero"
min_alpha = 0.5
delta_alpha = 0.1
max_alpha = 1.
for budget in budgets:
alpha = min_alpha
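# The 0.01 tolerance below guards against floating-point drift skipping the final alpha value.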
while alpha <= max_alpha + 0.01:
# If required, prepare the budget scheduler parameters
scheduler_params = None
print("Performing experiment with budget " + str(budget) + " alpha:" + str(alpha) + "!")
print()
if args.budget_scheduler:
assert args.min_budget < budget, "Minimum budget for the scheduler cannot be larger " \
"than the overall budget"
assert args.slope >= 1.0, "Slope less than 1 causes weird schedule function shapes"
scheduler_params = {"slope": args.slope,
"min_budget": args.min_budget,
"mid": args.mid}
alg = parse_alg_name(args)
out_dir = "logs/" + args.game + "/alpha_experiment/"
if not args.budget_scheduler:
out_dir += 'no_scheduler/'
out_dir += str(alpha)[:3]
if args.game == 'RiverSwim-continuous':
out_dir += "/" + "fail_" + str(args.fail_prob)
out_dir += "/" + alg + str(budget) + '/' + time_str + '/'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Run experiments
n_mcts = np.inf
out_dir_i = out_dir + '/'
# Run the algorithm
episode_returns, timepoints, a_best, \
seed_best, R_best, offline_scores = agent(game=args.game,
n_ep=args.n_ep,
n_mcts=n_mcts,
max_ep_len=args.max_ep_len,
budget=budget,
lr=args.lr,
c=args.c,
gamma=args.gamma,
data_size=args.data_size,
batch_size=args.batch_size,
temp=args.temp,
n_hidden_layers=args.n_hidden_layers,
n_hidden_units=args.n_hidden_units,
stochastic=args.stochastic,
alpha=alpha,
numpy_dump_dir=out_dir_i,
visualize=False,
eval_freq=args.eval_freq,
eval_episodes=args.eval_episodes,
pre_process=None,
game_params=game_params,
n_epochs=args.n_epochs,
parallelize_evaluation=args.parallel,
mcts_only=args.mcts_only,
particles=args.particles,
n_workers=args.n_workers,
use_sampler=args.use_sampler,
unbiased=args.unbiased,
biased=args.biased,
variance=args.variance,
depth_based_bias=args.depth_based_bias,
max_workers=args.max_workers,
scheduler_params=scheduler_params,
out_dir=out_dir,
second_version=args.second_version,
third_version=args.third_version)
total_rewards = offline_scores[0][0]
undiscounted_returns = offline_scores[0][1]
evaluation_lenghts = offline_scores[0][2]
evaluation_pit_action_counts = offline_scores[0][3]
indices = []
returns = []
lens = []
rews = []
counts = []
gamma = args.gamma
# Compute the discounted return
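# i.e. disc_rew = sum(gamma**t * r_t for t, r_t in enumerate(r_list))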
for r_list in undiscounted_returns:
discount = 1
disc_rew = 0
for r in r_list:
disc_rew += discount * r
discount *= gamma
rews.append(disc_rew)
# Fill the lists for building the dataframe
for ret, length, count in zip(total_rewards, evaluation_lenghts, evaluation_pit_action_counts):
returns.append(ret)
lens.append(length)
indices.append(agent_name)
counts.append(count)
# Store the result of the experiment
data = {"agent": indices,
"total_reward": returns,
"discounted_reward": rews,
"length": lens,
"budget": [budget] * len(indices)}
# Store the count of pit stops only if analyzing Race Strategy problem
if "RaceStrategy" in args.game:
data["pit_count"] = counts
# Write the dataframe to csv
df = pd.DataFrame(data)
df.to_csv(out_dir + "/data.csv", header=True, index=False)
alpha += delta_alpha
alpha = round(alpha, 1)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
extra/slothclasses/munk.py | import discord
from discord.ext import commands, menus
from mysqldb import the_database, the_django_database
from .player import Player, Skill
from .enums import QuestEnum
from extra.menu import ConfirmSkill, SwitchTribePages
from extra import utils
import os
import asyncio
from datetime import datetime
from typing import List, Union, Dict, Any, Optional, Callable
from random import choice
bots_and_commands_channel_id = int(os.getenv('BOTS_AND_COMMANDS_CHANNEL_ID', 123))
approve_thumbnail_channel_id = int(os.getenv('APPROVE_THUMBNAIL_CHANNEL_ID', 123))
class Munk(Player):
emoji = '<:Munk:839498018712715284>'
def __init__(self, client) -> None:
self.client = client
@commands.Cog.listener(name='on_raw_reaction_add')
async def on_raw_reaction_add_munk(self, payload) -> None:
""" Checks reactions related to skill actions. """
# Checks if it wasn't a bot's reaction
if not payload.guild_id:
return
# Checks whether it's a valid member and not a bot
if not payload.member or payload.member.bot:
return
if payload.channel_id != approve_thumbnail_channel_id:
return
skill_action = await self.get_skill_action_by_message_id_and_skill_type(message_id=payload.message_id, skill_type='thumbnail_request')
if skill_action is not None:
emoji = str(payload.emoji)
# Approve the thumbnail request
if emoji == '✅':
await self.delete_skill_action_by_message_id(payload.message_id)
channel = self.client.get_channel(skill_action[5])
message = await channel.fetch_message(skill_action[4])
if message:
tribe = await self.get_tribe_info_by_user_id(user_id=skill_action[0])
message_embed = discord.Embed(
title="Thumbnail Approved!",
description=f"**<@{payload.user_id}>, approved your tribe `{tribe['name']}`'s thumbnail/logo, <@{skill_action[0]}>!**",
color=discord.Color.green(),
url=tribe['link']
)
message_embed.set_image(url=skill_action[8])
await self.bots_txt.send(content=f"<@{skill_action[0]}>", embed=message_embed)
await message.delete()
await self.update_tribe_thumbnail(user_id=skill_action[0], tribe_name=tribe['name'], link=skill_action[8])
elif emoji == '❌':
await self.delete_skill_action_by_message_id(payload.message_id)
channel = self.client.get_channel(skill_action[5])
message = await channel.fetch_message(skill_action[4])
if message:
tribe = await self.get_tribe_info_by_user_id(user_id=skill_action[0])
message_embed = discord.Embed(
title="Thumbnail Refused!",
description=f"**<@{payload.user_id}>, refused your tribe `{tribe['name']}`'s thumbnail/logo, <@{skill_action[0]}>!**",
color=discord.Color.red(),
url=tribe['link']
)
message_embed.set_image(url=skill_action[8])
await self.bots_txt.send(content=f"<@{skill_action[0]}>", embed=message_embed)
await message.delete()
@commands.command()
@Player.poisoned()
@Player.skill_on_cooldown()
@Player.skills_locked()
@Player.user_is_class('munk')
@Player.skill_mark()
async def munk(self, ctx, target: discord.Member = None) -> None:
""" Converts a user into a real Munk.
:param target: The person you want to convert to a Munk. """
if ctx.channel.id != bots_and_commands_channel_id:
return await ctx.send(f"**{ctx.author.mention}, you can only use this command in {self.bots_txt.mention}!**")
attacker = ctx.author
attacker_fx = await self.get_user_effects(attacker)
if 'knocked_out' in attacker_fx:
return await ctx.send(f"**{attacker.mention}, you can't use your skill, because you are knocked-out!**")
if not target:
return await ctx.send(f"**Please, choose a member to use the `Munk` skill, {attacker.mention}!**")
if target.bot:
return await ctx.send(f"**You cannot convert a bot into a `Munk`, {attacker.mention}!**")
if attacker.id == target.id:
return await ctx.send(f"**You cannot convert yourself, since you are already a `Munk`, {attacker.mention}!**")
target_fx = await self.get_user_effects(target)
if 'munk' in target_fx:
return await ctx.send(f"**{target.mention} is already a `Munk`, {attacker.mention}!**")
target_sloth_profile = await self.get_sloth_profile(target.id)
if not target_sloth_profile:
return await ctx.send(f"**You cannot convert someone who doesn't have an account, {attacker.mention}!**")
if target_sloth_profile[1] == 'default':
return await ctx.send(f"**You cannot convert someone who has a `default` Sloth class, {attacker.mention}!**")
if 'protected' in target_fx:
return await ctx.send(f"**{attacker.mention}, you cannot convert {target.mention} into a `Munk`, because they are protected against attacks!**")
confirmed = await ConfirmSkill(f"**{attacker.mention}, are you sure you want to convert {target.mention} into a `Munk`?**").prompt(ctx)
if not confirmed:
return await ctx.send("**Not converting them, then!**")
if ctx.invoked_with == 'mirror':
mirrored_skill = await self.get_skill_action_by_user_id_and_skill_type(user_id=attacker.id, skill_type='mirror')
if not mirrored_skill:
return await ctx.send(f"**Something went wrong with this, {attacker.mention}!**")
else:
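# Check the existing cooldown record so we know below whether to update or insert the skill timestamp.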
_, exists = await Player.skill_on_cooldown(skill=Skill.ONE).predicate(ctx)
try:
await target.edit(nick=f"{target.display_name} Munk")
current_timestamp = await utils.get_timestamp()
if ctx.invoked_with != 'mirror':
if exists:
await self.update_user_skill_ts(attacker.id, Skill.ONE, current_timestamp)
else:
await self.insert_user_skill_cooldown(attacker.id, Skill.ONE, current_timestamp)
# Updates user's skills used counter
await self.update_user_skills_used(user_id=attacker.id)
munk_embed = await self.get_munk_embed(
channel=ctx.channel, perpetrator_id=attacker.id, target_id=target.id)
msg = await ctx.send(embed=munk_embed)
except Exception as e:
print(e)
return await ctx.send(f"**Something went wrong and your `Munk` skill failed, {attacker.mention}!**")
else:
await msg.edit(content=f"<@{target.id}>")
if 'reflect' in target_fx and 'munk' not in attacker_fx:
await self.reflect_attack(ctx, attacker, target, 'munk')
async def get_munk_embed(self, channel, perpetrator_id: int, target_id: int) -> discord.Embed:
""" Makes an embedded message for a munk action.
:param channel: The context channel.
:param perpetrator_id: The ID of the perpetrator of the munk skill.
:param target_id: The ID of the target member that is gonna be protected. """
timestamp = await utils.get_timestamp()
munk_embed = discord.Embed(
title="A Munk Convertion has been delightfully performed!",
description=f"🐿️ <@{perpetrator_id}> converted <@{target_id}> into a `Munk`! 🐿️",
color = discord.Color.green(),
timestamp=datetime.fromtimestamp(timestamp)
)
munk_embed.set_thumbnail(url="https://thelanguagesloth.com/media/sloth_classes/Munk.png")
munk_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)
return munk_embed
async def get_join_tribe_embed(self, channel, inviter: discord.Member, target: discord.Member, tribe: Dict[str, Union[int, str]]) -> discord.Embed:
""" Makes an embedded message for a tribe joining.
:param channel: The context channel.
:param inviter: The inviter.
:param target_id: The target member that is gonna be invited to a tribe.
:param tribe: The tribe and its information. """
timestamp = await utils.get_timestamp()
join_tribe_embed = discord.Embed(
title="Someone just joined a Tribe!",
description=f"🏕️ {target.mention} just joined `{tribe['name']}`! 🏕️",
color=discord.Color.green(),
timestamp=datetime.fromtimestamp(timestamp),
url=tribe['link']
)
join_tribe_embed.set_author(name=inviter, icon_url=inviter.display_avatar)
if tribe['thumbnail']:
join_tribe_embed.set_thumbnail(url=tribe['thumbnail'])
join_tribe_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)
return join_tribe_embed
async def get_tribe_info_by_name(self, name: str) -> Dict[str, Union[str, int]]:
""" Gets information about a specific tribe.
:param name: The name of the tribe. """
mycursor, db = await the_database()
await mycursor.execute("SELECT * FROM UserTribe WHERE tribe_name = %s", (name,))
tribe = await mycursor.fetchone()
await mycursor.close()
tribe_info = {
'owner_id': None,
'name': None,
'description': None,
'two_emojis': None,
'thumbnail': None,
'form': None,
'link': None
}
if tribe:
tribe_info = {
'owner_id': tribe[0],
'name': tribe[1],
'description': tribe[2],
'two_emojis': tribe[3],
'thumbnail': tribe[4],
'form': tribe[5],
'link': f"https://thelanguagesloth.com/tribes/{tribe[6]}/"
}
return tribe_info
async def get_tribe_info_by_user_id(self, user_id: int) -> Dict[str, Union[str, int]]:
""" Gets information about a specific tribe.
:param user_id: The ID of the user owner of the tribe. """
mycursor, db = await the_database()
await mycursor.execute("SELECT * FROM UserTribe WHERE user_id = %s", (user_id,))
tribe = await mycursor.fetchone()
await mycursor.close()
tribe_info = {
'owner_id': None,
'name': None,
'description': None,
'two_emojis': None,
'thumbnail': None,
'form': None,
'link': None
}
if tribe:
tribe_info = {
'owner_id': tribe[0],
'name': tribe[1],
'description': tribe[2],
'two_emojis': tribe[3],
'thumbnail': tribe[4],
'form': tribe[5],
'link': f"https://thelanguagesloth.com/tribes/{tribe[6]}/"
}
return tribe_info
async def get_tribe_member(self, user_id: int) -> List[Union[str, int]]:
""" Gets a Tribe Member.
:param user_id: The ID of the tribe member to get. """
mycursor, db = await the_database()
await mycursor.execute("SELECT * FROM TribeMember WHERE member_id = %s", (user_id,))
tribe_member = await mycursor.fetchone()
await mycursor.close()
return tribe_member
async def get_tribe_members(self, tribe_owner_id: int = None, tribe_name: str = None) -> List[List[Union[int, str]]]:
""" Gets a list of IDs of members of a particular tribe.
:param tribe_owner_id: The ID of the owner of the tribe (Optional).
:param tribe_name: The name of the tribe. (Optional).
Ps: At least one of the parameters has to be provided. """
mycursor, _ = await the_database()
tribe_members: List[int] = []
if tribe_owner_id:
await mycursor.execute("SELECT tribe_name FROM UserTribe WHERE user_id = %s", (tribe_owner_id,))
tribe = await mycursor.fetchone()
await mycursor.execute("SELECT member_id, tribe_role FROM TribeMember WHERE tribe_name = %s", (tribe[0],))
tribe_members = await mycursor.fetchall()
elif tribe_name:
await mycursor.execute("SELECT member_id, tribe_role FROM TribeMember WHERE tribe_name = %s", (tribe_name,))
tribe_members = await mycursor.fetchall()
await mycursor.close()
return tribe_members
@commands.group(aliases=['tb'])
@Player.poisoned()
@Player.kidnapped()
async def tribe(self, ctx) -> None:
""" Command for managing and interacting with a tribe.
(Use this without a subcommand to see all subcommands available) """
if ctx.invoked_subcommand:
return
cmd = self.client.get_command('tribe')
prefix = self.client.command_prefix
subcommands = [f"{prefix}{c.qualified_name}" for c in cmd.commands
]
subcommands = '\n'.join(subcommands)
items_embed = discord.Embed(
title="__Subcommads__:",
description=f"```apache\n{subcommands}```",
color=ctx.author.color,
timestamp=ctx.message.created_at
)
await ctx.send(embed=items_embed)
@tribe.command(aliases=['request_logo', 'ask_thumbnail', 'ask_logo'])
@commands.cooldown(1, 3600, commands.BucketType.user)
async def request_thumbnail(self, ctx, image_url: str = None) -> None:
""" Request a thumbnail for your tribe.
:param image_url: The URL link of the thumbnail image. """
requester = ctx.author
if ctx.channel.id != bots_and_commands_channel_id:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**{ctx.author.mention}, you can only use this command in {self.bots_txt.mention}!**")
if not image_url:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"You need to inform an image URL, {requester.mention}!**")
if not image_url.startswith('https://'):
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"You need to inform an image URL that has HTTPS in it, {requester.mention}!**")
if len(image_url) > 200:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"You need to inform an image URL within 200 characters, {requester.mention}!**")
user_tribe = await self.get_tribe_info_by_user_id(user_id=requester.id)
if not user_tribe['name']:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**You don't even have a tribe, you cannot request it, {requester.mention}!**")
confirm = await ConfirmSkill(content=requester.mention,
msg=f"**Are you sure you want to request [this]({image_url}) to be `{user_tribe['name']}`'s thumbnail/logo?**").prompt(ctx)
if confirm:
# Sends message to a moderation-clearance room
room = self.client.get_channel(approve_thumbnail_channel_id)
request_embed = discord.Embed(
title="__Thumbnail Request__",
description=f"{requester.mention} is requesting the image below to be their tribe's (`{user_tribe['name']}`) thumbnail/logo.",
color=requester.color,
timestamp=ctx.message.created_at
)
request_embed.set_image(url=image_url)
request_msg = await room.send(embed=request_embed)
# The request has no expiration, so there is no timeout to track here.
current_timestamp = await utils.get_timestamp()
await self.insert_skill_action(
user_id=requester.id, skill_type="thumbnail_request", skill_timestamp=current_timestamp,
target_id=requester.id, channel_id=room.id, message_id=request_msg.id,
content=image_url
)
await request_msg.add_reaction('✅')
await request_msg.add_reaction('❌')
await ctx.send(f"**Request sent, {ctx.author.mention}!**")
else:
ctx.command.reset_cooldown(ctx)
await ctx.send(f"**Not doing requesting it, then, {requester.mention}!**")
@tribe.command(aliases=['inv'])
@commands.cooldown(1, 10, commands.BucketType.user)
async def invite(self, ctx, member: discord.Member = None) -> None:
""" Invites a user to your tribe.
:param member: The member to invite. """
inviter = ctx.author
if ctx.channel.id != bots_and_commands_channel_id:
return await ctx.send(f"**{inviter.mention}, you can only use this command in {self.bots_txt.mention}!**")
tribe_member = await self.get_tribe_member(inviter.id)
if not tribe_member or tribe_member[0] != tribe_member[2]:
return await ctx.send(f"**You don't have a tribe, {inviter.mention}**!")
if not member:
return await ctx.send(f"**Please, inform a member to invite to your tribe, {inviter.mention}!**")
if inviter.id == member.id:
return await ctx.send(f"**You cannot invite yourself into your own tribe, {inviter.mention}!**")
confirm = await ConfirmSkill(f"Are you sure you want to invite, {member.mention} to `{tribe_member[1]}`?").prompt(ctx)
if not confirm:
return await ctx.send("**Not inviting them, then!**")
# Checks whether user is already in a tribe.
sloth_profile = await self.get_sloth_profile(member.id)
if not sloth_profile:
return await ctx.send(f"**You cannot invite someone that doesn't have an account, {inviter.mention}!**")
if sloth_profile[1] == 'default':
return await ctx.send(f"**You cannot invite someone that doesn't have a Sloth Class, {inviter.mention}!**")
if sloth_profile[3]:
return await ctx.send(f"**You cannot invite someone that is already in a tribe, {inviter.mention}!**")
custom_ctx = ctx
custom_ctx.author = member
invite = await ConfirmSkill(content=f"{member.mention}", msg=f"{inviter.mention} invited you to join their tribe called `{tribe_member[1]}`, do you wanna join?").prompt(custom_ctx)
if invite:
user_tribe = await self.get_tribe_info_by_user_id(inviter.id)
try:
await self.insert_tribe_member(owner_id=inviter.id, tribe_name=tribe_member[1], user_id=member.id)
await self.update_someones_tribe(user_id=member.id, tribe_name=tribe_member[1])
try:
await self.update_tribe_name(member=member, two_emojis=user_tribe['two_emojis'], joining=True)
except:
pass
except Exception as e:
print(e)
await ctx.send(f"**Something went wrong with it, {member.mention}, {inviter.mention}!**")
else:
join_tribe_embed = await self.get_join_tribe_embed(
channel=ctx.channel, inviter=inviter, target=member, tribe=user_tribe)
await ctx.send(embed=join_tribe_embed)
else:
await ctx.send(f"**{member.mention} refused your invitation to join `{tribe_member[1]}`, {inviter.mention}!**")
@tribe.command(aliases=['view', 'display', 'show'])
@commands.cooldown(1, 10, commands.BucketType.user)
async def see(self, ctx, *, name: str = None) -> None:
""" Shows some information about a tribe.
If not provided a tribe name, it will check the one the user is in.
:param name: The tribe name. """
member = ctx.author
tribe = None
if name:
tribe = await self.get_tribe_info_by_name(name)
else:
sloth_profile = await self.get_sloth_profile(member.id)
if not sloth_profile or not sloth_profile[3]:
return await ctx.send(
f"**You didn't provide any tribe name and you're not in a tribe either, {member.mention}!**")
tribe = await self.get_tribe_info_by_name(sloth_profile[3])
if not tribe['name']:
return await ctx.send(f"**No tribes with that name were found, {member.mention}!**")
# Gets all tribe members
tribe_members = await self.get_tribe_members(tribe_name=tribe['name'])
all_members = list(map(lambda mid: f"<@{mid[0]}> ({mid[1]})", tribe_members))
# Additional data:
additional = {
'tribe': tribe,
'change_embed': self._make_tribe_embed
}
pages = menus.MenuPages(source=SwitchTribePages(all_members, **additional), clear_reactions_after=True)
await pages.start(ctx)
async def _make_tribe_embed(self, ctx: commands.Context, tribe: Dict[str, Union[str, int]], entries: int, offset: int, lentries: int) -> discord.Embed:
tribe_owner = self.client.get_user(tribe['owner_id'])
tribe_embed = discord.Embed(
title=f"{tribe['name']} ({tribe['two_emojis']})",
description=tribe['description'],
timestamp=ctx.message.created_at,
color=ctx.author.color,
url=tribe['link']
)
if tribe['thumbnail']:
tribe_embed.set_thumbnail(url=tribe['thumbnail'])
if tribe_owner:
tribe_embed.set_author(name=f"Owner: {tribe_owner}", icon_url=tribe_owner.display_avatar, url=tribe_owner.display_avatar)
tribe_embed.add_field(name="__Members:__", value=', '.join(entries), inline=False)
for i, v in enumerate(entries, start=offset):
tribe_embed.set_footer(text=f"({i} of {lentries})")
return tribe_embed
@tribe.command(aliases=['kick', 'expel', 'kick_out', 'can_i_show_you_the_door?'])
@commands.cooldown(1, 10, commands.BucketType.user)
async def kickout(self, ctx, member: Union[discord.Member, discord.User] = None) -> None:
""" Exepels someone from your tribe.
:param member: The member to expel. """
expeller = ctx.author
if ctx.channel.id != bots_and_commands_channel_id:
return await ctx.send(f"**{expeller.mention}, you can only use this command in {self.bots_txt.mention}!**")
user_tribe = await self.get_tribe_info_by_user_id(user_id=expeller.id)
if not user_tribe['name']:
return await ctx.send(f"**You don't have a tribe, {expeller.mention}**!")
if not member:
return await ctx.send(f"**Please, inform a member to kick from your tribe, {expeller.mention}!**")
if expeller.id == member.id:
return await ctx.send(f"**You cannot kick yourself out of your own tribe, {expeller.mention}!**")
member_fx = await self.get_user_effects(member)
if 'kidnapped' in member_fx:
return await ctx.send(f"**You cannot kick someone from your tribe who is kidnapped, {expeller.mention}!**")
confirm = await ConfirmSkill(f"Are you sure you want to kick, {member.mention} from `{user_tribe['name']}`?").prompt(ctx)
if not confirm:
return await ctx.send("**Not kicking them, then!**")
# Checks whether user is already in a tribe.
sloth_profile = await self.get_sloth_profile(member.id)
if not sloth_profile:
return await ctx.send(f"**You cannot kick out someone that doesn't even have an account, {expeller.mention}!**")
if sloth_profile[3] != user_tribe['name']:
return await ctx.send(f"**You cannot kick out someone that is not in your tribe, {expeller.mention}!**")
try:
# await self.update_someones_tribe(user_id=member.id, tribe_name=None)
await self.delete_tribe_member(user_id=member.id)
try:
await self.update_tribe_name(member=member, two_emojis=user_tribe['two_emojis'], joining=False)
except:
pass
except Exception as e:
print(e)
await ctx.send(f"**Something went wrong with it, {expeller.mention}!**")
else:
await ctx.send(f"**You successfully kicked {member.mention} out of `{user_tribe['name']}`, {expeller.mention}!**")
@tribe.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def leave(self, ctx) -> None:
""" Leaves the tribe the user is in. """
member = ctx.author
if ctx.channel.id != bots_and_commands_channel_id:
return await ctx.send(f"**{member.mention}, you can only use this command in {self.bots_txt.mention}!**")
tribe_member = await self.get_tribe_member(user_id=member.id)
if not tribe_member[1]:
return await ctx.send(f"**You are not in a tribe, {member.mention}**!")
if member.id == tribe_member[0]:
return await ctx.send(f"**You cannot leave your own tribe, {member.mention}!**")
user_tribe = await self.get_tribe_info_by_name(tribe_member[1])
confirm = await ConfirmSkill(f"Are you sure you want to leave `{user_tribe['name']}`, {member.mention}?").prompt(ctx)
if not confirm:
return await ctx.send("**Not leaving it, then!**")
# Updates user tribe status and nickname
try:
await self.delete_tribe_member(member.id)
try:
await self.update_tribe_name(member=member, two_emojis=user_tribe['two_emojis'], joining=False)
except Exception as ee:
print(ee)
pass
except Exception as e:
print(e)
await ctx.send(f"**Something went wrong with it, {member.mention}!**")
else:
await ctx.send(f"**You successfully left `{user_tribe['name']}`, {member.mention}!**")
async def update_someones_tribe(self, user_id: int, tribe_name: str = None) -> None:
""" Updates someone's tribe status.
:param user_id: The ID of the user who's gonna be updated.
:param tribe_name: The name of the tribe the user is gonna be set to. (default = None) """
mycursor, db = await the_database()
await mycursor.execute("UPDATE SlothProfile SET tribe = %s, tribe_user_id = %s WHERE user_id = %s", (tribe_name, user_id, user_id))
await db.commit()
await mycursor.close()
async def update_tribe_thumbnail(self, user_id: int, tribe_name: str, link: str = None) -> None:
""" Updates someone's tribe thumbnail link.
:param user_id: The ID of the tribe's owner.
:param tribe_name: The name of the tribe.
:param link: The link that the tribe's thumbnail will be set to. """
mycursor, db = await the_django_database()
await mycursor.execute("""
UPDATE tribe_tribe SET tribe_thumbnail = %s
WHERE owner_id = %s AND tribe_name = %s""", (link, user_id, tribe_name))
await db.commit()
await mycursor.close()
mycursor, db = await the_database()
await mycursor.execute("""
UPDATE UserTribe SET tribe_thumbnail = %s
WHERE user_id = %s AND tribe_name = %s""", (link, user_id, tribe_name))
await db.commit()
await mycursor.close()
async def update_tribe_name(self, member: discord.Member, two_emojis: str, joining: bool) -> None:
""" Updates someone's nickname so it has their tribe's two-emoji combination identifier.
:param member: The member whose nickname is gonna be updated.
:param two_emojis: The two-emoji combination identifier.
:param joining: Whether the user is joining the tribe. """
dname = member.display_name
if joining:
# Checks whether member is Munked
if dname.endswith('Munk'):
await member.edit(nick=f"{dname.strip()[:-4]} {two_emojis} Munk".strip())
else:
await member.edit(nick=f"{dname.strip()} {two_emojis}".strip())
else:
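# Leaving the tribe: strip the last occurrence of the two-emoji tag from the display name.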
nick = ' '.join(map(lambda p: p.strip(), dname.rsplit(two_emojis, 1)))
if nick != dname:
await member.edit(nick=nick)
async def check_tribe_creations(self) -> None:
""" Check on-going steals and their expiration time. """
creations = await self.get_skill_actions_by_skill_type('tribe_creation')
guild = self.client.get_guild(int(os.getenv('SERVER_ID', 123)))
for creation in creations:
try:
# Removes skill action from the database
await self.delete_skill_action_by_target_id_and_skill_type(target_id=creation[0], skill_type='tribe_creation')
member = discord.utils.get(guild.members, id=creation[0])
try:
await self.update_tribe_name(member=member, two_emojis=creation[6], joining=True)
except:
pass
except:
pass
@commands.command()
@Player.poisoned()
@Player.skills_used(requirement=5)
@Player.skill_on_cooldown(skill=Skill.TWO)
@Player.skills_locked()
@Player.user_is_class('munk')
@Player.skill_mark()
async def create_tribe(self, ctx) -> None:
""" Guides you into the creation of a tribe,
which is a custom group for people to join and do something. """
member = ctx.author
link = 'https://thelanguagesloth.com/tribes'
tribe_embed = discord.Embed(
title="__Tribe Creation__",
description=f"In order to create your tribe, access our website by clicking [here]({link}) or in the button below!",
color=member.color,
timestamp=ctx.message.created_at,
url=link
)
tribe_embed.set_author(name=member, url=member.display_avatar, icon_url=member.display_avatar)
tribe_embed.set_thumbnail(url=member.display_avatar)
tribe_embed.set_footer(text=member.guild.name, icon_url=member.guild.icon.url)
view = discord.ui.View()
view.add_item(discord.ui.Button(style=5, label="Create Tribe", url=link, emoji="🏕️"))
await ctx.send(embed=tribe_embed, view=view)
@commands.command(aliases=['add_tribe_role', 'createtriberole', 'addtriberole'])
@Player.poisoned()
@Player.skills_used(requirement=20)
@Player.skill_on_cooldown(skill=Skill.THREE, seconds=36000)
@Player.skills_locked()
@Player.user_is_class('munk')
@Player.skill_mark()
async def create_tribe_role(self, ctx, role_name: str = None) -> None:
""" Creates a tribe role.
With different roles and positions in your tribe, you can better
administer it and keep track of what each person should do
or what their purpose is inside your tribe.
:param role_name: The name of the tribe role. (MAX = 30 Chars)
* Cooldown: 1 day
Ps: It is not an actual server role. """
perpetrator = ctx.author
# Do the magic here.
if ctx.channel.id != self.bots_txt.id:
return await ctx.send(f"**{perpetrator.mention}, you can only use this command in {self.bots_txt.mention}!**")
perpetrator_fx = await self.get_user_effects(perpetrator)
if 'knocked_out' in perpetrator_fx:
return await ctx.send(f"**{perpetrator.mention}, you can't use this skill, because you are knocked-out!**")
user_tribe = await self.get_tribe_info_by_user_id(perpetrator.id)
if not user_tribe['name']:
return await ctx.send(f"**You don't have a tribe, {perpetrator.mention}**!")
if not role_name:
return await ctx.send(f"**Please, inform a Tribe Role name, {perpetrator.mention}!**")
if len(role_name) > 30:
return await ctx.send(f"**Please, infom a Tribe Role name under or equal to 30 characters, {perpetrator.mention}!**")
if role_name.lower() in ['owner', 'member']:
return await ctx.send(f"**You cannot use this as your Tribe Role's name, {perpetrator.mention}!**")
tribe_roles = await self.get_tribe_roles(perpetrator.id)
if role_name.lower() in [trole[2].lower() for trole in tribe_roles]:
return await ctx.send(f"**You already have a Tribe Role with that name, {perpetrator.mention}!**")
confirm = await ConfirmSkill(f"**Are you sure you want to create a Tribe Role named `{role_name}`, {perpetrator.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not making it, then, {perpetrator.mention}!**")
_, exists = await Player.skill_on_cooldown(skill=Skill.THREE, seconds=36000).predicate(ctx)
try:
current_timestamp = await utils.get_timestamp()
await self.insert_tribe_role(perpetrator.id, user_tribe['name'], role_name)
if exists:
await self.update_user_skill_ts(perpetrator.id, Skill.THREE, current_timestamp)
else:
await self.insert_user_skill_cooldown(perpetrator.id, Skill.THREE, current_timestamp)
# Updates user's skills used counter
await self.update_user_skills_used(user_id=perpetrator.id)
except Exception as e:
print(e)
return await ctx.send(f"**Something went wrong with your skill and it failed, {perpetrator.mention}!**")
else:
tribe_role_embed = await self.get_tribe_role_embed(
channel=ctx.channel, owner_id=perpetrator.id, tribe_info=user_tribe, role_name=role_name)
await ctx.send(embed=tribe_role_embed)
@tribe.command(aliases=['remove_role', 'deleterole', 'removerole'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def delete_role(self, ctx, role_name: str = None) -> None:
""" Deletes a specific role from the member's tribe.
:param role_name: The name of the role to delete. """
member = ctx.author
if not role_name:
return await ctx.send(f"**Please, inform a Tribe Role name, {member.mention}!**")
if len(role_name) > 30:
return await ctx.send(f"**Tribe Role names have a limit of 30 characters, {member.mention}!**")
user_tribe = await self.get_tribe_info_by_user_id(user_id=member.id)
if not user_tribe['name']:
return await ctx.send(f"**You don't have a tribe, {member.mention}**!")
tribe_role = await self.get_tribe_role(member.id, role_name)
if not tribe_role:
return await ctx.send(f"**You don't have a Tribe Role with that name, {member.mention}!**")
confirm = await ConfirmSkill(f"**Are you sure you want to delete your tribe's `{tribe_role[2]}` role, {member.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not doing it then, {member.mention}!**")
try:
await self.delete_tribe_role(member.id, user_tribe['name'], role_name)
except Exception as e:
print(e)
await ctx.send(f"**Something went wrong with it, {member.mention}!**")
else:
await ctx.send(f"**Successfully deleted the `{role_name}` role from your tribe, {member.mention}!**")
@tribe.command(aliases=['remove_roles', 'deleteroles', 'removeroles'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def delete_roles(self, ctx) -> None:
""" Deletes all Tribe Roles from the member's tribe. """
member = ctx.author
user_tribe = await self.get_tribe_info_by_user_id(user_id=member.id)
if not user_tribe['name']:
return await ctx.send(f"**You don't have a tribe, {member.mention}**!")
tribe_roles = await self.get_tribe_roles(member.id)
if not tribe_roles:
return await ctx.send(f"**You don't any Tribe Roles, {member.mention}!**")
confirm = await ConfirmSkill(f"**Are you sure you want to delete your tribe's roles, {member.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not doing it then, {member.mention}!**")
try:
await self.delete_tribe_roles(member.id, user_tribe['name'])
except Exception as e:
print(e)
await ctx.send(f"**Something went wrong with it, {member.mention}!**")
else:
await ctx.send(f"**Successfully deleted all roles from your tribe, {member.mention}!**")
@tribe.command(aliases=['give_role', 'giverole'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def promote(self, ctx, member: discord.Member = None, role_name: str = None) -> None:
""" Promotes a Tribe Member to a given Tribe Role.
:param member: The Tribe Member to promote.
:param role_name: The Tribe Role to promote the member to. """
owner = ctx.author
if not member:
return await ctx.send(f"**Please, inform a Tribe Member to promote, {owner.mention}!**")
if owner.id == member.id:
return await ctx.send(f"**You cannot promote yourself, {owner.mention}!**")
if not role_name:
return await ctx.send(f"**Please, inform a Tribe Role name, {owner.mention}!**")
if len(role_name) > 30:
return await ctx.send(f"**Tribe Role names have a limit of 30 characters, {owner.mention}!**")
user_tribe = await self.get_tribe_info_by_user_id(user_id=owner.id)
if not user_tribe['name']:
return await ctx.send(f"**You don't have a tribe, {owner.mention}**!")
tribe_member = await self.get_tribe_member(member.id)
if not tribe_member:
return await ctx.send(f"**{member.mention} is not even in a tribe, {owner.mention}!**")
if tribe_member[1] != user_tribe['name']:
return await ctx.send(f"**{member.mention} is not even from your tribe, {owner.mention}!**")
if str(tribe_member[3]).lower() == role_name.lower():
return await ctx.send(f"**{member.mention} already has this Tribe Role, {owner.mention}!**")
tribe_role = await self.get_tribe_role(owner.id, role_name)
if not tribe_role:
return await ctx.send(f"**You don't have a Tribe Role with that name, {owner.mention}!**")
confirm = await ConfirmSkill(f"**Are you sure you want to promote {member.mention} to `{tribe_role[2]}`, {owner.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not doing it then, {owner.mention}!**")
try:
await self.update_user_tribe_role(member.id, tribe_role[2])
except Exception as e:
print(e)
await ctx.send(f"**Something went wrong with it, {owner.mention}!**")
else:
await ctx.send(f"**Successfully promoted {member.mention} to `{tribe_role[2]}`, {owner.mention}!**")
@tribe.command(aliases=['take_role', 'takerole'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def demote(self, ctx, member: discord.Member = None) -> None:
""" Demotes a Tribe Member from their current Tribe Role.
:param member: The Tribe Member to demote. """
owner = ctx.author
if not member:
return await ctx.send(f"**Please, inform a Tribe Member to promote, {owner.mention}!**")
if owner.id == member.id:
return await ctx.send(f"**You cannot demote yourself, {owner.mention}!**")
user_tribe = await self.get_tribe_info_by_user_id(user_id=owner.id)
if not user_tribe['name']:
return await ctx.send(f"**You don't have a tribe, {owner.mention}**!")
tribe_member = await self.get_tribe_member(member.id)
if not tribe_member:
return await ctx.send(f"**{member.mention} is not even in a tribe, {owner.mention}!**")
if tribe_member[1] != user_tribe['name']:
return await ctx.send(f"**{member.mention} is not even from your tribe, {owner.mention}!**")
if tribe_member[3] == 'Member':
return await ctx.send(f"**{member.mention} already has the default Tribe Role, {owner.mention}!**")
tribe_role = await self.get_tribe_role(owner.id, tribe_member[3])
if not tribe_role:
return await ctx.send(f"**You don't have a Tribe Role with that name, {owner.mention}!**")
confirm = await ConfirmSkill(f"**Are you sure you want to demote {member.mention} from `{tribe_role[2]}` to `Member`, {owner.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not doing it then, {owner.mention}!**")
try:
await self.update_user_tribe_role(member.id)
except Exception as e:
print(e)
await ctx.send(f"**Something went wrong with it, {owner.mention}!**")
else:
await ctx.send(f"**Successfully demote {member.mention} from `{tribe_role[2]}` to `Member`, {owner.mention}!**")
@tribe.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def roles(self, ctx, tribe_name: Optional[str] = None) -> None:
""" Shows the Tribe Roles of a given tribe.
:param tribe_name: The name of the tribe to show the roles. [Optional]
PS: If a tribe name is not provided, it will fetch the tribe the user is in. """
member = ctx.author
tribe = None
if tribe_name:
tribe = await self.get_tribe_info_by_name(tribe_name)
else:
sloth_profile = await self.get_sloth_profile(member.id)
if not sloth_profile or not sloth_profile[3]:
return await ctx.send(
f"**You didn't provide any tribe name and you're not in a tribe either, {member.mention}!**")
tribe = await self.get_tribe_info_by_name(sloth_profile[3])
if not tribe['name']:
return await ctx.send(f"**No tribe with that name was found, {member.mention}**!")
roles = await self.get_tribe_roles(tribe['owner_id'])
if not roles:
return await ctx.send(f"**This tribe doesn't have any intern roles, {member.mention}!**")
embed = discord.Embed(
title=f"__{tribe['name']}'s Roles__:",
description=', '.join([r[2] for r in roles]),
color=member.color,
timestamp=ctx.message.created_at,
url=tribe['link']
)
embed.set_author(name=member.display_name, url=member.display_avatar, icon_url=member.display_avatar)
if tribe['thumbnail']:
embed.set_thumbnail(url=tribe['thumbnail'])
embed.set_footer(text=ctx.guild.name, icon_url=ctx.guild.icon.url)
await ctx.send(embed=embed)
async def get_tribe_role(self, owner_id: int, role_name: str) -> List[Union[int, str]]:
""" Gets a Tribe Role by name.
:param owner_id: The ID of the owner of that tribe.
:param role_name: The name of the role. """
mycursor, _ = await the_database()
await mycursor.execute("SELECT * FROM TribeRole WHERE owner_id = %s AND LOWER(role_name) = LOWER(%s)", (owner_id, role_name))
tribe_role = await mycursor.fetchone()
await mycursor.close()
return tribe_role
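# NOTE: the returned row is assumed to follow the TribeRole column order used by
# insert_tribe_role below, i.e. (owner_id, tribe_name, role_name, ...), which is
# why the commands above read the role's display name from tribe_role[2].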
async def get_tribe_roles(self, owner_id: int) -> List[List[Union[int, str]]]:
""" Gets all Tribe Roles from tribe owner's tribe.
:param owner_id: The ID of the owner of that tribe. """
mycursor, _ = await the_database()
await mycursor.execute("SELECT * FROM TribeRole WHERE owner_id = %s", (owner_id,))
tribe_roles = await mycursor.fetchall()
await mycursor.close()
return tribe_roles
async def insert_tribe_role(self, owner_id: int, tribe_name: str, role_name: str) -> None:
""" Inserts a Tribe Role into the database.
:param owner_id: The ID of the owner of that tribe.
:param tribe_name: The name of the tribe.
:param role_name: The name of the role. """
mycursor, db = await the_database()
await mycursor.execute("""
INSERT INTO TribeRole (owner_id, tribe_name, role_name) VALUES (%s, %s, %s)
""", (owner_id, tribe_name, role_name))
await db.commit()
await mycursor.close()
async def delete_tribe_role(self, owner_id: int, tribe_name: str, role_name: str) -> None:
""" Deletes a Tribe Role from the database.
:param owner_id: The ID of the owner of that tribe.
:param tribe_name: The name of the tribe.
:param role_name: The name of the role. """
mycursor, db = await the_database()
await mycursor.execute("DELETE FROM TribeRole WHERE owner_id = %s AND LOWER(role_name) = LOWER(%s)", (owner_id, role_name))
await mycursor.execute("""
UPDATE TribeMember SET tribe_role = DEFAULT(tribe_role) WHERE tribe_name = %s AND LOWER(tribe_role) = LOWER(%s)
""", (tribe_name, role_name))
await db.commit()
await mycursor.close()
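# NOTE: the UPDATE above relies on MySQL's DEFAULT(col) syntax; members who held the
# deleted role are pushed back to the column default (presumably 'Member', the same
# default used by insert_tribe_member and checked by the demote command).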
async def delete_tribe_roles(self, owner_id: int, tribe_name: str) -> None:
""" Deletes all Tribe Roles from the database.
:param owner_id: The ID of the owner of that tribe.
:param tribe_name: The name of the tribe. """
mycursor, db = await the_database()
await mycursor.execute("DELETE FROM TribeRole WHERE owner_id = %s", (owner_id,))
await mycursor.execute("""
UPDATE TribeMember SET tribe_role = DEFAULT(tribe_role)
WHERE tribe_name = %s AND tribe_role <> 'Owner'
""", (tribe_name,))
await db.commit()
await mycursor.close()
async def insert_tribe_member(self, owner_id: int, tribe_name: str, user_id: int, tribe_role: str = 'Member') -> None:
""" Inserts a Tribe Member.
:param owner_id: The ID of the owner of the tribe the user is joining.
:param tribe_name: The tribe name.
:param user_id: The ID of the user.
:param tribe_role: The initial role they're gonna have in the tribe. """
mycursor, db = await the_database()
await mycursor.execute("""
INSERT INTO TribeMember (owner_id, tribe_name, member_id, tribe_role)
VALUES (%s, %s, %s, %s)""", (owner_id, tribe_name, user_id, tribe_role))
await db.commit()
await mycursor.close()
async def delete_tribe_member(self, user_id: int) -> None:
""" Deletes a Tribe Member.
:param user_id: The ID of the tribe member. """
mycursor, db = await the_database()
await mycursor.execute("DELETE FROM TribeMember WHERE member_id = %s", (user_id,))
await db.commit()
await mycursor.close()
async def get_tribe_role_embed(self, channel: discord.TextChannel, owner_id: int, tribe_info: Dict[str, Union[str, int]], role_name: str) -> discord.Embed:
""" Makes an embedded message for a Tribe Role creation.
:param channel: The context channel.
:param owner_id: The owner of the tribe.
:param tribe_info: The tribe info.
:param role_name: The role created for that tribe. """
current_ts = await utils.get_timestamp()
tribe_role_embed = discord.Embed(
title="__A Tribe Role has been Created__",
description=f"<@{owner_id}> has just created a Tribe Role named `{role_name}` for their tribe named `{tribe_info['name']}`.",
color=discord.Color.green(),
timestamp=datetime.fromtimestamp(current_ts)
)
if tribe_info['thumbnail']:
tribe_role_embed.set_thumbnail(url=tribe_info['thumbnail'])
tribe_role_embed.set_image(url='https://media1.tenor.com/images/5327c87ecb310a382e891a0ed209357f/tenor.gif?itemid=18799194')
tribe_role_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)
return tribe_role_embed
async def update_user_tribe_owner(self, old_owner_id: int, new_owner_id: int) -> None:
""" Updates the user's Tribe Role.
:param old_owner_id: The old Tribe owner's ID.
:param new_owner_id: The new Tribe owner's ID. """
mycursor1, db1 = await the_database()
await mycursor1.execute("UPDATE UserTribe SET user_id = %s WHERE user_id = %s", (new_owner_id, old_owner_id))
await mycursor1.execute("""
UPDATE TribeMember as GL, (
SELECT owner_id, member_id, tribe_role
FROM TribeMember
WHERE member_id = %s
) OG, (
SELECT owner_id, member_id, tribe_role
FROM TribeMember
WHERE member_id = %s
) T
SET GL.tribe_role = (
CASE
WHEN GL.member_id = %s THEN T.tribe_role
WHEN GL.member_id = %s THEN OG.tribe_role
END
)
WHERE GL.member_id in (%s, %s);
""", (new_owner_id, old_owner_id, new_owner_id, old_owner_id, new_owner_id, old_owner_id))
await db1.commit()
await mycursor1.close()
mycursor2, db2 = await the_django_database()
await mycursor2.execute("UPDATE tribe_tribe SET owner_id = %s WHERE owner_id = %s", (new_owner_id, old_owner_id))
await db2.commit()
await mycursor2.close()
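# Rough sketch of the UPDATE above: TribeMember is joined against two single-row
# subqueries (the old owner's row and the new owner's row) and the CASE swaps their
# tribe_role values in one statement, e.g. ('Owner', 'Member') -> ('Member', 'Owner').
# The second connection then mirrors the ownership change into the Django-side
# tribe_tribe table.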
async def update_user_tribe_role(self, user_id: int, role_name: Optional[str] = None) -> None:
""" Updates the user's Tribe Role.
:param user_id: The Tribe Member's ID.
:param role_name: The name of the role. [Optional][Default='Member'] """
mycursor, db = await the_database()
if not role_name:
await mycursor.execute("UPDATE TribeMember SET tribe_role = DEFAULT(tribe_role) WHERE member_id = %s", (user_id,))
else:
await mycursor.execute("UPDATE TribeMember SET tribe_role = %s WHERE member_id = %s", (role_name, user_id))
await db.commit()
await mycursor.close()
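# Usage sketch: promote calls this with an explicit role name, while demote omits
# role_name so the member falls back to DEFAULT(tribe_role):
#   await self.update_user_tribe_role(member.id, tribe_role[2])  # promote
#   await self.update_user_tribe_role(member.id)                 # demote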
@tribe.command(aliases=['to', 'transfer'])
@commands.cooldown(1, 60, commands.BucketType.user)
async def transfer_ownership(self, ctx, *, member: discord.Member = None) -> None:
""" Transfers the ownership of your tribe to someone else. """
author = ctx.author
if not member:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**Please, inform a member, {author.mention}!**")
user_tribe = await self.get_tribe_info_by_user_id(author.id)
if not user_tribe['name']:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**You don't have a tribe, {author.mention}**!")
if user_tribe['owner_id'] == member.id:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**You can't transfer the tribe to yourself, {author.mention}!**")
tribe_member = await self.get_tribe_member(member.id)
if not tribe_member:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**{member.mention} is not even in a tribe, you can't transfer the tribe to them, {author.mention}!**")
if tribe_member[0] != user_tribe['owner_id']:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**{member.mention} is in a different tribe, you can't transfer the tribe to them, {author.mention}!**")
confirm = await ConfirmSkill(
f"**Are you sure you want to transfer your ownership of `{user_tribe['name']}` to {member.mention}, {author.mention}?**"
).prompt(ctx)
if not confirm:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**Not doing it, then, {author.mention}!**")
await self.update_user_tribe_owner(author.id, member.id)
await ctx.send(f"**Successfully transferred ownership of `{user_tribe['name']}` from {author.mention} to {member.mention}!**")
@tribe.command(aliases=["fto", "ftransfer", "force_transfer"])
@commands.cooldown(1, 60, commands.BucketType.user)
@commands.has_permissions(administrator=True)
async def force_transfer_ownership(self, ctx, tribe_name: str = None, member: discord.Member = None) -> None:
""" (ADMIN) Force-transfers the ownership of a Tribe to another user.
:param tribe_name: The name of the tribe from which to transfer ownership.
:param member: The member to transfer the Tribe to. """
author = ctx.author
if not member:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**Please, inform a member to transfer the tribe to, {author.mention}!**")
if not tribe_name:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**Please, inform the name of the tribe, {author.mention}!**")
user_tribe = await self.get_tribe_info_by_name(tribe_name)
if not user_tribe['name']:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**No tribes with that name were found, {author.mention}!**")
if user_tribe['owner_id'] == member.id:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**You can't transfer the tribe to the same user, {author.mention}!**")
tribe_member = await self.get_tribe_member(member.id)
if not tribe_member:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**{member.mention} is not even in a tribe, you can't transfer the tribe to them, {author.mention}!**")
if tribe_member[0] != user_tribe['owner_id']:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**{member.mention} is in a different tribe, you can't transfer the tribe to them, {author.mention}!**")
confirm = await ConfirmSkill(
f"**Are you sure you want to transfer ownership of `{user_tribe['name']}` from <@{user_tribe['owner_id']}> to {member.mention}, {author.mention}?**"
).prompt(ctx)
if not confirm:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**Not doing it, then, {author.mention}!**")
try:
await self.update_user_tribe_owner(user_tribe['owner_id'], member.id)
except:
await ctx.send(f"**Something went wrong with it, {author.mention}!**")
else:
await ctx.send(f"**Successfully transferred ownership of `{user_tribe['name']}` from <@{user_tribe['owner_id']}> to {member.mention}!**")
@commands.command(aliases=['get_mission', 'gq', 'gm'])
@Player.poisoned()
@Player.skills_used(requirement=50)
@Player.skill_on_cooldown(skill=Skill.FOUR, seconds=172800)
@Player.skills_locked()
@Player.user_is_class('munk')
@Player.skill_mark()
@Player.not_ready()
async def get_quest(self, ctx) -> None:
""" Gets a Quest for you and your Tribe to complete, and if so,
the involved people will get rewarded.
• Delay = 2 days
• Cost = Free """
perpetrator = ctx.author
# Do the magic here.
if ctx.channel.id != self.bots_txt.id:
return await ctx.send(f"**{perpetrator.mention}, you can only use this command in {self.bots_txt.mention}!**")
perpetrator_fx = await self.get_user_effects(perpetrator)
if 'knocked_out' in perpetrator_fx:
return await ctx.send(f"**{perpetrator.mention}, you can't use this skill, because you are knocked-out!**")
tribe_member = await self.get_tribe_member(perpetrator.id)
if not tribe_member:
return await ctx.send(f"**You are not in a tribe, {perpetrator.mention}**!")
user_tribe = await self.get_tribe_info_by_user_id(tribe_member[0])
# Checks whether the user already has an on-going Quest (max of 1 at a time)
if await self.get_skill_action_by_user_id_and_skill_type(user_id=perpetrator.id, skill_type="quest"):
return await ctx.send(f"**You cannot have more than 1 on-going Quest at a time, {perpetrator.mention}!**")
random_quest = await self.generate_random_quest()
_, exists = await Player.skill_on_cooldown(skill=Skill.FOUR, seconds=172800).predicate(ctx)
try:
current_timestamp = await utils.get_timestamp()
await self.insert_skill_action(
user_id=perpetrator.id, skill_type="quest", skill_timestamp=current_timestamp,
target_id=perpetrator.id, channel_id=ctx.channel.id, price=random_quest["enum_value"], content=random_quest["message"]
)
if exists:
await self.update_user_skill_ts(perpetrator.id, Skill.FOUR, current_timestamp)
else:
await self.insert_user_skill_cooldown(perpetrator.id, Skill.FOUR, current_timestamp)
# Updates user's skills used counter
await self.update_user_skills_used(user_id=perpetrator.id)
except Exception as e:
print(e)
return await ctx.send(f"**Something went wrong with your skill and it failed, {perpetrator.mention}!**")
else:
tribe_quest_embed = await self.get_tribe_quest_embed(channel=ctx.channel, user_id=perpetrator.id, quest=random_quest, tribe=user_tribe)
await ctx.send(embed=tribe_quest_embed)
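# The 172800-second cooldown passed to the skill decorators above is 2 * 86400 s,
# i.e. the 2-day delay advertised in the command's docstring.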
async def generate_random_quest(self) -> Any:
""" Generates a random question. """
quests: List[Dict[str, Union[str, int]]] = [
{"message": "Complete 5 `TheLanguageJungle` games.", "enum_value": 1},
{"message": "Rep someone and get repped back.", "enum_value": 2},
{"message": "Win a coinflip betting 50 leaves.", "enum_value": 3},
{"message": "Get a 15+ score in the `Flags` game.", "enum_value": 4},
{"message": "Spend 4 hours in a Voice Channel in a single day.", "enum_value": 5},
{"message": "Buy any item from the SlothShop, if you have all items you need to get ripped-off first.", "enum_value": 6},
{"message": "Ping DNK 3 times in a row and try to evade a BAN!!!!", "enum_value": 7},
]
return choice(quests)
async def get_tribe_quest_embed(self,
channel: Union[discord.TextChannel, discord.Thread], user_id: int, quest: Dict[str, Union[str, int]], tribe: Dict[str, Union[str, int]]
) -> discord.Embed:
""" Makes an embedded message for a Tribe Role creation.
:param channel: The context channel.
:param owner_id: The owner of the tribe.
:param tribe_info: The tribe info.
:param role_name: The role created for that tribe. """
current_ts = await utils.get_timestamp()
tribe_quest_embed = discord.Embed(
title="__A New Quest has been Started__",
description=f"<@{user_id}> has just started a Quest for their Tribe named `{tribe['name']}`!",
color=discord.Color.green(),
timestamp=datetime.fromtimestamp(current_ts)
)
tribe_quest_embed.add_field(name="__Quest__:", value=quest["message"])
if tribe["thumbnail"]:
tribe_quest_embed.set_thumbnail(url=tribe["thumbnail"])
tribe_quest_embed.set_image(url='https://c.tenor.com/MJ8Dxo58AJAAAAAC/muggers-quest.gif')
tribe_quest_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)
return tribe_quest_embed
async def complete_quest(self, user_id: int) -> None:
""" Completes an on-going quest for a member.
:param user_id: The ID of the user who's completing the quest. """
# Gets Quest
quest = await self.get_skill_action_by_user_id_and_skill_type(user_id=user_id, skill_type="quest")
if not quest:
return
# Deletes Quest
await self.delete_skill_action_by_user_id_and_skill_type(user_id=user_id, skill_type='quest')
# Gets enum value
enum_name = QuestEnum.__dict__['_member_names_'][quest[7]-1]
function: Callable = QuestEnum.__getitem__(name=enum_name)
# Runs attached method if there's any
if function:
await function()
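# NOTE: quest[7] is assumed to be the skill action's `price` column, which get_quest
# fills with the quest's enum_value, and quest[8] its `content` (the quest message),
# as also read by the quest listing command below.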
@tribe.command(aliases=["mission", "task", "chore", "quests"])
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.has_permissions(administrator=True)
async def quest(self, ctx) -> None:
""" Shows all Quests that the tribe you are in has. """
member = ctx.author
tribe_member = await self.get_tribe_member(member.id)
if not tribe_member:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**You're not even in a tribe, {member.mention}!**")
user_tribe = await self.get_tribe_info_by_user_id(tribe_member[0])
tribe_members = await self.get_tribe_members(tribe_member[0], tribe_member[1])
quests: List[List[Union[str, int]]] = []
for tribe_member in tribe_members:
quest = await self.get_skill_action_by_user_id_and_skill_type(user_id=tribe_member[0], skill_type="quest")
if quest:
quests.append(quest)
if not quests:
return await ctx.send(f"**No quests found in your tribe, {member.mention}!**")
quests_text: str = ''.join(list(map(lambda q: f"```• {q[8]} ({q[7]});```", quests)))
embed: discord.Embed = discord.Embed(
title="__Tribe Quests__",
description=f"Showing all `{len(quests)}` quests from this tribe:\n{quests_text}",
color=member.color,
timestamp=ctx.message.created_at
)
embed.set_footer(text=f"Requested by: {member}", icon_url=member.display_avatar)
if user_tribe["thumbnail"]:
embed.set_thumbnail(url=user_tribe["thumbnail"])
await ctx.send(embed=embed) | [] | [] | ["BOTS_AND_COMMANDS_CHANNEL_ID", "APPROVE_THUMBNAIL_CHANNEL_ID", "SERVER_ID"] | [] | ["BOTS_AND_COMMANDS_CHANNEL_ID", "APPROVE_THUMBNAIL_CHANNEL_ID", "SERVER_ID"] | python | 3 | 0 |
client/cache/redis/redis_test.go | // Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/gomodule/redigo/redis"
"github.com/stretchr/testify/assert"
"github.com/beego/beego/v2/client/cache"
)
func TestRedisCache(t *testing.T) {
redisAddr := os.Getenv("REDIS_ADDR")
if redisAddr == "" {
redisAddr = "127.0.0.1:6379"
}
bm, err := cache.NewCache("redis", fmt.Sprintf(`{"conn": "%s"}`, redisAddr))
if err != nil {
t.Error("init err")
}
timeoutDuration := 10 * time.Second
if err = bm.Put(context.Background(), "astaxie", 1, timeoutDuration); err != nil {
t.Error("set Error", err)
}
if res, _ := bm.IsExist(context.Background(), "astaxie"); !res {
t.Error("check err")
}
time.Sleep(11 * time.Second)
if res, _ := bm.IsExist(context.Background(), "astaxie"); res {
t.Error("check err")
}
if err = bm.Put(context.Background(), "astaxie", 1, timeoutDuration); err != nil {
t.Error("set Error", err)
}
val, _ := bm.Get(context.Background(), "astaxie")
if v, _ := redis.Int(val, err); v != 1 {
t.Error("get err")
}
if err = bm.Incr(context.Background(), "astaxie"); err != nil {
t.Error("Incr Error", err)
}
val, _ = bm.Get(context.Background(), "astaxie")
if v, _ := redis.Int(val, err); v != 2 {
t.Error("get err")
}
if err = bm.Decr(context.Background(), "astaxie"); err != nil {
t.Error("Decr Error", err)
}
val, _ = bm.Get(context.Background(), "astaxie")
if v, _ := redis.Int(val, err); v != 1 {
t.Error("get err")
}
bm.Delete(context.Background(), "astaxie")
if res, _ := bm.IsExist(context.Background(), "astaxie"); res {
t.Error("delete err")
}
// test string
if err = bm.Put(context.Background(), "astaxie", "author", timeoutDuration); err != nil {
t.Error("set Error", err)
}
if res, _ := bm.IsExist(context.Background(), "astaxie"); !res {
t.Error("check err")
}
val, _ = bm.Get(context.Background(), "astaxie")
if v, _ := redis.String(val, err); v != "author" {
t.Error("get err")
}
// test GetMulti
if err = bm.Put(context.Background(), "astaxie1", "author1", timeoutDuration); err != nil {
t.Error("set Error", err)
}
if res, _ := bm.IsExist(context.Background(), "astaxie1"); !res {
t.Error("check err")
}
vv, _ := bm.GetMulti(context.Background(), []string{"astaxie", "astaxie1"})
if len(vv) != 2 {
t.Error("GetMulti ERROR")
}
if v, _ := redis.String(vv[0], nil); v != "author" {
t.Error("GetMulti ERROR")
}
if v, _ := redis.String(vv[1], nil); v != "author1" {
t.Error("GetMulti ERROR")
}
vv, _ = bm.GetMulti(context.Background(), []string{"astaxie0", "astaxie1"})
if vv[0] != nil {
t.Error("GetMulti ERROR")
}
if v, _ := redis.String(vv[1], nil); v != "author1" {
t.Error("GetMulti ERROR")
}
// test clear all
if err = bm.ClearAll(context.Background()); err != nil {
t.Error("clear all err")
}
}
func TestCache_Scan(t *testing.T) {
timeoutDuration := 10 * time.Second
addr := os.Getenv("REDIS_ADDR")
if addr == "" {
addr = "127.0.0.1:6379"
}
// init
bm, err := cache.NewCache("redis", fmt.Sprintf(`{"conn": "%s"}`, addr))
if err != nil {
t.Error("init err")
}
// insert all
for i := 0; i < 100; i++ {
if err = bm.Put(context.Background(), fmt.Sprintf("astaxie%d", i), fmt.Sprintf("author%d", i), timeoutDuration); err != nil {
t.Error("set Error", err)
}
}
time.Sleep(time.Second)
// scan all for the first time
keys, err := bm.(*Cache).Scan(DefaultKey + ":*")
if err != nil {
t.Error("scan Error", err)
}
assert.Equal(t, 100, len(keys), "scan all error")
// clear all
if err = bm.ClearAll(context.Background()); err != nil {
t.Error("clear all err")
}
// scan all for the second time
keys, err = bm.(*Cache).Scan(DefaultKey + ":*")
if err != nil {
t.Error("scan Error", err)
}
if len(keys) != 0 {
t.Error("scan all err")
}
}
| ["\"REDIS_ADDR\"", "\"REDIS_ADDR\""] | [] | ["REDIS_ADDR"] | [] | ["REDIS_ADDR"] | go | 1 | 0 | |
numpy/distutils/misc_util.py | from __future__ import division, absolute_import, print_function
import os
import re
import sys
import copy
import glob
import atexit
import tempfile
import subprocess
import shutil
import multiprocessing
import textwrap
import distutils
from distutils.errors import DistutilsError
try:
from threading import local as tlocal
except ImportError:
from dummy_threading import local as tlocal
# stores temporary directory of each thread to only create one per thread
_tdata = tlocal()
# store all created temporary directories so they can be deleted on exit
_tmpdirs = []
def clean_up_temporary_directory():
if _tmpdirs is not None:
for d in _tmpdirs:
try:
shutil.rmtree(d)
except OSError:
pass
atexit.register(clean_up_temporary_directory)
from numpy.distutils.compat import get_exception
from numpy.compat import basestring
from numpy.compat import npy_load_module
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dict_append', 'appendpath', 'generate_config_py',
'get_cmd', 'allpath', 'get_mathlibs',
'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',
'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings',
'has_f_sources', 'has_cxx_sources', 'filter_sources',
'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs']
class InstallableLib(object):
"""
Container to hold information on an installable library.
Parameters
----------
name : str
Name of the installed library.
build_info : dict
Dictionary holding build information.
target_dir : str
Absolute path specifying where to install the library.
See Also
--------
Configuration.add_installed_library
Notes
-----
The three parameters are stored as attributes with the same names.
"""
def __init__(self, name, build_info, target_dir):
self.name = name
self.build_info = build_info
self.target_dir = target_dir
def get_num_build_jobs():
"""
Get number of parallel build jobs set by the --parallel command line
argument of setup.py
If the command did not receive a setting the environment variable
NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of
processors on the system, with a maximum of 8 (to prevent
overloading the system if there a lot of CPUs).
Returns
-------
out : int
number of parallel jobs that can be run
"""
from numpy.distutils.core import get_distribution
try:
cpu_count = len(os.sched_getaffinity(0))
except AttributeError:
cpu_count = multiprocessing.cpu_count()
cpu_count = min(cpu_count, 8)
envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count))
dist = get_distribution()
# may be None during configuration
if dist is None:
return envjobs
# any of these three may have the job set, take the largest
cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None),
getattr(dist.get_command_obj('build_ext'), 'parallel', None),
getattr(dist.get_command_obj('build_clib'), 'parallel', None))
if all(x is None for x in cmdattr):
return envjobs
else:
return max(x for x in cmdattr if x is not None)
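# Illustrative usage, assuming a numpy.distutils-based setup.py: either of
#   NPY_NUM_BUILD_JOBS=4 python setup.py build
#   python setup.py build --parallel 4
# caps the number of compile jobs at 4; a --parallel value given on the command
# line takes precedence over the environment variable.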
def quote_args(args):
# don't used _nt_quote_args as it does not check if
# args items already have quotes or not.
args = list(args)
for i in range(len(args)):
a = args[i]
if ' ' in a and a[0] not in '"\'':
args[i] = '"%s"' % (a)
return args
def allpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
splitted = name.split('/')
return os.path.join(*splitted)
def rel_path(path, parent_path):
"""Return path relative to parent_path."""
# Use realpath to avoid issues with symlinked dirs (see gh-7707)
pd = os.path.realpath(os.path.abspath(parent_path))
apath = os.path.realpath(os.path.abspath(path))
if len(apath) < len(pd):
return path
if apath == pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
path = apath[len(pd)+1:]
return path
def get_path_from_frame(frame, parent_path=None):
"""Return path of the module given a frame object from the call stack.
Returned path is relative to parent_path when given,
otherwise it is absolute path.
"""
# First, try to find if the file name is in the frame.
try:
caller_file = eval('__file__', frame.f_globals, frame.f_locals)
d = os.path.dirname(os.path.abspath(caller_file))
except NameError:
# __file__ is not defined, so let's try __name__. We try this second
# because setuptools spoofs __name__ to be '__main__' even though
# sys.modules['__main__'] might be something else, like easy_install(1).
caller_name = eval('__name__', frame.f_globals, frame.f_locals)
__import__(caller_name)
mod = sys.modules[caller_name]
if hasattr(mod, '__file__'):
d = os.path.dirname(os.path.abspath(mod.__file__))
else:
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.abspath('.')
# hmm, should we use sys.argv[0] like in __builtin__ case?
if parent_path is not None:
d = rel_path(d, parent_path)
return d or '.'
def njoin(*path):
"""Join two or more pathname components +
- convert a /-separated pathname to one using the OS's path separator.
- resolve `..` and `.` from path.
Either passing n arguments as in njoin('a','b'), or a sequence
of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.
"""
paths = []
for p in path:
if is_sequence(p):
# njoin(['a', 'b'], 'c')
paths.append(njoin(*p))
else:
assert is_string(p)
paths.append(p)
path = paths
if not path:
# njoin()
joined = ''
else:
# njoin('a', 'b')
joined = os.path.join(*path)
if os.path.sep != '/':
joined = joined.replace('/', os.path.sep)
return minrelpath(joined)
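# For example, on a POSIX system:
#   njoin('a', ['b', 'c'], 'd/..')  ->  'a/b/c'
# (the nested sequence is joined first, then '..' is resolved by minrelpath).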
def get_mathlibs(path=None):
"""Return the MATHLIB line from numpyconfig.h
"""
if path is not None:
config_file = os.path.join(path, '_numpyconfig.h')
else:
# Look for the file in each of the numpy include directories.
dirs = get_numpy_include_dirs()
for path in dirs:
fn = os.path.join(path, '_numpyconfig.h')
if os.path.exists(fn):
config_file = fn
break
else:
raise DistutilsError('_numpyconfig.h not found in numpy include '
'dirs %r' % (dirs,))
with open(config_file) as fid:
mathlibs = []
s = '#define MATHLIB'
for line in fid:
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
return mathlibs
def minrelpath(path):
"""Resolve `..` and '.' from path.
"""
if not is_string(path):
return path
if '.' not in path:
return path
l = path.split(os.sep)
while l:
try:
i = l.index('.', 1)
except ValueError:
break
del l[i]
j = 1
while l:
try:
i = l.index('..', j)
except ValueError:
break
if l[i-1]=='..':
j += 1
else:
del l[i], l[i-1]
j = 1
if not l:
return ''
return os.sep.join(l)
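# For example, on a POSIX system:
#   minrelpath('a/./b/../c')  ->  'a/c'
# A leading '.' is deliberately preserved (the '.' scan starts at index 1).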
def sorted_glob(fileglob):
"""sorts output of python glob for https://bugs.python.org/issue30461
to allow extensions to have reproducible build results"""
return sorted(glob.glob(fileglob))
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
assert not is_string(paths), repr(paths)
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
p = sorted_glob(n)
p2 = sorted_glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
new_paths.extend(p)
else:
if include_non_existing:
new_paths.append(n)
print('could not resolve pattern in %r: %r' %
(local_path, n))
else:
n2 = njoin(local_path, n)
if os.path.exists(n2):
new_paths.append(n2)
else:
if os.path.exists(n):
new_paths.append(n)
elif include_non_existing:
new_paths.append(n)
if not os.path.exists(n):
print('non-existing path in %r: %r' %
(local_path, n))
elif is_sequence(n):
new_paths.extend(_fix_paths(n, local_path, include_non_existing))
else:
new_paths.append(n)
return [minrelpath(p) for p in new_paths]
def gpaths(paths, local_path='', include_non_existing=True):
"""Apply glob to paths and prepend local_path if needed.
"""
if is_string(paths):
paths = (paths,)
return _fix_paths(paths, local_path, include_non_existing)
def make_temp_file(suffix='', prefix='', text=True):
if not hasattr(_tdata, 'tempdir'):
_tdata.tempdir = tempfile.mkdtemp()
_tmpdirs.append(_tdata.tempdir)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
dir=_tdata.tempdir,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
# Hooks for colored terminal output.
# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
# Avoid importing curses that causes illegal operation
# with a message:
# PYTHON2 caused an invalid page fault in
# module CYGNURSES7.DLL as 015f:18bbfc28
# Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
# ssh to Win32 machine from debian
# curses.version is 2.2
# CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
return 0
if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
try:
import curses
curses.setupterm()
if (curses.tigetnum("colors") >= 0
and curses.tigetnum("pairs") >= 0
and ((curses.tigetstr("setf") is not None
and curses.tigetstr("setb") is not None)
or (curses.tigetstr("setaf") is not None
and curses.tigetstr("setab") is not None)
or curses.tigetstr("scp") is not None)):
return 1
except Exception:
pass
return 0
if terminal_has_colors():
_colour_codes = dict(black=0, red=1, green=2, yellow=3,
blue=4, magenta=5, cyan=6, white=7, default=9)
def colour_text(s, fg=None, bg=None, bold=False):
seq = []
if bold:
seq.append('1')
if fg:
fgcode = 30 + _colour_codes.get(fg.lower(), 0)
seq.append(str(fgcode))
if bg:
bgcode = 40 + _colour_codes.get(bg.lower(), 7)
seq.append(str(bgcode))
if seq:
return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
else:
return s
else:
def colour_text(s, fg=None, bg=None):
return s
def default_text(s):
return colour_text(s, 'default')
def red_text(s):
return colour_text(s, 'red')
def green_text(s):
return colour_text(s, 'green')
def yellow_text(s):
return colour_text(s, 'yellow')
def cyan_text(s):
return colour_text(s, 'cyan')
def blue_text(s):
return colour_text(s, 'blue')
#########################
def cyg2win32(path):
if sys.platform=='cygwin' and path.startswith('/cygdrive'):
path = path[10] + ':' + os.path.normcase(path[11:])
return path
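# For example, when running under Cygwin:
#   cyg2win32('/cygdrive/c/foo/bar')  ->  'c:/foo/bar'
# (os.path.normcase is effectively a no-op under Cygwin's posixpath, so only the
# '/cygdrive/<letter>' prefix is rewritten).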
def mingw32():
"""Return true when using mingw32 environment.
"""
if sys.platform=='win32':
if os.environ.get('OSTYPE', '')=='msys':
return True
if os.environ.get('MSYSTEM', '')=='MINGW32':
return True
return False
def msvc_runtime_version():
"Return version of MSVC runtime library, as defined by __MSC_VER__ macro"
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
else:
msc_ver = None
return msc_ver
def msvc_runtime_library():
"Return name of MSVC runtime library if Python was built with MSVC >= 7"
ver = msvc_runtime_major ()
if ver:
if ver < 140:
return "msvcr%i" % ver
else:
return "vcruntime%i" % ver
else:
return None
def msvc_runtime_major():
"Return major version of MSVC runtime coded like get_build_msvc_version"
major = {1300: 70, # MSVC 7.0
1310: 71, # MSVC 7.1
1400: 80, # MSVC 8
1500: 90, # MSVC 9 (aka 2008)
1600: 100, # MSVC 10 (aka 2010)
1900: 140, # MSVC 14 (aka 2015)
}.get(msvc_runtime_version(), None)
return major
#########################
#XXX need support for .C that is also C++
cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match
f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
def _get_f90_modules(source):
"""Return a list of Fortran f90 module names that
given source file defines.
"""
if not f90_ext_match(source):
return []
modules = []
with open(source, 'r') as f:
for line in f:
m = f90_module_name_match(line)
if m:
name = m.group('name')
modules.append(name)
# break # XXX can we assume that there is one module per file?
return modules
def is_string(s):
return isinstance(s, basestring)
def all_strings(lst):
"""Return True if all items in lst are string objects. """
for item in lst:
if not is_string(item):
return False
return True
def is_sequence(seq):
if is_string(seq):
return False
try:
len(seq)
except Exception:
return False
return True
def is_glob_pattern(s):
return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
if is_sequence(seq):
return list(seq)
else:
return [seq]
def get_language(sources):
# not used in numpy/scipy packages, use build_ext.detect_language instead
"""Determine language value (c,f77,f90) from sources """
language = None
for source in sources:
if isinstance(source, str):
if f90_ext_match(source):
language = 'f90'
break
elif fortran_ext_match(source):
language = 'f77'
return language
def has_f_sources(sources):
"""Return True if sources contains Fortran files """
for source in sources:
if fortran_ext_match(source):
return True
return False
def has_cxx_sources(sources):
"""Return True if sources contains C++ files """
for source in sources:
if cxx_ext_match(source):
return True
return False
def filter_sources(sources):
"""Return four lists of filenames containing
C, C++, Fortran, and Fortran 90 module sources,
respectively.
"""
c_sources = []
cxx_sources = []
f_sources = []
fmodule_sources = []
for source in sources:
if fortran_ext_match(source):
modules = _get_f90_modules(source)
if modules:
fmodule_sources.append(source)
else:
f_sources.append(source)
elif cxx_ext_match(source):
cxx_sources.append(source)
else:
c_sources.append(source)
return c_sources, cxx_sources, f_sources, fmodule_sources
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get unique directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.split(f)
if d[0] != '' and not d[0] in direcs:
direcs.append(d[0])
return direcs
def _commandline_dep_string(cc_args, extra_postargs, pp_opts):
"""
Return commandline representation used to determine if a file needs
to be recompiled
"""
cmdline = 'commandline: '
cmdline += ' '.join(cc_args)
cmdline += ' '.join(extra_postargs)
cmdline += ' '.join(pp_opts) + '\n'
return cmdline
def get_dependencies(sources):
#XXX scan sources for include statements
return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
"""Return true if directory is local directory.
"""
if not is_string(directory):
return False
abs_dir = os.path.abspath(directory)
c = os.path.commonprefix([os.getcwd(), abs_dir])
new_dir = abs_dir[len(c):].split(os.sep)
if new_dir and not new_dir[0]:
new_dir = new_dir[1:]
if new_dir and new_dir[0]=='build':
return False
new_dir = os.sep.join(new_dir)
return os.path.isdir(new_dir)
def general_source_files(top_path):
pruned_directories = {'CVS':1, '.svn':1, 'build':1}
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for f in filenames:
if not prune_file_pat.search(f):
yield os.path.join(dirpath, f)
def general_source_directories_files(top_path):
"""Return a directory name relative to top_path and
files contained.
"""
pruned_directories = ['CVS', '.svn', 'build']
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for d in dirnames:
dpath = os.path.join(dirpath, d)
rpath = rel_path(dpath, top_path)
files = []
for f in os.listdir(dpath):
fn = os.path.join(dpath, f)
if os.path.isfile(fn) and not prune_file_pat.search(fn):
files.append(fn)
yield rpath, files
dpath = top_path
rpath = rel_path(dpath, top_path)
filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \
if not prune_file_pat.search(f)]
files = [f for f in filenames if os.path.isfile(f)]
yield rpath, files
def get_ext_source_files(ext):
# Get sources and any include files in the same directory.
filenames = []
sources = [_m for _m in ext.sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
for d in ext.depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_script_files(scripts):
scripts = [_m for _m in scripts if is_string(_m)]
return scripts
def get_lib_source_files(lib):
filenames = []
sources = lib[1].get('sources', [])
sources = [_m for _m in sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
depends = lib[1].get('depends', [])
for d in depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_shared_lib_extension(is_python_ext=False):
"""Return the correct file extension for shared libraries.
Parameters
----------
is_python_ext : bool, optional
Whether the shared library is a Python extension. Default is False.
Returns
-------
so_ext : str
The shared library extension.
Notes
-----
For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,
and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on
POSIX systems according to PEP 3149. For Python 3.2 this is implemented on
Linux, but not on OS X.
"""
confvars = distutils.sysconfig.get_config_vars()
# SO is deprecated in 3.3.1, use EXT_SUFFIX instead
so_ext = confvars.get('EXT_SUFFIX', None)
if so_ext is None:
so_ext = confvars.get('SO', '')
if not is_python_ext:
# hardcode known values, config vars (including SHLIB_SUFFIX) are
# unreliable (see #3182)
# darwin, windows and debug linux are wrong in 3.3.1 and older
if (sys.platform.startswith('linux') or
sys.platform.startswith('gnukfreebsd')):
so_ext = '.so'
elif sys.platform.startswith('darwin'):
so_ext = '.dylib'
elif sys.platform.startswith('win'):
so_ext = '.dll'
else:
# fall back to config vars for unknown platforms
# fix long extension for Python >=3.2, see PEP 3149.
if 'SOABI' in confvars:
# Does nothing unless SOABI config var exists
so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
return so_ext
def get_data_files(data):
if is_string(data):
return [data]
sources = data[1]
filenames = []
for s in sources:
if hasattr(s, '__call__'):
continue
if is_local_src_dir(s):
filenames.extend(list(general_source_files(s)))
elif is_string(s):
if os.path.isfile(s):
filenames.append(s)
else:
print('Not existing data file:', s)
else:
raise TypeError(repr(s))
return filenames
def dot_join(*args):
return '.'.join([a for a in args if a])
def get_frame(level=0):
"""Return frame object from call stack with given level.
"""
try:
return sys._getframe(level+1)
except AttributeError:
frame = sys.exc_info()[2].tb_frame
for _ in range(level+1):
frame = frame.f_back
return frame
######################
class Configuration(object):
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
'installed_libraries', 'define_macros']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
caller_frame = get_frame(caller_level)
self.local_path = get_path_from_frame(caller_frame, top_path)
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path, package_path)):
package_path = njoin(self.local_path, package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the installed package
self.path_in_package = os.path.join(*self.name.split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self, n, a)
if isinstance(a, list):
self.list_keys.append(n)
elif isinstance(a, dict):
self.dict_keys.append(n)
else:
self.extra_keys.append(n)
if os.path.exists(njoin(package_path, '__init__.py')):
self.packages.append(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
assume_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
caller_instance = None
for i in range(1, 3):
try:
f = get_frame(i)
except ValueError:
break
try:
caller_instance = eval('self', f.f_globals, f.f_locals)
break
except NameError:
pass
if isinstance(caller_instance, self.__class__):
if caller_instance.options['delegate_options_to_subpackages']:
self.set_options(**caller_instance.options)
self.setup_name = setup_name
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self, n)
if a:
d[n] = a
return d
def info(self, message):
if not self.options['quiet']:
print(message)
def warn(self, message):
sys.stderr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
def get_distribution(self):
"""Return the distutils distribution object for self."""
from numpy.distutils.core import get_distribution
return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
parent_name,
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
continue
if 'build' in d.split(os.sep):
continue
n = '.'.join(d.split(os.sep)[-len(l):])
c = self.get_subpackage(n,
parent_name = parent_name,
caller_level = caller_level+1)
config_list.extend(c)
return config_list
def _get_configuration_from_setup_py(self, setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = 1):
# In case setup_py imports local modules:
sys.path.insert(0, os.path.dirname(setup_py))
try:
setup_name = os.path.splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name, subpackage_name, setup_name)
setup_module = npy_load_module('_'.join(n.split('.')),
setup_py,
('.py', 'U', 1))
if not hasattr(setup_module, 'configuration'):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s does not define configuration())'\
% (setup_module))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level + 1)
else:
pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
args = (pn,)
def fix_args_py2(args):
if setup_module.configuration.__code__.co_argcount > 1:
args = args + (self.top_path,)
return args
def fix_args_py3(args):
if setup_module.configuration.__code__.co_argcount > 1:
args = args + (self.top_path,)
return args
if sys.version_info[0] < 3:
args = fix_args_py2(args)
else:
args = fix_args_py3(args)
config = setup_module.configuration(*args)
if config.name!=dot_join(parent_name, subpackage_name):
self.warn('Subpackage %r configuration returned as %r' % \
(dot_join(parent_name, subpackage_name), config.name))
finally:
del sys.path[0]
return config
def get_subpackage(self,subpackage_name,
subpackage_path=None,
parent_name=None,
caller_level = 1):
"""Return list of subpackage configurations.
Parameters
----------
subpackage_name : str or None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
subpackage_path : str
If None, then the path is assumed to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
parent_name : str
Parent name.
"""
if subpackage_name is None:
if subpackage_path is None:
raise ValueError(
"either subpackage_name or subpackage_path must be specified")
subpackage_name = os.path.basename(subpackage_path)
# handle wildcards
l = subpackage_name.split('.')
if subpackage_path is None and '*' in subpackage_name:
return self._wildcard_get_subpackage(subpackage_name,
parent_name,
caller_level = caller_level+1)
assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
if subpackage_path is None:
subpackage_path = njoin([self.local_path] + l)
else:
subpackage_path = njoin([subpackage_path] + l[:-1])
subpackage_path = self.paths([subpackage_path])[0]
setup_py = njoin(subpackage_path, self.setup_name)
if not self.options['ignore_setup_xxx_py']:
if not os.path.isfile(setup_py):
setup_py = njoin(subpackage_path,
'setup_%s.py' % (subpackage_name))
if not os.path.isfile(setup_py):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s/{setup_%s,setup}.py was not found)' \
% (os.path.dirname(setup_py), subpackage_name))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level+1)
else:
config = self._get_configuration_from_setup_py(
setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = caller_level + 1)
if config:
return [config]
else:
return []
def add_subpackage(self,subpackage_name,
subpackage_path=None,
standalone = False):
"""Add a sub-package to the current Configuration instance.
This is useful in a setup.py script for adding sub-packages to a
package.
Parameters
----------
subpackage_name : str
name of the subpackage
subpackage_path : str
if given, the subpackage path such as the subpackage is in
subpackage_path / subpackage_name. If None,the subpackage is
assumed to be located in the local path / subpackage_name.
standalone : bool
"""
if standalone:
parent_name = None
else:
parent_name = self.name
config_list = self.get_subpackage(subpackage_name, subpackage_path,
parent_name = parent_name,
caller_level = 2)
if not config_list:
self.warn('No configuration returned, assuming unavailable.')
for config in config_list:
d = config
if isinstance(config, Configuration):
d = config.todict()
assert isinstance(d, dict), repr(type(d))
self.info('Appending %s configuration to %s' \
% (d.get('name'), self.name))
self.dict_append(**d)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a subpackage '+ subpackage_name)
def add_data_dir(self, data_path):
"""Recursively add files under data_path to data_files list.
Recursively add files under data_path to the list of data_files to be
installed (and distributed). The data_path can be either a relative
path-name, or an absolute path-name, or a 2-tuple where the first
argument shows where in the install directory the data directory
should be installed to.
Parameters
----------
data_path : seq or str
Argument can be either
* 2-sequence (<datadir suffix>, <path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
Notes
-----
Rules for installation paths::
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
(gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
(gun/*, foo/*) -> parent/gun/a, parent/gun/b
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
fun/bar/car.dat:
>>> self.add_data_dir('fun') #doctest: +SKIP
>>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
>>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
<package install directory>/
fun/
foo.dat
bar/
car.dat
sun/
foo.dat
bar/
car.dat
gun/
foo.dat
car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
else:
d = None
if is_sequence(data_path):
[self.add_data_dir((d, p)) for p in data_path]
return
if not is_string(data_path):
raise TypeError("not a string: %r" % (data_path,))
if d is None:
if os.path.isabs(data_path):
return self.add_data_dir((os.path.basename(data_path), data_path))
return self.add_data_dir((data_path, data_path))
paths = self.paths(data_path, include_non_existing=False)
if is_glob_pattern(data_path):
if is_glob_pattern(d):
pattern_list = allpath(d).split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
rl = list(range(len(pattern_list)-1)); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
print('Not a directory, skipping', path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.split(os.sep)
path_list.reverse()
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
if i>=len(path_list):
raise ValueError('cannot fill pattern %r with %r' \
% (d, path))
target_list.append(path_list[i])
else:
assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
target_list.append(s)
i += 1
if path_list[i:]:
self.warn('mismatch of pattern_list=%s and path_list=%s'\
% (pattern_list, path_list))
target_list.reverse()
self.add_data_dir((os.sep.join(target_list), path))
else:
for path in paths:
self.add_data_dir((d, path))
return
assert not is_glob_pattern(d), repr(d)
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
for path in paths:
for d1, f in list(general_source_directories_files(path)):
target_path = os.path.join(self.path_in_package, d, d1)
data_files.append((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p, files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add(f)
self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
def add_data_files(self,*files):
"""Add data files to configuration data_files.
Parameters
----------
files : sequence
Argument(s) can be either
* 2-sequence (<datadir prefix>,<path to data file(s)>)
* paths to data files where python datadir prefix defaults
to package dir.
Notes
-----
The form of each element of the files sequence is very flexible
allowing many combinations of where to get the files from the package
and where they should ultimately be installed on the system. The most
basic usage is for an element of the files argument sequence to be a
simple filename. This will cause that file from the local path to be
installed to the installation path of the self.name package (package
path). The file argument can also be a relative path in which case the
entire relative path will be installed into the package directory.
Finally, the file can be an absolute path name in which case the file
will be found at the absolute path name but installed to the package
path.
This basic behavior can be augmented by passing a 2-tuple in as the
file argument. The first element of the tuple should specify the
relative path (under the package install directory) where the
remaining sequence of files should be installed to (it has nothing to
do with the file-names in the source distribution). The second element
of the tuple is the sequence of files that should be installed. The
files in this sequence can be filenames, relative paths, or absolute
paths. For absolute paths the file will be installed in the top-level
package installation directory (regardless of the first argument).
Filenames and relative path names will be installed in the package
install directory under the path name given as the first element of
the tuple.
Rules for installation paths:
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
#. ``*``.txt -> parent/a.txt, parent/b.txt
#. foo/``*``.txt -> parent/foo/a.txt, parent/foo/b.txt
#. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
#. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An additional feature is that the path to a data-file can actually be
a function that takes no arguments and returns the actual path(s) to
the data-files. This is useful when the data files are generated while
building the package.
Examples
--------
Add files to the list of data_files to be included with the package.
>>> self.add_data_files('foo.dat',
... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
... 'bar/cat.dat',
... '/full/path/to/can.dat') #doctest: +SKIP
will install these data files to::
<package install directory>/
foo.dat
fun/
gun.dat
nun/
pun.dat
sun.dat
bar/
cat.dat
can.dat
where <package install directory> is the package (or sub-package)
directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage') or
'/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
"""
if len(files)>1:
for f in files:
self.add_data_files(f)
return
assert len(files)==1
if is_sequence(files[0]):
d, files = files[0]
else:
d = None
if is_string(files):
filepat = files
elif is_sequence(files):
if len(files)==1:
filepat = files[0]
else:
for f in files:
self.add_data_files((d, f))
return
else:
raise TypeError(repr(type(files)))
if d is None:
if hasattr(filepat, '__call__'):
d = ''
elif os.path.isabs(filepat):
d = ''
else:
d = os.path.dirname(filepat)
self.add_data_files((d, files))
return
paths = self.paths(filepat, include_non_existing=False)
if is_glob_pattern(filepat):
if is_glob_pattern(d):
pattern_list = d.split(os.sep)
pattern_list.reverse()
for path in paths:
path_list = path.split(os.sep)
path_list.reverse()
path_list.pop() # filename
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
target_list.append(path_list[i])
i += 1
else:
target_list.append(s)
target_list.reverse()
self.add_data_files((os.sep.join(target_list), path))
else:
self.add_data_files((d, paths))
return
assert not is_glob_pattern(d), repr((d, filepat))
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
data_files.append((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_py_modules
def add_define_macros(self, macros):
"""Add define macros to configuration
Add the given sequence of macro name and value duples to the beginning
of the define_macros list This list will be visible to all extension
modules of the current package.
"""
dist = self.get_distribution()
if dist is not None:
if not hasattr(dist, 'define_macros'):
dist.define_macros = []
dist.define_macros.extend(macros)
else:
self.define_macros.extend(macros)
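# A minimal usage sketch (not part of the original source; the macro names are
# hypothetical). The argument is a sequence of (name, value) pairs in the
# distutils ``define_macros`` format, where value may be None for a bare define:
#
#   >>> config.add_define_macros([('HAVE_FEATURE_X', 1),
#   ...                           ('DEBUG_BUILD', None)])  #doctest: +SKIP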
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
Add the given sequence of paths to the beginning of the include_dirs
list. This list will be visible to all extension modules of the
current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
if dist.include_dirs is None:
dist.include_dirs = []
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
def add_headers(self,*files):
"""Add installable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
By default, headers will be installed under <python-
include>/<self.name.replace('.','/')>/ directory. If an item of files
is a tuple, then its first argument specifies the actual installation
location relative to the <python-include> path.
Parameters
----------
files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name, p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0], p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
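# A minimal usage sketch (not part of the original source; the header paths are
# hypothetical), mirroring the two argument forms described above:
#
#   >>> config.add_headers('src/foo.h')                        #doctest: +SKIP
#   >>> config.add_headers(('mylib', ['src/a.h', 'src/b.h']))  #doctest: +SKIP
#
# The first form installs under <python-include>/<self.name>/, the second under
# <python-include>/mylib/.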
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
pre-pends the local_path if needed. Because this is called on all
source lists, this allows wildcard characters to be specified in lists
of sources for extension modules and libraries and scripts and allows
path-names be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing', True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
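# For illustration (hypothetical file layout): if local_path contains src/a.c
# and src/b.c, then
#
#   >>> config.paths('src/*.c')  #doctest: +SKIP
#
# expands the glob relative to local_path and returns both files, which is why
# wildcard entries are accepted in ``sources`` lists below.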
def _fix_paths_dict(self, kw):
for k in kw.keys():
v = kw[k]
if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
'module_dirs', 'extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
Create and add an Extension instance to the ext_modules list. This
method also takes the following optional keyword arguments that are
passed on to the Extension constructor.
Parameters
----------
name : str
name of the extension
sources : seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
include_dirs :
define_macros :
undef_macros :
library_dirs :
libraries :
runtime_library_dirs :
extra_objects :
extra_compile_args :
extra_link_args :
extra_f77_compile_args :
extra_f90_compile_args :
export_symbols :
swig_opts :
depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
language :
f2py_options :
module_dirs :
extra_info : dict or list
dict or list of dict of keywords to be appended to keywords.
Notes
-----
The self.paths(...) method is applied to all lists that may contain
paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name, name)
ext_args['sources'] = sources
if 'extra_info' in ext_args:
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_append(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries', [])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname, tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname, lpath = libname.split('@', 1)
lpath = os.path.abspath(njoin(self.local_path, lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None, lpath,
caller_level = 2)
if isinstance(c, Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries', [])]:
llname = l.split('__OF__', 1)[0]
if llname == lname:
c.pop('name', None)
dict_append(ext_args,**c)
break
continue
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
ext_args['define_macros'] = \
self.define_macros + ext_args.get('define_macros', [])
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.append(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add an extension '+name)
return ext
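# A minimal usage sketch (not part of the original source; the module name,
# sources and macros are hypothetical):
#
#   >>> ext = config.add_extension('_fastops',
#   ...                            sources=['src/fastops.c'],
#   ...                            include_dirs=['src'],
#   ...                            define_macros=[('USE_FAST_PATH', 1)])  #doctest: +SKIP
#
# The returned Extension instance is also appended to config.ext_modules.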
def add_library(self,name,sources,**build_info):
"""
Add library to configuration.
Parameters
----------
name : str
Name of the extension.
sources : sequence
List of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compile_args
* extra_f90_compile_args
* f2py_options
* language
"""
self._add_library(name, sources, None, build_info)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
build_info['sources'] = sources
# Sometimes, depends is not set up to an empty list by default, and if
# depends is not given to add_library, distutils barfs (#1134)
if 'depends' not in build_info:
build_info['depends'] = []
self._fix_paths_dict(build_info)
# Add to libraries list so that it is build with build_clib
self.libraries.append((name, build_info))
def add_installed_library(self, name, sources, install_dir, build_info=None):
"""
Similar to add_library, but the specified library is installed.
Most C libraries used with `distutils` are only used to build python
extensions, but libraries built through this method will be installed
so that they can be reused by third-party packages.
Parameters
----------
name : str
Name of the installed library.
sources : sequence
List of the library's source files. See `add_library` for details.
install_dir : str
Path to install the library, relative to the current sub-package.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compile_args
* extra_f90_compile_args
* f2py_options
* language
Returns
-------
None
See Also
--------
add_library, add_npy_pkg_config, get_info
Notes
-----
The best way to encode the options required to link against the specified
C libraries is to use a "libname.ini" file, and use `get_info` to
retrieve the required options (see `add_npy_pkg_config` for more
information).
"""
if not build_info:
build_info = {}
install_dir = os.path.join(self.package_path, install_dir)
self._add_library(name, sources, install_dir, build_info)
self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
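# A minimal usage sketch (not part of the original source; names are
# hypothetical): build a reusable C library and install it under the
# sub-package's 'lib' directory, typically paired with add_npy_pkg_config so
# that third-party packages can later locate it through get_info():
#
#   >>> config.add_installed_library('mymath',
#   ...                              sources=['src/mymath.c'],
#   ...                              install_dir='lib')  #doctest: +SKIP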
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
"""
Generate and install a npy-pkg config file from a template.
The config file generated from `template` is installed in the
given install directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
The path of the template, relatively to the current package path.
install_dir : str
Where to install the npy-pkg config file, relatively to the current
package path.
subst_dict : dict, optional
If given, any string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when installed. The install
prefix is always available through the variable ``@prefix@``, since the
install prefix is not easy to get reliably from setup.py.
See also
--------
add_installed_library, get_info
Notes
-----
This works for both standard installs and in-place builds, i.e. the
``@prefix@`` refers to the source directory for in-place builds.
Examples
--------
::
config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
Assuming the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be installed as foo.ini in the 'lib' subpath.
"""
if subst_dict is None:
subst_dict = {}
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
self.installed_pkg_config[self.name].append((template, install_dir,
subst_dict))
else:
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
def add_scripts(self,*files):
"""Add scripts to configuration.
Add the sequence of files to the beginning of the scripts list.
Scripts will be installed under the <prefix>/bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
def dict_append(self,**dict):
for key in self.list_keys:
a = getattr(self, key)
a.extend(dict.get(key, []))
for key in self.dict_keys:
a = getattr(self, key)
a.update(dict.get(key, {}))
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for key in dict.keys():
if key not in known_keys:
a = getattr(self, key, None)
if a and a==dict[key]: continue
self.warn('Inheriting attribute %r=%r from %r' \
% (key, dict[key], dict.get('name', '?')))
setattr(self, key, dict[key])
self.extra_keys.append(key)
elif key in self.extra_keys:
self.info('Ignoring attempt to set %r (from %r to %r)' \
% (key, getattr(self, key), dict[key]))
elif key in known_keys:
# key is already processed above
pass
else:
raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self, k, None)
if a:
s += '%s = %s\n' % (k, pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
"""
Returns the numpy.distutils config command instance.
"""
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.', old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory where temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 77 compiler is available (because a simple Fortran 77
code was able to be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
return flag
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 90 compiler is available (because a simple Fortran
90 code was able to be compiled successfully)
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
return flag
def append_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_append(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from numpy.distutils.core import Extension
assert isinstance(extlib, Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self, path):
"""Return path's SVN revision number.
"""
try:
output = subprocess.check_output(
['svnversion'], shell=True, cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
m = re.match(rb'(?P<revision>\d+)', output)
if m:
return int(m.group('revision'))
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
entries = njoin(path, '_svn', 'entries')
else:
entries = njoin(path, '.svn', 'entries')
if os.path.isfile(entries):
with open(entries) as f:
fstr = f.read()
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"', fstr)
if m:
return int(m.group('revision'))
else: # non-xml entries file --- check to be sure that
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
return int(m.group('revision'))
return None
def _get_hg_revision(self, path):
"""Return path's Mercurial revision number.
"""
try:
output = subprocess.check_output(
['hg identify --num'], shell=True, cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
m = re.match(rb'(?P<revision>\d+)', output)
if m:
return int(m.group('revision'))
branch_fn = njoin(path, '.hg', 'branch')
branch_cache_fn = njoin(path, '.hg', 'branch.cache')
if os.path.isfile(branch_fn):
branch0 = None
with open(branch_fn) as f:
revision0 = f.read().strip()
branch_map = {}
for line in open(branch_cache_fn, 'r'):  # the Python 2 file() builtin no longer exists
branch1, revision1 = line.split()[:2]
if revision1==revision0:
branch0 = branch1
try:
revision1 = int(revision1)
except ValueError:
continue
branch_map[branch1] = revision1
return branch_map.get(branch0)
return None
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
Return a version string of the current package or None if the version
information could not be detected.
Notes
-----
This method scans files named
__version__.py, <packagename>_version.py, version.py, and
__svn_version__.py for string variables version, __version__, and
<packagename>_version, until a version number is found.
"""
version = getattr(self, 'version', None)
if version is not None:
return version
# Get version from version file.
if version_file is None:
files = ['__version__.py',
self.name.split('.')[-1]+'_version.py',
'version.py',
'__svn_version__.py',
'__hg_version__.py']
else:
files = [version_file]
if version_variable is None:
version_vars = ['version',
'__version__',
self.name.split('.')[-1]+'_version']
else:
version_vars = [version_variable]
for f in files:
fn = njoin(self.local_path, f)
if os.path.isfile(fn):
info = ('.py', 'U', 1)
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name, name)
try:
version_module = npy_load_module('_'.join(n.split('.')),
fn, info)
except ImportError:
msg = get_exception()
self.warn(str(msg))
version_module = None
if version_module is None:
continue
for a in version_vars:
version = getattr(version_module, a, None)
if version is not None:
break
if version is not None:
break
if version is not None:
self.version = version
return version
# Get version as SVN or Mercurial revision number
revision = self._get_svn_revision(self.local_path)
if revision is None:
revision = self._get_hg_revision(self.local_path)
if revision is not None:
version = str(revision)
self.version = version
return version
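# For illustration (hypothetical layout): if the package ships a version.py
# containing ``version = '1.2.3'``, the scan described above finds it:
#
#   >>> config.get_version()  #doctest: +SKIP
#   '1.2.3'
#
# The SVN/Mercurial revision fallback below is only used when none of the
# scanned files yields a version string.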
def make_svn_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__svn_version__.py file to the current package directory.
Generate package __svn_version__.py file from SVN revision number,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __svn_version__.py existed before, nothing is done.
This is
intended for working with source directories that are in an SVN
repository.
"""
target = njoin(self.local_path, '__svn_version__.py')
revision = self._get_svn_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_svn_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
with open(target, 'w') as f:
f.write('version = %r\n' % (version))
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_svn_version_py()))
def make_hg_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__hg_version__.py file to the current package directory.
Generate package __hg_version__.py file from Mercurial revision,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __hg_version__.py existed before, nothing is done.
This is intended for working with source directories that are
in a Mercurial repository.
"""
target = njoin(self.local_path, '__hg_version__.py')
revision = self._get_hg_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_hg_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
with open(target, 'w') as f:
f.write('version = %r\n' % (version))
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_hg_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
This file is installed to the
package installation directory.
"""
self.py_modules.append((self.name, name, generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
from .system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
def get_cmd(cmdname, _cache={}):
if cmdname not in _cache:
import distutils.core
dist = distutils.core._setup_distribution
if dist is None:
from distutils.errors import DistutilsInternalError
raise DistutilsInternalError(
'setup distribution instance not initialized')
cmd = dist.get_command_obj(cmdname)
_cache[cmdname] = cmd
return _cache[cmdname]
def get_numpy_include_dirs():
# numpy_include_dirs are set by numpy/core/setup.py, otherwise []
include_dirs = Configuration.numpy_include_dirs[:]
if not include_dirs:
import numpy
include_dirs = [ numpy.get_include() ]
# else running numpy/core/setup.py
return include_dirs
def get_npy_pkg_dir():
"""Return the path where to find the npy-pkg-config directory."""
# XXX: import here for bootstrapping reasons
import numpy
d = os.path.join(os.path.dirname(numpy.__file__),
'core', 'lib', 'npy-pkg-config')
return d
def get_pkg_info(pkgname, dirs=None):
"""
Return library info for the given package.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_info
"""
from numpy.distutils.npy_pkg_config import read_config
if dirs:
dirs.append(get_npy_pkg_dir())
else:
dirs = [get_npy_pkg_dir()]
return read_config(pkgname, dirs)
def get_info(pkgname, dirs=None):
"""
Return an info dict for a given C library.
The info dict contains the necessary options to use the C library.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
info : dict
The dictionary with build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_pkg_info
Examples
--------
To get the necessary information for the npymath library from NumPy:
>>> npymath_info = np.distutils.misc_util.get_info('npymath')
>>> npymath_info #doctest: +SKIP
{'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
This info dict can then be used as input to a `Configuration` instance::
config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
"""
from numpy.distutils.npy_pkg_config import parse_flags
pkg_info = get_pkg_info(pkgname, dirs)
# Translate LibraryInfo instance into a build_info dict
info = parse_flags(pkg_info.cflags())
for k, v in parse_flags(pkg_info.libs()).items():
info[k].extend(v)
# add_extension's extra_info argument expects 'define_macros' rather than 'macros'
info['define_macros'] = info['macros']
del info['macros']
del info['ignored']
return info
def is_bootstrapping():
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
try:
builtins.__NUMPY_SETUP__
return True
except AttributeError:
return False
#########################
def default_config_dict(name = None, parent_name = None, local_path=None):
"""Return a configuration dictionary for usage in
configuration() function defined in file setup_<name>.py.
"""
import warnings
warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
'deprecated default_config_dict(%r,%r,%r)'
% (name, parent_name, local_path,
name, parent_name, local_path,
), stacklevel=2)
c = Configuration(name, parent_name, local_path)
return c.todict()
def dict_append(d, **kws):
for k, v in kws.items():
if k in d:
ov = d[k]
if isinstance(ov, str):
d[k] = v
else:
d[k].extend(v)
else:
d[k] = v
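# For illustration (hypothetical values): existing non-string entries are
# extended in place, while new keys are simply assigned.
#
#   >>> d = {'libraries': ['m']}
#   >>> dict_append(d, libraries=['z'], include_dirs=['include'])
#   >>> d  #doctest: +SKIP
#   {'libraries': ['m', 'z'], 'include_dirs': ['include']}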
def appendpath(prefix, path):
if os.path.sep != '/':
prefix = prefix.replace('/', os.path.sep)
path = path.replace('/', os.path.sep)
drive = ''
if os.path.isabs(path):
drive = os.path.splitdrive(prefix)[0]
absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
pathdrive, path = os.path.splitdrive(path)
d = os.path.commonprefix([absprefix, path])
if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
or os.path.join(path[:len(d)], path[len(d):]) != path:
# Handle invalid paths
d = os.path.dirname(d)
subpath = path[len(d):]
if os.path.isabs(subpath):
subpath = subpath[1:]
else:
subpath = path
return os.path.normpath(njoin(drive + prefix, subpath))
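# For illustration (hypothetical inputs; results assume a typical layout):
# relative paths are joined and normalised, while absolute paths are re-rooted
# under the prefix.
#
#   >>> appendpath('parent', 'foo/bar')   #doctest: +SKIP
#   'parent/foo/bar'
#   >>> appendpath('parent', '/foo/bar')  #doctest: +SKIP
#   'parent/foo/bar'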
def generate_config_py(target):
"""Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
with open(target, 'w') as f:
f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
# For gfortran+msvc combination, extra shared libraries may exist
f.write(textwrap.dedent("""
import os
import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
os.environ.setdefault('PATH', '')
os.environ['PATH'] += os.pathsep + extra_dll_dir
"""))
for k, i in system_info.saved_results.items():
f.write('%s=%r\n' % (k, i))
f.write(textwrap.dedent(r'''
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
'''))
return target
def msvc_version(compiler):
"""Return version major and minor of compiler instance if it is
MSVC, raise an exception otherwise."""
if not compiler.compiler_type == "msvc":
raise ValueError("Compiler instance is not msvc (%s)"\
% compiler.compiler_type)
return compiler._MSVCCompiler__version
def get_build_architecture():
# Importing distutils.msvccompiler triggers a warning on non-Windows
# systems, so delay the import to here.
from distutils.msvccompiler import get_build_architecture
return get_build_architecture()
| []
| []
| [
"NPY_NUM_BUILD_JOBS",
"OSTYPE",
"SVN_ASP_DOT_NET_HACK",
"MSYSTEM",
"PATH"
]
| [] | ["NPY_NUM_BUILD_JOBS", "OSTYPE", "SVN_ASP_DOT_NET_HACK", "MSYSTEM", "PATH"] | python | 5 | 0 | |
integration-cli/check_test.go | package main
import (
"context"
"fmt"
"io/ioutil"
"net/http/httptest"
"os"
"path"
"path/filepath"
"strconv"
"sync"
"syscall"
"testing"
"time"
"github.com/docker/docker/integration-cli/cli"
"github.com/docker/docker/integration-cli/daemon"
"github.com/docker/docker/integration-cli/environment"
testdaemon "github.com/docker/docker/internal/test/daemon"
ienv "github.com/docker/docker/internal/test/environment"
"github.com/docker/docker/internal/test/fakestorage"
"github.com/docker/docker/internal/test/fixtures/plugin"
"github.com/docker/docker/internal/test/registry"
"github.com/docker/docker/pkg/reexec"
"github.com/go-check/check"
"gotest.tools/assert"
)
const (
// the private registry to use for tests
privateRegistryURL = registry.DefaultURL
// path to containerd's ctr binary
ctrBinary = "ctr"
// the docker daemon binary to use
dockerdBinary = "dockerd"
)
var (
testEnv *environment.Execution
// the docker client binary to use
dockerBinary = ""
)
func init() {
var err error
reexec.Init() // This is required for external graphdriver tests
testEnv, err = environment.New()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func TestMain(m *testing.M) {
dockerBinary = testEnv.DockerBinary()
err := ienv.EnsureFrozenImagesLinux(&testEnv.Execution)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
testEnv.Print()
os.Exit(m.Run())
}
func Test(t *testing.T) {
cli.SetTestEnvironment(testEnv)
fakestorage.SetTestEnvironment(&testEnv.Execution)
ienv.ProtectAll(t, &testEnv.Execution)
check.TestingT(t)
}
func init() {
check.Suite(&DockerSuite{})
}
type DockerSuite struct {
}
func (s *DockerSuite) OnTimeout(c *check.C) {
if testEnv.IsRemoteDaemon() {
return
}
path := filepath.Join(os.Getenv("DEST"), "docker.pid")
b, err := ioutil.ReadFile(path)
if err != nil {
c.Fatalf("Failed to get daemon PID from %s\n", path)
}
rawPid, err := strconv.ParseInt(string(b), 10, 32)
if err != nil {
c.Fatalf("Failed to parse pid from %s: %s\n", path, err)
}
daemonPid := int(rawPid)
if daemonPid > 0 {
testdaemon.SignalDaemonDump(daemonPid)
}
}
func (s *DockerSuite) TearDownTest(c *check.C) {
testEnv.Clean(c)
}
func init() {
check.Suite(&DockerRegistrySuite{
ds: &DockerSuite{},
})
}
type DockerRegistrySuite struct {
ds *DockerSuite
reg *registry.V2
d *daemon.Daemon
}
func (s *DockerRegistrySuite) OnTimeout(c *check.C) {
s.d.DumpStackAndQuit()
}
func (s *DockerRegistrySuite) SetUpTest(c *check.C) {
testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
s.reg = registry.NewV2(c)
s.reg.WaitReady(c)
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
}
func (s *DockerRegistrySuite) TearDownTest(c *check.C) {
if s.reg != nil {
s.reg.Close()
}
if s.d != nil {
s.d.Stop(c)
}
s.ds.TearDownTest(c)
}
func init() {
check.Suite(&DockerSchema1RegistrySuite{
ds: &DockerSuite{},
})
}
type DockerSchema1RegistrySuite struct {
ds *DockerSuite
reg *registry.V2
d *daemon.Daemon
}
func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) {
s.d.DumpStackAndQuit()
}
func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) {
testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64, testEnv.IsLocalDaemon)
s.reg = registry.NewV2(c, registry.Schema1)
s.reg.WaitReady(c)
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
}
func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) {
if s.reg != nil {
s.reg.Close()
}
if s.d != nil {
s.d.Stop(c)
}
s.ds.TearDownTest(c)
}
func init() {
check.Suite(&DockerRegistryAuthHtpasswdSuite{
ds: &DockerSuite{},
})
}
type DockerRegistryAuthHtpasswdSuite struct {
ds *DockerSuite
reg *registry.V2
d *daemon.Daemon
}
func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) {
s.d.DumpStackAndQuit()
}
func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) {
testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
s.reg = registry.NewV2(c, registry.Htpasswd)
s.reg.WaitReady(c)
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
}
func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) {
if s.reg != nil {
out, err := s.d.Cmd("logout", privateRegistryURL)
assert.NilError(c, err, out)
s.reg.Close()
}
if s.d != nil {
s.d.Stop(c)
}
s.ds.TearDownTest(c)
}
func init() {
check.Suite(&DockerRegistryAuthTokenSuite{
ds: &DockerSuite{},
})
}
type DockerRegistryAuthTokenSuite struct {
ds *DockerSuite
reg *registry.V2
d *daemon.Daemon
}
func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) {
s.d.DumpStackAndQuit()
}
func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) {
testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
}
func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) {
if s.reg != nil {
out, err := s.d.Cmd("logout", privateRegistryURL)
assert.NilError(c, err, out)
s.reg.Close()
}
if s.d != nil {
s.d.Stop(c)
}
s.ds.TearDownTest(c)
}
func (s *DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *check.C, tokenURL string) {
if s == nil {
c.Fatal("registry suite isn't initialized")
}
s.reg = registry.NewV2(c, registry.Token(tokenURL))
s.reg.WaitReady(c)
}
func init() {
check.Suite(&DockerDaemonSuite{
ds: &DockerSuite{},
})
}
type DockerDaemonSuite struct {
ds *DockerSuite
d *daemon.Daemon
}
func (s *DockerDaemonSuite) OnTimeout(c *check.C) {
s.d.DumpStackAndQuit()
}
func (s *DockerDaemonSuite) SetUpTest(c *check.C) {
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
}
func (s *DockerDaemonSuite) TearDownTest(c *check.C) {
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
if s.d != nil {
s.d.Stop(c)
}
s.ds.TearDownTest(c)
}
func (s *DockerDaemonSuite) TearDownSuite(c *check.C) {
filepath.Walk(testdaemon.SockRoot, func(path string, fi os.FileInfo, err error) error {
if err != nil {
// ignore errors here
// not cleaning up sockets is not really an error
return nil
}
if fi.Mode() == os.ModeSocket {
syscall.Unlink(path)
}
return nil
})
os.RemoveAll(testdaemon.SockRoot)
}
const defaultSwarmPort = 2477
func init() {
check.Suite(&DockerSwarmSuite{
ds: &DockerSuite{},
})
}
type DockerSwarmSuite struct {
server *httptest.Server
ds *DockerSuite
daemonsLock sync.Mutex // protect access to daemons and portIndex
daemons []*daemon.Daemon
portIndex int
}
func (s *DockerSwarmSuite) OnTimeout(c *check.C) {
s.daemonsLock.Lock()
defer s.daemonsLock.Unlock()
for _, d := range s.daemons {
d.DumpStackAndQuit()
}
}
func (s *DockerSwarmSuite) SetUpTest(c *check.C) {
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
}
func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemon.Daemon {
d := daemon.New(c, dockerBinary, dockerdBinary,
testdaemon.WithEnvironment(testEnv.Execution),
testdaemon.WithSwarmPort(defaultSwarmPort+s.portIndex),
)
if joinSwarm {
if len(s.daemons) > 0 {
d.StartAndSwarmJoin(c, s.daemons[0].Daemon, manager)
} else {
d.StartAndSwarmInit(c)
}
} else {
d.StartNode(c)
}
s.daemonsLock.Lock()
s.portIndex++
s.daemons = append(s.daemons, d)
s.daemonsLock.Unlock()
return d
}
func (s *DockerSwarmSuite) TearDownTest(c *check.C) {
testRequires(c, DaemonIsLinux)
s.daemonsLock.Lock()
for _, d := range s.daemons {
if d != nil {
d.Stop(c)
d.Cleanup(c)
}
}
s.daemons = nil
s.portIndex = 0
s.daemonsLock.Unlock()
s.ds.TearDownTest(c)
}
func init() {
check.Suite(&DockerPluginSuite{
ds: &DockerSuite{},
})
}
type DockerPluginSuite struct {
ds *DockerSuite
registry *registry.V2
}
func (ps *DockerPluginSuite) registryHost() string {
return privateRegistryURL
}
func (ps *DockerPluginSuite) getPluginRepo() string {
return path.Join(ps.registryHost(), "plugin", "basic")
}
func (ps *DockerPluginSuite) getPluginRepoWithTag() string {
return ps.getPluginRepo() + ":" + "latest"
}
func (ps *DockerPluginSuite) SetUpSuite(c *check.C) {
testRequires(c, DaemonIsLinux, RegistryHosting)
ps.registry = registry.NewV2(c)
ps.registry.WaitReady(c)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
err := plugin.CreateInRegistry(ctx, ps.getPluginRepo(), nil)
assert.NilError(c, err, "failed to create plugin")
}
func (ps *DockerPluginSuite) TearDownSuite(c *check.C) {
if ps.registry != nil {
ps.registry.Close()
}
}
func (ps *DockerPluginSuite) TearDownTest(c *check.C) {
ps.ds.TearDownTest(c)
}
func (ps *DockerPluginSuite) OnTimeout(c *check.C) {
ps.ds.OnTimeout(c)
}
| [
"\"DEST\""
]
| []
| [
"DEST"
]
| [] | ["DEST"] | go | 1 | 0 | |
speech_to_text.py | import speech_recognition as sr
import os
project_root_path = os.path.abspath(os.path.dirname(__file__))
beep_tone_path = os.path.join(project_root_path, ".tones/beep_ping.wav")
google_keys_path = os.environ.get("google_keys_path")
# checks whether the environment variable value is valid and path exists
google_json_path_exists = isinstance(google_keys_path, str) and os.path.exists(google_keys_path)
if google_json_path_exists:
with open(google_keys_path) as key_json:
google_key = key_json.read()
class SpeechRecognizer:
"""
This module triggers the microphone to accept speech, then uses the Google Cloud Speech API to transcribe it;
if Google Cloud credentials are not available it falls back to the free Google Web Speech API
:ivar recognise: instance of the Recognizer() class from the speech-recognition library
:ivar mic: instance of the Microphone() class
"""
def __init__(self):
self.recognise = sr.Recognizer()
self.mic = sr.Microphone()
def beep_sound(self):
"""
adds a beep tone to signify iGlass is waiting for command
"""
try:
if os.path.exists(beep_tone_path):
os.system(f"aplay {beep_tone_path}")
except Exception:
pass
def listen(self):
"""
triggers the mic, coverts audio to text
:var response_google: text string from google cloud speech-to-text
:return: response
:rtype dict
"""
response = {"success": None,
"error": None}
self.beep_sound()
with self.mic as source:
self.recognise.adjust_for_ambient_noise(source) # reduces noise
voice = self.recognise.listen(source)
try:
if google_json_path_exists:
response_google_cloud = self.recognise.recognize_google_cloud(audio_data=voice, credentials_json=google_key)
response["success"] = response_google_cloud
return response
response_google = self.recognise.recognize_google(audio_data=voice)
response["success"] = response_google
return response
except sr.RequestError:
# network related error
response["fail"] = "Request error please try again"
return response
except sr.UnknownValueError:
"""
occurs when there is silence in speech
this returns a None value which is neglected by the Brain instance
"""
response["fail"] = "Unknown Value Error"
return response
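# Minimal usage sketch (not part of the original module; assumes a working
# microphone and the PyAudio backend that speech_recognition needs):
if __name__ == "__main__":
    recognizer = SpeechRecognizer()
    result = recognizer.listen()
    if result["success"]:
        print("Transcription:", result["success"])
    else:
        print("Recognition failed:", result["error"])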
| []
| []
| [
"google_keys_path"
]
| [] | ["google_keys_path"] | python | 1 | 0 | |
device/KY_13.py | from .device import Device
import copy
import math
class KY_13(Device):
values = [
{
"name": "temperature",
"value": None,
"unit":"celcius"
}
]
def __init__(self, config, callback):
super(KY_13, self).__init__(config, callback)
self.init_input_outputs(self.__decide_io)
self.read_value_imp = self.__read_value
def __read_value(self):
# Read the raw analog value from the ADC channel (assumed to be millivolts on a 3.3 V scale).
voltage = self.input_outputs["Analog"].get_state()
# Derive the thermistor resistance term from the voltage divider and take its natural log.
temperature = math.log((10000 / voltage) * (3300 - voltage))
# Steinhart-Hart equation: convert the log-resistance into a temperature in Kelvin.
temperature = 1 / (0.001129148 + (0.000234125 + (0.0000000876741 * temperature * temperature)) * temperature)
# Convert from Kelvin to Celsius.
temperature = temperature - 273.15
values = copy.deepcopy(self.values)
values[0]["value"] = temperature
return values
def __decide_io(self, io_name):
if io_name == "Analog" and self.board == "raspberry_pi":
from input_output import GPIOADCInput
return GPIOADCInput
| []
| []
| []
| [] | [] | python | null | null | null |
certbot/cli.py | """Certbot command line argument & config processing."""
# pylint: disable=too-many-lines
from __future__ import print_function
import argparse
import copy
import glob
import logging.handlers
import sys
import configargparse
import six
import zope.component
import zope.interface
from zope.interface import interfaces as zope_interfaces
from acme import challenges
# pylint: disable=unused-import, no-name-in-module
from acme.magic_typing import Any, Dict, Optional
# pylint: enable=unused-import, no-name-in-module
import certbot
import certbot.plugins.enhancements as enhancements
import certbot.plugins.selection as plugin_selection
from certbot import constants
from certbot import crypto_util
from certbot import errors
from certbot import hooks
from certbot import interfaces
from certbot import util
from certbot.compat import os
from certbot.display import util as display_util
from certbot.plugins import disco as plugins_disco
logger = logging.getLogger(__name__)
# Global, to save us from a lot of argument passing within the scope of this module
helpful_parser = None # type: Optional[HelpfulArgumentParser]
# For help strings, figure out how the user ran us.
# When invoked from letsencrypt-auto, sys.argv[0] is something like:
# "/home/user/.local/share/certbot/bin/certbot"
# Note that this won't work if the user set VENV_PATH or XDG_DATA_HOME before
# running letsencrypt-auto (and sudo stops us from seeing if they did), so it
# should only be used for purposes where inability to detect letsencrypt-auto
# fails safely
LEAUTO = "letsencrypt-auto"
if "CERTBOT_AUTO" in os.environ:
# if we're here, this is probably going to be certbot-auto, unless the
# user saved the script under a different name
LEAUTO = os.path.basename(os.environ["CERTBOT_AUTO"])
old_path_fragment = os.path.join(".local", "share", "letsencrypt")
new_path_prefix = os.path.abspath(os.path.join(os.sep, "opt",
"eff.org", "certbot", "venv"))
if old_path_fragment in sys.argv[0] or sys.argv[0].startswith(new_path_prefix):
cli_command = LEAUTO
else:
cli_command = "certbot"
# Argparse's help formatting has a lot of unhelpful peculiarities, so we want
# to replace as much of it as we can...
# This is the stub to include in help generated by argparse
SHORT_USAGE = """
{0} [SUBCOMMAND] [options] [-d DOMAIN] [-d DOMAIN] ...
Certbot can obtain and install HTTPS/TLS/SSL certificates. By default,
it will attempt to use a webserver both for obtaining and installing the
certificate. """.format(cli_command)
# This section is used for --help and --help all ; it needs information
# about installed plugins to be fully formatted
COMMAND_OVERVIEW = """The most common SUBCOMMANDS and flags are:
obtain, install, and renew certificates:
(default) run Obtain & install a certificate in your current webserver
certonly Obtain or renew a certificate, but do not install it
renew Renew all previously obtained certificates that are near expiry
enhance Add security enhancements to your existing configuration
-d DOMAINS Comma-separated list of domains to obtain a certificate for
%s
--standalone Run a standalone webserver for authentication
%s
--webroot Place files in a server's webroot folder for authentication
--manual Obtain certificates interactively, or using shell script hooks
-n Run non-interactively
--test-cert Obtain a test certificate from a staging server
--dry-run Test "renew" or "certonly" without saving any certificates to disk
manage certificates:
certificates Display information about certificates you have from Certbot
revoke Revoke a certificate (supply --cert-path or --cert-name)
delete Delete a certificate
manage your account:
register Create an ACME account
unregister Deactivate an ACME account
update_account Update an ACME account
--agree-tos Agree to the ACME server's Subscriber Agreement
-m EMAIL Email address for important account notifications
"""
# This is the short help for certbot --help, where we disable argparse
# altogether
HELP_AND_VERSION_USAGE = """
More detailed help:
-h, --help [TOPIC] print this message, or detailed help on a topic;
the available TOPICS are:
all, automation, commands, paths, security, testing, or any of the
subcommands or plugins (certonly, renew, install, register, nginx,
apache, standalone, webroot, etc.)
-h all print a detailed help page including all topics
--version print the version number
"""
# These argparse parameters should be removed when detecting defaults.
ARGPARSE_PARAMS_TO_REMOVE = ("const", "nargs", "type",)
# These sets are used when to help detect options set by the user.
EXIT_ACTIONS = set(("help", "version",))
ZERO_ARG_ACTIONS = set(("store_const", "store_true",
"store_false", "append_const", "count",))
# Maps a config option to a set of config options that may have modified it.
# This dictionary is used recursively, so if A modifies B and B modifies C,
# it is determined that C was modified by the user if A was modified.
VAR_MODIFIERS = {"account": set(("server",)),
"renew_hook": set(("deploy_hook",)),
"server": set(("dry_run", "staging",)),
"webroot_map": set(("webroot_path",))}
def report_config_interaction(modified, modifiers):
"""Registers config option interaction to be checked by set_by_cli.
This function can be called by during the __init__ or
add_parser_arguments methods of plugins to register interactions
between config options.
:param modified: config options that can be modified by modifiers
:type modified: iterable or str (string_types)
:param modifiers: config options that modify modified
:type modifiers: iterable or str (string_types)
"""
if isinstance(modified, six.string_types):
modified = (modified,)
if isinstance(modifiers, six.string_types):
modifiers = (modifiers,)
for var in modified:
VAR_MODIFIERS.setdefault(var, set()).update(modifiers)
def possible_deprecation_warning(config):
"A deprecation warning for users with the old, not-self-upgrading letsencrypt-auto."
if cli_command != LEAUTO:
return
if config.no_self_upgrade:
# users setting --no-self-upgrade might be hanging on a client version like 0.3.0
# or 0.5.0 which is the new script, but doesn't set CERTBOT_AUTO; they don't
# need warnings
return
if "CERTBOT_AUTO" not in os.environ:
logger.warning("You are running with an old copy of letsencrypt-auto"
" that does not receive updates, and is less reliable than more"
" recent versions. The letsencrypt client has also been renamed"
" to Certbot. We recommend upgrading to the latest certbot-auto"
" script, or using native OS packages.")
logger.debug("Deprecation warning circumstances: %s / %s", sys.argv[0], os.environ)
class _Default(object):
"""A class to use as a default to detect if a value is set by a user"""
def __bool__(self):
return False
def __eq__(self, other):
return isinstance(other, _Default)
def __hash__(self):
return id(_Default)
def __nonzero__(self):
return self.__bool__()
def set_by_cli(var):
"""
Return True if a particular config variable has been set by the user
(CLI or config file) including if the user explicitly set it to the
default. Returns False if the variable was assigned a default value.
"""
detector = set_by_cli.detector # type: ignore
if detector is None and helpful_parser is not None:
# Setup on first run: `detector` is a weird version of config in which
# the default value of every attribute is wrangled to be boolean-false
plugins = plugins_disco.PluginsRegistry.find_all()
# reconstructed_args == sys.argv[1:], or whatever was passed to main()
reconstructed_args = helpful_parser.args + [helpful_parser.verb]
detector = set_by_cli.detector = prepare_and_parse_args( # type: ignore
plugins, reconstructed_args, detect_defaults=True)
# propagate plugin requests: eg --standalone modifies config.authenticator
detector.authenticator, detector.installer = ( # type: ignore
plugin_selection.cli_plugin_requests(detector))
if not isinstance(getattr(detector, var), _Default):
logger.debug("Var %s=%s (set by user).", var, getattr(detector, var))
return True
for modifier in VAR_MODIFIERS.get(var, []):
if set_by_cli(modifier):
logger.debug("Var %s=%s (set by user).",
var, VAR_MODIFIERS.get(var, []))
return True
return False
# static housekeeping var
# functions attributed are not supported by mypy
# https://github.com/python/mypy/issues/2087
set_by_cli.detector = None # type: ignore
def has_default_value(option, value):
"""Does option have the default value?
If the default value of option is not known, False is returned.
:param str option: configuration variable being considered
:param value: value of the configuration variable named option
:returns: True if option has the default value, otherwise, False
:rtype: bool
"""
if helpful_parser is not None:
return (option in helpful_parser.defaults and
helpful_parser.defaults[option] == value)
return False
def option_was_set(option, value):
"""Was option set by the user or does it differ from the default?
:param str option: configuration variable being considered
:param value: value of the configuration variable named option
:returns: True if the option was set, otherwise, False
:rtype: bool
"""
return set_by_cli(option) or not has_default_value(option, value)
def argparse_type(variable):
"""Return our argparse type function for a config variable (default: str)"""
# pylint: disable=protected-access
if helpful_parser is not None:
for action in helpful_parser.parser._actions:
if action.type is not None and action.dest == variable:
return action.type
return str
def read_file(filename, mode="rb"):
"""Returns the given file's contents.
:param str filename: path to file
:param str mode: open mode (see `open`)
:returns: absolute path of filename and its contents
:rtype: tuple
:raises argparse.ArgumentTypeError: File does not exist or is not readable.
"""
try:
filename = os.path.abspath(filename)
with open(filename, mode) as the_file:
contents = the_file.read()
return filename, contents
except IOError as exc:
raise argparse.ArgumentTypeError(exc.strerror)
def flag_default(name):
"""Default value for CLI flag."""
# XXX: this is an internal housekeeping notion of defaults before
# argparse has been set up; it is not accurate for all flags. Call it
# with caution. Plugin defaults are missing, and some things are using
# defaults defined in this file, not in constants.py :(
return copy.deepcopy(constants.CLI_DEFAULTS[name])
def config_help(name, hidden=False):
"""Extract the help message for an `.IConfig` attribute."""
# pylint: disable=no-member
if hidden:
return argparse.SUPPRESS
field = interfaces.IConfig.__getitem__(name) # type: zope.interface.interface.Attribute # pylint: disable=no-value-for-parameter
return field.__doc__
class HelpfulArgumentGroup(object):
"""Emulates an argparse group for use with HelpfulArgumentParser.
This class is used in the add_group method of HelpfulArgumentParser.
Command line arguments can be added to the group, but help
suppression and default detection is applied by
HelpfulArgumentParser when necessary.
"""
def __init__(self, helpful_arg_parser, topic):
self._parser = helpful_arg_parser
self._topic = topic
def add_argument(self, *args, **kwargs):
"""Add a new command line argument to the argument group."""
self._parser.add(self._topic, *args, **kwargs)
class CustomHelpFormatter(argparse.HelpFormatter):
"""This is a clone of ArgumentDefaultsHelpFormatter, with bugfixes.
In particular we fix https://bugs.python.org/issue28742
"""
def _get_help_string(self, action):
helpstr = action.help
if '%(default)' not in action.help and '(default:' not in action.help:
if action.default != argparse.SUPPRESS:
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
helpstr += ' (default: %(default)s)'
return helpstr
# The attributes here are:
# short: a string that will be displayed by "certbot -h commands"
# opts: a string that heads the section of flags with which this command is documented,
# both for "certbot -h SUBCOMMAND" and "certbot -h all"
# usage: an optional string that overrides the header of "certbot -h SUBCOMMAND"
VERB_HELP = [
("run (default)", {
"short": "Obtain/renew a certificate, and install it",
"opts": "Options for obtaining & installing certificates",
"usage": SHORT_USAGE.replace("[SUBCOMMAND]", ""),
"realname": "run"
}),
("certonly", {
"short": "Obtain or renew a certificate, but do not install it",
"opts": "Options for modifying how a certificate is obtained",
"usage": ("\n\n certbot certonly [options] [-d DOMAIN] [-d DOMAIN] ...\n\n"
"This command obtains a TLS/SSL certificate without installing it anywhere.")
}),
("renew", {
"short": "Renew all certificates (or one specified with --cert-name)",
"opts": ("The 'renew' subcommand will attempt to renew all"
" certificates (or more precisely, certificate lineages) you have"
" previously obtained if they are close to expiry, and print a"
" summary of the results. By default, 'renew' will reuse the options"
" used to create obtain or most recently successfully renew each"
" certificate lineage. You can try it with `--dry-run` first. For"
" more fine-grained control, you can renew individual lineages with"
" the `certonly` subcommand. Hooks are available to run commands"
" before and after renewal; see"
" https://certbot.eff.org/docs/using.html#renewal for more"
" information on these."),
"usage": "\n\n certbot renew [--cert-name CERTNAME] [options]\n\n"
}),
("certificates", {
"short": "List certificates managed by Certbot",
"opts": "List certificates managed by Certbot",
"usage": ("\n\n certbot certificates [options] ...\n\n"
"Print information about the status of certificates managed by Certbot.")
}),
("delete", {
"short": "Clean up all files related to a certificate",
"opts": "Options for deleting a certificate",
"usage": "\n\n certbot delete --cert-name CERTNAME\n\n"
}),
("revoke", {
"short": "Revoke a certificate specified with --cert-path or --cert-name",
"opts": "Options for revocation of certificates",
"usage": "\n\n certbot revoke [--cert-path /path/to/fullchain.pem | "
"--cert-name example.com] [options]\n\n"
}),
("register", {
"short": "Register for account with Let's Encrypt / other ACME server",
"opts": "Options for account registration",
"usage": "\n\n certbot register --email [email protected] [options]\n\n"
}),
("update_account", {
"short": "Update existing account with Let's Encrypt / other ACME server",
"opts": "Options for account modification",
"usage": "\n\n certbot update_account --email [email protected] [options]\n\n"
}),
("unregister", {
"short": "Irrevocably deactivate your account",
"opts": "Options for account deactivation.",
"usage": "\n\n certbot unregister [options]\n\n"
}),
("install", {
"short": "Install an arbitrary certificate in a server",
"opts": "Options for modifying how a certificate is deployed",
"usage": "\n\n certbot install --cert-path /path/to/fullchain.pem "
" --key-path /path/to/private-key [options]\n\n"
}),
("config_changes", {
"short": "Show changes that Certbot has made to server configurations",
"opts": "Options for controlling which changes are displayed",
"usage": "\n\n certbot config_changes --num NUM [options]\n\n"
}),
("rollback", {
"short": "Roll back server conf changes made during certificate installation",
"opts": "Options for rolling back server configuration changes",
"usage": "\n\n certbot rollback --checkpoints 3 [options]\n\n"
}),
("plugins", {
"short": "List plugins that are installed and available on your system",
"opts": 'Options for for the "plugins" subcommand',
"usage": "\n\n certbot plugins [options]\n\n"
}),
("update_symlinks", {
"short": "Recreate symlinks in your /etc/letsencrypt/live/ directory",
"opts": ("Recreates certificate and key symlinks in {0}, if you changed them by hand "
"or edited a renewal configuration file".format(
os.path.join(flag_default("config_dir"), "live"))),
"usage": "\n\n certbot update_symlinks [options]\n\n"
}),
("enhance", {
"short": "Add security enhancements to your existing configuration",
"opts": ("Helps to harden the TLS configuration by adding security enhancements "
"to already existing configuration."),
"usage": "\n\n certbot enhance [options]\n\n"
}),
]
# VERB_HELP is a list in order to preserve order, but a dict is sometimes useful
VERB_HELP_MAP = dict(VERB_HELP)
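# Minimal illustration: VERB_HELP preserves display order for help output, while
# VERB_HELP_MAP gives direct lookup of a subcommand's metadata (its one-line
# summary and custom usage string). Purely a sketch; the fallback to SHORT_USAGE
# mirrors what _usage_string() does below.
def _demo_verb_help_lookup(verb="renew"):
    props = VERB_HELP_MAP.get(verb, {})
    return props.get("short", ""), props.get("usage", SHORT_USAGE)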
class HelpfulArgumentParser(object):
"""Argparse Wrapper.
This class wraps argparse, adding the ability to make --help less
verbose, and to request help on a specific subcategory at a time, e.g.
'certbot --help security' for security options.
"""
def __init__(self, args, plugins, detect_defaults=False):
from certbot import main
self.VERBS = {
"auth": main.certonly,
"certonly": main.certonly,
"config_changes": main.config_changes,
"run": main.run,
"install": main.install,
"plugins": main.plugins_cmd,
"register": main.register,
"update_account": main.update_account,
"unregister": main.unregister,
"renew": main.renew,
"revoke": main.revoke,
"rollback": main.rollback,
"everything": main.run,
"update_symlinks": main.update_symlinks,
"certificates": main.certificates,
"delete": main.delete,
"enhance": main.enhance,
}
# Get notification function for printing
try:
self.notify = zope.component.getUtility(
interfaces.IDisplay).notification
except zope_interfaces.ComponentLookupError:
self.notify = display_util.NoninteractiveDisplay(
sys.stdout).notification
# List of topics for which additional help can be provided
HELP_TOPICS = ["all", "security", "paths", "automation", "testing"]
HELP_TOPICS += list(self.VERBS) + self.COMMANDS_TOPICS + ["manage"]
plugin_names = list(plugins)
self.help_topics = HELP_TOPICS + plugin_names + [None] # type: ignore
self.detect_defaults = detect_defaults
self.args = args
if self.args and self.args[0] == 'help':
self.args[0] = '--help'
self.determine_verb()
help1 = self.prescan_for_flag("-h", self.help_topics)
help2 = self.prescan_for_flag("--help", self.help_topics)
if isinstance(help1, bool) and isinstance(help2, bool):
self.help_arg = help1 or help2
else:
self.help_arg = help1 if isinstance(help1, six.string_types) else help2
short_usage = self._usage_string(plugins, self.help_arg)
self.visible_topics = self.determine_help_topics(self.help_arg)
# elements are added by .add_group()
self.groups = {} # type: Dict[str, argparse._ArgumentGroup]
# elements are added by .parse_args()
self.defaults = {} # type: Dict[str, Any]
self.parser = configargparse.ArgParser(
prog="certbot",
usage=short_usage,
formatter_class=CustomHelpFormatter,
args_for_setting_config_path=["-c", "--config"],
default_config_files=flag_default("config_files"),
config_arg_help_message="path to config file (default: {0})".format(
" and ".join(flag_default("config_files"))))
# This is the only way to turn off overly verbose config flag documentation
self.parser._add_config_file_help = False # pylint: disable=protected-access
# Help topics that are synonyms for --help subcommands
COMMANDS_TOPICS = ["command", "commands", "subcommand", "subcommands", "verbs"]
def _list_subcommands(self):
longest = max(len(v) for v in VERB_HELP_MAP)
text = "The full list of available SUBCOMMANDS is:\n\n"
for verb, props in sorted(VERB_HELP):
doc = props.get("short", "")
text += '{0:<{length}} {1}\n'.format(verb, doc, length=longest)
text += "\nYou can get more help on a specific subcommand with --help SUBCOMMAND\n"
return text
def _usage_string(self, plugins, help_arg):
"""Make usage strings late so that plugins can be initialised late
:param plugins: all discovered plugins
:param help_arg: False for none; True for --help; "TOPIC" for --help TOPIC
:rtype: str
:returns: a short usage string for the top of --help TOPIC
"""
if "nginx" in plugins:
nginx_doc = "--nginx Use the Nginx plugin for authentication & installation"
else:
nginx_doc = "(the certbot nginx plugin is not installed)"
if "apache" in plugins:
apache_doc = "--apache Use the Apache plugin for authentication & installation"
else:
apache_doc = "(the certbot apache plugin is not installed)"
usage = SHORT_USAGE
if help_arg is True:
self.notify(usage + COMMAND_OVERVIEW % (apache_doc, nginx_doc) + HELP_AND_VERSION_USAGE)
sys.exit(0)
elif help_arg in self.COMMANDS_TOPICS:
self.notify(usage + self._list_subcommands())
sys.exit(0)
elif help_arg == "all":
# if we're doing --help all, the OVERVIEW is part of the SHORT_USAGE at
# the top; for --help <some other topic>, the overview is off-topic, so it is not included
usage += COMMAND_OVERVIEW % (apache_doc, nginx_doc)
else:
custom = VERB_HELP_MAP.get(help_arg, {}).get("usage", None)
usage = custom if custom else usage
return usage
def remove_config_file_domains_for_renewal(self, parsed_args):
"""Make "certbot renew" safe if domains are set in cli.ini."""
# Works around https://github.com/certbot/certbot/issues/4096
if self.verb == "renew":
for source, flags in self.parser._source_to_settings.items(): # pylint: disable=protected-access
if source.startswith("config_file") and "domains" in flags:
parsed_args.domains = _Default() if self.detect_defaults else []
def parse_args(self):
"""Parses command line arguments and returns the result.
:returns: parsed command line arguments
:rtype: argparse.Namespace
"""
parsed_args = self.parser.parse_args(self.args)
parsed_args.func = self.VERBS[self.verb]
parsed_args.verb = self.verb
self.remove_config_file_domains_for_renewal(parsed_args)
if self.detect_defaults:
return parsed_args
self.defaults = dict((key, copy.deepcopy(self.parser.get_default(key)))
for key in vars(parsed_args))
# Do any post-parsing homework here
if self.verb == "renew":
if parsed_args.force_interactive:
raise errors.Error(
"{0} cannot be used with renew".format(
constants.FORCE_INTERACTIVE_FLAG))
parsed_args.noninteractive_mode = True
if parsed_args.force_interactive and parsed_args.noninteractive_mode:
raise errors.Error(
"Flag for non-interactive mode and {0} conflict".format(
constants.FORCE_INTERACTIVE_FLAG))
if parsed_args.staging or parsed_args.dry_run:
self.set_test_server(parsed_args)
if parsed_args.csr:
self.handle_csr(parsed_args)
if parsed_args.must_staple:
parsed_args.staple = True
if parsed_args.validate_hooks:
hooks.validate_hooks(parsed_args)
if parsed_args.allow_subset_of_names:
if any(util.is_wildcard_domain(d) for d in parsed_args.domains):
raise errors.Error("Using --allow-subset-of-names with a"
" wildcard domain is not supported.")
if parsed_args.hsts and parsed_args.auto_hsts:
raise errors.Error(
"Parameters --hsts and --auto-hsts cannot be used simultaneously.")
possible_deprecation_warning(parsed_args)
return parsed_args
def set_test_server(self, parsed_args):
"""We have --staging/--dry-run; perform sanity check and set config.server"""
if parsed_args.server not in (flag_default("server"), constants.STAGING_URI):
conflicts = ["--staging"] if parsed_args.staging else []
conflicts += ["--dry-run"] if parsed_args.dry_run else []
raise errors.Error("--server value conflicts with {0}".format(
" and ".join(conflicts)))
parsed_args.server = constants.STAGING_URI
if parsed_args.dry_run:
if self.verb not in ["certonly", "renew"]:
raise errors.Error("--dry-run currently only works with the "
"'certonly' or 'renew' subcommands (%r)" % self.verb)
parsed_args.break_my_certs = parsed_args.staging = True
if glob.glob(os.path.join(parsed_args.config_dir, constants.ACCOUNTS_DIR, "*")):
# The user has a prod account, but might not have a staging
# one; we don't want to start trying to perform interactive registration
parsed_args.tos = True
parsed_args.register_unsafely_without_email = True
def handle_csr(self, parsed_args):
"""Process a --csr flag."""
if parsed_args.verb != "certonly":
raise errors.Error("Currently, a CSR file may only be specified "
"when obtaining a new or replacement "
"via the certonly command. Please try the "
"certonly command instead.")
if parsed_args.allow_subset_of_names:
raise errors.Error("--allow-subset-of-names cannot be used with --csr")
csrfile, contents = parsed_args.csr[0:2]
typ, csr, domains = crypto_util.import_csr_file(csrfile, contents)
# This is not necessary for webroot to work; however,
# obtain_certificate_from_csr requires parsed_args.domains to be set
for domain in domains:
add_domains(parsed_args, domain)
if not domains:
# TODO: add CN to domains instead:
raise errors.Error(
"Unfortunately, your CSR %s needs to have a SubjectAltName for every domain"
% parsed_args.csr[0])
parsed_args.actual_csr = (csr, typ)
csr_domains = set([d.lower() for d in domains])
config_domains = set(parsed_args.domains)
if csr_domains != config_domains:
raise errors.ConfigurationError(
"Inconsistent domain requests:\nFrom the CSR: {0}\nFrom command line/config: {1}"
.format(", ".join(csr_domains), ", ".join(config_domains)))
def determine_verb(self):
"""Determines the verb/subcommand provided by the user.
This function works around some of the limitations of argparse.
"""
if "-h" in self.args or "--help" in self.args:
# all verbs double as help arguments; don't get them confused
self.verb = "help"
return
for i, token in enumerate(self.args):
if token in self.VERBS:
verb = token
if verb == "auth":
verb = "certonly"
if verb == "everything":
verb = "run"
self.verb = verb
self.args.pop(i)
return
self.verb = "run"
def prescan_for_flag(self, flag, possible_arguments):
"""Checks cli input for flags.
Check for a flag, which accepts a fixed set of possible arguments, in
the command line; we will use this information to configure argparse's
help correctly. Return the flag's argument, if it has one that matches
the sequence @possible_arguments; otherwise return whether the flag is
present.
"""
if flag not in self.args:
return False
pos = self.args.index(flag)
try:
nxt = self.args[pos + 1]
if nxt in possible_arguments:
return nxt
except IndexError:
pass
return True
def add(self, topics, *args, **kwargs):
"""Add a new command line argument.
:param topics: str or [str] help topic(s) this should be listed under,
or None for options that don't fit under a specific
topic which will only be shown in "--help all" output.
The first entry determines where the flag lives in the
"--help all" output (None -> "optional arguments").
:param list *args: the names of this argument flag
:param dict **kwargs: various argparse settings for this argument
"""
if isinstance(topics, list):
# if this flag can be listed in multiple sections, try to pick the one
# that the user has asked for help about
topic = self.help_arg if self.help_arg in topics else topics[0]
else:
topic = topics # there's only one
if self.detect_defaults:
kwargs = self.modify_kwargs_for_default_detection(**kwargs)
if self.visible_topics[topic]:
if topic in self.groups:
group = self.groups[topic]
group.add_argument(*args, **kwargs)
else:
self.parser.add_argument(*args, **kwargs)
else:
kwargs["help"] = argparse.SUPPRESS
self.parser.add_argument(*args, **kwargs)
def modify_kwargs_for_default_detection(self, **kwargs):
"""Modify an arg so we can check if it was set by the user.
Changes the parameters given to argparse when adding an argument
so we can properly detect if the value was set by the user.
:param dict kwargs: various argparse settings for this argument
:returns: a modified versions of kwargs
:rtype: dict
"""
action = kwargs.get("action", None)
if action not in EXIT_ACTIONS:
kwargs["action"] = ("store_true" if action in ZERO_ARG_ACTIONS else
"store")
kwargs["default"] = _Default()
for param in ARGPARSE_PARAMS_TO_REMOVE:
kwargs.pop(param, None)
return kwargs
def add_deprecated_argument(self, argument_name, num_args):
"""Adds a deprecated argument with the name argument_name.
Deprecated arguments are not shown in the help. If they are used
on the command line, a warning is shown stating that the
argument is deprecated and no other action is taken.
:param str argument_name: Name of deprecated argument.
:param int num_args: Number of arguments the option takes.
"""
util.add_deprecated_argument(
self.parser.add_argument, argument_name, num_args)
def add_group(self, topic, verbs=(), **kwargs):
"""Create a new argument group.
This method must be called once for every topic; however, calls
to this function are left next to the argument definitions for
clarity.
:param str topic: Name of the new argument group.
:param list verbs: List of subcommands that should be documented as part of
this help group / topic
:returns: The new argument group.
:rtype: `HelpfulArgumentGroup`
"""
if self.visible_topics[topic]:
self.groups[topic] = self.parser.add_argument_group(topic, **kwargs)
if self.help_arg:
for v in verbs:
self.groups[topic].add_argument(v, help=VERB_HELP_MAP[v]["short"])
return HelpfulArgumentGroup(self, topic)
def add_plugin_args(self, plugins):
"""
Let each of the plugins add its own command line arguments, which
may or may not be displayed as help topics.
"""
for name, plugin_ep in six.iteritems(plugins):
parser_or_group = self.add_group(name,
description=plugin_ep.long_description)
plugin_ep.plugin_cls.inject_parser_options(parser_or_group, name)
def determine_help_topics(self, chosen_topic):
"""
The user may have requested help on a topic, return a dict of which
topics to display. @chosen_topic has prescan_for_flag's return type
:returns: dict
"""
# topics maps each topic to whether it should be documented by
# argparse on the command line
if chosen_topic == "auth":
chosen_topic = "certonly"
if chosen_topic == "everything":
chosen_topic = "run"
if chosen_topic == "all":
# Addition of condition closes #6209 (removal of duplicate route53 option).
return dict([(t, True) if t != 'certbot-route53:auth' else (t, False)
for t in self.help_topics])
elif not chosen_topic:
return dict([(t, False) for t in self.help_topics])
return dict([(t, t == chosen_topic) for t in self.help_topics])
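# Rough usage sketch (not executed at import): how "certbot --help security"
# style input is mapped onto help topics via prescan_for_flag() and
# determine_help_topics(). Passing an empty plugins mapping is an assumption
# made for brevity; real callers pass certbot's plugin registry.
def _demo_help_topic_detection():
    helpful = HelpfulArgumentParser(["--help", "security"], plugins={})
    # help_arg holds the matched topic string; only that topic is marked visible.
    return helpful.help_arg, helpful.visible_topics["security"]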
def _add_all_groups(helpful):
helpful.add_group("automation", description="Flags for automating execution & other tweaks")
helpful.add_group("security", description="Security parameters & server settings")
helpful.add_group("testing",
description="The following flags are meant for testing and integration purposes only.")
helpful.add_group("paths", description="Flags for changing execution paths & servers")
helpful.add_group("manage",
description="Various subcommands and flags are available for managing your certificates:",
verbs=["certificates", "delete", "renew", "revoke", "update_symlinks"])
# VERBS
for verb, docs in VERB_HELP:
name = docs.get("realname", verb)
helpful.add_group(name, description=docs["opts"])
def prepare_and_parse_args(plugins, args, detect_defaults=False): # pylint: disable=too-many-statements
"""Returns parsed command line arguments.
:param .PluginsRegistry plugins: available plugins
:param list args: command line arguments with the program name removed
:returns: parsed command line arguments
:rtype: argparse.Namespace
"""
# pylint: disable=too-many-statements
helpful = HelpfulArgumentParser(args, plugins, detect_defaults)
_add_all_groups(helpful)
# --help is automatically provided by argparse
helpful.add(
None, "-v", "--verbose", dest="verbose_count", action="count",
default=flag_default("verbose_count"), help="This flag can be used "
"multiple times to incrementally increase the verbosity of output, "
"e.g. -vvv.")
helpful.add(
None, "-t", "--text", dest="text_mode", action="store_true",
default=flag_default("text_mode"), help=argparse.SUPPRESS)
helpful.add(
None, "--max-log-backups", type=nonnegative_int,
default=flag_default("max_log_backups"),
help="Specifies the maximum number of backup logs that should "
"be kept by Certbot's built in log rotation. Setting this "
"flag to 0 disables log rotation entirely, causing "
"Certbot to always append to the same log file.")
helpful.add(
[None, "automation", "run", "certonly", "enhance"],
"-n", "--non-interactive", "--noninteractive",
dest="noninteractive_mode", action="store_true",
default=flag_default("noninteractive_mode"),
help="Run without ever asking for user input. This may require "
"additional command line flags; the client will try to explain "
"which ones are required if it finds one missing")
helpful.add(
[None, "register", "run", "certonly", "enhance"],
constants.FORCE_INTERACTIVE_FLAG, action="store_true",
default=flag_default("force_interactive"),
help="Force Certbot to be interactive even if it detects it's not "
"being run in a terminal. This flag cannot be used with the "
"renew subcommand.")
helpful.add(
[None, "run", "certonly", "certificates", "enhance"],
"-d", "--domains", "--domain", dest="domains",
metavar="DOMAIN", action=_DomainsAction,
default=flag_default("domains"),
help="Domain names to apply. For multiple domains you can use "
"multiple -d flags or enter a comma separated list of domains "
"as a parameter. The first domain provided will be the "
"subject CN of the certificate, and all domains will be "
"Subject Alternative Names on the certificate. "
"The first domain will also be used in "
"some software user interfaces and as the file paths for the "
"certificate and related material unless otherwise "
"specified or you already have a certificate with the same "
"name. In the case of a name collision it will append a number "
"like 0001 to the file path name. (default: Ask)")
helpful.add(
[None, "run", "certonly", "register"],
"--eab-kid", dest="eab_kid",
metavar="EAB_KID",
help="Key Identifier for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "register"],
"--eab-hmac-key", dest="eab_hmac_key",
metavar="EAB_HMAC_KEY",
help="HMAC key for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "manage", "delete", "certificates",
"renew", "enhance"], "--cert-name", dest="certname",
metavar="CERTNAME", default=flag_default("certname"),
help="Certificate name to apply. This name is used by Certbot for housekeeping "
"and in file paths; it doesn't affect the content of the certificate itself. "
"To see certificate names, run 'certbot certificates'. "
"When creating a new certificate, specifies the new certificate's name. "
"(default: the first provided domain or the name of an existing "
"certificate on your system for the same domains)")
helpful.add(
[None, "testing", "renew", "certonly"],
"--dry-run", action="store_true", dest="dry_run",
default=flag_default("dry_run"),
help="Perform a test run of the client, obtaining test (invalid) certificates"
" but not saving them to disk. This can currently only be used"
" with the 'certonly' and 'renew' subcommands. \nNote: Although --dry-run"
" tries to avoid making any persistent changes on a system, it "
" is not completely side-effect free: if used with webserver authenticator plugins"
" like apache and nginx, it makes and then reverts temporary config changes"
" in order to obtain test certificates, and reloads webservers to deploy and then"
" roll back those changes. It also calls --pre-hook and --post-hook commands"
" if they are defined because they may be necessary to accurately simulate"
" renewal. --deploy-hook commands are not called.")
helpful.add(
["register", "automation"], "--register-unsafely-without-email", action="store_true",
default=flag_default("register_unsafely_without_email"),
help="Specifying this flag enables registering an account with no "
"email address. This is strongly discouraged, because in the "
"event of key loss or account compromise you will irrevocably "
"lose access to your account. You will also be unable to receive "
"notice about impending expiration or revocation of your "
"certificates. Updates to the Subscriber Agreement will still "
"affect you, and will be effective 14 days after posting an "
"update to the web site.")
# TODO: When `certbot register --update-registration` is fully deprecated,
# delete the following helpful.add call
helpful.add(
"register", "--update-registration", action="store_true",
default=flag_default("update_registration"), dest="update_registration",
help=argparse.SUPPRESS)
helpful.add(
["register", "update_account", "unregister", "automation"], "-m", "--email",
default=flag_default("email"),
help=config_help("email"))
helpful.add(["register", "update_account", "automation"], "--eff-email", action="store_true",
default=flag_default("eff_email"), dest="eff_email",
help="Share your e-mail address with EFF")
helpful.add(["register", "update_account", "automation"], "--no-eff-email",
action="store_false", default=flag_default("eff_email"), dest="eff_email",
help="Don't share your e-mail address with EFF")
helpful.add(
["automation", "certonly", "run"],
"--keep-until-expiring", "--keep", "--reinstall",
dest="reinstall", action="store_true", default=flag_default("reinstall"),
help="If the requested certificate matches an existing certificate, always keep the "
"existing one until it is due for renewal (for the "
"'run' subcommand this means reinstall the existing certificate). (default: Ask)")
helpful.add(
"automation", "--expand", action="store_true", default=flag_default("expand"),
help="If an existing certificate is a strict subset of the requested names, "
"always expand and replace it with the additional names. (default: Ask)")
helpful.add(
"automation", "--version", action="version",
version="%(prog)s {0}".format(certbot.__version__),
help="show program's version number and exit")
helpful.add(
["automation", "renew"],
"--force-renewal", "--renew-by-default", dest="renew_by_default",
action="store_true", default=flag_default("renew_by_default"),
help="If a certificate "
"already exists for the requested domains, renew it now, "
"regardless of whether it is near expiry. (Often "
"--keep-until-expiring is more appropriate). Also implies "
"--expand.")
helpful.add(
"automation", "--renew-with-new-domains", dest="renew_with_new_domains",
action="store_true", default=flag_default("renew_with_new_domains"),
help="If a "
"certificate already exists for the requested certificate name "
"but does not match the requested domains, renew it now, "
"regardless of whether it is near expiry.")
helpful.add(
"automation", "--reuse-key", dest="reuse_key",
action="store_true", default=flag_default("reuse_key"),
help="When renewing, use the same private key as the existing "
"certificate.")
helpful.add(
["automation", "renew", "certonly"],
"--allow-subset-of-names", action="store_true",
default=flag_default("allow_subset_of_names"),
help="When performing domain validation, do not consider it a failure "
"if authorizations can not be obtained for a strict subset of "
"the requested domains. This may be useful for allowing renewals for "
"multiple domains to succeed even if some domains no longer point "
"at this system. This option cannot be used with --csr.")
helpful.add(
"automation", "--agree-tos", dest="tos", action="store_true",
default=flag_default("tos"),
help="Agree to the ACME Subscriber Agreement (default: Ask)")
helpful.add(
["unregister", "automation"], "--account", metavar="ACCOUNT_ID",
default=flag_default("account"),
help="Account ID to use")
helpful.add(
"automation", "--duplicate", dest="duplicate", action="store_true",
default=flag_default("duplicate"),
help="Allow making a certificate lineage that duplicates an existing one "
"(both can be renewed in parallel)")
helpful.add(
"automation", "--os-packages-only", action="store_true",
default=flag_default("os_packages_only"),
help="(certbot-auto only) install OS package dependencies and then stop")
helpful.add(
"automation", "--no-self-upgrade", action="store_true",
default=flag_default("no_self_upgrade"),
help="(certbot-auto only) prevent the certbot-auto script from"
" upgrading itself to newer released versions (default: Upgrade"
" automatically)")
helpful.add(
"automation", "--no-bootstrap", action="store_true",
default=flag_default("no_bootstrap"),
help="(certbot-auto only) prevent the certbot-auto script from"
" installing OS-level dependencies (default: Prompt to install "
" OS-wide dependencies, but exit if the user says 'No')")
helpful.add(
"automation", "--no-permissions-check", action="store_true",
default=flag_default("no_permissions_check"),
help="(certbot-auto only) skip the check on the file system"
" permissions of the certbot-auto script")
helpful.add(
["automation", "renew", "certonly", "run"],
"-q", "--quiet", dest="quiet", action="store_true",
default=flag_default("quiet"),
help="Silence all output except errors. Useful for automation via cron."
" Implies --non-interactive.")
# overwrites server, handled in HelpfulArgumentParser.parse_args()
helpful.add(["testing", "revoke", "run"], "--test-cert", "--staging",
dest="staging", action="store_true", default=flag_default("staging"),
help="Use the staging server to obtain or revoke test (invalid) certificates; equivalent"
" to --server " + constants.STAGING_URI)
helpful.add(
"testing", "--debug", action="store_true", default=flag_default("debug"),
help="Show tracebacks in case of errors, and allow certbot-auto "
"execution on experimental platforms")
helpful.add(
[None, "certonly", "run"], "--debug-challenges", action="store_true",
default=flag_default("debug_challenges"),
help="After setting up challenges, wait for user input before "
"submitting to CA")
helpful.add(
"testing", "--no-verify-ssl", action="store_true",
help=config_help("no_verify_ssl"),
default=flag_default("no_verify_ssl"))
helpful.add(
["testing", "standalone", "manual"], "--http-01-port", type=int,
dest="http01_port",
default=flag_default("http01_port"), help=config_help("http01_port"))
helpful.add(
["testing", "standalone"], "--http-01-address",
dest="http01_address",
default=flag_default("http01_address"), help=config_help("http01_address"))
helpful.add(
["testing", "nginx"], "--https-port", type=int,
default=flag_default("https_port"),
help=config_help("https_port"))
helpful.add(
"testing", "--break-my-certs", action="store_true",
default=flag_default("break_my_certs"),
help="Be willing to replace or renew valid certificates with invalid "
"(testing/staging) certificates")
helpful.add(
"security", "--rsa-key-size", type=int, metavar="N",
default=flag_default("rsa_key_size"), help=config_help("rsa_key_size"))
helpful.add(
"security", "--must-staple", action="store_true",
dest="must_staple", default=flag_default("must_staple"),
help=config_help("must_staple"))
helpful.add(
["security", "enhance"],
"--redirect", action="store_true", dest="redirect",
default=flag_default("redirect"),
help="Automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: Ask)")
helpful.add(
"security", "--no-redirect", action="store_false", dest="redirect",
default=flag_default("redirect"),
help="Do not automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: Ask)")
helpful.add(
["security", "enhance"],
"--hsts", action="store_true", dest="hsts", default=flag_default("hsts"),
help="Add the Strict-Transport-Security header to every HTTP response."
" Forcing browser to always use SSL for the domain."
" Defends against SSL Stripping.")
helpful.add(
"security", "--no-hsts", action="store_false", dest="hsts",
default=flag_default("hsts"), help=argparse.SUPPRESS)
helpful.add(
["security", "enhance"],
"--uir", action="store_true", dest="uir", default=flag_default("uir"),
help='Add the "Content-Security-Policy: upgrade-insecure-requests"'
' header to every HTTP response, forcing the browser to use'
' https:// for every http:// resource.')
helpful.add(
"security", "--no-uir", action="store_false", dest="uir", default=flag_default("uir"),
help=argparse.SUPPRESS)
helpful.add(
"security", "--staple-ocsp", action="store_true", dest="staple",
default=flag_default("staple"),
help="Enables OCSP Stapling. A valid OCSP response is stapled to"
" the certificate that the server offers during TLS.")
helpful.add(
"security", "--no-staple-ocsp", action="store_false", dest="staple",
default=flag_default("staple"), help=argparse.SUPPRESS)
helpful.add(
"security", "--strict-permissions", action="store_true",
default=flag_default("strict_permissions"),
help="Require that all configuration files are owned by the current "
"user; only needed if your config is somewhere unsafe like /tmp/")
helpful.add(
["manual", "standalone", "certonly", "renew"],
"--preferred-challenges", dest="pref_challs",
action=_PrefChallAction, default=flag_default("pref_challs"),
help='A sorted, comma-delimited list of the preferred challenges to '
'use during authorization with the most preferred challenge '
'listed first (Eg, "dns" or "http,dns"). '
'Not all plugins support all challenges. See '
'https://certbot.eff.org/docs/using.html#plugins for details. '
'ACME Challenges are versioned, but if you pick "http" rather '
'than "http-01", Certbot will select the latest version '
'automatically.')
helpful.add(
"renew", "--pre-hook",
help="Command to be run in a shell before obtaining any certificates."
" Intended primarily for renewal, where it can be used to temporarily"
" shut down a webserver that might conflict with the standalone"
" plugin. This will only be called if a certificate is actually to be"
" obtained/renewed. When renewing several certificates that have"
" identical pre-hooks, only the first will be executed.")
helpful.add(
"renew", "--post-hook",
help="Command to be run in a shell after attempting to obtain/renew"
" certificates. Can be used to deploy renewed certificates, or to"
" restart any servers that were stopped by --pre-hook. This is only"
" run if an attempt was made to obtain/renew a certificate. If"
" multiple renewed certificates have identical post-hooks, only"
" one will be run.")
helpful.add("renew", "--renew-hook",
action=_RenewHookAction, help=argparse.SUPPRESS)
helpful.add(
"renew", "--no-random-sleep-on-renew", action="store_false",
default=flag_default("random_sleep_on_renew"), dest="random_sleep_on_renew",
help=argparse.SUPPRESS)
helpful.add(
"renew", "--deploy-hook", action=_DeployHookAction,
help='Command to be run in a shell once for each successfully'
' issued certificate. For this command, the shell variable'
' $RENEWED_LINEAGE will point to the config live subdirectory'
' (for example, "/etc/letsencrypt/live/example.com") containing'
' the new certificates and keys; the shell variable'
' $RENEWED_DOMAINS will contain a space-delimited list of'
' renewed certificate domains (for example, "example.com'
' www.example.com"')
helpful.add(
"renew", "--disable-hook-validation",
action="store_false", dest="validate_hooks",
default=flag_default("validate_hooks"),
help="Ordinarily the commands specified for"
" --pre-hook/--post-hook/--deploy-hook will be checked for"
" validity, to see if the programs being run are in the $PATH,"
" so that mistakes can be caught early, even when the hooks"
" aren't being run just yet. The validation is rather"
" simplistic and fails if you use more advanced shell"
" constructs, so you can use this switch to disable it."
" (default: False)")
helpful.add(
"renew", "--no-directory-hooks", action="store_false",
default=flag_default("directory_hooks"), dest="directory_hooks",
help="Disable running executables found in Certbot's hook directories"
" during renewal. (default: False)")
helpful.add(
"renew", "--disable-renew-updates", action="store_true",
default=flag_default("disable_renew_updates"), dest="disable_renew_updates",
help="Disable automatic updates to your server configuration that"
" would otherwise be done by the selected installer plugin, and triggered"
" when the user executes \"certbot renew\", regardless of if the certificate"
" is renewed. This setting does not apply to important TLS configuration"
" updates.")
helpful.add(
"renew", "--no-autorenew", action="store_false",
default=flag_default("autorenew"), dest="autorenew",
help="Disable auto renewal of certificates.")
helpful.add_deprecated_argument("--agree-dev-preview", 0)
helpful.add_deprecated_argument("--dialog", 0)
# Deprecation of tls-sni-01 related cli flags
# TODO: remove these flags completely in a few releases
class _DeprecatedTLSSNIAction(util._ShowWarning): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
super(_DeprecatedTLSSNIAction, self).__call__(parser, namespace, values, option_string)
namespace.https_port = values
helpful.add(
["testing", "standalone", "apache", "nginx"], "--tls-sni-01-port",
type=int, action=_DeprecatedTLSSNIAction, help=argparse.SUPPRESS)
helpful.add_deprecated_argument("--tls-sni-01-address", 1)
# Populate the command line parameters for new style enhancements
enhancements.populate_cli(helpful.add)
_create_subparsers(helpful)
_paths_parser(helpful)
# _plugins_parsing should be the last thing to act upon the main
# parser (--help should display plugin-specific options last)
_plugins_parsing(helpful, plugins)
if not detect_defaults:
global helpful_parser # pylint: disable=global-statement
helpful_parser = helpful
return helpful.parse_args()
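# Hedged sketch of a typical call site (the plugins argument is a placeholder;
# in certbot proper it is the registry produced by plugin discovery). It shows
# that the returned namespace carries both the selected verb and its handler.
def _demo_prepare_and_parse_args(plugins):
    config = prepare_and_parse_args(plugins, ["certonly", "-d", "example.com"])
    # config.func is the main.* callable chosen for the verb.
    return config.verb, config.domains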
def _create_subparsers(helpful):
helpful.add("config_changes", "--num", type=int, default=flag_default("num"),
help="How many past revisions you want to be displayed")
from certbot.client import sample_user_agent # avoid import loops
helpful.add(
None, "--user-agent", default=flag_default("user_agent"),
help='Set a custom user agent string for the client. User agent strings allow '
'the CA to collect high level statistics about success rates by OS, '
'plugin and use case, and to know when to deprecate support for past Python '
"versions and flags. If you wish to hide this information from the Let's "
'Encrypt server, set this to "". '
'(default: {0}). The flags encoded in the user agent are: '
'--duplicate, --force-renew, --allow-subset-of-names, -n, and '
'whether any hooks are set.'.format(sample_user_agent()))
helpful.add(
None, "--user-agent-comment", default=flag_default("user_agent_comment"),
type=_user_agent_comment_type,
help="Add a comment to the default user agent string. May be used when repackaging Certbot "
"or calling it from another tool to allow additional statistical data to be collected."
" Ignored if --user-agent is set. (Example: Foo-Wrapper/1.0)")
helpful.add("certonly",
"--csr", default=flag_default("csr"), type=read_file,
help="Path to a Certificate Signing Request (CSR) in DER or PEM format."
" Currently --csr only works with the 'certonly' subcommand.")
helpful.add("revoke",
"--reason", dest="reason",
choices=CaseInsensitiveList(sorted(constants.REVOCATION_REASONS,
key=constants.REVOCATION_REASONS.get)),
action=_EncodeReasonAction, default=flag_default("reason"),
help="Specify reason for revoking certificate. (default: unspecified)")
helpful.add("revoke",
"--delete-after-revoke", action="store_true",
default=flag_default("delete_after_revoke"),
help="Delete certificates after revoking them, along with all previous and later "
"versions of those certificates.")
helpful.add("revoke",
"--no-delete-after-revoke", action="store_false",
dest="delete_after_revoke",
default=flag_default("delete_after_revoke"),
help="Do not delete certificates after revoking them. This "
"option should be used with caution because the 'renew' "
"subcommand will attempt to renew undeleted revoked "
"certificates.")
helpful.add("rollback",
"--checkpoints", type=int, metavar="N",
default=flag_default("rollback_checkpoints"),
help="Revert configuration N number of checkpoints.")
helpful.add("plugins",
"--init", action="store_true", default=flag_default("init"),
help="Initialize plugins.")
helpful.add("plugins",
"--prepare", action="store_true", default=flag_default("prepare"),
help="Initialize and prepare plugins.")
helpful.add("plugins",
"--authenticators", action="append_const", dest="ifaces",
default=flag_default("ifaces"),
const=interfaces.IAuthenticator, help="Limit to authenticator plugins only.")
helpful.add("plugins",
"--installers", action="append_const", dest="ifaces",
default=flag_default("ifaces"),
const=interfaces.IInstaller, help="Limit to installer plugins only.")
class CaseInsensitiveList(list):
"""A list that will ignore case when searching.
This class is passed to the `choices` argument of `argparse.add_argument`
through the `helpful` wrapper. It is necessary due to special handling of
command line arguments by `set_by_cli` in which the `type_func` is not applied."""
def __contains__(self, element):
return super(CaseInsensitiveList, self).__contains__(element.lower())
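# Tiny illustrative check (not part of certbot's test suite): membership tests
# lower-case the probed element, so "--reason KeyCompromise" matches the
# lower-case choices built from constants.REVOCATION_REASONS.
def _demo_case_insensitive_list():
    reasons = CaseInsensitiveList(["unspecified", "keycompromise", "superseded"])
    return "KeyCompromise" in reasons  # True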
def _paths_parser(helpful):
add = helpful.add
verb = helpful.verb
if verb == "help":
verb = helpful.help_arg
cph = "Path to where certificate is saved (with auth --csr), installed from, or revoked."
sections = ["paths", "install", "revoke", "certonly", "manage"]
if verb == "certonly":
add(sections, "--cert-path", type=os.path.abspath,
default=flag_default("auth_cert_path"), help=cph)
elif verb == "revoke":
add(sections, "--cert-path", type=read_file, required=False, help=cph)
else:
add(sections, "--cert-path", type=os.path.abspath, help=cph)
section = "paths"
if verb in ("install", "revoke"):
section = verb
# revoke --key-path reads a file, install --key-path takes a string
add(section, "--key-path",
type=((verb == "revoke" and read_file) or os.path.abspath),
help="Path to private key for certificate installation "
"or revocation (if account key is missing)")
default_cp = None
if verb == "certonly":
default_cp = flag_default("auth_chain_path")
add(["paths", "install"], "--fullchain-path", default=default_cp, type=os.path.abspath,
help="Accompanying path to a full certificate chain (certificate plus chain).")
add("paths", "--chain-path", default=default_cp, type=os.path.abspath,
help="Accompanying path to a certificate chain.")
add("paths", "--config-dir", default=flag_default("config_dir"),
help=config_help("config_dir"))
add("paths", "--work-dir", default=flag_default("work_dir"),
help=config_help("work_dir"))
add("paths", "--logs-dir", default=flag_default("logs_dir"),
help="Logs directory.")
add("paths", "--server", default=flag_default("server"),
help=config_help("server"))
def _plugins_parsing(helpful, plugins):
# It's nuts, but there are two "plugins" topics. Somehow this works
helpful.add_group(
"plugins", description="Plugin Selection: Certbot client supports an "
"extensible plugins architecture. See '%(prog)s plugins' for a "
"list of all installed plugins and their names. You can force "
"a particular plugin by setting options provided below. Running "
"--help <plugin_name> will list flags specific to that plugin.")
helpful.add("plugins", "--configurator", default=flag_default("configurator"),
help="Name of the plugin that is both an authenticator and an installer."
" Should not be used together with --authenticator or --installer. "
"(default: Ask)")
helpful.add("plugins", "-a", "--authenticator", default=flag_default("authenticator"),
help="Authenticator plugin name.")
helpful.add("plugins", "-i", "--installer", default=flag_default("installer"),
help="Installer plugin name (also used to find domains).")
helpful.add(["plugins", "certonly", "run", "install", "config_changes"],
"--apache", action="store_true", default=flag_default("apache"),
help="Obtain and install certificates using Apache")
helpful.add(["plugins", "certonly", "run", "install", "config_changes"],
"--nginx", action="store_true", default=flag_default("nginx"),
help="Obtain and install certificates using Nginx")
helpful.add(["plugins", "certonly"], "--standalone", action="store_true",
default=flag_default("standalone"),
help='Obtain certificates using a "standalone" webserver.')
helpful.add(["plugins", "certonly"], "--manual", action="store_true",
default=flag_default("manual"),
help="Provide laborious manual instructions for obtaining a certificate")
helpful.add(["plugins", "certonly"], "--webroot", action="store_true",
default=flag_default("webroot"),
help="Obtain certificates by placing files in a webroot directory.")
helpful.add(["plugins", "certonly"], "--dns-cloudflare", action="store_true",
default=flag_default("dns_cloudflare"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using Cloudflare for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-cloudxns", action="store_true",
default=flag_default("dns_cloudxns"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using CloudXNS for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-digitalocean", action="store_true",
default=flag_default("dns_digitalocean"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using DigitalOcean for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-dnsimple", action="store_true",
default=flag_default("dns_dnsimple"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using DNSimple for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-dnsmadeeasy", action="store_true",
default=flag_default("dns_dnsmadeeasy"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using DNS Made Easy for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-gehirn", action="store_true",
default=flag_default("dns_gehirn"),
help=("Obtain certificates using a DNS TXT record "
"(if you are using Gehirn Infrastracture Service for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-google", action="store_true",
default=flag_default("dns_google"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using Google Cloud DNS)."))
helpful.add(["plugins", "certonly"], "--dns-linode", action="store_true",
default=flag_default("dns_linode"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using Linode for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-luadns", action="store_true",
default=flag_default("dns_luadns"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using LuaDNS for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-nsone", action="store_true",
default=flag_default("dns_nsone"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using NS1 for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-ovh", action="store_true",
default=flag_default("dns_ovh"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using OVH for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-rfc2136", action="store_true",
default=flag_default("dns_rfc2136"),
help="Obtain certificates using a DNS TXT record (if you are using BIND for DNS).")
helpful.add(["plugins", "certonly"], "--dns-route53", action="store_true",
default=flag_default("dns_route53"),
help=("Obtain certificates using a DNS TXT record (if you are using Route53 for "
"DNS)."))
helpful.add(["plugins", "certonly"], "--dns-sakuracloud", action="store_true",
default=flag_default("dns_sakuracloud"),
help=("Obtain certificates using a DNS TXT record "
"(if you are using Sakura Cloud for DNS)."))
# things should not be reordered before/after this comment:
# plugins_group should be displayed in --help before plugin
# specific groups (so that plugins_group.description makes sense)
helpful.add_plugin_args(plugins)
class _EncodeReasonAction(argparse.Action):
"""Action class for parsing revocation reason."""
def __call__(self, parser, namespace, reason, option_string=None):
"""Encodes the reason for certificate revocation."""
code = constants.REVOCATION_REASONS[reason.lower()]
setattr(namespace, self.dest, code)
class _DomainsAction(argparse.Action):
"""Action class for parsing domains."""
def __call__(self, parser, namespace, domain, option_string=None):
"""Just wrap add_domains in argparseese."""
add_domains(namespace, domain)
def add_domains(args_or_config, domains):
"""Registers new domains to be used during the current client run.
Domains are not added to the list of requested domains if they have
already been registered.
:param args_or_config: parsed command line arguments
:type args_or_config: argparse.Namespace or
configuration.NamespaceConfig
:param str domains: one or more comma-separated domains
:returns: domains after they have been normalized and validated
:rtype: `list` of `str`
"""
validated_domains = []
for domain in domains.split(","):
domain = util.enforce_domain_sanity(domain.strip())
validated_domains.append(domain)
if domain not in args_or_config.domains:
args_or_config.domains.append(domain)
return validated_domains
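# Minimal sketch of the normalization performed by add_domains (the bare
# argparse.Namespace here is a stand-in for the parsed-arguments object certbot
# actually passes in).
def _demo_add_domains():
    ns = argparse.Namespace(domains=[])
    add_domains(ns, "example.com, www.example.com, example.com")
    # Entries are split on commas, stripped, sanity-checked and de-duplicated.
    return ns.domains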
class _PrefChallAction(argparse.Action):
"""Action class for parsing preferred challenges."""
def __call__(self, parser, namespace, pref_challs, option_string=None):
try:
challs = parse_preferred_challenges(pref_challs.split(","))
except errors.Error as error:
raise argparse.ArgumentError(self, str(error))
namespace.pref_challs.extend(challs)
def parse_preferred_challenges(pref_challs):
"""Translate and validate preferred challenges.
:param pref_challs: list of preferred challenge types
:type pref_challs: `list` of `str`
:returns: validated list of preferred challenge types
:rtype: `list` of `str`
:raises errors.Error: if pref_challs is invalid
"""
aliases = {"dns": "dns-01", "http": "http-01", "tls-sni": "tls-sni-01"}
challs = [c.strip() for c in pref_challs]
challs = [aliases.get(c, c) for c in challs]
# Drop tls-sni-01 from the list and generate a deprecation warning
# TODO: remove this option completely in a few releases
if "tls-sni-01" in challs:
logger.warning('TLS-SNI-01 support is deprecated. This value is being dropped from the '
'setting of --preferred-challenges and future versions of Certbot will '
'error if it is included.')
challs = [chall for chall in challs if chall != "tls-sni-01"]
unrecognized = ", ".join(name for name in challs
if name not in challenges.Challenge.TYPES)
if unrecognized:
raise errors.Error(
"Unrecognized challenges: {0}".format(unrecognized))
return challs
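# Worked example (illustrative only): short aliases are expanded to the
# versioned challenge names and the deprecated tls-sni-01 entry is dropped
# with a warning, so the result below would be ["http-01", "dns-01"].
def _demo_parse_preferred_challenges():
    return parse_preferred_challenges(["http", "dns-01", "tls-sni"])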
def _user_agent_comment_type(value):
if "(" in value or ")" in value:
raise argparse.ArgumentTypeError("may not contain parentheses")
return value
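# Quick illustration (hypothetical comment value): anything without parentheses
# passes through unchanged; a value containing "(" or ")" raises
# argparse.ArgumentTypeError instead.
def _demo_user_agent_comment():
    return _user_agent_comment_type("Foo-Wrapper/1.0")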
class _DeployHookAction(argparse.Action):
"""Action class for parsing deploy hooks."""
def __call__(self, parser, namespace, values, option_string=None):
renew_hook_set = namespace.deploy_hook != namespace.renew_hook
if renew_hook_set and namespace.renew_hook != values:
raise argparse.ArgumentError(
self, "conflicts with --renew-hook value")
namespace.deploy_hook = namespace.renew_hook = values
class _RenewHookAction(argparse.Action):
"""Action class for parsing renew hooks."""
def __call__(self, parser, namespace, values, option_string=None):
deploy_hook_set = namespace.deploy_hook is not None
if deploy_hook_set and namespace.deploy_hook != values:
raise argparse.ArgumentError(
self, "conflicts with --deploy-hook value")
namespace.renew_hook = values
def nonnegative_int(value):
"""Converts value to an int and checks that it is not negative.
This function should be used as the type parameter for argparse
arguments.
:param str value: value provided on the command line
:returns: integer representation of value
:rtype: int
:raises argparse.ArgumentTypeError: if value isn't a non-negative integer
"""
try:
int_value = int(value)
except ValueError:
raise argparse.ArgumentTypeError("value must be an integer")
if int_value < 0:
raise argparse.ArgumentTypeError("value must be non-negative")
return int_value
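# Hedged usage sketch: wiring nonnegative_int in as an argparse `type` callable
# on a standalone parser (flag name borrowed from --max-log-backups above; this
# is not certbot's own parser).
def _demo_nonnegative_int_flag():
    parser = argparse.ArgumentParser()
    parser.add_argument("--max-log-backups", type=nonnegative_int, default=0)
    return parser.parse_args(["--max-log-backups", "5"]).max_log_backups  # 5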
| [] | [] | ["CERTBOT_AUTO"] | [] | ["CERTBOT_AUTO"] | python | 1 | 0 | |
cloudasset/v1p1beta1/cloudasset-gen.go | // Copyright 2021 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated file. DO NOT EDIT.
// Package cloudasset provides access to the Cloud Asset API.
//
// For product documentation, see: https://cloud.google.com/asset-inventory/docs/quickstart
//
// Creating a client
//
// Usage example:
//
// import "google.golang.org/api/cloudasset/v1p1beta1"
// ...
// ctx := context.Background()
// cloudassetService, err := cloudasset.NewService(ctx)
//
// In this example, Google Application Default Credentials are used for authentication.
//
// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
//
// Other authentication options
//
// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
//
// cloudassetService, err := cloudasset.NewService(ctx, option.WithAPIKey("AIza..."))
//
// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
//
// config := &oauth2.Config{...}
// // ...
// token, err := config.Exchange(ctx, ...)
// cloudassetService, err := cloudasset.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
//
// See https://godoc.org/google.golang.org/api/option/ for details on options.
package cloudasset // import "google.golang.org/api/cloudasset/v1p1beta1"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
googleapi "google.golang.org/api/googleapi"
gensupport "google.golang.org/api/internal/gensupport"
option "google.golang.org/api/option"
internaloption "google.golang.org/api/option/internaloption"
htransport "google.golang.org/api/transport/http"
)
// Always reference these packages, just in case the auto-generated code
// below doesn't.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
var _ = internaloption.WithDefaultEndpoint
const apiId = "cloudasset:v1p1beta1"
const apiName = "cloudasset"
const apiVersion = "v1p1beta1"
const basePath = "https://cloudasset.googleapis.com/"
const mtlsBasePath = "https://cloudasset.mtls.googleapis.com/"
// OAuth2 scopes used by this API.
const (
// See, edit, configure, and delete your Google Cloud data and see the
// email address for your Google Account.
CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
)
// NewService creates a new Service.
func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) {
scopesOption := option.WithScopes(
"https://www.googleapis.com/auth/cloud-platform",
)
// NOTE: prepend, so we don't override user-specified scopes.
opts = append([]option.ClientOption{scopesOption}, opts...)
opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
client, endpoint, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
s, err := New(client)
if err != nil {
return nil, err
}
if endpoint != "" {
s.BasePath = endpoint
}
return s, nil
}
// New creates a new Service. It uses the provided http.Client for requests.
//
// Deprecated: please use NewService instead.
// To provide a custom HTTP client, use option.WithHTTPClient.
// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
func New(client *http.Client) (*Service, error) {
if client == nil {
return nil, errors.New("client is nil")
}
s := &Service{client: client, BasePath: basePath}
s.IamPolicies = NewIamPoliciesService(s)
s.Resources = NewResourcesService(s)
return s, nil
}
type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
IamPolicies *IamPoliciesService
Resources *ResourcesService
}
func (s *Service) userAgent() string {
if s.UserAgent == "" {
return googleapi.UserAgent
}
return googleapi.UserAgent + " " + s.UserAgent
}
func NewIamPoliciesService(s *Service) *IamPoliciesService {
rs := &IamPoliciesService{s: s}
return rs
}
type IamPoliciesService struct {
s *Service
}
func NewResourcesService(s *Service) *ResourcesService {
rs := &ResourcesService{s: s}
return rs
}
type ResourcesService struct {
s *Service
}
// AnalyzeIamPolicyLongrunningMetadata: Represents the metadata of the
// longrunning operation for the AnalyzeIamPolicyLongrunning rpc.
type AnalyzeIamPolicyLongrunningMetadata struct {
// CreateTime: Output only. The time the operation was created.
CreateTime string `json:"createTime,omitempty"`
// ForceSendFields is a list of field names (e.g. "CreateTime") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CreateTime") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *AnalyzeIamPolicyLongrunningMetadata) MarshalJSON() ([]byte, error) {
type NoMethod AnalyzeIamPolicyLongrunningMetadata
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// AnalyzeIamPolicyLongrunningResponse: A response message for
// AssetService.AnalyzeIamPolicyLongrunning.
type AnalyzeIamPolicyLongrunningResponse struct {
}
// AuditConfig: Specifies the audit configuration for a service. The
// configuration determines which permission types are logged, and what
// identities, if any, are exempted from logging. An AuditConfig must
// have one or more AuditLogConfigs. If there are AuditConfigs for both
// `allServices` and a specific service, the union of the two
// AuditConfigs is used for that service: the log_types specified in
// each AuditConfig are enabled, and the exempted_members in each
// AuditLogConfig are exempted. Example Policy with multiple
// AuditConfigs: { "audit_configs": [ { "service": "allServices",
// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members":
// [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" }, {
// "log_type": "ADMIN_READ" } ] }, { "service":
// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type":
// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [
// "user:[email protected]" ] } ] } ] } For sampleservice, this policy
// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts
// [email protected] from DATA_READ logging, and [email protected] from
// DATA_WRITE logging.
type AuditConfig struct {
// AuditLogConfigs: The configuration for logging of each type of
// permission.
AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"`
// Service: Specifies a service that will be enabled for audit logging.
// For example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
// `allServices` is a special value that covers all services.
Service string `json:"service,omitempty"`
// ForceSendFields is a list of field names (e.g. "AuditLogConfigs") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AuditLogConfigs") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *AuditConfig) MarshalJSON() ([]byte, error) {
type NoMethod AuditConfig
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
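// Illustrative sketch (not part of the generated bindings): the
// sampleservice entry from the JSON example above, expressed as a struct
// literal. The service name and email address are placeholders.
//
//	sampleAuditConfig := &AuditConfig{
//		Service: "sampleservice.googleapis.com",
//		AuditLogConfigs: []*AuditLogConfig{
//			{LogType: "DATA_READ"},
//			{LogType: "DATA_WRITE", ExemptedMembers: []string{"user:[email protected]"}},
//		},
//	}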
// AuditLogConfig: Provides the configuration for logging a type of
// permissions. Example: { "audit_log_configs": [ { "log_type":
// "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, {
// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and
// 'DATA_WRITE' logging, while exempting [email protected] from DATA_READ
// logging.
type AuditLogConfig struct {
// ExemptedMembers: Specifies the identities that do not cause logging
// for this type of permission. Follows the same format of
// Binding.members.
ExemptedMembers []string `json:"exemptedMembers,omitempty"`
// LogType: The log type that this config enables.
//
// Possible values:
// "LOG_TYPE_UNSPECIFIED" - Default case. Should never be this.
// "ADMIN_READ" - Admin reads. Example: CloudIAM getIamPolicy
// "DATA_WRITE" - Data writes. Example: CloudSQL Users create
// "DATA_READ" - Data reads. Example: CloudSQL Users list
LogType string `json:"logType,omitempty"`
// ForceSendFields is a list of field names (e.g. "ExemptedMembers") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ExemptedMembers") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *AuditLogConfig) MarshalJSON() ([]byte, error) {
type NoMethod AuditLogConfig
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
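// Illustrative sketch: a single AuditLogConfig that enables DATA_READ logging
// while exempting one (placeholder) user. Its MarshalJSON method, invoked
// when request bodies are serialized, yields roughly
// {"exemptedMembers":["user:[email protected]"],"logType":"DATA_READ"}.
//
//	readLogCfg := &AuditLogConfig{
//		LogType:         "DATA_READ",
//		ExemptedMembers: []string{"user:[email protected]"},
//	}
//	body, err := readLogCfg.MarshalJSON()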
// Binding: Associates `members`, or principals, with a `role`.
type Binding struct {
// Condition: The condition that is associated with this binding. If the
// condition evaluates to `true`, then this binding applies to the
// current request. If the condition evaluates to `false`, then this
// binding does not apply to the current request. However, a different
// role binding might grant the same role to one or more of the
// principals in this binding. To learn which resources support
// conditions in their IAM policies, see the IAM documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
Condition *Expr `json:"condition,omitempty"`
// Members: Specifies the principals requesting access for a Cloud
// Platform resource. `members` can have the following values: *
// `allUsers`: A special identifier that represents anyone who is on the
// internet; with or without a Google account. *
// `allAuthenticatedUsers`: A special identifier that represents anyone
// who is authenticated with a Google account or a service account. *
// `user:{emailid}`: An email address that represents a specific Google
// account. For example, `[email protected]` . *
// `serviceAccount:{emailid}`: An email address that represents a
// service account. For example,
// `[email protected]`. * `group:{emailid}`: An
// email address that represents a Google group. For example,
// `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An
// email address (plus unique identifier) representing a user that has
// been recently deleted. For example,
// `[email protected]?uid=123456789012345678901`. If the user is
// recovered, this value reverts to `user:{emailid}` and the recovered
// user retains the role in the binding. *
// `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
// (plus unique identifier) representing a service account that has been
// recently deleted. For example,
// `[email protected]?uid=123456789012345678901`.
// If the service account is undeleted, this value reverts to
// `serviceAccount:{emailid}` and the undeleted service account retains
// the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`:
// An email address (plus unique identifier) representing a Google group
// that has been recently deleted. For example,
// `[email protected]?uid=123456789012345678901`. If the group is
// recovered, this value reverts to `group:{emailid}` and the recovered
// group retains the role in the binding. * `domain:{domain}`: The G
// Suite domain (primary) that represents all the users of that domain.
// For example, `google.com` or `example.com`.
Members []string `json:"members,omitempty"`
// Role: Role that is assigned to the list of `members`, or principals.
// For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
Role string `json:"role,omitempty"`
// ForceSendFields is a list of field names (e.g. "Condition") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Condition") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Binding) MarshalJSON() ([]byte, error) {
type NoMethod Binding
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
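// Illustrative sketch: a conditional Binding granting roles/viewer to a user
// and a service account, using the Expr type defined later in this file. All
// identifiers and the expiry expression are placeholders.
//
//	viewerBinding := &Binding{
//		Role: "roles/viewer",
//		Members: []string{
//			"user:[email protected]",
//			"serviceAccount:[email protected]",
//		},
//		Condition: &Expr{
//			Title:      "expires_end_of_2021",
//			Expression: `request.time < timestamp("2022-01-01T00:00:00Z")`,
//		},
//	}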
// Explanation: Explanation about the IAM policy search result.
type Explanation struct {
// MatchedPermissions: The map from roles to their included permission
// matching the permission query (e.g. containing
// `policy.role.permissions:`). Example role string:
// "roles/compute.instanceAdmin". The roles can also be found in the
// returned `policy` bindings. Note that the map is populated only if
// requesting with a permission query.
MatchedPermissions map[string]Permissions `json:"matchedPermissions,omitempty"`
// ForceSendFields is a list of field names (e.g. "MatchedPermissions")
// to unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MatchedPermissions") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Explanation) MarshalJSON() ([]byte, error) {
type NoMethod Explanation
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Expr: Represents a textual expression in the Common Expression
// Language (CEL) syntax. CEL is a C-like expression language. The
// syntax and semantics of CEL are documented at
// https://github.com/google/cel-spec. Example (Comparison): title:
// "Summary size limit" description: "Determines if a summary is less
// than 100 chars" expression: "document.summary.size() < 100" Example
// (Equality): title: "Requestor is owner" description: "Determines if
// requestor is the document owner" expression: "document.owner ==
// request.auth.claims.email" Example (Logic): title: "Public documents"
// description: "Determine whether the document should be publicly
// visible" expression: "document.type != 'private' && document.type !=
// 'internal'" Example (Data Manipulation): title: "Notification string"
// description: "Create a notification string with a timestamp."
// expression: "'New message received at ' +
// string(document.create_time)" The exact variables and functions that
// may be referenced within an expression are determined by the service
// that evaluates it. See the service documentation for additional
// information.
type Expr struct {
// Description: Optional. Description of the expression. This is a
// longer text which describes the expression, e.g. when hovered over it
// in a UI.
Description string `json:"description,omitempty"`
// Expression: Textual representation of an expression in Common
// Expression Language syntax.
Expression string `json:"expression,omitempty"`
// Location: Optional. String indicating the location of the expression
// for error reporting, e.g. a file name and a position in the file.
Location string `json:"location,omitempty"`
// Title: Optional. Title for the expression, i.e. a short string
// describing its purpose. This can be used e.g. in UIs which allow
// entering the expression.
Title string `json:"title,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Expr) MarshalJSON() ([]byte, error) {
type NoMethod Expr
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
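// Illustrative sketch: the "Summary size limit" comparison example from the
// comment above, expressed as an Expr value.
//
//	summaryLimit := &Expr{
//		Title:       "Summary size limit",
//		Description: "Determines if a summary is less than 100 chars",
//		Expression:  "document.summary.size() < 100",
//	}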
// GoogleCloudAssetV1p7beta1Asset: An asset in Google Cloud. An asset
// can be any resource in the Google Cloud resource hierarchy
// (https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy),
// a resource outside the Google Cloud resource hierarchy (such as
// Google Kubernetes Engine clusters and objects), or a policy (e.g.
// Cloud IAM policy). See Supported asset types
// (https://cloud.google.com/asset-inventory/docs/supported-asset-types)
// for more information.
type GoogleCloudAssetV1p7beta1Asset struct {
// AccessLevel: Please also refer to the access level user guide
// (https://cloud.google.com/access-context-manager/docs/overview#access-levels).
AccessLevel *GoogleIdentityAccesscontextmanagerV1AccessLevel `json:"accessLevel,omitempty"`
// AccessPolicy: Please also refer to the access policy user guide
// (https://cloud.google.com/access-context-manager/docs/overview#access-policies).
AccessPolicy *GoogleIdentityAccesscontextmanagerV1AccessPolicy `json:"accessPolicy,omitempty"`
// Ancestors: The ancestry path of an asset in Google Cloud resource
// hierarchy
// (https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy),
// represented as a list of relative resource names. An ancestry path
// starts with the closest ancestor in the hierarchy and ends at root.
// If the asset is a project, folder, or organization, the ancestry path
// starts from the asset itself. Example: `["projects/123456789",
// "folders/5432", "organizations/1234"]`
Ancestors []string `json:"ancestors,omitempty"`
// AssetType: The type of the asset. Example:
// `compute.googleapis.com/Disk` See Supported asset types
// (https://cloud.google.com/asset-inventory/docs/supported-asset-types)
// for more information.
AssetType string `json:"assetType,omitempty"`
// IamPolicy: A representation of the Cloud IAM policy set on a Google
// Cloud resource. There can be a maximum of one Cloud IAM policy set on
// any given resource. In addition, Cloud IAM policies inherit their
// granted access scope from any policies set on parent resources in the
// resource hierarchy. Therefore, the effective policy is the union of
// both the policy set on this resource and each policy set on all of
// the resource's ancestry resource levels in the hierarchy. See this
// topic (https://cloud.google.com/iam/docs/policies#inheritance) for
// more information.
IamPolicy *Policy `json:"iamPolicy,omitempty"`
// Name: The full name of the asset. Example:
// `//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1`
// See Resource names
// (https://cloud.google.com/apis/design/resource_names#full_resource_name)
// for more information.
Name string `json:"name,omitempty"`
// OrgPolicy: A representation of an organization policy
// (https://cloud.google.com/resource-manager/docs/organization-policy/overview#organization_policy).
// There can be more than one organization policy with different
// constraints set on a given resource.
OrgPolicy []*GoogleCloudOrgpolicyV1Policy `json:"orgPolicy,omitempty"`
// RelatedAssets: The related assets of the asset of one relationship
// type. One asset only represents one type of relationship.
RelatedAssets *GoogleCloudAssetV1p7beta1RelatedAssets `json:"relatedAssets,omitempty"`
// Resource: A representation of the resource.
Resource *GoogleCloudAssetV1p7beta1Resource `json:"resource,omitempty"`
// ServicePerimeter: Please also refer to the service perimeter user
// guide (https://cloud.google.com/vpc-service-controls/docs/overview).
ServicePerimeter *GoogleIdentityAccesscontextmanagerV1ServicePerimeter `json:"servicePerimeter,omitempty"`
// UpdateTime: The last update timestamp of an asset. update_time is
// updated when a create/update/delete operation is performed.
UpdateTime string `json:"updateTime,omitempty"`
// ForceSendFields is a list of field names (e.g. "AccessLevel") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AccessLevel") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleCloudAssetV1p7beta1Asset) MarshalJSON() ([]byte, error) {
type NoMethod GoogleCloudAssetV1p7beta1Asset
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
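// Illustrative sketch: a minimal asset record for a Compute Engine disk. The
// resource name and ancestry values are placeholders; in practice these
// values are produced by the Cloud Asset Inventory service rather than
// constructed by callers.
//
//	diskAsset := &GoogleCloudAssetV1p7beta1Asset{
//		Name:      "//compute.googleapis.com/projects/my_project_123/zones/zone1/disks/disk1",
//		AssetType: "compute.googleapis.com/Disk",
//		Ancestors: []string{"projects/123456789", "folders/5432", "organizations/1234"},
//	}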
// GoogleCloudAssetV1p7beta1RelatedAsset: An asset identity in Google
// Cloud, which contains its name, type, and ancestors. An asset can be
// any resource in the Google Cloud resource hierarchy
// (https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy),
// a resource outside the Google Cloud resource hierarchy (such as
// Google Kubernetes Engine clusters and objects), or a policy (e.g.
// Cloud IAM policy). See Supported asset types
// (https://cloud.google.com/asset-inventory/docs/supported-asset-types)
// for more information.
type GoogleCloudAssetV1p7beta1RelatedAsset struct {
// Ancestors: The ancestors of an asset in Google Cloud resource
// hierarchy
// (https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy),
// represented as a list of relative resource names. An ancestry path
// starts with the closest ancestor in the hierarchy and ends at root.
// Example: `["projects/123456789", "folders/5432",
// "organizations/1234"]`
Ancestors []string `json:"ancestors,omitempty"`
// Asset: The full name of the asset. Example:
// `//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1`
// See Resource names
// (https://cloud.google.com/apis/design/resource_names#full_resource_name)
// for more information.
Asset string `json:"asset,omitempty"`
// AssetType: The type of the asset. Example:
// `compute.googleapis.com/Disk` See Supported asset types
// (https://cloud.google.com/asset-inventory/docs/supported-asset-types)
// for more information.
AssetType string `json:"assetType,omitempty"`
// ForceSendFields is a list of field names (e.g. "Ancestors") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Ancestors") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleCloudAssetV1p7beta1RelatedAsset) MarshalJSON() ([]byte, error) {
type NoMethod GoogleCloudAssetV1p7beta1RelatedAsset
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleCloudAssetV1p7beta1RelatedAssets: The detailed related assets
// with the `relationship_type`.
type GoogleCloudAssetV1p7beta1RelatedAssets struct {
// Assets: The peer resources of the relationship.
Assets []*GoogleCloudAssetV1p7beta1RelatedAsset `json:"assets,omitempty"`
// RelationshipAttributes: The detailed relation attributes.
RelationshipAttributes *GoogleCloudAssetV1p7beta1RelationshipAttributes `json:"relationshipAttributes,omitempty"`
// ForceSendFields is a list of field names (e.g. "Assets") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Assets") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleCloudAssetV1p7beta1RelatedAssets) MarshalJSON() ([]byte, error) {
type NoMethod GoogleCloudAssetV1p7beta1RelatedAssets
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleCloudAssetV1p7beta1RelationshipAttributes: The relationship
// attributes which include `type`, `source_resource_type`,
// `target_resource_type` and `action`.
type GoogleCloudAssetV1p7beta1RelationshipAttributes struct {
// Action: The detail of the relationship, e.g. `contains`, `attaches`
Action string `json:"action,omitempty"`
// SourceResourceType: The source asset type. Example:
// `compute.googleapis.com/Instance`
SourceResourceType string `json:"sourceResourceType,omitempty"`
// TargetResourceType: The target asset type. Example:
// `compute.googleapis.com/Disk`
TargetResourceType string `json:"targetResourceType,omitempty"`
// Type: The unique identifier of the relationship type. Example:
// `INSTANCE_TO_INSTANCEGROUP`
Type string `json:"type,omitempty"`
// ForceSendFields is a list of field names (e.g. "Action") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Action") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleCloudAssetV1p7beta1RelationshipAttributes) MarshalJSON() ([]byte, error) {
type NoMethod GoogleCloudAssetV1p7beta1RelationshipAttributes
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
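// Illustrative sketch: a RelatedAssets value tying the two preceding types
// together for an INSTANCE_TO_INSTANCEGROUP relationship. Asset names and
// types are placeholders.
//
//	related := &GoogleCloudAssetV1p7beta1RelatedAssets{
//		RelationshipAttributes: &GoogleCloudAssetV1p7beta1RelationshipAttributes{
//			Type:               "INSTANCE_TO_INSTANCEGROUP",
//			SourceResourceType: "compute.googleapis.com/Instance",
//			TargetResourceType: "compute.googleapis.com/InstanceGroup",
//			Action:             "contains",
//		},
//		Assets: []*GoogleCloudAssetV1p7beta1RelatedAsset{{
//			Asset:     "//compute.googleapis.com/projects/my_project_123/zones/zone1/instanceGroups/group1",
//			AssetType: "compute.googleapis.com/InstanceGroup",
//		}},
//	}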
// GoogleCloudAssetV1p7beta1Resource: A representation of a Google Cloud
// resource.
type GoogleCloudAssetV1p7beta1Resource struct {
// Data: The content of the resource, in which some sensitive fields are
// removed and may not be present.
Data googleapi.RawMessage `json:"data,omitempty"`
// DiscoveryDocumentUri: The URL of the discovery document containing
// the resource's JSON schema. Example:
// `https://www.googleapis.com/discovery/v1/apis/compute/v1/rest` This
// value is unspecified for resources that do not have an API based on a
// discovery document, such as Cloud Bigtable.
DiscoveryDocumentUri string `json:"discoveryDocumentUri,omitempty"`
// DiscoveryName: The JSON schema name listed in the discovery document.
// Example: `Project` This value is unspecified for resources that do
// not have an API based on a discovery document, such as Cloud
// Bigtable.
DiscoveryName string `json:"discoveryName,omitempty"`
// Location: The location of the resource in Google Cloud, such as its
// zone and region. For more information, see
// https://cloud.google.com/about/locations/.
Location string `json:"location,omitempty"`
// Parent: The full name of the immediate parent of this resource. See
// Resource Names
// (https://cloud.google.com/apis/design/resource_names#full_resource_name)
// for more information. For Google Cloud assets, this value is the
// parent resource defined in the Cloud IAM policy hierarchy
// (https://cloud.google.com/iam/docs/overview#policy_hierarchy).
// Example:
// `//cloudresourcemanager.googleapis.com/projects/my_project_123` For
// third-party assets, this field may be set differently.
Parent string `json:"parent,omitempty"`
// ResourceUrl: The REST URL for accessing the resource. An HTTP `GET`
// request using this URL returns the resource itself. Example:
// `https://cloudresourcemanager.googleapis.com/v1/projects/my-project-123`
// This value is unspecified for resources without a REST API.
ResourceUrl string `json:"resourceUrl,omitempty"`
// Version: The API version. Example: `v1`
Version string `json:"version,omitempty"`
// ForceSendFields is a list of field names (e.g. "Data") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Data") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleCloudAssetV1p7beta1Resource) MarshalJSON() ([]byte, error) {
type NoMethod GoogleCloudAssetV1p7beta1Resource
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
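// Illustrative sketch: a Resource carrying a raw JSON payload.
// googleapi.RawMessage holds raw JSON bytes, so a string literal can be
// converted directly. All field values here are placeholders.
//
//	res := &GoogleCloudAssetV1p7beta1Resource{
//		Version:       "v1",
//		DiscoveryName: "Project",
//		Parent:        "//cloudresourcemanager.googleapis.com/projects/my_project_123",
//		Data:          googleapi.RawMessage(`{"projectId":"my_project_123"}`),
//	}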
// GoogleCloudOrgpolicyV1BooleanPolicy: Used in `policy_type` to specify
// how `boolean_policy` will behave at this resource.
type GoogleCloudOrgpolicyV1BooleanPolicy struct {
// Enforced: If `true`, then the `Policy` is enforced. If `false`, then
// any configuration is acceptable. Suppose you have a `Constraint`
// `constraints/compute.disableSerialPortAccess` with
// `constraint_default` set to `ALLOW`. A `Policy` for that `Constraint`
// exhibits the following behavior: - If the `Policy` at this resource
// has enforced set to `false`, serial port connection attempts will be
// allowed. - If the `Policy` at this resource has enforced set to
// `true`, serial port connection attempts will be refused. - If the
// `Policy` at this resource is `RestoreDefault`, serial port connection
// attempts will be allowed. - If no `Policy` is set at this resource or
// anywhere higher in the resource hierarchy, serial port connection
// attempts will be allowed. - If no `Policy` is set at this resource,
// but one exists higher in the resource hierarchy, the behavior is as
// if the `Policy` were set at this resource. The following examples
// demonstrate the different possible layerings: Example 1 (nearest
// `Constraint` wins): `organizations/foo` has a `Policy` with:
// {enforced: false} `projects/bar` has no `Policy` set. The constraint
// at `projects/bar` and `organizations/foo` will not be enforced.
// Example 2 (enforcement gets replaced): `organizations/foo` has a
// `Policy` with: {enforced: false} `projects/bar` has a `Policy` with:
// {enforced: true} The constraint at `organizations/foo` is not
// enforced. The constraint at `projects/bar` is enforced. Example 3
// (RestoreDefault): `organizations/foo` has a `Policy` with: {enforced:
// true} `projects/bar` has a `Policy` with: {RestoreDefault: {}} The
// constraint at `organizations/foo` is enforced. The constraint at
// `projects/bar` is not enforced, because `constraint_default` for the
// `Constraint` is `ALLOW`.
Enforced bool `json:"enforced,omitempty"`
// ForceSendFields is a list of field names (e.g. "Enforced") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Enforced") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleCloudOrgpolicyV1BooleanPolicy) MarshalJSON() ([]byte, error) {
type NoMethod GoogleCloudOrgpolicyV1BooleanPolicy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
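// Illustrative sketch: enforcing a boolean constraint, as in Example 2 above
// where `projects/bar` sets {enforced: true}. Because false is the zero value
// and would be dropped by omitempty, a caller that needs to send an explicit
// {enforced: false} lists the field in ForceSendFields.
//
//	enforce := &GoogleCloudOrgpolicyV1BooleanPolicy{Enforced: true}
//
//	relax := &GoogleCloudOrgpolicyV1BooleanPolicy{
//		Enforced:        false,
//		ForceSendFields: []string{"Enforced"},
//	}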
// GoogleCloudOrgpolicyV1ListPolicy: Used in `policy_type` to specify
// how `list_policy` behaves at this resource. `ListPolicy` can define
// specific values and subtrees of Cloud Resource Manager resource
// hierarchy (`Organizations`, `Folders`, `Projects`) that are allowed
// or denied by setting the `allowed_values` and `denied_values` fields.
// This is achieved by using the `under:` and optional `is:` prefixes.
// The `under:` prefix is used to denote resource subtree values. The
// `is:` prefix is used to denote specific values, and is required only
// if the value contains a ":". Values prefixed with "is:" are treated
// the same as values with no prefix. Ancestry subtrees must be in one
// of the following formats: - "projects/", e.g.
// "projects/tokyo-rain-123" - "folders/", e.g. "folders/1234" -
// "organizations/", e.g. "organizations/1234" The `supports_under`
// field of the associated `Constraint` defines whether ancestry
// prefixes can be used. You can set `allowed_values` and
// `denied_values` in the same `Policy` if `all_values` is
// `ALL_VALUES_UNSPECIFIED`. `ALLOW` or `DENY` are used to allow or deny
// all values. If `all_values` is set to either `ALLOW` or `DENY`,
// `allowed_values` and `denied_values` must be unset.
type GoogleCloudOrgpolicyV1ListPolicy struct {
// AllValues: The policy all_values state.
//
// Possible values:
// "ALL_VALUES_UNSPECIFIED" - Indicates that allowed_values or
// denied_values must be set.
// "ALLOW" - A policy with this set allows all values.
// "DENY" - A policy with this set denies all values.
AllValues string `json:"allValues,omitempty"`
// AllowedValues: List of values allowed at this resource. Can only be
// set if `all_values` is set to `ALL_VALUES_UNSPECIFIED`.
AllowedValues []string `json:"allowedValues,omitempty"`
// DeniedValues: List of values denied at this resource. Can only be set
// if `all_values` is set to `ALL_VALUES_UNSPECIFIED`.
DeniedValues []string `json:"deniedValues,omitempty"`
// InheritFromParent: Determines the inheritance behavior for this
// `Policy`. By default, a `ListPolicy` set at a resource supersedes any
// `Policy` set anywhere up the resource hierarchy. However, if
// `inherit_from_parent` is set to `true`, then the values from the
// effective `Policy` of the parent resource are inherited, meaning the
// values set in this `Policy` are added to the values inherited up the
// hierarchy. Setting `Policy` hierarchies that inherit both allowed
// values and denied values isn't recommended in most circumstances to
// keep the configuration simple and understandable. However, it is
// possible to set a `Policy` with `allowed_values` set that inherits a
// `Policy` with `denied_values` set. In this case, the values that are
// allowed must be in `allowed_values` and not present in
// `denied_values`. For example, suppose you have a `Constraint`
// `constraints/serviceuser.services`, which has a `constraint_type` of
// `list_constraint`, and with `constraint_default` set to `ALLOW`.
// Suppose that at the Organization level, a `Policy` is applied that
// restricts the allowed API activations to {`E1`, `E2`}. Then, if a
// `Policy` is applied to a project below the Organization that has
// `inherit_from_parent` set to `false` and field all_values set to
// DENY, then an attempt to activate any API will be denied. The
// following examples demonstrate different possible layerings for
// `projects/bar` parented by `organizations/foo`: Example 1 (no
// inherited values): `organizations/foo` has a `Policy` with values:
// {allowed_values: "E1" allowed_values:"E2"} `projects/bar` has
// `inherit_from_parent` `false` and values: {allowed_values: "E3"
// allowed_values: "E4"} The accepted values at `organizations/foo` are
// `E1`, `E2`. The accepted values at `projects/bar` are `E3`, and `E4`.
// Example 2 (inherited values): `organizations/foo` has a `Policy` with
// values: {allowed_values: "E1" allowed_values:"E2"} `projects/bar` has
// a `Policy` with values: {value: "E3" value: "E4" inherit_from_parent:
// true} The accepted values at `organizations/foo` are `E1`, `E2`. The
// accepted values at `projects/bar` are `E1`, `E2`, `E3`, and `E4`.
// Example 3 (inheriting both allowed and denied values):
// `organizations/foo` has a `Policy` with values: {allowed_values: "E1"
// allowed_values: "E2"} `projects/bar` has a `Policy` with:
// {denied_values: "E1"} The accepted values at `organizations/foo` are
// `E1`, `E2`. The value accepted at `projects/bar` is `E2`. Example 4
// (RestoreDefault): `organizations/foo` has a `Policy` with values:
// {allowed_values: "E1" allowed_values:"E2"} `projects/bar` has a
// `Policy` with values: {RestoreDefault: {}} The accepted values at
// `organizations/foo` are `E1`, `E2`. The accepted values at
// `projects/bar` are either all or none depending on the value of
// `constraint_default` (if `ALLOW`, all; if `DENY`, none). Example 5
// (no policy inherits parent policy): `organizations/foo` has no
// `Policy` set. `projects/bar` has no `Policy` set. The accepted values
// at both levels are either all or none depending on the value of
// `constraint_default` (if `ALLOW`, all; if `DENY`, none). Example 6
// (ListConstraint allowing all): `organizations/foo` has a `Policy`
// with values: {allowed_values: "E1" allowed_values: "E2"}
// `projects/bar` has a `Policy` with: {all: ALLOW} The accepted values
// at `organizations/foo` are `E1`, `E2`. Any value is accepted at
// `projects/bar`. Example 7 (ListConstraint allowing none):
// `organizations/foo` has a `Policy` with values: {allowed_values: "E1"
// allowed_values: "E2"} `projects/bar` has a `Policy` with: {all: DENY}
// The accepted values at `organizations/foo` are `E1`, `E2`. No value is
// accepted at `projects/bar`. Example 10 (allowed and denied subtrees
// of Resource Manager hierarchy): Given the following resource
// hierarchy O1->{F1, F2}; F1->{P1}; F2->{P2, P3}, `organizations/foo`
// has a `Policy` with values: {allowed_values:
// "under:organizations/O1"} `projects/bar` has a `Policy` with:
// {allowed_values: "under:projects/P3"} {denied_values:
// "under:folders/F2"} The accepted values at `organizations/foo` are
// `organizations/O1`, `folders/F1`, `folders/F2`, `projects/P1`,
// `projects/P2`, `projects/P3`. The accepted values at `projects/bar`
// are `organizations/O1`, `folders/F1`, `projects/P1`.
InheritFromParent bool `json:"inheritFromParent,omitempty"`
// SuggestedValue: Optional. The Google Cloud Console will try to
// default to a configuration that matches the value specified in this
// `Policy`. If `suggested_value` is not set, it will inherit the value
// specified higher in the hierarchy, unless `inherit_from_parent` is
// `false`.
SuggestedValue string `json:"suggestedValue,omitempty"`
// ForceSendFields is a list of field names (e.g. "AllValues") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AllValues") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleCloudOrgpolicyV1ListPolicy) MarshalJSON() ([]byte, error) {
type NoMethod GoogleCloudOrgpolicyV1ListPolicy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
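// Illustrative sketch: a ListPolicy that allows an entire folder subtree via
// the `under:` prefix while denying one specific project, and that inherits
// values from the parent resource. The folder and project names are
// placeholders.
//
//	subtreePolicy := &GoogleCloudOrgpolicyV1ListPolicy{
//		AllowedValues:     []string{"under:folders/1234"},
//		DeniedValues:      []string{"projects/tokyo-rain-123"},
//		InheritFromParent: true,
//	}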
// GoogleCloudOrgpolicyV1Policy: Defines a Cloud Organization `Policy`
// which is used to specify `Constraints` for configurations of Cloud
// Platform resources.
type GoogleCloudOrgpolicyV1Policy struct {
// BooleanPolicy: For boolean `Constraints`, whether to enforce the
// `Constraint` or not.
BooleanPolicy *GoogleCloudOrgpolicyV1BooleanPolicy `json:"booleanPolicy,omitempty"`
// Constraint: The name of the `Constraint` the `Policy` is configuring,
// for example, `constraints/serviceuser.services`. A list of available
// constraints
// (/resource-manager/docs/organization-policy/org-policy-constraints)
// is available. Immutable after creation.
Constraint string `json:"constraint,omitempty"`
// Etag: An opaque tag indicating the current version of the `Policy`,
// used for concurrency control. When the `Policy` is returned from
// either a `GetPolicy` or a `ListOrgPolicy` request, this `etag`
// indicates the version of the current `Policy` to use when executing a
// read-modify-write loop. When the `Policy` is returned from a
// `GetEffectivePolicy` request, the `etag` will be unset. When the
// `Policy` is used in a `SetOrgPolicy` method, use the `etag` value
// that was returned from a `GetOrgPolicy` request as part of a
// read-modify-write loop for concurrency control. Not setting the
// `etag` in a `SetOrgPolicy` request will result in an unconditional
// write of the `Policy`.
Etag string `json:"etag,omitempty"`
// ListPolicy: List of values either allowed or disallowed.
ListPolicy *GoogleCloudOrgpolicyV1ListPolicy `json:"listPolicy,omitempty"`
// RestoreDefault: Restores the default behavior of the constraint;
// independent of `Constraint` type.
RestoreDefault *GoogleCloudOrgpolicyV1RestoreDefault `json:"restoreDefault,omitempty"`
// UpdateTime: The time stamp the `Policy` was previously updated. This
// is set by the server, not specified by the caller, and represents the
// last time a call to `SetOrgPolicy` was made for that `Policy`. Any
// value set by the client will be ignored.
UpdateTime string `json:"updateTime,omitempty"`
// Version: Version of the `Policy`. Default version is 0.
Version int64 `json:"version,omitempty"`
// ForceSendFields is a list of field names (e.g. "BooleanPolicy") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "BooleanPolicy") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleCloudOrgpolicyV1Policy) MarshalJSON() ([]byte, error) {
type NoMethod GoogleCloudOrgpolicyV1Policy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
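// Illustrative sketch: a Policy configuring the list constraint
// `constraints/serviceuser.services` to allow only the values E1 and E2, as
// in the ListPolicy examples above. The etag would normally be copied from a
// prior GetOrgPolicy response; the value shown is a placeholder.
//
//	orgPolicy := &GoogleCloudOrgpolicyV1Policy{
//		Constraint: "constraints/serviceuser.services",
//		ListPolicy: &GoogleCloudOrgpolicyV1ListPolicy{
//			AllowedValues: []string{"E1", "E2"},
//		},
//		Etag: "BwWKmjvelug=",
//	}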
// GoogleCloudOrgpolicyV1RestoreDefault: Ignores policies set above this
// resource and restores the `constraint_default` enforcement behavior
// of the specific `Constraint` at this resource. Suppose that
// `constraint_default` is set to `ALLOW` for the `Constraint`
// `constraints/serviceuser.services`. Suppose that organization foo.com
// sets a `Policy` at their Organization resource node that restricts
// the allowed service activations to deny all service activations. They
// could then set a `Policy` with the `policy_type` `restore_default` on
// several experimental projects, restoring the `constraint_default`
// enforcement of the `Constraint` for only those projects, allowing
// those projects to have all services activated.
type GoogleCloudOrgpolicyV1RestoreDefault struct {
}
// GoogleIdentityAccesscontextmanagerV1AccessLevel: An `AccessLevel` is
// a label that can be applied to requests to Google Cloud services,
// along with a list of requirements necessary for the label to be
// applied.
type GoogleIdentityAccesscontextmanagerV1AccessLevel struct {
// Basic: A `BasicLevel` composed of `Conditions`.
Basic *GoogleIdentityAccesscontextmanagerV1BasicLevel `json:"basic,omitempty"`
// Custom: A `CustomLevel` written in the Common Expression Language.
Custom *GoogleIdentityAccesscontextmanagerV1CustomLevel `json:"custom,omitempty"`
// Description: Description of the `AccessLevel` and its use. Does not
// affect behavior.
Description string `json:"description,omitempty"`
// Name: Required. Resource name for the Access Level. The `short_name`
// component must begin with a letter and only include alphanumeric and
// '_'. Format:
// `accessPolicies/{access_policy}/accessLevels/{access_level}`. The
// maximum length of the `access_level` component is 50 characters.
Name string `json:"name,omitempty"`
// Title: Human readable title. Must be unique within the Policy.
Title string `json:"title,omitempty"`
// ForceSendFields is a list of field names (e.g. "Basic") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Basic") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1AccessLevel) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1AccessLevel
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
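// Illustrative sketch: an AccessLevel built from a BasicLevel (defined below)
// with a single IP-subnetwork condition. The policy number, level name, and
// CIDR block are placeholders.
//
//	corpLevel := &GoogleIdentityAccesscontextmanagerV1AccessLevel{
//		Name:  "accessPolicies/123456/accessLevels/trusted_corp",
//		Title: "Trusted corp network",
//		Basic: &GoogleIdentityAccesscontextmanagerV1BasicLevel{
//			CombiningFunction: "AND",
//			Conditions: []*GoogleIdentityAccesscontextmanagerV1Condition{
//				{IpSubnetworks: []string{"203.0.113.0/24"}},
//			},
//		},
//	}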
// GoogleIdentityAccesscontextmanagerV1AccessPolicy: `AccessPolicy` is a
// container for `AccessLevels` (which define the necessary attributes
// to use Google Cloud services) and `ServicePerimeters` (which define
// regions of services able to freely pass data within a perimeter). An
// access policy is globally visible within an organization, and the
// restrictions it specifies apply to all projects within an
// organization.
type GoogleIdentityAccesscontextmanagerV1AccessPolicy struct {
// Etag: Output only. An opaque identifier for the current version of
// the `AccessPolicy`. This will always be a strongly validated etag,
// meaning that two Access Policies will be identical if and only if
// their etags are identical. Clients should not expect this to be in
// any specific format.
Etag string `json:"etag,omitempty"`
// Name: Output only. Resource name of the `AccessPolicy`. Format:
// `accessPolicies/{access_policy}`
Name string `json:"name,omitempty"`
// Parent: Required. The parent of this `AccessPolicy` in the Cloud
// Resource Hierarchy. Currently immutable once created. Format:
// `organizations/{organization_id}`
Parent string `json:"parent,omitempty"`
// Scopes: The scopes of a policy define which resources an ACM policy
// can restrict, and where ACM resources can be referenced. For example,
// a policy with scopes=["folders/123"] has the following behavior: -
// vpcsc perimeters can only restrict projects within folders/123 -
// access levels can only be referenced by resources within folders/123.
// If empty, there are no limitations on which resources can be
// restricted by an ACM policy, and there are no limitations on where
// ACM resources can be referenced. Only one policy can include a given
// scope (attempting to create a second policy which includes
// "folders/123" will result in an error). Currently, scopes cannot be
// modified after a policy is created. Currently, policies can only have
// a single scope. Format: list of `folders/{folder_number}` or
// `projects/{project_number}`
Scopes []string `json:"scopes,omitempty"`
// Title: Required. Human readable title. Does not affect behavior.
Title string `json:"title,omitempty"`
// ForceSendFields is a list of field names (e.g. "Etag") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Etag") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1AccessPolicy) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1AccessPolicy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
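// Illustrative sketch: an AccessPolicy scoped to a single folder, per the
// Scopes description above. The organization and folder numbers are
// placeholders.
//
//	acmPolicy := &GoogleIdentityAccesscontextmanagerV1AccessPolicy{
//		Parent: "organizations/1234",
//		Title:  "Default access policy",
//		Scopes: []string{"folders/123"},
//	}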
// GoogleIdentityAccesscontextmanagerV1ApiOperation: Identification for
// an API Operation.
type GoogleIdentityAccesscontextmanagerV1ApiOperation struct {
// MethodSelectors: API methods or permissions to allow. Method or
// permission must belong to the service specified by `service_name`
// field. A single MethodSelector entry with `*` specified for the
// `method` field will allow all methods AND permissions for the service
// specified in `service_name`.
MethodSelectors []*GoogleIdentityAccesscontextmanagerV1MethodSelector `json:"methodSelectors,omitempty"`
// ServiceName: The name of the API whose methods or permissions the
// IngressPolicy or EgressPolicy wants to allow. A single ApiOperation
// with `service_name` field set to `*` will allow all methods AND
// permissions for all services.
ServiceName string `json:"serviceName,omitempty"`
// ForceSendFields is a list of field names (e.g. "MethodSelectors") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MethodSelectors") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1ApiOperation) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1ApiOperation
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
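// Illustrative sketch: an ApiOperation that allows every method and
// permission of every service by setting the wildcard service name, as
// described above. A narrower operation would instead set ServiceName to a
// specific API and populate MethodSelectors.
//
//	allOperations := &GoogleIdentityAccesscontextmanagerV1ApiOperation{
//		ServiceName: "*",
//	}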
// GoogleIdentityAccesscontextmanagerV1BasicLevel: `BasicLevel` is an
// `AccessLevel` using a set of recommended features.
type GoogleIdentityAccesscontextmanagerV1BasicLevel struct {
// CombiningFunction: How the `conditions` list should be combined to
// determine if a request is granted this `AccessLevel`. If AND is used,
// each `Condition` in `conditions` must be satisfied for the
// `AccessLevel` to be applied. If OR is used, at least one `Condition`
// in `conditions` must be satisfied for the `AccessLevel` to be
// applied. Default behavior is AND.
//
// Possible values:
// "AND" - All `Conditions` must be true for the `BasicLevel` to be
// true.
// "OR" - If at least one `Condition` is true, then the `BasicLevel`
// is true.
CombiningFunction string `json:"combiningFunction,omitempty"`
// Conditions: Required. A list of requirements for the `AccessLevel` to
// be granted.
Conditions []*GoogleIdentityAccesscontextmanagerV1Condition `json:"conditions,omitempty"`
// ForceSendFields is a list of field names (e.g. "CombiningFunction")
// to unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CombiningFunction") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1BasicLevel) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1BasicLevel
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleIdentityAccesscontextmanagerV1Condition: A condition necessary
// for an `AccessLevel` to be granted. The Condition is an AND over its
// fields. So a Condition is true if: 1) the request IP is from one of
// the listed subnetworks AND 2) the originating device complies with
// the listed device policy AND 3) all listed access levels are granted
// AND 4) the request was sent at a time allowed by the
// DateTimeRestriction.
type GoogleIdentityAccesscontextmanagerV1Condition struct {
// DevicePolicy: Device specific restrictions, all restrictions must
// hold for the Condition to be true. If not specified, all devices are
// allowed.
DevicePolicy *GoogleIdentityAccesscontextmanagerV1DevicePolicy `json:"devicePolicy,omitempty"`
// IpSubnetworks: CIDR block IP subnetwork specification. May be IPv4 or
// IPv6. Note that for a CIDR IP address block, the specified IP address
// portion must be properly truncated (i.e. all the host bits must be
// zero) or the input is considered malformed. For example,
// "192.0.2.0/24" is accepted but "192.0.2.1/24" is not. Similarly, for
// IPv6, "2001:db8::/32" is accepted whereas "2001:db8::1/32" is not.
// The originating IP of a request must be in one of the listed subnets
// in order for this Condition to be true. If empty, all IP addresses
// are allowed.
IpSubnetworks []string `json:"ipSubnetworks,omitempty"`
// Members: The request must be made by one of the provided user or
// service accounts. Groups are not supported. Syntax: `user:{emailid}`
// `serviceAccount:{emailid}` If not specified, a request may come from
// any user.
Members []string `json:"members,omitempty"`
// Negate: Whether to negate the Condition. If true, the Condition
// becomes a NAND over its non-empty fields; each field must be false
// for the Condition overall to be satisfied. Defaults to false.
Negate bool `json:"negate,omitempty"`
// Regions: The request must originate from one of the provided
// countries/regions. Must be valid ISO 3166-1 alpha-2 codes.
Regions []string `json:"regions,omitempty"`
// RequiredAccessLevels: A list of other access levels defined in the
// same `Policy`, referenced by resource name. Referencing an
// `AccessLevel` which does not exist is an error. All access levels
// listed must be granted for the Condition to be true. Example:
// "accessPolicies/MY_POLICY/accessLevels/LEVEL_NAME"
RequiredAccessLevels []string `json:"requiredAccessLevels,omitempty"`
// ForceSendFields is a list of field names (e.g. "DevicePolicy") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DevicePolicy") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1Condition) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1Condition
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
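// Illustrative sketch: a Condition that holds only for requests arriving from
// a given subnet, made by a specific service account, from an allowed region.
// All values are placeholders.
//
//	corpCondition := &GoogleIdentityAccesscontextmanagerV1Condition{
//		IpSubnetworks: []string{"203.0.113.0/24"},
//		Members:       []string{"serviceAccount:[email protected]"},
//		Regions:       []string{"US", "CA"},
//	}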
// GoogleIdentityAccesscontextmanagerV1CustomLevel: `CustomLevel` is an
// `AccessLevel` using the Cloud Common Expression Language to represent
// the necessary conditions for the level to apply to a request. See CEL
// spec at: https://github.com/google/cel-spec
type GoogleIdentityAccesscontextmanagerV1CustomLevel struct {
// Expr: Required. A Cloud CEL expression evaluating to a boolean.
Expr *Expr `json:"expr,omitempty"`
// ForceSendFields is a list of field names (e.g. "Expr") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Expr") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1CustomLevel) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1CustomLevel
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
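// Illustrative sketch: a CustomLevel wrapping a CEL expression via the shared
// Expr type. The expression shown is a placeholder, not a documented
// attribute reference.
//
//	customLevel := &GoogleIdentityAccesscontextmanagerV1CustomLevel{
//		Expr: &Expr{Expression: "origin.region_code == 'US'"},
//	}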
// GoogleIdentityAccesscontextmanagerV1DevicePolicy: `DevicePolicy`
// specifies device specific restrictions necessary to acquire a given
// access level. A `DevicePolicy` specifies requirements for requests
// from devices to be granted access levels; it does not do any
// enforcement on the device. `DevicePolicy` acts as an AND over all
// specified fields, and each repeated field is an OR over its elements.
// Any unset fields are ignored. For example, if the proto is { os_type
// : DESKTOP_WINDOWS, os_type : DESKTOP_LINUX, encryption_status:
// ENCRYPTED}, then the DevicePolicy will be true for requests
// originating from encrypted Linux desktops and encrypted Windows
// desktops.
type GoogleIdentityAccesscontextmanagerV1DevicePolicy struct {
// AllowedDeviceManagementLevels: Allowed device management levels, an
// empty list allows all management levels.
//
// Possible values:
// "MANAGEMENT_UNSPECIFIED" - The device's management level is not
// specified or not known.
// "NONE" - The device is not managed.
// "BASIC" - Basic management is enabled, which is generally limited
// to monitoring and wiping the corporate account.
// "COMPLETE" - Complete device management. This includes more
// thorough monitoring and the ability to directly manage the device
// (such as remote wiping). This can be enabled through the Android
// Enterprise Platform.
AllowedDeviceManagementLevels []string `json:"allowedDeviceManagementLevels,omitempty"`
// AllowedEncryptionStatuses: Allowed encryption statuses, an empty
// list allows all statuses.
//
// Possible values:
// "ENCRYPTION_UNSPECIFIED" - The encryption status of the device is
// not specified or not known.
// "ENCRYPTION_UNSUPPORTED" - The device does not support encryption.
// "UNENCRYPTED" - The device supports encryption, but is currently
// unencrypted.
// "ENCRYPTED" - The device is encrypted.
AllowedEncryptionStatuses []string `json:"allowedEncryptionStatuses,omitempty"`
// OsConstraints: Allowed OS versions; an empty list allows all types
// and all versions.
OsConstraints []*GoogleIdentityAccesscontextmanagerV1OsConstraint `json:"osConstraints,omitempty"`
// RequireAdminApproval: Whether the device needs to be approved by the
// customer admin.
RequireAdminApproval bool `json:"requireAdminApproval,omitempty"`
// RequireCorpOwned: Whether the device needs to be corp owned.
RequireCorpOwned bool `json:"requireCorpOwned,omitempty"`
// RequireScreenlock: Whether or not screenlock is required for the
// DevicePolicy to be true. Defaults to `false`.
RequireScreenlock bool `json:"requireScreenlock,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "AllowedDeviceManagementLevels") to unconditionally include in API
// requests. By default, fields with empty or default values are omitted
// from API requests. However, any non-pointer, non-interface field
// appearing in ForceSendFields will be sent to the server regardless of
// whether the field is empty or not. This may be used to include empty
// fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g.
// "AllowedDeviceManagementLevels") to include in API requests with the
// JSON null value. By default, fields with empty values are omitted
// from API requests. However, any field with an empty value appearing
// in NullFields will be sent to the server as null. It is an error if a
// field in this list has a non-empty value. This may be used to include
// null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1DevicePolicy) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1DevicePolicy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
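// Illustrative sketch mirroring the proto example in the DevicePolicy comment
// above: a policy that holds for encrypted Linux and Windows desktops. Not
// part of the generated API surface; the enum strings come from the possible
// values listed on the fields.
func exampleDevicePolicySketch() *GoogleIdentityAccesscontextmanagerV1DevicePolicy {
	return &GoogleIdentityAccesscontextmanagerV1DevicePolicy{
		// Each repeated field is an OR over its elements...
		OsConstraints: []*GoogleIdentityAccesscontextmanagerV1OsConstraint{
			{OsType: "DESKTOP_WINDOWS"},
			{OsType: "DESKTOP_LINUX"},
		},
		// ...while the distinct fields are ANDed together.
		AllowedEncryptionStatuses: []string{"ENCRYPTED"},
	}
}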
// GoogleIdentityAccesscontextmanagerV1EgressFrom: Defines the
// conditions under which an EgressPolicy matches a request. Conditions
// based on information about the source of the request. Note that if
// the destination of the request is also protected by a
// ServicePerimeter, then that ServicePerimeter must have an
// IngressPolicy which allows access in order for this request to
// succeed.
type GoogleIdentityAccesscontextmanagerV1EgressFrom struct {
// Identities: A list of identities that are allowed access through this
// EgressPolicy. Each identity should be in the format of an email
// address and should represent an individual user or service account
// only.
Identities []string `json:"identities,omitempty"`
// IdentityType: Specifies the type of identities that are allowed
// access to outside the perimeter. If left unspecified, then members of
// `identities` field will be allowed access.
//
// Possible values:
// "IDENTITY_TYPE_UNSPECIFIED" - No blanket identity group specified.
// "ANY_IDENTITY" - Authorize access from all identities outside the
// perimeter.
// "ANY_USER_ACCOUNT" - Authorize access from all human users outside
// the perimeter.
// "ANY_SERVICE_ACCOUNT" - Authorize access from all service accounts
// outside the perimeter.
IdentityType string `json:"identityType,omitempty"`
// ForceSendFields is a list of field names (e.g. "Identities") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Identities") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1EgressFrom) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1EgressFrom
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleIdentityAccesscontextmanagerV1EgressPolicy: Policy for egress
// from perimeter. EgressPolicies match requests based on `egress_from`
// and `egress_to` stanzas. For an EgressPolicy to match, both
// `egress_from` and `egress_to` stanzas must be matched. If an
// EgressPolicy matches a request, the request is allowed to span the
// ServicePerimeter boundary. For example, an EgressPolicy can be used
// to allow VMs on networks within the ServicePerimeter to access a
// defined set of projects outside the perimeter in certain contexts
// (e.g. to read data from a Cloud Storage bucket or query against a
// BigQuery dataset). EgressPolicies are concerned with the *resources*
// that a request relates to, as well as the API services and API
// actions being used. They are not related to the direction of data
// movement.
// More detailed documentation for this concept can be found in the
// descriptions of EgressFrom and EgressTo.
type GoogleIdentityAccesscontextmanagerV1EgressPolicy struct {
// EgressFrom: Defines conditions on the source of a request causing
// this EgressPolicy to apply.
EgressFrom *GoogleIdentityAccesscontextmanagerV1EgressFrom `json:"egressFrom,omitempty"`
// EgressTo: Defines the conditions on the ApiOperation and destination
// resources that cause this EgressPolicy to apply.
EgressTo *GoogleIdentityAccesscontextmanagerV1EgressTo `json:"egressTo,omitempty"`
// ForceSendFields is a list of field names (e.g. "EgressFrom") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "EgressFrom") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1EgressPolicy) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1EgressPolicy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleIdentityAccesscontextmanagerV1EgressTo: Defines the conditions
// under which an EgressPolicy matches a request. Conditions are based
// on information about the ApiOperation intended to be performed on the
// `resources` specified. Note that if the destination of the request is
// also protected by a ServicePerimeter, then that ServicePerimeter must
// have an IngressPolicy which allows access in order for this request
// to succeed. The request must match `operations` AND `resources`
// fields in order to be allowed egress out of the perimeter.
type GoogleIdentityAccesscontextmanagerV1EgressTo struct {
// Operations: A list of ApiOperations allowed to be performed by the
// sources specified in the corresponding EgressFrom. A request matches
// if it uses an operation/service in this list.
Operations []*GoogleIdentityAccesscontextmanagerV1ApiOperation `json:"operations,omitempty"`
// Resources: A list of resources, currently only projects in the form
// `projects/`, that are allowed to be accessed by sources defined in
// the corresponding EgressFrom. A request matches if it contains a
// resource in this list. If `*` is specified for `resources`, then this
// EgressTo rule will authorize access to all resources outside the
// perimeter.
Resources []string `json:"resources,omitempty"`
// ForceSendFields is a list of field names (e.g. "Operations") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Operations") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1EgressTo) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1EgressTo
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
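// Illustrative sketch: an EgressPolicy whose EgressFrom and EgressTo stanzas
// together allow one identity to reach resources in a single external
// project. The identity and project number are placeholders, not real
// values, and the sketch is not part of the generated API surface.
func exampleEgressPolicySketch() *GoogleIdentityAccesscontextmanagerV1EgressPolicy {
	return &GoogleIdentityAccesscontextmanagerV1EgressPolicy{
		EgressFrom: &GoogleIdentityAccesscontextmanagerV1EgressFrom{
			// Placeholder identity in the email format described above.
			Identities: []string{"[email protected]"},
		},
		EgressTo: &GoogleIdentityAccesscontextmanagerV1EgressTo{
			// Placeholder project allowed as an egress destination.
			Resources: []string{"projects/123456789"},
		},
	}
}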
// GoogleIdentityAccesscontextmanagerV1IngressFrom: Defines the
// conditions under which an IngressPolicy matches a request. Conditions
// are based on information about the source of the request. The request
// must satisfy what is defined in `sources` AND identity related fields
// in order to match.
type GoogleIdentityAccesscontextmanagerV1IngressFrom struct {
// Identities: A list of identities that are allowed access through this
// ingress policy. Each identity should be in the format of an email
// address and should represent an individual user or service account
// only.
Identities []string `json:"identities,omitempty"`
// IdentityType: Specifies the type of identities that are allowed
// access from outside the perimeter. If left unspecified, then members
// of `identities` field will be allowed access.
//
// Possible values:
// "IDENTITY_TYPE_UNSPECIFIED" - No blanket identity group specified.
// "ANY_IDENTITY" - Authorize access from all identities outside the
// perimeter.
// "ANY_USER_ACCOUNT" - Authorize access from all human users outside
// the perimeter.
// "ANY_SERVICE_ACCOUNT" - Authorize access from all service accounts
// outside the perimeter.
IdentityType string `json:"identityType,omitempty"`
// Sources: Sources that this IngressPolicy authorizes access from.
Sources []*GoogleIdentityAccesscontextmanagerV1IngressSource `json:"sources,omitempty"`
// ForceSendFields is a list of field names (e.g. "Identities") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Identities") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1IngressFrom) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1IngressFrom
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleIdentityAccesscontextmanagerV1IngressPolicy: Policy for ingress
// into ServicePerimeter. IngressPolicies match requests based on
// `ingress_from` and `ingress_to` stanzas. For an ingress policy to
// match, both the `ingress_from` and `ingress_to` stanzas must be
// matched. If an IngressPolicy matches a request, the request is
// allowed through the perimeter boundary from outside the perimeter.
// For example, access from the internet can be allowed either based on
// an AccessLevel or, for traffic hosted on Google Cloud, the project of
// the source network. For access from private networks, using the
// project of the hosting network is required. Individual ingress
// policies can be limited by restricting which services and/or actions
// they match using the `ingress_to` field.
type GoogleIdentityAccesscontextmanagerV1IngressPolicy struct {
// IngressFrom: Defines the conditions on the source of a request
// causing this IngressPolicy to apply.
IngressFrom *GoogleIdentityAccesscontextmanagerV1IngressFrom `json:"ingressFrom,omitempty"`
// IngressTo: Defines the conditions on the ApiOperation and request
// destination that cause this IngressPolicy to apply.
IngressTo *GoogleIdentityAccesscontextmanagerV1IngressTo `json:"ingressTo,omitempty"`
// ForceSendFields is a list of field names (e.g. "IngressFrom") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "IngressFrom") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1IngressPolicy) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1IngressPolicy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleIdentityAccesscontextmanagerV1IngressSource: The source that
// IngressPolicy authorizes access from.
type GoogleIdentityAccesscontextmanagerV1IngressSource struct {
// AccessLevel: An AccessLevel resource name that allows resources within
// the ServicePerimeters to be accessed from the internet. AccessLevels
// listed must be in the same policy as this ServicePerimeter.
// Referencing a nonexistent AccessLevel will cause an error. If no
// AccessLevel names are listed, resources within the perimeter can only
// be accessed via Google Cloud calls with request origins within the
// perimeter. Example: `accessPolicies/MY_POLICY/accessLevels/MY_LEVEL`.
// If a single `*` is specified for `access_level`, then all
// IngressSources will be allowed.
AccessLevel string `json:"accessLevel,omitempty"`
// Resource: A Google Cloud resource that is allowed to ingress the
// perimeter. Requests from these resources will be allowed to access
// perimeter data. Currently only projects are allowed. Format:
// `projects/{project_number}`. The project may be in any Google Cloud
// organization, not just the organization that the perimeter is defined
// in. `*` is not allowed; the case of allowing all Google Cloud
// resources only is not supported.
Resource string `json:"resource,omitempty"`
// ForceSendFields is a list of field names (e.g. "AccessLevel") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AccessLevel") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1IngressSource) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1IngressSource
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleIdentityAccesscontextmanagerV1IngressTo: Defines the conditions
// under which an IngressPolicy matches a request. Conditions are based
// on information about the ApiOperation intended to be performed on the
// target resource of the request. The request must satisfy what is
// defined in `operations` AND `resources` in order to match.
type GoogleIdentityAccesscontextmanagerV1IngressTo struct {
// Operations: A list of ApiOperations allowed to be performed by the
// sources specified in the corresponding IngressFrom in this
// ServicePerimeter.
Operations []*GoogleIdentityAccesscontextmanagerV1ApiOperation `json:"operations,omitempty"`
// Resources: A list of resources, currently only projects in the form
// `projects/`, protected by this ServicePerimeter that are allowed to
// be accessed by sources defined in the corresponding IngressFrom. If a
// single `*` is specified, then access to all resources inside the
// perimeter are allowed.
Resources []string `json:"resources,omitempty"`
// ForceSendFields is a list of field names (e.g. "Operations") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Operations") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1IngressTo) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1IngressTo
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
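// Illustrative sketch: an IngressPolicy admitting requests from one external
// project into every resource inside the perimeter. The project number is a
// placeholder; the sketch is not part of the generated API surface.
func exampleIngressPolicySketch() *GoogleIdentityAccesscontextmanagerV1IngressPolicy {
	return &GoogleIdentityAccesscontextmanagerV1IngressPolicy{
		IngressFrom: &GoogleIdentityAccesscontextmanagerV1IngressFrom{
			Sources: []*GoogleIdentityAccesscontextmanagerV1IngressSource{
				{Resource: "projects/123456789"}, // placeholder source project
			},
			IdentityType: "ANY_IDENTITY", // accept any identity arriving from that source
		},
		IngressTo: &GoogleIdentityAccesscontextmanagerV1IngressTo{
			Resources: []string{"*"}, // `*` allows access to all resources inside the perimeter
		},
	}
}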
// GoogleIdentityAccesscontextmanagerV1MethodSelector: An allowed method
// or permission of a service specified in ApiOperation.
type GoogleIdentityAccesscontextmanagerV1MethodSelector struct {
// Method: Value for `method` should be a valid method name for the
// corresponding `service_name` in ApiOperation. If `*` is used as the
// value for `method`, then ALL methods and permissions are allowed.
Method string `json:"method,omitempty"`
// Permission: Value for `permission` should be a valid Cloud IAM
// permission for the corresponding `service_name` in ApiOperation.
Permission string `json:"permission,omitempty"`
// ForceSendFields is a list of field names (e.g. "Method") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Method") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1MethodSelector) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1MethodSelector
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
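// Illustrative sketch: a MethodSelector that allows every method and
// permission of the service named by the enclosing ApiOperation. Setting
// Permission instead of Method would restrict the selector to a single Cloud
// IAM permission. Not part of the generated API surface.
func exampleMethodSelectorSketch() *GoogleIdentityAccesscontextmanagerV1MethodSelector {
	return &GoogleIdentityAccesscontextmanagerV1MethodSelector{
		Method: "*", // `*` allows ALL methods and permissions
	}
}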
// GoogleIdentityAccesscontextmanagerV1OsConstraint: A restriction on
// the OS type and version of devices making requests.
type GoogleIdentityAccesscontextmanagerV1OsConstraint struct {
// MinimumVersion: The minimum allowed OS version. If not set, any
// version of this OS satisfies the constraint. Format:
// "major.minor.patch". Examples: "10.5.301", "9.2.1".
MinimumVersion string `json:"minimumVersion,omitempty"`
// OsType: Required. The allowed OS type.
//
// Possible values:
// "OS_UNSPECIFIED" - The operating system of the device is not
// specified or not known.
// "DESKTOP_MAC" - A desktop Mac operating system.
// "DESKTOP_WINDOWS" - A desktop Windows operating system.
// "DESKTOP_LINUX" - A desktop Linux operating system.
// "DESKTOP_CHROME_OS" - A desktop ChromeOS operating system.
// "ANDROID" - An Android operating system.
// "IOS" - An iOS operating system.
OsType string `json:"osType,omitempty"`
// RequireVerifiedChromeOs: Only allows requests from devices with a
// verified Chrome OS. Verification includes requirements that the
// device is enterprise-managed, conformant to domain policies, and the
// caller has permission to call the API targeted by the request.
RequireVerifiedChromeOs bool `json:"requireVerifiedChromeOs,omitempty"`
// ForceSendFields is a list of field names (e.g. "MinimumVersion") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MinimumVersion") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1OsConstraint) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1OsConstraint
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleIdentityAccesscontextmanagerV1ServicePerimeter:
// `ServicePerimeter` describes a set of Google Cloud resources which
// can freely import and export data amongst themselves, but not export
// outside of the `ServicePerimeter`. If a request with a source within
// this `ServicePerimeter` has a target outside of the
// `ServicePerimeter`, the request will be blocked. Otherwise the
// request is allowed. There are two types of Service Perimeter:
// Regular and Bridge. Regular Service Perimeters cannot overlap; a
// single Google Cloud project can only belong to a single regular
// Service Perimeter. Service Perimeter Bridges can contain only Google
// Cloud projects as members; a single Google Cloud project may belong
// to multiple Service Perimeter Bridges.
type GoogleIdentityAccesscontextmanagerV1ServicePerimeter struct {
// Description: Description of the `ServicePerimeter` and its use. Does
// not affect behavior.
Description string `json:"description,omitempty"`
// Name: Required. Resource name for the ServicePerimeter. The
// `short_name` component must begin with a letter and only include
// alphanumeric and '_'. Format:
// `accessPolicies/{access_policy}/servicePerimeters/{service_perimeter}`
Name string `json:"name,omitempty"`
// PerimeterType: Perimeter type indicator. A single project is allowed
// to be a member of a single regular perimeter, but of multiple service
// perimeter bridges. A project cannot be included in a perimeter
// bridge without being included in a regular perimeter. For perimeter
// bridges, the restricted service list as well as access level lists
// must be empty.
//
// Possible values:
// "PERIMETER_TYPE_REGULAR" - Regular Perimeter.
// "PERIMETER_TYPE_BRIDGE" - Perimeter Bridge.
PerimeterType string `json:"perimeterType,omitempty"`
// Spec: Proposed (or dry run) ServicePerimeter configuration. This
// configuration allows one to specify and test ServicePerimeter
// configuration without enforcing actual access restrictions. Only
// allowed to be set when the "use_explicit_dry_run_spec" flag is set.
Spec *GoogleIdentityAccesscontextmanagerV1ServicePerimeterConfig `json:"spec,omitempty"`
// Status: Current ServicePerimeter configuration. Specifies sets of
// resources, restricted services and access levels that determine
// perimeter content and boundaries.
Status *GoogleIdentityAccesscontextmanagerV1ServicePerimeterConfig `json:"status,omitempty"`
// Title: Human readable title. Must be unique within the Policy.
Title string `json:"title,omitempty"`
// UseExplicitDryRunSpec: Use explicit dry run spec flag. Ordinarily, a
// dry-run spec implicitly exists for all Service Perimeters, and that
// spec is identical to the status for those Service Perimeters. When
// this flag is set, it inhibits the generation of the implicit spec,
// thereby allowing the user to explicitly provide a configuration
// ("spec") to use in a dry-run version of the Service Perimeter. This
// allows the user to test changes to the enforced config ("status")
// without actually enforcing them. This testing is done through
// analyzing the differences between currently enforced and suggested
// restrictions. use_explicit_dry_run_spec must be set to True if any
// of the fields in the spec are set to non-default values.
UseExplicitDryRunSpec bool `json:"useExplicitDryRunSpec,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1ServicePerimeter) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1ServicePerimeter
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleIdentityAccesscontextmanagerV1ServicePerimeterConfig:
// `ServicePerimeterConfig` specifies a set of Google Cloud resources
// that describe specific Service Perimeter configuration.
type GoogleIdentityAccesscontextmanagerV1ServicePerimeterConfig struct {
// AccessLevels: A list of `AccessLevel` resource names that allow
// resources within the `ServicePerimeter` to be accessed from the
// internet. `AccessLevels` listed must be in the same policy as this
// `ServicePerimeter`. Referencing a nonexistent `AccessLevel` is a
// syntax error. If no `AccessLevel` names are listed, resources within
// the perimeter can only be accessed via Google Cloud calls with
// request origins within the perimeter. Example:
// "accessPolicies/MY_POLICY/accessLevels/MY_LEVEL". For Service
// Perimeter Bridge, must be empty.
AccessLevels []string `json:"accessLevels,omitempty"`
// EgressPolicies: List of EgressPolicies to apply to the perimeter. A
// perimeter may have multiple EgressPolicies, each of which is
// evaluated separately. Access is granted if any EgressPolicy grants
// it. Must be empty for a perimeter bridge.
EgressPolicies []*GoogleIdentityAccesscontextmanagerV1EgressPolicy `json:"egressPolicies,omitempty"`
// IngressPolicies: List of IngressPolicies to apply to the perimeter. A
// perimeter may have multiple IngressPolicies, each of which is
// evaluated separately. Access is granted if any Ingress Policy grants
// it. Must be empty for a perimeter bridge.
IngressPolicies []*GoogleIdentityAccesscontextmanagerV1IngressPolicy `json:"ingressPolicies,omitempty"`
// Resources: A list of Google Cloud resources that are inside of the
// service perimeter. Currently only projects are allowed. Format:
// `projects/{project_number}`
Resources []string `json:"resources,omitempty"`
// RestrictedServices: Google Cloud services that are subject to the
// Service Perimeter restrictions. For example, if
// `storage.googleapis.com` is specified, access to the storage buckets
// inside the perimeter must meet the perimeter's access restrictions.
RestrictedServices []string `json:"restrictedServices,omitempty"`
// VpcAccessibleServices: Configuration for APIs allowed within
// Perimeter.
VpcAccessibleServices *GoogleIdentityAccesscontextmanagerV1VpcAccessibleServices `json:"vpcAccessibleServices,omitempty"`
// ForceSendFields is a list of field names (e.g. "AccessLevels") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AccessLevels") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1ServicePerimeterConfig) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1ServicePerimeterConfig
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
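// Illustrative sketch: a regular ServicePerimeter whose enforced status
// restricts Cloud Storage for a single project and limits VPC-accessible
// services to the restricted set. The policy number, project number, and
// short name are placeholders; the sketch is not part of the generated API
// surface.
func exampleServicePerimeterSketch() *GoogleIdentityAccesscontextmanagerV1ServicePerimeter {
	return &GoogleIdentityAccesscontextmanagerV1ServicePerimeter{
		Name:          "accessPolicies/123/servicePerimeters/storage_perimeter",
		Title:         "storage_perimeter",
		PerimeterType: "PERIMETER_TYPE_REGULAR",
		Status: &GoogleIdentityAccesscontextmanagerV1ServicePerimeterConfig{
			Resources:          []string{"projects/123456789"},
			RestrictedServices: []string{"storage.googleapis.com"},
			VpcAccessibleServices: &GoogleIdentityAccesscontextmanagerV1VpcAccessibleServices{
				EnableRestriction: true,
				AllowedServices:   []string{"RESTRICTED-SERVICES"},
			},
		},
	}
}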
// GoogleIdentityAccesscontextmanagerV1VpcAccessibleServices: Specifies
// how APIs are allowed to communicate within the Service Perimeter.
type GoogleIdentityAccesscontextmanagerV1VpcAccessibleServices struct {
// AllowedServices: The list of APIs usable within the Service
// Perimeter. Must be empty unless 'enable_restriction' is True. You can
// specify a list of individual services, as well as include the
// 'RESTRICTED-SERVICES' value, which automatically includes all of the
// services protected by the perimeter.
AllowedServices []string `json:"allowedServices,omitempty"`
// EnableRestriction: Whether to restrict API calls within the Service
// Perimeter to the list of APIs specified in 'allowed_services'.
EnableRestriction bool `json:"enableRestriction,omitempty"`
// ForceSendFields is a list of field names (e.g. "AllowedServices") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AllowedServices") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GoogleIdentityAccesscontextmanagerV1VpcAccessibleServices) MarshalJSON() ([]byte, error) {
type NoMethod GoogleIdentityAccesscontextmanagerV1VpcAccessibleServices
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// IamPolicySearchResult: The result for an IAM policy search.
type IamPolicySearchResult struct {
// Explanation: Explanation about the IAM policy search result. It
// contains additional information that explains why the search result
// matches the query.
Explanation *Explanation `json:"explanation,omitempty"`
// Policy: The IAM policy attached to the specified resource. Note that
// the original IAM policy can contain multiple bindings. This only
// contains the bindings that match the given query. For queries that
// don't contain a constraint on policies (e.g. an empty query), this
// contains all the bindings.
Policy *Policy `json:"policy,omitempty"`
// Project: The project that the associated Google Cloud resource
// belongs to, in the form of `projects/{project_number}`. If an IAM
// policy is set on a resource -- such as a Compute Engine instance or a
// Cloud Storage bucket -- the project field will indicate the project
// that contains the resource. If an IAM policy is set on a folder or
// organization, the project field will be empty.
Project string `json:"project,omitempty"`
// Resource: The full resource name
// (https://cloud.google.com/apis/design/resource_names#full_resource_name)
// of the resource associated with this IAM policy.
Resource string `json:"resource,omitempty"`
// ForceSendFields is a list of field names (e.g. "Explanation") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Explanation") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *IamPolicySearchResult) MarshalJSON() ([]byte, error) {
type NoMethod IamPolicySearchResult
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Permissions: IAM permissions.
type Permissions struct {
// Permissions: A list of permissions. Example permission string:
// "compute.disk.get".
Permissions []string `json:"permissions,omitempty"`
// ForceSendFields is a list of field names (e.g. "Permissions") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Permissions") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Permissions) MarshalJSON() ([]byte, error) {
type NoMethod Permissions
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Policy: An Identity and Access Management (IAM) policy, which
// specifies access controls for Google Cloud resources. A `Policy` is a
// collection of `bindings`. A `binding` binds one or more `members`, or
// principals, to a single `role`. Principals can be user accounts,
// service accounts, Google groups, and domains (such as G Suite). A
// `role` is a named list of permissions; each `role` can be an IAM
// predefined role or a user-created custom role. For some types of
// Google Cloud resources, a `binding` can also specify a `condition`,
// which is a logical expression that allows access to a resource only
// if the expression evaluates to `true`. A condition can add
// constraints based on attributes of the request, the resource, or
// both. To learn which resources support conditions in their IAM
// policies, see the IAM documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
// **JSON example:** { "bindings": [ { "role":
// "roles/resourcemanager.organizationAdmin", "members": [
// "user:[email protected]", "group:[email protected]",
// "domain:google.com",
// "serviceAccount:[email protected]" ] }, {
// "role": "roles/resourcemanager.organizationViewer", "members": [
// "user:[email protected]" ], "condition": { "title": "expirable access",
// "description": "Does not grant access after Sep 2020", "expression":
// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ],
// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: -
// members: - user:[email protected] - group:[email protected] -
// domain:google.com -
// serviceAccount:[email protected] role:
// roles/resourcemanager.organizationAdmin - members: -
// user:[email protected] role: roles/resourcemanager.organizationViewer
// condition: title: expirable access description: Does not grant access
// after Sep 2020 expression: request.time <
// timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3
// For a description of IAM and its features, see the IAM documentation
// (https://cloud.google.com/iam/docs/).
type Policy struct {
// AuditConfigs: Specifies cloud audit logging configuration for this
// policy.
AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"`
// Bindings: Associates a list of `members`, or principals, with a
// `role`. Optionally, may specify a `condition` that determines how and
// when the `bindings` are applied. Each of the `bindings` must contain
// at least one principal. The `bindings` in a `Policy` can refer to up
// to 1,500 principals; up to 250 of these principals can be Google
// groups. Each occurrence of a principal counts towards these limits.
// For example, if the `bindings` grant 50 different roles to
// `user:[email protected]`, and not to any other principal, then you
// can add another 1,450 principals to the `bindings` in the `Policy`.
Bindings []*Binding `json:"bindings,omitempty"`
// Etag: `etag` is used for optimistic concurrency control as a way to
// help prevent simultaneous updates of a policy from overwriting each
// other. It is strongly suggested that systems make use of the `etag`
// in the read-modify-write cycle to perform policy updates in order to
// avoid race conditions: An `etag` is returned in the response to
// `getIamPolicy`, and systems are expected to put that etag in the
// request to `setIamPolicy` to ensure that their change will be applied
// to the same version of the policy. **Important:** If you use IAM
// Conditions, you must include the `etag` field whenever you call
// `setIamPolicy`. If you omit this field, then IAM allows you to
// overwrite a version `3` policy with a version `1` policy, and all of
// the conditions in the version `3` policy are lost.
Etag string `json:"etag,omitempty"`
// Version: Specifies the format of the policy. Valid values are `0`,
// `1`, and `3`. Requests that specify an invalid value are rejected.
// Any operation that affects conditional role bindings must specify
// version `3`. This requirement applies to the following operations: *
// Getting a policy that includes a conditional role binding * Adding a
// conditional role binding to a policy * Changing a conditional role
// binding in a policy * Removing any role binding, with or without a
// condition, from a policy that includes conditions **Important:** If
// you use IAM Conditions, you must include the `etag` field whenever
// you call `setIamPolicy`. If you omit this field, then IAM allows you
// to overwrite a version `3` policy with a version `1` policy, and all
// of the conditions in the version `3` policy are lost. If a policy
// does not include any conditions, operations on that policy may
// specify any valid version or leave the field unset. To learn which
// resources support conditions in their IAM policies, see the IAM
// documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
Version int64 `json:"version,omitempty"`
// ForceSendFields is a list of field names (e.g. "AuditConfigs") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AuditConfigs") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Policy) MarshalJSON() ([]byte, error) {
type NoMethod Policy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
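// Illustrative sketch translating the JSON example from the Policy comment
// above into this package's types. The Binding and Expr field names (Role,
// Members, Condition, Title, Description, Expression) are assumed from their
// definitions elsewhere in this package; the sketch is not part of the
// generated API surface.
func examplePolicySketch() *Policy {
	return &Policy{
		Bindings: []*Binding{
			{
				Role: "roles/resourcemanager.organizationAdmin",
				Members: []string{
					"user:[email protected]",
					"group:[email protected]",
					"domain:google.com",
					"serviceAccount:[email protected]",
				},
			},
			{
				Role:    "roles/resourcemanager.organizationViewer",
				Members: []string{"user:[email protected]"},
				Condition: &Expr{
					Title:       "expirable access",
					Description: "Does not grant access after Sep 2020",
					Expression:  "request.time < timestamp('2020-10-01T00:00:00.000Z')",
				},
			},
		},
		Etag:    "BwWWja0YfJA=",
		Version: 3,
	}
}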
// SearchAllIamPoliciesResponse: Search all IAM policies response.
type SearchAllIamPoliciesResponse struct {
// NextPageToken: Set if there are more results than those appearing in
// this response; to get the next set of results, call this method
// again, using this value as the `page_token`.
NextPageToken string `json:"nextPageToken,omitempty"`
// Results: A list of IAM policies that match the search query. Related
// information such as the associated resource is returned along with
// the policy.
Results []*IamPolicySearchResult `json:"results,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SearchAllIamPoliciesResponse) MarshalJSON() ([]byte, error) {
type NoMethod SearchAllIamPoliciesResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SearchAllResourcesResponse: Search all resources response.
type SearchAllResourcesResponse struct {
// NextPageToken: If there are more results than those appearing in this
// response, then `next_page_token` is included. To get the next set of
// results, call this method again using the value of `next_page_token`
// as `page_token`.
NextPageToken string `json:"nextPageToken,omitempty"`
// Results: A list of resources that match the search query.
Results []*StandardResourceMetadata `json:"results,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SearchAllResourcesResponse) MarshalJSON() ([]byte, error) {
type NoMethod SearchAllResourcesResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// StandardResourceMetadata: The standard metadata of a cloud resource.
type StandardResourceMetadata struct {
// AdditionalAttributes: Additional searchable attributes of this
// resource. Informational only. The exact set of attributes is subject
// to change. For example: project ID, DNS name, etc.
AdditionalAttributes []string `json:"additionalAttributes,omitempty"`
// AssetType: The type of this resource. For example:
// "compute.googleapis.com/Disk".
AssetType string `json:"assetType,omitempty"`
// Description: One or more paragraphs of text description of this
// resource. Maximum length could be up to 1M bytes.
Description string `json:"description,omitempty"`
// DisplayName: The display name of this resource.
DisplayName string `json:"displayName,omitempty"`
// Labels: Labels associated with this resource. See Labelling and
// grouping GCP resources
// (https://cloud.google.com/blog/products/gcp/labelling-and-grouping-your-google-cloud-platform-resources)
// for more information.
Labels map[string]string `json:"labels,omitempty"`
// Location: Location can be "global", regional like "us-east1", or
// zonal like "us-west1-b".
Location string `json:"location,omitempty"`
// Name: The full resource name. For example:
// `//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1`.
// See Resource Names
// (https://cloud.google.com/apis/design/resource_names#full_resource_name)
// for more information.
Name string `json:"name,omitempty"`
// NetworkTags: Network tags associated with this resource. Like labels,
// network tags are a type of annotation used to group GCP resources.
// See Labelling and grouping GCP resources
// (https://cloud.google.com/blog/products/gcp/labelling-and-grouping-your-google-cloud-platform-resources)
// for more information.
NetworkTags []string `json:"networkTags,omitempty"`
// Project: The project that this resource belongs to, in the form of
// `projects/{project_number}`.
Project string `json:"project,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "AdditionalAttributes") to unconditionally include in API requests.
// By default, fields with empty or default values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AdditionalAttributes") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *StandardResourceMetadata) MarshalJSON() ([]byte, error) {
type NoMethod StandardResourceMetadata
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// method id "cloudasset.iamPolicies.searchAll":
type IamPoliciesSearchAllCall struct {
s *Service
scope string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// SearchAll: Searches all the IAM policies within a given accessible
// CRM scope (project/folder/organization). This RPC gives callers,
// especially administrators, the ability to search all the IAM policies
// within a scope, even if they don't have `.getIamPolicy` permission on
// all the IAM policies. Callers should have
// `cloud.assets.SearchAllIamPolicies` permission on the requested
// scope, otherwise the request will be rejected.
//
// - scope: The relative name of an asset. The search is limited to the
// resources within the `scope`. The allowed value must be: *
// Organization number (such as "organizations/123") * Folder
//   number (such as "folders/1234") * Project number (such as
// "projects/12345") * Project id (such as "projects/abc").
func (r *IamPoliciesService) SearchAll(scope string) *IamPoliciesSearchAllCall {
c := &IamPoliciesSearchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.scope = scope
return c
}
// PageSize sets the optional parameter "pageSize": The page size for
// search result pagination. Page size is capped at 500 even if a larger
// value is given. If set to zero, server will pick an appropriate
// default. Returned results may be fewer than requested. When this
// happens, there could be more results as long as `next_page_token` is
// returned.
func (c *IamPoliciesSearchAllCall) PageSize(pageSize int64) *IamPoliciesSearchAllCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If present,
// retrieve the next batch of results from the preceding call to this
// method. `page_token` must be the value of `next_page_token` from the
// previous response. The values of all other method parameters must be
// identical to those in the previous call.
func (c *IamPoliciesSearchAllCall) PageToken(pageToken string) *IamPoliciesSearchAllCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Query sets the optional parameter "query": The query statement.
// Examples: * "policy:[email protected]" *
// "policy:([email protected] viewer)"
func (c *IamPoliciesSearchAllCall) Query(query string) *IamPoliciesSearchAllCall {
c.urlParams_.Set("query", query)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *IamPoliciesSearchAllCall) Fields(s ...googleapi.Field) *IamPoliciesSearchAllCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *IamPoliciesSearchAllCall) IfNoneMatch(entityTag string) *IamPoliciesSearchAllCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *IamPoliciesSearchAllCall) Context(ctx context.Context) *IamPoliciesSearchAllCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *IamPoliciesSearchAllCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *IamPoliciesSearchAllCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211227")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/{+scope}/iamPolicies:searchAll")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"scope": c.scope,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudasset.iamPolicies.searchAll" call.
// Exactly one of *SearchAllIamPoliciesResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *SearchAllIamPoliciesResponse.ServerResponse.Header or (if a
// response was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *IamPoliciesSearchAllCall) Do(opts ...googleapi.CallOption) (*SearchAllIamPoliciesResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &SearchAllIamPoliciesResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Searches all the IAM policies within a given accessible CRM scope (project/folder/organization). This RPC gives callers especially administrators the ability to search all the IAM policies within a scope, even if they don't have `.getIamPolicy` permission of all the IAM policies. Callers should have `cloud.assets.SearchAllIamPolicies` permission on the requested scope, otherwise the request will be rejected.",
// "flatPath": "v1p1beta1/{v1p1beta1Id}/{v1p1beta1Id1}/iamPolicies:searchAll",
// "httpMethod": "GET",
// "id": "cloudasset.iamPolicies.searchAll",
// "parameterOrder": [
// "scope"
// ],
// "parameters": {
// "pageSize": {
// "description": "Optional. The page size for search result pagination. Page size is capped at 500 even if a larger value is given. If set to zero, server will pick an appropriate default. Returned results may be fewer than requested. When this happens, there could be more results as long as `next_page_token` is returned.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "Optional. If present, retrieve the next batch of results from the preceding call to this method. `page_token` must be the value of `next_page_token` from the previous response. The values of all other method parameters must be identical to those in the previous call.",
// "location": "query",
// "type": "string"
// },
// "query": {
// "description": "Optional. The query statement. Examples: * \"policy:[email protected]\" * \"policy:([email protected] viewer)\"",
// "location": "query",
// "type": "string"
// },
// "scope": {
// "description": "Required. The relative name of an asset. The search is limited to the resources within the `scope`. The allowed value must be: * Organization number (such as \"organizations/123\") * Folder number(such as \"folders/1234\") * Project number (such as \"projects/12345\") * Project id (such as \"projects/abc\")",
// "location": "path",
// "pattern": "^[^/]+/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1p1beta1/{+scope}/iamPolicies:searchAll",
// "response": {
// "$ref": "SearchAllIamPoliciesResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
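// exampleSearchIamPoliciesOnce is an editor-added sketch, not part of the
// generated client. It shows a single Do call against the IAM policy search
// endpoint: NewService and the Service.IamPolicies field are assumed from
// the usual layout of generated Google API clients, the Query and PageSize
// setters are assumed to match the optional parameters listed in the method
// metadata above, and the scope and query strings are purely illustrative.
func exampleSearchIamPoliciesOnce(ctx context.Context) error {
	svc, err := NewService(ctx) // assumption: standard generated constructor
	if err != nil {
		return err
	}
	resp, err := svc.IamPolicies.SearchAll("organizations/123").
		Query("policy:[email protected]"). // hypothetical query
		PageSize(100).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// Results and NextPageToken are assumed from the v1p1beta1 response schema.
	fmt.Printf("got %d policies on this page; next_page_token=%q\n",
		len(resp.Results), resp.NextPageToken)
	return nil
}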
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *IamPoliciesSearchAllCall) Pages(ctx context.Context, f func(*SearchAllIamPoliciesResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
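// examplePageAllIamPolicies is an editor-added sketch, not generated code.
// It drives the Pages helper above to walk every page of IAM policy search
// results. NewService and the Service.IamPolicies field are assumed from the
// usual generated layout, the Resource field name is assumed from the
// v1p1beta1 IamPolicySearchResult schema, and the scope and query values are
// illustrative.
func examplePageAllIamPolicies(ctx context.Context) error {
	svc, err := NewService(ctx) // assumption: standard generated constructor
	if err != nil {
		return err
	}
	call := svc.IamPolicies.SearchAll("projects/12345").
		Query("policy:([email protected] viewer)") // hypothetical query
	return call.Pages(ctx, func(page *SearchAllIamPoliciesResponse) error {
		for _, result := range page.Results {
			fmt.Println(result.Resource) // resource the matched policy is attached to
		}
		return nil // a non-nil error here would stop the iteration
	})
}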
// method id "cloudasset.resources.searchAll":
type ResourcesSearchAllCall struct {
s *Service
scope string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// SearchAll: Searches all the resources within a given accessible CRM
// scope (project/folder/organization). This RPC gives callers,
// especially administrators, the ability to search all the resources
// within a scope, even if they don't have `.get` permission on all the
// resources. Callers should have `cloud.assets.SearchAllResources`
// permission on the requested scope, otherwise the request will be
// rejected.
//
// - scope: The relative name of an asset. The search is limited to the
// resources within the `scope`. The allowed value must be: *
// Organization number (such as "organizations/123") * Folder
// number (such as "folders/1234") * Project number (such as
// "projects/12345") * Project id (such as "projects/abc").
func (r *ResourcesService) SearchAll(scope string) *ResourcesSearchAllCall {
c := &ResourcesSearchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.scope = scope
return c
}
// AssetTypes sets the optional parameter "assetTypes": A list of asset
// types that this request searches for. If empty, it will search all
// the supported asset types.
func (c *ResourcesSearchAllCall) AssetTypes(assetTypes ...string) *ResourcesSearchAllCall {
c.urlParams_.SetMulti("assetTypes", append([]string{}, assetTypes...))
return c
}
// OrderBy sets the optional parameter "orderBy": A comma-separated list
// of fields specifying the sorting order of the results. The default
// order is ascending. Add ` DESC` after the field name to indicate
// descending order. Redundant space characters are ignored. For
// example, ` location DESC , name `.
func (c *ResourcesSearchAllCall) OrderBy(orderBy string) *ResourcesSearchAllCall {
c.urlParams_.Set("orderBy", orderBy)
return c
}
// PageSize sets the optional parameter "pageSize": The page size for
// search result pagination. Page size is capped at 500 even if a larger
// value is given. If set to zero, the server will pick an appropriate
// default. Returned results may be fewer than requested. When this
// happens, there could be more results as long as `next_page_token` is
// returned.
func (c *ResourcesSearchAllCall) PageSize(pageSize int64) *ResourcesSearchAllCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If present, then
// retrieve the next batch of results from the preceding call to this
// method. `page_token` must be the value of `next_page_token` from the
// previous response. The values of all other method parameters must be
// identical to those in the previous call.
func (c *ResourcesSearchAllCall) PageToken(pageToken string) *ResourcesSearchAllCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Query sets the optional parameter "query": The query statement.
func (c *ResourcesSearchAllCall) Query(query string) *ResourcesSearchAllCall {
c.urlParams_.Set("query", query)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ResourcesSearchAllCall) Fields(s ...googleapi.Field) *ResourcesSearchAllCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ResourcesSearchAllCall) IfNoneMatch(entityTag string) *ResourcesSearchAllCall {
c.ifNoneMatch_ = entityTag
return c
}
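// exampleConditionalResourceSearch is an editor-added sketch, not generated
// code. It illustrates the IfNoneMatch / googleapi.IsNotModified pattern
// described above, assuming the caller kept an ETag from an earlier
// response; whether this endpoint actually returns an ETag is itself an
// assumption, and the scope is illustrative.
func exampleConditionalResourceSearch(ctx context.Context, svc *Service, etag string) error {
	resp, err := svc.Resources.SearchAll("projects/abc"). // assumption: Service.Resources field
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil // nothing changed since the saved ETag; keep cached results
	}
	if err != nil {
		return err
	}
	fmt.Printf("results refreshed; next_page_token=%q\n", resp.NextPageToken)
	return nil
}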
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ResourcesSearchAllCall) Context(ctx context.Context) *ResourcesSearchAllCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ResourcesSearchAllCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
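// exampleResourceSearchWithHeader is an editor-added sketch, not generated
// code. It uses the Header hook above to attach an extra request header
// before executing the call; the header name and value are hypothetical, as
// are the scope and the Service.Resources field.
func exampleResourceSearchWithHeader(ctx context.Context, svc *Service) (*SearchAllResourcesResponse, error) {
	call := svc.Resources.SearchAll("projects/12345")
	call.Header().Set("X-Example-Request-Reason", "quarterly-audit") // hypothetical header
	return call.Context(ctx).Do()
}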
func (c *ResourcesSearchAllCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211227")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/{+scope}/resources:searchAll")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"scope": c.scope,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudasset.resources.searchAll" call.
// Exactly one of *SearchAllResourcesResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *SearchAllResourcesResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ResourcesSearchAllCall) Do(opts ...googleapi.CallOption) (*SearchAllResourcesResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &SearchAllResourcesResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Searches all the resources within a given accessible CRM scope (project/folder/organization). This RPC gives callers especially administrators the ability to search all the resources within a scope, even if they don't have `.get` permission of all the resources. Callers should have `cloud.assets.SearchAllResources` permission on the requested scope, otherwise the request will be rejected.",
// "flatPath": "v1p1beta1/{v1p1beta1Id}/{v1p1beta1Id1}/resources:searchAll",
// "httpMethod": "GET",
// "id": "cloudasset.resources.searchAll",
// "parameterOrder": [
// "scope"
// ],
// "parameters": {
// "assetTypes": {
// "description": "Optional. A list of asset types that this request searches for. If empty, it will search all the supported asset types.",
// "location": "query",
// "repeated": true,
// "type": "string"
// },
// "orderBy": {
// "description": "Optional. A comma separated list of fields specifying the sorting order of the results. The default order is ascending. Add ` DESC` after the field name to indicate descending order. Redundant space characters are ignored. For example, ` location DESC , name `.",
// "location": "query",
// "type": "string"
// },
// "pageSize": {
// "description": "Optional. The page size for search result pagination. Page size is capped at 500 even if a larger value is given. If set to zero, server will pick an appropriate default. Returned results may be fewer than requested. When this happens, there could be more results as long as `next_page_token` is returned.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. `page_token` must be the value of `next_page_token` from the previous response. The values of all other method parameters, must be identical to those in the previous call.",
// "location": "query",
// "type": "string"
// },
// "query": {
// "description": "Optional. The query statement.",
// "location": "query",
// "type": "string"
// },
// "scope": {
// "description": "Required. The relative name of an asset. The search is limited to the resources within the `scope`. The allowed value must be: * Organization number (such as \"organizations/123\") * Folder number(such as \"folders/1234\") * Project number (such as \"projects/12345\") * Project id (such as \"projects/abc\")",
// "location": "path",
// "pattern": "^[^/]+/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1p1beta1/{+scope}/resources:searchAll",
// "response": {
// "$ref": "SearchAllResourcesResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ResourcesSearchAllCall) Pages(ctx context.Context, f func(*SearchAllResourcesResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
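// examplePageAllResources is an editor-added sketch, not generated code. It
// combines the AssetTypes, OrderBy and PageSize options with the Pages
// helper above. NewService and the Service.Resources field are assumed from
// the usual generated layout, the Results field is assumed from the response
// schema, and the asset type, sort field and scope are illustrative.
func examplePageAllResources(ctx context.Context) error {
	svc, err := NewService(ctx) // assumption: standard generated constructor
	if err != nil {
		return err
	}
	call := svc.Resources.SearchAll("folders/1234").
		AssetTypes("compute.googleapis.com/Instance"). // hypothetical asset type filter
		OrderBy("name").
		PageSize(500)
	total := 0
	if err := call.Pages(ctx, func(page *SearchAllResourcesResponse) error {
		total += len(page.Results)
		return nil
	}); err != nil {
		return err
	}
	fmt.Printf("matched %d resources\n", total)
	return nil
}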