filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses, 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses, 3 values) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0-0, ⌀) | sentence (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
tools/perf/scripts_smoke_unittest.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import json
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from telemetry import decorators
from telemetry.testing import options_for_unittests
RUNNER_SCRIPTS_DIR = os.path.join(os.path.dirname(__file__),
'..', '..', 'testing', 'scripts')
sys.path.append(RUNNER_SCRIPTS_DIR)
import run_performance_tests # pylint: disable=wrong-import-position,import-error
class ScriptsSmokeTest(unittest.TestCase):
perf_dir = os.path.dirname(__file__)
def setUp(self):
self.options = options_for_unittests.GetCopy()
def RunPerfScript(self, args, env=None):
# TODO(crbug.com/985712): Switch all clients to pass a list of args rather
# than a string which we may not be parsing correctly.
if not isinstance(args, list):
args = args.split(' ')
proc = subprocess.Popen([sys.executable] + args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, cwd=self.perf_dir,
env=env)
stdout = proc.communicate()[0]
return_code = proc.returncode
return return_code, stdout.decode('utf-8')
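# Note: RunPerfScript('run_benchmark --help') runs "python run_benchmark --help"
# with tools/perf as the working directory and returns a (return_code, stdout)
# tuple; stdout also contains stderr because the two streams are merged above.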
def testRunBenchmarkHelp(self):
return_code, stdout = self.RunPerfScript('run_benchmark --help')
self.assertEquals(return_code, 0, stdout)
self.assertIn('usage: run_benchmark', stdout)
@decorators.Disabled('chromeos') # crbug.com/754913
def testRunBenchmarkListBenchmarks(self):
cmdline = ['run_benchmark', 'list', '--browser', self.options.browser_type]
if self.options.browser_type == 'exact':
# If we're running with an exact browser and it was not specified with
# an absolute path, then there's no guarantee that we can actually find it
# now, so make the test a no-op.
if not os.path.isabs(self.options.browser_executable):
return
cmdline.extend(['--browser-executable', self.options.browser_executable])
return_code, stdout = self.RunPerfScript(cmdline)
self.assertRegexpMatches(stdout, r'Available benchmarks .*? are:')
self.assertEqual(return_code, 0)
def testRunBenchmarkRunListsOutBenchmarks(self):
return_code, stdout = self.RunPerfScript('run_benchmark run')
self.assertIn('Pass --browser to list benchmarks', stdout)
self.assertNotEquals(return_code, 0)
def testRunBenchmarkRunNonExistingBenchmark(self):
return_code, stdout = self.RunPerfScript('run_benchmark foo')
self.assertIn('no such benchmark: foo', stdout)
self.assertNotEquals(return_code, 0)
def testRunRecordWprHelp(self):
return_code, stdout = self.RunPerfScript('record_wpr')
self.assertEquals(return_code, 0, stdout)
self.assertIn('optional arguments:', stdout)
@decorators.Disabled('chromeos') # crbug.com/814068
def testRunRecordWprList(self):
return_code, stdout = self.RunPerfScript('record_wpr --list-benchmarks')
# TODO(nednguyen): Remove this once we figure out why importing
# small_profile_extender fails on Android dbg.
# crbug.com/561668
if 'ImportError: cannot import name small_profile_extender' in stdout:
self.skipTest('small_profile_extender is missing')
self.assertEquals(return_code, 0, stdout)
self.assertIn('kraken', stdout)
@decorators.Disabled('chromeos') # crbug.com/754913
def testRunPerformanceTestsTelemetry_end2end(self):
tempdir = tempfile.mkdtemp()
benchmarks = ['dummy_benchmark.stable_benchmark_1',
'dummy_benchmark.noisy_benchmark_1']
cmdline = ('../../testing/scripts/run_performance_tests.py '
'../../tools/perf/run_benchmark '
'--benchmarks=%s '
'--browser=%s '
'--isolated-script-test-also-run-disabled-tests '
'--isolated-script-test-output=%s' %
(','.join(benchmarks), self.options.browser_type,
os.path.join(tempdir, 'output.json')))
if self.options.browser_type == 'exact':
# If the path to the browser executable is not absolute, there is no
# guarantee that we can actually find it at this point, so no-op the
# test.
if not os.path.isabs(self.options.browser_executable):
return
cmdline += ' --browser-executable=%s' % self.options.browser_executable
return_code, stdout = self.RunPerfScript(cmdline)
self.assertEquals(return_code, 0, stdout)
try:
with open(os.path.join(tempdir, 'output.json')) as f:
test_results = json.load(f)
self.assertIsNotNone(
test_results, 'json_test_results should be populated: ' + stdout)
benchmarks_run = [str(b) for b in test_results['tests'].keys()]
self.assertEqual(sorted(benchmarks_run), sorted(benchmarks))
story_runs = test_results['num_failures_by_type']['PASS']
self.assertEqual(
story_runs, 2,
'Total runs should be 2 since each benchmark has one story.')
for benchmark in benchmarks:
with open(os.path.join(tempdir, benchmark, 'test_results.json')) as f:
test_results = json.load(f)
self.assertIsNotNone(
test_results, 'json_test_results should be populated: ' + stdout)
with open(os.path.join(tempdir, benchmark, 'perf_results.json')) as f:
perf_results = json.load(f)
self.assertIsNotNone(
perf_results, 'json perf results should be populated: ' + stdout)
except IOError as e:
self.fail('json_test_results should be populated: ' + stdout + str(e))
except AssertionError as e:
self.fail('Caught assertion error: ' + str(e) + 'With stdout: ' + stdout)
finally:
shutil.rmtree(tempdir)
@decorators.Enabled('linux') # Testing platform-independent code.
def testRunPerformanceTestsTelemetry_NoTestResults(self):
"""Test that test results output gets returned for complete failures."""
tempdir = tempfile.mkdtemp()
benchmarks = ['benchmark1', 'benchmark2']
return_code, stdout = self.RunPerfScript(
'../../testing/scripts/run_performance_tests.py '
'../../tools/perf/testdata/fail_and_do_nothing '
'--benchmarks=%s '
'--browser=%s '
'--isolated-script-test-output=%s' % (
','.join(benchmarks),
self.options.browser_type,
os.path.join(tempdir, 'output.json')
))
self.assertNotEqual(return_code, 0)
try:
with open(os.path.join(tempdir, 'output.json')) as f:
test_results = json.load(f)
self.assertIsNotNone(
test_results, 'json_test_results should be populated: ' + stdout)
self.assertTrue(
test_results['interrupted'],
'if the benchmark does not populate test results, then we should '
'populate it with a failure.')
for benchmark in benchmarks:
with open(os.path.join(tempdir, benchmark, 'test_results.json')) as f:
test_results = json.load(f)
self.assertIsNotNone(
test_results, 'json_test_results should be populated: ' + stdout)
self.assertTrue(
test_results['interrupted'],
'if the benchmark does not populate test results, then we should '
'populate it with a failure.')
except IOError as e:
self.fail('json_test_results should be populated: ' + stdout + str(e))
finally:
shutil.rmtree(tempdir)
# Android: crbug.com/932301
# ChromeOS: crbug.com/754913
# Windows: crbug.com/1024767
# Linux: crbug.com/1024767
# all: Disabled everywhere because the smoke test shard map
# needed to be changed to fix crbug.com/1024767.
@decorators.Disabled('all')
def testRunPerformanceTestsTelemetrySharded_end2end(self):
tempdir = tempfile.mkdtemp()
env = os.environ.copy()
env['GTEST_SHARD_INDEX'] = '0'
env['GTEST_TOTAL_SHARDS'] = '2'
return_code, stdout = self.RunPerfScript(
'../../testing/scripts/run_performance_tests.py '
'../../tools/perf/run_benchmark '
'--test-shard-map-filename=smoke_test_benchmark_shard_map.json '
'--browser=%s '
'--run-ref-build '
'--isolated-script-test-filter=dummy_benchmark.noisy_benchmark_1/'
'dummy_page.html::dummy_benchmark.stable_benchmark_1/dummy_page.html '
'--isolated-script-test-repeat=2 '
'--isolated-script-test-also-run-disabled-tests '
'--isolated-script-test-output=%s' % (
self.options.browser_type,
os.path.join(tempdir, 'output.json')
), env=env)
test_results = None
try:
self.assertEquals(return_code, 0)
expected_benchmark_folders = (
'dummy_benchmark.stable_benchmark_1',
'dummy_benchmark.stable_benchmark_1.reference',
'dummy_gtest')
with open(os.path.join(tempdir, 'output.json')) as f:
test_results = json.load(f)
self.assertIsNotNone(
test_results, 'json_test_results should be populated.')
test_runs = test_results['num_failures_by_type']['PASS']
# 1 gtest run (since --isolated-script-test-repeat doesn't work for gtest
# yet) plus 2 dummy_benchmark runs = 3 runs.
self.assertEqual(
test_runs, 3, '--isolated-script-test-repeat=2 should work.')
for folder in expected_benchmark_folders:
with open(os.path.join(tempdir, folder, 'test_results.json')) as f:
test_results = json.load(f)
self.assertIsNotNone(
test_results, 'json test results should be populated.')
test_repeats = test_results['num_failures_by_type']['PASS']
if 'dummy_gtest' not in folder: # Repeats don't work for gtest yet.
self.assertEqual(
test_repeats, 2, '--isolated-script-test-repeat=2 should work.')
with open(os.path.join(tempdir, folder, 'perf_results.json')) as f:
perf_results = json.load(f)
self.assertIsNotNone(
perf_results, 'json perf results should be populated.')
except Exception as exc:
logging.error(
'Failed with error: %s\nOutput from run_performance_tests.py:\n\n%s',
exc, stdout)
if test_results is not None:
logging.error(
'Got test_results: %s\n', json.dumps(test_results, indent=2))
raise
finally:
shutil.rmtree(tempdir)
def RunGtest(self, generate_trace):
tempdir = tempfile.mkdtemp()
benchmark = 'dummy_gtest'
return_code, stdout = self.RunPerfScript(
'../../testing/scripts/run_performance_tests.py ' +
('../../tools/perf/run_gtest_benchmark.py ' if generate_trace else '') +
os.path.join('..', '..', 'tools', 'perf', 'testdata',
'dummy_gtest') +
(' --use-gtest-benchmark-script --output-format=histograms'
if generate_trace else '') +
' --non-telemetry=true '
'--this-arg=passthrough '
'--argument-to-check-that-arguments-work '
'--gtest-benchmark-name dummy_gtest '
'--isolated-script-test-output=%s' % (
os.path.join(tempdir, 'output.json')
))
try:
self.assertEquals(return_code, 0, stdout)
except AssertionError:
try:
with open(os.path.join(tempdir, benchmark, 'benchmark_log.txt')) as fh:
print(fh.read())
# pylint: disable=bare-except
except:
# pylint: enable=bare-except
pass
raise
try:
with open(os.path.join(tempdir, 'output.json')) as f:
test_results = json.load(f)
self.assertIsNotNone(
test_results, 'json_test_results should be populated: ' + stdout)
with open(os.path.join(tempdir, benchmark, 'test_results.json')) as f:
test_results = json.load(f)
self.assertIsNotNone(
test_results, 'json_test_results should be populated: ' + stdout)
with open(os.path.join(tempdir, benchmark, 'perf_results.json')) as f:
perf_results = json.load(f)
self.assertIsNotNone(
perf_results, 'json perf results should be populated: ' + stdout)
except IOError as e:
self.fail('json_test_results should be populated: ' + stdout + str(e))
finally:
shutil.rmtree(tempdir)
# Windows: ".exe" is auto-added which breaks Windows.
# ChromeOS: crbug.com/754913.
@decorators.Disabled('win', 'chromeos')
def testRunPerformanceTestsGtest_end2end(self):
self.RunGtest(generate_trace=False)
# Windows: ".exe" is auto-added which breaks Windows.
# ChromeOS: crbug.com/754913.
@decorators.Disabled('win', 'chromeos')
def testRunPerformanceTestsGtestTrace_end2end(self):
self.RunGtest(generate_trace=True)
def testRunPerformanceTestsShardedArgsParser(self):
options = run_performance_tests.parse_arguments([
'../../tools/perf/run_benchmark', '-v', '--browser=release_x64',
'--upload-results', '--run-ref-build',
'--test-shard-map-filename=win-10-perf_map.json',
'--assert-gpu-compositing',
r'--isolated-script-test-output=c:\a\b\c\output.json',
r'--isolated-script-test-perf-output=c:\a\b\c\perftest-output.json',
'--passthrough-arg=--a=b',
])
self.assertIn('--assert-gpu-compositing', options.passthrough_args)
self.assertIn('--browser=release_x64', options.passthrough_args)
self.assertIn('-v', options.passthrough_args)
self.assertIn('--a=b', options.passthrough_args)
self.assertEqual(options.executable, '../../tools/perf/run_benchmark')
self.assertEqual(options.isolated_script_test_output,
r'c:\a\b\c\output.json')
def testRunPerformanceTestsTelemetryCommandGenerator_ReferenceBrowserComeLast(self):
"""This tests for crbug.com/928928."""
options = run_performance_tests.parse_arguments([
'../../tools/perf/run_benchmark', '--browser=release_x64',
'--run-ref-build',
'--test-shard-map-filename=win-10-perf_map.json',
r'--isolated-script-test-output=c:\a\b\c\output.json',
])
self.assertIn('--browser=release_x64', options.passthrough_args)
command = run_performance_tests.TelemetryCommandGenerator(
'fake_benchmark_name', options, is_reference=True).generate(
'fake_output_dir')
original_browser_arg_index = command.index('--browser=release_x64')
reference_browser_arg_index = command.index('--browser=reference')
self.assertTrue(reference_browser_arg_index > original_browser_arg_index)
def testRunPerformanceTestsTelemetryCommandGenerator_StorySelectionConfig_Unabridged(self):
options = run_performance_tests.parse_arguments([
'../../tools/perf/run_benchmark', '--browser=release_x64',
'--run-ref-build',
r'--isolated-script-test-output=c:\a\b\c\output.json',
])
story_selection_config = {
'abridged': False,
'begin': 1,
'end': 5,
}
command = run_performance_tests.TelemetryCommandGenerator(
'fake_benchmark_name', options, story_selection_config).generate(
'fake_output_dir')
self.assertNotIn('--run-abridged-story-set', command)
self.assertIn('--story-shard-begin-index=1', command)
self.assertIn('--story-shard-end-index=5', command)
def testRunPerformanceTestsTelemetryCommandGenerator_StorySelectionConfig_Abridged(self):
options = run_performance_tests.parse_arguments([
'../../tools/perf/run_benchmark', '--browser=release_x64',
'--run-ref-build',
r'--isolated-script-test-output=c:\a\b\c\output.json',
])
story_selection_config = {
'abridged': True,
}
command = run_performance_tests.TelemetryCommandGenerator(
'fake_benchmark_name', options, story_selection_config).generate(
'fake_output_dir')
self.assertIn('--run-abridged-story-set', command)
def testRunPerformanceTestsGtestArgsParser(self):
options = run_performance_tests.parse_arguments([
'media_perftests',
'--non-telemetry=true',
'--single-process-tests',
'--test-launcher-retry-limit=0',
'--isolated-script-test-filter=*::-*_unoptimized::*_unaligned::'
'*unoptimized_aligned',
'--gtest-benchmark-name',
'media_perftests',
'--isolated-script-test-output=/x/y/z/output.json',
])
self.assertIn('--single-process-tests', options.passthrough_args)
self.assertIn('--test-launcher-retry-limit=0', options.passthrough_args)
self.assertEqual(options.executable, 'media_perftests')
self.assertEqual(options.isolated_script_test_output, r'/x/y/z/output.json')
def testRunPerformanceTestsExecuteGtest_OSError(self):
class FakeCommandGenerator(object):
def __init__(self):
self.executable_name = 'binary_that_doesnt_exist'
self._ignore_shard_env_vars = False
def generate(self, unused_path):
return [self.executable_name]
tempdir = tempfile.mkdtemp()
try:
fake_command_generator = FakeCommandGenerator()
output_paths = run_performance_tests.OutputFilePaths(
tempdir, 'fake_gtest')
output_paths.SetUp()
return_code = run_performance_tests.execute_gtest_perf_test(
fake_command_generator, output_paths, is_unittest=True)
self.assertEqual(return_code, 1)
with open(output_paths.test_results) as fh:
json_test_results = json.load(fh)
self.assertGreater(json_test_results['num_failures_by_type']['FAIL'], 0)
finally:
shutil.rmtree(tempdir)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
beta-vae/configs.py
|
"""
This configuration file stores common variables for use across the program.
It uses a simple file-existence check to determine whether it is running remotely or locally
and changes the filepaths accordingly for convenience.
"""
#%% Imports
import os
LATENT_DIM = 64
IMG_SIZE = 192 # 224, 256 (128)
KL_WEIGHT = 20.0 # rough guess for "optimally disentangled"
# CURR_IMGRUN_ID = '1014-0001'
CURR_IMGRUN_ID = None # train from scratch
# CURR_TXTRUN_ID = '0922-1614'
CURR_TXTRUN_ID = None # train from scratch
N_IMGRUN_EPOCH = 400
N_TXTRUN_EPOCH = 3000
BATCH_SIZE = 32
IMGRUN_LR = 4e-4
TXTRUN_LR = 1e-4
VALIDATION_FRAC = 20.0 / 100.0 # standard 80/20 test/validate split
#%% Properties
# This folder path should only exist on the remote machine
REMOTE = os.path.isdir("/data/sn/all")
# HOME = "/Users/ergonyc" #osx
# HOME = "/home/ergonyc" #linux
HOME = os.environ.get("HOME")
if REMOTE: # Running on EC2 instance or similar
META_DATA = "/data/sn/all/meta/dfmeta"
VOXEL_FILEPATH = "/data/sn/all/all/"
IMGRUN_DIR = "/data/sn/all/runs/"
TXTRUN_DIR = "/data/sn/all/txtruns/"
DATA_DIR = "/data/sn/all/data/"
else: # Running locally
META_DATA = HOME + "/Projects/DATABASE/SnkrScrpr/data/full_data" # csv pickle and json
DESCRIPTIONS = HOME + "/Projects/DATABASE/SnkrScrpr/data/basic_data_clean"
"""
This file stores all of the metadata gathered and analyzed by the program to generate descriptions.
It is created by the program and shouldn't require additional action, but it is a useful resource to inspect manually.
"""
# ROOT_FILEPATH = HOME + "/Projects/Project2.0/SnkrScrpr/data/"
# FILEPATH_GOAT = HOME + "/Projects/Project2.0/SnkrScrpr/data/goat/img/"
# FILEPATH_SNS = HOME + "/Projects/Project2.0/SnkrScrpr/data/sns/img/"
ROOT_FILEPATH = HOME + "/Projects/DATABASE/SnkrScrpr/data/"
FILEPATH_GOAT = HOME + "/Projects/DATABASE/SnkrScrpr/data/goat/img/"
FILEPATH_SNS = HOME + "/Projects/DATABASE/SnkrScrpr/data/sns/img/"
IMAGE_FILEPATH = ROOT_FILEPATH
"""
This is the location of the image data scraped from GOAT and SNS.
"""
IMGRUN_DIR = HOME + "/Projects/Project2.0/SnkrGen/beta-vae/imgruns/"
TXTRUN_DIR = HOME + "/Projects/Project2.0/SnkrGen/beta-vae/txtruns/"
"""
These are the run log and model checkpoint folders. This folder structure is generated and managed by the logger.py class.
Example run directory tree structure:
TODO
RUN_DIR
├── 0217-0434
│ ├── code_file.txt
│ ├── configs.csv
│ ├── logs
│ │ └── events.out.tfevents.1581942983.ip-172-31-21-198
│ ├── models
│ │ ├── checkpoint
│ │ ├── ckpt-161.data-00000-of-00002
│ │ ├── ckpt-161.data-00001-of-00002
│ │ ├── ckpt-161.index
│ │ └── epoch_161.h5
│ └── plots
├── 0217-0437
│ ├── code_file.txt
│ ├── configs.csv
│ ├── logs
│ │ └── events.out.tfevents.1581943124.ip-172-31-24-21
│ ├── models
│ │ ├── checkpoint
│ │ ├── ckpt-258.data-00000-of-00002
│ │ ├── ckpt-258.data-00001-of-00002
│ │ ├── ckpt-258.index
│ │ └── epoch_258.h5
│ ├── saved_data
│ └── plots
...
"""
DATA_DIR = HOME + "/Projects/Project2.0/SnkrGen/beta-vae/data/"
"""
This folder is used to cache various computation and memory intensive generated files like the randomized descriptions of objects.
"""
RENDERS_DIR = HOME + "/Projects/Project2.0/SnkrGen/beta-vae/renders/"
"""
This folder is used to store rendered images of the models for quick and easy viewing and for use in the streamlit app.
Primarily used when inspecting the quality of generated descriptions.
"""
PARTNET_META_STATS_DIR = HOME + "/Projects/Project2.0/SnkrGen/beta-vae/stats/"
"""
This folder contains all of the metadata that is used to generate the descriptions.
TODO
Specifically, only the meta.json and result_after_merging.json files are necessary.
"""
# %%
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 | |
blogs/rl-on-gcp/cartpole_policy_gradients/rl_model_code/trainer/model.py
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple policy gradients for Reinforcement learning on Google Cloud.
Also includes code for hyperparameter tuning. Adapted from:
https://github.com/ageron/handson-ml/blob/master/16_reinforcement_learning.ipynb
"""
import json
import os
import gym
import numpy as np
import tensorflow as tf
tf.reset_default_graph()
# task.py arguments.
N_GAMES_PER_UPDATE = None
DISCOUNT_RATE = None
N_HIDDEN = None
LEARNING_RATE = None
# Currently hardcoded.
n_max_steps = 1000
n_iterations = 30
save_iterations = 5
# For cartpole.
env = gym.make('CartPole-v0')
n_inputs = 4
n_outputs = 1
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
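# For example, discount_rewards([1.0, 1.0, 1.0], discount_rate=0.9) accumulates
# backwards through the list: 1.0, then 1.0 + 0.9 * 1.0 = 1.9, then
# 1.0 + 0.9 * 1.9 = 2.71, returning [2.71, 1.9, 1.0].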
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [
discount_rewards(rewards, discount_rate) for rewards in all_rewards
]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean) / reward_std
for discounted_rewards in all_discounted_rewards]
def hp_directory(model_dir):
"""If running a hyperparam job, create subfolder name with trial ID.
If not running a hyperparam job, just keep original model_dir.
"""
trial_id = json.loads(os.environ.get('TF_CONFIG', '{}')).get('task', {}).get(
'trial', '')
return os.path.join(model_dir, trial_id)
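# For example, when running a hyperparameter tuning job on Google Cloud (which
# sets TF_CONFIG), TF_CONFIG='{"task": {"trial": "3"}}' makes this return
# '<model_dir>/3'; without TF_CONFIG the trial id is '' and the original
# model_dir is kept (with a trailing separator from os.path.join).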
# Play games and train agent. Or evaluate and make gifs.
def run(outdir, train_mode):
# Build network.
initializer = tf.keras.initializers.VarianceScaling()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(
X, N_HIDDEN, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
# Optimizer, gradients.
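# compute_gradients() is used instead of minimize() so the per-step gradients can
# be fetched during rollouts, scaled by the discounted rewards, averaged across
# games, and only then fed back through the placeholders below to apply_gradients
# (the standard "vanilla" policy-gradient update).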
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
# For TensorBoard.
episode_reward = tf.placeholder(dtype=tf.float32, shape=[])
tf.summary.scalar('reward', episode_reward)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
if train_mode:
hp_save_dir = hp_directory(outdir)
with tf.Session() as sess:
init.run()
# For TensorBoard.
print('hp_save_dir:', hp_save_dir)
train_writer = tf.summary.FileWriter(hp_save_dir, sess.graph)
for iteration in range(n_iterations):
all_rewards = []
all_gradients = []
for game in range(N_GAMES_PER_UPDATE):
current_rewards = []
current_gradients = []
obs = env.reset()
for _ in range(n_max_steps):
action_val, gradients_val = sess.run(
[action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
avg_reward = np.mean(([np.sum(r) for r in all_rewards]))
print('\rIteration: {}, Reward: {}'.format(iteration, avg_reward), end='')
all_rewards = discount_and_normalize_rewards(
all_rewards, discount_rate=DISCOUNT_RATE)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([
reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)
],
axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
print('Saving model to ', hp_save_dir)
model_file = '{}/my_policy_net_pg.ckpt'.format(hp_save_dir)
saver.save(sess, model_file)
# Also save event files for TB.
merge = tf.summary.merge_all()
summary = sess.run(merge, feed_dict={episode_reward: avg_reward})
train_writer.add_summary(summary, iteration)
obs = env.reset()
steps = []
done = False
else: # Make a gif.
from moviepy.editor import ImageSequenceClip
model_file = '{}/my_policy_net_pg.ckpt'.format(outdir)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, save_path=model_file)
# Run model.
obs = env.reset()
done = False
steps = []
rewards = []
while not done:
s = env.render('rgb_array')
steps.append(s)
action_val = sess.run(action, feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
rewards.append(reward)
print('Final reward :', np.mean(rewards))
clip = ImageSequenceClip(steps, fps=30)
clip.write_gif('cartpole.gif', fps=30)
| [] | [] | ["TF_CONFIG"] | [] | ["TF_CONFIG"] | python | 1 | 0 | |
service/glacier/api_op_GetVaultLock.go
|
// Code generated by smithy-go-codegen DO NOT EDIT.
package glacier
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
glaciercust "github.com/aws/aws-sdk-go-v2/service/glacier/internal/customizations"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// This operation retrieves the following attributes from the lock-policy
// subresource set on the specified vault:
//
// * The vault lock policy set on the
// vault.
//
// * The state of the vault lock, which is either InProgress or
// Locked.
//
// * When the lock ID expires. The lock ID is used to complete the
// vault locking process.
//
// * When the vault lock was initiated and put into the
// InProgress state.
//
// A vault lock is put into the InProgress state by calling
// InitiateVaultLock. A vault lock is put into the Locked state by calling
// CompleteVaultLock. You can abort the vault locking process by calling
// AbortVaultLock. For more information about the vault locking process, see Amazon
// Glacier Vault Lock
// (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). If there
// is no vault lock policy set on the vault, the operation returns a 404 Not found
// error. For more information about vault lock policies, see Amazon Glacier Access
// Control with Vault Lock Policies
// (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
func (c *Client) GetVaultLock(ctx context.Context, params *GetVaultLockInput, optFns ...func(*Options)) (*GetVaultLockOutput, error) {
if params == nil {
params = &GetVaultLockInput{}
}
result, metadata, err := c.invokeOperation(ctx, "GetVaultLock", params, optFns, addOperationGetVaultLockMiddlewares)
if err != nil {
return nil, err
}
out := result.(*GetVaultLockOutput)
out.ResultMetadata = metadata
return out, nil
}
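// Example (illustrative; the vault name is hypothetical and aws.String/aws.ToString
// come from the github.com/aws/aws-sdk-go-v2/aws package): a caller with a
// configured glacier.Client can inspect a vault's lock state like this:
//
//	out, err := client.GetVaultLock(ctx, &glacier.GetVaultLockInput{
//		AccountId: aws.String("-"),
//		VaultName: aws.String("examplevault"),
//	})
//	if err != nil {
//		// handle error
//	}
//	fmt.Println(aws.ToString(out.State)) // "InProgress" or "Locked"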
// The input values for GetVaultLock.
type GetVaultLockInput struct {
// The AccountId value is the AWS account ID of the account that owns the vault.
// You can either specify an AWS account ID or optionally a single '-' (hyphen), in
// which case Amazon S3 Glacier uses the AWS account ID associated with the
// credentials used to sign the request. If you use an account ID, do not include
// any hyphens ('-') in the ID.
//
// This member is required.
AccountId *string
// The name of the vault.
//
// This member is required.
VaultName *string
}
// Contains the Amazon S3 Glacier response to your request.
type GetVaultLockOutput struct {
// The UTC date and time at which the vault lock was put into the InProgress state.
CreationDate *string
// The UTC date and time at which the lock ID expires. This value can be null if
// the vault lock is in a Locked state.
ExpirationDate *string
// The vault lock policy as a JSON string, which uses "\" as an escape character.
Policy *string
// The state of the vault lock. InProgress or Locked.
State *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func addOperationGetVaultLockMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpGetVaultLock{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetVaultLock{}, middleware.After)
if err != nil {
return err
}
awsmiddleware.AddRequestInvocationIDMiddleware(stack)
smithyhttp.AddContentLengthMiddleware(stack)
addResolveEndpointMiddleware(stack, options)
v4.AddComputePayloadSHA256Middleware(stack)
addRetryMiddlewares(stack, options)
addHTTPSignerV4Middleware(stack, options)
awsmiddleware.AddAttemptClockSkewMiddleware(stack)
addClientUserAgent(stack)
smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
smithyhttp.AddCloseResponseBodyMiddleware(stack)
addOpGetVaultLockValidationMiddleware(stack)
stack.Initialize.Add(newServiceMetadataMiddleware_opGetVaultLock(options.Region), middleware.Before)
addRequestIDRetrieverMiddleware(stack)
addResponseErrorMiddleware(stack)
glaciercust.AddTreeHashMiddleware(stack)
glaciercust.AddGlacierAPIVersionMiddleware(stack, ServiceAPIVersion)
glaciercust.AddDefaultAccountIDMiddleware(stack, setDefaultAccountID)
return nil
}
func newServiceMetadataMiddleware_opGetVaultLock(region string) awsmiddleware.RegisterServiceMetadata {
return awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "glacier",
OperationName: "GetVaultLock",
}
}
| [] | [] | [] | [] | [] | go | null | null | null |
tests/__init__.py
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import logging
import sys
import socket
import platform
import os
from concurrent.futures import ThreadPoolExecutor
log = logging.getLogger()
log.setLevel('DEBUG')
# if nose didn't already attach a log handler, add one here
if not log.handlers:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s [%(module)s:%(lineno)s]: %(message)s'))
log.addHandler(handler)
def is_eventlet_monkey_patched():
if 'eventlet.patcher' not in sys.modules:
return False
import eventlet.patcher
return eventlet.patcher.is_monkey_patched('socket')
def is_gevent_monkey_patched():
if 'gevent.monkey' not in sys.modules:
return False
import gevent.socket
return socket.socket is gevent.socket.socket
def is_monkey_patched():
return is_gevent_monkey_patched() or is_eventlet_monkey_patched()
MONKEY_PATCH_LOOP = bool(os.getenv('MONKEY_PATCH_LOOP', False))
EVENT_LOOP_MANAGER = os.getenv('EVENT_LOOP_MANAGER', '')
# If set to true, this will force the Cython tests to run regardless of whether they are installed
cython_env = os.getenv('VERIFY_CYTHON', "False")
VERIFY_CYTHON = False
if(cython_env == 'True'):
VERIFY_CYTHON = True
thread_pool_executor_class = ThreadPoolExecutor
if "gevent" in EVENT_LOOP_MANAGER:
import gevent.monkey
gevent.monkey.patch_all()
from cassandra.io.geventreactor import GeventConnection
connection_class = GeventConnection
elif "eventlet" in EVENT_LOOP_MANAGER:
from eventlet import monkey_patch
monkey_patch()
from cassandra.io.eventletreactor import EventletConnection
connection_class = EventletConnection
try:
from futurist import GreenThreadPoolExecutor
thread_pool_executor_class = GreenThreadPoolExecutor
except:
# futurist is installed only with python >=3.7
pass
elif "asyncore" in EVENT_LOOP_MANAGER:
from cassandra.io.asyncorereactor import AsyncoreConnection
connection_class = AsyncoreConnection
elif "twisted" in EVENT_LOOP_MANAGER:
from cassandra.io.twistedreactor import TwistedConnection
connection_class = TwistedConnection
elif "asyncio" in EVENT_LOOP_MANAGER:
from cassandra.io.asyncioreactor import AsyncioConnection
connection_class = AsyncioConnection
else:
try:
from cassandra.io.libevreactor import LibevConnection
connection_class = LibevConnection
except ImportError as e:
log.debug('Could not import LibevConnection, '
'using connection_class=None; '
'failed with error:\n {}'.format(
repr(e)
))
connection_class = None
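# For example, running the test suite with EVENT_LOOP_MANAGER=gevent monkey-patches
# the stdlib via gevent and selects GeventConnection, while leaving the variable
# unset falls back to LibevConnection when the libev extension imports cleanly
# (and to connection_class=None otherwise, as logged above).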
def is_windows():
return "Windows" in platform.system()
notwindows = unittest.skipUnless(not is_windows(), "This test is not adequate for windows")
notpypy = unittest.skipUnless(not platform.python_implementation() == 'PyPy', "This tests is not suitable for pypy")
| [] | [] | ["VERIFY_CYTHON", "EVENT_LOOP_MANAGER", "MONKEY_PATCH_LOOP"] | [] | ["VERIFY_CYTHON", "EVENT_LOOP_MANAGER", "MONKEY_PATCH_LOOP"] | python | 3 | 0 | |
sign_certd.go
|
package main
import (
"bytes"
"crypto/rand"
"encoding/base32"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/cloudtools/ssh-cert-authority/client"
"github.com/cloudtools/ssh-cert-authority/util"
"github.com/cloudtools/ssh-cert-authority/version"
"github.com/codegangsta/cli"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"reflect"
"regexp"
"strings"
"sync"
"time"
)
// Yanked from PROTOCOL.certkeys
var supportedCriticalOptions = []string{
"force-command",
"source-address",
}
func isSupportedOption(x string) bool {
for optionIdx := range supportedCriticalOptions {
if supportedCriticalOptions[optionIdx] == x {
return true
}
}
return false
}
func areCriticalOptionsValid(criticalOptions map[string]string) error {
for optionName := range criticalOptions {
if !isSupportedOption(optionName) {
return fmt.Errorf("Invalid critical option name: '%s'", optionName)
}
}
return nil
}
type certRequest struct {
// This struct tracks state for certificate requests. Imagine this one day
// being stored in a persistent data store.
request *ssh.Certificate
submitTime time.Time
environment string
signatures map[string]bool
certSigned bool
certRejected bool
reason string
}
func compareCerts(one, two *ssh.Certificate) bool {
/* Compare two SSH certificates in a special way.
The specialness is in that we expect these certs to be more or less the
same but they will have been signed by different people. The act of signing
the cert changes the Key, SignatureKey, Signature and Nonce fields of the
Certificate struct so we compare the cert except for those fields.
*/
if one.Serial != two.Serial {
return false
}
if one.CertType != two.CertType {
return false
}
if one.KeyId != two.KeyId {
return false
}
if !reflect.DeepEqual(one.ValidPrincipals, two.ValidPrincipals) {
return false
}
if one.ValidAfter != two.ValidAfter {
return false
}
if one.ValidBefore != two.ValidBefore {
return false
}
if !reflect.DeepEqual(one.CriticalOptions, two.CriticalOptions) {
return false
}
if !reflect.DeepEqual(one.Extensions, two.Extensions) {
return false
}
if !bytes.Equal(one.Reserved, two.Reserved) {
return false
}
if !reflect.DeepEqual(one.Key, two.Key) {
return false
}
return true
}
func newcertRequest() certRequest {
var cr certRequest
cr.submitTime = time.Now()
cr.certSigned = false
cr.signatures = make(map[string]bool)
return cr
}
type certRequestHandler struct {
Config map[string]ssh_ca_util.SignerdConfig
state map[string]certRequest
sshAgentConn io.ReadWriter
stateMutex sync.RWMutex
}
type signingRequest struct {
config *ssh_ca_util.SignerdConfig
environment string
cert *ssh.Certificate
}
func (h *certRequestHandler) setupPrivateKeys(config map[string]ssh_ca_util.SignerdConfig) error {
for env, cfg := range config {
if cfg.PrivateKeyFile == "" {
continue
}
keyUrl, err := url.Parse(cfg.PrivateKeyFile)
if err != nil {
log.Printf("Ignoring invalid private key file: '%s'. Error parsing: %s", cfg.PrivateKeyFile, err)
continue
}
if keyUrl.Scheme == "gcpkms" {
cfg = config[env]
cfg.SigningKeyFingerprint = cfg.PrivateKeyFile
config[env] = cfg
} else if keyUrl.Scheme == "" || keyUrl.Scheme == "file" {
keyContents, err := ioutil.ReadFile(keyUrl.Path)
if err != nil {
return fmt.Errorf("Failed reading private key file %s: %v", keyUrl.Path, err)
}
if strings.HasSuffix(keyUrl.Path, ".kms") {
var region string
if cfg.KmsRegion != "" {
region = cfg.KmsRegion
} else {
region, err = ec2metadata.New(session.New(), aws.NewConfig()).Region()
if err != nil {
return fmt.Errorf("Unable to determine our region: %s", err)
}
}
svc := kms.New(session.New(), aws.NewConfig().WithRegion(region))
params := &kms.DecryptInput{
CiphertextBlob: keyContents,
}
resp, err := svc.Decrypt(params)
if err != nil {
// We try only one time to speak with KMS. If this pukes, and it
// will occasionally because "the cloud", the caller is responsible
// for trying again, possibly after a crash/restart.
return fmt.Errorf("Unable to decrypt CA key: %v\n", err)
}
keyContents = resp.Plaintext
}
key, err := ssh.ParseRawPrivateKey(keyContents)
if err != nil {
return fmt.Errorf("Failed parsing private key %s: %v", keyUrl.Path, err)
}
keyToAdd := agent.AddedKey{
PrivateKey: key,
Comment: fmt.Sprintf("ssh-cert-authority-%s-%s", env, keyUrl.Path),
LifetimeSecs: 0,
}
agentClient := agent.NewClient(h.sshAgentConn)
err = agentClient.Add(keyToAdd)
if err != nil {
return fmt.Errorf("Unable to add private key %s: %v", keyUrl.Path, err)
}
signer, err := ssh.NewSignerFromKey(key)
if err != nil {
return fmt.Errorf("Unable to create signer from pk %s: %v", keyUrl.Path, err)
}
keyFp := ssh_ca_util.MakeFingerprint(signer.PublicKey().Marshal())
log.Printf("Added private key for env %s: %s", env, keyFp)
cfg = config[env]
cfg.SigningKeyFingerprint = keyFp
config[env] = cfg
}
}
return nil
}
func (h *certRequestHandler) createSigningRequest(rw http.ResponseWriter, req *http.Request) {
err := req.ParseForm()
if err != nil {
http.Error(rw, fmt.Sprintf("%v", err), http.StatusBadRequest)
return
}
cert, err := h.extractCertFromRequest(req)
if err != nil {
http.Error(rw, fmt.Sprintf("%v", err), http.StatusBadRequest)
return
}
environment, ok := cert.Extensions["[email protected]"]
if !ok || environment == "" {
http.Error(rw, "You forgot to send in the environment", http.StatusBadRequest)
return
}
reason, ok := cert.Extensions["[email protected]"]
if !ok || reason == "" {
http.Error(rw, "You forgot to send in a reason", http.StatusBadRequest)
return
}
config, ok := h.Config[environment]
if !ok {
http.Error(rw, "Unknown environment.", http.StatusBadRequest)
return
}
err = h.validateCert(cert, config.AuthorizedUsers)
if err != nil {
log.Printf("Invalid certificate signing request received from %s, ignoring", req.RemoteAddr)
http.Error(rw, fmt.Sprintf("%v", err), http.StatusBadRequest)
return
}
// Ideally we put the critical options into the cert and let validateCert
// do the validation. However, this also checks the signature on the cert
// which would fail if we modified it prior to validation. So we validate
// by hand.
if len(config.CriticalOptions) > 0 {
for optionName, optionVal := range config.CriticalOptions {
cert.CriticalOptions[optionName] = optionVal
}
}
requestID := make([]byte, 8)
rand.Reader.Read(requestID)
requestIDStr := base32.StdEncoding.EncodeToString(requestID)
requestIDStr = strings.Replace(requestIDStr, "=", "", 10)
// the serial number is the same as the request id, just encoded differently.
var nextSerial uint64
nextSerial = 0
for _, byteVal := range requestID {
nextSerial <<= 8
nextSerial |= uint64(byteVal)
}
requesterFp := ssh_ca_util.MakeFingerprint(cert.SignatureKey.Marshal())
signed, err := h.saveSigningRequest(config, environment, reason, requestIDStr, nextSerial, cert)
if err != nil {
http.Error(rw, fmt.Sprintf("Request not made: %v", err), http.StatusBadRequest)
return
}
// Serial and id are the same value, just encoded differently. Logging them
// both because they didn't use to be the same value and folks may be
// parsing these log messages and I don't want to break the format.
log.Printf("Cert request serial %d id %s env %s from %s (%s) @ %s principals %v valid from %d to %d for '%s'\n",
cert.Serial, requestIDStr, environment, requesterFp, config.AuthorizedUsers[requesterFp],
req.RemoteAddr, cert.ValidPrincipals, cert.ValidAfter, cert.ValidBefore, reason)
if config.SlackUrl != "" {
slackMsg := fmt.Sprintf("SSH cert request from %s with id %s for %s", config.AuthorizedUsers[requesterFp], requestIDStr, reason)
err = ssh_ca_client.PostToSlack(config.SlackUrl, config.SlackChannel, slackMsg)
if err != nil {
log.Printf("Unable to post to slack: %v", err)
}
}
var returnStatus int
if signed {
slackMsg := fmt.Sprintf("SSH cert request %s auto signed.", requestIDStr)
err := ssh_ca_client.PostToSlack(config.SlackUrl, config.SlackChannel, slackMsg)
if err != nil {
log.Printf("Unable to post to slack for %s: %v", requestIDStr, err)
}
returnStatus = http.StatusAccepted
} else {
returnStatus = http.StatusCreated
}
rw.WriteHeader(returnStatus)
rw.Write([]byte(requestIDStr))
return
}
func (h *certRequestHandler) saveSigningRequest(config ssh_ca_util.SignerdConfig, environment, reason, requestIDStr string, requestSerial uint64, cert *ssh.Certificate) (bool, error) {
requesterFp := ssh_ca_util.MakeFingerprint(cert.SignatureKey.Marshal())
maxValidBefore := uint64(time.Now().Add(time.Duration(config.MaxCertLifetime) * time.Second).Unix())
if config.MaxCertLifetime != 0 && cert.ValidBefore > maxValidBefore {
return false, fmt.Errorf("Certificate is valid longer than maximum permitted by configuration %d > %d",
cert.ValidBefore, maxValidBefore)
}
// We override keyid here so that its a server controlled value. Instead of
// letting a requester attempt to spoof it.
var ok bool
cert.KeyId, ok = config.AuthorizedUsers[requesterFp]
if !ok {
return false, fmt.Errorf("Requester fingerprint (%s) not found in config", requesterFp)
}
if requestSerial == 0 {
return false, fmt.Errorf("Serial number not set.")
}
cert.Serial = requestSerial
certRequest := newcertRequest()
certRequest.request = cert
if environment == "" {
return false, fmt.Errorf("Environment is a required field")
}
certRequest.environment = environment
if reason == "" {
return false, fmt.Errorf("Reason is a required field")
}
certRequest.reason = reason
if len(requestIDStr) < 12 {
return false, fmt.Errorf("Request id is too short to be useful.")
}
h.stateMutex.RLock()
_, ok = h.state[requestIDStr]
h.stateMutex.RUnlock()
if ok {
return false, fmt.Errorf("Request id '%s' already in use.", requestIDStr)
}
h.stateMutex.Lock()
h.state[requestIDStr] = certRequest
h.stateMutex.Unlock()
// This is the special case of supporting auto-signing.
if config.NumberSignersRequired < 0 {
signed, err := h.maybeSignWithCa(requestIDStr, config.NumberSignersRequired, config.SigningKeyFingerprint)
if signed && err == nil {
return true, nil
}
}
return false, nil
}
func (h *certRequestHandler) extractCertFromRequest(req *http.Request) (*ssh.Certificate, error) {
if req.Form["cert"] == nil || len(req.Form["cert"]) == 0 {
err := errors.New("Please specify exactly one cert request")
return nil, err
}
rawCertRequest, err := base64.StdEncoding.DecodeString(req.Form["cert"][0])
if err != nil {
err := errors.New("Unable to base64 decode cert request")
return nil, err
}
pubKey, err := ssh.ParsePublicKey(rawCertRequest)
if err != nil {
err := errors.New("Unable to parse cert request")
return nil, err
}
return pubKey.(*ssh.Certificate), nil
}
func (h *certRequestHandler) validateCert(cert *ssh.Certificate, authorizedSigners map[string]string) error {
var certChecker ssh.CertChecker
certChecker.IsUserAuthority = func(auth ssh.PublicKey) bool {
fingerprint := ssh_ca_util.MakeFingerprint(auth.Marshal())
_, ok := authorizedSigners[fingerprint]
return ok
}
certChecker.SupportedCriticalOptions = supportedCriticalOptions
err := certChecker.CheckCert(cert.ValidPrincipals[0], cert)
if err != nil {
err := fmt.Errorf("Cert not valid: %v", err)
return err
}
if cert.CertType != ssh.UserCert {
err = errors.New("Cert not valid: not a user certificate")
return err
}
// explicitly call IsUserAuthority
if !certChecker.IsUserAuthority(cert.SignatureKey) {
err = errors.New("Cert not valid: not signed by an authorized key")
return err
}
return nil
}
type listResponseElement struct {
Signed bool
Rejected bool
CertBlob string
NumSignatures int
SignaturesRequired int
Serial uint64
Environment string
Reason string
Cert *ssh.Certificate `json:"-"`
}
type certRequestResponse map[string]listResponseElement
func newResponseElement(cert *ssh.Certificate, certBlob string, signed bool, rejected bool, numSignatures, signaturesRequired int, serial uint64, reason string, environment string) listResponseElement {
var element listResponseElement
element.Cert = cert
element.CertBlob = certBlob
element.Signed = signed
element.Rejected = rejected
element.NumSignatures = numSignatures
element.SignaturesRequired = signaturesRequired
element.Serial = serial
element.Reason = reason
element.Environment = environment
return element
}
func (h *certRequestHandler) listEnvironments(rw http.ResponseWriter, req *http.Request) {
var environments []string
for k := range h.Config {
environments = append(environments, k)
}
result, err := json.Marshal(environments)
if err != nil {
http.Error(rw, fmt.Sprintf("Unable to marshal environment names: %v", err), http.StatusInternalServerError)
return
}
log.Printf("List environments received from '%s'\n", req.RemoteAddr)
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
rw.Write(result)
}
func (h *certRequestHandler) listPendingRequests(rw http.ResponseWriter, req *http.Request) {
var certRequestID string
err := req.ParseForm()
if err != nil {
http.Error(rw, fmt.Sprintf("%v", err), http.StatusBadRequest)
return
}
certRequestIDs, ok := req.Form["certRequestId"]
if ok {
certRequestID = certRequestIDs[0]
}
matched, _ := regexp.MatchString("^[A-Z2-7=]{10,16}$", certRequestID)
if certRequestID != "" && !matched {
http.Error(rw, "Invalid certRequestId", http.StatusBadRequest)
return
}
log.Printf("List pending requests received from %s for request id '%s'\n",
req.RemoteAddr, certRequestID)
foundSomething := false
results := make(certRequestResponse)
h.stateMutex.RLock()
defer h.stateMutex.RUnlock()
for k, v := range h.state {
encodedCert := base64.StdEncoding.EncodeToString(v.request.Marshal())
element := newResponseElement(v.request, encodedCert, v.certSigned, v.certRejected, len(v.signatures), h.Config[v.environment].NumberSignersRequired, v.request.Serial, v.reason, v.environment)
// Two ways to use this URL. If caller specified a certRequestId
// then we return only that one. Otherwise everything.
if certRequestID == "" {
results[k] = element
foundSomething = true
} else {
if certRequestID == k {
results[k] = element
foundSomething = true
break
}
}
}
if foundSomething {
output, err := json.Marshal(results)
if err != nil {
http.Error(rw, fmt.Sprintf("Trouble marshaling json response %v", err), http.StatusInternalServerError)
return
}
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
rw.Write(output)
} else {
http.Error(rw, fmt.Sprintf("No certs found."), http.StatusNotFound)
return
}
}
func (h *certRequestHandler) getRequestStatus(rw http.ResponseWriter, req *http.Request) {
uriVars := mux.Vars(req)
requestID := uriVars["requestID"]
type Response struct {
certSigned bool
certRejected bool
cert string
}
h.stateMutex.RLock()
defer h.stateMutex.RUnlock()
if h.state[requestID].certSigned {
rw.Write([]byte(h.state[requestID].request.Type()))
rw.Write([]byte(" "))
rw.Write([]byte(base64.StdEncoding.EncodeToString(h.state[requestID].request.Marshal())))
rw.Write([]byte("\n"))
} else if h.state[requestID].certRejected {
http.Error(rw, "Cert request was rejected.", http.StatusPreconditionFailed)
} else {
http.Error(rw, "Cert not signed yet.", http.StatusPreconditionFailed)
}
}
func (h *certRequestHandler) signOrRejectRequest(rw http.ResponseWriter, req *http.Request) {
requestID := mux.Vars(req)["requestID"]
h.stateMutex.RLock()
originalRequest, ok := h.state[requestID]
h.stateMutex.RUnlock()
if !ok {
http.Error(rw, "Unknown request id", http.StatusNotFound)
return
}
if originalRequest.certSigned {
http.Error(rw, "Request already signed.", http.StatusConflict)
return
}
if originalRequest.certRejected {
http.Error(rw, "Request already rejected.", http.StatusConflict)
return
}
err := req.ParseForm()
if err != nil {
http.Error(rw, fmt.Sprintf("%v", err), http.StatusBadRequest)
return
}
envConfig, ok := h.Config[originalRequest.environment]
if !ok {
http.Error(rw, "Original request found to have an invalid env. Weird.", http.StatusBadRequest)
return
}
signedCert, err := h.extractCertFromRequest(req)
if err != nil {
log.Printf("Unable to extract certificate signing request from %s, ignoring", req.RemoteAddr)
http.Error(rw, fmt.Sprintf("%v", err), http.StatusBadRequest)
return
}
err = h.validateCert(signedCert, envConfig.AuthorizedSigners)
if err != nil {
log.Printf("Invalid certificate signing request received from %s, ignoring", req.RemoteAddr)
http.Error(rw, fmt.Sprintf("%v", err), http.StatusBadRequest)
return
}
signerFp := ssh_ca_util.MakeFingerprint(signedCert.SignatureKey.Marshal())
// Verifying that the cert being posted to us here matches the one in the
// request. That is, that an attacker isn't using an old signature to sign a
// new/different request id
h.stateMutex.RLock()
requestedCert := h.state[requestID].request
h.stateMutex.RUnlock()
if !compareCerts(requestedCert, signedCert) {
log.Printf("Signature was valid, but cert didn't match from %s.", req.RemoteAddr)
log.Printf("Orig req: %#v\n", requestedCert)
log.Printf("Sign req: %#v\n", signedCert)
http.Error(rw, "Signature was valid, but cert didn't match.", http.StatusBadRequest)
return
}
requesterFp := ssh_ca_util.MakeFingerprint(requestedCert.Key.Marshal())
// Make sure the key attempting to sign the request is not the same as the key in the CSR
if signerFp == requesterFp {
err = errors.New("Signed by the same key as key in request")
http.Error(rw, fmt.Sprintf("%v", err), http.StatusBadRequest)
return
}
log.Printf("Signature for serial %d id %s received from %s (%s) @ %s and determined valid\n",
signedCert.Serial, requestID, signerFp, envConfig.AuthorizedSigners[signerFp], req.RemoteAddr)
if req.Method == "POST" {
err = h.addConfirmation(requestID, signerFp, envConfig)
} else {
err = h.rejectRequest(requestID, signerFp, envConfig)
}
if err != nil {
http.Error(rw, fmt.Sprintf("%v", err), http.StatusNotFound)
}
}
func (h *certRequestHandler) rejectRequest(requestID string, signerFp string, envConfig ssh_ca_util.SignerdConfig) error {
log.Printf("Reject received for id %s", requestID)
h.stateMutex.Lock()
defer h.stateMutex.Unlock()
stateInfo := h.state[requestID]
stateInfo.certRejected = true
// this is weird. see: https://code.google.com/p/go/issues/detail?id=3117
h.state[requestID] = stateInfo
return nil
}
func (h *certRequestHandler) addConfirmation(requestID string, signerFp string, envConfig ssh_ca_util.SignerdConfig) error {
h.stateMutex.RLock()
certRejected := h.state[requestID].certRejected
h.stateMutex.RUnlock()
if certRejected {
return fmt.Errorf("Attempt to sign a rejected cert.")
}
h.stateMutex.Lock()
h.state[requestID].signatures[signerFp] = true
h.stateMutex.Unlock()
if envConfig.SlackUrl != "" {
slackMsg := fmt.Sprintf("SSH cert %s signed by %s making %d/%d signatures.",
requestID, envConfig.AuthorizedSigners[signerFp],
len(h.state[requestID].signatures), envConfig.NumberSignersRequired)
err := ssh_ca_client.PostToSlack(envConfig.SlackUrl, envConfig.SlackChannel, slackMsg)
if err != nil {
log.Printf("Unable to post to slack for %s: %v", requestID, err)
}
}
signed, err := h.maybeSignWithCa(requestID, envConfig.NumberSignersRequired, envConfig.SigningKeyFingerprint)
if signed && err == nil {
slackMsg := fmt.Sprintf("SSH cert request %s fully signed.", requestID)
err := ssh_ca_client.PostToSlack(envConfig.SlackUrl, envConfig.SlackChannel, slackMsg)
if err != nil {
log.Printf("Unable to post to slack for %s: %v", requestID, err)
}
}
return err
}
func (h *certRequestHandler) maybeSignWithCa(requestID string, numSignersRequired int, signingKeyFingerprint string) (bool, error) {
h.stateMutex.Lock()
defer h.stateMutex.Unlock()
if len(h.state[requestID].signatures) >= numSignersRequired {
if h.sshAgentConn == nil {
// This is used for testing. We're effectively disabling working
// with the ssh agent to avoid needing to mock it.
log.Print("ssh agent uninitialized, will not attempt signing. This is normal in unittests")
return true, nil
}
log.Printf("Received %d signatures for %s, signing now.\n", len(h.state[requestID].signatures), requestID)
signer, err := ssh_ca_util.GetSignerForFingerprintOrUrl(signingKeyFingerprint, h.sshAgentConn)
if err != nil {
log.Printf("Couldn't find signing key for request %s, unable to sign request: %s\n", requestID, err)
return false, fmt.Errorf("Couldn't find signing key, unable to sign. Sorry.")
}
stateInfo := h.state[requestID]
for extensionName := range stateInfo.request.Extensions {
// sshd up to version 6.8 has a bug where optional extensions are
// treated as critical. If a cert contains any non-standard
// extensions, like ours, the server rejects the cert because it
// doesn't understand the extension. To cope with this we simply
// strip our non-standard extensions before doing the final
// signature. https://bugzilla.mindrot.org/show_bug.cgi?id=2387
if strings.Contains(extensionName, "@") {
delete(stateInfo.request.Extensions, extensionName)
}
}
stateInfo.request.SignCert(rand.Reader, signer)
stateInfo.certSigned = true
// this is weird. see: https://code.google.com/p/go/issues/detail?id=3117
h.state[requestID] = stateInfo
return true, nil
}
return false, nil
}
func signdFlags() []cli.Flag {
home := os.Getenv("HOME")
if home == "" {
home = "/"
}
configPath := home + "/.ssh_ca/sign_certd_config.json"
return []cli.Flag{
cli.StringFlag{
Name: "config-file",
Value: configPath,
Usage: "Path to config.json",
},
cli.StringFlag{
Name: "listen-address",
Value: "127.0.0.1:8080",
Usage: "HTTP service address",
},
cli.BoolFlag{
Name: "reverse-proxy",
Usage: "Set when service is behind a reverse proxy, like nginx",
EnvVar: "SSH_CERT_AUTHORITY_PROXY",
},
}
}
func signCertd(c *cli.Context) error {
configPath := c.String("config-file")
config := make(map[string]ssh_ca_util.SignerdConfig)
err := ssh_ca_util.LoadConfig(configPath, &config)
if err != nil {
return cli.NewExitError(fmt.Sprintf("Load Config failed: %s", err), 1)
}
for envName, configObj := range config {
err = areCriticalOptionsValid(configObj.CriticalOptions)
if err != nil {
return cli.NewExitError(fmt.Sprintf("Error validating config for env '%s': %s", envName, err), 1)
}
}
err = runSignCertd(config, c.String("listen-address"), c.Bool("reverse-proxy"))
return err
}
func makeCertRequestHandler(config map[string]ssh_ca_util.SignerdConfig) certRequestHandler {
var requestHandler certRequestHandler
requestHandler.Config = config
requestHandler.state = make(map[string]certRequest)
return requestHandler
}
func runSignCertd(config map[string]ssh_ca_util.SignerdConfig, addr string, is_proxied bool) error {
log.Println("Server running version", version.BuildVersion)
log.Println("Using SSH agent at", os.Getenv("SSH_AUTH_SOCK"))
sshAgentConn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
if err != nil {
return cli.NewExitError(fmt.Sprintf("Dial failed: %s", err), 1)
}
requestHandler := makeCertRequestHandler(config)
requestHandler.sshAgentConn = sshAgentConn
err = requestHandler.setupPrivateKeys(config)
if err != nil {
return cli.NewExitError(fmt.Sprintf("Failed CA key load: %v\n", err), 1)
}
log.Printf("Server started with config %#v\n", config)
r := mux.NewRouter()
requests := r.Path("/cert/requests").Subrouter()
requests.Methods("POST").HandlerFunc(requestHandler.createSigningRequest)
requests.Methods("GET").HandlerFunc(requestHandler.listPendingRequests)
request := r.Path("/cert/requests/{requestID}").Subrouter()
request.Methods("GET").HandlerFunc(requestHandler.getRequestStatus)
request.Methods("POST", "DELETE").HandlerFunc(requestHandler.signOrRejectRequest)
environments := r.Path("/config/environments").Subrouter()
environments.Methods("GET").HandlerFunc(requestHandler.listEnvironments)
if is_proxied {
return http.ListenAndServe(addr, handlers.ProxyHeaders(r))
}
return http.ListenAndServe(addr, r)
}
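// The wiring below is an illustrative sketch only: the daemon's real main()
// lives elsewhere in this package, and the app name "sign_certd" is assumed
// for the example. It shows how signdFlags and signCertd are meant to plug
// into a urfave/cli application.
func newSigndApp() *cli.App {
app := cli.NewApp()
app.Name = "sign_certd" // assumed name, for illustration only
app.Usage = "Listen for certificate signing requests and sign them once approved"
app.Flags = signdFlags()
app.Action = signCertd
return app
}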
|
[
"\"HOME\"",
"\"SSH_AUTH_SOCK\"",
"\"SSH_AUTH_SOCK\""
] |
[] |
[
"HOME",
"SSH_AUTH_SOCK"
] |
[]
|
["HOME", "SSH_AUTH_SOCK"]
|
go
| 2 | 0 | |
runsc/container/container.go
|
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package container creates and manipulates containers.
package container
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"github.com/cenkalti/backoff"
"github.com/gofrs/flock"
specs "github.com/opencontainers/runtime-spec/specs-go"
"gvisor.googlesource.com/gvisor/pkg/log"
"gvisor.googlesource.com/gvisor/pkg/sentry/control"
"gvisor.googlesource.com/gvisor/runsc/boot"
"gvisor.googlesource.com/gvisor/runsc/cgroup"
"gvisor.googlesource.com/gvisor/runsc/sandbox"
"gvisor.googlesource.com/gvisor/runsc/specutils"
)
const (
// metadataFilename is the name of the metadata file relative to the
// container root directory that holds sandbox metadata.
metadataFilename = "meta.json"
// metadataLockFilename is the name of a lock file in the container
// root directory that is used to prevent concurrent modifications to
// the container state and metadata.
metadataLockFilename = "meta.lock"
)
// validateID validates the container id.
func validateID(id string) error {
// See libcontainer/factory_linux.go.
idRegex := regexp.MustCompile(`^[\w+-\.]+$`)
if !idRegex.MatchString(id) {
return fmt.Errorf("invalid container id: %v", id)
}
return nil
}
// Container represents a containerized application. When running, the
// container is associated with a single Sandbox.
//
// Container metadata can be saved and loaded to disk. Within a root directory,
// we maintain subdirectories for each container named with the container id.
// The container metadata is stored as a json within the container directory
// in a file named "meta.json". This metadata format is defined by us and is
// not part of the OCI spec.
//
// Containers must write their metadata files after any change to their internal
// states. The entire container directory is deleted when the container is
// destroyed.
//
// When the container is stopped, all processes that belong to the container
// must be stopped before Destroy() returns. containerd makes roughly the
// following calls to stop a container:
// - First it attempts to kill the container process with
// 'runsc kill SIGTERM'. After some time, it escalates to SIGKILL. In a
// separate thread, it's waiting on the container. As soon as the wait
// returns, it moves on to the next step:
// - It calls 'runsc kill --all SIGKILL' to stop every process that belongs to
// the container. 'kill --all SIGKILL' waits for all processes before
// returning.
// - Containerd waits for stdin, stdout and stderr to drain and be closed.
// - It calls 'runsc delete'. runc implementation kills --all SIGKILL once
// again just to be sure, waits, and then proceeds with remaining teardown.
//
type Container struct {
// ID is the container ID.
ID string `json:"id"`
// Spec is the OCI runtime spec that configures this container.
Spec *specs.Spec `json:"spec"`
// BundleDir is the directory containing the container bundle.
BundleDir string `json:"bundleDir"`
// Root is the directory containing the container metadata file.
Root string `json:"root"`
// CreatedAt is the time the container was created.
CreatedAt time.Time `json:"createdAt"`
// Owner is the container owner.
Owner string `json:"owner"`
// ConsoleSocket is the path to a unix domain socket that will receive
// the console FD.
ConsoleSocket string `json:"consoleSocket"`
// Status is the current container Status.
Status Status `json:"status"`
// GoferPid is the PID of the gofer running alongside the sandbox. May
// be 0 if the gofer has been killed.
GoferPid int `json:"goferPid"`
// goferIsChild is set if a gofer process is a child of the current process.
//
// This field isn't saved to json, because only a creator of a gofer
// process will have it as a child process.
goferIsChild bool
// Sandbox is the sandbox this container is running in. It's set when the
// container is created and reset when the sandbox is destroyed.
Sandbox *sandbox.Sandbox `json:"sandbox"`
}
// Load loads a container with the given id from a metadata file. id may be an
// abbreviation of the full container id, in which case Load loads the
// container to which id unambiguously refers.
// Returns ErrNotExist if container doesn't exist.
func Load(rootDir, id string) (*Container, error) {
log.Debugf("Load container %q %q", rootDir, id)
if err := validateID(id); err != nil {
return nil, fmt.Errorf("validating id: %v", err)
}
cRoot, err := findContainerRoot(rootDir, id)
if err != nil {
// Preserve error so that callers can distinguish 'not found' errors.
return nil, err
}
// Lock the container metadata to prevent other runsc instances from
// writing to it while we are reading it.
unlock, err := lockContainerMetadata(cRoot)
if err != nil {
return nil, err
}
defer unlock()
// Read the container metadata file and create a new Container from it.
metaFile := filepath.Join(cRoot, metadataFilename)
metaBytes, err := ioutil.ReadFile(metaFile)
if err != nil {
if os.IsNotExist(err) {
// Preserve error so that callers can distinguish 'not found' errors.
return nil, err
}
return nil, fmt.Errorf("reading container metadata file %q: %v", metaFile, err)
}
var c Container
if err := json.Unmarshal(metaBytes, &c); err != nil {
return nil, fmt.Errorf("unmarshaling container metadata from %q: %v", metaFile, err)
}
// If the status is "Running" or "Created", check that the sandbox
// process still exists, and set it to Stopped if it does not.
//
// This is inherently racey.
if c.Status == Running || c.Status == Created {
// Check if the sandbox process is still running.
if !c.isSandboxRunning() {
// Sandbox no longer exists, so this container definitely does not exist.
c.changeStatus(Stopped)
} else if c.Status == Running {
// Container state should reflect the actual state of the application, so
// we don't consider gofer process here.
if err := c.SignalContainer(syscall.Signal(0), false); err != nil {
c.changeStatus(Stopped)
}
}
}
return &c, nil
}
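// loadOrNil is an illustrative helper, not part of the original API: it shows
// the intended way for callers to treat the os.ErrNotExist preserved by Load
// as "no such container" while still surfacing real metadata errors.
func loadOrNil(rootDir, id string) (*Container, error) {
c, err := Load(rootDir, id)
if os.IsNotExist(err) {
return nil, nil // container does not exist
}
return c, err
}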
func findContainerRoot(rootDir, partialID string) (string, error) {
// Check whether the id fully specifies an existing container.
cRoot := filepath.Join(rootDir, partialID)
if _, err := os.Stat(cRoot); err == nil {
return cRoot, nil
}
// Now see whether id could be an abbreviation of exactly 1 of the
// container ids. If id is ambiguous (it could match more than 1
// container), it is an error.
cRoot = ""
ids, err := List(rootDir)
if err != nil {
return "", err
}
for _, id := range ids {
if strings.HasPrefix(id, partialID) {
if cRoot != "" {
return "", fmt.Errorf("id %q is ambiguous and could refer to multiple containers: %q, %q", partialID, cRoot, id)
}
cRoot = id
}
}
if cRoot == "" {
return "", os.ErrNotExist
}
log.Debugf("abbreviated id %q resolves to full id %q", partialID, cRoot)
return filepath.Join(rootDir, cRoot), nil
}
// List returns all container ids in the given root directory.
func List(rootDir string) ([]string, error) {
log.Debugf("List containers %q", rootDir)
fs, err := ioutil.ReadDir(rootDir)
if err != nil {
return nil, fmt.Errorf("reading dir %q: %v", rootDir, err)
}
var out []string
for _, f := range fs {
out = append(out, f.Name())
}
return out, nil
}
// Create creates the container in a new Sandbox process, unless the metadata
// indicates that an existing Sandbox should be used. The caller must call
// Destroy() on the container.
func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile, userLog string) (*Container, error) {
log.Debugf("Create container %q in root dir: %s", id, conf.RootDir)
if err := validateID(id); err != nil {
return nil, err
}
// Lock the container metadata file to prevent concurrent creations of
// containers with the same id.
containerRoot := filepath.Join(conf.RootDir, id)
unlock, err := lockContainerMetadata(containerRoot)
if err != nil {
return nil, err
}
defer unlock()
// Check if the container already exists by looking for the metadata
// file.
if _, err := os.Stat(filepath.Join(containerRoot, metadataFilename)); err == nil {
return nil, fmt.Errorf("container with id %q already exists", id)
} else if !os.IsNotExist(err) {
return nil, fmt.Errorf("looking for existing container in %q: %v", containerRoot, err)
}
c := &Container{
ID: id,
Spec: spec,
ConsoleSocket: consoleSocket,
BundleDir: bundleDir,
Root: containerRoot,
Status: Creating,
Owner: os.Getenv("USER"),
}
// The Cleanup object cleans up partially created containers when an error occurs.
// Any errors occurring during cleanup itself are ignored.
cu := specutils.MakeCleanup(func() { _ = c.Destroy() })
defer cu.Clean()
// If the metadata annotations indicate that this container should be
// started in an existing sandbox, we must do so. The metadata will
// indicate the ID of the sandbox, which is the same as the ID of the
// init container in the sandbox.
if specutils.ShouldCreateSandbox(spec) {
log.Debugf("Creating new sandbox for container %q", id)
// Set up rootfs and mounts. setupFS returns a new mount list with destination
// paths resolved. Since the spec for the root container is read from disk,
// write the new spec to a new file that will be used by the sandbox.
cleanMounts, err := setupFS(spec, conf, bundleDir)
if err != nil {
return nil, fmt.Errorf("setup mounts: %v", err)
}
spec.Mounts = cleanMounts
if err := specutils.WriteCleanSpec(bundleDir, spec); err != nil {
return nil, fmt.Errorf("writing clean spec: %v", err)
}
// Create and join cgroup before processes are created to ensure they are
// part of the cgroup from the start (and so are all their child processes).
cg := cgroup.New(spec)
if cg != nil {
// If there is cgroup config, install it before creating sandbox process.
if err := cg.Install(spec.Linux.Resources); err != nil {
return nil, fmt.Errorf("configuring cgroup: %v", err)
}
}
if err := runInCgroup(cg, func() error {
ioFiles, err := c.createGoferProcess(spec, conf, bundleDir)
if err != nil {
return err
}
// Start a new sandbox for this container. Any errors after this point
// must destroy the container.
c.Sandbox, err = sandbox.New(id, spec, conf, bundleDir, consoleSocket, userLog, ioFiles, cg)
return err
}); err != nil {
return nil, err
}
} else {
// This is sort of confusing. For a sandbox with a root
// container and a child container in it, runsc sees:
// * A container struct whose sandbox ID is equal to the
// container ID. This is the root container that is tied to
// the creation of the sandbox.
// * A container struct whose sandbox ID is equal to the above
// container/sandbox ID, but that has a different container
// ID. This is the child container.
sbid, ok := specutils.SandboxID(spec)
if !ok {
return nil, fmt.Errorf("no sandbox ID found when creating container")
}
log.Debugf("Creating new container %q in sandbox %q", c.ID, sbid)
// Find the sandbox associated with this ID.
sb, err := Load(conf.RootDir, sbid)
if err != nil {
return nil, err
}
c.Sandbox = sb.Sandbox
if err := c.Sandbox.CreateContainer(c.ID); err != nil {
return nil, err
}
}
c.changeStatus(Created)
// Save the metadata file.
if err := c.save(); err != nil {
return nil, err
}
// Write the PID file. Containerd considers the create complete after
// this file is created, so it must be the last thing we do.
if pidFile != "" {
if err := ioutil.WriteFile(pidFile, []byte(strconv.Itoa(c.SandboxPid())), 0644); err != nil {
return nil, fmt.Errorf("error writing PID file: %v", err)
}
}
cu.Release()
return c, nil
}
// Start starts running the containerized process inside the sandbox.
func (c *Container) Start(conf *boot.Config) error {
log.Debugf("Start container %q", c.ID)
unlock, err := c.lock()
if err != nil {
return err
}
defer unlock()
if err := c.requireStatus("start", Created); err != nil {
return err
}
// "If any prestart hook fails, the runtime MUST generate an error,
// stop and destroy the container" -OCI spec.
if c.Spec.Hooks != nil {
if err := executeHooks(c.Spec.Hooks.Prestart, c.State()); err != nil {
return err
}
}
if specutils.ShouldCreateSandbox(c.Spec) {
if err := c.Sandbox.StartRoot(c.Spec, conf); err != nil {
return err
}
} else {
// Set up rootfs and mounts. setupFS returns a new mount list with destination
// paths resolved. Replace the original spec with the new mount list and start
// the container.
cleanMounts, err := setupFS(c.Spec, conf, c.BundleDir)
if err != nil {
return fmt.Errorf("setup mounts: %v", err)
}
c.Spec.Mounts = cleanMounts
if err := specutils.WriteCleanSpec(c.BundleDir, c.Spec); err != nil {
return fmt.Errorf("writing clean spec: %v", err)
}
// Join the cgroup before starting the gofer process to ensure it's part of
// the cgroup from the start (and so are all its child processes).
if err := runInCgroup(c.Sandbox.Cgroup, func() error {
// Create the gofer process.
ioFiles, err := c.createGoferProcess(c.Spec, conf, c.BundleDir)
if err != nil {
return err
}
return c.Sandbox.StartContainer(c.Spec, conf, c.ID, ioFiles)
}); err != nil {
return err
}
}
// "If any poststart hook fails, the runtime MUST log a warning, but
// the remaining hooks and lifecycle continue as if the hook had
// succeeded" -OCI spec.
if c.Spec.Hooks != nil {
executeHooksBestEffort(c.Spec.Hooks.Poststart, c.State())
}
c.changeStatus(Running)
return c.save()
}
// Restore takes a container and replaces its kernel and file system
// to restore a container from its state file.
func (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile string) error {
log.Debugf("Restore container %q", c.ID)
unlock, err := c.lock()
if err != nil {
return err
}
defer unlock()
if err := c.requireStatus("restore", Created); err != nil {
return err
}
if err := c.Sandbox.Restore(c.ID, spec, conf, restoreFile); err != nil {
return err
}
c.changeStatus(Running)
return c.save()
}
// Run is a helper that calls Create + Start + Wait.
func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile, userLog string) (syscall.WaitStatus, error) {
log.Debugf("Run container %q in root dir: %s", id, conf.RootDir)
c, err := Create(id, spec, conf, bundleDir, consoleSocket, pidFile, userLog)
if err != nil {
return 0, fmt.Errorf("creating container: %v", err)
}
// Clean up the partially created container if an error occurs.
// Any errors returned by Destroy() itself are ignored.
defer c.Destroy()
if err := c.Start(conf); err != nil {
return 0, fmt.Errorf("starting container: %v", err)
}
return c.Wait()
}
// Execute runs the specified command in the container. It returns the PID of
// the newly created process.
func (c *Container) Execute(args *control.ExecArgs) (int32, error) {
log.Debugf("Execute in container %q, args: %+v", c.ID, args)
if err := c.requireStatus("execute in", Created, Running); err != nil {
return 0, err
}
args.ContainerID = c.ID
return c.Sandbox.Execute(args)
}
// Event returns events for the container.
func (c *Container) Event() (*boot.Event, error) {
log.Debugf("Getting events for container %q", c.ID)
if err := c.requireStatus("get events for", Created, Running, Paused); err != nil {
return nil, err
}
return c.Sandbox.Event(c.ID)
}
// SandboxPid returns the Pid of the sandbox the container is running in, or -1 if the
// container is not running.
func (c *Container) SandboxPid() int {
if err := c.requireStatus("get PID", Created, Running, Paused); err != nil {
return -1
}
return c.Sandbox.Pid
}
// Wait waits for the container to exit, and returns its WaitStatus.
// Waiting on a stopped container is still needed to retrieve its exit status;
// in that case Wait returns immediately.
func (c *Container) Wait() (syscall.WaitStatus, error) {
log.Debugf("Wait on container %q", c.ID)
return c.Sandbox.Wait(c.ID)
}
// WaitRootPID waits for process 'pid' in the sandbox's PID namespace and
// returns its WaitStatus.
func (c *Container) WaitRootPID(pid int32, clearStatus bool) (syscall.WaitStatus, error) {
log.Debugf("Wait on PID %d in sandbox %q", pid, c.Sandbox.ID)
if !c.isSandboxRunning() {
return 0, fmt.Errorf("sandbox is not running")
}
return c.Sandbox.WaitPID(c.Sandbox.ID, pid, clearStatus)
}
// WaitPID waits for process 'pid' in the container's PID namespace and returns
// its WaitStatus.
func (c *Container) WaitPID(pid int32, clearStatus bool) (syscall.WaitStatus, error) {
log.Debugf("Wait on PID %d in container %q", pid, c.ID)
if !c.isSandboxRunning() {
return 0, fmt.Errorf("sandbox is not running")
}
return c.Sandbox.WaitPID(c.ID, pid, clearStatus)
}
// SignalContainer sends the signal to the container. If all is true and signal
// is SIGKILL, then waits for all processes to exit before returning.
// SignalContainer returns an error if the container is already stopped.
// TODO: Distinguish different error types.
func (c *Container) SignalContainer(sig syscall.Signal, all bool) error {
log.Debugf("Signal container %q: %v", c.ID, sig)
// Signaling container in Stopped state is allowed. When all=false,
// an error will be returned anyway; when all=true, this allows
// sending signal to other processes inside the container even
// after the init process exits. This is especially useful for
// container cleanup.
if err := c.requireStatus("signal", Running, Stopped); err != nil {
return err
}
if !c.isSandboxRunning() {
return fmt.Errorf("sandbox is not running")
}
return c.Sandbox.SignalContainer(c.ID, sig, all)
}
// SignalProcess sends sig to a specific process in the container.
func (c *Container) SignalProcess(sig syscall.Signal, pid int32) error {
log.Debugf("Signal process %d in container %q: %v", pid, c.ID, sig)
if err := c.requireStatus("signal a process inside", Running); err != nil {
return err
}
if !c.isSandboxRunning() {
return fmt.Errorf("sandbox is not running")
}
return c.Sandbox.SignalProcess(c.ID, int32(pid), sig, false)
}
// ForwardSignals forwards all signals received by the current process to the
// container process inside the sandbox. It returns a function that will stop
// forwarding signals.
func (c *Container) ForwardSignals(pid int32, fgProcess bool) func() {
log.Debugf("Forwarding all signals to container %q PID %d fgProcess=%t", c.ID, pid, fgProcess)
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh)
go func() {
for s := range sigCh {
log.Debugf("Forwarding signal %d to container %q PID %d fgProcess=%t", s, c.ID, pid, fgProcess)
if err := c.Sandbox.SignalProcess(c.ID, pid, s.(syscall.Signal), fgProcess); err != nil {
log.Warningf("error forwarding signal %d to container %q: %v", s, c.ID, err)
}
}
log.Debugf("Done forwarding signals to container %q PID %d fgProcess=%t", c.ID, pid, fgProcess)
}()
return func() {
signal.Stop(sigCh)
close(sigCh)
}
}
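// withForwardedSignals is an illustrative wrapper, not part of the original
// API: it shows the intended use of ForwardSignals, where the returned stop
// function is deferred so forwarding lasts exactly as long as fn runs.
func (c *Container) withForwardedSignals(pid int32, fgProcess bool, fn func() error) error {
stop := c.ForwardSignals(pid, fgProcess)
defer stop()
return fn()
}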
// Checkpoint sends the checkpoint call to the container.
// The statefile will be written to f, the file at the specified image-path.
func (c *Container) Checkpoint(f *os.File) error {
log.Debugf("Checkpoint container %q", c.ID)
if err := c.requireStatus("checkpoint", Created, Running, Paused); err != nil {
return err
}
return c.Sandbox.Checkpoint(c.ID, f)
}
// Pause suspends the container and its kernel.
// The call only succeeds if the container's status is created or running.
func (c *Container) Pause() error {
log.Debugf("Pausing container %q", c.ID)
unlock, err := c.lock()
if err != nil {
return err
}
defer unlock()
if c.Status != Created && c.Status != Running {
return fmt.Errorf("cannot pause container %q in state %v", c.ID, c.Status)
}
if err := c.Sandbox.Pause(c.ID); err != nil {
return fmt.Errorf("pausing container: %v", err)
}
c.changeStatus(Paused)
return c.save()
}
// Resume unpauses the container and its kernel.
// The call only succeeds if the container's status is paused.
func (c *Container) Resume() error {
log.Debugf("Resuming container %q", c.ID)
unlock, err := c.lock()
if err != nil {
return err
}
defer unlock()
if c.Status != Paused {
return fmt.Errorf("cannot resume container %q in state %v", c.ID, c.Status)
}
if err := c.Sandbox.Resume(c.ID); err != nil {
return fmt.Errorf("resuming container: %v", err)
}
c.changeStatus(Running)
return c.save()
}
// State returns the metadata of the container.
func (c *Container) State() specs.State {
return specs.State{
Version: specs.Version,
ID: c.ID,
Status: c.Status.String(),
Pid: c.SandboxPid(),
Bundle: c.BundleDir,
}
}
// Processes retrieves the list of processes and associated metadata inside a
// container.
func (c *Container) Processes() ([]*control.Process, error) {
if err := c.requireStatus("get processes of", Running, Paused); err != nil {
return nil, err
}
return c.Sandbox.Processes(c.ID)
}
// Destroy stops all processes and frees all resources associated with the
// container.
func (c *Container) Destroy() error {
log.Debugf("Destroy container %q", c.ID)
// We must perform the following cleanup steps:
// * stop the container and gofer processes,
// * remove the container filesystem on the host, and
// * delete the container metadata directory.
//
// It's possible for one or more of these steps to fail, but we should
// do our best to perform all of the cleanups. Hence, we keep a slice
// of errors and return their concatenation.
var errs []string
if err := c.stop(); err != nil {
err = fmt.Errorf("stopping container: %v", err)
log.Warningf("%v", err)
errs = append(errs, err.Error())
}
if err := destroyFS(c.Spec); err != nil {
err = fmt.Errorf("destroying container fs: %v", err)
log.Warningf("%v", err)
errs = append(errs, err.Error())
}
if err := os.RemoveAll(c.Root); err != nil && !os.IsNotExist(err) {
err = fmt.Errorf("deleting container root directory %q: %v", c.Root, err)
log.Warningf("%v", err)
errs = append(errs, err.Error())
}
c.changeStatus(Stopped)
// "If any poststop hook fails, the runtime MUST log a warning, but the
// remaining hooks and lifecycle continue as if the hook had succeeded" -OCI spec.
// Based on the OCI, "The post-stop hooks MUST be called after the container is
// deleted but before the delete operation returns"
// Run it here to:
// 1) Conform to the OCI.
// 2) Make sure it only runs once: since the root has been deleted, the container
// can't be loaded again.
if c.Spec.Hooks != nil {
executeHooksBestEffort(c.Spec.Hooks.Poststop, c.State())
}
if len(errs) == 0 {
return nil
}
return fmt.Errorf("%s", strings.Join(errs, "\n"))
}
// save saves the container metadata to a file.
//
// Precondition: container must be locked with container.lock().
func (c *Container) save() error {
log.Debugf("Save container %q", c.ID)
metaFile := filepath.Join(c.Root, metadataFilename)
meta, err := json.Marshal(c)
if err != nil {
return fmt.Errorf("invalid container metadata: %v", err)
}
if err := ioutil.WriteFile(metaFile, meta, 0640); err != nil {
return fmt.Errorf("writing container metadata: %v", err)
}
return nil
}
// stop stops the container (for regular containers) or the sandbox (for
// root containers), and waits for the container or sandbox and the gofer
// to stop. If any of them doesn't stop before timeout, an error is returned.
func (c *Container) stop() error {
var cgroup *cgroup.Cgroup
if c.Sandbox != nil {
log.Debugf("Destroying container %q", c.ID)
if err := c.Sandbox.DestroyContainer(c.ID); err != nil {
return fmt.Errorf("destroying container %q: %v", c.ID, err)
}
// Only uninstall cgroup for sandbox stop.
if c.Sandbox.IsRootContainer(c.ID) {
cgroup = c.Sandbox.Cgroup
}
// Only set sandbox to nil after it has been told to destroy the container.
c.Sandbox = nil
}
// Try killing gofer if it does not exit with container.
if c.GoferPid != 0 {
log.Debugf("Killing gofer for container %q, PID: %d", c.ID, c.GoferPid)
if err := syscall.Kill(c.GoferPid, syscall.SIGKILL); err != nil {
// The gofer may already be stopped, log the error.
log.Warningf("Error sending signal %d to gofer %d: %v", syscall.SIGKILL, c.GoferPid, err)
}
}
if err := c.waitForStopped(); err != nil {
return err
}
// Gofer is running in cgroups, so Cgroup.Uninstall has to be called after it.
if cgroup != nil {
if err := cgroup.Uninstall(); err != nil {
return err
}
}
return nil
}
func (c *Container) waitForStopped() error {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)
op := func() error {
if c.isSandboxRunning() {
if err := c.SignalContainer(syscall.Signal(0), false); err == nil {
return fmt.Errorf("container is still running")
}
}
if c.GoferPid == 0 {
return nil
}
if c.goferIsChild {
// The gofer process is a child of the current process,
// so we can wait on it and collect its zombie.
wpid, err := syscall.Wait4(int(c.GoferPid), nil, syscall.WNOHANG, nil)
if err != nil {
return fmt.Errorf("error waiting the gofer process: %v", err)
}
if wpid == 0 {
return fmt.Errorf("gofer is still running")
}
} else if err := syscall.Kill(c.GoferPid, 0); err == nil {
return fmt.Errorf("gofer is still running")
}
c.GoferPid = 0
return nil
}
return backoff.Retry(op, b)
}
func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundleDir string) ([]*os.File, error) {
// Start with the general config flags.
args := conf.ToFlags()
var goferEnds []*os.File
// nextFD is the next available file descriptor for the gofer process.
// It starts at 3 because 0-2 are used by stdin/stdout/stderr.
nextFD := 3
if conf.LogFilename != "" {
logFile, err := os.OpenFile(conf.LogFilename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, fmt.Errorf("opening log file %q: %v", conf.LogFilename, err)
}
defer logFile.Close()
goferEnds = append(goferEnds, logFile)
args = append(args, "--log-fd="+strconv.Itoa(nextFD))
nextFD++
}
if conf.DebugLog != "" {
debugLogFile, err := specutils.DebugLogFile(conf.DebugLog, "gofer")
if err != nil {
return nil, fmt.Errorf("opening debug log file in %q: %v", conf.DebugLog, err)
}
defer debugLogFile.Close()
goferEnds = append(goferEnds, debugLogFile)
args = append(args, "--debug-log-fd="+strconv.Itoa(nextFD))
nextFD++
}
args = append(args, "gofer", "--bundle", bundleDir)
if conf.Overlay {
args = append(args, "--panic-on-write=true")
}
// Open the spec file to donate to the sandbox.
specFile, err := specutils.OpenCleanSpec(bundleDir)
if err != nil {
return nil, fmt.Errorf("opening spec file: %v", err)
}
defer specFile.Close()
goferEnds = append(goferEnds, specFile)
args = append(args, "--spec-fd="+strconv.Itoa(nextFD))
nextFD++
// Add root mount and then add any other additional mounts.
mountCount := 1
// Add additional mounts.
for _, m := range spec.Mounts {
if specutils.Is9PMount(m) {
mountCount++
}
}
sandEnds := make([]*os.File, 0, mountCount)
for i := 0; i < mountCount; i++ {
fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
if err != nil {
return nil, err
}
sandEnds = append(sandEnds, os.NewFile(uintptr(fds[0]), "sandbox IO FD"))
goferEnd := os.NewFile(uintptr(fds[1]), "gofer IO FD")
defer goferEnd.Close()
goferEnds = append(goferEnds, goferEnd)
args = append(args, fmt.Sprintf("--io-fds=%d", nextFD))
nextFD++
}
binPath := specutils.ExePath
cmd := exec.Command(binPath, args...)
cmd.ExtraFiles = goferEnds
cmd.Args[0] = "runsc-gofer"
// Enter new namespaces to isolate from the rest of the system. Don't unshare
// cgroup because gofer is added to a cgroup in the caller's namespace.
nss := []specs.LinuxNamespace{
{Type: specs.IPCNamespace},
{Type: specs.MountNamespace},
{Type: specs.NetworkNamespace},
{Type: specs.PIDNamespace},
{Type: specs.UTSNamespace},
}
// Setup any uid/gid mappings, and create or join the configured user
// namespace so the gofer's view of the filesystem aligns with the
// users in the sandbox.
nss = append(nss, specutils.FilterNS([]specs.LinuxNamespaceType{specs.UserNamespace}, spec)...)
specutils.SetUIDGIDMappings(cmd, spec)
// Start the gofer in the given namespace.
log.Debugf("Starting gofer: %s %v", binPath, args)
if err := specutils.StartInNS(cmd, nss); err != nil {
return nil, err
}
log.Infof("Gofer started, PID: %d", cmd.Process.Pid)
c.GoferPid = cmd.Process.Pid
c.goferIsChild = true
return sandEnds, nil
}
// changeStatus transitions from one status to another ensuring that the
// transition is valid.
func (c *Container) changeStatus(s Status) {
switch s {
case Creating:
// Initial state, never transitions to it.
panic(fmt.Sprintf("invalid state transition: %v => %v", c.Status, s))
case Created:
if c.Status != Creating {
panic(fmt.Sprintf("invalid state transition: %v => %v", c.Status, s))
}
if c.Sandbox == nil {
panic("sandbox cannot be nil")
}
case Paused:
if c.Status != Running {
panic(fmt.Sprintf("invalid state transition: %v => %v", c.Status, s))
}
if c.Sandbox == nil {
panic("sandbox cannot be nil")
}
case Running:
if c.Status != Created && c.Status != Paused {
panic(fmt.Sprintf("invalid state transition: %v => %v", c.Status, s))
}
if c.Sandbox == nil {
panic("sandbox cannot be nil")
}
case Stopped:
if c.Status != Creating && c.Status != Created && c.Status != Running && c.Status != Stopped {
panic(fmt.Sprintf("invalid state transition: %v => %v", c.Status, s))
}
default:
panic(fmt.Sprintf("invalid new state: %v", s))
}
c.Status = s
}
func (c *Container) isSandboxRunning() bool {
return c.Sandbox != nil && c.Sandbox.IsRunning()
}
func (c *Container) requireStatus(action string, statuses ...Status) error {
for _, s := range statuses {
if c.Status == s {
return nil
}
}
return fmt.Errorf("cannot %s container %q in state %s", action, c.ID, c.Status)
}
// lock takes a file lock on the container metadata lock file.
func (c *Container) lock() (func() error, error) {
return lockContainerMetadata(filepath.Join(c.Root, c.ID))
}
// lockContainerMetadata takes a file lock on the metadata lock file in the
// given container root directory.
func lockContainerMetadata(containerRootDir string) (func() error, error) {
if err := os.MkdirAll(containerRootDir, 0711); err != nil {
return nil, fmt.Errorf("creating container root directory %q: %v", containerRootDir, err)
}
f := filepath.Join(containerRootDir, metadataLockFilename)
l := flock.NewFlock(f)
if err := l.Lock(); err != nil {
return nil, fmt.Errorf("acquiring lock on container lock file %q: %v", f, err)
}
return l.Unlock, nil
}
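// lockedSave is an illustrative helper, not part of the original code: it
// shows the lock / defer unlock / mutate / save pattern that Create, Start,
// Pause and Resume all follow, which satisfies save's precondition.
func (c *Container) lockedSave(mutate func(*Container)) error {
unlock, err := c.lock()
if err != nil {
return err
}
defer unlock()
mutate(c)
return c.save()
}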
// runInCgroup executes fn inside the specified cgroup. If cg is nil, execute
// it in the current context.
func runInCgroup(cg *cgroup.Cgroup, fn func() error) error {
if cg == nil {
return fn()
}
restore, err := cg.Join()
defer restore()
if err != nil {
return err
}
return fn()
}
|
[
"\"USER\""
] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
go
| 1 | 0 | |
tests/src/integration/validate-packages-in-manifest/validate-packages-in-manifest_test.go
|
// +build integration
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tests
import (
"github.com/apache/incubator-openwhisk-wskdeploy/tests/src/integration/common"
"github.com/stretchr/testify/assert"
"os"
"testing"
)
func TestPackagesInManifest(t *testing.T) {
wskdeploy := common.NewWskdeploy()
_, err := wskdeploy.Deploy(manifestPath, deploymentPath)
assert.Equal(t, nil, err, "Failed to deploy based on the manifest and deployment files.")
_, err = wskdeploy.Undeploy(manifestPath, deploymentPath)
assert.Equal(t, nil, err, "Failed to undeploy based on the manifest and deployment files.")
}
var (
manifestPath = os.Getenv("GOPATH") + "/src/github.com/apache/incubator-openwhisk-wskdeploy/tests/src/integration/validate-packages-in-manifest/manifest.yaml"
deploymentPath = os.Getenv("GOPATH") + "/src/github.com/apache/incubator-openwhisk-wskdeploy/tests/src/integration/validate-packages-in-manifest/deployment.yaml"
)
|
[
"\"GOPATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
vendor/github.com/gonum/plot/vg/fonts/mk-fonts.go
|
// Copyright ©2016 The gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"archive/tar"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
)
const (
baseUrl = "https://fedorahosted.org/releases/l/i/liberation-fonts/"
fontsName = "liberation-fonts-ttf-2.00.1"
)
func main() {
log.SetPrefix("mk-vg-fonts: ")
log.SetFlags(0)
tmpdir, err := ioutil.TempDir("", "gonum-mk-fonts-")
if err != nil {
log.Fatalf("error creating temporary directory: %v\n", err)
}
defer os.RemoveAll(tmpdir)
tarf, err := os.Create(filepath.Join(tmpdir, fontsName+".tar.gz"))
if err != nil {
log.Fatalf("error creating local fonts tar file: %v\n", err)
}
defer tarf.Close()
urlSrc := baseUrl + fontsName + ".tar.gz"
log.Printf("downloading [%v]...\n", urlSrc)
resp, err := http.DefaultClient.Get(urlSrc)
if err != nil {
log.Fatalf("error getting url %v: %v\n", urlSrc, err)
}
defer resp.Body.Close()
err = untar(tmpdir, resp.Body)
if err != nil {
log.Fatalf("error untarring: %v\n", err)
}
fontsDir := getFontsDir()
err = exec.Command("go", "get", "github.com/jteeuwen/go-bindata/...").Run()
if err != nil {
log.Fatalf("error go-getting go-bindata: %v\n", err)
}
fname := filepath.Join(fontsDir, "liberation_fonts_generated.go")
log.Printf("generating fonts: %v\n", fname)
cmd := exec.Command("go-bindata", "-pkg=fonts", "-o", fname, ".")
cmd.Dir = filepath.Join(tmpdir, fontsName)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
if err != nil {
log.Fatalf("error generating asset-data: %v\n", err)
}
err = prependHeaders(fname)
if err != nil {
log.Fatalf("error prepending headers to [%s]: %v\n", fname, err)
}
cmd = exec.Command("gofmt", "-w", fname)
cmd.Dir = fontsDir
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
if err != nil {
log.Fatalf("error running gofmt on %v: %v\n", fname, err)
}
}
func getFontsDir() string {
dir := "github.com/gonum/plot/vg"
gopath := os.Getenv("GOPATH")
if gopath == "" {
log.Fatal("no GOPATH environment variable")
}
for _, p := range strings.Split(gopath, string(os.PathListSeparator)) {
if p == "" {
continue
}
n := filepath.Join(p, "src", dir, "fonts")
_, err := os.Stat(n)
if err != nil {
continue
}
return n
}
log.Fatalf("could not find %q anywhere under $GOPATH", dir)
panic("unreachable")
}
func untar(odir string, r io.Reader) error {
gz, err := gzip.NewReader(r)
if err != nil {
return err
}
defer gz.Close()
tr := tar.NewReader(gz)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
log.Printf("error: %v\n", err)
continue
}
switch hdr.Typeflag {
case tar.TypeDir:
dir := filepath.Join(odir, hdr.Name)
err = os.MkdirAll(dir, 0755)
if err != nil {
return err
}
continue
case tar.TypeReg, tar.TypeRegA:
// ok
default:
// Unknown entry type: report it instead of silently returning a nil error.
log.Printf("error: unhandled tar entry type %v for %q\n", hdr.Typeflag, hdr.Name)
return fmt.Errorf("untar: unhandled tar entry type %v for %q", hdr.Typeflag, hdr.Name)
}
oname := filepath.Join(odir, hdr.Name)
dir := filepath.Dir(oname)
err = os.MkdirAll(dir, 0755)
if err != nil {
return err
}
o, err := os.OpenFile(
oname,
os.O_WRONLY|os.O_CREATE,
os.FileMode(hdr.Mode),
)
if err != nil {
return err
}
defer o.Close()
_, err = io.Copy(o, tr)
if err != nil {
return err
}
err = o.Close()
if err != nil {
return err
}
}
return nil
}
func prependHeaders(name string) error {
src, err := os.Open(name)
if err != nil {
return err
}
defer src.Close()
dst, err := os.Create(name + ".tmp")
if err != nil {
return err
}
defer dst.Close()
_, err = dst.WriteString(`// Automatically generated by vg/fonts/mk-fonts.go
// DO NOT EDIT.
// Copyright ©2016 The gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Digitized data copyright (c) 2010 Google Corporation
// with Reserved Font Arimo, Tinos and Cousine.
// Copyright (c) 2012 Red Hat, Inc.
// with Reserved Font Name Liberation.
//
// This Font Software is licensed under the SIL Open Font License,
// Version 1.1.
`)
if err != nil {
return err
}
_, err = io.Copy(dst, src)
if err != nil {
return err
}
err = src.Close()
if err != nil {
return err
}
err = dst.Close()
if err != nil {
return err
}
return os.Rename(dst.Name(), src.Name())
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
bert-large-cased-whole-word-masking.py
|
import os
# -- GPU TO USE --
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# -- PARAMETERS --
MODEL_NAME = 'bert-large-cased-whole-word-masking'
MODEL_PREFIX = 'Bert'
DATASET = 'union'
LANGS = ['en']
TRAIN_BATCH_SIZE = 4
ACCUMULATION_STEPS = 4
LEARN_RATE = 1e-5
EPOCHS = 24
WARMUP_STEPS = 1024
SEQUENCE_LENGTH = 512
# ----------------
import json
from transformers import *
from torch.utils.data import DataLoader, RandomSampler
from util.train import training
from util.dataset import load_semeval, tokenize, dataset
from util.val_datasets import val_datasets
from util.hotload import load_model
from itertools import chain
EXPERIMENT = '{}-{:d}-{:.0E}-{}-{}'.format(MODEL_NAME, TRAIN_BATCH_SIZE * ACCUMULATION_STEPS, LEARN_RATE, DATASET,
'_'.join(LANGS))
# Create log and dump config
output_dir = 'logs/{}'.format(EXPERIMENT)
if os.path.exists(output_dir):
raise RuntimeError('Experiment already run!')
else:
os.makedirs(output_dir)
with open(output_dir + '/parameters.json', 'w+') as config_file:
json.dump({
'MODEL_PREFIX': MODEL_PREFIX,
'MODEL_NAME': MODEL_NAME,
'DATASET': DATASET,
'LANGS': LANGS,
'TRAIN_BATCH_SIZE': TRAIN_BATCH_SIZE,
'ACCUMULATION_STEPS': ACCUMULATION_STEPS,
'LEARN_RATE': LEARN_RATE,
'EPOCHS': EPOCHS,
'WARMUP_STEPS': WARMUP_STEPS,
'SEQUENCE_LENGTH': SEQUENCE_LENGTH,
}, config_file, sort_keys=True, indent=4, separators=(',', ': '))
# Load and initialize model
MODEL_CLASS = load_model(MODEL_PREFIX)
TOKENIZER = MODEL_CLASS[0].from_pretrained(MODEL_NAME)
CONFIG = MODEL_CLASS[1].from_pretrained(MODEL_NAME, num_labels=3)
MODEL = MODEL_CLASS[2].from_pretrained(MODEL_NAME, config=CONFIG)
# Load training data
train_dataset = dataset(
tokenize(chain(*(load_semeval(DATASET, 'train', lang) for lang in LANGS)), TOKENIZER, SEQUENCE_LENGTH))
train_sampler = RandomSampler(train_dataset)
train_dataset = DataLoader(train_dataset, sampler=train_sampler, batch_size=TRAIN_BATCH_SIZE, drop_last=True)
# Run Training
training(
train_dataset,
val_datasets(TOKENIZER, SEQUENCE_LENGTH),
MODEL,
EXPERIMENT,
LEARN_RATE,
WARMUP_STEPS,
TRAIN_BATCH_SIZE,
EPOCHS,
ACCUMULATION_STEPS
)
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
eval/cifar100_vgg16.py
|
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/fchollet/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(8)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(80)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(800)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
import pandas as pd
import keras
from keras.datasets import cifar100
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.layers import Dropout
from keras.layers import Flatten, Dense, Activation
from keras import optimizers
from keras import regularizers
from keras.callbacks import LearningRateScheduler
from sklearn.model_selection import StratifiedShuffleSplit
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
import math
from scipy.stats import binom
import scipy
class VGG16_CIFAR100:
def __init__(self):
self.num_classes = 100
self.weight_decay = 0.0005
self.x_shape = [32,32,3]
self.batch_size = 128
self.epoches = 250
self.learning_rate = 0.1
self.lr_decay = 1e-6
# Function to create dataset for training and validation of model
def create_dataset(self):
num_classes = self.num_classes
# Create Train and Test datasets:
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalize the data
x_train, x_test = self.normalize(x_train, x_test)
# Create one-hot encodings
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
return x_train, y_train, x_test, y_test
# Function to normalize train and validation datasets
def normalize(self,X_train,X_test):
# Compute Mean
mean = np.mean(X_train,axis=(0, 1, 2, 3))
# Compute Standard Deviation
std = np.std(X_train, axis=(0, 1, 2, 3))
# Normalize the data
X_train = (X_train-mean)/(std+1e-7)
X_test = (X_test-mean)/(std+1e-7)
return X_train, X_test
# Function to build the model
def buildmodel(self):
weight_decay = self.weight_decay
num_classes = self.num_classes
x_shape = self.x_shape
model = Sequential()
# First group of convolutional layer
model.add(Conv2D(64, (3, 3), padding='same',
input_shape = x_shape,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# Second group of convolutional layer
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# Third group of convolutional layer
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# Fourth group of convolutional layer
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# Fifth group of convolutional layer
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Two Fully connected layer
model.add(Flatten())
model.add(Dense(512, kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
return model
# Function to train the model
def model_train(self, model, x_train, y_train, x_test, y_test, weights):
if weights: # If model weights are already available
model.load_weights('cifar100_vgg16.h5')
else:
# Training parameters
batch_size = self.batch_size
number_epoches = self.epoches
learning_rate = self.learning_rate
lr_decay = self.lr_decay
# Data augmentation
dataaugmentation = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=15, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
dataaugmentation.fit(x_train)
# Optimization details
sgd = optimizers.SGD(lr=0.0, decay=lr_decay, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])
# Function to reduce learning rate by half after every 25 epochs
def step_decay(epoch):
# LearningRate = InitialLearningRate * DropRate^floor(Epoch / EpochDrop)
initial_lrate = 0.1
drop = 0.5
epochs_drop = 25.0
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
# Callback for learning rate schedule
lrate = LearningRateScheduler(step_decay)
callbacks_list = [lrate]
# spe = Steps per epoch
spe = x_train.shape[0] // batch_size
# Fit the model
model.fit_generator(dataaugmentation.flow(x_train, y_train,
batch_size = batch_size),
steps_per_epoch = spe, callbacks=callbacks_list,
epochs = number_epoches,
validation_data = (x_test, y_test))
# Save model weights
model.save_weights('cifar100_vgg16.h5')
return model
# Create class object
model_cifar100 = VGG16_CIFAR100()
# Training and validation datasets
x_train, y_train, x_test, y_test = model_cifar100.create_dataset()
# Create model
model = model_cifar100.buildmodel()
# Train the model
model = model_cifar100.model_train(model, x_train, y_train, x_test, y_test, weights = True)
# Prediction on test set
predict_test = model.predict(x_test)
# Get highest probability on test set
predict_test_prob = np.max(predict_test,1)
# 0 for correct prediction and 1 for wrong prediction
residuals = (np.argmax(predict_test,1) != np.argmax(y_test,1))
# Loss computation
loss = (-1)*((residuals*np.log10(predict_test_prob)) + ((1-residuals)*np.log(1-predict_test_prob)))
# Checking validation accuracy is matching with our calculations
Accuracy = ((10000 - sum(residuals))/10000)*100
print("Accuracy is: ", Accuracy)
# Splitting the validation dataset for training and testing SGR algorithm
sss = StratifiedShuffleSplit(n_splits=2, test_size=0.5, random_state=8)
for train_index, test_index in sss.split(x_test, y_test):
sgr_x_train, sgr_x_test = x_test[train_index], x_test[test_index]
sgr_y_train, sgr_y_test = y_test[train_index], y_test[test_index]
# Prediction on SGR train set
predict_sgr_train = model.predict(sgr_x_train)
# Get highest probability on SGR train set
predict_sgr_train_prob = np.max(predict_sgr_train,1)
# 0 for wrong prediction and 1 for correct prediction for SGR train set
residuals_sgr_train = (np.argmax(predict_sgr_train,1)!=np.argmax(sgr_y_train,1))
# Loss computation on SGR train set
loss_sgr_train = (-1)*((residuals_sgr_train*np.log10(predict_sgr_train_prob)) + ((1-residuals_sgr_train)*np.log(1-predict_sgr_train_prob)))
# Prediction on SGR test set
predict_sgr_test = model.predict(sgr_x_test)
# Get highest probability on SGR test set
predict_sgr_test_prob = np.max(predict_sgr_test,1)
# 0 for wrong prediction and 1 for correct prediction for SGR test set
residuals_sgr_test = (np.argmax(predict_sgr_test,1)!=np.argmax(sgr_y_test,1))
# Loss computation on SGR test set
loss_sgr_test = (-1)*((residuals_sgr_test*np.log10(predict_sgr_test_prob)) + ((1-residuals_sgr_test)*np.log(1-predict_sgr_test_prob)))
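# calculate_bound numerically inverts the binomial tail: given m samples with
# empirical risk `risk`, it bisects on y in [risk, 1] until the binomial CDF at
# k = floor(m * risk) equals delta, so the returned y is an upper confidence
# bound on the true risk that is violated with probability at most delta.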
def calculate_bound(delta, m, risk):
epsilon = 1e-7
x = risk # Lower bound
z = 1 # Upper bound
y = (x + z)/2 # mid point
epsilonhat = (-1*delta) + scipy.stats.binom.cdf(int(m*risk), m, y)
while abs(epsilonhat)>epsilon:
if epsilonhat>0:
x = y
else:
z = y
y = (x + z)/2
#print("x", x)
#print("y", y)
epsilonhat = (-1*delta) + scipy.stats.binom.cdf(int(m*risk), m, y)
#print(epsilonhat)
return y
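# SGR binary-searches over the sorted confidence scores for a threshold theta:
# at each step it measures the empirical risk of the samples whose confidence
# is at least theta, converts it into a guaranteed bound via calculate_bound
# (with delta split across the ceil(log2(m)) iterations), and tightens the
# threshold until the bound drops below the target risk.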
def SGR(targetrisk, delta, predict_sgr_train_prob, predict_sgr_test_prob, residuals_sgr_train, residuals_sgr_test):
# Number of training samples for SGR algorithm
m = len(residuals_sgr_train)
# Sort the probabilities
probs_idx_sorted = np.argsort(predict_sgr_train_prob)
zmin = 0
zmax = m-1
deltahat = delta/math.ceil(math.log2(m))
for i in range(math.ceil(math.log2(m) + 1)):
#print("iteration", i)
mid = math.ceil((zmin+zmax)/2)
mi = len(residuals_sgr_train[probs_idx_sorted[mid:]])
theta = predict_sgr_train_prob[probs_idx_sorted[mid]]
trainrisk = sum(residuals_sgr_train[probs_idx_sorted[mid:]])/mi
testrisk = (sum(residuals_sgr_test[predict_sgr_test_prob>=theta]))/(len(residuals_sgr_test[predict_sgr_test_prob>=theta])+1)
testcoverage = (len(residuals_sgr_test[predict_sgr_test_prob>=theta]))/(len(predict_sgr_test_prob))
bound = calculate_bound(deltahat, mi, trainrisk)
traincoverage = mi/m
if bound>targetrisk:
zmin = mid
else:
zmax = mid
return targetrisk, trainrisk, traincoverage, testrisk, testcoverage, bound
# Define confidence level parameter delta
delta = 0.001
desired_risk = []
train_risk = []
train_coverage = []
test_risk = []
test_coverage = []
risk_bound = []
# Different desired risk values
rstar = [0.02, 0.05, 0.10, 0.15, 0.20, 0.25]
# Testing the SGR algorithm for different desired risk values
for i in range(len(rstar)):
# Run SGR for the current desired risk value
desiredrisk, trainrisk, traincov, testrisk, testcov, riskbound = SGR(rstar[i],delta, predict_sgr_train_prob, predict_sgr_test_prob, residuals_sgr_train, residuals_sgr_test)
# Append the values to the list
desired_risk.append(desiredrisk)
train_risk.append(trainrisk)
train_coverage.append(traincov)
test_risk.append(testrisk)
test_coverage.append(testcov)
risk_bound.append(riskbound)
Result = [('Desired Risk', desired_risk) ,
('Train Risk', train_risk),
('Train Coverage', train_coverage),
('Test Risk', test_risk),
('Test Coverage', test_coverage),
('Risk bound', risk_bound)]
Result = pd.DataFrame.from_items(Result)
print(Result)
|
[] |
[] |
[
"PYTHONHASHSEED"
] |
[]
|
["PYTHONHASHSEED"]
|
python
| 1 | 0 | |
cmd/upspin/keygen.go
|
// Copyright 2016 The Upspin Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
// This file contains the implementation of the keygen command.
import (
"encoding/binary"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"upspin.io/errors"
"upspin.io/key/proquint"
"upspin.io/pack/ee"
)
func (s *State) keygen(args ...string) {
const help = `
Keygen creates a new Upspin key pair and stores the pair in local
files secret.upspinkey and public.upspinkey in $HOME/.ssh. Existing
key pairs are appended to $HOME/.ssh/secret2.upspinkey. Keygen does
not update the information in the key server; use the user -put
command for that.
New users should instead use the signup command to create their
first key. Keygen can be used to create new keys.
See the description for rotate for information about updating keys.
`
fs := flag.NewFlagSet("keygen", flag.ExitOnError)
fs.String("curve", "p256", "cryptographic curve `name`: p256, p384, or p521")
fs.String("secretseed", "", "128 bit secret `seed` in proquint format")
fs.String("where", filepath.Join(os.Getenv("HOME"), ".ssh"), "`directory` to store keys")
s.parseFlags(fs, args, help, "keygen [-curve=p256] [-secretseed=seed] [-where=$HOME/.ssh]")
if fs.NArg() != 0 {
fs.Usage()
}
s.keygenCommand(fs)
}
func (s *State) keygenCommand(fs *flag.FlagSet) {
curve := stringFlag(fs, "curve")
switch curve {
case "p256", "p384", "p521":
// ok
default:
log.Printf("no such curve %q", curve)
fs.Usage()
}
public, private, proquintStr, err := createKeys(curve, stringFlag(fs, "secretseed"))
if err != nil {
s.exitf("creating keys: %v", err)
}
where := stringFlag(fs, "where")
if where == "" {
s.exitf("-where must not be empty")
}
err = saveKeys(where)
if err != nil {
s.exitf("saving previous keys failed(%v); keys not generated", err)
}
err = writeKeys(where, public, private)
if err != nil {
s.exitf("writing keys: %v", err)
}
fmt.Println("Upspin private/public key pair written to:")
fmt.Printf("\t%s\n", filepath.Join(where, "public.upspinkey"))
fmt.Printf("\t%s\n", filepath.Join(where, "secret.upspinkey"))
fmt.Println("This key pair provides access to your Upspin identity and data.")
if proquintStr != "" {
fmt.Println("If you lose the keys you can re-create them by running this command:")
fmt.Printf("\tupspin keygen -secretseed %s\n", proquintStr)
fmt.Println("Write this command down and store it in a secure, private place.")
fmt.Println("Do not share your private key or this command with anyone.")
} else {
fmt.Println("Do not share your private key with anyone.")
}
fmt.Println()
}
func createKeys(curveName, secret string) (public string, private, proquintStr string, err error) {
// Pick secret 128 bits.
// TODO(ehg) Consider whether we are willing to ask users to write long seeds for P521.
b := make([]byte, 16)
if len(secret) > 0 {
if len(secret) != 47 || secret[5] != '-' {
log.Printf("expected secret like\n lusab-babad-gutih-tugad.gutuk-bisog-mudof-sakat\n"+
"not\n %s\nkey not generated", secret)
return "", "", "", errors.E("keygen", errors.Invalid, errors.Str("bad format for secret"))
}
for i := 0; i < 8; i++ {
binary.BigEndian.PutUint16(b[2*i:2*i+2], proquint.Decode([]byte((secret)[6*i:6*i+5])))
}
} else {
ee.GenEntropy(b)
proquints := make([]interface{}, 8)
for i := 0; i < 8; i++ {
proquints[i] = proquint.Encode(binary.BigEndian.Uint16(b[2*i : 2*i+2]))
}
proquintStr = fmt.Sprintf("%s-%s-%s-%s.%s-%s-%s-%s", proquints...)
// Ignore punctuation on input; this format is just to help the user keep their place.
}
pub, priv, err := ee.CreateKeys(curveName, b)
if err != nil {
return "", "", "", err
}
return string(pub), priv, proquintStr, nil
}
// writeKeyFile writes a single key to its file, removing the file
// beforehand if necessary due to permission errors.
func writeKeyFile(name, key string) error {
const create = os.O_RDWR | os.O_CREATE | os.O_TRUNC
fd, err := os.OpenFile(name, create, 0400)
if os.IsPermission(err) && os.Remove(name) == nil {
// Create may fail if file already exists and is unwritable,
// which is how it was created.
fd, err = os.OpenFile(name, create, 0400)
}
if err != nil {
return err
}
defer fd.Close()
_, err = fd.WriteString(key)
return err
}
// writeKeys saves both the public and private keys to their respective files.
func writeKeys(where, publicKey, privateKey string) error {
err := writeKeyFile(filepath.Join(where, "secret.upspinkey"), privateKey)
if err != nil {
return err
}
err = writeKeyFile(filepath.Join(where, "public.upspinkey"), publicKey)
if err != nil {
return err
}
return nil
}
func saveKeys(where string) error {
var (
publicFile = filepath.Join(where, "public.upspinkey")
privateFile = filepath.Join(where, "secret.upspinkey")
archiveFile = filepath.Join(where, "secret2.upspinkey")
)
// Read existing key pair.
private, err := ioutil.ReadFile(privateFile)
if os.IsNotExist(err) {
return nil // There is nothing we need to save.
}
if err != nil {
return err
}
public, err := ioutil.ReadFile(publicFile)
if err != nil {
return err // Halt. Existing files are corrupted and need manual attention.
}
// Write old key pair to archive file.
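// The archive is opened with O_APPEND, so each rotation adds another
// "# EE" block and the most recent old pair is always last in the file.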
archive, err := os.OpenFile(archiveFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
if err != nil {
return err // We don't have permission to archive old keys?
}
// TODO(ehg) add file date
_, err = fmt.Fprintf(archive, "# EE\n%s%s", public, private)
if err != nil {
return err
}
return archive.Close()
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
python/paddle/fluid/tests/unittests/test_dist_fleet_ctr.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
import tempfile
from test_dist_fleet_base import TestFleetBase
class TestDistMnistSync2x2(TestFleetBase):
def _setup_config(self):
self._mode = "sync"
self._reader = "pyreader"
def check_with_place(self,
model_file,
delta=1e-3,
check_error_log=False,
need_envs={}):
required_envs = {
"PATH": os.getenv("PATH", ""),
"PYTHONPATH": os.getenv("PYTHONPATH", ""),
"LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
"FLAGS_rpc_deadline": "5000", # 5sec to fail fast
"http_proxy": "",
"CPU_NUM": "2"
}
required_envs.update(need_envs)
if check_error_log:
required_envs["GLOG_v"] = "3"
required_envs["GLOG_logtostderr"] = "1"
tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs)
def test_dist_train(self):
self.check_with_place(
"dist_fleet_ctr.py", delta=1e-5, check_error_log=True)
class TestDistMnistAsync2x2(TestFleetBase):
def _setup_config(self):
self._mode = "async"
self._reader = "pyreader"
def check_with_place(self,
model_file,
delta=1e-3,
check_error_log=False,
need_envs={}):
required_envs = {
"PATH": os.getenv("PATH", ""),
"PYTHONPATH": os.getenv("PYTHONPATH", ""),
"LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
"FLAGS_rpc_deadline": "5000", # 5sec to fail fast
"http_proxy": "",
"CPU_NUM": "2"
}
required_envs.update(need_envs)
if check_error_log:
required_envs["GLOG_v"] = "3"
required_envs["GLOG_logtostderr"] = "1"
tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs)
def test_dist_train(self):
self.check_with_place(
"dist_fleet_ctr.py", delta=1e-5, check_error_log=True)
class TestDistMnistAsyncDataset2x2(TestFleetBase):
def _setup_config(self):
self._mode = "async"
self._reader = "dataset"
def check_with_place(self,
model_file,
delta=1e-3,
check_error_log=False,
need_envs={}):
required_envs = {
"PATH": os.getenv("PATH", ""),
"PYTHONPATH": os.getenv("PYTHONPATH", ""),
"LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
"FLAGS_rpc_deadline": "5000", # 5sec to fail fast
"http_proxy": "",
"SAVE_MODEL": "1",
"dump_param": "concat_0.tmp_0",
"dump_fields": "dnn-fc-3.tmp_0,dnn-fc-3.tmp_0@GRAD",
"dump_fields_path": tempfile.mkdtemp(),
"Debug": "1"
}
required_envs.update(need_envs)
if check_error_log:
required_envs["GLOG_v"] = "3"
required_envs["GLOG_logtostderr"] = "1"
tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs)
def test_dist_train(self):
self.check_with_place(
"dist_fleet_ctr.py", delta=1e-5, check_error_log=True)
class TestDistCtrHalfAsync2x2(TestFleetBase):
def _setup_config(self):
self._mode = "half_async"
self._reader = "pyreader"
def check_with_place(self,
model_file,
delta=1e-3,
check_error_log=False,
need_envs={}):
required_envs = {
"PATH": os.getenv("PATH", ""),
"PYTHONPATH": os.getenv("PYTHONPATH", ""),
"LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
"FLAGS_rpc_deadline": "30000", # 5sec to fail fast
"http_proxy": "",
"FLAGS_communicator_send_queue_size": "2",
"FLAGS_communicator_max_merge_var_num": "2",
"CPU_NUM": "2",
"SAVE_MODEL": "0"
}
required_envs.update(need_envs)
if check_error_log:
required_envs["GLOG_v"] = "3"
required_envs["GLOG_logtostderr"] = "1"
tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs)
def test_dist_train(self):
self.check_with_place(
"dist_fleet_ctr.py", delta=1e-5, check_error_log=True)
if __name__ == "__main__":
unittest.main()
|
[] |
[] |
[
"LD_LIBRARY_PATH",
"PATH",
"PYTHONPATH"
] |
[]
|
["LD_LIBRARY_PATH", "PATH", "PYTHONPATH"]
|
python
| 3 | 0 | |
train.py
|
# -*- coding: utf-8 -*-
"""
train DREAM
@TODO
- Optimizer Choosing
- Hyper-parameter tuning
"""
import constants
from config import Config
from dream import DreamModel
from data import Dataset, BasketConstructor
from utils import batchify, repackage_hidden
import os
import pdb
import torch
import pickle
import random
import numpy as np
from time import time
from math import ceil
from sklearn.model_selection import train_test_split
# CUDA environment variables
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0,3,2,1"
# Prepare input
bc = BasketConstructor(constants.RAW_DATA_DIR, constants.FEAT_DATA_DIR)
# Users' baskets
ub_basket = bc.get_baskets('prior', reconstruct = False)
# Users' reordered baskets, comment it if you do not need reorder prediction
ub_rbks = bc.get_baskets('prior', reconstruct = False, reordered = True)
# User's item history, comment it if you do not need reorder prediction
ub_ihis = bc.get_item_history('prior', reconstruct = False)
# Train test split
train_ub, test_ub, train_rbks, test_rbks, train_ihis, test_ihis = train_test_split(ub_basket, ub_rbks, ub_ihis, test_size = 0.2)
del ub_basket, ub_rbks, ub_ihis
# train_ub, test_ub = Dataset(train_ub), Dataset(test_ub)
# reorder dream input data, comment it and use the above line if you do not need reorder prediction
train_ub, test_ub = Dataset(train_ub, train_rbks, train_ihis), Dataset(test_ub, test_rbks, test_ihis)
del train_rbks, test_rbks, train_ihis, test_ihis
# Model config
dr_config = Config(constants.DREAM_CONFIG)
dr_model = DreamModel(dr_config)
if dr_config.cuda:
dr_model.cuda()
# Optimizer
optim = torch.optim.Adam(dr_model.parameters(), lr = dr_config.learning_rate)
# optim = torch.optim.Adadelta(dr_model.parameters())
# optim = torch.optim.SGD(dr_model.parameters(), lr=dr_config.learning_rate, momentum=0.9)
def reorder_bpr_loss(re_x, his_x, dynamic_user, item_embedding, config):
'''
loss function for reorder prediction
re_x: padded reordered baskets
his_x: padded history of previously bought items
'''
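# Note: negatives here are sampled from the user's own purchase history h[t]
# (see random.choice(h[t]) below), unlike bpr_loss which samples uniformly
# from the whole product catalogue.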
nll = 0
ub_seqs = []
for u, h, du in zip(re_x, his_x, dynamic_user):
du_p_product = torch.mm(du, item_embedding.t()) # shape: max_len, num_item
nll_u = [] # nll for user
for t, basket_t in enumerate(u):
if basket_t[0] != 0:
pos_idx = torch.cuda.LongTensor(basket_t) if config.cuda else torch.LongTensor(basket_t)
# Sample negative products
neg = [random.choice(h[t]) for _ in range(len(basket_t))] # replacement
# neg = random.sample(range(1, config.num_product), len(basket_t)) # without replacement
neg_idx = torch.cuda.LongTensor(neg) if config.cuda else torch.LongTensor(neg)
# Score p(u, t, v > v')
score = du_p_product[t - 1][pos_idx] - du_p_product[t - 1][neg_idx]
# Average Negative log likelihood for basket_t
nll_u.append(- torch.mean(torch.nn.LogSigmoid()(score)))
nll += torch.mean(torch.cat(nll_u))
return nll
def bpr_loss(x, dynamic_user, item_embedding, config):
'''
Bayesian personalized ranking (BPR) loss for implicit feedback
parameters:
- x: batch of users' baskets
- dynamic_user: batch of users' dynamic representations
- item_embedding: item_embedding matrix
- config: model configuration
'''
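# BPR maximizes log(sigmoid(score_pos - score_neg)) over (bought item, sampled
# negative) pairs; the loop below accumulates the negated mean of that term,
# so minimizing nll pushes purchased items above random catalogue items.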
nll = 0
ub_seqs = []
for u,du in zip(x, dynamic_user):
du_p_product = torch.mm(du, item_embedding.t()) # shape: max_len, num_item
nll_u = [] # nll for user
for t, basket_t in enumerate(u):
if basket_t[0] != 0 and t != 0:
pos_idx = torch.cuda.LongTensor(basket_t) if config.cuda else torch.LongTensor(basket_t)
# Sample negative products
neg = [random.choice(range(1, config.num_product)) for _ in range(len(basket_t))] # replacement
# neg = random.sample(range(1, config.num_product), len(basket_t)) # without replacement
neg_idx = torch.cuda.LongTensor(neg) if config.cuda else torch.LongTensor(neg)
# Score p(u, t, v > v')
score = du_p_product[t - 1][pos_idx] - du_p_product[t - 1][neg_idx]
#Average Negative log likelihood for basket_t
nll_u.append(- torch.mean(torch.nn.LogSigmoid()(score)))
nll += torch.mean(torch.cat(nll_u))
return nll
def train_dream():
dr_model.train() # turn on training mode for dropout
dr_hidden = dr_model.init_hidden(dr_config.batch_size)
total_loss = 0
start_time = time()
num_batchs = ceil(len(train_ub) / dr_config.batch_size)
for i,x in enumerate(batchify(train_ub, dr_config.batch_size)):
baskets, lens, _ = x
dr_hidden = repackage_hidden(dr_hidden) # repackage hidden state for RNN
dr_model.zero_grad() # optim.zero_grad()
dynamic_user, _ = dr_model(baskets, lens, dr_hidden)
loss = bpr_loss(baskets, dynamic_user, dr_model.encode.weight, dr_config)
loss.backward()
# Clip to avoid gradient exploding
torch.nn.utils.clip_grad_norm(dr_model.parameters(), dr_config.clip)
# Parameter updating
# manual SGD
# for p in dr_model.parameters(): # Update parameters by -lr*grad
# p.data.add_(- dr_config.learning_rate, p.grad.data)
# adam
optim.step()
total_loss += loss.data
# Logging
if i % dr_config.log_interval == 0 and i > 0:
elapsed = (time() - start_time) * 1000 / dr_config.log_interval
cur_loss = total_loss[0] / dr_config.log_interval # turn tensor into float
total_loss = 0
start_time = time()
print('[Training]| Epochs {:3d} | Batch {:5d} / {:5d} | ms/batch {:02.2f} | Loss {:05.2f} |'.format(epoch, i, num_batchs, elapsed, cur_loss))
def train_reorder_dream():
dr_model.train() # turn on training mode for dropout
dr_hidden = dr_model.init_hidden(dr_config.batch_size)
total_loss = 0
start_time = time()
num_batchs = ceil(len(train_ub) / dr_config.batch_size)
for i,x in enumerate(batchify(train_ub, dr_config.batch_size, is_reordered = True)):
baskets, lens, ids, r_baskets, h_baskets = x
dr_hidden = repackage_hidden(dr_hidden) # repackage hidden state for RNN
dr_model.zero_grad() # optim.zero_grad()
dynamic_user, _ = dr_model(baskets, lens, dr_hidden)
loss = reorder_bpr_loss(r_baskets, h_baskets, dynamic_user, dr_model.encode.weight, dr_config)
try:
loss.backward()
except RuntimeError: # for debugging
print('caching')
tmp = {'baskets':baskets, 'ids':ids, 'r_baskets':r_baskets, 'h_baskets':h_baskets,
'dynamic_user':dynamic_user, 'item_embedding':dr_model.encode.weight}
print(baskets)
print(ids)
print(r_baskets)
print(h_baskets)
print(dr_model.encode.weight)
print(dynamic_user.data)
with open('tmp.pkl', 'wb') as f:
pickle.dump(tmp, f, pickle.HIGHEST_PROTOCOL)
break
# Clip to avoid gradient exploding
torch.nn.utils.clip_grad_norm(dr_model.parameters(), dr_config.clip)
# Parameter updating
# manual SGD
# for p in dr_model.parameters(): # Update parameters by -lr*grad
# p.data.add_(- dr_config.learning_rate, p.grad.data)
# adam
optim.step()
total_loss += loss.data
# Logging
if i % dr_config.log_interval == 0 and i > 0:
elapsed = (time() - start_time) * 1000 / dr_config.log_interval
cur_loss = total_loss[0] / dr_config.log_interval # turn tensor into float
total_loss = 0
start_time = time()
print('[Training]| Epochs {:3d} | Batch {:5d} / {:5d} | ms/batch {:02.2f} | Loss {:05.2f} |'.format(epoch, i, num_batchs, elapsed, cur_loss))
def evaluate_dream():
dr_model.eval()
dr_hidden = dr_model.init_hidden(dr_config.batch_size)
total_loss = 0
start_time = time()
num_batchs = ceil(len(test_ub) / dr_config.batch_size)
for i,x in enumerate(batchify(test_ub, dr_config.batch_size)):
baskets, lens, _ = x
dynamic_user, _ = dr_model(baskets, lens, dr_hidden)
loss = bpr_loss(baskets, dynamic_user, dr_model.encode.weight, dr_config)
dr_hidden = repackage_hidden(dr_hidden)
total_loss += loss.data
# Logging
elapsed = (time() - start_time) * 1000 / num_batchs
total_loss = total_loss[0] / num_batchs
print('[Evaluation]| Epochs {:3d} | Elapsed {:02.2f} | Loss {:05.2f} |'.format(epoch, elapsed, total_loss))
return total_loss
def evaluate_reorder_dream():
dr_model.eval()
dr_hidden = dr_model.init_hidden(dr_config.batch_size)
total_loss = 0
start_time = time()
num_batchs = ceil(len(test_ub) / dr_config.batch_size)
for i,x in enumerate(batchify(test_ub, dr_config.batch_size, is_reordered = True)):
baskets, lens, _, r_baskets, h_baskets = x
dynamic_user, _ = dr_model(baskets, lens, dr_hidden)
loss = reorder_bpr_loss(r_baskets, h_baskets, dynamic_user, dr_model.encode.weight, dr_config)
dr_hidden = repackage_hidden(dr_hidden)
total_loss += loss.data
# Logging
elapsed = (time() - start_time) * 1000 / num_batchs
total_loss = total_loss[0] / num_batchs
print('[Evaluation]| Epochs {:3d} | Elapsed {:02.2f} | Loss {:05.2f} |'.format(epoch, elapsed, total_loss))
return total_loss
best_val_loss = None
try:
# print(dr_config)
for k,v in constants.DREAM_CONFIG.items():
print(k,v)
# training
for epoch in range(dr_config.epochs):
# train_dream()
train_reorder_dream()
print('-' * 89)
# val_loss = evaluate_dream()
val_loss = evaluate_reorder_dream()
print('-' * 89)
# checkpoint
if not best_val_loss or val_loss < best_val_loss:
with open(dr_config.checkpoint_dir.format(epoch = epoch, loss = val_loss), 'wb') as f:
torch.save(dr_model, f)
best_val_loss = val_loss
else:
# Manual SGD slow down lr if no improvement in val_loss
# dr_config.learning_rate = dr_config.learning_rate / 4
pass
except KeyboardInterrupt:
print('*' * 89)
print('Early Stopping!')
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
l2geth/mobile/android_test.go
|
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package geth
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/cespare/cp"
)
// androidTestClass is a Java class to do some lightweight tests against the Android
// bindings. The goal is not to test each individual functionality, rather just to
// catch breaking API and/or implementation changes.
const androidTestClass = `
package go;
import android.test.InstrumentationTestCase;
import android.test.MoreAsserts;
import java.math.BigInteger;
import java.util.Arrays;
import org.ethereum.geth.*;
public class AndroidTest extends InstrumentationTestCase {
public AndroidTest() {}
public void testAccountManagement() {
// Create an encrypted keystore with light crypto parameters.
KeyStore ks = new KeyStore(getInstrumentation().getContext().getFilesDir() + "/keystore", Geth.LightScryptN, Geth.LightScryptP);
try {
// Create a new account with the specified encryption passphrase.
Account newAcc = ks.newAccount("Creation password");
// Export the newly created account with a different passphrase. The returned
// data from this method invocation is a JSON encoded, encrypted key-file.
byte[] jsonAcc = ks.exportKey(newAcc, "Creation password", "Export password");
// Update the passphrase on the account created above inside the local keystore.
ks.updateAccount(newAcc, "Creation password", "Update password");
// Delete the account updated above from the local keystore.
ks.deleteAccount(newAcc, "Update password");
// Import back the account we've exported (and then deleted) above with yet
// again a fresh passphrase.
Account impAcc = ks.importKey(jsonAcc, "Export password", "Import password");
// Create a new account to sign transactions with
Account signer = ks.newAccount("Signer password");
Transaction tx = new Transaction(
1, new Address("0x0000000000000000000000000000000000000000"),
new BigInt(0), 0, new BigInt(1), null); // Random empty transaction
BigInt chain = new BigInt(1); // Chain identifier of the main net
// Sign a transaction with a single authorization
Transaction signed = ks.signTxPassphrase(signer, "Signer password", tx, chain);
// Sign a transaction with multiple manually cancelled authorizations
ks.unlock(signer, "Signer password");
signed = ks.signTx(signer, tx, chain);
ks.lock(signer.getAddress());
// Sign a transaction with multiple automatically cancelled authorizations
ks.timedUnlock(signer, "Signer password", 1000000000);
signed = ks.signTx(signer, tx, chain);
} catch (Exception e) {
fail(e.toString());
}
}
public void testInprocNode() {
Context ctx = new Context();
try {
// Start up a new inprocess node
Node node = new Node(getInstrumentation().getContext().getFilesDir() + "/.ethereum", new NodeConfig());
node.start();
// Retrieve some data via function calls (we don't really care about the results)
NodeInfo info = node.getNodeInfo();
info.getName();
info.getListenerAddress();
info.getProtocols();
// Retrieve some data via the APIs (we don't really care about the results)
EthereumClient ec = node.getEthereumClient();
ec.getBlockByNumber(ctx, -1).getNumber();
NewHeadHandler handler = new NewHeadHandler() {
@Override public void onError(String error) {}
@Override public void onNewHead(final Header header) {}
};
ec.subscribeNewHead(ctx, handler, 16);
} catch (Exception e) {
fail(e.toString());
}
}
// Tests that recovering transaction signers works for both Homestead and EIP155
// signatures too. Regression test for go-ethereum issue #14599.
public void testIssue14599() {
try {
byte[] preEIP155RLP = new BigInteger("f901fc8032830138808080b901ae60056013565b6101918061001d6000396000f35b3360008190555056006001600060e060020a6000350480630a874df61461003a57806341c0e1b514610058578063a02b161e14610066578063dbbdf0831461007757005b610045600435610149565b80600160a060020a031660005260206000f35b610060610161565b60006000f35b6100716004356100d4565b60006000f35b61008560043560243561008b565b60006000f35b600054600160a060020a031632600160a060020a031614156100ac576100b1565b6100d0565b8060018360005260205260406000208190555081600060005260206000a15b5050565b600054600160a060020a031633600160a060020a031614158015610118575033600160a060020a0316600182600052602052604060002054600160a060020a031614155b61012157610126565b610146565b600060018260005260205260406000208190555080600060005260206000a15b50565b60006001826000526020526040600020549050919050565b600054600160a060020a031633600160a060020a0316146101815761018f565b600054600160a060020a0316ff5b561ca0c5689ed1ad124753d54576dfb4b571465a41900a1dff4058d8adf16f752013d0a01221cbd70ec28c94a3b55ec771bcbc70778d6ee0b51ca7ea9514594c861b1884", 16).toByteArray();
preEIP155RLP = Arrays.copyOfRange(preEIP155RLP, 1, preEIP155RLP.length);
byte[] postEIP155RLP = new BigInteger("f86b80847735940082520894ef5bbb9bba2e1ca69ef81b23a8727d889f3ef0a1880de0b6b3a7640000802ba06fef16c44726a102e6d55a651740636ef8aec6df3ebf009e7b0c1f29e4ac114aa057e7fbc69760b522a78bb568cfc37a58bfdcf6ea86cb8f9b550263f58074b9cc", 16).toByteArray();
postEIP155RLP = Arrays.copyOfRange(postEIP155RLP, 1, postEIP155RLP.length);
Transaction preEIP155 = new Transaction(preEIP155RLP);
Transaction postEIP155 = new Transaction(postEIP155RLP);
preEIP155.getFrom(null); // Homestead should accept homestead
preEIP155.getFrom(new BigInt(4)); // EIP155 should accept homestead (missing chain ID)
postEIP155.getFrom(new BigInt(4)); // EIP155 should accept EIP 155
try {
postEIP155.getFrom(null);
fail("EIP155 transaction accepted by Homestead");
} catch (Exception e) {}
} catch (Exception e) {
fail(e.toString());
}
}
}
`
// TestAndroid runs the Android java test class specified above.
//
// This requires the gradle command in PATH and the Android SDK whose path is available
// through the ANDROID_HOME environment variable. To successfully run the tests, an Android
// device must also be available with debugging enabled.
//
// This method has been adapted from golang.org/x/mobile/bind/java/seq_test.go/runTest
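// A local run would look roughly like this (paths are examples only; as the
// code below shows, the test also falls back to $HOME/Android/Sdk when
// ANDROID_HOME is unset):
//
//	export ANDROID_HOME=$HOME/Android/Sdk
//	go test -run TestAndroid    # from this package's directory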
func TestAndroid(t *testing.T) {
t.Skip("Skipping Android Test (OVM)")
// Skip tests on Windows altogether
if runtime.GOOS == "windows" {
t.Skip("cannot test Android bindings on Windows, skipping")
}
// Make sure all the Android tools are installed
if _, err := exec.Command("which", "gradle").CombinedOutput(); err != nil {
t.Skip("command gradle not found, skipping")
}
if sdk := os.Getenv("ANDROID_HOME"); sdk == "" {
// Android SDK not explicitly given, try to auto-resolve
autopath := filepath.Join(os.Getenv("HOME"), "Android", "Sdk")
if _, err := os.Stat(autopath); err != nil {
t.Skip("ANDROID_HOME environment var not set, skipping")
}
os.Setenv("ANDROID_HOME", autopath)
}
if _, err := exec.Command("which", "gomobile").CombinedOutput(); err != nil {
t.Log("gomobile missing, installing it...")
if out, err := exec.Command("go", "get", "golang.org/x/mobile/cmd/gomobile").CombinedOutput(); err != nil {
t.Fatalf("install failed: %v\n%s", err, string(out))
}
t.Log("initializing gomobile...")
start := time.Now()
if _, err := exec.Command("gomobile", "init").CombinedOutput(); err != nil {
t.Fatalf("initialization failed: %v", err)
}
t.Logf("initialization took %v", time.Since(start))
}
// Create and switch to a temporary workspace
workspace, err := ioutil.TempDir("", "geth-android-")
if err != nil {
t.Fatalf("failed to create temporary workspace: %v", err)
}
defer os.RemoveAll(workspace)
pwd, err := os.Getwd()
if err != nil {
t.Fatalf("failed to get current working directory: %v", err)
}
if err := os.Chdir(workspace); err != nil {
t.Fatalf("failed to switch to temporary workspace: %v", err)
}
defer os.Chdir(pwd)
// Create the skeleton of the Android project
for _, dir := range []string{"src/main", "src/androidTest/java/org/ethereum/gethtest", "libs"} {
err = os.MkdirAll(dir, os.ModePerm)
if err != nil {
t.Fatal(err)
}
}
// Generate the mobile bindings for Geth and add the tester class
gobind := exec.Command("gomobile", "bind", "-javapkg", "org.ethereum", "github.com/ethereum-optimism/optimism/l2geth/mobile")
if output, err := gobind.CombinedOutput(); err != nil {
t.Logf("%s", output)
t.Fatalf("failed to run gomobile bind: %v", err)
}
cp.CopyFile(filepath.Join("libs", "geth.aar"), "geth.aar")
if err = ioutil.WriteFile(filepath.Join("src", "androidTest", "java", "org", "ethereum", "gethtest", "AndroidTest.java"), []byte(androidTestClass), os.ModePerm); err != nil {
t.Fatalf("failed to write Android test class: %v", err)
}
// Finish creating the project and run the tests via gradle
if err = ioutil.WriteFile(filepath.Join("src", "main", "AndroidManifest.xml"), []byte(androidManifest), os.ModePerm); err != nil {
t.Fatalf("failed to write Android manifest: %v", err)
}
if err = ioutil.WriteFile("build.gradle", []byte(gradleConfig), os.ModePerm); err != nil {
t.Fatalf("failed to write gradle build file: %v", err)
}
if output, err := exec.Command("gradle", "connectedAndroidTest").CombinedOutput(); err != nil {
t.Logf("%s", output)
t.Errorf("failed to run gradle test: %v", err)
}
}
const androidManifest = `<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="org.ethereum.gethtest"
android:versionCode="1"
android:versionName="1.0">
<uses-permission android:name="android.permission.INTERNET" />
</manifest>`
const gradleConfig = `buildscript {
repositories {
jcenter()
}
dependencies {
classpath 'com.android.tools.build:gradle:2.2.3'
}
}
allprojects {
repositories { jcenter() }
}
apply plugin: 'com.android.library'
android {
compileSdkVersion 'android-19'
buildToolsVersion '21.1.2'
defaultConfig { minSdkVersion 15 }
}
repositories {
flatDir { dirs 'libs' }
}
dependencies {
compile 'com.android.support:appcompat-v7:19.0.0'
compile(name: "geth", ext: "aar")
}
`
|
[
"\"ANDROID_HOME\"",
"\"HOME\""
] |
[] |
[
"HOME",
"ANDROID_HOME"
] |
[]
|
["HOME", "ANDROID_HOME"]
|
go
| 2 | 0 | |
nebula_bench/setting.py
|
# -*- coding: utf-8 -*-
import pathlib
import os
from datetime import datetime
from dotenv import load_dotenv
load_dotenv()
WORKSPACE_PATH = pathlib.Path(__file__).parent.parent
DATA_FOLDER = os.environ.get("DATA_FOLDER") or "target/data/test_data"
REPORT_FOLDER = os.environ.get("REPORT_FOLDER") or "nginx/data"
NEBULA_SPACE = os.environ.get("NEBULA_SPACE") or "stress_test_{}".format(
datetime.now().strftime("%m%d")
)
NEBULA_USER = os.environ.get("NEBULA_USER") or "root"
NEBULA_PASSWORD = os.environ.get("NEBULA_PASSWORD") or "nebula"
NEBULA_ADDRESS = os.environ.get("NEBULA_ADDRESS") or "127.0.0.1:9669"
DINGDING_SECRET = os.environ.get("DINGDING_SECRET")
DINGDING_WEBHOOK = os.environ.get("DINGDING_WEBHOOK")
if os.environ.get("NEBULA_MAX_CONNECTION"):
NEBULA_MAX_CONNECTION = int(os.environ.get("NEBULA_MAX_CONNECTION"))
else:
NEBULA_MAX_CONNECTION = 400
SQLALCHEMY_URI = os.environ.get("SQLALCHEMY_URI") or "sqlite:///./nebula-bench.db"
INFLUXDB_URL = os.environ.get("INFLUXDB_URL", None)
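# Example .env picked up by load_dotenv() above; every value is a placeholder:
#
#   NEBULA_ADDRESS=192.168.8.6:9669
#   NEBULA_USER=root
#   NEBULA_PASSWORD=nebula
#   NEBULA_SPACE=stress_test_0101
#   NEBULA_MAX_CONNECTION=100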
|
[] |
[] |
[
"INFLUXDB_URL",
"NEBULA_ADDRESS",
"SQLALCHEMY_URI",
"DINGDING_SECRET",
"NEBULA_USER",
"NEBULA_SPACE",
"DINGDING_WEBHOOK",
"NEBULA_PASSWORD",
"REPORT_FOLDER",
"DATA_FOLDER",
"NEBULA_MAX_CONNECTION"
] |
[]
|
["INFLUXDB_URL", "NEBULA_ADDRESS", "SQLALCHEMY_URI", "DINGDING_SECRET", "NEBULA_USER", "NEBULA_SPACE", "DINGDING_WEBHOOK", "NEBULA_PASSWORD", "REPORT_FOLDER", "DATA_FOLDER", "NEBULA_MAX_CONNECTION"]
|
python
| 11 | 0 | |
serial_scripts/rbac/base.py
|
import test_v1
from rbac_test import RbacFixture
from vn_test import VNFixture
from vm_test import VMFixture
from port_fixture import PortFixture
from security_group import SecurityGroupFixture
from svc_template_fixture import SvcTemplateFixture
from project_test import ProjectFixture
from floating_ip import FloatingIPFixture
from lbaasv2_fixture import LBaasV2Fixture
from common.servicechain.firewall.verify import VerifySvcFirewall
from tcutils.util import get_random_name
from tcutils.config.vnc_introspect_utils import VNCApiInspect
from vnc_api.exceptions import PermissionDenied
from common.openstack_libs import neutron_forbidden
from vnc_api.vnc_api import VirtualNetworkType
import os
import fixtures
class VerifySvcFirewallFixture(fixtures.Fixture, VerifySvcFirewall):
def __init__(self, connections, use_vnc_api=False):
self.use_vnc_api = use_vnc_api
self.connections = connections
self.inputs = connections.inputs
self.orch = connections.orch
self.vnc_lib = connections.vnc_lib
self.logger = connections.logger
class BaseRbac(test_v1.BaseTestCase_v1):
@classmethod
def setUpClass(cls):
super(BaseRbac, cls).setUpClass()
cls.rbac_for_analytics = False
if cls.inputs.get_analytics_aaa_mode() == 'rbac':
cls.rbac_for_analytics = True
try:
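# Pre-created keystone users/roles can be reused by exporting
# RBAC_USER<n>/RBAC_PASS<n>/RBAC_ROLE<n> (e.g. RBAC_USER1=alice, a placeholder
# value); otherwise random ones are created here and removed in cleanUpObjects().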
if os.getenv('RBAC_USER1') and os.getenv('RBAC_PASS1'):
cls.user1 = os.getenv('RBAC_USER1')
cls.pass1 = os.getenv('RBAC_PASS1')
else:
cls.pass1 = cls.user1 = get_random_name(cls.__name__)
cls.admin_connections.auth.create_user(cls.user1, cls.pass1)
if os.getenv('RBAC_USER2') and os.getenv('RBAC_PASS2'):
cls.user2 = os.getenv('RBAC_USER2')
cls.pass2 = os.getenv('RBAC_PASS2')
else:
cls.pass2 = cls.user2 = get_random_name(cls.__name__)
cls.admin_connections.auth.create_user(cls.user2, cls.pass2)
if os.getenv('RBAC_USER3') and os.getenv('RBAC_PASS3'):
cls.user3 = os.getenv('RBAC_USER3')
cls.pass3 = os.getenv('RBAC_PASS3')
else:
cls.pass3 = cls.user3 = get_random_name(cls.__name__)
cls.admin_connections.auth.create_user(cls.user3, cls.pass3)
if os.getenv('RBAC_ROLE1'):
cls.role1 = os.getenv('RBAC_ROLE1')
else:
cls.role1 = get_random_name(cls.__name__)
cls.admin_connections.auth.create_role(cls.role1)
if os.getenv('RBAC_ROLE2'):
cls.role2 = os.getenv('RBAC_ROLE2')
else:
cls.role2 = get_random_name(cls.__name__)
cls.admin_connections.auth.create_role(cls.role2)
if os.getenv('RBAC_ROLE3'):
cls.role3 = os.getenv('RBAC_ROLE3')
else:
cls.role3 = get_random_name(cls.__name__)
cls.admin_connections.auth.create_role(cls.role3)
cls.project_fixture = ProjectFixture(connections=cls.admin_connections,
project_name=cls.inputs.project_name,
domain_name=cls.inputs.domain_name)
cls.populate_default_rules_in_global_acl()
cls.get_api_server_inspects()
except:
cls.tearDownClass()
raise
def is_test_applicable(self):
if self.get_aaa_mode() != 'rbac':
return (False, 'RBAC is not enabled')
return (True, None)
def get_aaa_mode(self):
return self.admin_connections.api_server_inspect.get_aaa_mode()
def set_aaa_mode(self, aaa_mode):
for inspect_h in self.api_inspects:
inspect_h.set_aaa_mode(aaa_mode)
@classmethod
def get_api_server_inspects(cls):
cls.api_inspects = []
for cfgm_ip in cls.inputs.cfgm_ips:
cls.api_inspects.append(VNCApiInspect(cfgm_ip,
inputs=cls.inputs,
port=cls.inputs.api_server_port,
protocol=cls.inputs.api_protocol,
base_url='/',
insecure=cls.inputs.insecure,
logger=cls.logger))
@classmethod
def populate_default_rules_in_global_acl(cls):
cls.default_rules = [{'rule_object': 'ref-update',
'rule_field': None,
'perms': [{'role': '*', 'crud': 'CRUD'}]
},
{'rule_object': 'project',
'rule_field': None,
'perms': [{'role': '*', 'crud': 'R'}]
},
{'rule_object': 'network-ipam',
'rule_field': None,
'perms': [{'role': '*', 'crud': 'R'}]
},
{'rule_object': 'routing-instance',
'rule_field': None,
'perms': [{'role': '*', 'crud': 'R'}]
},
{'rule_object': 'domain',
'rule_field': None,
'perms': [{'role': '*', 'crud': 'R'}]
}]
cls.global_acl = RbacFixture(connections=cls.connections,
parent_fqname='default-global-system-config',
parent_type='global-system-config')
cls.global_acl.setUp()
cls.global_acl.add_rules(cls.default_rules)
@classmethod
def tearDownClass(cls):
cls.cleanUpObjects()
super(BaseRbac, cls).tearDownClass()
@classmethod
def cleanUpObjects(cls):
if not os.getenv('RBAC_USER1'):
cls.admin_connections.auth.delete_user(cls.user1)
if not os.getenv('RBAC_USER2'):
cls.admin_connections.auth.delete_user(cls.user2)
if not os.getenv('RBAC_USER3'):
cls.admin_connections.auth.delete_user(cls.user3)
if not os.getenv('RBAC_ROLE1'):
cls.admin_connections.auth.delete_role(cls.role1)
if not os.getenv('RBAC_ROLE2'):
cls.admin_connections.auth.delete_role(cls.role2)
if not os.getenv('RBAC_ROLE3'):
cls.admin_connections.auth.delete_role(cls.role3)
if getattr(cls, 'global_acl', None):
cls.global_acl.delete_rules(cls.default_rules)
def add_user_to_project(self, username, role, project_name=None):
if not project_name:
project_name = self.inputs.project_name
auth = self.admin_connections.auth
auth.add_user_to_project(username, project_name, role)
self.addCleanup(auth.remove_user_from_project,
username, role, project_name)
def get_connections(self, username, password, project_fixture=None):
if not project_fixture:
project_fixture = self.project_fixture
return project_fixture.get_project_connections(username=username, password=password)
def create_project(self):
project_name = get_random_name(self.__class__.__name__)
project_fixture = self.create_fixture(ProjectFixture,
connections=self.admin_connections,
project_name=project_name)
return project_fixture
def create_rbac_acl(self, connections=None, rules=None,
parent_type=None, parent_fqname=None, verify=True):
connections = connections or self.connections
parent_type = parent_type or 'project'
if not parent_fqname:
if parent_type == 'project':
parent_fqname = '%s:%s'%(connections.domain_name,
connections.project_name)
elif parent_type == 'domain':
parent_fqname = connections.domain_name
else:
parent_fqname = 'default-global-system-config'
rbac_fixture = self.create_fixture(RbacFixture, connections=connections,
parent_type=parent_type, rules=rules,
parent_fqname=parent_fqname)
assert rbac_fixture, 'RBAC ACL creation failed'
if verify:
assert rbac_fixture.verify_on_setup(), 'Rbac verification failed'
return rbac_fixture
def share_obj(self, obj=None, project=None, perms=7, connections=None):
connections = connections or self.connections
project_id = project.uuid
vnc_h = connections.orch.vnc_h
vnc_h.set_share_tenants(obj=obj, tenant=project_id, tenant_access=perms)
def set_owner(self, obj=None, project=None, connections=None):
connections = connections or self.connections
project_id = project.uuid
vnc_h = connections.orch.vnc_h
vnc_h.set_owner(obj=obj, tenant=project_id)
def delete_vn(self, vn_fix, connections=None):
connections = connections or self.connections
status = connections.orch.delete_vn(vn_fix.obj)
if status:
self.remove_from_cleanups(vn_fix)
return status
def create_vn(self, connections=None, verify=True, option='contrail', **kwargs):
connections = connections or self.connections
vn_fixture = self.create_fixture(VNFixture, connections=connections,
option=option, **kwargs)
if vn_fixture and verify:
# Workaround: verify using admin creds since RI and other system objects
# won't be visible to the regular user
vn_admin_fixture = VNFixture(connections=self.connections,
option=option, uuid=vn_fixture.uuid)
vn_admin_fixture.read()
assert vn_admin_fixture.verify_on_setup(), 'VN verification failed'
return vn_fixture
def create_vm(self, vn_fixture, connections=None, verify=True):
connections = connections or self.connections
vm_fixture = self.create_fixture(VMFixture, connections=connections,
vn_obj=vn_fixture.obj,
image_name='cirros',
admin_connections=self.connections)
if vm_fixture and verify:
assert vm_fixture.verify_on_setup(), 'VM verification failed'
return vm_fixture
def create_vmi(self, vn_fixture, connections=None, verify=True):
connections = connections or self.connections
try:
vmi_fixture = self.useFixture(PortFixture(vn_fixture.uuid,
connections=connections))
except PermissionDenied:
return None
if vmi_fixture and verify:
assert vmi_fixture.verify_on_setup(), 'VMI verification failed'
return vmi_fixture
def create_st(self, connections=None, verify=True):
connections = connections or self.connections
st_fixture = self.create_fixture(SvcTemplateFixture,
connections=connections,
st_name=get_random_name(connections.project_name),
svc_img_name='tiny_nat_fw',
service_type='firewall',
if_details={'management': {}, 'left': {}, 'right': {}},
service_mode='in-network-nat',
svc_scaling=False)
if st_fixture and verify:
assert st_fixture.verify_on_setup(), 'ST verification failed'
return st_fixture
def create_sc(self, connections=None, st_version=1, **kwargs):
connections = connections or self.connections
svc = self.create_fixture(VerifySvcFirewallFixture,
connections=connections,
use_vnc_api=True, **kwargs)
if svc:
assert svc.verify_svc_chain(service_mode='in-network',
svc_img_name='tiny_in_net',
create_svms=True)
return svc
def create_lbaas(self, lb_name, network_id, connections=None, verify=True, **kwargs):
connections = connections or self.connections
lbaas_fixture = self.create_fixture(LBaasV2Fixture,
connections=connections,
lb_name=lb_name,
network_id=network_id,
**kwargs)
if lbaas_fixture and verify:
assert lbaas_fixture.verify_lb_in_api_server(), 'LB verification failed'
lb_fixture = LBaasV2Fixture(connections=self.connections,
lb_uuid=lbaas_fixture.lb_uuid,
listener_uuid=lbaas_fixture.listener_uuid)
lb_fixture.lb_read()
lb_fixture.read()
assert lb_fixture.verify_on_setup(), 'LB verification failed'
return lbaas_fixture
def create_sg(self, connections=None, verify=True, option='orch', **kwargs):
connections = connections or self.connections
sg = self.create_fixture(SecurityGroupFixture,
connections=connections,
option=option, **kwargs)
if sg and verify:
assert sg.verify_on_setup()
rules = [
{'direction': '<>',
'protocol': 'tcp',
'src_addresses': [{'security_group': 'local'}],
'src_ports': [{'start_port': 0, 'end_port': -1}],
'dst_ports': [{'start_port': 0, 'end_port': -1}],
'dst_addresses': [{'security_group': 'local'}],
}]
sg.create_sg_rule(sg.uuid, rules)
return sg
def associate_sg(self, sg_fixture, vm_fixture, verify=True):
vm_fixture.add_security_group(sg_fixture.uuid)
if verify:
result, msg = vm_fixture.verify_security_group(sg_fixture.secgrp_name)
assert result, msg
def create_fip_pool(self, vn_fixture, connections=None, verify=True):
connections = connections or self.connections
fip_pool = self.create_fixture(FloatingIPFixture,
connections=connections,
vn_id=vn_fixture.uuid)
if fip_pool and verify:
assert fip_pool.verify_on_setup(), 'FIP Pool verification failed'
return fip_pool
def create_fip(self, fip_pool, connections=None, vm_fixture=None,
pub_vn_fixture=None, option='contrail', verify=True):
connections = connections or self.connections
vnc_h = connections.orch.vnc_h
vnc_lib_h = connections.vnc_lib_fixture
if option == 'contrail':
try:
project_obj = vnc_lib_h.get_project_obj()
(fip, fip_id) = vnc_h.create_floating_ip(
pool_obj=fip_pool.fip_pool_obj,
project_obj=project_obj,
owner=project_obj.uuid)
self.addCleanup(vnc_h.delete_floating_ip, fip_id)
if vm_fixture:
vnc_h.assoc_floating_ip(fip_id=fip_id, vm_id=vm_fixture.uuid)
except PermissionDenied:
return (None, None)
else:
try:
(fip, fip_id) = fip_pool.create_floatingip(fip_pool.get_vn_id())
self.addCleanup(fip_pool.disassoc_and_delete_fip, fip_id)
if vm_fixture:
self.assoc_floatingip(fip_id=fip_id, vm_id=vm_fixture.uuid)
except PermissionDenied:
return (None, None)
if verify and vm_fixture and pub_vn_fixture:
assert fip_pool.verify_fip(fip_id, vm_fixture, pub_vn_fixture)
return (fip, fip_id)
def create_fixture(self, fixturecls, **kwargs):
try:
return self.useFixture(fixturecls(**kwargs))
except (PermissionDenied, neutron_forbidden):
return None
def read_fip_pool(self, connections, uuid):
try:
obj = connections.api_server_inspect.get_cs_fip_pool(uuid)
except PermissionDenied:
obj = None
if obj:
self.logger.info('API Server: Read FIP Pool %s'%uuid)
else:
self.logger.info('API Server: Permission Denied to read FIP Pool %s'%uuid)
return obj
def read_vn(self, connections, uuid, option='contrail'):
try:
if option == 'contrail':
obj = connections.api_server_inspect.get_cs_vn_by_id(uuid, refresh=True)
else:
obj = connections.orch.get_vn_obj_from_id(uuid)
except PermissionDenied:
obj = None
if obj:
self.logger.info('API Server: Read VN %s'%uuid)
else:
self.logger.info('API Server: Permission Denied to read VN %s'%uuid)
return obj
def read_vmi(self, connections, uuid):
try:
obj = connections.api_server_inspect.get_cs_vmi_by_id(uuid, refresh=True)
except PermissionDenied:
obj = None
if obj:
self.logger.info('API Server: Read VMI %s'%uuid)
else:
self.logger.info('API Server: Permission Denied to read VMI %s'%uuid)
return obj
def read_st(self, connections, uuid):
try:
obj = connections.api_server_inspect.get_cs_st_by_id(uuid, refresh=True)
except PermissionDenied:
obj = None
if obj:
self.logger.info('API Server: Read Service-Template %s'%uuid)
else:
self.logger.info('API Server: Permission Denied to read ST %s'%uuid)
return obj
def update_vn(self, connections=None, uuid=None, prop_kv=None, obj=None):
vnc_h = connections.orch.vnc_h
if not obj:
obj = vnc_h.virtual_network_read(id=uuid)
for k,v in prop_kv.items():
if '.' in k: #SubField Match
field = k.split('.')[0]
subfield = k.split('.')[1]
prop = getattr(obj, 'get_' + field)() or VirtualNetworkType() # ToDo
setattr(prop, subfield, v)
getattr(obj, 'set_' + field)(prop)
else:
setattr(obj, k, v)
try:
vnc_h.virtual_network_update(obj)
self.logger.info('Updated VN %s'%uuid)
return True
except PermissionDenied:
self.logger.info('Permission Denied to update VN %s, kv %s'%(uuid, prop_kv))
return False
def list_vn(self, connections=None, option='contrail'):
connections = connections or self.connections
vn_ids = list()
try:
if option == 'contrail':
vn_ids = connections.api_server_inspect.get_cs_vn_list()
else:
vns = connections.orch.list_networks()
for vn in vns or []:
vn_ids.append(vn['id'])
self.logger.info('API Server: List VN %s'%vn_ids)
except PermissionDenied:
self.logger.info('API Server: Permission Denied to list VN')
return vn_ids
def list_fip_pool(self, connections=None):
connections = connections or self.connections
pool_ids = list()
try:
pool_ids = connections.api_server_inspect.get_cs_fip_pool_list()
self.logger.info('API Server: List FIP Pool %s'%pool_ids)
except PermissionDenied:
self.logger.info('API Server: Permission Denied to read FIP Pool')
return pool_ids
def list_analytics_nodes_from_analytics(self, connections):
try:
return connections.ops_inspect.get_hrefs_to_all_UVEs_of_a_given_UVE_type(
uveType='analytics-nodes')
except PermissionDenied:
self.logger.info('Analytics API Server: Permission Denied to list nodes')
def list_vn_from_analytics(self, connections):
try:
vns = connections.ops_inspect.get_hrefs_to_all_UVEs_of_a_given_UVE_type(
uveType='virtual-networks') or []
except PermissionDenied:
self.logger.info('Analytics API Server: Permission Denied to list VNs')
return list()
return [vn['name'] for vn in vns]
def get_vn_from_analytics(self, connections, fq_name_str):
try:
return connections.analytics_obj.get_vn_uve(fq_name_str)
except PermissionDenied:
self.logger.info('Analytics API Server: Permission Denied to read VN')
def get_vmi_from_analytics(self, connections, fq_name_str):
try:
return connections.ops_inspect.get_ops_vm_intf(fq_name_str)
except PermissionDenied:
self.logger.info('Analytics API Server: Permission Denied to read VMI')
def remove_from_cleanups(self, fixture):
for cleanup in self._cleanups:
if fixture.cleanUp in cleanup:
self._cleanups.remove(cleanup)
break
|
[] |
[] |
[
"RBAC_ROLE2",
"RBAC_USER2",
"RBAC_PASS2",
"RBAC_ROLE1",
"RBAC_PASS1",
"RBAC_ROLE3",
"RBAC_USER3",
"RBAC_USER1",
"RBAC_PASS3"
] |
[]
|
["RBAC_ROLE2", "RBAC_USER2", "RBAC_PASS2", "RBAC_ROLE1", "RBAC_PASS1", "RBAC_ROLE3", "RBAC_USER3", "RBAC_USER1", "RBAC_PASS3"]
|
python
| 9 | 0 | |
google/ads/googleads/v5/services/services/account_budget_proposal_service/client.py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v5.resources.types import account_budget_proposal
from google.ads.googleads.v5.services.types import (
account_budget_proposal_service,
)
from google.protobuf import wrappers_pb2 as wrappers # type: ignore
from .transports.base import (
AccountBudgetProposalServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import AccountBudgetProposalServiceGrpcTransport
class AccountBudgetProposalServiceClientMeta(type):
"""Metaclass for the AccountBudgetProposalService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AccountBudgetProposalServiceTransport]]
_transport_registry["grpc"] = AccountBudgetProposalServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AccountBudgetProposalServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AccountBudgetProposalServiceClient(
metaclass=AccountBudgetProposalServiceClientMeta
):
"""A service for managing account-level budgets via proposals.
A proposal is a request to create a new budget or make changes
to an existing one.
Reads for account-level budgets managed by these proposals will
be supported in a future version. Until then, please use the
BudgetOrderService from the AdWords API. Learn more at
https://developers.google.com/adwords/api/docs/guides/budget-order
Mutates:
The CREATE operation creates a new proposal.
UPDATE operations aren't supported.
The REMOVE operation cancels a pending proposal.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AccountBudgetProposalServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AccountBudgetProposalServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AccountBudgetProposalServiceTransport:
"""Return the transport used by the client instance.
Returns:
AccountBudgetProposalServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def account_budget_path(customer: str, account_budget: str,) -> str:
"""Return a fully-qualified account_budget string."""
return "customers/{customer}/accountBudgets/{account_budget}".format(
customer=customer, account_budget=account_budget,
)
@staticmethod
def parse_account_budget_path(path: str) -> Dict[str, str]:
"""Parse a account_budget path into its component segments."""
m = re.match(
r"^customers/(?P<customer>.+?)/accountBudgets/(?P<account_budget>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def account_budget_proposal_path(
customer: str, account_budget_proposal: str,
) -> str:
"""Return a fully-qualified account_budget_proposal string."""
return "customers/{customer}/accountBudgetProposals/{account_budget_proposal}".format(
customer=customer, account_budget_proposal=account_budget_proposal,
)
@staticmethod
def parse_account_budget_proposal_path(path: str) -> Dict[str, str]:
"""Parse a account_budget_proposal path into its component segments."""
m = re.match(
r"^customers/(?P<customer>.+?)/accountBudgetProposals/(?P<account_budget_proposal>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def billing_setup_path(customer: str, billing_setup: str,) -> str:
"""Return a fully-qualified billing_setup string."""
return "customers/{customer}/billingSetups/{billing_setup}".format(
customer=customer, billing_setup=billing_setup,
)
@staticmethod
def parse_billing_setup_path(path: str) -> Dict[str, str]:
"""Parse a billing_setup path into its component segments."""
m = re.match(
r"^customers/(?P<customer>.+?)/billingSetups/(?P<billing_setup>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[
str, AccountBudgetProposalServiceTransport, None
] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the account budget proposal service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AccountBudgetProposalServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AccountBudgetProposalServiceTransport):
# transport is a AccountBudgetProposalServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AccountBudgetProposalServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_account_budget_proposal(
self,
request: account_budget_proposal_service.GetAccountBudgetProposalRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> account_budget_proposal.AccountBudgetProposal:
r"""Returns an account-level budget proposal in full
detail.
Args:
request (:class:`google.ads.googleads.v5.services.types.GetAccountBudgetProposalRequest`):
The request object. Request message for
[AccountBudgetProposalService.GetAccountBudgetProposal][google.ads.googleads.v5.services.AccountBudgetProposalService.GetAccountBudgetProposal].
resource_name (:class:`str`):
Required. The resource name of the
account-level budget proposal to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v5.resources.types.AccountBudgetProposal:
An account-level budget proposal.
All fields prefixed with 'proposed' may not
necessarily be applied directly. For example,
proposed spending limits may be adjusted before their
application. This is true if the 'proposed' field has
an 'approved' counterpart, e.g. spending limits.
Please note that the proposal type (proposal_type)
changes which fields are required and which must
remain empty.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in an account_budget_proposal_service.GetAccountBudgetProposalRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
account_budget_proposal_service.GetAccountBudgetProposalRequest,
):
request = account_budget_proposal_service.GetAccountBudgetProposalRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_account_budget_proposal
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
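# Illustrative call sketch (the IDs below are placeholders, not real resources):
#
#   proposal = client.get_account_budget_proposal(
#       resource_name="customers/1234567890/accountBudgetProposals/111"
#   )
#
# Either a full GetAccountBudgetProposalRequest or the flattened resource_name
# argument may be supplied, but not both.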
def mutate_account_budget_proposal(
self,
request: account_budget_proposal_service.MutateAccountBudgetProposalRequest = None,
*,
customer_id: str = None,
operation: account_budget_proposal_service.AccountBudgetProposalOperation = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> account_budget_proposal_service.MutateAccountBudgetProposalResponse:
r"""Creates, updates, or removes account budget
proposals. Operation statuses are returned.
Args:
request (:class:`google.ads.googleads.v5.services.types.MutateAccountBudgetProposalRequest`):
The request object. Request message for
[AccountBudgetProposalService.MutateAccountBudgetProposal][google.ads.googleads.v5.services.AccountBudgetProposalService.MutateAccountBudgetProposal].
customer_id (:class:`str`):
Required. The ID of the customer.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operation (:class:`google.ads.googleads.v5.services.types.AccountBudgetProposalOperation`):
Required. The operation to perform on
an individual account-level budget
proposal.
This corresponds to the ``operation`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v5.services.types.MutateAccountBudgetProposalResponse:
Response message for account-level
budget mutate operations.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operation]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in an account_budget_proposal_service.MutateAccountBudgetProposalRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
account_budget_proposal_service.MutateAccountBudgetProposalRequest,
):
request = account_budget_proposal_service.MutateAccountBudgetProposalRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operation is not None:
request.operation = operation
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_account_budget_proposal
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
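# Illustrative call sketch (values are placeholders): build an operation, then
# pass it with the flattened customer_id argument, or wrap both in a
# MutateAccountBudgetProposalRequest -- never both forms at once.
#
#   op = account_budget_proposal_service.AccountBudgetProposalOperation()
#   response = client.mutate_account_budget_proposal(
#       customer_id="1234567890", operation=op
#   )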
__all__ = ("AccountBudgetProposalServiceClient",)
|
[] |
[] |
[
"GOOGLE_API_USE_MTLS_ENDPOINT",
"GOOGLE_API_USE_CLIENT_CERTIFICATE"
] |
[]
|
["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"]
|
python
| 2 | 0 | |
cmd/influxd/upgrade/upgrade.go
|
package upgrade
import (
"context"
"errors"
"fmt"
"io/ioutil"
"net/url"
"os"
"os/user"
"path/filepath"
"strings"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/bolt"
"github.com/influxdata/influxdb/v2/dbrp"
"github.com/influxdata/influxdb/v2/fluxinit"
"github.com/influxdata/influxdb/v2/internal/fs"
"github.com/influxdata/influxdb/v2/kit/cli"
"github.com/influxdata/influxdb/v2/kit/metric"
"github.com/influxdata/influxdb/v2/kit/prom"
"github.com/influxdata/influxdb/v2/kv"
"github.com/influxdata/influxdb/v2/kv/migration"
"github.com/influxdata/influxdb/v2/kv/migration/all"
"github.com/influxdata/influxdb/v2/storage"
"github.com/influxdata/influxdb/v2/tenant"
authv1 "github.com/influxdata/influxdb/v2/v1/authorization"
"github.com/influxdata/influxdb/v2/v1/services/meta"
"github.com/influxdata/influxdb/v2/v1/services/meta/filestore"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Simplified 1.x config.
type configV1 struct {
Meta struct {
Dir string `toml:"dir"`
} `toml:"meta"`
Data struct {
Dir string `toml:"dir"`
WALDir string `toml:"wal-dir"`
} `toml:"data"`
Http struct {
BindAddress string `toml:"bind-address"`
HttpsEnabled bool `toml:"https-enabled"`
} `toml:"http"`
}
func (c *configV1) dbURL() string {
address := c.Http.BindAddress
if address == "" { // fallback to default
address = ":8086"
}
var url url.URL
if c.Http.HttpsEnabled {
url.Scheme = "https"
} else {
url.Scheme = "http"
}
if strings.HasPrefix(address, ":") { // address is just :port
url.Host = "localhost" + address
} else {
url.Host = address
}
return url.String()
}
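// For example, an empty bind-address with https-enabled=false yields
// "http://localhost:8086", while bind-address ":8443" with https-enabled=true
// yields "https://localhost:8443".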
type optionsV1 struct {
metaDir string
walDir string
dataDir string
dbURL string
// cmd option
dbDir string
configFile string
}
// populateDirs sets values for expected sub-directories of o.dbDir
func (o *optionsV1) populateDirs() {
o.metaDir = filepath.Join(o.dbDir, "meta")
o.dataDir = filepath.Join(o.dbDir, "data")
o.walDir = filepath.Join(o.dbDir, "wal")
}
type optionsV2 struct {
boltPath string
cliConfigsPath string
enginePath string
userName string
password string
orgName string
bucket string
orgID influxdb.ID
userID influxdb.ID
token string
retention string
}
var options = struct {
// flags for source InfluxDB
source optionsV1
// flags for target InfluxDB
target optionsV2
// verbose output
verbose bool
// logging
logLevel string
logPath string
force bool
}{}
func NewCommand(v *viper.Viper) *cobra.Command {
// source flags
v1dir, err := influxDirV1()
if err != nil {
panic("error fetching default InfluxDB 1.x dir: " + err.Error())
}
// target flags
v2dir, err := fs.InfluxDir()
if err != nil {
panic("error fetching default InfluxDB 2.0 dir: " + err.Error())
}
cmd := &cobra.Command{
Use: "upgrade",
Short: "Upgrade a 1.x version of InfluxDB",
Long: `
Upgrades a 1.x version of InfluxDB by performing the following actions:
1. Reads the 1.x config file and creates a 2.x config file with matching options. Unsupported 1.x options are reported.
2. Copies 1.x database files.
3. Creates influx CLI configurations.
If the config file is not available, the 1.x db folder (--v1-dir option) is used as input.
The target 2.x database dir is specified by the --engine-path option. If changed, the bolt path should be changed as well.
`,
RunE: runUpgradeE,
Args: cobra.NoArgs,
}
opts := []cli.Opt{
{
DestP: &options.source.dbDir,
Flag: "v1-dir",
Default: v1dir,
Desc: "path to source 1.x db directory containing meta, data and wal sub-folders",
},
{
DestP: &options.verbose,
Flag: "verbose",
Default: true,
Desc: "verbose output",
Short: 'v',
},
{
DestP: &options.target.boltPath,
Flag: "bolt-path",
Default: filepath.Join(v2dir, bolt.DefaultFilename),
Desc: "path for boltdb database",
Short: 'm',
},
{
DestP: &options.target.cliConfigsPath,
Flag: "influx-configs-path",
Default: filepath.Join(v2dir, "configs"),
Desc: "path for 2.x CLI configurations file",
Short: 'c',
},
{
DestP: &options.target.enginePath,
Flag: "engine-path",
Default: filepath.Join(v2dir, "engine"),
Desc: "path for persistent engine files",
Short: 'e',
},
{
DestP: &options.target.userName,
Flag: "username",
Default: "",
Desc: "primary username",
Short: 'u',
Required: true,
},
{
DestP: &options.target.password,
Flag: "password",
Default: "",
Desc: "password for username",
Short: 'p',
Required: true,
},
{
DestP: &options.target.orgName,
Flag: "org",
Default: "",
Desc: "primary organization name",
Short: 'o',
Required: true,
},
{
DestP: &options.target.bucket,
Flag: "bucket",
Default: "",
Desc: "primary bucket name",
Short: 'b',
Required: true,
},
{
DestP: &options.target.retention,
Flag: "retention",
Default: "",
Desc: "optional: duration bucket will retain data. 0 is infinite. The default is 0.",
Short: 'r',
},
{
DestP: &options.target.token,
Flag: "token",
Default: "",
Desc: "optional: token for username, else auto-generated",
Short: 't',
},
{
DestP: &options.source.configFile,
Flag: "config-file",
Default: influxConfigPathV1(),
Desc: "optional: Custom InfluxDB 1.x config file path, else the default config file",
},
{
DestP: &options.logLevel,
Flag: "log-level",
Default: zapcore.InfoLevel.String(),
Desc: "supported log levels are debug, info, warn and error",
},
{
DestP: &options.logPath,
Flag: "log-path",
Default: filepath.Join(homeOrAnyDir(), "upgrade.log"),
Desc: "optional: custom log file path",
},
{
DestP: &options.force,
Flag: "force",
Default: false,
Desc: "skip the confirmation prompt",
Short: 'f',
},
}
cli.BindOptions(v, cmd, opts)
// add sub commands
cmd.AddCommand(v1DumpMetaCommand)
cmd.AddCommand(v2DumpMetaCommand)
return cmd
}
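// Illustrative invocation (flag values are placeholders):
//
//     influxd upgrade -u admin -p s3cr3t -o my-org -b my-bucket --v1-dir /var/lib/influxdb
//
// Only username, password, org and bucket are required; the remaining flags
// fall back to the defaults wired up above.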
type influxDBv1 struct {
meta *meta.Client
}
type influxDBv2 struct {
log *zap.Logger
boltClient *bolt.Client
store *bolt.KVStore
kvStore kv.SchemaStore
tenantStore *tenant.Store
ts *tenant.Service
dbrpSvc influxdb.DBRPMappingServiceV2
bucketSvc influxdb.BucketService
onboardSvc influxdb.OnboardingService
authSvc *authv1.Service
kvService *kv.Service
meta *meta.Client
}
func (i *influxDBv2) close() error {
err := i.meta.Close()
if err != nil {
return err
}
err = i.boltClient.Close()
if err != nil {
return err
}
err = i.store.Close()
if err != nil {
return err
}
return nil
}
var fluxInitialized bool
func runUpgradeE(*cobra.Command, []string) error {
// This command is executed multiple times by test code. Initialization can happen only once.
if !fluxInitialized {
fluxinit.FluxInit()
fluxInitialized = true
}
var lvl zapcore.Level
if err := lvl.Set(options.logLevel); err != nil {
return errors.New("unknown log level; supported levels are debug, info, warn and error")
}
ctx := context.Background()
config := zap.NewProductionConfig()
config.Level = zap.NewAtomicLevelAt(lvl)
config.OutputPaths = append(config.OutputPaths, options.logPath)
config.ErrorOutputPaths = append(config.ErrorOutputPaths, options.logPath)
log, err := config.Build()
if err != nil {
return err
}
err = validatePaths(&options.source, &options.target)
if err != nil {
return err
}
log.Info("Starting InfluxDB 1.x upgrade")
if options.source.configFile != "" {
log.Info("Upgrading config file", zap.String("file", options.source.configFile))
v1Config, err := upgradeConfig(options.source.configFile, options.target, log)
if err != nil {
return err
}
options.source.metaDir = v1Config.Meta.Dir
options.source.dataDir = v1Config.Data.Dir
options.source.walDir = v1Config.Data.WALDir
options.source.dbURL = v1Config.dbURL()
} else {
log.Info("No InfluxDB 1.x config file specified, skipping its upgrade")
}
log.Info("Upgrade source paths", zap.String("meta", options.source.metaDir), zap.String("data", options.source.dataDir))
log.Info("Upgrade target paths", zap.String("bolt", options.target.boltPath), zap.String("engine", options.target.enginePath))
v1, err := newInfluxDBv1(&options.source)
if err != nil {
return err
}
v2, err := newInfluxDBv2(ctx, &options.target, log)
if err != nil {
return err
}
defer func() {
if err := v2.close(); err != nil {
log.Error("Failed to close 2.0 services.", zap.Error(err))
}
}()
canOnboard, err := v2.onboardSvc.IsOnboarding(ctx)
if err != nil {
return err
}
if !canOnboard {
return errors.New("InfluxDB has been already set up")
}
req, err := onboardingRequest()
if err != nil {
return err
}
or, err := setupAdmin(ctx, v2, req)
if err != nil {
return err
}
options.target.orgID = or.Org.ID
options.target.userID = or.User.ID
options.target.token = or.Auth.Token
err = saveLocalConfig(&options.source, &options.target, log)
if err != nil {
return err
}
db2BucketIds, err := upgradeDatabases(ctx, v1, v2, &options.source, &options.target, or.Org.ID, log)
if err != nil {
//remove all files
log.Info("Database upgrade error, removing data")
if e := os.Remove(options.target.boltPath); e != nil {
log.Error("Unable to remove bolt database.", zap.Error(e))
}
if e := os.RemoveAll(options.target.enginePath); e != nil {
log.Error("Unable to remove time series data.", zap.Error(e))
}
return err
}
if err = upgradeUsers(ctx, v1, v2, &options.target, db2BucketIds, log); err != nil {
return err
}
log.Info("Upgrade successfully completed. Start service now")
return nil
}
// validatePaths ensures that all filesystem paths provided as input
// are usable by the upgrade command
func validatePaths(sourceOpts *optionsV1, targetOpts *optionsV2) error {
fi, err := os.Stat(sourceOpts.dbDir)
if err != nil {
return fmt.Errorf("1.x DB dir '%s' does not exist", sourceOpts.dbDir)
}
if !fi.IsDir() {
return fmt.Errorf("1.x DB dir '%s' is not a directory", sourceOpts.dbDir)
}
sourceOpts.populateDirs()
metaDb := filepath.Join(sourceOpts.metaDir, "meta.db")
_, err = os.Stat(metaDb)
if err != nil {
return fmt.Errorf("1.x meta.db '%s' does not exist", metaDb)
}
if sourceOpts.configFile != "" {
_, err = os.Stat(sourceOpts.configFile)
if err != nil {
return fmt.Errorf("1.x config file '%s' does not exist", sourceOpts.configFile)
}
v2Config := translateV1ConfigPath(sourceOpts.configFile)
if _, err := os.Stat(v2Config); err == nil {
return fmt.Errorf("file present at target path for upgraded 2.x config file '%s'", v2Config)
}
}
if _, err = os.Stat(targetOpts.boltPath); err == nil {
return fmt.Errorf("file present at target path for upgraded 2.x bolt DB: '%s'", targetOpts.boltPath)
}
if fi, err = os.Stat(targetOpts.enginePath); err == nil {
if !fi.IsDir() {
return fmt.Errorf("upgraded 2.x engine path '%s' is not a directory", targetOpts.enginePath)
}
entries, err := ioutil.ReadDir(targetOpts.enginePath)
if err != nil {
return err
}
if len(entries) > 0 {
return fmt.Errorf("upgraded 2.x engine directory '%s' must be empty", targetOpts.enginePath)
}
}
if _, err = os.Stat(targetOpts.cliConfigsPath); err == nil {
return fmt.Errorf("file present at target path for 2.x CLI configs '%s'", targetOpts.cliConfigsPath)
}
return nil
}
func newInfluxDBv1(opts *optionsV1) (svc *influxDBv1, err error) {
svc = &influxDBv1{}
svc.meta, err = openV1Meta(opts.metaDir)
if err != nil {
return nil, fmt.Errorf("error opening 1.x meta.db: %w", err)
}
return svc, nil
}
func newInfluxDBv2(ctx context.Context, opts *optionsV2, log *zap.Logger) (svc *influxDBv2, err error) {
reg := prom.NewRegistry(log.With(zap.String("service", "prom_registry")))
svc = &influxDBv2{}
svc.log = log
// *********************
// V2 specific services
serviceConfig := kv.ServiceConfig{}
// Create BoltDB store and K/V service
svc.boltClient = bolt.NewClient(log.With(zap.String("service", "bolt")))
svc.boltClient.Path = opts.boltPath
if err := svc.boltClient.Open(ctx); err != nil {
log.Error("Failed opening bolt", zap.Error(err))
return nil, err
}
svc.store = bolt.NewKVStore(log.With(zap.String("service", "kvstore-bolt")), opts.boltPath)
svc.store.WithDB(svc.boltClient.DB())
svc.kvStore = svc.store
svc.kvService = kv.NewService(log.With(zap.String("store", "kv")), svc.store, serviceConfig)
// ensure migrator is run
migrator, err := migration.NewMigrator(
log.With(zap.String("service", "migrations")),
svc.kvStore,
all.Migrations[:]...,
)
if err != nil {
log.Error("Failed to initialize kv migrator", zap.Error(err))
return nil, err
}
// apply migrations to metadata store
if err := migrator.Up(ctx); err != nil {
log.Error("Failed to apply migrations", zap.Error(err))
return nil, err
}
// other required services
var (
authSvc influxdb.AuthorizationService = svc.kvService
)
// Create Tenant service (orgs, buckets, ...)
svc.tenantStore = tenant.NewStore(svc.kvStore)
svc.ts = tenant.NewSystem(svc.tenantStore, log.With(zap.String("store", "new")), reg, metric.WithSuffix("new"))
svc.meta = meta.NewClient(meta.NewConfig(), svc.kvStore)
if err := svc.meta.Open(); err != nil {
return nil, err
}
// DB/RP service
svc.dbrpSvc = dbrp.NewService(ctx, svc.ts.BucketService, svc.kvStore)
svc.bucketSvc = svc.ts.BucketService
engine := storage.NewEngine(
opts.enginePath,
storage.NewConfig(),
storage.WithMetaClient(svc.meta),
)
svc.ts.BucketService = storage.NewBucketService(log, svc.ts.BucketService, engine)
// on-boarding service (influx setup)
svc.onboardSvc = tenant.NewOnboardService(svc.ts, authSvc)
// v1 auth service
authStore, err := authv1.NewStore(svc.kvStore)
if err != nil {
return nil, err
}
svc.authSvc = authv1.NewService(authStore, svc.ts)
return svc, nil
}
func openV1Meta(dir string) (*meta.Client, error) {
cfg := meta.NewConfig()
cfg.Dir = dir
store := filestore.New(cfg.Dir, string(meta.BucketName), "meta.db")
c := meta.NewClient(cfg, store)
if err := c.Open(); err != nil {
return nil, err
}
return c, nil
}
// influxDirV1 retrieves the influxdb directory.
func influxDirV1() (string, error) {
var dir string
// By default, store meta and data files in the current user's home directory
u, err := user.Current()
if err == nil {
dir = u.HomeDir
} else if home := os.Getenv("HOME"); home != "" {
dir = home
} else {
wd, err := os.Getwd()
if err != nil {
return "", err
}
dir = wd
}
dir = filepath.Join(dir, ".influxdb")
return dir, nil
}
// influxConfigPathV1 returns the default 1.x config file path, or an empty path if not found.
func influxConfigPathV1() string {
if envVar := os.Getenv("INFLUXDB_CONFIG_PATH"); envVar != "" {
return envVar
}
for _, path := range []string{
os.ExpandEnv("${HOME}/.influxdb/influxdb.conf"),
"/etc/influxdb/influxdb.conf",
} {
if _, err := os.Stat(path); err == nil {
return path
}
}
return ""
}
// homeOrAnyDir returns the user's home directory, falling back to the current working directory, or an empty string if neither is available.
func homeOrAnyDir() string {
var dir string
u, err := user.Current()
if err == nil {
dir = u.HomeDir
} else if home := os.Getenv("HOME"); home != "" {
dir = home
} else if home := os.Getenv("USERPROFILE"); home != "" {
dir = home
} else {
wd, err := os.Getwd()
if err != nil {
dir = ""
} else {
dir = wd
}
}
return dir
}
|
[
"\"HOME\"",
"\"INFLUXDB_CONFIG_PATH\"",
"\"HOME\"",
"\"USERPROFILE\""
] |
[] |
[
"INFLUXDB_CONFIG_PATH",
"HOME",
"USERPROFILE"
] |
[]
|
["INFLUXDB_CONFIG_PATH", "HOME", "USERPROFILE"]
|
go
| 3 | 0 | |
train_telenet.py
|
import io
import zipfile
import os
import random
from telenet.config import get as tn_config
RND_SEED = tn_config('train.random_seed')
os.environ['PYTHONHASHSEED'] = str(RND_SEED)
random.seed(RND_SEED)
import numpy as np
import pandas as pd
np.random.seed(RND_SEED)
import tensorflow as tf
import tensorflow_addons as tfa
import tensorflow_docs as tfdocs
import tensorflow_docs.plots # Do not remove this import
#import tensorflow.keras.mixed_precision as mp
from tensorflow.python.training.tracking.data_structures import NoDependency
tf.random.set_seed(RND_SEED)
from matplotlib import pyplot as plt
import telenet.model as tn_model
import telenet.dataset_data as tn_data
from tqdm import tqdm
#mp.set_global_policy(mp.Policy('mixed_float16'))
DATASET_NAME = tn_config('train.dataset')
MODEL_VARIANT = tn_config('model.variant')
if 'teresa' in DATASET_NAME:
tn_data.load_names(f'teresa-names.json')
else:
tn_data.load_names(f'{DATASET_NAME}-names.json')
TRAIN_DATA = tn_data.load_json_xz(f'{DATASET_NAME}-train-without-val')
VAL_DATA = tn_data.load_json_xz(f'{DATASET_NAME}-val')
for known_dataset in ['vrd', 'vg', DATASET_NAME]:
if DATASET_NAME.startswith(known_dataset):
SEM_VECTORS = tf.convert_to_tensor(np.load(tn_data.path(f'{known_dataset}-semvecs.npy')))
zf_pi = zipfile.ZipFile(tn_data.path(f'{known_dataset}-yolo-train.zip'), 'r')
zf_om = zipfile.ZipFile(tn_data.path(f'{known_dataset}-mask-train.zip'), 'r')
def get_priors(src,dst):
return np.zeros((tn_data.NUM_RELS,), np.float32)
def preprocess_gt_nongt(img):
img['gt'] = gt = {}
for rel in img['rels']:
src_id = rel['si']
dst_id = rel['di']
y_real = np.zeros((tn_data.NUM_RELS,), np.float32)
for i in rel['v']:
y_real[i] = 1.
gt[(src_id,dst_id)] = {
'sv': rel['sv'],
'dv': rel['dv'],
'a': SEM_VECTORS[rel['sv']],
'b': SEM_VECTORS[rel['dv']],
'p': get_priors(rel['sv'],rel['dv']),
'y': y_real
}
img['nongt'] = nongt = set()
for i in range(len(img['objs'])):
for j in range(len(img['objs'])):
if i != j and (i,j) not in gt:
nongt.add((i,j))
# Preprocess training/validation data
for img in TRAIN_DATA:
preprocess_gt_nongt(img)
for img in VAL_DATA:
preprocess_gt_nongt(img)
def stupid_adapter(f):
return io.BytesIO(f.read())
class TelenetTrainer(tn_model.CombinedRelationshipDetector):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.trn_batch_size = NoDependency(32)
self.trn_batch_gt_size = NoDependency(self.trn_batch_size - int(.5 + .5 * self.trn_batch_size))
self.trn_loss_tracker = NoDependency(tf.keras.metrics.Mean(name="loss"))
self.val_loss_tracker = NoDependency(tf.keras.metrics.Mean(name="loss"))
def prepare_minibatch(self, img):
img_name = img['id']
with stupid_adapter(zf_pi.open(f'{img_name}.npy','r')) as f:
img_features = tf.expand_dims(tf.convert_to_tensor(np.load(f), tf.float32), axis=0)
with stupid_adapter(zf_om.open(f'{img_name}.npy','r')) as f:
obj_masks = tf.convert_to_tensor(np.load(f)[0,:,:,:], tf.float32)
num_objs = len(img['objs'])
num_pairs = num_objs * (num_objs - 1)
if num_pairs == 0:
return (None, None, None)
ground_truth = img['gt']
non_ground_truth = img['nongt']
num_gt_pairs = len(ground_truth)
num_non_gt_pairs = len(non_ground_truth)
batch_mask = []
batch_srcsem = []
batch_dstsem = []
batch_priors = []
batch_y_real = []
def sample_gt_pair(pair, pairdata):
src_id,dst_id = pair
batch_mask.append(tf.stack([obj_masks[:,:,src_id], obj_masks[:,:,dst_id]], axis=-1))
batch_srcsem.append(pairdata['a'])
batch_dstsem.append(pairdata['b'])
batch_priors.append(pairdata['p'])
batch_y_real.append(pairdata['y'])
def sample_non_gt_pair(pair):
src_id,dst_id = pair
src_objid = img['objs'][src_id]['v']
dst_objid = img['objs'][dst_id]['v']
batch_mask.append(tf.stack([obj_masks[:,:,src_id], obj_masks[:,:,dst_id]], axis=-1))
batch_srcsem.append(SEM_VECTORS[src_objid])
batch_dstsem.append(SEM_VECTORS[dst_objid])
batch_priors.append(get_priors(src_objid, dst_objid))
batch_y_real.append(np.zeros((tn_data.NUM_RELS,), np.float32))
num_sampled_gt_pairs = np.minimum(self.trn_batch_gt_size, num_gt_pairs)
num_sampled_non_gt_pairs = np.minimum(self.trn_batch_size - num_sampled_gt_pairs, num_non_gt_pairs)
num_dupes = self.trn_batch_size - num_sampled_gt_pairs - num_sampled_non_gt_pairs
for pair,pairdata in random.sample(list(ground_truth.items()), k=num_sampled_gt_pairs):
sample_gt_pair(pair, pairdata)
for pair in random.sample(list(non_ground_truth), k=num_sampled_non_gt_pairs):
sample_non_gt_pair(pair)
# Fill batch with dupes
if num_dupes > 0:
for i in random.choices(list(range(len(batch_mask))), k=num_dupes):
batch_mask.append(batch_mask[i])
batch_srcsem.append(batch_srcsem[i])
batch_dstsem.append(batch_dstsem[i])
batch_priors.append(batch_priors[i])
batch_y_real.append(batch_y_real[i])
batch_mask = tf.stack(batch_mask, axis=0)
batch_srcsem = tf.stack(batch_srcsem, axis=0)
batch_dstsem = tf.stack(batch_dstsem, axis=0)
batch_priors = tf.stack(batch_priors, axis=0)
batch_y_real = tf.stack(batch_y_real, axis=0)
batch_x = (img_features, batch_mask, batch_srcsem, batch_dstsem, batch_priors)
return (batch_x, batch_y_real)
@property
def metrics(self):
return [self.trn_loss_tracker, self.val_loss_tracker]
def ranking_loss(self, y_real, y_pred, margin=1.):
scores_0, scores_1 = tf.dynamic_partition(y_pred, tf.cast(y_real, tf.int32), 2)
scale = tf.size(y_real, out_type=tf.float32)
return tf.reduce_sum(tf.vectorized_map(lambda val: tf.reduce_sum(tf.nn.relu(margin - (scores_1 - val))), elems=scores_0)) / scale
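# Worked example (illustrative): with y_real=[1., 0.], y_pred=[2.0, 1.5] and
# margin=1, the positive partition is [2.0] and the negative one [1.5], so the
# hinge term is relu(1 - (2.0 - 1.5)) = 0.5; dividing by tf.size(y_real) = 2
# gives a loss of 0.25.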
@tf.function
def train_kernel(self, x, y_real):
with tf.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.ranking_loss(y_real, y_pred)
self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
return loss
def train_step(self, data):
batch_x, batch_y_real = self.prepare_minibatch(TRAIN_DATA[int(data)])
if batch_x is not None:
loss = self.train_kernel(batch_x, batch_y_real)
self.trn_loss_tracker.update_state(loss)
return { 'loss': self.trn_loss_tracker.result() }
@tf.function
def test_kernel(self, x, y_real):
y_pred = self(x, training=False)
return self.ranking_loss(y_real, y_pred)
def test_step(self, data):
batch_x, batch_y_real = self.prepare_minibatch(VAL_DATA[int(data)])
if batch_x is not None:
loss = self.test_kernel(batch_x, batch_y_real)
self.val_loss_tracker.update_state(loss)
return { 'loss': self.val_loss_tracker.result() }
mdl = TelenetTrainer(N=tn_data.NUM_RELS)
mdl.compile(
optimizer=tfa.optimizers.AdamW(learning_rate=tn_config('train.lr'), weight_decay=tn_config('train.wd')),
run_eagerly=True
)
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=tn_config('train.early_stopping'),
mode='min',
restore_best_weights=True
)
tensorboard = tf.keras.callbacks.TensorBoard(
log_dir=f'tensorboard/{MODEL_VARIANT}',
histogram_freq=1
)
history = mdl.fit(
x = tf.data.Dataset.range(len(TRAIN_DATA)).shuffle(256, seed=RND_SEED, reshuffle_each_iteration=True),
validation_data = tf.data.Dataset.range(len(VAL_DATA)),
callbacks = [ early_stopping, tensorboard ],
epochs = tn_config('train.epochs')
)
mdl.save_weights(f'weights/telenet+{MODEL_VARIANT}')
plt.figure()
plotter = tfdocs.plots.HistoryPlotter(metric = 'loss')
plotter.plot({ 'Model': history })
plt.savefig(f"train-results/{MODEL_VARIANT}.png")
with open(f'train-results/{MODEL_VARIANT}.csv', mode='wt', encoding='utf-8') as f:
pd.DataFrame(history.history).to_csv(f)
|
[] |
[] |
[
"PYTHONHASHSEED"
] |
[]
|
["PYTHONHASHSEED"]
|
python
| 1 | 0 | |
appleBot.py
|
import tweepy
import logging
from botConfig import create_api
import time
from shutil import copyfile
import os,sys
import subprocess
from datetime import datetime
from unidecode import unidecode
import re
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
def check_mentions(api, since_id):
logger.info("Retrieving mentions")
new_since_id = since_id
for tweet in tweepy.Cursor(api.mentions_timeline, since_id=since_id, tweet_mode='extended').items():
new_since_id = max(tweet.id, new_since_id)
logger.info(f"Tweet from {tweet.user.name}")
#remove @ mentions, leaving just the BASIC code
basiccode = re.sub('^(@.+?\s)+','',tweet.full_text)
basiccode = unidecode(basiccode)
#unescape >, <, and &
basiccode = basiccode.replace("&lt;", "<")
basiccode = basiccode.replace("&gt;", ">")
basiccode = basiccode.replace("&amp;", "&")
#look for start time command
exp = "{\w*?B(\d\d?)\w*(?:}|\s)" # {B\d\d B= Begin
result = re.search(exp,basiccode)
if result:
starttime = int(result.group(1))
logger.info(f" Requests start at {starttime} seconds")
else:
starttime = 3
#look for length of time to record command
exp = "{\w*?S(\d\d?)\w*(?:}|\s)" # {S\d\d S= Seconds to record
result=re.search(exp,basiccode)
if result:
recordtime = int(result.group(1))
logger.info(f" Requests record for {recordtime} seconds")
else:
recordtime = 30
if recordtime <1:
recordtime=1
green=0 #default is color
exp = "{\w*?G\w*(?:}|\s)" #{G
if re.search(exp,basiccode):
green=1 #greenscreen
logger.info("requests green screen")
exp = "{\w*?A\w*(?:}|\s)" #{A
if re.search(exp,basiccode):
green=2 #amberscreen
logger.info("requests amber screen")
language = 0 # default to BASIC
exp = "{\w*?L\w*(?:}|\s)" #{L
if re.search(exp,basiccode):
language=1 #it's LOGO
logger.info("it's LOGO")
if(starttime == 3):
starttime = 0
exp = "{\w*?T\w*(?:}|\s)" #{T
if re.search(exp,basiccode):
language=2 #it's Terrapin Logo
logger.info("it's Terrapin LOGO")
if(starttime == 3):
starttime = 5
#remove any { command
exp = "{\w*(?:}|\s)" #{anything till space or }
basiccode = re.sub(exp,'',basiccode)
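#Illustrative tweet body (hypothetical): '{B5S20G} 10 PRINT "HI": GOTO 10'
#starts recording 5 seconds after boot, records for 20 seconds, uses the
#green-screen palette, and runs the remaining text as BASIC.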
#whitespace
basiccode = basiccode.strip()
#halt if string is empty
if not basiccode:
logger.info("!!! basiccode string is empty, SKIPPING")
continue
exp='(https?|ftp):\/\/([\da-z\.-]+)\.([a-z]{2,6})([\/\w\?\.-]*)*\/?'
if (re.search(exp,basiccode)):
logger.info("Ooh, its a URL to a disk image")
if(os.path.isfile("working/BOT.dsk")):
os.remove("working/BOT.dsk")
result=os.system('curl -L -k -o working/BOT.dsk --max-filesize 143360 ' + basiccode)
if(os.path.isfile("working/BOT.dsk") == False):
logger.info("!!! DISK IMAGE FAILED, SKIPPING")
continue
if(os.path.getsize("working/BOT.dsk") != 143360):
logger.info("!!! NOT DSK IMAGE SIZE, SKIPPING")
continue
else:
outputFile = open('working/incomingBASIC.txt','w')
outputFile.write(basiccode)
outputFile.close()
if (language==0): #basic
logger.info("Parsing BASIC program")
result = os.system('python3 tokenize.py working/incomingBASIC.txt working/tokenized')
logger.info(f"Result {result}")
if result==256:
logger.info("!!! PARSER FAILED, SKIPPING")
continue
logger.info("Fresh disk image")
copyfile('assets/DOS33FRESHIE.dsk','working/BOT.dsk')
logger.info("Moving tokenized file into disk image")
result = os.system('java -jar ac.jar -p working/BOT.dsk HELLO BAS 0x801 < working/tokenized')
###the following doesn't work
if result==256:
logger.info("!!! APPLECOMMANDER FAILED, SKIPPING")
continue
elif (language==1): #Apple LOGO
logger.info("Fresh Apple logo disk images")
copyfile('assets/apple_logo_ii.dsk','working/BOT.dsk')
copyfile('assets/blank-prodos.dsk','working/BOT2.dsk')
logger.info("Moving logo commands into disk image")
result = os.system('java -jar ac.jar -ptx working/BOT.dsk STARTUP TXT < working/incomingBASIC.txt')
else: #Terrapin
logger.info("Fresh Terrapin logo disk images")
copyfile('assets/Terrapin1.dsk','working/BOT.dsk')
copyfile('assets/Terrapin2.dsk','working/BOT2.dsk')
logger.info("Moving logo commands into disk image")
result = os.system('java -jar ac.jar -ptx working/BOT.dsk STARTUP.LOGO TXT < working/incomingBASIC.txt')
if green==1:
logger.info("Fresh linapple conf file (green)")
copyfile('assets/linapple-green.conf','linapple.conf')
elif green==2:
logger.info("Fresh linapple conf file (amber)")
copyfile('assets/linapple-amber.conf','linapple.conf')
else:
logger.info("Fresh linapple conf file")
copyfile('assets/linapple.conf','linapple.conf')
logger.info("Firing up emulator")
if (language==0):
cmd = '/home/atari8/apple2bot/linapple -1 working/BOT.dsk'.split()
elif (language==1):
cmd = '/home/atari8/apple2bot/linapple -1 working/BOT.dsk -2 working/BOT2.dsk'.split()
elif (language==2):
cmd = '/home/atari8/apple2bot/linapple -1 working/BOT.dsk -2 working/BOT2.dsk'.split()
emuPid = subprocess.Popen(cmd)
logger.info(f" Process ID {emuPid.pid}")
if language==1: #Logo
time.sleep(15) #time to boot before typing
logger.info("Typing RETURN to start logo")
os.system('xdotool search --class apple key --delay 200 Return')
#if language==2: #Terrapin Logo
#time.sleep(20) #time to boot before recording
time.sleep(starttime)
logger.info("Recording with ffmpeg")
result = os.system(f'/usr/bin/ffmpeg -y -hide_banner -loglevel warning -draw_mouse 0 -f x11grab -r 30 -video_size 850x630 -i :98+100,70 -q:v 0 -pix_fmt yuv422p -t {recordtime} working/APPLE_BIG.mp4')
logger.info("Stopping emulator")
emuPid.kill()
logger.info("Converting video")
result = os.system('ffmpeg -loglevel warning -y -i working/APPLE_BIG.mp4 -vcodec libx264 -vf "pad=ceil(iw/2)*2:ceil(ih/2)*2" -pix_fmt yuv420p -strict experimental -r 30 -t 2:20 -acodec aac -vb 1024k -minrate 1024k -maxrate 1024k -bufsize 1024k -ar 44100 -ac 2 working/APPLE_SMALL.mp4')
#per https://gist.github.com/nikhan/26ddd9c4e99bbf209dd7#gistcomment-3232972
logger.info("Uploading video")
media = api.media_upload("working/APPLE_SMALL.mp4")
logger.info(f"Media ID is {media.media_id}")
time.sleep(5)
#TODO replace with get_media_upload_status per https://github.com/tweepy/tweepy/pull/1414
logger.info(f"Posting tweet to @{tweet.user.screen_name}")
tweettext = f"@{tweet.user.screen_name} "
post_result = api.update_status(auto_populate_reply_metadata=False, status=tweettext, media_ids=[media.media_id], in_reply_to_status_id=tweet.id)
logger.info("Done!")
return new_since_id
def main():
os.chdir('/home/atari8/apple2bot/')
api = create_api()
now = datetime.now()
logger.info("START TIME:")
logger.info(now.strftime("%Y %m %d %H:%M:%S"))
try:
sinceFile = open('sinceFile.txt','r')
since_id = sinceFile.read()
except:
sinceFile = open('sinceFile.txt','w')
sinceFile.write("1")
logger.info("created new sinceFile")
since_id = 1
sinceFile.close()
since_id = int(since_id)
logger.info(f"Starting since_id {since_id}")
os.environ["DISPLAY"] = ":98"
while True:
didatweet=0
new_since_id = check_mentions(api, since_id)
if new_since_id != since_id:
since_id = new_since_id
logger.info(f"Since_id now {since_id}")
sinceFile = open('sinceFile.txt','w')
sinceFile.write(str(since_id))
sinceFile.close()
didatweet=1
if didatweet==0:
logger.info("Waiting...")
time.sleep(120)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"DISPLAY"
] |
[]
|
["DISPLAY"]
|
python
| 1 | 0 | |
build.py
|
import os
from conan.packager import ConanMultiPackager
# Common settings
username = "odant" if "CONAN_USERNAME" not in os.environ else None
if __name__ == "__main__":
builder = ConanMultiPackager(
username=username,
exclude_vcvars_precommand=True
)
builder.add()
builder.run()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
config/options.go
|
package config
import (
"fmt"
"github.com/rkcpi/vell/repos"
"github.com/rkcpi/vell/rpm"
"os"
)
var (
httpPort = os.Getenv("VELL_HTTP_PORT")
httpAddress = os.Getenv("VELL_HTTP_ADDRESS")
ReposPath = os.Getenv("VELL_REPOS_PATH")
RepoStore repos.RepositoryStore
ListenAddress string
)
func init() {
if httpPort == "" {
httpPort = "8080"
}
if ReposPath == "" {
ReposPath = "/var/lib/vell/repositories"
}
RepoStore = rpm.NewRepositoryStore(ReposPath)
ListenAddress = fmt.Sprintf("%s:%s", httpAddress, httpPort)
}
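// Illustrative environment (values are examples only): VELL_HTTP_ADDRESS=0.0.0.0
// and VELL_HTTP_PORT=9000 give a ListenAddress of "0.0.0.0:9000"; with
// VELL_REPOS_PATH unset, repositories live under /var/lib/vell/repositories.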
|
[
"\"VELL_HTTP_PORT\"",
"\"VELL_HTTP_ADDRESS\"",
"\"VELL_REPOS_PATH\""
] |
[] |
[
"VELL_HTTP_PORT",
"VELL_REPOS_PATH",
"VELL_HTTP_ADDRESS"
] |
[]
|
["VELL_HTTP_PORT", "VELL_REPOS_PATH", "VELL_HTTP_ADDRESS"]
|
go
| 3 | 0 | |
src/uploader/main/main.go
|
/*
SPDX short identifier: MIT
Copyright 2020 Jevgēnijs Protopopovs
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
*/
package main
import (
"fmt"
"os"
"strconv"
"strings"
"time"
uploaderLib "github.com/protopopov1122/file-share/src/uploader/lib"
)
func getFilename() string {
originalCmd := os.Getenv("SSH_ORIGINAL_COMMAND")
if len(originalCmd) == 0 {
return strconv.FormatInt(time.Now().UTC().Unix(), 10)
} else {
return strings.Fields(originalCmd)[0]
}
}
func main() {
if len(os.Args) < 3 {
fmt.Println("Misconfigured: provide upload parameters")
os.Exit(1)
}
lifetime, err := strconv.ParseUint(os.Args[2], 10, 32)
if err != nil {
fmt.Println("Failed to parse lifetime parameter due to", err)
os.Exit(1)
}
uploader := uploaderLib.Uploader{
APIURL: os.Args[1],
Lifetime: uint(lifetime),
}
res, err := uploader.Upload(os.Stdin, getFilename())
if err != nil {
fmt.Println("Failed to upload file due to ", err)
} else {
fmt.Println("Uploaded file available at", res, "for", uploader.Lifetime, "second(s)")
}
}
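// Illustrative invocation (binary name and values are placeholders):
//
//     cat report.pdf | uploader https://share.example.com/api 3600
//
// uploads stdin for 3600 seconds; when run as an SSH forced command,
// SSH_ORIGINAL_COMMAND supplies the stored filename instead of a timestamp.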
|
[
"\"SSH_ORIGINAL_COMMAND\""
] |
[] |
[
"SSH_ORIGINAL_COMMAND"
] |
[]
|
["SSH_ORIGINAL_COMMAND"]
|
go
| 1 | 0 | |
go/perf/suite/suite.go
|
// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0
// Package suite implements a performance test suite for Noms, intended for
// measuring and reporting long running tests.
//
// Usage is similar to testify's suite:
// 1. Define a test suite struct which inherits from suite.PerfSuite.
// 2. Define methods on that struct that start with the word "Test", optionally
// followed by digits, then followed by a non-empty capitalized string.
// 3. Call suite.Run with an instance of that struct.
// 4. Run go test with the -perf <path to noms db> flag.
//
// Flags:
// -perf.mem Backs the database by a memory store, instead of nbs.
// -perf.prefix Gives the dataset IDs for test results a prefix.
// -perf.repeat Sets how many times tests are repeated ("reps").
// -perf.run Only run tests that match a regex (case insensitive).
// -perf.testdata Sets a custom path to the Noms testdata directory.
//
// PerfSuite also supports testify/suite style Setup/TearDown methods:
// Setup/TearDownSuite is called exactly once.
// Setup/TearDownRep is called for each repetition of the test runs, i.e. -perf.repeat times.
// Setup/TearDownTest is called for every test.
//
// Test results are written to Noms, along with a dump of the environment they were recorded in.
//
// Test names are derived from that "non-empty capitalized string": "Test" is omitted because it's
// redundant, and leading digits are omitted to allow for manual test ordering. For example:
//
// > cat ./samples/go/csv/csv-import/perf_test.go
// type perfSuite {
// suite.PerfSuite
// }
//
// func (s *perfSuite) TestFoo() { ... }
// func (s *perfSuite) TestZoo() { ... }
// func (s *perfSuite) Test01Qux() { ... }
// func (s *perfSuite) Test02Bar() { ... }
//
// func TestPerf(t *testing.T) {
// suite.Run("csv-import", t, &perfSuite{})
// }
//
// > noms serve &
// > go test -v ./samples/go/csv/... -perf http://localhost:8000 -perf.repeat 3
// (perf) RUN(1/3) Test01Qux (recorded as "Qux")
// (perf) PASS: Test01Qux (5s, paused 15s, total 20s)
// (perf) RUN(1/3) Test02Bar (recorded as "Bar")
// (perf) PASS: Test02Bar (15s, paused 2s, total 17s)
// (perf) RUN(1/3) TestFoo (recorded as "Foo")
// (perf) PASS: TestFoo (10s, paused 1s, total 11s)
// (perf) RUN(1/3) TestZoo (recorded as "Zoo")
// (perf) PASS: TestZoo (1s, paused 42s, total 43s)
// ...
//
// > noms show http://localhost:8000::csv-import
// {
// environment: ...
// tests: [{
// "Bar": {elapsed: 15s, paused: 2s, total: 17s},
// "Foo": {elapsed: 10s, paused: 1s, total: 11s},
// "Qux": {elapsed: 5s, paused: 15s, total: 20s},
// "Zoo": {elapsed: 1s, paused: 42s, total: 43s},
// }, ...]
// ...
// }
package suite
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"strings"
"testing"
"time"
"github.com/attic-labs/noms/go/chunks"
"github.com/attic-labs/noms/go/d"
"github.com/attic-labs/noms/go/datas"
"github.com/attic-labs/noms/go/marshal"
"github.com/attic-labs/noms/go/nbs"
"github.com/attic-labs/noms/go/spec"
"github.com/attic-labs/noms/go/types"
"github.com/attic-labs/testify/assert"
testifySuite "github.com/attic-labs/testify/suite"
"github.com/shirou/gopsutil/cpu"
"github.com/shirou/gopsutil/disk"
"github.com/shirou/gopsutil/host"
"github.com/shirou/gopsutil/mem"
)
var (
perfFlag = flag.String("perf", "", "The database to write perf tests to. If this isn't specified, perf tests are skipped. If you want a dry run, use \"mem\" as a database")
perfMemFlag = flag.Bool("perf.mem", false, "Back the test database by a memory store, not nbs. This will affect test timing, but it's provided in case you're low on disk space")
perfPrefixFlag = flag.String("perf.prefix", "", `Prefix for the dataset IDs where results are written. For example, a prefix of "foo/" will write test datasets like "foo/csv-import" instead of just "csv-import"`)
perfRepeatFlag = flag.Int("perf.repeat", 1, "The number of times to repeat each perf test")
perfRunFlag = flag.String("perf.run", "", "Only run perf tests that match a regular expression")
perfTestdataFlag = flag.String("perf.testdata", "", "Path to the noms testdata directory. By default this is ../testdata relative to the noms directory")
testNamePattern = regexp.MustCompile("^Test[0-9]*([A-Z].*$)")
)
// PerfSuite is the core of the perf testing suite. See package documentation for details.
type PerfSuite struct {
// T is the testing.T instance set when the suite is passed into Run.
T *testing.T
// W is the io.Writer to write test output, which only outputs if the verbose flag is set.
W io.Writer
// AtticLabs is the path to the attic-labs directory (e.g. /path/to/go/src/github.com/attic-labs).
AtticLabs string
// Testdata is the path to the testdata directory - typically /path/to/go/src/github.com/attic-labs/testdata, but it can be overridden with the -perf.testdata flag.
Testdata string
// Database is a Noms database that tests can use for reading and writing. State is persisted across a single Run of a suite.
Database datas.Database
// DatabaseSpec is the Noms spec of Database (typically a localhost URL).
DatabaseSpec string
tempFiles []*os.File
tempDirs []string
paused time.Duration
datasetID string
}
// SetupRepSuite has a SetupRep method, which runs every repetition of the test, i.e. -perf.repeat times in total.
type SetupRepSuite interface {
SetupRep()
}
// TearDownRepSuite has a TearDownRep method, which runs every repetition of the test, i.e. -perf.repeat times in total.
type TearDownRepSuite interface {
TearDownRep()
}
type perfSuiteT interface {
Suite() *PerfSuite
}
type environment struct {
DiskUsages map[string]disk.UsageStat
Cpus map[int]cpu.InfoStat
Mem mem.VirtualMemoryStat
Host host.InfoStat
Partitions map[string]disk.PartitionStat
}
type timeInfo struct {
elapsed, paused, total time.Duration
}
type testRep map[string]timeInfo
type nopWriter struct{}
func (r nopWriter) Write(p []byte) (int, error) {
return len(p), nil
}
// Run runs suiteT and writes results to dataset datasetID in the database given by the -perf command line flag.
func Run(datasetID string, t *testing.T, suiteT perfSuiteT) {
assert := assert.New(t)
if !assert.NotEqual("", datasetID) {
return
}
// Piggy-back off the go test -v flag.
verboseFlag := flag.Lookup("test.v")
assert.NotNil(verboseFlag)
verbose := verboseFlag.Value.(flag.Getter).Get().(bool)
if *perfFlag == "" {
if verbose {
fmt.Printf("(perf) Skipping %s, -perf flag not set\n", datasetID)
}
return
}
suite := suiteT.Suite()
suite.T = t
if verbose {
suite.W = os.Stdout
} else {
suite.W = nopWriter{}
}
gopath := os.Getenv("GOPATH")
if !assert.NotEmpty(gopath) {
return
}
suite.AtticLabs = path.Join(gopath, "src", "github.com", "attic-labs")
suite.Testdata = *perfTestdataFlag
if suite.Testdata == "" {
suite.Testdata = path.Join(suite.AtticLabs, "testdata")
}
// Clean up temporary directories/files last.
defer func() {
for _, f := range suite.tempFiles {
os.Remove(f.Name())
}
for _, d := range suite.tempDirs {
os.RemoveAll(d)
}
}()
suite.datasetID = datasetID
// This is the database the perf test results are written to.
sp, err := spec.ForDatabase(*perfFlag)
if !assert.NoError(err) {
return
}
defer sp.Close()
// List of test runs, each a map of test name => timing info.
testReps := make([]testRep, *perfRepeatFlag)
// Note: the default value of perfRunFlag is "", which is actually a valid
// regular expression that matches everything.
perfRunRe, err := regexp.Compile("(?i)" + *perfRunFlag)
if !assert.NoError(err, `Invalid regular expression "%s"`, *perfRunFlag) {
return
}
defer func() {
reps := make([]types.Value, *perfRepeatFlag)
for i, rep := range testReps {
timesSlice := types.ValueSlice{}
for name, info := range rep {
timesSlice = append(timesSlice, types.String(name), types.NewStruct("", types.StructData{
"elapsed": types.Number(info.elapsed.Nanoseconds()),
"paused": types.Number(info.paused.Nanoseconds()),
"total": types.Number(info.total.Nanoseconds()),
}))
}
reps[i] = types.NewMap(timesSlice...)
}
record := types.NewStruct("", map[string]types.Value{
"environment": suite.getEnvironment(),
"nomsRevision": types.String(suite.getGitHead(path.Join(suite.AtticLabs, "noms"))),
"testdataRevision": types.String(suite.getGitHead(suite.Testdata)),
"reps": types.NewList(reps...),
})
db := sp.GetDatabase()
ds := db.GetDataset(*perfPrefixFlag + datasetID)
var err error
ds, err = db.CommitValue(ds, record)
assert.NoError(err)
}()
if t, ok := suiteT.(testifySuite.SetupAllSuite); ok {
t.SetupSuite()
}
for repIdx := 0; repIdx < *perfRepeatFlag; repIdx++ {
testReps[repIdx] = testRep{}
serverHost, stopServerFn := suite.StartRemoteDatabase()
suite.DatabaseSpec = serverHost
suite.Database = datas.NewDatabase(datas.NewHTTPChunkStore(serverHost, ""))
if t, ok := suiteT.(SetupRepSuite); ok {
t.SetupRep()
}
for t, mIdx := reflect.TypeOf(suiteT), 0; mIdx < t.NumMethod(); mIdx++ {
m := t.Method(mIdx)
parts := testNamePattern.FindStringSubmatch(m.Name)
if parts == nil {
continue
}
recordName := parts[1]
if !perfRunRe.MatchString(recordName) && !perfRunRe.MatchString(m.Name) {
continue
}
if _, ok := testReps[repIdx][recordName]; ok {
assert.Fail(`Multiple tests are named "%s"`, recordName)
continue
}
if verbose {
fmt.Printf("(perf) RUN(%d/%d) %s (as \"%s\")\n", repIdx+1, *perfRepeatFlag, m.Name, recordName)
}
if t, ok := suiteT.(testifySuite.SetupTestSuite); ok {
t.SetupTest()
}
start := time.Now()
suite.paused = 0
err := callSafe(m.Name, m.Func, suiteT)
total := time.Since(start)
elapsed := total - suite.paused
if verbose && err == nil {
fmt.Printf("(perf) PASS: %s (%s, paused for %s, total %s)\n", m.Name, elapsed, suite.paused, total)
} else if err != nil {
fmt.Printf("(perf) FAIL: %s (%s, paused for %s, total %s)\n", m.Name, elapsed, suite.paused, total)
fmt.Println(err)
}
testReps[repIdx][recordName] = timeInfo{elapsed, suite.paused, total}
if t, ok := suiteT.(testifySuite.TearDownTestSuite); ok {
t.TearDownTest()
}
}
if t, ok := suiteT.(TearDownRepSuite); ok {
t.TearDownRep()
}
stopServerFn()
}
if t, ok := suiteT.(testifySuite.TearDownAllSuite); ok {
t.TearDownSuite()
}
}
func (suite *PerfSuite) Suite() *PerfSuite {
return suite
}
// NewAssert returns the assert.Assertions instance for this test.
func (suite *PerfSuite) NewAssert() *assert.Assertions {
return assert.New(suite.T)
}
// TempFile creates a temporary file, which will be automatically cleaned up by
// the perf test suite. Files will be prefixed with the test's dataset ID
func (suite *PerfSuite) TempFile() *os.File {
f, err := ioutil.TempFile("", suite.tempPrefix())
assert.NoError(suite.T, err)
suite.tempFiles = append(suite.tempFiles, f)
return f
}
// TempDir creates a temporary directory, which will be automatically cleaned
// up by the perf test suite. Directories will be prefixed with the test's
// dataset ID.
func (suite *PerfSuite) TempDir() string {
d, err := ioutil.TempDir("", suite.tempPrefix())
assert.NoError(suite.T, err)
suite.tempDirs = append(suite.tempDirs, d)
return d
}
func (suite *PerfSuite) tempPrefix() string {
sep := fmt.Sprintf("%c", os.PathSeparator)
return strings.Replace(fmt.Sprintf("perf.%s.", suite.datasetID), sep, ".", -1)
}
// Pause pauses the test timer while fn is executing. Useful for omitting long setup code (e.g. copying files) from the test elapsed time.
func (suite *PerfSuite) Pause(fn func()) {
start := time.Now()
fn()
suite.paused += time.Since(start)
}
// OpenGlob opens the concatenation of all files that match pattern, returned
// as []io.Reader so it can be used immediately with io.MultiReader.
//
// Large CSV files in testdata are broken up into foo.a, foo.b, etc to get
// around GitHub file size restrictions.
func (suite *PerfSuite) OpenGlob(pattern ...string) []io.Reader {
assert := suite.NewAssert()
glob, err := filepath.Glob(path.Join(pattern...))
assert.NoError(err)
files := make([]io.Reader, len(glob))
for i, m := range glob {
f, err := os.Open(m)
assert.NoError(err)
files[i] = f
}
return files
}
// CloseGlob closes all of the files, designed to be used with OpenGlob.
func (suite *PerfSuite) CloseGlob(files []io.Reader) {
assert := suite.NewAssert()
for _, f := range files {
assert.NoError(f.(*os.File).Close())
}
}
func callSafe(name string, fun reflect.Value, args ...interface{}) error {
funArgs := make([]reflect.Value, len(args))
for i, arg := range args {
funArgs[i] = reflect.ValueOf(arg)
}
return d.Try(func() {
fun.Call(funArgs)
})
}
func (suite *PerfSuite) getEnvironment() types.Value {
assert := suite.NewAssert()
env := environment{
DiskUsages: map[string]disk.UsageStat{},
Cpus: map[int]cpu.InfoStat{},
Partitions: map[string]disk.PartitionStat{},
}
partitions, err := disk.Partitions(false)
assert.NoError(err)
for _, p := range partitions {
usage, err := disk.Usage(p.Mountpoint)
assert.NoError(err)
env.DiskUsages[p.Mountpoint] = *usage
env.Partitions[p.Device] = p
}
cpus, err := cpu.Info()
assert.NoError(err)
for i, c := range cpus {
env.Cpus[i] = c
}
mem, err := mem.VirtualMemory()
assert.NoError(err)
env.Mem = *mem
hostInfo, err := host.Info()
assert.NoError(err)
env.Host = *hostInfo
envStruct, err := marshal.Marshal(env)
assert.NoError(err)
return envStruct
}
func (suite *PerfSuite) getGitHead(dir string) string {
stdout := &bytes.Buffer{}
cmd := exec.Command("git", "rev-parse", "HEAD")
cmd.Stdout = stdout
cmd.Dir = dir
if err := cmd.Run(); err != nil {
return ""
}
return strings.TrimSpace(stdout.String())
}
// StartRemoteDatabase creates a new remote database on an arbitrary free port,
// running on a separate goroutine. Returns the hostname that the database was
// started on, and a callback to run to shut down the server.
//
// If the -perf.mem flag is specified, the remote database is hosted in memory,
// not on disk (in a temporary nbs directory).
//
// - Why not use a local database + memory store?
// Firstly, because the spec would be "mem", and the spec library doesn't
// know how to reuse stores.
// Secondly, because it's an unrealistic performance measurement.
//
// - Why use a remote (HTTP) database?
// It's more realistic to exercise the HTTP stack, even if it's just talking
// over localhost.
//
// - Why provide an option for nbs vs memory underlying store?
// Again, nbs is more realistic than memory, and in common cases disk
// space > memory space.
// However, on this developer's laptop, there is
// actually very little disk space, and a lot of memory; plus making the
// test run a little bit faster locally is nice.
func (suite *PerfSuite) StartRemoteDatabase() (host string, stopFn func()) {
var chunkStore chunks.ChunkStore
if *perfMemFlag {
st := &chunks.MemoryStorage{}
chunkStore = st.NewView()
} else {
dbDir := suite.TempDir()
chunkStore = nbs.NewLocalStore(dbDir, 128*(1<<20))
}
server := datas.NewRemoteDatabaseServer(chunkStore, 0)
portChan := make(chan int)
server.Ready = func() { portChan <- server.Port() }
go server.Run()
port := <-portChan
host = fmt.Sprintf("http://localhost:%d", port)
stopFn = func() { server.Stop() }
return
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"fmt"
"log"
"os"
"os/signal"
"syscall"
"time"
clipboard "github.com/atotto/clipboard"
htgotts "github.com/weiztd/htgo-tts"
handlers "github.com/weiztd/htgo-tts/handlers"
"golang.org/x/text/language"
"golang.org/x/text/language/display"
)
func main() {
var langCode, tempFolder, input string
tempFolder = os.Getenv("APPDATA") + "/clipboardTTS"
for {
fmt.Print("Language code: \n")
_, err := fmt.Scanln(&input)
if err != nil {
log.Fatal(err)
}
if isLangCodeLegit(input) {
langCode = input
break
}
}
err := speechFromClipboard(langCode, tempFolder)
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(tempFolder)
}
func isLangCodeLegit(input string) bool {
tag, err := language.Parse(input)
if err != nil {
fmt.Println("invalid language code: " + input)
return false
}
fmt.Printf("Language set to: %s (%s)\n",
display.English.Tags().Name(tag),
display.Self.Name(tag))
return true
}
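// For example, an input of "en" parses successfully and prints the tag's
// English and self names (roughly "Language set to: English (English)").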
func speechFromClipboard(langCode, tempFolder string) error {
quitChan := make(chan os.Signal, 1) // buffered so a signal delivered before receive isn't dropped, as signal.Notify requires
signal.Notify(quitChan, syscall.SIGINT, syscall.SIGTERM, os.Interrupt)
temp := ""
clipboard.WriteAll(temp)
for {
select {
case <-time.After(200 * time.Millisecond):
clipboardString, err := clipboard.ReadAll()
if err != nil {
return fmt.Errorf("fail to read from clipboard: %v", err)
}
if len(clipboardString) > 1 && clipboardString != temp {
log.Println(clipboardString)
speech := htgotts.Speech{Folder: tempFolder, Language: langCode, Handler: &handlers.MPlayer{}}
speech.Speak(clipboardString)
temp = clipboardString
}
case <-quitChan:
return nil
}
}
}
|
[
"\"APPDATA\""
] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
go
| 1 | 0 | |
heroku_config.py
|
import os
class Var(object):
APP_ID = int(os.environ.get("APP_ID", 6))
# 6 is a placeholder
API_HASH = os.environ.get("API_HASH", "eb06d4abfb49dc3eeb1aeb98ae0f581e")
STRING_SESSION = os.environ.get("STRING_SESSION", None)
DB_URI = os.environ.get("DATABASE_URL", None)
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TEMP_DOWNLOAD_DIRECTORY", None)
LOGGER = True
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME", None)
# Here for later purposes
SUDO_USERS = set(int(x) for x in os.environ.get("SUDO_USERS", "").split())
LYDIA_API_KEY = os.environ.get("LYDIA_API_KEY", None)
LESS_SPAMMY = os.environ.get("LESS_SPAMMY", None)
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", None)
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", None)
TG_BOT_TOKEN_BF_HER = os.environ.get("TG_BOT_TOKEN_BF_HER", None)
# Send .get_id in any channel to fill this value.
PLUGIN_CHANNEL = int(os.environ.get("PLUGIN_CHANNEL", -100))
PRIVATE_GROUP_BOT_API_ID = int(os.environ.get("PRIVATE_GROUP_BOT_API_ID", -100))
PM_PERMIT_GROUP_ID = os.environ.get("PM_PERMIT_GROUP_ID", None)
TG_BOT_USER_NAME_BF_HER = os.environ.get("TG_BOT_USER_NAME_BF_HER", None)
NO_SONGS = bool(os.environ.get("NO_SONGS", False))
DOWNLOAD_PFP_URL_CLOCK = os.environ.get("DOWNLOAD_PFP_URL_CLOCK", None)
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", "root")
AUTH_TOKEN_DATA = os.environ.get("AUTH_TOKEN_DATA", None)
MAX_FLOOD_IN_P_M_s = int(os.environ.get("MAX_FLOOD_IN_P_M_s", 3))
if AUTH_TOKEN_DATA is not None:
if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
t_file = open(os.path.join(TEMP_DOWNLOAD_DIRECTORY, "auth_token.txt"), "w")
t_file.write(AUTH_TOKEN_DATA)
t_file.close()
PRIVATE_GROUP_ID = os.environ.get("PRIVATE_GROUP_ID", None)
if PRIVATE_GROUP_ID is not None:
try:
PRIVATE_GROUP_ID = int(PRIVATE_GROUP_ID)
except ValueError:
raise ValueError(
"Invalid Private Group ID. Make sure your ID is starts with -100 and make sure that it is only numbers."
)
NEWS_CHANNEL_ID = int(os.environ.get("NEWS_CHANNEL_ID", -100))
SPAMWATCH_API = os.environ.get("SPAMWATCH_API", None)
ANTISPAM_SYSTEM = os.environ.get("ANTISPAM_SYSTEM", "DISABLE")
LIGHTNING_PRO = os.environ.get("LIGHTNING_PRO", "YES")
WHITE_CHAT = set(int(x) for x in os.environ.get("WHITE_CHAT", "").split())
class Development(Var):
LOGGER = True
|
[] |
[] |
[
"TEMP_DOWNLOAD_DIRECTORY",
"G_DRIVE_CLIENT_SECRET",
"LYDIA_API_KEY",
"NO_SONGS",
"AUTH_TOKEN_DATA",
"PRIVATE_GROUP_BOT_API_ID",
"LESS_SPAMMY",
"TG_BOT_USER_NAME_BF_HER",
"SUDO_USERS",
"MAX_FLOOD_IN_P_M_s",
"PRIVATE_GROUP_ID",
"PM_PERMIT_GROUP_ID",
"DATABASE_URL",
"GDRIVE_FOLDER_ID",
"HEROKU_APP_NAME",
"NEWS_CHANNEL_ID",
"GIT_REPO_NAME",
"HEROKU_API_KEY",
"TG_BOT_TOKEN_BF_HER",
"LIGHTNING_PRO",
"SPAMWATCH_API",
"PLUGIN_CHANNEL",
"G_DRIVE_CLIENT_ID",
"DOWNLOAD_PFP_URL_CLOCK",
"APP_ID",
"ANTISPAM_SYSTEM",
"STRING_SESSION",
"WHITE_CHAT",
"GITHUB_ACCESS_TOKEN",
"API_HASH"
] |
[]
|
["TEMP_DOWNLOAD_DIRECTORY", "G_DRIVE_CLIENT_SECRET", "LYDIA_API_KEY", "NO_SONGS", "AUTH_TOKEN_DATA", "PRIVATE_GROUP_BOT_API_ID", "LESS_SPAMMY", "TG_BOT_USER_NAME_BF_HER", "SUDO_USERS", "MAX_FLOOD_IN_P_M_s", "PRIVATE_GROUP_ID", "PM_PERMIT_GROUP_ID", "DATABASE_URL", "GDRIVE_FOLDER_ID", "HEROKU_APP_NAME", "NEWS_CHANNEL_ID", "GIT_REPO_NAME", "HEROKU_API_KEY", "TG_BOT_TOKEN_BF_HER", "LIGHTNING_PRO", "SPAMWATCH_API", "PLUGIN_CHANNEL", "G_DRIVE_CLIENT_ID", "DOWNLOAD_PFP_URL_CLOCK", "APP_ID", "ANTISPAM_SYSTEM", "STRING_SESSION", "WHITE_CHAT", "GITHUB_ACCESS_TOKEN", "API_HASH"]
|
python
| 30 | 0 | |
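heroku_config.py above repeats several ad-hoc conversions (int(...), bool(...), str.split()). A minimal sketch of how those conversions could be centralised; the helper names are illustrative, and only the APP_ID and SUDO_USERS variables from the row above are reused:
import os

def env_int(name, default):
    # Raises ValueError if the variable is set but not numeric.
    return int(os.environ.get(name, default))

def env_int_set(name):
    # "" -> empty set; "123 456" -> {123, 456}
    return {int(x) for x in os.environ.get(name, "").split()}

APP_ID = env_int("APP_ID", 6)
SUDO_USERS = env_int_set("SUDO_USERS")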
backend/backend/settings/production.py
|
import os
from .base import *
SECRET_KEY = os.environ.get("SECRET_KEY")
DEBUG = False
# White Noise configuration - http://whitenoise.evans.io/en/stable/django.html
INSTALLED_APPS.extend(["whitenoise.runserver_nostatic"])
# Must insert after SecurityMiddleware, which is first in settings/common.py
MIDDLEWARE.insert(1, "whitenoise.middleware.WhiteNoiseMiddleware")
TEMPLATES[0]["DIRS"] = [os.path.join(BASE_DIR, "../", "react", "build")]
STATICFILES_DIRS = [os.path.join(BASE_DIR, "../", "react", "build", "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATIC_URL = "/static/"
WHITENOISE_ROOT = os.path.join(BASE_DIR, "../", "react", "build", "root")
|
[] |
[] |
[
"SECRET_KEY"
] |
[]
|
["SECRET_KEY"]
|
python
| 1 | 0 | |
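production.py above reads SECRET_KEY with os.environ.get, which silently returns None when the variable is unset. A common fail-fast variant (an assumption, not something the settings file itself does):
import os

SECRET_KEY = os.environ.get("SECRET_KEY")
if not SECRET_KEY:
    # Refuse to start rather than run production with an empty secret.
    raise RuntimeError("SECRET_KEY must be set in the environment")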
Chapter06/07_patterns_tricks/03_test_data/example_test.go
|
package _3_test_data
import (
"encoding/json"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLoadConfig(t *testing.T) {
config, err := ioutil.ReadFile("testdata/test-config.json")
require.NoError(t, err)
expected := `{"address": "0.0.0.0:8080"}`
assert.Equal(t, expected, string(config))
}
func TestGenerateJSON(t *testing.T) {
destination := "testdata/result.json"
testFixture := "testdata/expected.json"
// call function under test
resultErr := generateReceiptFile(destination)
require.NoError(t, resultErr)
// clean up the created file
defer os.Remove(destination)
// compare the generated file with the expected file
resultContents, err := ioutil.ReadFile(destination)
require.NoError(t, err)
expectedContents, err := ioutil.ReadFile(testFixture)
require.NoError(t, err)
assert.Equal(t, string(expectedContents), string(resultContents))
}
func TestGenerateJSONWithGenerator(t *testing.T) {
destination := "testdata/result.json"
testFixture := "testdata/expected.json"
if os.Getenv("UPDATE_FIXTURES") == "true" {
generateReceiptFile(testFixture)
return
}
// call function under test
resultErr := generateReceiptFile(destination)
require.NoError(t, resultErr)
// clean up the created file
defer os.Remove(destination)
// compare the generated file with the expected file
resultContents, err := ioutil.ReadFile(destination)
require.NoError(t, err)
expectedContents, err := ioutil.ReadFile(testFixture)
require.NoError(t, err)
assert.Equal(t, string(expectedContents), string(resultContents))
}
func generateReceiptFile(filename string) error {
receipt := &Receipt{
Name: "Sophia",
Total: 12.34,
}
contents, err := json.Marshal(receipt)
if err != nil {
return err
}
return ioutil.WriteFile(filename, contents, 0644)
}
type Receipt struct {
Name string
Total float64
}
|
[
"\"UPDATE_FIXTURES\""
] |
[] |
[
"UPDATE_FIXTURES"
] |
[]
|
["UPDATE_FIXTURES"]
|
go
| 1 | 0 | |
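example_test.go above regenerates its golden file when UPDATE_FIXTURES=true instead of asserting against it. A rough pytest equivalent of that switch (Python used for brevity; the receipt fields mirror the Go struct, everything else is illustrative):
import json
import os

FIXTURE = "testdata/expected.json"

def generate_receipt():
    return {"Name": "Sophia", "Total": 12.34}

def test_generate_json(tmp_path):
    if os.environ.get("UPDATE_FIXTURES") == "true":
        # Rewrite the golden file and skip the comparison.
        with open(FIXTURE, "w") as f:
            json.dump(generate_receipt(), f)
        return
    result = tmp_path / "result.json"
    result.write_text(json.dumps(generate_receipt()))
    with open(FIXTURE) as f:
        assert json.loads(result.read_text()) == json.load(f)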
examples/holidays/countries/listCountries/main.go
|
package main
import (
"fmt"
"os"
"go.m3o.com"
"go.m3o.com/holidays"
)
func main() {
client := m3o.New(os.Getenv("M3O_API_TOKEN"))
rsp, err := client.Holidays.Countries(&holidays.CountriesRequest{})
fmt.Println(rsp, err)
}
|
[
"\"M3O_API_TOKEN\""
] |
[] |
[
"M3O_API_TOKEN"
] |
[]
|
["M3O_API_TOKEN"]
|
go
| 1 | 0 | |
plugins/inputs/prometheus/prometheus.go
|
package prometheus
import (
"context"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
parser_v2 "github.com/influxdata/telegraf/plugins/parsers/prometheus"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
)
const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1`
type Prometheus struct {
// An array of urls to scrape metrics from.
URLs []string `toml:"urls"`
// An array of Kubernetes services to scrape metrics from.
KubernetesServices []string
// Location of kubernetes config file
KubeConfig string
// Label Selector/s for Kubernetes
KubernetesLabelSelector string `toml:"kubernetes_label_selector"`
// Field Selector/s for Kubernetes
KubernetesFieldSelector string `toml:"kubernetes_field_selector"`
// Bearer Token authorization file path
BearerToken string `toml:"bearer_token"`
BearerTokenString string `toml:"bearer_token_string"`
// Basic authentication credentials
Username string `toml:"username"`
Password string `toml:"password"`
ResponseTimeout config.Duration `toml:"response_timeout"`
MetricVersion int `toml:"metric_version"`
URLTag string `toml:"url_tag"`
tls.ClientConfig
Log telegraf.Logger
client *http.Client
headers map[string]string
// Should we scrape Kubernetes services for prometheus annotations
MonitorPods bool `toml:"monitor_kubernetes_pods"`
PodScrapeScope string `toml:"pod_scrape_scope"`
NodeIP string `toml:"node_ip"`
PodScrapeInterval int `toml:"pod_scrape_interval"`
PodNamespace string `toml:"monitor_kubernetes_pods_namespace"`
lock sync.Mutex
kubernetesPods map[string]URLAndAddress
cancel context.CancelFunc
wg sync.WaitGroup
// Only for monitor_kubernetes_pods=true and pod_scrape_scope="node"
podLabelSelector labels.Selector
podFieldSelector fields.Selector
isNodeScrapeScope bool
}
var sampleConfig = `
## An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]
## Metric version controls the mapping from Prometheus metrics into
## Telegraf metrics. When using the prometheus_client output, use the same
## value in both plugins to ensure metrics are round-tripped without
## modification.
##
## example: metric_version = 1;
## metric_version = 2; recommended version
# metric_version = 1
## Url tag name (tag containing scraped url. optional, default is "url")
# url_tag = "url"
## An array of Kubernetes services to scrape metrics from.
# kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
## Kubernetes config file to create client from.
# kube_config = "/path/to/kubernetes.config"
## Scrape Kubernetes pods for the following prometheus annotations:
## - prometheus.io/scrape: Enable scraping for this pod
## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
## set this to 'https' & most likely set the tls config.
## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
## - prometheus.io/port: If port is not 9102 use this annotation
# monitor_kubernetes_pods = true
## Get the list of pods to scrape with either the scope of
## - cluster: the kubernetes watch api (default, no need to specify)
## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
# pod_scrape_scope = "cluster"
## Only for node scrape scope: node IP of the node that telegraf is running on.
## Either this config or the environment variable NODE_IP must be set.
# node_ip = "10.180.1.1"
## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping.
## Default is 60 seconds.
# pod_scrape_interval = 60
## Restricts Kubernetes monitoring to a single namespace
## ex: monitor_kubernetes_pods_namespace = "default"
# monitor_kubernetes_pods_namespace = ""
# label selector to target pods which have the label
# kubernetes_label_selector = "env=dev,app=nginx"
# field selector to target pods
# eg. To scrape pods on a specific node
# kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
## Use bearer token for authorization. ('bearer_token' takes priority)
# bearer_token = "/path/to/bearer/token"
## OR
# bearer_token_string = "abc_123"
## HTTP Basic Authentication username and password. ('bearer_token' and
## 'bearer_token_string' take priority)
# username = ""
# password = ""
## Specify timeout duration for slower prometheus clients (default is 3s)
# response_timeout = "3s"
## Optional TLS Config
# tls_ca = /path/to/cafile
# tls_cert = /path/to/certfile
# tls_key = /path/to/keyfile
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
func (p *Prometheus) SampleConfig() string {
return sampleConfig
}
func (p *Prometheus) Description() string {
return "Read metrics from one or many prometheus clients"
}
func (p *Prometheus) Init() error {
// Config processing for node scrape scope for monitor_kubernetes_pods
p.isNodeScrapeScope = strings.EqualFold(p.PodScrapeScope, "node")
if p.isNodeScrapeScope {
// Need node IP to make cAdvisor call for pod list. Check if set in config and valid IP address
if p.NodeIP == "" || net.ParseIP(p.NodeIP) == nil {
p.Log.Infof("The config node_ip is empty or invalid. Using NODE_IP env var as default.")
// Check if set as env var and is valid IP address
envVarNodeIP := os.Getenv("NODE_IP")
if envVarNodeIP == "" || net.ParseIP(envVarNodeIP) == nil {
return errors.New("the node_ip config and the environment variable NODE_IP are not set or invalid; " +
"cannot get pod list for monitor_kubernetes_pods using node scrape scope")
}
p.NodeIP = envVarNodeIP
}
// Parse label and field selectors - will be used to filter pods after cAdvisor call
var err error
p.podLabelSelector, err = labels.Parse(p.KubernetesLabelSelector)
if err != nil {
return fmt.Errorf("error parsing the specified label selector(s): %s", err.Error())
}
p.podFieldSelector, err = fields.ParseSelector(p.KubernetesFieldSelector)
if err != nil {
return fmt.Errorf("error parsing the specified field selector(s): %s", err.Error())
}
isValid, invalidSelector := fieldSelectorIsSupported(p.podFieldSelector)
if !isValid {
return fmt.Errorf("the field selector %s is not supported for pods", invalidSelector)
}
p.Log.Infof("Using pod scrape scope at node level to get pod list using cAdvisor.")
p.Log.Infof("Using the label selector: %v and field selector: %v", p.podLabelSelector, p.podFieldSelector)
}
return nil
}
var ErrProtocolError = errors.New("prometheus protocol error")
func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL {
host := address
if u.Port() != "" {
host = address + ":" + u.Port()
}
reconstructedURL := &url.URL{
Scheme: u.Scheme,
Opaque: u.Opaque,
User: u.User,
Path: u.Path,
RawPath: u.RawPath,
ForceQuery: u.ForceQuery,
RawQuery: u.RawQuery,
Fragment: u.Fragment,
Host: host,
}
return reconstructedURL
}
type URLAndAddress struct {
OriginalURL *url.URL
URL *url.URL
Address string
Tags map[string]string
}
func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) {
allURLs := make(map[string]URLAndAddress)
for _, u := range p.URLs {
URL, err := url.Parse(u)
if err != nil {
p.Log.Errorf("Could not parse %q, skipping it. Error: %s", u, err.Error())
continue
}
allURLs[URL.String()] = URLAndAddress{URL: URL, OriginalURL: URL}
}
p.lock.Lock()
defer p.lock.Unlock()
// loop through all pods scraped via the prometheus annotation on the pods
for k, v := range p.kubernetesPods {
allURLs[k] = v
}
for _, service := range p.KubernetesServices {
URL, err := url.Parse(service)
if err != nil {
return nil, err
}
resolvedAddresses, err := net.LookupHost(URL.Hostname())
if err != nil {
p.Log.Errorf("Could not resolve %q, skipping it. Error: %s", URL.Host, err.Error())
continue
}
for _, resolved := range resolvedAddresses {
serviceURL := p.AddressToURL(URL, resolved)
allURLs[serviceURL.String()] = URLAndAddress{
URL: serviceURL,
Address: resolved,
OriginalURL: URL,
}
}
}
return allURLs, nil
}
// Reads stats from all configured servers and accumulates stats.
// Returns one of the errors encountered while gathering stats (if any).
func (p *Prometheus) Gather(acc telegraf.Accumulator) error {
if p.client == nil {
client, err := p.createHTTPClient()
if err != nil {
return err
}
p.client = client
p.headers = map[string]string{
"User-Agent": internal.ProductToken(),
"Accept": acceptHeader,
}
}
var wg sync.WaitGroup
allURLs, err := p.GetAllURLs()
if err != nil {
return err
}
for _, URL := range allURLs {
wg.Add(1)
go func(serviceURL URLAndAddress) {
defer wg.Done()
acc.AddError(p.gatherURL(serviceURL, acc))
}(URL)
}
wg.Wait()
return nil
}
func (p *Prometheus) createHTTPClient() (*http.Client, error) {
tlsCfg, err := p.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}
client := &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsCfg,
DisableKeepAlives: true,
},
Timeout: time.Duration(p.ResponseTimeout),
}
return client, nil
}
func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error {
var req *http.Request
var err error
var uClient *http.Client
var metrics []telegraf.Metric
if u.URL.Scheme == "unix" {
path := u.URL.Query().Get("path")
if path == "" {
path = "/metrics"
}
addr := "http://localhost" + path
req, err = http.NewRequest("GET", addr, nil)
if err != nil {
return fmt.Errorf("unable to create new request '%s': %s", addr, err)
}
// ignore error because it's been handled before getting here
tlsCfg, _ := p.ClientConfig.TLSConfig()
uClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsCfg,
DisableKeepAlives: true,
Dial: func(network, addr string) (net.Conn, error) {
c, err := net.Dial("unix", u.URL.Path)
return c, err
},
},
Timeout: time.Duration(p.ResponseTimeout),
}
} else {
if u.URL.Path == "" {
u.URL.Path = "/metrics"
}
req, err = http.NewRequest("GET", u.URL.String(), nil)
if err != nil {
return fmt.Errorf("unable to create new request '%s': %s", u.URL.String(), err)
}
}
p.addHeaders(req)
if p.BearerToken != "" {
token, err := ioutil.ReadFile(p.BearerToken)
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+string(token))
} else if p.BearerTokenString != "" {
req.Header.Set("Authorization", "Bearer "+p.BearerTokenString)
} else if p.Username != "" || p.Password != "" {
req.SetBasicAuth(p.Username, p.Password)
}
var resp *http.Response
if u.URL.Scheme != "unix" {
resp, err = p.client.Do(req)
} else {
resp, err = uClient.Do(req)
}
if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", u.URL, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("error reading body: %s", err)
}
if p.MetricVersion == 2 {
parser := parser_v2.Parser{Header: resp.Header}
metrics, err = parser.Parse(body)
} else {
metrics, err = Parse(body, resp.Header)
}
if err != nil {
return fmt.Errorf("error reading metrics for %s: %s",
u.URL, err)
}
for _, metric := range metrics {
tags := metric.Tags()
// strip user and password from URL
u.OriginalURL.User = nil
if p.URLTag != "" {
tags[p.URLTag] = u.OriginalURL.String()
}
if u.Address != "" {
tags["address"] = u.Address
}
for k, v := range u.Tags {
tags[k] = v
}
switch metric.Type() {
case telegraf.Counter:
acc.AddCounter(metric.Name(), metric.Fields(), tags, metric.Time())
case telegraf.Gauge:
acc.AddGauge(metric.Name(), metric.Fields(), tags, metric.Time())
case telegraf.Summary:
acc.AddSummary(metric.Name(), metric.Fields(), tags, metric.Time())
case telegraf.Histogram:
acc.AddHistogram(metric.Name(), metric.Fields(), tags, metric.Time())
default:
acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time())
}
}
return nil
}
func (p *Prometheus) addHeaders(req *http.Request) {
for header, value := range p.headers {
req.Header.Add(header, value)
}
}
/* Check if the field selector specified is valid.
* See ToSelectableFields() for list of fields that are selectable:
* https://github.com/kubernetes/kubernetes/release-1.20/pkg/registry/core/pod/strategy.go
*/
func fieldSelectorIsSupported(fieldSelector fields.Selector) (bool, string) {
supportedFieldsToSelect := map[string]bool{
"spec.nodeName": true,
"spec.restartPolicy": true,
"spec.schedulerName": true,
"spec.serviceAccountName": true,
"status.phase": true,
"status.podIP": true,
"status.nominatedNodeName": true,
}
for _, requirement := range fieldSelector.Requirements() {
if !supportedFieldsToSelect[requirement.Field] {
return false, requirement.Field
}
}
return true, ""
}
// Start will start the Kubernetes scraping if enabled in the configuration
func (p *Prometheus) Start(_ telegraf.Accumulator) error {
if p.MonitorPods {
var ctx context.Context
ctx, p.cancel = context.WithCancel(context.Background())
return p.start(ctx)
}
return nil
}
func (p *Prometheus) Stop() {
if p.MonitorPods {
p.cancel()
}
p.wg.Wait()
}
func init() {
inputs.Add("prometheus", func() telegraf.Input {
return &Prometheus{
ResponseTimeout: config.Duration(time.Second * 3),
kubernetesPods: map[string]URLAndAddress{},
URLTag: "url",
}
})
}
|
[
"\"NODE_IP\""
] |
[] |
[
"NODE_IP"
] |
[]
|
["NODE_IP"]
|
go
| 1 | 0 | |
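Init() above prefers the node_ip config value and only falls back to the NODE_IP environment variable, validating the result as an IP address. The same fallback-and-validate step, sketched in Python for illustration (function names are assumptions):
import ipaddress
import os

def _is_ip(value):
    try:
        ipaddress.ip_address(value)
        return True
    except ValueError:
        return False

def resolve_node_ip(configured=""):
    candidate = configured
    if not _is_ip(candidate):
        # Config value missing or invalid: fall back to the environment.
        candidate = os.environ.get("NODE_IP", "")
    if not _is_ip(candidate):
        raise ValueError("node_ip config and NODE_IP env var are unset or invalid")
    return candidate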
cli/cmd/cfg/oldConfig.go
|
// Copyright © 2018 NAME HERE <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cfg
import (
"io/ioutil"
"os"
"time"
"github.com/jbvmio/kafkactl/cli/cx"
"github.com/jbvmio/kafkactl/cli/x/out"
"github.com/spf13/cast"
"github.com/spf13/viper"
yaml "gopkg.in/yaml.v2"
)
const homeConfigName = `.kafkactl.yaml`
// OldConfig contains a collection of cluster entries
type OldConfig struct {
Current string `json:"current" yaml:"current"`
Entries []Entry `json:"entries" yaml:"entries"`
}
// Entry contains kafka and burrow node details for a cluster
type Entry struct {
Name string `yaml:"name"`
Kafka []string `yaml:"kafka"`
Burrow []string `yaml:"burrow"`
Zookeeper []string `yaml:"zookeeper"`
}
func testReplaceOldConfig(filePath ...string) bool {
var configFilePath string
defaultFilePath := homeDir() + `/` + homeConfigName
switch {
case len(filePath) > 1:
out.Infof("Too Many Paths Specified.")
return false
case len(filePath) == 1 && filePath[0] != "":
configFilePath = filePath[0]
default:
switch {
case !fileExists(defaultFilePath):
out.Infof("No default config file found.\n Run kafkactl config --sample to display a sample config file.\n Save your config in ~/.kafkactl.yaml")
return false
case fileExists(defaultFilePath):
configFilePath = defaultFilePath
}
}
v := viper.New()
v.SetConfigFile(configFilePath)
v.ReadInConfig()
switch {
case !v.InConfig("config-version") && !v.InConfig("current-context"):
if v.InConfig("current") {
out.Infof("old config detected, converting ...")
var oldConfig OldConfig
v.Unmarshal(&oldConfig)
contexts := make(map[string]cx.Context, len(oldConfig.Entries))
for _, entry := range oldConfig.Entries {
ctx := cx.Context{
Name: entry.Name,
Brokers: entry.Kafka,
Burrow: entry.Burrow,
Zookeeper: entry.Zookeeper,
}
contexts[entry.Name] = ctx
}
newConfig := Config{
CurrentContext: oldConfig.Current,
Contexts: contexts,
ConfigVersion: configVersion,
}
backupFilePath := configFilePath + `.bak-` + cast.ToString(time.Now().Unix())
oc, err := yaml.Marshal(oldConfig)
out.IfErrf(err)
nc, err := yaml.Marshal(newConfig)
out.IfErrf(err)
writeConfig(backupFilePath, oc)
writeConfig(configFilePath, nc)
out.Infof("config [%v] has been converted to Latest.", configFilePath)
out.Infof("backup config saved as [%v]", backupFilePath)
return true
}
default:
out.Infof("config [%v] at Latest.", configFilePath)
}
return false
}
func writeConfig(path string, config []byte) {
err := ioutil.WriteFile(path, config, 0644)
out.IfErrf(err)
}
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
func fileExists(filename string) bool {
if _, err := os.Stat(filename); os.IsNotExist(err) {
return false
}
return true
}
/*
func returnConfig(config []byte) OldConfig {
conf := OldConfig{}
err := yaml.Unmarshal(config, &conf)
if err != nil {
log.Fatalf("Error returning config: %v\n", err)
}
return conf
}
func readConfig(path string) []byte {
file, err := ioutil.ReadFile(path)
if err != nil {
log.Fatalf("Error reading config file: %v\n", err)
}
return file
}
*/
|
[
"\"HOME\"",
"\"USERPROFILE\""
] |
[] |
[
"HOME",
"USERPROFILE"
] |
[]
|
["HOME", "USERPROFILE"]
|
go
| 2 | 0 | |
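homeDir() above checks HOME first and USERPROFILE as the Windows fallback. For comparison, the equivalent lookup sketched in Python, where os.path.expanduser already encapsulates the same environment checks:
import os

def home_dir():
    # Mirrors the Go helper: HOME first, then the Windows USERPROFILE.
    # os.path.expanduser("~") performs the same lookup internally.
    return os.environ.get("HOME") or os.environ.get("USERPROFILE") or os.path.expanduser("~")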
tests/test_logs/test_logs.py
|
import os
import time
from unittest import SkipTest
import boto3
import six
from botocore.exceptions import ClientError
import pytest
import sure # noqa
from moto import mock_logs, settings
_logs_region = "us-east-1" if settings.TEST_SERVER_MODE else "us-west-2"
@mock_logs
def test_create_log_group():
conn = boto3.client("logs", "us-west-2")
response = conn.create_log_group(logGroupName="dummy")
response = conn.describe_log_groups()
response["logGroups"].should.have.length_of(1)
response["logGroups"][0].should_not.have.key("retentionInDays")
@mock_logs
def test_exceptions():
conn = boto3.client("logs", "us-west-2")
log_group_name = "dummy"
log_stream_name = "dummp-stream"
conn.create_log_group(logGroupName=log_group_name)
with pytest.raises(ClientError):
conn.create_log_group(logGroupName=log_group_name)
# describe_log_groups is not implemented yet
conn.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
with pytest.raises(ClientError):
conn.create_log_stream(
logGroupName=log_group_name, logStreamName=log_stream_name
)
conn.put_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
logEvents=[{"timestamp": 0, "message": "line"}],
)
with pytest.raises(ClientError):
conn.put_log_events(
logGroupName=log_group_name,
logStreamName="invalid-stream",
logEvents=[{"timestamp": 0, "message": "line"}],
)
@mock_logs
def test_put_logs():
conn = boto3.client("logs", "us-west-2")
log_group_name = "dummy"
log_stream_name = "stream"
conn.create_log_group(logGroupName=log_group_name)
conn.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
messages = [
{"timestamp": 0, "message": "hello"},
{"timestamp": 0, "message": "world"},
]
putRes = conn.put_log_events(
logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=messages
)
res = conn.get_log_events(
logGroupName=log_group_name, logStreamName=log_stream_name
)
events = res["events"]
nextSequenceToken = putRes["nextSequenceToken"]
assert isinstance(nextSequenceToken, six.string_types)
assert len(nextSequenceToken) == 56
events.should.have.length_of(2)
@mock_logs
def test_filter_logs_interleaved():
conn = boto3.client("logs", "us-west-2")
log_group_name = "dummy"
log_stream_name = "stream"
conn.create_log_group(logGroupName=log_group_name)
conn.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
messages = [
{"timestamp": 0, "message": "hello"},
{"timestamp": 0, "message": "world"},
]
conn.put_log_events(
logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=messages
)
res = conn.filter_log_events(
logGroupName=log_group_name, logStreamNames=[log_stream_name], interleaved=True
)
events = res["events"]
for original_message, resulting_event in zip(messages, events):
resulting_event["eventId"].should.equal(str(resulting_event["eventId"]))
resulting_event["timestamp"].should.equal(original_message["timestamp"])
resulting_event["message"].should.equal(original_message["message"])
@mock_logs
def test_filter_logs_raises_if_filter_pattern():
if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true":
raise SkipTest("Does not work in server mode due to error in Workzeug")
conn = boto3.client("logs", "us-west-2")
log_group_name = "dummy"
log_stream_name = "stream"
conn.create_log_group(logGroupName=log_group_name)
conn.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
messages = [
{"timestamp": 0, "message": "hello"},
{"timestamp": 0, "message": "world"},
]
conn.put_log_events(
logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=messages
)
with pytest.raises(NotImplementedError):
conn.filter_log_events(
logGroupName=log_group_name,
logStreamNames=[log_stream_name],
filterPattern='{$.message = "hello"}',
)
@mock_logs
def test_filter_logs_paging():
conn = boto3.client("logs", "us-west-2")
log_group_name = "/aws/dummy"
log_stream_name = "stream/stage"
conn.create_log_group(logGroupName=log_group_name)
conn.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
timestamp = int(time.time())
messages = []
for i in range(25):
messages.append(
{"message": "Message number {}".format(i), "timestamp": timestamp}
)
timestamp += 100
conn.put_log_events(
logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=messages
)
res = conn.filter_log_events(
logGroupName=log_group_name, logStreamNames=[log_stream_name], limit=20
)
events = res["events"]
events.should.have.length_of(20)
res["nextToken"].should.equal("/aws/dummy@stream/stage@" + events[-1]["eventId"])
res = conn.filter_log_events(
logGroupName=log_group_name,
logStreamNames=[log_stream_name],
limit=20,
nextToken=res["nextToken"],
)
events += res["events"]
events.should.have.length_of(25)
res.should_not.have.key("nextToken")
for original_message, resulting_event in zip(messages, events):
resulting_event["eventId"].should.equal(str(resulting_event["eventId"]))
resulting_event["timestamp"].should.equal(original_message["timestamp"])
resulting_event["message"].should.equal(original_message["message"])
res = conn.filter_log_events(
logGroupName=log_group_name,
logStreamNames=[log_stream_name],
limit=20,
nextToken="invalid-token",
)
res["events"].should.have.length_of(0)
res.should_not.have.key("nextToken")
res = conn.filter_log_events(
logGroupName=log_group_name,
logStreamNames=[log_stream_name],
limit=20,
nextToken="wrong-group@stream@999",
)
res["events"].should.have.length_of(0)
res.should_not.have.key("nextToken")
@mock_logs
def test_put_retention_policy():
conn = boto3.client("logs", "us-west-2")
log_group_name = "dummy"
response = conn.create_log_group(logGroupName=log_group_name)
response = conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7)
response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
assert len(response["logGroups"]) == 1
assert response["logGroups"][0].get("retentionInDays") == 7
response = conn.delete_log_group(logGroupName=log_group_name)
@mock_logs
def test_delete_retention_policy():
conn = boto3.client("logs", "us-west-2")
log_group_name = "dummy"
response = conn.create_log_group(logGroupName=log_group_name)
response = conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7)
response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
assert len(response["logGroups"]) == 1
assert response["logGroups"][0].get("retentionInDays") == 7
response = conn.delete_retention_policy(logGroupName=log_group_name)
response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
assert len(response["logGroups"]) == 1
assert response["logGroups"][0].get("retentionInDays") == None
response = conn.delete_log_group(logGroupName=log_group_name)
@mock_logs
def test_get_log_events():
client = boto3.client("logs", "us-west-2")
log_group_name = "test"
log_stream_name = "stream"
client.create_log_group(logGroupName=log_group_name)
client.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
events = [{"timestamp": x, "message": str(x)} for x in range(20)]
client.put_log_events(
logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=events
)
resp = client.get_log_events(
logGroupName=log_group_name, logStreamName=log_stream_name, limit=10
)
resp["events"].should.have.length_of(10)
for i in range(10):
resp["events"][i]["timestamp"].should.equal(i + 10)
resp["events"][i]["message"].should.equal(str(i + 10))
resp["nextForwardToken"].should.equal(
"f/00000000000000000000000000000000000000000000000000000019"
)
resp["nextBackwardToken"].should.equal(
"b/00000000000000000000000000000000000000000000000000000010"
)
resp = client.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
nextToken=resp["nextBackwardToken"],
limit=20,
)
resp["events"].should.have.length_of(10)
for i in range(10):
resp["events"][i]["timestamp"].should.equal(i)
resp["events"][i]["message"].should.equal(str(i))
resp["nextForwardToken"].should.equal(
"f/00000000000000000000000000000000000000000000000000000009"
)
resp["nextBackwardToken"].should.equal(
"b/00000000000000000000000000000000000000000000000000000000"
)
resp = client.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
nextToken=resp["nextBackwardToken"],
limit=10,
)
resp["events"].should.have.length_of(0)
resp["nextForwardToken"].should.equal(
"f/00000000000000000000000000000000000000000000000000000000"
)
resp["nextBackwardToken"].should.equal(
"b/00000000000000000000000000000000000000000000000000000000"
)
resp = client.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
nextToken=resp["nextForwardToken"],
limit=1,
)
resp["events"].should.have.length_of(1)
resp["events"][0]["timestamp"].should.equal(1)
resp["events"][0]["message"].should.equal(str(1))
resp["nextForwardToken"].should.equal(
"f/00000000000000000000000000000000000000000000000000000001"
)
resp["nextBackwardToken"].should.equal(
"b/00000000000000000000000000000000000000000000000000000001"
)
@mock_logs
def test_get_log_events_with_start_from_head():
client = boto3.client("logs", "us-west-2")
log_group_name = "test"
log_stream_name = "stream"
client.create_log_group(logGroupName=log_group_name)
client.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
events = [{"timestamp": x, "message": str(x)} for x in range(20)]
client.put_log_events(
logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=events
)
resp = client.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
limit=10,
startFromHead=True, # this parameter is only relevant without the usage of nextToken
)
resp["events"].should.have.length_of(10)
for i in range(10):
resp["events"][i]["timestamp"].should.equal(i)
resp["events"][i]["message"].should.equal(str(i))
resp["nextForwardToken"].should.equal(
"f/00000000000000000000000000000000000000000000000000000009"
)
resp["nextBackwardToken"].should.equal(
"b/00000000000000000000000000000000000000000000000000000000"
)
resp = client.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
nextToken=resp["nextForwardToken"],
limit=20,
)
resp["events"].should.have.length_of(10)
for i in range(10):
resp["events"][i]["timestamp"].should.equal(i + 10)
resp["events"][i]["message"].should.equal(str(i + 10))
resp["nextForwardToken"].should.equal(
"f/00000000000000000000000000000000000000000000000000000019"
)
resp["nextBackwardToken"].should.equal(
"b/00000000000000000000000000000000000000000000000000000010"
)
resp = client.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
nextToken=resp["nextForwardToken"],
limit=10,
)
resp["events"].should.have.length_of(0)
resp["nextForwardToken"].should.equal(
"f/00000000000000000000000000000000000000000000000000000019"
)
resp["nextBackwardToken"].should.equal(
"b/00000000000000000000000000000000000000000000000000000019"
)
resp = client.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
nextToken=resp["nextBackwardToken"],
limit=1,
)
resp["events"].should.have.length_of(1)
resp["events"][0]["timestamp"].should.equal(18)
resp["events"][0]["message"].should.equal(str(18))
resp["nextForwardToken"].should.equal(
"f/00000000000000000000000000000000000000000000000000000018"
)
resp["nextBackwardToken"].should.equal(
"b/00000000000000000000000000000000000000000000000000000018"
)
@mock_logs
def test_get_log_events_errors():
client = boto3.client("logs", "us-west-2")
log_group_name = "test"
log_stream_name = "stream"
client.create_log_group(logGroupName=log_group_name)
client.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
with pytest.raises(ClientError) as e:
client.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
nextToken="n/00000000000000000000000000000000000000000000000000000000",
)
ex = e.value
ex.operation_name.should.equal("GetLogEvents")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.equal("InvalidParameterException")
ex.response["Error"]["Message"].should.contain(
"The specified nextToken is invalid."
)
with pytest.raises(ClientError) as e:
client.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
nextToken="not-existing-token",
)
ex = e.value
ex.operation_name.should.equal("GetLogEvents")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.equal("InvalidParameterException")
ex.response["Error"]["Message"].should.contain(
"The specified nextToken is invalid."
)
@mock_logs
def test_list_tags_log_group():
conn = boto3.client("logs", "us-west-2")
log_group_name = "dummy"
tags = {"tag_key_1": "tag_value_1", "tag_key_2": "tag_value_2"}
response = conn.create_log_group(logGroupName=log_group_name)
response = conn.list_tags_log_group(logGroupName=log_group_name)
assert response["tags"] == {}
response = conn.delete_log_group(logGroupName=log_group_name)
response = conn.create_log_group(logGroupName=log_group_name, tags=tags)
response = conn.list_tags_log_group(logGroupName=log_group_name)
assert response["tags"] == tags
response = conn.delete_log_group(logGroupName=log_group_name)
@mock_logs
def test_tag_log_group():
conn = boto3.client("logs", "us-west-2")
log_group_name = "dummy"
tags = {"tag_key_1": "tag_value_1"}
response = conn.create_log_group(logGroupName=log_group_name)
response = conn.tag_log_group(logGroupName=log_group_name, tags=tags)
response = conn.list_tags_log_group(logGroupName=log_group_name)
assert response["tags"] == tags
tags_with_added_value = {"tag_key_1": "tag_value_1", "tag_key_2": "tag_value_2"}
response = conn.tag_log_group(
logGroupName=log_group_name, tags={"tag_key_2": "tag_value_2"}
)
response = conn.list_tags_log_group(logGroupName=log_group_name)
assert response["tags"] == tags_with_added_value
tags_with_updated_value = {"tag_key_1": "tag_value_XX", "tag_key_2": "tag_value_2"}
response = conn.tag_log_group(
logGroupName=log_group_name, tags={"tag_key_1": "tag_value_XX"}
)
response = conn.list_tags_log_group(logGroupName=log_group_name)
assert response["tags"] == tags_with_updated_value
response = conn.delete_log_group(logGroupName=log_group_name)
@mock_logs
def test_untag_log_group():
conn = boto3.client("logs", "us-west-2")
log_group_name = "dummy"
response = conn.create_log_group(logGroupName=log_group_name)
tags = {"tag_key_1": "tag_value_1", "tag_key_2": "tag_value_2"}
response = conn.tag_log_group(logGroupName=log_group_name, tags=tags)
response = conn.list_tags_log_group(logGroupName=log_group_name)
assert response["tags"] == tags
tags_to_remove = ["tag_key_1"]
remaining_tags = {"tag_key_2": "tag_value_2"}
response = conn.untag_log_group(logGroupName=log_group_name, tags=tags_to_remove)
response = conn.list_tags_log_group(logGroupName=log_group_name)
assert response["tags"] == remaining_tags
response = conn.delete_log_group(logGroupName=log_group_name)
@mock_logs
def test_describe_subscription_filters():
# given
client = boto3.client("logs", "us-east-1")
log_group_name = "/test"
client.create_log_group(logGroupName=log_group_name)
# when
response = client.describe_subscription_filters(logGroupName=log_group_name)
# then
response["subscriptionFilters"].should.have.length_of(0)
@mock_logs
def test_describe_subscription_filters_errors():
# given
client = boto3.client("logs", "us-east-1")
# when
with pytest.raises(ClientError) as e:
client.describe_subscription_filters(logGroupName="not-existing-log-group",)
# then
ex = e.value
ex.operation_name.should.equal("DescribeSubscriptionFilters")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
ex.response["Error"]["Message"].should.equal(
"The specified log group does not exist"
)
@mock_logs
def test_describe_log_groups_paging():
client = boto3.client("logs", "us-east-1")
group_names = [
"/aws/lambda/lowercase-dev",
"/aws/lambda/FileMonitoring",
"/aws/events/GetMetricData",
"/aws/lambda/fileAvailable",
]
for name in group_names:
client.create_log_group(logGroupName=name)
resp = client.describe_log_groups()
resp["logGroups"].should.have.length_of(4)
resp.should_not.have.key("nextToken")
resp = client.describe_log_groups(limit=2)
resp["logGroups"].should.have.length_of(2)
resp["nextToken"].should.equal("/aws/lambda/FileMonitoring")
resp = client.describe_log_groups(nextToken=resp["nextToken"], limit=1)
resp["logGroups"].should.have.length_of(1)
resp["nextToken"].should.equal("/aws/lambda/fileAvailable")
resp = client.describe_log_groups(nextToken=resp["nextToken"])
resp["logGroups"].should.have.length_of(1)
resp["logGroups"][0]["logGroupName"].should.equal("/aws/lambda/lowercase-dev")
resp.should_not.have.key("nextToken")
resp = client.describe_log_groups(nextToken="invalid-token")
resp["logGroups"].should.have.length_of(0)
resp.should_not.have.key("nextToken")
@mock_logs
def test_describe_log_streams_paging():
client = boto3.client("logs", "us-east-1")
log_group_name = "/aws/codebuild/lowercase-dev"
stream_names = [
"job/214/stage/unit_tests/foo",
"job/215/stage/unit_tests/spam",
"job/215/stage/e2e_tests/eggs",
"job/216/stage/unit_tests/eggs",
]
client.create_log_group(logGroupName=log_group_name)
for name in stream_names:
client.create_log_stream(logGroupName=log_group_name, logStreamName=name)
resp = client.describe_log_streams(logGroupName=log_group_name)
resp["logStreams"].should.have.length_of(4)
resp["logStreams"][0]["arn"].should.contain(log_group_name)
resp.should_not.have.key("nextToken")
resp = client.describe_log_streams(logGroupName=log_group_name, limit=2)
resp["logStreams"].should.have.length_of(2)
resp["logStreams"][0]["arn"].should.contain(log_group_name)
resp["nextToken"].should.equal(
u"{}@{}".format(log_group_name, resp["logStreams"][1]["logStreamName"])
)
resp = client.describe_log_streams(
logGroupName=log_group_name, nextToken=resp["nextToken"], limit=1
)
resp["logStreams"].should.have.length_of(1)
resp["logStreams"][0]["arn"].should.contain(log_group_name)
resp["nextToken"].should.equal(
u"{}@{}".format(log_group_name, resp["logStreams"][0]["logStreamName"])
)
resp = client.describe_log_streams(
logGroupName=log_group_name, nextToken=resp["nextToken"]
)
resp["logStreams"].should.have.length_of(1)
resp["logStreams"][0]["arn"].should.contain(log_group_name)
resp.should_not.have.key("nextToken")
resp = client.describe_log_streams(
logGroupName=log_group_name, nextToken="invalid-token"
)
resp["logStreams"].should.have.length_of(0)
resp.should_not.have.key("nextToken")
resp = client.describe_log_streams(
logGroupName=log_group_name, nextToken="invalid@token"
)
resp["logStreams"].should.have.length_of(0)
resp.should_not.have.key("nextToken")
@mock_logs
def test_start_query():
client = boto3.client("logs", "us-east-1")
log_group_name = "/aws/codebuild/lowercase-dev"
client.create_log_group(logGroupName=log_group_name)
response = client.start_query(
logGroupName=log_group_name,
startTime=int(time.time()),
endTime=int(time.time()) + 300,
queryString="test",
)
assert "queryId" in response
with pytest.raises(ClientError) as e:
client.start_query(
logGroupName="/aws/codebuild/lowercase-dev-invalid",
startTime=int(time.time()),
endTime=int(time.time()) + 300,
queryString="test",
)
# then
ex = e.value
ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
ex.response["Error"]["Message"].should.equal(
"The specified log group does not exist"
)
|
[] |
[] |
[
"TEST_SERVER_MODE"
] |
[]
|
["TEST_SERVER_MODE"]
|
python
| 1 | 0 | |
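test_filter_logs_raises_if_filter_pattern above skips itself by raising SkipTest when TEST_SERVER_MODE is true. The same environment-driven skip can be expressed declaratively with a pytest marker, sketched below (the test body is a placeholder):
import os
import pytest

in_server_mode = os.environ.get("TEST_SERVER_MODE", "false").lower() == "true"

@pytest.mark.skipif(in_server_mode, reason="not supported in moto server mode")
def test_only_runs_in_process():
    assert True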
native_client_sdk/src/tools/sel_ldr.py
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper script for launching application within the sel_ldr.
"""
import optparse
import os
import subprocess
import sys
import create_nmf
import getos
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NACL_SDK_ROOT = os.path.dirname(SCRIPT_DIR)
if sys.version_info < (2, 7, 0):
sys.stderr.write("python 2.7 or later is required run this script\n")
sys.exit(1)
class Error(Exception):
pass
def Log(msg):
if Log.verbose:
sys.stderr.write(str(msg) + '\n')
Log.verbose = False
def FindQemu():
qemu_locations = [os.path.join(SCRIPT_DIR, 'qemu_arm'),
os.path.join(SCRIPT_DIR, 'qemu-arm')]
qemu_locations += [os.path.join(path, 'qemu_arm')
for path in os.environ["PATH"].split(os.pathsep)]
qemu_locations += [os.path.join(path, 'qemu-arm')
for path in os.environ["PATH"].split(os.pathsep)]
# See if qemu is in any of these locations.
qemu_bin = None
for loc in qemu_locations:
if os.path.isfile(loc) and os.access(loc, os.X_OK):
qemu_bin = loc
break
return qemu_bin
def main(argv):
usage = 'Usage: %prog [options] <.nexe>'
epilog = 'Example: sel_ldr.py my_nexe.nexe'
parser = optparse.OptionParser(usage, description=__doc__, epilog=epilog)
parser.add_option('-v', '--verbose', action='store_true',
help='Verbose output')
parser.add_option('-d', '--debug', action='store_true',
help='Enable debug stub')
parser.add_option('--debug-libs', action='store_true',
help='For dynamic executables, reference debug '
'libraries rather than release')
# To enable bash completion for this command first install optcomplete
# and then add this line to your .bashrc:
# complete -F _optcomplete sel_ldr.py
try:
import optcomplete
optcomplete.autocomplete(parser)
except ImportError:
pass
options, args = parser.parse_args(argv)
if not args:
parser.error('No executable file specified')
nexe = args[0]
if options.verbose:
Log.verbose = True
osname = getos.GetPlatform()
if not os.path.exists(nexe):
raise Error('executable not found: %s' % nexe)
if not os.path.isfile(nexe):
raise Error('not a file: %s' % nexe)
arch, dynamic = create_nmf.ParseElfHeader(nexe)
if arch == 'arm' and osname != 'linux':
raise Error('Cannot run ARM executables under sel_ldr on ' + osname)
arch_suffix = arch.replace('-', '_')
sel_ldr = os.path.join(SCRIPT_DIR, 'sel_ldr_%s' % arch_suffix)
irt = os.path.join(SCRIPT_DIR, 'irt_core_%s.nexe' % arch_suffix)
if osname == 'win':
sel_ldr += '.exe'
Log('ROOT = %s' % NACL_SDK_ROOT)
Log('SEL_LDR = %s' % sel_ldr)
Log('IRT = %s' % irt)
cmd = [sel_ldr]
if osname == 'linux':
# Run sel_ldr under nacl_helper_bootstrap
helper = os.path.join(SCRIPT_DIR, 'nacl_helper_bootstrap_%s' % arch_suffix)
Log('HELPER = %s' % helper)
cmd.insert(0, helper)
cmd.append('--r_debug=0xXXXXXXXXXXXXXXXX')
cmd.append('--reserved_at_zero=0xXXXXXXXXXXXXXXXX')
cmd += ['-a', '-B', irt]
if options.debug:
cmd.append('-g')
if not options.verbose:
cmd += ['-l', os.devnull]
if arch == 'arm':
# Use the QEMU arm emulator if available.
qemu_bin = FindQemu()
if qemu_bin:
qemu = [qemu_bin, '-cpu', 'cortex-a8', '-L',
os.path.abspath(os.path.join(NACL_SDK_ROOT, 'toolchain',
'linux_arm_trusted'))]
# '-Q' disables platform qualification, allowing arm binaries to run.
cmd = qemu + cmd + ['-Q']
else:
raise Error('Cannot run ARM executables under sel_ldr without an emulator'
'. Try installing QEMU (http://wiki.qemu.org/).')
if dynamic:
if options.debug_libs:
libpath = os.path.join(NACL_SDK_ROOT, 'lib',
'glibc_%s' % arch_suffix, 'Debug')
else:
libpath = os.path.join(NACL_SDK_ROOT, 'lib',
'glibc_%s' % arch_suffix, 'Release')
toolchain = '%s_x86_glibc' % osname
sdk_lib_dir = os.path.join(NACL_SDK_ROOT, 'toolchain',
toolchain, 'x86_64-nacl')
if arch == 'x86-64':
sdk_lib_dir = os.path.join(sdk_lib_dir, 'lib')
else:
sdk_lib_dir = os.path.join(sdk_lib_dir, 'lib32')
ldso = os.path.join(sdk_lib_dir, 'runnable-ld.so')
cmd.append(ldso)
Log('LD.SO = %s' % ldso)
libpath += ':' + sdk_lib_dir
cmd.append('--library-path')
cmd.append(libpath)
if args:
# Append arguments for the executable itself.
cmd += args
Log(cmd)
rtn = subprocess.call(cmd)
return rtn
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except Error as e:
sys.stderr.write(str(e) + '\n')
sys.exit(1)
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
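FindQemu() above scans every PATH entry by hand and checks the executable bit itself. A shorter Python sketch of the same lookup using shutil.which (note it omits the SCRIPT_DIR check the original performs first):
import shutil

def find_qemu():
    # shutil.which walks PATH and returns the first executable match, or None.
    for name in ("qemu_arm", "qemu-arm"):
        path = shutil.which(name)
        if path:
            return path
    return None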
docs/conf.py
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 23 19:40:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import gc
import importlib.util
import inspect
import os
from pathlib import Path
import shlex
import subprocess
import sys
import sphinx_gallery
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curr_path = Path(__file__).expanduser().absolute().parent
if curr_path.name == "_staging":
# Can't use curr_path.parent, because sphinx_gallery requires a relative path.
tvm_path = Path(os.pardir, os.pardir)
else:
tvm_path = Path(os.pardir)
sys.path.insert(0, str(tvm_path / "python"))
sys.path.insert(0, str(tvm_path / "vta" / "python"))
# -- General configuration ------------------------------------------------
# General information about the project.
project = "tvm"
author = "Apache Software Foundation"
copyright = "2020 - 2021, %s" % author
github_doc_root = "https://github.com/apache/tvm/tree/main/docs/"
os.environ["TVM_BUILD_DOC"] = "1"
def git_describe_version(original_version):
"""Get git describe version."""
ver_py = tvm_path.joinpath("version.py")
libver = {"__file__": ver_py}
exec(compile(open(ver_py, "rb").read(), ver_py, "exec"), libver, libver)
_, gd_version = libver["git_describe_version"]()
if gd_version != original_version:
print("Use git describe based version %s" % gd_version)
return gd_version
# Version information.
import tvm
from tvm import topi
from tvm import te
from tvm import testing
version = git_describe_version(tvm.__version__)
release = version
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx_gallery.gen_gallery",
"autodocsumm",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# generate autosummary even if no references
autosummary_generate = True
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "_staging"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme is set by the make target
html_theme = os.environ.get("TVM_THEME", "rtd")
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# only import rtd theme and set it if want to build docs locally
if not on_rtd and html_theme == "rtd":
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_theme_options = {
"analytics_id": "UA-75982049-2",
"logo_only": True,
}
html_logo = "_static/img/tvm-logo-small.png"
html_favicon = "_static/img/tvm-logo-square.png"
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "%s.tex" % project, project, author, "manual"),
]
intersphinx_mapping = {
"python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
"numpy": ("https://numpy.org/doc/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/", None),
"matplotlib": ("https://matplotlib.org/", None),
}
from sphinx_gallery.sorting import ExplicitOrder
examples_dirs = [tvm_path.joinpath("tutorials"), tvm_path.joinpath("vta", "tutorials")]
gallery_dirs = ["tutorials", "vta/tutorials"]
subsection_order = ExplicitOrder(
str(p)
for p in [
tvm_path / "tutorials" / "get_started",
tvm_path / "tutorials" / "frontend",
tvm_path / "tutorials" / "language",
tvm_path / "tutorials" / "optimize",
tvm_path / "tutorials" / "autotvm",
tvm_path / "tutorials" / "auto_scheduler",
tvm_path / "tutorials" / "dev",
tvm_path / "tutorials" / "topi",
tvm_path / "tutorials" / "deployment",
tvm_path / "tutorials" / "micro",
tvm_path / "vta" / "tutorials" / "frontend",
tvm_path / "vta" / "tutorials" / "optimize",
tvm_path / "vta" / "tutorials" / "autotvm",
]
)
# Explicitly define the order within a subsection.
# The listed files are sorted according to the list.
# The unlisted files are sorted by filenames.
# The unlisted files always appear after listed files.
within_subsection_order = {
"get_started": [
"introduction.py",
"install.py",
"tvmc_command_line_driver.py",
"autotvm_relay_x86.py",
"tensor_expr_get_started.py",
"autotvm_matmul_x86.py",
"auto_scheduler_matmul_x86.py",
"cross_compilation_and_rpc.py",
"relay_quick_start.py",
],
"frontend": [
"from_pytorch.py",
"from_tensorflow.py",
"from_mxnet.py",
"from_onnx.py",
"from_keras.py",
"from_tflite.py",
"from_coreml.py",
"from_darknet.py",
"from_caffe2.py",
"from_paddle.py",
],
"language": [
"schedule_primitives.py",
"reduction.py",
"intrin_math.py",
"scan.py",
"extern_op.py",
"tensorize.py",
"tuple_inputs.py",
"tedd.py",
],
"optimize": [
"opt_gemm.py",
"opt_conv_cuda.py",
"opt_conv_tensorcore.py",
],
"autotvm": [
"tune_simple_template.py",
"tune_conv2d_cuda.py",
"tune_relay_cuda.py",
"tune_relay_x86.py",
"tune_relay_arm.py",
"tune_relay_mobile_gpu.py",
],
"auto_scheduler": [
"tune_matmul_x86.py",
"tune_conv2d_layer_cuda.py",
"tune_network_x86.py",
"tune_network_cuda.py",
],
"dev": [
"low_level_custom_pass.py",
"use_pass_infra.py",
"use_pass_instrument.py",
"bring_your_own_datatypes.py",
],
}
class WithinSubsectionOrder:
def __init__(self, src_dir):
self.src_dir = src_dir.split("/")[-1]
def __call__(self, filename):
# If the order is provided, use the provided order
if (
self.src_dir in within_subsection_order
and filename in within_subsection_order[self.src_dir]
):
index = within_subsection_order[self.src_dir].index(filename)
assert index < 1e10
return "\0%010d" % index
# Otherwise, sort by filename
return filename
# When running the tutorials on GPUs we are dependent on the Python garbage collector
# collecting TVM packed function closures for any device memory to also be released. This
# is not a good setup for machines with lots of CPU ram but constrained GPU ram, so force
# a gc after each example.
def force_gc(gallery_conf, fname):
gc.collect()
sphinx_gallery_conf = {
"backreferences_dir": "gen_modules/backreferences",
"doc_module": ("tvm", "numpy"),
"reference_url": {
"tvm": None,
"matplotlib": "https://matplotlib.org/",
"numpy": "https://numpy.org/doc/stable",
},
"examples_dirs": examples_dirs,
"within_subsection_order": WithinSubsectionOrder,
"gallery_dirs": gallery_dirs,
"subsection_order": subsection_order,
"filename_pattern": os.environ.get("TVM_TUTORIAL_EXEC_PATTERN", ".py"),
"find_mayavi_figures": False,
"download_all_examples": False,
"min_reported_time": 60,
"expected_failing_examples": [],
"reset_modules": ("matplotlib", "seaborn", force_gc),
}
autodoc_default_options = {
"member-order": "bysource",
}
# Maps the original namespace to list of potential modules
# that we can import alias from.
tvm_alias_check_map = {
"tvm.te": ["tvm.tir"],
"tvm.tir": ["tvm.ir", "tvm.runtime"],
"tvm.relay": ["tvm.ir", "tvm.tir"],
}
## Setup header and other configs
import tlcpack_sphinx_addon
footer_copyright = "© 2020 Apache Software Foundation | All right reserved"
footer_note = " ".join(
"""
Copyright © 2020 The Apache Software Foundation. Apache TVM, Apache, the Apache feather,
and the Apache TVM project logo are either trademarks or registered trademarks of
the Apache Software Foundation.""".split(
"\n"
)
).strip()
header_logo = "https://tvm.apache.org/assets/images/logo.svg"
header_logo_link = "https://tvm.apache.org/"
header_links = [
("Community", "https://tvm.apache.org/community"),
("Download", "https://tvm.apache.org/download"),
("VTA", "https://tvm.apache.org/vta"),
("Blog", "https://tvm.apache.org/blog"),
("Docs", "https://tvm.apache.org/docs"),
("Conference", "https://tvmconf.org"),
("Github", "https://github.com/apache/tvm/"),
]
header_dropdown = {
"name": "ASF",
"items": [
("Apache Homepage", "https://apache.org/"),
("License", "https://www.apache.org/licenses/"),
("Sponsorship", "https://www.apache.org/foundation/sponsorship.html"),
("Security", "https://www.apache.org/security/"),
("Thanks", "https://www.apache.org/foundation/thanks.html"),
("Events", "https://www.apache.org/events/current-event"),
],
}
html_context = {
"footer_copyright": footer_copyright,
"footer_note": footer_note,
"header_links": header_links,
"header_dropdown": header_dropdown,
"header_logo": header_logo,
"header_logo_link": header_logo_link,
}
# add additional overrides
templates_path += [tlcpack_sphinx_addon.get_templates_path()]
html_static_path += [tlcpack_sphinx_addon.get_static_path()]
def update_alias_docstring(name, obj, lines):
"""Update the docstring of alias functions.
This function checks if the obj is an alias of another documented object
in a different module.
If it is an alias, then it will append the alias information to the docstring.
Parameters
----------
name : str
The full name of the object in the doc.
obj : object
The original object.
lines : list
The docstring lines, need to be modified inplace.
"""
arr = name.rsplit(".", 1)
if len(arr) != 2:
return
target_mod, target_name = arr
if target_mod not in tvm_alias_check_map:
return
if not hasattr(obj, "__module__"):
return
obj_mod = obj.__module__
for amod in tvm_alias_check_map[target_mod]:
if not obj_mod.startswith(amod):
continue
if hasattr(sys.modules[amod], target_name):
obj_type = ":py:func" if callable(obj) else ":py:class"
lines.append(".. rubric:: Alias of %s:`%s.%s`" % (obj_type, amod, target_name))
def process_docstring(app, what, name, obj, options, lines):
"""Sphinx callback to process docstring"""
if callable(obj) or inspect.isclass(obj):
update_alias_docstring(name, obj, lines)
def setup(app):
app.connect("autodoc-process-docstring", process_docstring)
|
[] |
[] |
[
"TVM_BUILD_DOC",
"TVM_TUTORIAL_EXEC_PATTERN",
"TVM_THEME",
"READTHEDOCS"
] |
[]
|
["TVM_BUILD_DOC", "TVM_TUTORIAL_EXEC_PATTERN", "TVM_THEME", "READTHEDOCS"]
|
python
| 4 | 0 | |
docs/source/conf.py
|
# -*- coding: utf-8 -*-
#
# botocore documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 2 07:26:23 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from botocore.session import get_session
from botocore.docs import generate_docs
generate_docs(os.path.dirname(os.path.abspath(__file__)), get_session())
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'botocore'
copyright = u'2013, Mitch Garnaat'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.16.'
# The full version, including alpha/beta/rc tags.
release = '1.16.22'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_show_sourcelink = False
html_sidebars = {
'**': ['logo-text.html',
'globaltoc.html',
'localtoc.html',
'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'botocoredoc'
import guzzle_sphinx_theme
extensions.append("guzzle_sphinx_theme")
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
# hack to add tracking
"google_analytics_account": os.getenv('TRACKING', False),
"base_url": "http://docs.aws.amazon.com/aws-sdk-php/guide/latest/"
}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'botocore.tex', u'botocore Documentation',
u'Mitch Garnaat', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'botocore', u'botocore Documentation',
[u'Mitch Garnaat'], 3)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'botocore', u'botocore Documentation',
u'Mitch Garnaat', 'botocore', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
[] |
[] |
[
"TRACKING"
] |
[]
|
["TRACKING"]
|
python
| 1 | 0 | |
source/lambda/firehose_topic_proxy/util/topic.py
|
#!/usr/bin/env python
######################################################################################################################
# Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import json
import os
from datetime import datetime
import boto3
from botocore import config
from shared_util import custom_boto_config, custom_logging
logger = custom_logging.get_logger(__name__)
firehose = boto3.client("firehose", config=custom_boto_config.init())
def store_topics(data):
for key in data:
for record in data[key]:
logger.debug("Record information for writing to Firehose is " + json.dumps(record))
response = firehose.put_record(
DeliveryStreamName=os.environ["TOPICS_FIREHOSE"],
Record={
"Data": json.dumps(
{
"job_id": record["job_id"],
"job_timestamp": datetime.strftime(
datetime.strptime(record["job_timestamp"], "%Y-%m-%dT%H:%M:%S.%fZ"),
"%Y-%m-%d %H:%M:%S.%f",
),
"topic": record["topic"],
"term": record["term"],
"weight": record["weight"],
}
)
+ "\n"
},
)
logger.debug("Response for record " + record["job_id"] + "is " + json.dumps(response))
def store_mappings(data):
logger.debug("Data received is " + json.dumps(data))
response = firehose.put_record(
DeliveryStreamName=os.environ["TOPIC_MAPPINGS_FIREHOSE"],
Record={
"Data": json.dumps(
{
"platform": data["platform"],
"job_id": data["job_id"],
"job_timestamp": datetime.strftime(
datetime.strptime(data["job_timestamp"], "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d %H:%M:%S.%f"
),
"topic": data["topic"],
"id_str": data["id_str"],
}
)
+ "\n"
},
)
logger.debug(
"Response for record "
+ json.dumps({"platform": data["platform"], "topic": data["topic"], "id_str": data["id_str"]})
+ "is "
+ json.dumps(response)
)
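# Minimal usage sketch (all values below are hypothetical; assumes the
# TOPICS_FIREHOSE and TOPIC_MAPPINGS_FIREHOSE delivery streams exist):
#   store_topics({
#       "job-1": [{
#           "job_id": "job-1",
#           "job_timestamp": "2021-01-01T00:00:00.000Z",
#           "topic": "000",
#           "term": "example",
#           "weight": "0.05",
#       }]
#   })
#   store_mappings({
#       "platform": "twitter",
#       "job_id": "job-1",
#       "job_timestamp": "2021-01-01T00:00:00.000Z",
#       "topic": "000",
#       "id_str": "1234567890",
#   })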
|
[] |
[] |
[
"TOPICS_FIREHOSE",
"TOPIC_MAPPINGS_FIREHOSE"
] |
[]
|
["TOPICS_FIREHOSE", "TOPIC_MAPPINGS_FIREHOSE"]
|
python
| 2 | 0 | |
src/it/java/org/eclipse/microprofile/starter/utils/Commands.java
|
/*
* Copyright (c) 2017-2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.eclipse.microprofile.starter.utils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import javax.ws.rs.client.Client;
import javax.ws.rs.core.Response;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
import static org.eclipse.microprofile.starter.TestMatrixTest.API_URL;
import static org.eclipse.microprofile.starter.TestMatrixTest.TMP;
import static org.junit.Assert.assertEquals;
/**
* @author Michal Karm Babacek <[email protected]>
*/
public class Commands {
private static final Logger LOGGER = Logger.getLogger(Commands.class.getName());
private static final String STARTER_TS_WORKSPACE = "STARTER_TS_WORKSPACE";
public static String getWorkspaceDir() {
String env = System.getenv().get(STARTER_TS_WORKSPACE);
if (StringUtils.isNotBlank(env)) {
return env;
}
String sys = System.getProperty(STARTER_TS_WORKSPACE);
if (StringUtils.isNotBlank(sys)) {
return sys;
}
return System.getProperty("java.io.tmpdir");
}
public static void download(Client client, String supportedServer, String artifactId, SpecSelection specSelection, String location) {
String uri = API_URL + "/project?supportedServer=" + supportedServer + specSelection.queryParam + "&artifactId=" + artifactId;
LOGGER.info("from " + uri);
Response response = client.target(uri).request().get();
assertEquals("Download failed.", Response.Status.OK.getStatusCode(), response.getStatus());
try (FileOutputStream out = new FileOutputStream(location); InputStream in = (InputStream) response.getEntity()) {
in.transferTo(out);
out.flush();
} catch (IOException e) {
e.printStackTrace();
}
}
public static File unzip(String location, String artifactId) throws InterruptedException, IOException {
ProcessBuilder pb;
if (isThisWindows()) {
pb = new ProcessBuilder("powershell", "-c", "Expand-Archive", "-Path", location, "-DestinationPath", TMP, "-Force");
} else {
pb = new ProcessBuilder("unzip", "-o", location, "-d", TMP);
}
Map<String, String> env = pb.environment();
env.put("PATH", System.getenv("PATH"));
pb.directory(new File(TMP));
File unzipLog = new File(TMP + File.separator + artifactId + "-unzip.log");
pb.redirectErrorStream(true);
pb.redirectOutput(ProcessBuilder.Redirect.to(unzipLog));
Process p = pb.start();
// On slow cloud VMs with weird I/O, this could be minutes for some reason...
p.waitFor(3, TimeUnit.MINUTES);
return unzipLog;
}
/**
* Does not fail the TS. Makes best effort to clean up.
*
* @param artifactId by convention, this is a filename friendly name of the server
*/
public static void cleanWorkspace(String artifactId) {
String path = TMP + File.separator + artifactId;
try {
FileUtils.deleteDirectory(new File(path));
} catch (IOException e) {
// Silence is golden
}
// onExit covers corner cases on Windows if a fd lock is held
(new File(path + ".zip")).deleteOnExit();
(new File(path + "-unzip.log")).deleteOnExit();
(new File(path + ".zip")).delete();
(new File(path + "-unzip.log")).delete();
}
public static boolean waitForTcpClosed(String host, int port, long loopTimeoutS) throws InterruptedException, UnknownHostException {
InetAddress address = InetAddress.getByName(host);
long now = System.currentTimeMillis();
long startTime = now;
InetSocketAddress socketAddr = new InetSocketAddress(address, port);
while (now - startTime < 1000 * loopTimeoutS) {
try (Socket socket = new Socket()) {
// If it lets you write something there, it is still open.
socket.connect(socketAddr, 1000);
socket.setSendBufferSize(1);
socket.getOutputStream().write(1);
socket.shutdownInput();
socket.shutdownOutput();
LOGGER.info("Socket still available: " + host + ":" + port);
} catch (IOException e) {
// Exception thrown - socket is likely closed.
return true;
}
Thread.sleep(1000);
now = System.currentTimeMillis();
}
return false;
}
public static int parsePort(String url) {
return Integer.parseInt(url.split(":")[2].split("/")[0]);
}
public static Process runCommand(String[] command, File directory, File logFile) {
ProcessBuilder pa;
if (isThisWindows()) {
pa = new ProcessBuilder(ArrayUtils.addAll(new String[]{"cmd", "/C"}, command));
} else {
pa = new ProcessBuilder(ArrayUtils.addAll(command));
}
Map<String, String> envA = pa.environment();
envA.put("PATH", System.getenv("PATH"));
pa.directory(directory);
pa.redirectErrorStream(true);
pa.redirectOutput(ProcessBuilder.Redirect.to(logFile));
Process pA = null;
try {
pA = pa.start();
} catch (IOException e) {
e.printStackTrace();
}
return pA;
}
public static void pidKiller(long pid) {
try {
// TODO: /F is actually -9, so we are more strict on Windows. Good/no good?
if (isThisWindows()) {
Runtime.getRuntime().exec(new String[]{"cmd", "/C", "taskkill", "/PID", Long.toString(pid), "/F", "/T"});
} else {
Runtime.getRuntime().exec(new String[]{"kill", "-15", Long.toString(pid)});
}
} catch (IOException e) {
LOGGER.severe(e.getMessage());
}
}
public static void processStopper(Process p, String artifactId) throws InterruptedException, IOException {
// Unlike all others, Tomee creates a child :-)
p.children().forEach(child -> {
child.destroy();
pidKiller(child.pid());
});
p.destroy();
p.waitFor(3, TimeUnit.MINUTES);
pidKiller(p.pid());
if (isThisWindows()) {
windowsCmdCleaner(artifactId);
}
}
public static void windowsCmdCleaner(String artifactId) throws IOException, InterruptedException {
List<Long> pidsToKill = new ArrayList<>(2);
String[] wmicPIDcmd = new String[]{
"wmic", "process", "where", "(",
"commandline", "like", "\"%\\\\" + artifactId + "\\\\%\"", "and", "name", "=", "\"java.exe\"", "and",
"not", "commandline", "like", "\"%wmic%\"", "and",
"not", "commandline", "like", "\"%maven%\"",
")", "get", "Processid", "/format:list"};
ProcessBuilder pbA = new ProcessBuilder(wmicPIDcmd);
pbA.redirectErrorStream(true);
Process p = pbA.start();
try (BufferedReader processOutputReader = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
String l;
while ((l = processOutputReader.readLine()) != null) {
if (l.contains("ProcessId=")) {
try {
pidsToKill.add(Long.parseLong(l.split("=")[1].trim()));
} catch (NumberFormatException ex) {
//Silence is golden. We don't care about wmic output glitches. This is a best effort.
}
}
}
p.waitFor();
}
if (pidsToKill.isEmpty()) {
LOGGER.warning("wmic didn't find any additional PIDs to kill.");
} else {
LOGGER.info(String.format("wmic found %d additional pids to kill", pidsToKill.size()));
}
pidsToKill.forEach(Commands::pidKiller);
}
public static boolean isThisWindows() {
return System.getProperty("os.name").matches(".*[Ww]indows.*");
}
public static class ProcessRunner implements Runnable {
final File directory;
final File log;
final String[] command;
final long timeoutMinutes;
public ProcessRunner(File directory, File log, String[] command, long timeoutMinutes) {
this.directory = directory;
this.log = log;
this.command = command;
this.timeoutMinutes = timeoutMinutes;
}
@Override
public void run() {
ProcessBuilder pb;
if (isThisWindows()) {
pb = new ProcessBuilder(ArrayUtils.addAll(new String[]{"cmd", "/C"}, command));
} else {
pb = new ProcessBuilder(ArrayUtils.addAll(command));
}
Map<String, String> env = pb.environment();
env.put("PATH", System.getenv("PATH"));
pb.directory(directory);
pb.redirectErrorStream(true);
pb.redirectOutput(ProcessBuilder.Redirect.to(log));
Process p = null;
try {
p = pb.start();
} catch (IOException e) {
e.printStackTrace();
}
try {
Objects.requireNonNull(p).waitFor(timeoutMinutes, TimeUnit.MINUTES);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
}
|
[
"\"PATH\"",
"\"PATH\"",
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
java
| 1 | 0 | |
manageApp.py
|
#!/usr/bin/env python
# ==============================================================================
#
# FILE: manageApp.py
#
# USAGE: manageApp.py
#
# DESCRIPTION: create and manage an application to be used within the
# devops.center framework.
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: Gregg Jensen (), [email protected]
# Bob Lozano (), [email protected]
# ORGANIZATION: devops.center
# CREATED: 11/21/2016 15:13:37
# REVISION: ---
#
# Copyright 2014-2017 devops.center llc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
# flake8: noqa
import os
import fnmatch
from os.path import expanduser
import shutil
import sys
import argparse
from argparse import RawDescriptionHelpFormatter
import subprocess
from time import time
import fileinput
import re
from scripts.process_dc_env import pythonGetEnv
from scripts.sharedsettings import SharedSettings
# ==============================================================================
"""
This script provides an administrative interface to a customer's application set
that is referred to as appName. The administrative functions implement some of
the CRUD services (i.e., Create, Update, Delete).
"""
__version__ = "0.1"
__copyright__ = "Copyright 2016, devops.center"
__credits__ = ["Bob Lozano", "Gregg Jensen"]
__license__ = "GPL"
__status__ = "Development"
# ==============================================================================
def getSettingsValue(theKey):
"""Read the ~/.dcConfig/settings file."""
baseSettingsFile = expanduser("~") + "/.dcConfig/settings"
try:
with open(baseSettingsFile, 'r') as f:
lines = [line.rstrip('\n') for line in f]
except IOError as e:
return None
retValue = None
for aLine in lines:
if re.search("^"+theKey+"=", aLine):
retValue = aLine.split("=")[1].replace('"', '')
break
return retValue
def getCommonSharedDir():
"""Get the common shard directory."""
checkForDCInternal = getSettingsValue("dcInternal")
commonSharedDir = getSettingsValue("dcCOMMON_SHARED_DIR")
if commonSharedDir:
# now we need to check if this is being run internally; the common dir will have a customer name
# at the end, and we need to strip that off and put the customer name from this run there instead.
if not checkForDCInternal:
return commonSharedDir
else:
return os.path.dirname(commonSharedDir)
return None
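# Illustrative behaviour (paths are hypothetical): with ~/.dcConfig/settings
# containing
#   dcCOMMON_SHARED_DIR="/media/common/acme"
#   dcInternal="internal"
# getSettingsValue("dcCOMMON_SHARED_DIR") returns "/media/common/acme", and
# getCommonSharedDir() returns "/media/common" because internal runs strip the
# trailing customer directory.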
class ManageAppName:
def __init__(self, theAppName, baseDirectory, altName, envList, appPath=None):
"""ManageAppName constructor"""
self.organization = None
self.appName = theAppName
self.dcAppName = ''
self.appPath = appPath
self.baseDir = baseDirectory
self.altName = altName.upper()
self.dcUtils = os.environ["dcUTILS"]
self.envList = envList
self.organization = self.envList["CUSTOMER_NAME"]
if "WORKSPACE_NAME_ORIGINAL" in self.envList:
self.organization = self.envList["WORKSPACE_NAME_ORIGINAL"]
# set up the defaults for the shared info and check to see if
# we are using a shared environment for this app
self.sharedUtilsName = "dcShared-utils"
# check to see if the shared config directory exists
commonSharedDir = getCommonSharedDir()
internalCheck = getSettingsValue("dcInternal")
if internalCheck:
sharedSettingsDir = commonSharedDir + "/" + self.organization + "/devops.center/dcConfig"
else:
sharedSettingsDir = commonSharedDir + "/devops.center/dcConfig"
if not os.path.exists(sharedSettingsDir):
print("ERROR: the directory for the shared settings file was not found:\n"
+ sharedSettingsDir +
"Please let the devops.center engineers know and they"
" will assist with correcting this.")
sys.exit(1)
else:
self.sharedSettingsPath = sharedSettingsDir
self.sharedSettingsFile = self.sharedSettingsPath + "/settings.json"
if not os.path.isfile(self.sharedSettingsFile):
print("ERROR: the shared settings file was not found:\n"
+ self.sharedSettingsFile +
"\nEither you do not have the shared directory "
"or the settings file has not been created.\n"
"Please let the devops.center engineers know and they"
" will assist with correcting this.")
sys.exit(1)
# need to get the information from the shared settings about the
# application VCS (git) URL(s) and the utilities URL. This was
# created when the application was created
self.theSharedSettings = SharedSettings(self.sharedSettingsFile)
self.sharedUtilsFlag = self.theSharedSettings.isShared(self.appName)
# put the baseDirectory path in the users $HOME/.dcConfig/baseDirectory
# file so that subsequent scripts can use it as a base to work
# from when determining the environment for a session
baseConfigDir = expanduser("~") + "/.dcConfig"
if not os.path.exists(baseConfigDir):
os.makedirs(baseConfigDir)
baseConfigFile = baseConfigDir + "/baseDirectory"
if not os.path.isfile(baseConfigFile):
try:
fileHandle = open(baseConfigFile, 'w')
if self.altName:
strToWrite = "CURRENT_WORKSPACE=" + self.altName + "\n"
else:
strToWrite = "CURRENT_WORKSPACE=DEFAULT\n"
fileHandle.write(strToWrite)
strToWrite = "##### WORKSPACES ######\n"
fileHandle.write(strToWrite)
if self.altName:
strToWrite = "_" + self.altName + \
"_BASE_CUSTOMER_DIR=" + self.baseDir + "\n"
else:
strToWrite = "_DEFAULT_BASE_CUSTOMER_DIR=" + \
self.baseDir + "\n"
fileHandle.write(strToWrite)
strToWrite = "##### BASE DIR CONSTRUCTION NO TOUCH ######\n"
fileHandle.write(strToWrite)
strToWrite = "CONSTRUCTED_BASE_DIR=_${CURRENT_WORKSPACE}" + \
"_BASE_CUSTOMER_DIR\n"
fileHandle.write(strToWrite)
strToWrite = "BASE_CUSTOMER_DIR=${!CONSTRUCTED_BASE_DIR}\n"
fileHandle.write(strToWrite)
fileHandle.close()
except IOError:
print("NOTE: There is a file that needs to be created: \n"
"$HOME/.dcConfig/baseDirectory and could not be written"
"\nPlease report this issue to the devops.center "
"admins.")
elif os.path.isfile(baseConfigFile):
if self.altName:
# the file exists and they are adding a new base directory
self.insertIntoBaseDirectoryFile(
baseConfigFile, self.baseDir, self.altName)
else:
# file exists and there is no altname so the workspace to be
# created is a default one
self.insertIntoBaseDirectoryFile(
baseConfigFile, self.baseDir, "DEFAULT")
def insertIntoBaseDirectoryFile(self, baseConfigFile, adjustedBaseDir,
nameToUse):
# so we need to read in the file into an array
with open(baseConfigFile) as f:
lines = [line.rstrip('\n') for line in f]
# first go through and check to see if we already have an alternate
# base directory by this name and if so, set a flag so we don't add
# again
flagToAdd = 1
strToSearch = "_" + nameToUse + "_BASE_CUSTOMER_DIR"
for aLine in lines:
if strToSearch in aLine:
flagToAdd = 0
break
# then open the same file for writing
try:
fileHandle = open(baseConfigFile, 'w')
# then loop through the array
for aLine in lines:
# look for the CURRENT_WORKSPACE and set it to the new name
if "CURRENT_WORKSPACE=" in aLine:
strToWrite = "CURRENT_WORKSPACE=" + nameToUse + "\n"
fileHandle.write(strToWrite)
continue
if nameToUse in aLine:
if flagToAdd == 0:
strToWrite = "_" + nameToUse + \
"_BASE_CUSTOMER_DIR=" + adjustedBaseDir + "\n"
fileHandle.write(strToWrite)
continue
# then look for the line that has WORKSPACES in it
if "WORKSPACES" in aLine:
fileHandle.write(aLine + "\n")
if flagToAdd:
strToWrite = "_" + nameToUse + \
"_BASE_CUSTOMER_DIR=" + adjustedBaseDir + "\n"
fileHandle.write(strToWrite)
continue
# otherwise write the line as it came from the file
fileHandle.write(aLine + "\n")
fileHandle.close()
except IOError:
print("NOTE: There is a file that needs to be created: \n"
"$HOME/.dcConfig/baseDirectory and could not be written"
"\nPlease report this issue to the devops.center admins.")
# and add a new line with the new altName and the adjustBasedir
# then write out the rest of the file
def run(self, command, options):
optionsMap = self.parseOptions(options)
# for testing purposes
# if len(optionsMap.keys()):
# for item in optionsMap.keys():
# print "[{}] =>{}<=".format(item, optionsMap[item])
if command == "join":
self.joinExistingDevelopment()
elif command == "create":
self.create(optionsMap)
elif command == "update":
self.update(optionsMap)
elif command == "delete":
self.delete(optionsMap)
elif command == "getUniqueID":
print(self.getUniqueStackID())
sys.exit(1)
# TODO come back to this when implementing the shared settings
# during the create app. This will need to utilize the class
# SharedSettings
# if self.sharedUtilsFlag:
# self.writeToSharedSettings()
def parseOptions(self, options):
"""options is string of comma separate key=value pairs. If there is
only one then it won't have a comma. And it could be blank"""
retMap = {}
if options:
# first they are comma separated
optionsList = options.split(",")
for item in optionsList:
(key, val) = item.split('=', 1)
key = key.strip()
retMap[key] = val
return retMap
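# Illustrative call (the option values are hypothetical):
#   self.parseOptions("newEnv=qa,region=us-west-2")
# returns {"newEnv": "qa", "region": "us-west-2"}; an empty options string
# returns {}.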
def joinExistingDevelopment(self): # noqa
"""This expects that the user is new and is joining development of an
already existing repository. So, this pulls down that existing repo
with the given appName and puts it in the given baseDirectory.
NOTE: the pound noqa after the method name will turn off the warning
that the method is too complex."""
# create the dataload directory...this is a placeholder and can
# be a link to somewhere else with more diskspace. But that is
# currently up to the user.
basePath = self.baseDir + self.appName
dataLoadDir = basePath + "/dataload"
if not os.path.exists(dataLoadDir):
os.makedirs(dataLoadDir, 0o755)
# change to the baseDirectory
os.chdir(basePath)
if self.appPath:
# they have entered a path to an existing front end directory
self.joinWithPath(basePath, "web", self.appPath)
else:
appSrcList = self.theSharedSettings.getApplicationRepoList(self.appName)
for aURL in appSrcList:
self.joinWithGit(basePath, "web", aURL)
# get the app-utils
utilsUrl = self.theSharedSettings.getUtilitiesRepo(self.appName)
if "dcShared-utils" in utilsUrl:
self.joinWithGit(self.baseDir, "utils", utilsUrl)
else:
self.joinWithGit(basePath, "utils", utilsUrl)
# and the environments directory
envDir = basePath + "/" + self.utilsDirName + "/environments"
if not os.path.exists(envDir):
os.makedirs(envDir, 0o755)
print("Creating environment files")
# and then create the individual env files in that directory
self.createEnvFiles(envDir)
else:
print("Creating personal.env file")
# the environments directory exists so as long as there is a
# personal.env make a change to the dcHOME defined there
# to be the one that is passed into this script.
self.createPersonalEnv(envDir)
# create a directory to hold the generated env files
generatedEnvDir = envDir + "/.generatedEnvFiles"
if not os.path.exists(generatedEnvDir):
os.makedirs(generatedEnvDir, 0o755)
open(generatedEnvDir + "/.keep", 'a').close()
# need to ensure any keys that are pulled down have the correct permissions
privateKeyFileList = []
keysDir = basePath + '/' + self.utilsDirName + '/keys'
for root, dirnames, filenames in os.walk(keysDir):
for filename in fnmatch.filter(filenames, '*.pem'):
privateKeyFileList.append(os.path.join(root, filename))
# now go through the list and chmod them
for keyFile in privateKeyFileList:
os.chmod(keyFile, 0o400)
print("Completed successfully\n")
def create(self, optionsMap):
"""creates the directory structure and sets up the appropriate
templates necessary to run a customer's application set."""
self.createBaseDirectories()
self.createWebDirectories()
self.createUtilDirectories()
self.tmpGetStackDirectory()
self.createDockerComposeFiles()
print("\n\nDone")
# self.createStackDirectory()
def createBaseDirectories(self):
basePath = self.baseDir + self.appName
try:
os.makedirs(basePath, 0o755)
except OSError:
print('Error creating the base directory, if it exists this '
'will not re-create it.\nPlease check to see that this '
'path does not already exist: \n' + basePath)
sys.exit(1)
def createUtilDirectories(self):
basePath = ''
if self.sharedUtilsFlag:
self.checkForExistingSharedRepo(self.baseDir)
basePath = self.baseDir + self.sharedUtilsName
else:
basePath = self.baseDir + self.appName
commonDirs = ["local", "dev", "staging", "prod"]
# create the dataload directory...this is a placeholder and can
# be a link to somewhere else with more diskspace. But that is
# currently up to the user.
dataLoadDir = self.baseDir + self.appName + "/dataload"
if not os.path.exists(dataLoadDir):
os.makedirs(dataLoadDir, 0o755)
# utils path to be created
baseUtils = basePath + "/" + self.appName + \
"-utils/"
# and then the config directory and all the sub directories
configDir = baseUtils + "config/"
for item in commonDirs:
if not os.path.exists(configDir + item):
os.makedirs(configDir + item, 0o755)
# and touch a file so that this isn't an empty directory
open(configDir + item + "/.keep", 'a').close()
# and the environments directory
envDir = baseUtils + "environments"
if not os.path.exists(envDir):
os.makedirs(envDir, 0o755)
# and then create the individual env files in that directory
self.createEnvFiles(envDir)
# create a directory to hold the generated env files
generatedEnvDir = envDir + "/.generatedEnvFiles"
if not os.path.exists(generatedEnvDir):
os.makedirs(generatedEnvDir, 0o755)
open(generatedEnvDir + "/.keep", 'a').close()
# create the certs directory
keyDir = baseUtils + "certs/"
for item in commonDirs:
if not os.path.exists(keyDir + item):
os.makedirs(keyDir + item, 0o755)
# and touch a file so that this isn't an empty directory
open(keyDir + item + "/.keep", 'a').close()
# and then the keys directory and all the sub directories
keyDir = baseUtils + "keys/"
for item in commonDirs:
if not os.path.exists(keyDir + item):
os.makedirs(keyDir + item, 0o755)
# and touch a file so that this isn't an empty directory
open(keyDir + item + "/.keep", 'a').close()
fileToWrite = self.baseDir + self.appName + "/.dcDirMap.cnf"
try:
fileHandle = open(fileToWrite, 'a')
strToWrite = "CUSTOMER_APP_UTILS=" + self.appName + "-utils\n"
fileHandle.write(strToWrite)
fileHandle.close()
except IOError:
print("NOTE: There is a file that needs to be created: \n" +
basePath + "/.dcDirMap.cnf and could not be written. \n"
"Please report this issue to the devops.center admins.")
# put a .gitignore file in the appName directory to properly ignore
# some files that will be created that don't need to go into the
# repository
if self.sharedUtilsFlag:
gitIgnoreFile = basePath + "/.gitignore"
else:
gitIgnoreFile = baseUtils + "/.gitignore"
try:
fileHandle = open(gitIgnoreFile, 'w')
strToWrite = (".DS_Store\n"
"personal.env\n"
".generatedEnvFiles/\n"
"!environments/.generatedEnvFiles/.keep\n")
fileHandle.write(strToWrite)
fileHandle.close()
except IOError:
if self.sharedUtilsFlag:
aPath = basePath
else:
aPath = baseUtils
print("NOTE: There is a file that needs to be created: \n" +
aPath + "/.gitignore and could not be written. \n"
"Please report this issue to the devops.center admins.")
# and now run the git init on the Utils directory
if not self.sharedUtilsFlag:
originalDir = os.getcwd()
os.chdir(baseUtils)
subprocess.check_call("git init .", shell=True)
os.chdir(originalDir)
else:
# make a symbolic link from the newly created directory in
# the shared utils directory into the app directory
originalDir = os.getcwd()
os.chdir(self.baseDir + self.appName)
sourceUtilsDir = "../" + self.sharedUtilsName + "/" + \
self.appName + "-utils/"
targetUtilsDir = self.baseDir + "/" + self.appName + "/" + \
self.appName + "-utils"
os.symlink(sourceUtilsDir, targetUtilsDir)
os.chdir(originalDir)
# and do the git init if it hasn't been done before
gitDir = basePath + "/.git"
if not os.path.exists(gitDir):
originalDir = os.getcwd()
os.chdir(basePath)
subprocess.check_call("git init .", shell=True)
os.chdir(originalDir)
def createWebDirectories(self):
webName = self.appName + "-web"
userResponse = raw_input(
"\n\nEnter the name of the web directory that you want to use\n"
"and a directory will be created with that name.\n\n"
"NOTE: If you already have a repository checked out on this \n"
"machine, we can create a link from there into our directory\n"
"structure. Provide the full path to that existing directory."
"\nAnother option is to enter the git repo URL and we can clone"
" it."
"\nOr press return to accept the "
"default name: (" + webName + ")\n")
if userResponse:
if '/' not in userResponse:
# is just a name
webName = userResponse
# web path to be created
self.baseWeb = self.baseDir + self.appName + "/" + userResponse
if not os.path.exists(self.baseWeb):
os.makedirs(self.baseWeb, 0o755)
# and now run the git init on the Utils directory
originalDir = os.getcwd()
os.chdir(self.baseWeb)
subprocess.check_call("git init .", shell=True)
os.chdir(originalDir)
elif (re.match("http", userResponse) or
re.match("git", userResponse)):
# its a URL so we need to get a git clone
originalDir = os.getcwd()
os.chdir(self.baseDir + self.appName)
print("Cloning: " + userResponse)
cmdToRun = "git clone " + userResponse
try:
subprocess.check_output(cmdToRun,
stderr=subprocess.STDOUT,
shell=True)
# get the name of the repo as the new webName
webName = os.path.basename(userResponse).split('.')[0]
except subprocess.CalledProcessError:
print ("There was an issue with cloning the "
"application you specified: " + userResponse +
"\nCheck that you specified the correct owner "
"and respository name.")
sys.exit(1)
os.chdir(originalDir)
else:
# is is a local directory so we need to sym-link it
if '~' in userResponse:
userRepo = userResponse.replace("~", expanduser("~"))
elif '$HOME' in userResponse:
userRepo = userResponse.replace("$HOME", expanduser("~"))
else:
userRepo = userResponse
if not os.path.exists(userRepo):
print("ERROR: That directory does not exist: {}".format(
userRepo))
sys.exit(1)
# other wise get the name of the repository
webName = os.path.basename(userRepo)
self.baseWeb = self.baseDir + self.appName + "/" + webName
print("\nThis directory: {}".format(userRepo))
print("will be linked to: {}\n".format(
self.baseWeb))
yesResponse = raw_input(
"If this is correct press Y/y (Any other response"
" will NOT create this directory): ")
if yesResponse.lower() == 'y':
# and the destination directory
os.symlink(userRepo, self.baseWeb)
else:
print("The symlink was NOT created.")
else:
# web path to be created
self.baseWeb = self.baseDir + self.appName + "/" + webName
if not os.path.exists(self.baseWeb):
os.makedirs(self.baseWeb, 0o755)
# set up the web name as the name for dcAPP that will be used to
# write in the personal.env file
self.dcAppName = webName
fileToWrite = self.baseDir + self.appName + "/.dcDirMap.cnf"
try:
fileHandle = open(fileToWrite, 'w')
strToWrite = "CUSTOMER_APP_WEB=" + self.dcAppName + "\n"
fileHandle.write(strToWrite)
fileHandle.close()
except IOError:
print("NOTE: There is a file that needs to be created: \n" +
self.baseDir + self.appName + "/.dcDirMap.cnf and "
"could not be written. \n"
"Please report this issue to the devops.center admins.")
def tmpGetStackDirectory(self):
"""This method is put in place to be called instead of the
createStackDirectory method. This will just ask for the unique stack
name that you want to use for this application"""
userResponse = raw_input(
"\n\nEnter the name of the unique stack repository that has been "
"set up for this app\nand it will be used in the "
"docker-compose.yml files\n"
"for the web and worker images:\n")
if userResponse:
# take the userResponse and use it to edit the docker-compose.yml
self.uniqueStackName = userResponse
else:
print("You will need to have a stack name that corresponds "
"with the name of a repository that has the web "
"(and worker) that is with dcStack")
sys.exit(1)
def createStackDirectory(self):
"""create the dcStack directory that will contain the necessary files
to create the web and worker containers"""
uniqueStackName = self.getUniqueStackID()
stackName = uniqueStackName + "-stack"
self.registerStackID(stackName)
# stack path to be created
baseStack = self.baseDir + self.appName + "/" + stackName
if not os.path.exists(baseStack):
os.makedirs(baseStack, 0o755)
# make the web and worker directories
for item in ["web", "web-debug", "worker"]:
if not os.path.exists(baseStack + "/" + item):
os.makedirs(baseStack + "/" + item, 0o755)
# create the web/wheelhouse directory
if not os.path.exists(baseStack + "/web/wheelhouse"):
os.makedirs(baseStack + "/web/wheelhouse", 0o755)
# and the .gitignore to ignore the wheelhouse directory
gitIgnoreFile = baseStack + "/web/.gitignore"
try:
fileHandle = open(gitIgnoreFile, 'w')
strToWrite = ("wheelhouse\n")
fileHandle.write(strToWrite)
fileHandle.close()
except IOError:
print("NOTE: There is a file that needs to be created: \n" +
baseStack + "web/.gitignore and could not be written. \n"
"Please report this issue to the devops.center admins.")
# and get the template Dockerfile, requirements for each of the sub
# directories
webDockerFile = baseStack + "/web/Dockerfile"
shutil.copyfile(self.dcUtils + "/templates/Dockerfile-web",
webDockerFile)
workerDockerFile = baseStack + "/worker/Dockerfile"
shutil.copyfile(self.dcUtils + "/templates/Dockerfile-worker",
workerDockerFile)
webShFile = baseStack + "/web/web.sh"
shutil.copyfile(self.dcUtils + "/templates/web.sh", webShFile)
supervisorConfFile = baseStack + \
"/worker/supervisor-djangorq-worker.conf"
shutil.copyfile(self.dcUtils +
"/templates/supervisor-djangorq-worker.conf",
supervisorConfFile)
# need to change the entry in the work Dockerfile that references the
# stackName-web image to build from. So, there is a CUSTOMER_STACK
# variable that needs to be changed
for line in fileinput.input(workerDockerFile, inplace=1):
print line.replace("CUSTOMER_STACK", uniqueStackName),
def createEnvFiles(self, envDir):
appUtilsDir = self.appName + "-utils/"
commonFiles = ["dev", "local", "staging", "prod"]
for name in commonFiles:
envFile = envDir + "/" + name + ".env"
try:
fileHandle = open(envFile, 'w')
strToWrite = (
"#\n"
"# ENV vars specific to the " + name + " environment\n"
"#\n"
"APP_UTILS_CONFIG=${dcHOME}/" + appUtilsDir + "config/" +
name + "\n"
"APP_UTILS_KEYS=${dcHOME}/" + appUtilsDir + "/keys/" +
name + "\n"
"#\n"
"dcEnv=" + name + "\n"
"#\n"
"#\n")
fileHandle.write(strToWrite)
except IOError:
print('Unable to write to the {} file in the'
' given configDir: {} \n'.format(envFile, envDir))
sys.exit(1)
# fill the local file with some default value
envLocalFile = envDir + "/common.env"
try:
fileHandle = open(envLocalFile, 'w')
strToWrite = (
"# some app env vars specific to the environment\n"
"dcHOME=~/" + self.appName + "\n"
"dcTempDir=/media/data/tmp\n"
"\n#\n"
"# Papertrail settings\n"
"#\n"
"SYSLOG_SERVER='yourserver.papertrailapp.com'\n"
"SYSLOG_PORT='99999'\n"
"SYSLOG_PROTO='udp'\n"
"\n"
"#\n"
"# Default pgpool config - single backend\n"
"#\n"
"PGPOOL_CONFIG_FILE='/etc/pgpool2/pgpool.conf.one'\n"
)
fileHandle.write(strToWrite)
except IOError:
print('Unable to write to the {} file in the'
' given configDir: {} \n'.format(envLocalFile, envDir))
sys.exit(1)
# fill the local file with some default value
envLocalFile = envDir + "/local.env"
try:
fileHandle = open(envLocalFile, 'w')
strToWrite = (
"# some app env vars specific to the environment\n"
"APP_UTILS_CONFIG=${dcHOME}/" + appUtilsDir + "config/local\n"
"APP_UTILS_KEYS=${dcHOME}/" + appUtilsDir + "keys\n"
"#\n"
"dcEnv=local\n"
"#\n"
"\n#\n"
"# Papertrail settings\n"
"#\n"
"SYSLOG_SERVER='yourserver.papertrailapp.com'\n"
"SYSLOG_PORT='99999'\n"
"SYSLOG_PROTO='udp'\n"
"\n"
"#\n"
"# Default pgpool config - single backend\n"
"#\n"
"PGPOOL_CONFIG_FILE='/etc/pgpool2/pgpool.conf.one'\n"
)
fileHandle.write(strToWrite)
except IOError:
print('Unable to write to the {} file in the'
' given configDir: {} \n'.format(envLocalFile, envDir))
sys.exit(1)
# and now for the personal.env file
self.createPersonalEnv(envDir)
def createDockerComposeFiles(self):
"""This method will create the Dockerfile(s) as necessary for this
appName. It will take a template file and update it with the specific
directory updates for created with this script."""
# set up the base config directory path
# NOTE: it will only put these in the "local" directory
if self.sharedUtilsFlag:
baseConfig = self.baseDir + self.sharedUtilsName + "/" + \
self.appName + "-utils/config/local"
else:
baseConfig = self.baseDir + self.appName + "/" + self.appName + \
"-utils/config/local"
# copy the docker-compose template files
composeFile = baseConfig + "/docker-compose.yml"
shutil.copyfile(self.dcUtils + "/templates/docker-compose.yml",
composeFile)
composeDebugFile = baseConfig + "/docker-compose-debug.yml"
shutil.copyfile(self.dcUtils + "/templates/docker-compose-debug.yml",
composeDebugFile)
composeSubnetFile = baseConfig + "/docker-subnet.conf"
shutil.copyfile(self.dcUtils + "/templates/docker-subnet.conf",
composeSubnetFile)
# need to change the env file name and path to represent what is
# created with this script
for line in fileinput.input(composeFile, inplace=1):
print line.replace("DC_UNIQUE_ID", self.uniqueStackName),
for line in fileinput.input(composeDebugFile, inplace=1):
print line.replace("DC_UNIQUE_ID", self.uniqueStackName),
def createPersonalEnv(self, envDir):
"""create the personal.env file when joinExistingDevelopment"""
personalFile = envDir + "/personal.env"
try:
fileHandle = open(personalFile, 'w')
strToWrite = (
"#\n"
"# Personal env settings that take precedence over other\n"
"# env files.\n"
"# NOTE: if you change anything in this file you will need\n"
"# to run deployenv.sh to make sure the environment\n"
"# files get genereated with the latest changes\n"
"#\n"
"dcDEFAULT_APP_NAME=" + self.appName + "\n"
"dcHOME=" + self.baseDir + self.appName + "\n"
"\n"
'dcDATA=${dcHOME}/dataload\n'
'dcAPP=${dcHOME}/' + self.dcAppName + "\n"
"\n"
"#LOG_NAME=put the name you want to see in papertrail, "
"the default is hostname\n"
'#AWS_ACCESS_KEY_ID="put aws access key here"\n'
"#AWS_SECRET_ACCESS_KEY='put secret access key here'\n"
)
fileHandle.write(strToWrite)
except IOError:
print('Unable to write to the {} file in the'
' given configDir: {} \n'.format(personalFile, envDir))
sys.exit(1)
def joinWithGit(self, basePath, theType, theURL):
cloneOrPull = " clone "
cloneOrPullString = "cloning"
flagToChangeBackToOriginalDir = False
if self.sharedUtilsFlag and theType == "utils":
# the basePath includes the standard shared repo named
# directory
if os.path.exists(basePath + "/dcShared-utils"):
# then we need to be in that directory to do the pull
originalDir = os.getcwd()
os.chdir(basePath + '/dcShared-utils')
flagToChangeBackToOriginalDir = True
print("Pulling: " + theURL)
cloneOrPull = " pull "
cloneOrPullString = "pulling"
else:
flagToChangeBackToOriginalDir = True
originalDir = os.getcwd()
os.chdir(self.baseDir)
print("Cloning: " + theURL)
else:
print("Cloning: " + theURL)
cmdToRun = "git" + cloneOrPull + theURL
appOutput = ''
try:
appOutput = subprocess.check_output(cmdToRun,
stderr=subprocess.STDOUT,
shell=True)
# if using the shared repo then we need to make a symlink
# from the app-utils name under the dcShared-utils to the
# correct place in the app directory
if self.sharedUtilsFlag and theType == "utils":
aName = self.appName + "-utils"
sourceUtilsDir = "../" + self.sharedUtilsName + "/" + \
self.appName + "-utils/"
targetUtilsDir = self.baseDir + "/" + self.appName + "/" + \
aName
print("Doing a symlink of source=>{} to destination=>{}".format(
sourceUtilsDir, targetUtilsDir))
if not os.path.exists(targetUtilsDir):
os.symlink(sourceUtilsDir, targetUtilsDir)
else:
# get the newly created directory and put it in the
# appropriate ENV variable in the dcDirMap.cnf
aName = re.search("(?<=')[^']+(?=')", appOutput).group(0)
if theType == "web":
theEnvVarToWrite = "CUSTOMER_APP_WEB="
self.dcAppName = aName
# NOTE VERY dependent on the order in which this method
# is called. Web is assumed to be first ... see the
# joinExistingDevelopment method
fileWriteMode = 'w'
else:
theEnvVarToWrite = "CUSTOMER_APP_UTILS="
self.utilsDirName = aName
# NOTE VERY dependent on the order in which this method
# is called. Web is assumed to be first ... see the
# joinExistingDevelopment method
fileWriteMode = 'a'
fileToWrite = self.baseDir + self.appName + "/.dcDirMap.cnf"
try:
fileHandle = open(fileToWrite, fileWriteMode)
strToWrite = theEnvVarToWrite + aName + "\n"
fileHandle.write(strToWrite)
fileHandle.close()
except IOError:
print("NOTE: There is a file that needs to be "
"created: \n" + self.baseDir + self.appName +
"/.dcDirMap.cnf and could not be written. \n"
"Please report this issue to the devops.center "
"admins.")
except subprocess.CalledProcessError as aStmt:
print("There was an issue with " + cloneOrPullString +
" the application you "
"specified: " + theURL +
"\nCheck that you have provided the correct credentials "
"and respository name."
+ appOutput + " exception:" + aStmt.output)
sys.exit(1)
if flagToChangeBackToOriginalDir:
os.chdir(originalDir)
print("Done\n")
def joinWithPath(self, basePath, theType, thePath):
if thePath.startswith("~"):
adjustedPath = thePath.replace('~', expanduser('~'))
elif thePath.startswith("$HOME"):
adjustedPath = thePath.replace('$HOME', expanduser('~'))
elif thePath.startswith("${HOME}"):
adjustedPath = thePath.replace('${HOME}', expanduser('~'))
else:
adjustedPath = thePath
# and get the name at the end of the path
aName = os.path.basename(adjustedPath)
destPath = basePath + "/" + aName
print("Linking the path: " + adjustedPath)
print(" to directory: " + destPath)
os.symlink(adjustedPath, destPath)
# and now update the .dcDirMap.conf
if theType == "web":
theEnvVarToWrite = "CUSTOMER_APP_WEB="
self.dcAppName = aName
# NOTE VERY dependent on the order in which this method
# is called. Web is assumed to be first ... see the
# joinExistingDevelopment method
fileWriteMode = 'w'
else:
theEnvVarToWrite = "CUSTOMER_APP_UTILS="
self.utilsDirName = aName
# NOTE VERY dependent on the order in which this method
# is called. Web is assumed to be first ... see the
# joinExistingDevelopment method
fileWriteMode = 'a'
fileToWrite = basePath + "/.dcDirMap.cnf"
try:
fileHandle = open(fileToWrite, fileWriteMode)
strToWrite = theEnvVarToWrite + self.dcAppName + "\n"
fileHandle.write(strToWrite)
fileHandle.close()
except IOError:
print("NOTE: There is a file that needs to be "
"created: \n" + self.baseDir + self.appName +
"/.dcDirMap.cnf and could not be written. \n"
"Please report this issue to the devops.center "
"admins.")
print("Done\n")
def update(self, optionsMap):
"""takes an argument that dictates what needs to be updated and then
what items are associated with the change"""
if "newEnv" in optionsMap:
self.createNewEnvDirs(optionsMap["newEnv"])
def createNewEnvDirs(self, newEnvName):
# utils path to be created
baseUtils = self.baseDir + self.appName + "/" + self.appName + \
"-utils/"
appUtilsDir = self.appName + "-utils/"
# and then the config directory and all the sub directories
configDir = baseUtils + "config/"
if not os.path.exists(configDir + newEnvName):
os.makedirs(configDir + newEnvName, 0o755)
# and touch a file so that this isn't an empty directory
open(configDir + newEnvName + "/.keep", 'a').close()
# and the environments directory
envDir = baseUtils + "environments"
if not os.path.exists(envDir):
os.makedirs(envDir, 0o755)
# and then create the individual env files in that directory
envFile = envDir + "/" + newEnvName + ".env"
try:
fileHandle = open(envFile, 'w')
strToWrite = (
"#\n"
"# ENV vars specific to the " + newEnvName + " environment\n"
"#\n"
"APP_UTILS_CONFIG=${dcHOME}/" + appUtilsDir + "config/" +
newEnvName + "\n"
"APP_UTILS_KEYS=${dcHOME}/" + appUtilsDir + "keys/" +
newEnvName + "\n"
"#\n")
fileHandle.write(strToWrite)
except IOError:
print('Unable to write to the {} file in the'
' given configDir: {} \n'.format(envFile, envDir))
sys.exit(1)
# and then the keys directory and all the sub directories
keyDir = baseUtils + "keys/"
if not os.path.exists(keyDir + newEnvName):
os.makedirs(keyDir + newEnvName, 0o755)
# and touch a file so that this isn't an empty directory
open(keyDir + newEnvName + "/.keep", 'a').close()
certsDir = baseUtils + "/certs/"
if not os.path.exists(certsDir + newEnvName):
os.makedirs(certsDir + newEnvName, 0o755)
# and touch a file so that this isn't an empty directory
open(keyDir + newEnvName + "/.keep", 'a').close()
print("New Environment created: " + newEnvName)
def delete(self, optionsMap):
"""delete all the necessary items that are associated with the
appName"""
print("Currently under construcution\n")
sys.exit(1)
# TODO items to delete:
# - self.baseDir/self.appName
# - unregister the appName (ie, remove it from .mapAppStack)
# - remove the entry from the .aws config and credentials files
def getUniqueStackID(self):
return hex(int(time() * 10000000))[9:]
def registerStackID(self, stackName):
"""This will make not of the mapping between appName and stackName"""
# TODO: send this to a server to register in the devops.center database
# for now put it in a private file in this directory
mappingFile = ".mapAppStack"
if os.path.isfile(mappingFile):
foundALine = 0
for line in fileinput.input(mappingFile, inplace=1):
if line == "\n":
continue
if self.appName in line:
foundALine = 1
print re.sub("=(.*)-stack", "=" + stackName, line),
else:
# NOTE the comma doesn't print out an extra newline
print line,
fileinput.close()
if foundALine == 0:
try:
fileHandle = open(mappingFile, 'a')
strToWrite = (self.appName + "=" + stackName + "\n")
fileHandle.write(strToWrite)
fileHandle.close()
except IOError:
print("NOTE: There is a file that needs to be "
"created:\n./.mapAppStack\n And it could not be"
"written. \n"
"Please report this issue to the devops.center admins.")
else:
try:
fileHandle = open(mappingFile, 'w')
strToWrite = (self.appName + "=" + stackName + "\n")
fileHandle.write(strToWrite)
fileHandle.close()
except IOError:
print("NOTE: There is a file that needs to be created: \n"
"./.mapAppStack and could not be written. \n"
"Please report this issue to the devops.center admins.")
def writeToSharedSettings(self):
if not os.path.exists(self.sharedSettingsPath):
try:
os.makedirs(self.sharedSettingsPath, 0o755)
except OSError:
print('Error creating the shared directory: '
+ self.sharedSettingsPath +
                  '\nSo the information about this app utilizing the shared '
                  'app utils will not be saved.')
return
sharedRepoURL = self.createRepoURL()
        print('\nGenerating a git repo URL and writing it to the shared settings file:\n'
+ sharedRepoURL)
# if we get here then the shared drive and directory are set up so append
# this app-utils information that it is shared
try:
strToSearch = "SHARED_APP_REPO="
if strToSearch not in open(self.sharedSettingsFile).read():
fileHandle = open(self.sharedSettingsFile, 'a')
strToWrite = "SHARED_APP_REPO=" + sharedRepoURL + '\n'
fileHandle.write(strToWrite)
fileHandle.close()
# it exists so check to see if the app has already been added
strToSearch = self.appName + "-utils=shared"
if strToSearch not in open(self.sharedSettingsFile).read():
# then append this information
fileHandle = open(self.sharedSettingsFile, 'a')
strToWrite = (strToSearch + "\n")
fileHandle.write(strToWrite)
fileHandle.close()
except IOError:
print('NOTE: There is a problem writing to the shared settings:\n'
+ self.sharedSettingsFile +
"Please report this issue to the devops.center admins.")
def createRepoURL(self):
"""Generate the repo URL for the shared utils repository."""
# the git service name and the git account should be stored in
# the shared settings file (created upon RUN-ME_FIRST.SH run by the
# first person of a new customer.)
gitServiceName = self.theSharedSettings.getVCSServiceName()
gitAccountName = self.organization
retRepoURL = None
if os.path.isfile(self.sharedSettingsFile):
# it's there so pull out the values needed
with open(self.sharedSettingsFile) as fp:
for aLine in fp:
strippedLine = aLine.strip()
if "GIT_SERVICE_NAME" in strippedLine:
gitServiceName = strippedLine.split("=")[1]
if "GIT_ACCOUNT_NAME" in strippedLine:
gitAccountName = strippedLine.split("=")[1]
if gitServiceName == "github":
retRepoURL = "[email protected]:" + gitAccountName + \
'/dcShared-utils.git'
elif gitServiceName == "assembla":
retRepoURL = "[email protected]:" + gitAccountName + \
'/dcShared-utils.git'
elif gitServiceName == "bitbucket":
retRepoURL = '[email protected]:' + gitAccountName + \
'/dcShared-utils.git'
else:
retRepoURL = ("[email protected]:" + self.organization +
"/dcShared-utils.git\n")
return retRepoURL
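        # Illustrative sketch (account name assumed): given a shared settings
        # file containing, e.g.,
        #   GIT_SERVICE_NAME=bitbucket
        #   GIT_ACCOUNT_NAME=acme
        # createRepoURL() returns
        #   [email protected]:acme/dcShared-utils.git
        # Only github, assembla and bitbucket are recognized; anything else
        # falls back to the github URL built from self.organization.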
def checkForExistingSharedRepo(self, sharedBasePath):
"""Check and retrieve the dcShared-utils repo."""
# first see if there is a directory locally already
if not os.path.exists(sharedBasePath + "dcShared-utils"):
# the dcShared-utils directory did not exist so lets
# go see if someone else has created the dcShared-utils
# for this company, maybe for another app, by looking in
# the shared settings file.
foundPreviousSharedRepo = False
if os.path.isfile(self.sharedSettingsFile):
with open(self.sharedSettingsFile) as fp:
for aLine in fp:
strippedLine = aLine.strip()
if "SHARED_APP_REPO" in strippedLine:
sharedRepoURL = strippedLine.split("=")[1]
foundPreviousSharedRepo = True
if not foundPreviousSharedRepo:
# there wasn't a dcShared-utils git URL so this
# is the first time to create it.
os.makedirs(sharedBasePath + "dcShared-utils")
# and return as that is all that needs to be done
return
sharedRepoURL = self.createRepoURL()
try:
originalDir = os.getcwd()
os.chdir(sharedBasePath)
sharedRepoURL = self.createRepoURL()
subprocess.check_call(
"git pull " + sharedRepoURL, shell=True)
os.chdir(originalDir)
except subprocess.CalledProcessError as gitOutput:
print("There was an error with pulling the "
"dcShared-utils repo.\n")
sys.exit(1)
def checkBaseDirectory(baseDirectory, envList):
    if baseDirectory.endswith('/'):
if baseDirectory.startswith('~'):
retBaseDir = baseDirectory.replace("~", expanduser("~"))
elif baseDirectory.startswith("$HOME"):
retBaseDir = baseDirectory.replace("$HOME", expanduser("~"))
else:
retBaseDir = baseDirectory
else:
tmpBaseDir = baseDirectory + '/'
if tmpBaseDir.startswith('~'):
retBaseDir = tmpBaseDir.replace("~", expanduser("~"))
elif tmpBaseDir.startswith("$HOME"):
retBaseDir = tmpBaseDir.replace("$HOME", expanduser("~"))
else:
retBaseDir = tmpBaseDir
newBaseDir = retBaseDir
if "WORKSPACE_NAME" in envList:
newBaseDir = retBaseDir + envList["WORKSPACE_NAME_ORIGINAL"] + "/"
if not os.path.exists(newBaseDir):
            print('Creating base directory associated with the workspace '
                  'name: ' + newBaseDir)
os.makedirs(newBaseDir, 0o755)
else:
try:
# lets try to write to that directory
tmpFile = newBaseDir + '.removeme'
tmpFileHandle = open(tmpFile, 'w')
tmpFileHandle.close()
os.remove(tmpFile)
except IOError:
print('Unable to access base directory: ' + newBaseDir)
sys.exit(1)
return newBaseDir
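# Illustrative sketch (paths assumed): checkBaseDirectory("~/devops", envList)
# expands the tilde and ensures a trailing slash, e.g. "/home/user/devops/".
# If envList carries WORKSPACE_NAME, the value of WORKSPACE_NAME_ORIGINAL is
# appended as a subdirectory ("/home/user/devops/myworkspace/") and the
# directory is created or write-checked before being returned.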
def getBaseDirectory(workspaceName=None):
# read the ~/.dcConfig/settings
baseSettingsDir = expanduser("~") + "/.dcConfig"
if not os.path.exists(baseSettingsDir):
print("You seem to be missing the $HOME/.dcConfig directory,"
"you will need to run the RUN-ME-FIRST.sh script that "
"established that directory and the settings file that"
"contains the initial base directory for you application"
"development.")
sys.exit(1)
if os.path.isfile(baseSettingsDir + "/baseDirectory"):
with open(baseSettingsDir + "/baseDirectory") as f:
lines = [line.rstrip('\n') for line in f]
if not workspaceName:
for item in lines:
if "CURRENT_WORKSPACE" in item:
lineArray = item.split('=')
workspaceName = '_' + lineArray[1] + '_BASE_CUSTOMER_DIR'
if workspaceName in item:
anotherLineArray = item.split('=')
if anotherLineArray[1][-1] == '/':
developmentBaseDir = anotherLineArray[1]
else:
developmentBaseDir = anotherLineArray[1] + '/'
return(developmentBaseDir)
if os.path.isfile(baseSettingsDir + "/settings"):
# get the base directory from the settings file
devBaseDir = getSettingsValue("DEV_BASE_DIR")
if devBaseDir:
return devBaseDir
else:
print("Could not find the DEV_BASE_DIR in the ~/.dcConfig/settings file. ")
print("You will need to re-run this command with the -d option to specify the base directory to continue.")
sys.exit(1)
else:
print("You will need to re-run this command with the -d option to specify the base directory to continue.")
sys.exit(1)
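# Illustrative sketch (file contents assumed): with ~/.dcConfig/baseDirectory
# holding, e.g.,
#   CURRENT_WORKSPACE=default
#   _default_BASE_CUSTOMER_DIR=/home/user/devops/
# getBaseDirectory() builds the lookup key "_default_BASE_CUSTOMER_DIR" from
# CURRENT_WORKSPACE and returns "/home/user/devops/". Without that file it
# falls back to DEV_BASE_DIR from ~/.dcConfig/settings.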
def checkArgs():
parser = argparse.ArgumentParser(
description='This script provides an administrative interface to a '
        'customer\'s application\nset that is referred to as appName. The '
'functions deal with manipulation of\nthe directory structure and '
'content of the appUtils and website, mainly the\nappUtils. This '
'script does not deal with the instances or containers that\nare the '
'end running product.\n\n'
'The '
'create command will take the\nbaseDirectory as the path to put the '
'files that will be associated with the\napplication. This directory '
'structure serves as a logical grouping of the\nfiles for the web '
'site and all the configuration and utilities that support\nbuilding '
        'the application on the destination (either cloud instances or\n'
        'containers or both).\n\n'
'The join option is a way to have someone else join in on the '
'development\nafter someone has created the initial application files '
'and checked them\ninto a git repository. It will create the '
'directory structure just like\nthe create command but it will get the '
'appUtils and web from a git\nrepository instead of starting from '
'scratch. The git URL to clone,\nfor each of the two options can '
'either be https or git and using one\nover the other depends on your '
        'credentials.\n\n'
'The update command supports the ability to create a new '
'environment\nname in the appUtils directory. So, if you have an '
        'environment that is\ndifferent from dev, staging, or prod, you can use '
        'this option and it will be\navailable to all '
        'subsequent commands and utilities.\nSee below for how to use.\n\n'
'Example command line to create an application:\n'
'./manageApp.py --appName YourApp\n'
' --command create\n\n'
'Example command line to join with an application:\n'
'./manageApp.py --appName YourApp\n'
' --command join\n'
'\n ... or you can leave the command off as the default is to join.\n'
'\n\n'
'Example cmd line to update an application with a new environment:\n'
'./manageApp.py --appName YourApp\n'
' --command update\n'
' --option "newEnv=UAT"\n',
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('-d', '--baseDirectory', help='[OPTIONAL] The base directory '
'to be used to access the appName. This needs to be '
'an absolute path unless the first part of the path '
'is a tilde or $HOME. This option is '
'needed when using the workspaceName option '
'and reads from the personal settings in '
'$HOME/.dcConfig/settings',
required=False)
    parser.add_argument('-c', '--command', help='[OPTIONAL] Command to execute ' +
                        'on the appName. Default [join]',
choices=["join",
"create",
"update",
"delete",
"getUniqueID"],
default='join',
required=False)
parser.add_argument('-p', '--appPath', help='[OPTIONAL] This is the path to an existing '
                        'application source code directory (rather than pulling it '
                        'down from a VCS again, hence duplicating code) that '
                        'houses the front end source code. The '
                        'path should be the full absolute path, as it '
                        'will be symbolically linked to the base directory. '
'NOTE: tilde(~) or $HOME will be expanded appropriately',
default='',
required=False)
parser.add_argument('-o', '--cmdOptions', help='Options for the '
                        'command arg. Currently, this is used for creating a '
                        'new environment other than dev, staging, local or prod. '
                        'See the example above for the format.',
default='',
required=False)
retAppName = None
try:
args, unknown = parser.parse_known_args()
except SystemExit:
pythonGetEnv()
sys.exit(1)
retEnvList = pythonGetEnv(initialCreate=True)
if "CUSTOMER_APP_NAME" in retEnvList:
retAppName = retEnvList["CUSTOMER_APP_NAME"]
retCommand = args.command
retOptions = args.cmdOptions
retAppPath = None
if args.appPath:
retAppPath = args.appPath
if "WORKSPACE_NAME" in retEnvList:
retWorkspaceName = retEnvList["WORKSPACE_NAME"]
else:
retWorkspaceName = ''
# before going further we need to check whether there is a slash at the
# end of the value in destinationDir
if args.baseDirectory:
retBaseDir = checkBaseDirectory(args.baseDirectory, retEnvList)
else:
bareBaseDir = getBaseDirectory(retWorkspaceName)
retBaseDir = checkBaseDirectory(bareBaseDir, retEnvList)
if not retBaseDir:
print("Could not determine the baseDirectory, you will need to "
"re-run this script and provide the -d option.")
sys.exit(1)
    # if we get here then we have everything needed to proceed
return (retAppName, retBaseDir, retWorkspaceName, retCommand, retAppPath, retEnvList, retOptions)
def main(argv):
(appName, baseDir, workspaceName, command, appPath, envList, options) = checkArgs()
customerApp = ManageAppName(appName, baseDir, workspaceName, envList, appPath)
customerApp.run(command, options)
if __name__ == "__main__":
main(sys.argv[1:])
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
[] |
[] |
[
"dcUTILS"
] |
[]
|
["dcUTILS"]
|
python
| 1 | 0 | |
common/common.go
|
package common
import (
"encoding/base64"
"fmt"
"os"
"strconv"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/honeycombio/honeytail/parsers"
"github.com/honeycombio/honeytail/parsers/htjson"
"github.com/honeycombio/honeytail/parsers/keyval"
"github.com/honeycombio/honeytail/parsers/regex"
libhoney "github.com/honeycombio/libhoney-go"
"github.com/sirupsen/logrus"
)
var (
sampleRate uint
writeKey string
apiHost string
dataset string
errorDataset string
filterFields []string
)
const (
version = "2.1.1"
)
// InitHoneycombFromEnvVars will attempt to call libhoney.Init based on values
// passed to the lambda through env vars. The caller is responsible for calling
// libhoney.Close afterward. Will return an err if insufficient ENV vars were
// specified.
func InitHoneycombFromEnvVars() error {
sampleRate = 1
if os.Getenv("SAMPLE_RATE") != "" {
i, err := strconv.Atoi(os.Getenv("SAMPLE_RATE"))
		if err != nil {
			logrus.WithField("sample_rate", os.Getenv("SAMPLE_RATE")).
				Warn("Warning: unable to parse sample rate, falling back to 1.")
		} else {
			sampleRate = uint(i)
		}
}
// If KMS_KEY_ID is supplied, we assume we're dealing with an encrypted key.
kmsKeyID := os.Getenv("KMS_KEY_ID")
if kmsKeyID != "" {
encryptedWriteKey := os.Getenv("HONEYCOMB_WRITE_KEY")
if encryptedWriteKey == "" {
return fmt.Errorf("Value for KMS_KEY_ID but no value for HONEYCOMB_WRITE_KEY")
} else {
kmsSession := session.Must(session.NewSession(&aws.Config{
Region: aws.String(os.Getenv("AWS_REGION")),
}))
config := &aws.Config{}
svc := kms.New(kmsSession, config)
cyphertext, err := base64.StdEncoding.DecodeString(encryptedWriteKey)
if err != nil {
logrus.WithError(err).
Error("unable to decode ciphertext in write key")
return fmt.Errorf("unable to decode ciphertext in write key")
}
resp, err := svc.Decrypt(&kms.DecryptInput{
CiphertextBlob: cyphertext,
})
if err != nil {
logrus.WithError(err).Error("unable to decrypt honeycomb write key")
return fmt.Errorf("unable to decrypt honeycomb write key")
}
writeKey = string(resp.Plaintext)
}
} else {
writeKey = os.Getenv("HONEYCOMB_WRITE_KEY")
if writeKey == "" {
return fmt.Errorf("no value for HONEYCOMB_WRITE_KEY")
}
}
apiHost = os.Getenv("API_HOST")
if apiHost == "" {
apiHost = "https://api.honeycomb.io"
}
dataset = os.Getenv("DATASET")
if dataset == "" {
dataset = "honeycomb-cloudwatch-logs"
}
errorDataset = os.Getenv("ERROR_DATASET")
libhoney.UserAgentAddition = fmt.Sprintf("integrations-for-aws/%s", version)
// Call Init to configure libhoney
libhoney.Init(libhoney.Config{
WriteKey: writeKey,
Dataset: dataset,
APIHost: apiHost,
SampleRate: sampleRate,
})
return nil
}
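// Illustrative usage sketch (caller-side code assumed, not part of this
// package): a typical lambda handler initializes once and flushes on exit.
//
//	if err := common.InitHoneycombFromEnvVars(); err != nil {
//		log.Fatal(err)
//	}
//	defer libhoney.Close()
//
// The env var names read here (HONEYCOMB_WRITE_KEY, KMS_KEY_ID, API_HOST,
// DATASET, SAMPLE_RATE, ERROR_DATASET) are the ones documented above.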
// ConstructParser accepts a parser name and attempts to build the parser,
// pulling additional environment variables as needed
func ConstructParser(parserType string) (parsers.LineParser, error) {
if parserType == "regex" {
regexVal := os.Getenv("REGEX_PATTERN")
regexParser, err := regex.NewRegexLineParser([]string{regexVal})
if err != nil {
return nil, fmt.Errorf("failed to create regex parser: %s", err.Error())
}
return regexParser, nil
} else if parserType == "json" {
return &htjson.JSONLineParser{}, nil
} else if parserType == "keyval" {
return &keyval.KeyValLineParser{}, nil
}
return nil, fmt.Errorf("Unknown parser: %s", parserType)
}
// ConvertTypes will convert strings into integer and floats if applicable
func ConvertTypes(input map[string]interface{}) map[string]interface{} {
data := make(map[string]interface{})
for k, v := range input {
if stringVal, ok := v.(string); ok {
if val, err := strconv.Atoi(stringVal); err == nil {
data[k] = val
} else if val, err := strconv.ParseFloat(stringVal, 64); err == nil {
data[k] = val
} else {
data[k] = stringVal
}
} else {
data[k] = v
}
}
return data
}
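// Illustrative example of ConvertTypes (values assumed): an input of
// {"status": "200", "duration": "0.25", "path": "/home"} becomes
// {"status": 200, "duration": 0.25, "path": "/home"} -- integer parsing is
// tried first, then float, and anything else passes through unchanged.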
// AddUserAgentMetadata adds additional metadata to the user agent string
func AddUserAgentMetadata(handler, parser string) {
libhoney.UserAgentAddition = fmt.Sprintf(
"%s (%s, %s)", libhoney.UserAgentAddition, handler, parser,
)
}
// GetSampleRate returns the configured sample rate
func GetSampleRate() uint {
return sampleRate
}
// WriteErrorEvent writes the error and optional fields to the Error Dataset,
// if an error dataset was specified
func WriteErrorEvent(err error, errorType string, fields map[string]interface{}) {
if errorDataset != "" {
ev := libhoney.NewEvent()
ev.Dataset = errorDataset
ev.AddField("meta.honeycomb_error", err.Error())
ev.AddField("meta.error_type", errorType)
ev.Add(fields)
ev.Send()
}
}
// GetFilterFields returns a list of fields to be deleted from an event before it is sent to Honeycomb
// If FILTER_FIELDS is not set, returns an empty list.
func GetFilterFields() []string {
if filterFields != nil {
return filterFields
}
filterFields = []string{}
filtersString := os.Getenv("FILTER_FIELDS")
if filtersString == "" {
// return an empty (but non-nil) filterField list
return filterFields
}
// FILTER_FIELDS is a comma-separated string of fields
filterFields = strings.Split(filtersString, ",")
return filterFields
}
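// Illustrative example of GetFilterFields (value assumed): with
// FILTER_FIELDS="password,token" it returns []string{"password", "token"};
// with FILTER_FIELDS unset it returns an empty, non-nil slice, and the result
// is cached in the package-level filterFields variable for later calls.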
|
[
"\"SAMPLE_RATE\"",
"\"SAMPLE_RATE\"",
"\"SAMPLE_RATE\"",
"\"KMS_KEY_ID\"",
"\"HONEYCOMB_WRITE_KEY\"",
"\"AWS_REGION\"",
"\"HONEYCOMB_WRITE_KEY\"",
"\"API_HOST\"",
"\"DATASET\"",
"\"ERROR_DATASET\"",
"\"REGEX_PATTERN\"",
"\"FILTER_FIELDS\""
] |
[] |
[
"ERROR_DATASET",
"AWS_REGION",
"FILTER_FIELDS",
"HONEYCOMB_WRITE_KEY",
"API_HOST",
"REGEX_PATTERN",
"KMS_KEY_ID",
"DATASET",
"SAMPLE_RATE"
] |
[]
|
["ERROR_DATASET", "AWS_REGION", "FILTER_FIELDS", "HONEYCOMB_WRITE_KEY", "API_HOST", "REGEX_PATTERN", "KMS_KEY_ID", "DATASET", "SAMPLE_RATE"]
|
go
| 9 | 0 | |
python/ccxt/gateio.py
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class gateio(Exchange):
def describe(self):
return self.deep_extend(super(gateio, self).describe(), {
'id': 'gateio',
'name': 'Gate.io',
'countries': ['KR'],
'rateLimit': 10 / 3, # 300 requests per second or 3.33ms
'version': 'v4',
'certified': True,
'pro': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/31784029-0313c702-b509-11e7-9ccc-bc0da6a0e435.jpg',
'doc': 'https://www.gate.io/docs/apiv4/en/index.html',
'www': 'https://gate.io/',
'api': {
'public': 'https://api.gateio.ws/api/v4',
'private': 'https://api.gateio.ws/api/v4',
},
'referral': {
'url': 'https://www.gate.io/ref/2436035',
'discount': 0.2,
},
},
'has': {
'cancelOrder': True,
'createMarketOrder': False,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': True,
'fetchFundingRates': True,
'fetchIndexOHLCV': True,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': False,
'fetchTrades': True,
'fetchWithdrawals': True,
'transfer': True,
'withdraw': True,
},
'api': {
'public': {
'spot': {
'get': {
'currencies': 1,
'currencies/{currency}': 1,
'currency_pairs': 1,
'currency_pairs/{currency_pair}': 1,
'tickers': 1,
'order_book': 1,
'trades': 1,
'candlesticks': 1,
},
},
'margin': {
'get': {
'currency_pairs': 1,
'currency_pairs/{currency_pair}': 1,
'cross/currencies': 1,
'cross/currencies/{currency}': 1,
},
},
'futures': {
'get': {
'{settle}/contracts': 1.5,
'{settle}/contracts/{contract}': 1.5,
'{settle}/order_book': 1.5,
'{settle}/trades': 1.5,
'{settle}/candlesticks': 1.5,
'{settle}/tickers': 1.5,
'{settle}/funding_rate': 1.5,
'{settle}/insurance': 1.5,
'{settle}/contract_stats': 1.5,
'{settle}/liq_orders': 1.5,
},
},
'delivery': {
'get': {
'{settle}/contracts': 1.5,
'{settle}/contracts/{contract}': 1.5,
'{settle}/order_book': 1.5,
'{settle}/trades': 1.5,
'{settle}/candlesticks': 1.5,
'{settle}/tickers': 1.5,
'{settle}/insurance': 1.5,
},
},
},
'private': {
'withdrawals': {
'post': {
'': 3000, # 3000 = 10 seconds
},
'delete': {
'{withdrawal_id}': 300,
},
},
'wallet': {
'get': {
'deposit_address': 300,
'withdrawals': 300,
'deposits': 300,
'sub_account_transfers': 300,
'withdraw_status': 300,
'sub_account_balances': 300,
'fee': 300,
},
'post': {
'transfers': 300,
'sub_account_transfers': 300,
},
},
'spot': {
'get': {
'accounts': 1,
'open_orders': 1,
'orders': 1,
'orders/{order_id}': 1,
'my_trades': 1,
'price_orders': 1,
'price_orders/{order_id}': 1,
},
'post': {
'batch_orders': 1,
'orders': 1,
'cancel_batch_orders': 1,
'price_orders': 1,
},
'delete': {
'orders': 1,
'orders/{order_id}': 1,
'price_orders': 1,
'price_orders/{order_id}': 1,
},
},
'margin': {
'get': {
'accounts': 1.5,
'account_book': 1.5,
'funding_accounts': 1.5,
'loans': 1.5,
'loans/{loan_id}': 1.5,
'loans/{loan_id}/repayment': 1.5,
'loan_records': 1.5,
'loan_records/{load_record_id}': 1.5,
'auto_repay': 1.5,
'transferable': 1.5,
'cross/accounts': 1.5,
'cross/account_book': 1.5,
'cross/loans': 1.5,
'cross/loans/{loan_id}': 1.5,
'cross/loans/repayments': 1.5,
'cross/transferable': 1.5,
},
'post': {
'loans': 1.5,
'merged_loans': 1.5,
'loans/{loan_id}/repayment': 1.5,
'auto_repay': 1.5,
'cross/loans': 1.5,
'cross/loans/repayments': 1.5,
},
'patch': {
'loans/{loan_id}': 1.5,
'loan_records/{loan_record_id}': 1.5,
},
'delete': {
'loans/{loan_id}': 1.5,
},
},
'futures': {
'get': {
'{settle}/accounts': 1.5,
'{settle}/account_book': 1.5,
'{settle}/positions': 1.5,
'{settle}/positions/{contract}': 1.5,
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/my_trades': 1.5,
'{settle}/position_close': 1.5,
'{settle}/liquidates': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
'post': {
'{settle}/positions/{contract}/margin': 1.5,
'{settle}/positions/{contract}/leverage': 1.5,
'{settle}/positions/{contract}/risk_limit': 1.5,
'{settle}/dual_mode': 1.5,
'{settle}/dual_comp/positions/{contract}': 1.5,
'{settle}/dual_comp/positions/{contract}/margin': 1.5,
'{settle}/dual_comp/positions/{contract}/leverage': 1.5,
'{settle}/dual_comp/positions/{contract}/risk_limit': 1.5,
'{settle}/orders': 1.5,
'{settle}/price_orders': 1.5,
},
'delete': {
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
},
'delivery': {
'get': {
'{settle}/accounts': 1.5,
'{settle}/account_book': 1.5,
'{settle}/positions': 1.5,
'{settle}/positions/{contract}': 1.5,
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/my_trades': 1.5,
'{settle}/position_close': 1.5,
'{settle}/liquidates': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
'post': {
'{settle}/positions/{contract}/margin': 1.5,
'{settle}/positions/{contract}/leverage': 1.5,
'{settle}/positions/{contract}/risk_limit': 1.5,
'{settle}/orders': 1.5,
'{settle}/price_orders': 1.5,
},
'delete': {
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
},
},
},
'timeframes': {
'10s': '10s',
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'4h': '4h',
'8h': '8h',
'1d': '1d',
'7d': '7d',
},
# copied from gateiov2
'commonCurrencies': {
'88MPH': 'MPH',
'BIFI': 'Bitcoin File',
'BOX': 'DefiBox',
'BTCBEAR': 'BEAR',
'BTCBULL': 'BULL',
'BYN': 'Beyond Finance',
'EGG': 'Goose Finance',
'GTC': 'Game.com', # conflict with Gitcoin and Gastrocoin
'GTC_HT': 'Game.com HT',
'GTC_BSC': 'Game.com BSC',
'HIT': 'HitChain',
'MPH': 'Morpher', # conflict with 88MPH
'RAI': 'Rai Reflex Index', # conflict with RAI Finance
'SBTC': 'Super Bitcoin',
'STX': 'Stox',
'TNC': 'Trinity Network Credit',
'TON': 'TONToken',
'VAI': 'VAIOT',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'options': {
'createOrder': {
'expiration': 86400, # for conditional orders
},
'networks': {
'TRC20': 'TRX',
'ERC20': 'ETH',
'BEP20': 'BSC',
},
'accountsByType': {
'spot': 'spot',
'margin': 'margin',
'futures': 'futures',
'delivery': 'delivery',
},
'defaultType': 'spot',
'swap': {
'fetchMarkets': {
'settlementCurrencies': ['usdt', 'btc'],
},
},
'futures': {
'fetchMarkets': {
'settlementCurrencies': ['usdt', 'btc'],
},
},
},
'precisionMode': TICK_SIZE,
'fees': {
'trading': {
'tierBased': True,
'feeSide': 'get',
'percentage': True,
'maker': self.parse_number('0.002'),
'taker': self.parse_number('0.002'),
'tiers': {
# volume is in BTC
'maker': [
[self.parse_number('0'), self.parse_number('0.002')],
[self.parse_number('1.5'), self.parse_number('0.00185')],
[self.parse_number('3'), self.parse_number('0.00175')],
[self.parse_number('6'), self.parse_number('0.00165')],
[self.parse_number('12.5'), self.parse_number('0.00155')],
[self.parse_number('25'), self.parse_number('0.00145')],
[self.parse_number('75'), self.parse_number('0.00135')],
[self.parse_number('200'), self.parse_number('0.00125')],
[self.parse_number('500'), self.parse_number('0.00115')],
[self.parse_number('1250'), self.parse_number('0.00105')],
[self.parse_number('2500'), self.parse_number('0.00095')],
[self.parse_number('3000'), self.parse_number('0.00085')],
[self.parse_number('6000'), self.parse_number('0.00075')],
[self.parse_number('11000'), self.parse_number('0.00065')],
[self.parse_number('20000'), self.parse_number('0.00055')],
[self.parse_number('40000'), self.parse_number('0.00055')],
[self.parse_number('75000'), self.parse_number('0.00055')],
],
'taker': [
[self.parse_number('0'), self.parse_number('0.002')],
[self.parse_number('1.5'), self.parse_number('0.00195')],
[self.parse_number('3'), self.parse_number('0.00185')],
[self.parse_number('6'), self.parse_number('0.00175')],
[self.parse_number('12.5'), self.parse_number('0.00165')],
[self.parse_number('25'), self.parse_number('0.00155')],
[self.parse_number('75'), self.parse_number('0.00145')],
[self.parse_number('200'), self.parse_number('0.00135')],
[self.parse_number('500'), self.parse_number('0.00125')],
[self.parse_number('1250'), self.parse_number('0.00115')],
[self.parse_number('2500'), self.parse_number('0.00105')],
[self.parse_number('3000'), self.parse_number('0.00095')],
[self.parse_number('6000'), self.parse_number('0.00085')],
[self.parse_number('11000'), self.parse_number('0.00075')],
[self.parse_number('20000'), self.parse_number('0.00065')],
[self.parse_number('40000'), self.parse_number('0.00065')],
[self.parse_number('75000'), self.parse_number('0.00065')],
],
},
},
'swap': {
'tierBased': True,
'feeSide': 'base',
'percentage': True,
'maker': self.parse_number('0.0'),
'taker': self.parse_number('0.0005'),
'tiers': {
'maker': [
[self.parse_number('0'), self.parse_number('0.0000')],
[self.parse_number('1.5'), self.parse_number('-0.00005')],
[self.parse_number('3'), self.parse_number('-0.00005')],
[self.parse_number('6'), self.parse_number('-0.00005')],
[self.parse_number('12.5'), self.parse_number('-0.00005')],
[self.parse_number('25'), self.parse_number('-0.00005')],
[self.parse_number('75'), self.parse_number('-0.00005')],
[self.parse_number('200'), self.parse_number('-0.00005')],
[self.parse_number('500'), self.parse_number('-0.00005')],
[self.parse_number('1250'), self.parse_number('-0.00005')],
[self.parse_number('2500'), self.parse_number('-0.00005')],
[self.parse_number('3000'), self.parse_number('-0.00008')],
[self.parse_number('6000'), self.parse_number('-0.01000')],
[self.parse_number('11000'), self.parse_number('-0.01002')],
[self.parse_number('20000'), self.parse_number('-0.01005')],
[self.parse_number('40000'), self.parse_number('-0.02000')],
[self.parse_number('75000'), self.parse_number('-0.02005')],
],
'taker': [
[self.parse_number('0'), self.parse_number('0.00050')],
[self.parse_number('1.5'), self.parse_number('0.00048')],
[self.parse_number('3'), self.parse_number('0.00046')],
[self.parse_number('6'), self.parse_number('0.00044')],
[self.parse_number('12.5'), self.parse_number('0.00042')],
[self.parse_number('25'), self.parse_number('0.00040')],
[self.parse_number('75'), self.parse_number('0.00038')],
[self.parse_number('200'), self.parse_number('0.00036')],
[self.parse_number('500'), self.parse_number('0.00034')],
[self.parse_number('1250'), self.parse_number('0.00032')],
[self.parse_number('2500'), self.parse_number('0.00030')],
[self.parse_number('3000'), self.parse_number('0.00030')],
[self.parse_number('6000'), self.parse_number('0.00030')],
[self.parse_number('11000'), self.parse_number('0.00030')],
[self.parse_number('20000'), self.parse_number('0.00030')],
[self.parse_number('40000'), self.parse_number('0.00030')],
[self.parse_number('75000'), self.parse_number('0.00030')],
],
},
},
},
# https://www.gate.io/docs/apiv4/en/index.html#label-list
'exceptions': {
'exact': {
'INVALID_PARAM_VALUE': BadRequest,
'INVALID_PROTOCOL': BadRequest,
'INVALID_ARGUMENT': BadRequest,
'INVALID_REQUEST_BODY': BadRequest,
'MISSING_REQUIRED_PARAM': ArgumentsRequired,
'BAD_REQUEST': BadRequest,
'INVALID_CONTENT_TYPE': BadRequest,
'NOT_ACCEPTABLE': BadRequest,
'METHOD_NOT_ALLOWED': BadRequest,
'NOT_FOUND': ExchangeError,
'INVALID_CREDENTIALS': AuthenticationError,
'INVALID_KEY': AuthenticationError,
'IP_FORBIDDEN': AuthenticationError,
'READ_ONLY': PermissionDenied,
'INVALID_SIGNATURE': AuthenticationError,
'MISSING_REQUIRED_HEADER': AuthenticationError,
'REQUEST_EXPIRED': AuthenticationError,
'ACCOUNT_LOCKED': AccountSuspended,
'FORBIDDEN': PermissionDenied,
'SUB_ACCOUNT_NOT_FOUND': ExchangeError,
'SUB_ACCOUNT_LOCKED': AccountSuspended,
'MARGIN_BALANCE_EXCEPTION': ExchangeError,
'MARGIN_TRANSFER_FAILED': ExchangeError,
'TOO_MUCH_FUTURES_AVAILABLE': ExchangeError,
'FUTURES_BALANCE_NOT_ENOUGH': InsufficientFunds,
'ACCOUNT_EXCEPTION': ExchangeError,
'SUB_ACCOUNT_TRANSFER_FAILED': ExchangeError,
'ADDRESS_NOT_USED': ExchangeError,
'TOO_FAST': RateLimitExceeded,
'WITHDRAWAL_OVER_LIMIT': ExchangeError,
'API_WITHDRAW_DISABLED': ExchangeNotAvailable,
'INVALID_WITHDRAW_ID': ExchangeError,
'INVALID_WITHDRAW_CANCEL_STATUS': ExchangeError,
'INVALID_PRECISION': InvalidOrder,
'INVALID_CURRENCY': BadSymbol,
'INVALID_CURRENCY_PAIR': BadSymbol,
'POC_FILL_IMMEDIATELY': ExchangeError,
'ORDER_NOT_FOUND': OrderNotFound,
'ORDER_CLOSED': InvalidOrder,
'ORDER_CANCELLED': InvalidOrder,
'QUANTITY_NOT_ENOUGH': InvalidOrder,
'BALANCE_NOT_ENOUGH': InsufficientFunds,
'MARGIN_NOT_SUPPORTED': InvalidOrder,
'MARGIN_BALANCE_NOT_ENOUGH': InsufficientFunds,
'AMOUNT_TOO_LITTLE': InvalidOrder,
'AMOUNT_TOO_MUCH': InvalidOrder,
'REPEATED_CREATION': InvalidOrder,
'LOAN_NOT_FOUND': OrderNotFound,
'LOAN_RECORD_NOT_FOUND': OrderNotFound,
'NO_MATCHED_LOAN': ExchangeError,
'NOT_MERGEABLE': ExchangeError,
'NO_CHANGE': ExchangeError,
'REPAY_TOO_MUCH': ExchangeError,
'TOO_MANY_CURRENCY_PAIRS': InvalidOrder,
'TOO_MANY_ORDERS': InvalidOrder,
'MIXED_ACCOUNT_TYPE': InvalidOrder,
'AUTO_BORROW_TOO_MUCH': ExchangeError,
'TRADE_RESTRICTED': InsufficientFunds,
'USER_NOT_FOUND': ExchangeError,
'CONTRACT_NO_COUNTER': ExchangeError,
'CONTRACT_NOT_FOUND': BadSymbol,
'RISK_LIMIT_EXCEEDED': ExchangeError,
'INSUFFICIENT_AVAILABLE': InsufficientFunds,
'LIQUIDATE_IMMEDIATELY': InvalidOrder,
'LEVERAGE_TOO_HIGH': InvalidOrder,
'LEVERAGE_TOO_LOW': InvalidOrder,
'ORDER_NOT_OWNED': ExchangeError,
'ORDER_FINISHED': ExchangeError,
'POSITION_CROSS_MARGIN': ExchangeError,
'POSITION_IN_LIQUIDATION': ExchangeError,
'POSITION_IN_CLOSE': ExchangeError,
'POSITION_EMPTY': InvalidOrder,
'REMOVE_TOO_MUCH': ExchangeError,
'RISK_LIMIT_NOT_MULTIPLE': ExchangeError,
'RISK_LIMIT_TOO_HIGH': ExchangeError,
'RISK_LIMIT_TOO_lOW': ExchangeError,
'PRICE_TOO_DEVIATED': InvalidOrder,
'SIZE_TOO_LARGE': InvalidOrder,
'SIZE_TOO_SMALL': InvalidOrder,
'PRICE_OVER_LIQUIDATION': InvalidOrder,
'PRICE_OVER_BANKRUPT': InvalidOrder,
'ORDER_POC_IMMEDIATE': InvalidOrder,
'INCREASE_POSITION': InvalidOrder,
'CONTRACT_IN_DELISTING': ExchangeError,
'INTERNAL': ExchangeError,
'SERVER_ERROR': ExchangeError,
'TOO_BUSY': ExchangeNotAvailable,
},
},
'broad': {},
})
def fetch_markets(self, params={}):
        # :param params['type']: 'spot', 'margin', 'futures' or 'delivery'
        # :param params['settle']: The settlement currency ('usdt' or 'btc')
defaultType = self.safe_string_2(self.options, 'fetchMarkets', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
spot = (type == 'spot')
margin = (type == 'margin')
futures = (type == 'futures')
swap = (type == 'swap')
option = (type == 'option')
if not spot and not margin and not futures and not swap:
raise ExchangeError(self.id + " does not support '" + type + "' type, set exchange.options['defaultType'] to " + "'spot', 'margin', 'swap' or 'futures'") # eslint-disable-line quotes
response = None
result = []
method = self.get_supported_mapping(type, {
'spot': 'publicSpotGetCurrencyPairs',
'margin': 'publicMarginGetCurrencyPairs',
'swap': 'publicFuturesGetSettleContracts',
'futures': 'publicDeliveryGetSettleContracts',
})
if swap or futures or option:
settlementCurrencies = self.get_settlement_currencies(type, 'fetchMarkets')
for c in range(0, len(settlementCurrencies)):
settle = settlementCurrencies[c]
query['settle'] = settle
response = getattr(self, method)(query)
# Perpetual swap
# [
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
# ]
#
# Delivery Futures
# [
# {
# "name": "BTC_USDT_20200814",
# "underlying": "BTC_USDT",
# "cycle": "WEEKLY",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "mark_type": "index",
# "last_price": "9017",
# "mark_price": "9019",
# "index_price": "9005.3",
# "basis_rate": "0.185095",
# "basis_value": "13.7",
# "basis_impact_value": "100000",
# "settle_price": "0",
# "settle_price_interval": 60,
# "settle_price_duration": 1800,
# "settle_fee_rate": "0.0015",
# "expire_time": 1593763200,
# "order_price_round": "0.1",
# "mark_price_round": "0.1",
# "leverage_min": "1",
# "leverage_max": "100",
# "maintenance_rate": "1000000",
# "risk_limit_base": "140.726652109199",
# "risk_limit_step": "1000000",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "ref_discount_rate": "0",
# "ref_rebate_rate": "0.2",
# "order_price_deviate": "0.5",
# "order_size_min": 1,
# "order_size_max": 1000000,
# "orders_limit": 50,
# "orderbook_id": 63,
# "trade_id": 26,
# "trade_size": 435,
# "position_size": 130,
# "config_change_time": 1593158867,
# "in_delisting": False
# }
# ]
#
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'name')
parts = id.split('_')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
date = self.safe_string(parts, 2)
linear = quoteId.lower() == settle
inverse = baseId.lower() == settle
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = ''
if date is not None:
symbol = base + '/' + quote + '-' + date + ':' + self.safe_currency_code(settle)
else:
symbol = base + '/' + quote + ':' + self.safe_currency_code(settle)
priceDeviate = self.safe_string(market, 'order_price_deviate')
markPrice = self.safe_string(market, 'mark_price')
minMultiplier = Precise.string_sub('1', priceDeviate)
maxMultiplier = Precise.string_add('1', priceDeviate)
minPrice = Precise.string_mul(minMultiplier, markPrice)
maxPrice = Precise.string_mul(maxMultiplier, markPrice)
takerPercent = self.safe_string(market, 'taker_fee_rate')
makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
feeIndex = 'swap' if (type == 'futures') else type
pricePrecision = self.safe_number(market, 'order_price_round')
result.append({
'info': market,
'id': id,
'baseId': baseId,
'quoteId': quoteId,
'settleId': self.safe_symbol(settle),
'base': base,
'quote': quote,
'symbol': symbol,
'type': type,
'spot': spot,
'margin': margin,
'futures': futures,
'swap': swap,
'option': option,
'derivative': True,
'contract': True,
'linear': linear,
'inverse': inverse,
# Fee is in %, so divide by 100
'taker': self.parse_number(Precise.string_div(takerPercent, '100')),
'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
'contractSize': self.safe_string(market, 'quanto_multiplier'),
'precision': {
'amount': self.parse_number('1'),
'price': pricePrecision,
},
'limits': {
'leverage': {
'min': self.safe_number(market, 'leverage_min'),
'max': self.safe_number(market, 'leverage_max'),
},
'amount': {
'min': self.safe_number(market, 'order_size_min'),
'max': self.safe_number(market, 'order_size_max'),
},
'price': {
'min': minPrice,
'max': maxPrice,
},
},
'expiry': self.safe_integer(market, 'expire_time'),
'fees': self.safe_value(self.fees, feeIndex, {}),
})
else:
response = getattr(self, method)(query)
#
# Spot
# [
# {
# "id": "DEGO_USDT",
# "base": "DEGO",
# "quote": "USDT",
# "fee": "0.2",
# "min_quote_amount": "1",
# "amount_precision": "4",
# "precision": "4",
# "trade_status": "tradable",
# "sell_start": "0",
# "buy_start": "0"
# }
# ]
#
# Margin
# [
# {
# "id": "ETH_USDT",
# "base": "ETH",
# "quote": "USDT",
# "leverage": 3,
# "min_base_amount": "0.01",
# "min_quote_amount": "100",
# "max_quote_amount": "1000000"
# }
# ]
#
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'id')
spot = (type == 'spot')
futures = (type == 'futures')
swap = (type == 'swap')
option = (type == 'option')
baseId, quoteId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
takerPercent = self.safe_string(market, 'fee')
makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
amountPrecisionString = self.safe_string(market, 'amount_precision')
pricePrecisionString = self.safe_string(market, 'precision')
amountPrecision = self.parse_number(self.parse_precision(amountPrecisionString))
pricePrecision = self.parse_number(self.parse_precision(pricePrecisionString))
tradeStatus = self.safe_string(market, 'trade_status')
result.append({
'info': market,
'id': id,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'base': base,
'quote': quote,
'symbol': symbol,
'type': type,
'spot': spot,
'margin': margin,
'futures': futures,
'swap': swap,
'option': option,
'contract': False,
'derivative': False,
'linear': False,
'inverse': False,
# Fee is in %, so divide by 100
'taker': self.parse_number(Precise.string_div(takerPercent, '100')),
'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
'precision': {
'amount': amountPrecision,
'price': pricePrecision,
},
'active': tradeStatus == 'tradable',
'limits': {
'amount': {
'min': amountPrecision,
'max': None,
},
'price': {
'min': pricePrecision,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'min_quote_amount'),
'max': None,
},
'leverage': {
                            'max': self.safe_number(market, 'leverage', 1),
},
},
})
return result
def prepare_request(self, market):
if market['contract']:
return {
'contract': market['id'],
'settle': market['settleId'],
}
else:
return {
'currency_pair': market['id'],
}
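        # Illustrative sketch (market ids taken from the sample responses
        # above): for a contract market named BTC_USDT settled in usdt,
        # prepare_request returns {'contract': 'BTC_USDT', 'settle': 'usdt'};
        # for a spot market such as DEGO_USDT it returns
        # {'currency_pair': 'DEGO_USDT'}.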
def get_settlement_currencies(self, type, method):
options = self.safe_value(self.options, type, {}) # ['BTC', 'USDT'] unified codes
fetchMarketsContractOptions = self.safe_value(options, method, {})
        defaultSettle = ['usdt'] if (type == 'swap') else ['btc']
return self.safe_value(fetchMarketsContractOptions, 'settlementCurrencies', defaultSettle)
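        # Illustrative sketch: with the defaults declared in describe() above
        # (options['swap']['fetchMarkets']['settlementCurrencies'] = ['usdt', 'btc']),
        # get_settlement_currencies('swap', 'fetchMarkets') returns
        # ['usdt', 'btc']; when nothing is configured it falls back to
        # ['usdt'] for swap and ['btc'] for futures.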
def fetch_currencies(self, params={}):
response = self.publicSpotGetCurrencies(params)
#
# {
# "currency": "BCN",
# "delisted": False,
# "withdraw_disabled": True,
# "withdraw_delayed": False,
# "deposit_disabled": True,
# "trade_disabled": False
# }
#
result = {}
# TODO: remove magic constants
amountPrecision = self.parse_number('1e-6')
for i in range(0, len(response)):
entry = response[i]
currencyId = self.safe_string(entry, 'currency')
code = self.safe_currency_code(currencyId)
delisted = self.safe_value(entry, 'delisted')
withdraw_disabled = self.safe_value(entry, 'withdraw_disabled')
            deposit_disabled = self.safe_value(entry, 'deposit_disabled')
trade_disabled = self.safe_value(entry, 'trade_disabled')
active = not (delisted and withdraw_disabled and deposit_disabled and trade_disabled)
result[code] = {
'id': currencyId,
'name': None,
'code': code,
'precision': amountPrecision,
'info': entry,
'active': active,
'fee': None,
'fees': [],
'limits': self.limits,
}
return result
def fetch_funding_rate(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'contract': market['id'],
'settle': market['quote'].lower(),
}
response = self.publicFuturesGetSettleContractsContract(self.extend(request, params))
#
# [
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
# ]
#
return self.parse_funding_rate(response)
def fetch_funding_rates(self, symbols=None, params={}):
self.load_markets()
settle = self.safe_string(params, 'settle') # TODO: Save settle in markets?
request = {
'settle': settle.lower(),
}
response = self.publicFuturesGetSettleContracts(self.extend(request, params))
#
# [
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
# ]
#
result = self.parse_funding_rates(response)
return self.filter_by_array(result, 'symbol', symbols)
def parse_funding_rate(self, contract, market=None):
#
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
#
marketId = self.safe_string(contract, 'name')
symbol = self.safe_symbol(marketId, market)
markPrice = self.safe_number(contract, 'mark_price')
indexPrice = self.safe_number(contract, 'index_price')
interestRate = self.safe_number(contract, 'interest_rate')
fundingRate = self.safe_string(contract, 'funding_rate')
        fundingInterval = self.safe_integer(contract, 'funding_interval') * 1000
nextFundingTime = self.safe_integer(contract, 'funding_next_apply') * 1000
previousFundingTime = (self.safe_number(contract, 'funding_next_apply') * 1000) - fundingInterval
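        # Worked example using the sample payload above: funding_interval 28800
        # and funding_next_apply 1610035200 give
        #   nextFundingTime     = 1610035200 * 1000 = 1610035200000 ms
        #   previousFundingTime = 1610035200000 - 28800 * 1000 = 1610006400000 ms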
fundingRateIndicative = self.safe_number(contract, 'funding_rate_indicative')
timestamp = self.milliseconds()
return {
'info': contract,
'symbol': symbol,
'markPrice': markPrice,
'indexPrice': indexPrice,
'interestRate': interestRate,
'estimatedSettlePrice': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'previousFundingRate': fundingRate,
'nextFundingRate': fundingRateIndicative,
'previousFundingTimestamp': previousFundingTime,
'nextFundingTimestamp': nextFundingTime,
'previousFundingDatetime': self.iso8601(previousFundingTime),
'nextFundingDatetime': self.iso8601(nextFundingTime),
}
def fetch_network_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.privateWalletGetDepositAddress(self.extend(request, params))
addresses = self.safe_value(response, 'multichain_addresses')
currencyId = self.safe_string(response, 'currency')
code = self.safe_currency_code(currencyId)
result = {}
for i in range(0, len(addresses)):
entry = addresses[i]
#
# {
# "chain": "ETH",
# "address": "0x359a697945E79C7e17b634675BD73B33324E9408",
# "payment_id": "",
# "payment_name": "",
# "obtain_failed": "0"
# }
#
obtainFailed = self.safe_integer(entry, 'obtain_failed')
if obtainFailed:
continue
network = self.safe_string(entry, 'chain')
address = self.safe_string(entry, 'address')
tag = self.safe_string(entry, 'payment_id')
tagLength = len(tag)
tag = tag if tagLength else None
result[network] = {
'info': entry,
'code': code,
'address': address,
'tag': tag,
}
return result
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.privateWalletGetDepositAddress(self.extend(request, params))
#
# {
# "currency": "XRP",
# "address": "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d 391331007",
# "multichain_addresses": [
# {
# "chain": "XRP",
# "address": "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d",
# "payment_id": "391331007",
# "payment_name": "Tag",
# "obtain_failed": 0
# }
# ]
# }
#
currencyId = self.safe_string(response, 'currency')
code = self.safe_currency_code(currencyId)
addressField = self.safe_string(response, 'address')
tag = None
address = None
if addressField.find(' ') >= 0:
splitted = addressField.split(' ')
address = splitted[0]
tag = splitted[1]
else:
address = addressField
return {
'info': response,
'code': code,
'address': address,
'tag': tag,
'network': None,
}
def fetch_trading_fees(self, params={}):
self.load_markets()
response = self.privateWalletGetFee(params)
#
# {
# "user_id": 1486602,
# "taker_fee": "0.002",
# "maker_fee": "0.002",
# "gt_discount": True,
# "gt_taker_fee": "0.0015",
# "gt_maker_fee": "0.0015",
# "loan_fee": "0.18",
# "point_type": "0",
# "futures_taker_fee": "0.0005",
# "futures_maker_fee": "0"
# }
#
result = {}
taker = self.safe_number(response, 'taker_fee')
maker = self.safe_number(response, 'maker_fee')
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'maker': maker,
'taker': taker,
'info': response,
'symbol': symbol,
}
return result
def fetch_funding_fees(self, params={}):
self.load_markets()
response = self.privateWalletGetWithdrawStatus(params)
#
# {
# "currency": "MTN",
# "name": "Medicalchain",
# "name_cn": "Medicalchain",
# "deposit": "0",
# "withdraw_percent": "0%",
# "withdraw_fix": "900",
# "withdraw_day_limit": "500000",
# "withdraw_day_limit_remain": "500000",
# "withdraw_amount_mini": "900.1",
# "withdraw_eachtime_limit": "90000000000",
# "withdraw_fix_on_chains": {
# "ETH": "900"
# }
# }
#
withdrawFees = {}
for i in range(0, len(response)):
entry = response[i]
currencyId = self.safe_string(entry, 'currency')
code = self.safe_currency_code(currencyId)
withdrawFees[code] = {}
withdrawFix = self.safe_value(entry, 'withdraw_fix_on_chains')
if withdrawFix is None:
withdrawFix = {}
withdrawFix[code] = self.safe_number(entry, 'withdraw_fix')
keys = list(withdrawFix.keys())
for i in range(0, len(keys)):
key = keys[i]
withdrawFees[code][key] = self.parse_number(withdrawFix[key])
return {
'info': response,
'withdraw': withdrawFees,
'deposit': {},
}
def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchFundingHistory() requires a symbol argument')
self.load_markets()
# defaultType = 'future'
market = self.market(symbol)
request = self.prepare_request(market)
request['type'] = 'fund' # 'dnw' 'pnl' 'fee' 'refr' 'fund' 'point_dnw' 'point_fee' 'point_refr'
if since is not None:
request['from'] = since
if limit is not None:
request['limit'] = limit
method = self.get_supported_mapping(market['type'], {
'swap': 'privateFuturesGetSettleAccountBook',
'futures': 'privateDeliveryGetSettleAccountBook',
})
response = getattr(self, method)(self.extend(request, params))
result = []
for i in range(0, len(response)):
entry = response[i]
timestamp = self.safe_timestamp(entry, 'time')
result.append({
'info': entry,
'symbol': symbol,
'code': self.safe_currency_code(self.safe_string(entry, 'text')),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'id': None,
'amount': self.safe_number(entry, 'change'),
})
sorted = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, symbol, since, limit)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
#
# request = {
# 'currency_pair': market['id'],
# 'interval': '0', # depth, 0 means no aggregation is applied, default to 0
# 'limit': limit, # maximum number of order depth data in asks or bids
# 'with_id': True, # return order book ID
# }
#
request = self.prepare_request(market)
spot = market['spot']
method = self.get_supported_mapping(market['type'], {
'spot': 'publicSpotGetOrderBook',
# 'margin': 'publicMarginGetOrderBook',
'swap': 'publicFuturesGetSettleOrderBook',
'futures': 'publicDeliveryGetSettleOrderBook',
})
if limit is not None:
request['limit'] = limit # default 10, max 100
response = getattr(self, method)(self.extend(request, params))
#
# SPOT
#
# {
# "current": 1634345973275,
# "update": 1634345973271,
# "asks": [
# ["2.2241","12449.827"],
# ["2.2242","200"],
# ["2.2244","826.931"],
# ["2.2248","3876.107"],
# ["2.225","2377.252"],
# ["2.22509","439.484"],
# ["2.2251","1489.313"],
# ["2.2253","714.582"],
# ["2.2254","1349.784"],
# ["2.2256","234.701"]],
# "bids":[
# ["2.2236","32.465"],
# ["2.2232","243.983"],
# ["2.2231","32.207"],
# ["2.223","449.827"],
# ["2.2228","7.918"],
# ["2.2227","12703.482"],
# ["2.2226","143.033"],
# ["2.2225","143.027"],
# ["2.2224","1369.352"],
# ["2.2223","756.063"]
# ]
# }
#
# Perpetual Swap
#
# {
# "current": 1634350208.745,
# "asks": [
# {"s":24909,"p": "61264.8"},
# {"s":81,"p": "61266.6"},
# {"s":2000,"p": "61267.6"},
# {"s":490,"p": "61270.2"},
# {"s":12,"p": "61270.4"},
# {"s":11782,"p": "61273.2"},
# {"s":14666,"p": "61273.3"},
# {"s":22541,"p": "61273.4"},
# {"s":33,"p": "61273.6"},
# {"s":11980,"p": "61274.5"}
# ],
# "bids": [
# {"s":41844,"p": "61264.7"},
# {"s":13783,"p": "61263.3"},
# {"s":1143,"p": "61259.8"},
# {"s":81,"p": "61258.7"},
# {"s":2471,"p": "61257.8"},
# {"s":2471,"p": "61257.7"},
# {"s":2471,"p": "61256.5"},
# {"s":3,"p": "61254.2"},
# {"s":114,"p": "61252.4"},
# {"s":14372,"p": "61248.6"}
# ],
# "update": 1634350208.724
# }
#
timestamp = self.safe_integer(response, 'current')
if not spot:
timestamp = timestamp * 1000
priceKey = 0 if spot else 'p'
amountKey = 1 if spot else 's'
return self.parse_order_book(response, symbol, timestamp, 'bids', 'asks', priceKey, amountKey)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = self.prepare_request(market)
method = self.get_supported_mapping(market['type'], {
'spot': 'publicSpotGetTickers',
# 'margin': 'publicMarginGetTickers',
'swap': 'publicFuturesGetSettleTickers',
'futures': 'publicDeliveryGetSettleTickers',
})
response = getattr(self, method)(self.extend(request, params))
ticker = self.safe_value(response, 0)
return self.parse_ticker(ticker, market)
def parse_ticker(self, ticker, market=None):
#
# SPOT
#
# {
# "currency_pair": "KFC_USDT",
# "last": "7.255",
# "lowest_ask": "7.298",
# "highest_bid": "7.218",
# "change_percentage": "-1.18",
# "base_volume": "1219.053687865",
# "quote_volume": "8807.40299875455",
# "high_24h": "7.262",
# "low_24h": "7.095"
# }
#
# LINEAR/DELIVERY
#
# {
# "contract": "BTC_USDT",
# "last": "6432",
# "low_24h": "6278",
# "high_24h": "6790",
# "change_percentage": "4.43",
# "total_size": "32323904",
# "volume_24h": "184040233284",
# "volume_24h_btc": "28613220",
# "volume_24h_usd": "184040233284",
# "volume_24h_base": "28613220",
# "volume_24h_quote": "184040233284",
# "volume_24h_settle": "28613220",
# "mark_price": "6534",
# "funding_rate": "0.0001",
# "funding_rate_indicative": "0.0001",
# "index_price": "6531"
# }
#
marketId = self.safe_string_2(ticker, 'currency_pair', 'contract')
symbol = self.safe_symbol(marketId, market)
last = self.safe_number(ticker, 'last')
ask = self.safe_number(ticker, 'lowest_ask')
bid = self.safe_number(ticker, 'highest_bid')
high = self.safe_number(ticker, 'high_24h')
low = self.safe_number(ticker, 'low_24h')
baseVolume = self.safe_number_2(ticker, 'base_volume', 'volume_24h_base')
quoteVolume = self.safe_number_2(ticker, 'quote_volume', 'volume_24h_quote')
percentage = self.safe_number(ticker, 'change_percentage')
return self.safe_ticker({
'symbol': symbol,
'timestamp': None,
'datetime': None,
'high': high,
'low': low,
'bid': bid,
'bidVolume': None,
'ask': ask,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': percentage,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
method = self.get_supported_mapping(type, {
'spot': 'publicSpotGetTickers',
# 'margin': 'publicMarginGetTickers',
'swap': 'publicFuturesGetSettleTickers',
'futures': 'publicDeliveryGetSettleTickers',
})
request = {}
futures = type == 'futures'
swap = type == 'swap'
        if (swap or futures) and ('settle' not in params):
request['settle'] = 'usdt' if swap else 'btc'
response = getattr(self, method)(self.extend(request, params))
return self.parse_tickers(response, symbols)
def fetch_balance(self, params={}):
# :param params.type: spot, margin, crossMargin, swap or future
# :param params.settle: Settle currency(usdt or btc) for perpetual swap and futures
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
swap = type == 'swap'
futures = type == 'futures'
method = self.get_supported_mapping(type, {
'spot': 'privateSpotGetAccounts',
# 'margin': 'publicMarginGetTickers',
'swap': 'privateFuturesGetSettleAccounts',
'futures': 'privateDeliveryGetSettleAccounts',
})
request = {}
response = []
if swap or futures:
defaultSettle = 'usdt' if swap else 'btc'
request['settle'] = self.safe_string(params, 'settle', defaultSettle)
response_item = getattr(self, method)(self.extend(request, params))
response = [response_item]
else:
response = getattr(self, method)(self.extend(request, params))
# spot
#
# [
# {
# "currency": "DBC",
# "available": "0",
# "locked": "0"
# },
# ...
# ]
#
# Perpetual Swap
#
# {
# order_margin: "0",
# point: "0",
# bonus: "0",
# history: {
# dnw: "2.1321",
# pnl: "11.5351",
# refr: "0",
# point_fee: "0",
# fund: "-0.32340576684",
# bonus_dnw: "0",
# point_refr: "0",
# bonus_offset: "0",
# fee: "-0.20132775",
# point_dnw: "0",
# },
# unrealised_pnl: "13.315100000006",
# total: "12.51345151332",
# available: "0",
# in_dual_mode: False,
# currency: "USDT",
# position_margin: "12.51345151332",
# user: "6333333",
# }
#
# Delivery Future
#
# {
# order_margin: "0",
# point: "0",
# history: {
# dnw: "1",
# pnl: "0",
# refr: "0",
# point_fee: "0",
# point_dnw: "0",
# settle: "0",
# settle_fee: "0",
# point_refr: "0",
# fee: "0",
# },
# unrealised_pnl: "0",
# total: "1",
# available: "1",
# currency: "USDT",
# position_margin: "0",
# user: "6333333",
# }
#
result = {
'info': response,
}
for i in range(0, len(response)):
entry = response[i]
account = self.account()
currencyId = self.safe_string(entry, 'currency')
code = self.safe_currency_code(currencyId)
account['used'] = self.safe_string_2(entry, 'locked', 'position_margin')
account['free'] = self.safe_string(entry, 'available')
result[code] = account
return self.parse_balance(result)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
price = self.safe_string(params, 'price')
request = self.prepare_request(market)
request['interval'] = self.timeframes[timeframe]
method = 'publicSpotGetCandlesticks'
if market['contract']:
if market['futures']:
method = 'publicDeliveryGetSettleCandlesticks'
elif market['swap']:
method = 'publicFuturesGetSettleCandlesticks'
isMark = (price == 'mark')
isIndex = (price == 'index')
if isMark or isIndex:
request['contract'] = price + '_' + market['id']
params = self.omit(params, 'price')
if since is None:
if limit is not None:
request['limit'] = limit
else:
timeframeSeconds = self.parse_timeframe(timeframe)
timeframeMilliseconds = timeframeSeconds * 1000
            # align since forward to the next timeframe boundary
since = self.sum(since - (since % timeframeMilliseconds), timeframeMilliseconds)
request['from'] = int(since / 1000)
if limit is not None:
request['to'] = self.sum(request['from'], limit * timeframeSeconds - 1)
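            # illustrative example: with timeframe='1m'(60000ms), since=1634673259378 and limit=100,
            # since is aligned up to 1634673300000, so request['from'] = 1634673300 and request['to'] = 1634679299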
response = getattr(self, method)(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'mark',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def fetch_funding_rate_history(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchFundingRateHistory() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'contract': market['id'],
'settle': market['settleId'],
}
if limit is not None:
request['limit'] = limit
method = 'publicFuturesGetSettleFundingRate'
response = getattr(self, method)(self.extend(request, params))
#
# {
# "r": "0.00063521",
# "t": "1621267200000",
# }
#
rates = []
for i in range(0, len(response)):
entry = response[i]
timestamp = self.safe_timestamp(entry, 't')
rates.append({
'info': entry,
'symbol': symbol,
'fundingRate': self.safe_number(entry, 'r'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
sorted = self.sort_by(rates, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, symbol, since, limit)
def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'index',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def parse_ohlcv(self, ohlcv, market=None):
#
# Spot market candles
#
# [
# "1626163200", # Unix timestamp in seconds
# "346711.933138181617", # Trading volume
# "33165.23", # Close price
# "33260", # Highest price
# "33117.6", # Lowest price
# "33184.47" # Open price
# ]
#
# Mark and Index price candles
#
# {
# "t":1632873600, # Unix timestamp in seconds
# "o": "41025", # Open price
# "h": "41882.17", # Highest price
# "c": "41776.92", # Close price
# "l": "40783.94" # Lowest price
# }
#
if isinstance(ohlcv, list):
return [
self.safe_timestamp(ohlcv, 0), # unix timestamp in seconds
self.safe_number(ohlcv, 5), # open price
self.safe_number(ohlcv, 3), # highest price
self.safe_number(ohlcv, 4), # lowest price
self.safe_number(ohlcv, 2), # close price
self.safe_number(ohlcv, 1), # trading volume
]
else:
# Mark and Index price candles
return [
self.safe_timestamp(ohlcv, 't'), # unix timestamp in seconds
self.safe_number(ohlcv, 'o'), # open price
self.safe_number(ohlcv, 'h'), # highest price
self.safe_number(ohlcv, 'l'), # lowest price
self.safe_number(ohlcv, 'c'), # close price
self.safe_number(ohlcv, 'v'), # trading volume, None for mark or index price
]
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
#
# spot
#
# request = {
# 'currency_pair': market['id'],
# 'limit': limit, # maximum number of records to be returned in a single list
        # 'last_id': 'id',  # specify list starting point using the id of the last record in previous list-query results
# 'reverse': False, # True to retrieve records where id is smaller than the specified last_id, False to retrieve records where id is larger than the specified last_id
# }
#
# swap, futures
#
# request = {
# 'settle': market['settleId'],
# 'contract': market['id'],
# 'limit': limit, # maximum number of records to be returned in a single list
        # 'last_id': 'id',  # specify list starting point using the id of the last record in previous list-query results
        # 'from': int(since / 1000),  # starting time in seconds, if not specified, to and limit will be used to limit response items
# 'to': self.seconds(), # end time in seconds, default to current time
# }
#
request = self.prepare_request(market)
method = self.get_supported_mapping(market['type'], {
'spot': 'publicSpotGetTrades',
# 'margin': 'publicMarginGetTickers',
'swap': 'publicFuturesGetSettleTrades',
'futures': 'publicDeliveryGetSettleTrades',
})
if limit is not None:
request['limit'] = limit # default 100, max 1000
if since is not None and (market['contract']):
request['from'] = int(since / 1000)
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# [
# {
# id: "1852958144",
# create_time: "1634673259",
# create_time_ms: "1634673259378.105000",
# currency_pair: "ADA_USDT",
# side: "sell",
# amount: "307.078",
# price: "2.104",
# }
# ]
#
# perpetual swap
#
# [
# {
# size: "2",
# id: "2522911",
# create_time_ms: "1634673380.182",
# create_time: "1634673380.182",
# contract: "ADA_USDT",
# price: "2.10486",
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
#
# request = {
# 'currency_pair': market['id'],
# # 'limit': limit,
# # 'page': 0,
# # 'order_id': 'Order ID',
# # 'account': 'spot', # default to spot and margin account if not specified, set to cross_margin to operate against margin account
# # 'from': since, # default to 7 days before current time
# # 'to': self.milliseconds(), # default to current time
# }
#
request = self.prepare_request(market)
if limit is not None:
request['limit'] = limit # default 100, max 1000
if since is not None:
request['from'] = int(since / 1000)
# request['to'] = since + 7 * 24 * 60 * 60
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotGetMyTrades',
# 'margin': 'publicMarginGetCurrencyPairs',
'swap': 'privateFuturesGetSettleMyTrades',
'futures': 'privateDeliveryGetSettleMyTrades',
})
response = getattr(self, method)(self.extend(request, params))
# SPOT
# [{
# id: "1851927191",
# create_time: "1634333360",
# create_time_ms: "1634333360359.901000",
# currency_pair: "BTC_USDT",
# side: "buy",
# role: "taker",
# amount: "0.0001",
# price: "62547.51",
# order_id: "93475897349",
# fee: "2e-07",
# fee_currency: "BTC",
# point_fee: "0",
# gt_fee: "0",
# }]
# Perpetual Swap
# [{
# size: "-13",
# order_id: "79723658958",
# id: "47612669",
# role: "taker",
# create_time: "1634600263.326",
# contract: "BTC_USDT",
# price: "61987.8",
# }]
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# public
#
# {
# "id": "1334253759",
# "create_time": "1626342738",
# "create_time_ms": "1626342738331.497000",
# "currency_pair": "BTC_USDT",
# "side": "sell",
# "amount": "0.0022",
# "price": "32452.16"
# }
#
# private
#
# {
# "id": "218087755",
# "create_time": "1578958740",
# "create_time_ms": "1578958740122.710000",
# "currency_pair": "BTC_USDT",
# "side": "sell",
# "role": "taker",
# "amount": "0.0004",
# "price": "8112.77",
# "order_id": "8445563839",
# "fee": "0.006490216",
# "fee_currency": "USDT",
# "point_fee": "0",
# "gt_fee": "0"
# }
#
id = self.safe_string(trade, 'id')
timestampStringContract = self.safe_string(trade, 'create_time')
timestampString = self.safe_string_2(trade, 'create_time_ms', 'time', timestampStringContract)
timestamp = None
if timestampString.find('.') > 0:
milliseconds = timestampString.split('.')
timestamp = int(milliseconds[0])
if market['contract']:
timestamp = timestamp * 1000
marketId = self.safe_string_2(trade, 'currency_pair', 'contract')
symbol = self.safe_symbol(marketId, market)
amountString = self.safe_string_2(trade, 'amount', 'size')
priceString = self.safe_string(trade, 'price')
costString = Precise.string_abs(Precise.string_mul(amountString, priceString))
price = self.parse_number(priceString)
cost = self.parse_number(costString)
contractSide = 'sell' if Precise.string_lt(amountString, '0') else 'buy'
amountString = Precise.string_abs(amountString)
amount = self.parse_number(amountString)
side = self.safe_string(trade, 'side', contractSide)
orderId = self.safe_string(trade, 'order_id')
gtFee = self.safe_string(trade, 'gt_fee')
feeCurrency = None
feeCost = None
if gtFee == '0':
feeCurrency = self.safe_string(trade, 'fee_currency')
feeCost = self.safe_number(trade, 'fee')
else:
feeCurrency = 'GT'
feeCost = self.parse_number(gtFee)
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
takerOrMaker = self.safe_string(trade, 'role')
return {
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit
if since is not None:
request['from'] = int(since / 1000)
            request['to'] = int(since / 1000) + 30 * 24 * 60 * 60
response = self.privateWalletGetDeposits(self.extend(request, params))
return self.parse_transactions(response, currency)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit
if since is not None:
request['from'] = int(since / 1000)
            request['to'] = int(since / 1000) + 30 * 24 * 60 * 60
response = self.privateWalletGetWithdrawals(self.extend(request, params))
return self.parse_transactions(response, currency)
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'address': address,
'amount': self.currency_to_precision(code, amount),
}
if tag is not None:
request['memo'] = tag
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string_upper(params, 'network') # self line allows the user to specify either ERC20 or ETH
network = self.safe_string_lower(networks, network, network) # handle ETH>ERC20 alias
if network is not None:
request['chain'] = network
params = self.omit(params, 'network')
response = self.privateWithdrawalsPost(self.extend(request, params))
#
# {
# "id": "w13389675",
# "currency": "USDT",
# "amount": "50",
# "address": "TUu2rLFrmzUodiWfYki7QCNtv1akL682p1",
# "memo": null
# }
#
currencyId = self.safe_string(response, 'currency')
id = self.safe_string(response, 'id')
return {
'info': response,
'id': id,
'code': self.safe_currency_code(currencyId),
'amount': self.safe_number(response, 'amount'),
'address': self.safe_string(response, 'address'),
'tag': self.safe_string(response, 'memo'),
}
def parse_transaction_status(self, status):
statuses = {
'PEND': 'pending',
'REQUEST': 'pending',
'DMOVE': 'pending',
'CANCEL': 'failed',
'DONE': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction_type(self, type):
types = {
'd': 'deposit',
'w': 'withdrawal',
}
return self.safe_string(types, type, type)
def parse_transaction(self, transaction, currency=None):
#
# deposits
#
# {
# "id": "d33361395",
# "currency": "USDT_TRX",
# "address": "TErdnxenuLtXfnMafLbfappYdHtnXQ5U4z",
# "amount": "100",
# "txid": "ae9374de34e558562fe18cbb1bf9ab4d9eb8aa7669d65541c9fa2a532c1474a0",
# "timestamp": "1626345819",
# "status": "DONE",
# "memo": ""
# }
#
# withdrawals
id = self.safe_string(transaction, 'id')
type = None
if id is not None:
type = self.parse_transaction_type(id[0])
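        # illustrative: ids look like "d33361395" for deposits and "w13389675" for withdrawals,
        # so the leading character is enough to recover the transaction type above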
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_number(transaction, 'amount')
txid = self.safe_string(transaction, 'txid')
rawStatus = self.safe_string(transaction, 'status')
status = self.parse_transaction_status(rawStatus)
address = self.safe_string(transaction, 'address')
fee = self.safe_number(transaction, 'fee')
tag = self.safe_string(transaction, 'memo')
if tag == '':
tag = None
timestamp = self.safe_timestamp(transaction, 'timestamp')
return {
'info': transaction,
'id': id,
'txid': txid,
'currency': code,
'amount': amount,
'address': address,
'tag': tag,
'status': status,
'type': type,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
contract = market['contract']
stopPrice = self.safe_number(params, 'stopPrice')
methodTail = 'Orders'
reduceOnly = self.safe_value_2(params, 'reduce_only', 'reduceOnly')
defaultTimeInForce = self.safe_value_2(params, 'tif', 'time_in_force', 'gtc')
timeInForce = self.safe_value(params, 'timeInForce', defaultTimeInForce)
params = self.omit(params, ['stopPrice', 'reduce_only', 'reduceOnly', 'tif', 'time_in_force', 'timeInForce'])
isLimitOrder = (type == 'limit')
isMarketOrder = (type == 'market')
if isLimitOrder and price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument for ' + type + ' orders')
if contract:
amountToPrecision = self.amount_to_precision(symbol, amount)
signedAmount = Precise.string_neg(amountToPrecision) if (side == 'sell') else amountToPrecision
amount = int(signedAmount)
if isMarketOrder:
timeInForce = 'ioc'
price = 0
elif not isLimitOrder:
# Gateio doesn't have market orders for spot
raise InvalidOrder(self.id + ' createOrder() does not support ' + type + ' orders for ' + market['type'] + ' markets')
request = None
if stopPrice is None:
if contract:
# contract order
request = {
'contract': market['id'], # filled in prepareRequest above
'size': amount, # int64, positive = bid, negative = ask
# 'iceberg': 0, # int64, display size for iceberg order, 0 for non-iceberg, note that you will have to pay the taker fee for the hidden size
'price': self.price_to_precision(symbol, price), # 0 for market order with tif set as ioc
# 'close': False, # True to close the position, with size set to 0
                # 'reduce_only': False,  # set to True to make a reduce-only order
# 'tif': 'gtc', # gtc, ioc, poc PendingOrCancelled == postOnly order
# 'text': clientOrderId, # 't-abcdef1234567890',
# 'auto_size': '', # close_long, close_short, note size also needs to be set to 0
'settle': market['settleId'], # filled in prepareRequest above
}
if reduceOnly is not None:
request['reduce_only'] = reduceOnly
if timeInForce is not None:
request['tif'] = timeInForce
else:
options = self.safe_value(self.options, 'createOrder', {})
defaultAccount = self.safe_string(options, 'account', 'spot')
account = self.safe_string(params, 'account', defaultAccount)
params = self.omit(params, 'account')
# spot order
request = {
# 'text': clientOrderId, # 't-abcdef1234567890',
'currency_pair': market['id'], # filled in prepareRequest above
'type': type,
'account': account, # 'spot', 'margin', 'cross_margin'
'side': side,
'amount': self.amount_to_precision(symbol, amount),
'price': self.price_to_precision(symbol, price),
# 'time_in_force': 'gtc', # gtc, ioc, poc PendingOrCancelled == postOnly order
# 'iceberg': 0, # amount to display for the iceberg order, null or 0 for normal orders, set to -1 to hide the order completely
# 'auto_borrow': False, # used in margin or cross margin trading to allow automatic loan of insufficient amount if balance is not enough
                # 'auto_repay': False,  # automatic repayment for automatic borrow loan generated by cross margin order, disabled by default
}
if timeInForce is not None:
request['time_in_force'] = timeInForce
clientOrderId = self.safe_string_2(params, 'text', 'clientOrderId')
if clientOrderId is not None:
# user-defined, must follow the rules if not empty
# prefixed with t-
# no longer than 28 bytes without t- prefix
# can only include 0-9, A-Z, a-z, underscores(_), hyphens(-) or dots(.)
if len(clientOrderId) > 28:
raise BadRequest(self.id + ' createOrder() clientOrderId or text param must be up to 28 characters')
params = self.omit(params, ['text', 'clientOrderId'])
if clientOrderId[0] != 't':
clientOrderId = 't-' + clientOrderId
request['text'] = clientOrderId
else:
if contract:
# contract conditional order
rule = 1 if (side == 'sell') else 2
request = {
'initial': {
'contract': market['id'],
'size': amount, # positive = buy, negative = sell, set to 0 to close the position
'price': self.price_to_precision(symbol, price), # set to 0 to use market price
# 'close': False, # set to True if trying to close the position
# 'tif': 'gtc', # gtc, ioc, if using market price, only ioc is supported
# 'text': clientOrderId, # web, api, app
# 'reduce_only': False,
},
'trigger': {
# 'strategy_type': 0, # 0 = by price, 1 = by price gap, only 0 is supported currently
# 'price_type': 0, # 0 latest deal price, 1 mark price, 2 index price
'price': self.price_to_precision(symbol, stopPrice), # price or gap
'rule': rule, # 1 means price_type >= price, 2 means price_type <= price
# 'expiration': expiration, how many seconds to wait for the condition to be triggered before cancelling the order
},
'settle': market['settleId'],
}
expiration = self.safe_integer(params, 'expiration')
if expiration is not None:
request['trigger']['expiration'] = expiration
params = self.omit(params, 'expiration')
if reduceOnly is not None:
request['initial']['reduce_only'] = reduceOnly
if timeInForce is not None:
request['initial']['tif'] = timeInForce
else:
# spot conditional order
options = self.safe_value(self.options, 'createOrder', {})
defaultAccount = self.safe_string(options, 'account', 'normal')
account = self.safe_string(params, 'account', defaultAccount)
params = self.omit(params, 'account')
defaultExpiration = self.safe_integer(options, 'expiration')
expiration = self.safe_integer(params, 'expiration', defaultExpiration)
rule = '>=' if (side == 'sell') else '<='
request = {
'trigger': {
'price': self.price_to_precision(symbol, stopPrice),
'rule': rule, # >= triggered when market price larger than or equal to price field, <= triggered when market price less than or equal to price field
'expiration': expiration, # required, how long(in seconds) to wait for the condition to be triggered before cancelling the order
},
'put': {
'type': type,
'side': side,
'price': self.price_to_precision(symbol, price),
'amount': self.amount_to_precision(symbol, amount),
'account': account, # normal, margin
'time_in_force': timeInForce, # gtc, ioc for taker only
},
'market': market['id'],
}
methodTail = 'PriceOrders'
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotPost' + methodTail,
'margin': 'privateSpotPost' + methodTail,
'swap': 'privateFuturesPostSettle' + methodTail,
            'futures': 'privateDeliveryPostSettle' + methodTail,
})
response = getattr(self, method)(self.deep_extend(request, params))
#
# spot
#
# {
# "id":"95282841887",
# "text":"apiv4",
# "create_time":"1637383156",
# "update_time":"1637383156",
# "create_time_ms":1637383156017,
# "update_time_ms":1637383156017,
# "status":"open",
# "currency_pair":"ETH_USDT",
# "type":"limit",
# "account":"spot",
# "side":"buy",
# "amount":"0.01",
# "price":"3500",
# "time_in_force":"gtc",
# "iceberg":"0",
# "left":"0.01",
# "fill_price":"0",
# "filled_total":"0",
# "fee":"0",
# "fee_currency":"ETH",
# "point_fee":"0",
# "gt_fee":"0",
# "gt_discount":false,
# "rebated_fee":"0",
# "rebated_fee_currency":"USDT"
# }
#
# spot conditional
#
# {"id":5891843}
#
# futures and perpetual swaps
#
# {
# "id":95938572327,
# "contract":"ETH_USDT",
# "mkfr":"0",
# "tkfr":"0.0005",
# "tif":"gtc",
# "is_reduce_only":false,
# "create_time":1637384600.08,
# "price":"3000",
# "size":1,
# "refr":"0",
# "left":1,
# "text":"api",
# "fill_price":"0",
# "user":2436035,
# "status":"open",
# "is_liq":false,
# "refu":0,
# "is_close":false,
# "iceberg":0
# }
#
# futures and perpetual swaps conditionals
#
# {"id":7615567}
#
return self.parse_order(response, market)
def parse_order_status(self, status):
statuses = {
'filled': 'closed',
'cancelled': 'canceled',
'liquidated': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder, spot
#
# {
# "id": "62364648575",
# "text": "apiv4",
# "create_time": "1626354834",
# "update_time": "1626354834",
# "create_time_ms": "1626354833544",
# "update_time_ms": "1626354833544",
# "status": "open",
# "currency_pair": "BTC_USDT",
# "type": "limit",
# "account": "spot",
# "side": "buy",
# "amount": "0.0001",
# "price": "30000",
# "time_in_force": "gtc",
# "iceberg": "0",
# "left": "0.0001",
# "fill_price": "0",
# "filled_total": "0",
# "fee": "0",
# "fee_currency": "BTC",
# "point_fee": "0",
# "gt_fee": "0",
# "gt_discount": True,
# "rebated_fee": "0",
# "rebated_fee_currency": "USDT"
# }
#
#
id = self.safe_string(order, 'id')
marketId = self.safe_string_2(order, 'currency_pair', 'contract')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_timestamp(order, 'create_time')
timestamp = self.safe_integer(order, 'create_time_ms', timestamp)
lastTradeTimestamp = self.safe_timestamp(order, 'update_time')
lastTradeTimestamp = self.safe_integer(order, 'update_time_ms', lastTradeTimestamp)
amountRaw = self.safe_string_2(order, 'amount', 'size')
amount = Precise.string_abs(amountRaw)
price = self.safe_string(order, 'price')
# average = self.safe_string(order, 'fill_price')
remaining = self.safe_string(order, 'left')
cost = self.safe_string(order, 'filled_total') # same as filled_price
rawStatus = None
side = None
contract = self.safe_value(market, 'contract')
if contract:
if amount:
side = 'buy' if Precise.string_gt(amountRaw, '0') else 'sell'
else:
side = None
rawStatus = self.safe_string(order, 'finish_as', 'open')
else:
# open, closed, cancelled - almost already ccxt unified!
rawStatus = self.safe_string(order, 'status')
side = self.safe_string(order, 'side')
status = self.parse_order_status(rawStatus)
type = self.safe_string(order, 'type')
timeInForce = self.safe_string_upper_2(order, 'time_in_force', 'tif')
fees = []
gtFee = self.safe_number(order, 'gt_fee')
if gtFee:
fees.append({
'currency': 'GT',
'cost': gtFee,
})
fee = self.safe_number(order, 'fee')
if fee:
fees.append({
'currency': self.safe_currency_code(self.safe_string(order, 'fee_currency')),
'cost': fee,
})
rebate = self.safe_string(order, 'rebated_fee')
if rebate:
fees.append({
'currency': self.safe_currency_code(self.safe_string(order, 'rebated_fee_currency')),
'cost': self.parse_number(Precise.string_neg(rebate)),
})
mkfr = self.safe_number(order, 'mkfr')
tkfr = self.safe_number(order, 'tkfr')
if mkfr:
fees.append({
'currency': self.safe_currency_code(self.safe_string(order, 'settleId')),
'cost': mkfr,
})
if tkfr:
fees.append({
'currency': self.safe_currency_code(self.safe_string(market, 'settleId')),
'cost': tkfr,
})
return self.safe_order2({
'id': id,
'clientOrderId': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'average': None,
'amount': amount,
'cost': cost,
'filled': None,
'remaining': remaining,
'fee': None,
'fees': fees,
'trades': None,
'info': order,
}, market)
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
}
if market['spot'] or market['margin']:
request['currency_pair'] = market['id']
else:
request['settle'] = market['settleId']
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotGetOrdersOrderId',
# 'margin': 'publicMarginGetTickers',
'swap': 'privateFuturesGetSettleOrdersOrderId',
'futures': 'privateDeliveryGetSettlePriceOrdersOrderId',
})
response = getattr(self, method)(self.extend(request, params))
return self.parse_order(response, market)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchMarkets', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
        if (symbol is None and type == 'spot') or type == 'margin' or type == 'cross_margin':
request = {
# 'page': 1,
# 'limit': limit,
'account': type, # spot/margin(default), cross_margin
}
if limit is not None:
request['limit'] = limit
response = self.privateSpotGetOpenOrders(self.extend(request, params))
#
# [
# {
# "currency_pair": "ETH_BTC",
# "total": 1,
# "orders": [
# {
# "id": "12332324",
# "text": "t-123456",
# "create_time": "1548000000",
# "update_time": "1548000100",
# "currency_pair": "ETH_BTC",
# "status": "open",
# "type": "limit",
# "account": "spot",
# "side": "buy",
# "amount": "1",
# "price": "5.00032",
# "time_in_force": "gtc",
# "left": "0.5",
# "filled_total": "2.50016",
# "fee": "0.005",
# "fee_currency": "ETH",
# "point_fee": "0",
# "gt_fee": "0",
# "gt_discount": False,
# "rebated_fee": "0",
# "rebated_fee_currency": "BTC"
# }
# ]
# },
# ...
# ]
#
allOrders = []
for i in range(0, len(response)):
entry = response[i]
orders = self.safe_value(entry, 'orders', [])
parsed = self.parse_orders(orders, None, since, limit)
allOrders = self.array_concat(allOrders, parsed)
return self.filter_by_since_limit(allOrders, since, limit)
return self.fetch_orders_by_status('open', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_status('finished', symbol, since, limit, params)
def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):
if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOrdersByStatus() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = self.prepare_request(market)
request['status'] = status
if limit is not None:
request['limit'] = limit
if since is not None and (market['spot'] or market['margin']):
request['start'] = int(since / 1000)
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotGetOrders',
'margin': 'privateSpotGetOrders',
'swap': 'privateFuturesGetSettleOrders',
'futures': 'privateDeliveryGetSettleOrders',
})
if market['type'] == 'margin' or market['type'] == 'cross_margin':
request['account'] = market['type']
response = getattr(self, method)(self.extend(request, params))
# SPOT
# {
# "id":"8834234273",
# "text": "3",
# "create_time": "1635406193",
# "update_time": "1635406193",
# "create_time_ms": 1635406193361,
# "update_time_ms": 1635406193361,
# "status": "closed",
# "currency_pair": "BTC_USDT",
# "type": "limit",
# "account": "spot",
# "side": "sell",
# "amount": "0.0002",
# "price": "58904.01",
# "time_in_force":"gtc",
# "iceberg": "0",
# "left": "0.0000",
# "fill_price": "11.790516",
# "filled_total": "11.790516",
# "fee": "0.023581032",
# "fee_currency": "USDT",
# "point_fee": "0",
# "gt_fee": "0",
# "gt_discount": False,
# "rebated_fee_currency": "BTC"
# }
# Perpetual Swap
# {
# "status": "finished",
# "size":-1,
# "left":0,
# "id":82750739203,
# "is_liq":false,
# "is_close":false,
# "contract": "BTC_USDT",
# "text": "web",
# "fill_price": "60721.3",
# "finish_as": "filled",
# "iceberg":0,
# "tif": "ioc",
# "is_reduce_only":true,
# "create_time": 1635403475.412,
# "finish_time": 1635403475.4127,
# "price": "0"
# }
return self.parse_orders(response, market, since, limit)
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
            raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
}
if market['contract']:
request['settle'] = market['settleId']
else:
request['currency_pair'] = market['id']
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotDeleteOrdersOrderId',
'margin': 'privateSpotDeleteOrdersOrderId',
'swap': 'privateFuturesDeleteSettleOrdersOrderId',
'futures': 'privateDeliveryDeleteSettleOrdersOrderId',
})
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "id":"95282841887",
# "text":"apiv4",
# "create_time":"1637383156",
# "update_time":"1637383235",
# "create_time_ms":1637383156017,
# "update_time_ms":1637383235085,
# "status":"cancelled",
# "currency_pair":"ETH_USDT",
# "type":"limit",
# "account":"spot",
# "side":"buy",
# "amount":"0.01",
# "price":"3500",
# "time_in_force":"gtc",
# "iceberg":"0",
# "left":"0.01",
# "fill_price":"0",
# "filled_total":"0",
# "fee":"0",
# "fee_currency":"ETH",
# "point_fee":"0",
# "gt_fee":"0",
# "gt_discount":false,
# "rebated_fee":"0",
# "rebated_fee_currency":"USDT"
# }
#
# spot conditional
#
# {
# "market":"ETH_USDT",
# "user":2436035,
# "trigger":{
# "price":"3500",
# "rule":"\u003c=",
# "expiration":86400
# },
# "put":{
# "type":"limit",
# "side":"buy",
# "price":"3500",
# "amount":"0.01000000000000000000",
# "account":"normal",
# "time_in_force":"gtc"
# },
# "id":5891843,
# "ctime":1637382379,
# "ftime":1637382673,
# "status":"canceled"
# }
#
# perpetual swaps
#
# {
# id: "82241928192",
# contract: "BTC_USDT",
# mkfr: "0",
# tkfr: "0.0005",
# tif: "gtc",
# is_reduce_only: False,
# create_time: "1635196145.06",
# finish_time: "1635196233.396",
# price: "61000",
# size: "4",
# refr: "0",
# left: "4",
# text: "web",
# fill_price: "0",
# user: "6693577",
# finish_as: "cancelled",
# status: "finished",
# is_liq: False,
# refu: "0",
# is_close: False,
# iceberg: "0",
# }
#
return self.parse_order(response, market)
def transfer(self, code, amount, fromAccount, toAccount, params={}):
self.load_markets()
currency = self.currency(code)
accountsByType = self.safe_value(self.options, 'accountsByType', {})
fromId = self.safe_string(accountsByType, fromAccount, fromAccount)
toId = self.safe_string(accountsByType, toAccount, toAccount)
if fromId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' fromAccount must be one of ' + ', '.join(keys))
if toId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' toAccount must be one of ' + ', '.join(keys))
truncated = self.currency_to_precision(code, amount)
request = {
'currency': currency['id'],
'from': fromId,
'to': toId,
'amount': truncated,
}
if (toId == 'futures') or (toId == 'delivery'):
request['settle'] = currency['id']
response = self.privateWalletPostTransfers(self.extend(request, params))
#
# according to the docs
#
# {
# "currency": "BTC",
# "from": "spot",
# "to": "margin",
# "amount": "1",
# "currency_pair": "BTC_USDT"
# }
#
# actual response
#
# POST https://api.gateio.ws/api/v4/wallet/transfers 204 No Content
#
return {
'info': response,
'from': fromId,
'to': toId,
'amount': truncated,
'code': code,
}
def set_leverage(self, leverage, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
# WARNING: THIS WILL INCREASE LIQUIDATION PRICE FOR OPEN ISOLATED LONG POSITIONS
# AND DECREASE LIQUIDATION PRICE FOR OPEN ISOLATED SHORT POSITIONS
if (leverage < 0) or (leverage > 100):
            raise BadRequest(self.id + ' leverage should be between 0 and 100')
self.load_markets()
market = self.market(symbol)
method = self.get_supported_mapping(market['type'], {
'swap': 'privateFuturesPostSettlePositionsContractLeverage',
'futures': 'privateDeliveryPostSettlePositionsContractLeverage',
})
request = self.prepare_request(market)
request['query'] = {
'leverage': str(leverage),
}
if 'cross_leverage_limit' in params:
if leverage != 0:
                raise BadRequest(self.id + ' cross_leverage_limit is only valid when leverage is 0')
request['cross_leverage_limit'] = str(params['cross_leverage_limit'])
params = self.omit(params, 'cross_leverage_limit')
response = getattr(self, method)(self.extend(request, params))
#
# {
# "value":"0",
# "leverage":"5",
# "mode":"single",
# "realised_point":"0",
# "contract":"BTC_USDT",
# "entry_price":"0",
# "mark_price":"62035.86",
# "history_point":"0",
# "realised_pnl":"0",
# "close_order":null,
# "size":0,
# "cross_leverage_limit":"0",
# "pending_orders":0,
# "adl_ranking":6,
# "maintenance_rate":"0.005",
# "unrealised_pnl":"0",
# "user":2436035,
# "leverage_max":"100",
# "history_pnl":"0",
# "risk_limit":"1000000",
# "margin":"0",
# "last_close_pnl":"0",
# "liq_price":"0"
# }
#
return response
def sign(self, path, api=[], method='GET', params={}, headers=None, body=None):
authentication = api[0] # public, private
type = api[1] # spot, margin, futures, delivery
query = self.omit(params, self.extract_params(path))
path = self.implode_params(path, params)
        endPart = '' if (path == '') else ('/' + path)
entirePath = '/' + type + endPart
url = self.urls['api'][authentication] + entirePath
if authentication == 'public':
if query:
url += '?' + self.urlencode(query)
else:
queryString = ''
if (method == 'GET') or (method == 'DELETE'):
if query:
queryString = self.urlencode(query)
url += '?' + queryString
else:
urlQueryParams = self.safe_value(query, 'query', {})
if urlQueryParams:
queryString = self.urlencode(urlQueryParams)
url += '?' + queryString
query = self.omit(query, 'query')
body = self.json(query)
bodyPayload = '' if (body is None) else body
bodySignature = self.hash(self.encode(bodyPayload), 'sha512')
timestamp = self.seconds()
timestampString = str(timestamp)
signaturePath = '/api/' + self.version + entirePath
payloadArray = [method.upper(), signaturePath, queryString, bodySignature, timestampString]
# eslint-disable-next-line quotes
payload = "\n".join(payloadArray)
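            # illustrative signed string, one field per line:
            #   GET
            #   /api/v4/spot/orders
            #   currency_pair=BTC_USDT&status=open
            #   <sha512 hex digest of the request body, or of '' for GET/DELETE>
            #   <unix timestamp in seconds>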
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha512)
headers = {
'KEY': self.apiKey,
'Timestamp': timestampString,
'SIGN': signature,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
#
# {"label":"ORDER_NOT_FOUND","message":"Order not found"}
# {"label":"INVALID_PARAM_VALUE","message":"invalid argument: status"}
# {"label":"INVALID_PARAM_VALUE","message":"invalid argument: Trigger.rule"}
# {"label":"INVALID_PARAM_VALUE","message":"invalid argument: trigger.expiration invalid range"}
# {"label":"INVALID_ARGUMENT","detail":"invalid size"}
#
label = self.safe_string(response, 'label')
if label is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], label, feedback)
raise ExchangeError(feedback)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py
|
import json
import os
from dotenv import load_dotenv, find_dotenv
from watson_developer_cloud import ConversationV1
from watson_developer_cloud import ToneAnalyzerV3
# import tone detection
import tone_detection
# load the .env file containing your environment variables for the required
# services (conversation and tone)
load_dotenv(find_dotenv())
# replace with your own conversation credentials or put them in a .env file
conversation = ConversationV1(
username=os.environ.get('CONVERSATION_USERNAME') or 'YOUR SERVICE NAME',
password=os.environ.get('CONVERSATION_PASSWORD') or 'YOUR PASSWORD',
version='2016-09-20')
# replace with your own tone analyzer credentials
tone_analyzer = ToneAnalyzerV3(
username=os.environ.get('TONE_ANALYZER_USERNAME') or 'YOUR SERVICE NAME',
    password=os.environ.get('TONE_ANALYZER_PASSWORD') or 'YOUR PASSWORD',
version='2016-02-11')
# replace with your own workspace_id
workspace_id = os.environ.get('WORKSPACE_ID') or 'YOUR WORKSPACE ID'
# This example stores tone for each user utterance in conversation context.
# Change this to false, if you do not want to maintain history
maintainToneHistoryInContext = True
# Payload for the Watson Conversation Service
# user input text required - replace "I am happy" with user input text.
payload = {
'workspace_id': workspace_id,
'input': {
'text': "I am happy"
}
}
def invokeToneConversation(payload, maintainToneHistoryInContext):
"""
    invokeToneConversation calls the Tone Analyzer service to get the
tone information for the user's input text (input['text'] in the payload
json object), adds/updates the user's tone in the payload's context,
and sends the payload to the
conversation service to get a response which is printed to screen.
:param payload: a json object containing the basic information needed to
converse with the Conversation Service's message endpoint.
    :param maintainToneHistoryInContext: when True, the user's tone history
        is kept in the conversation context across turns.
    Note: the print statement below can be replaced with application-specific
        code to process the response object returned by the Conversation Service.
"""
tone = tone_analyzer.tone(text=payload['input']['text'])
conversation_payload = tone_detection.\
updateUserTone(payload, tone, maintainToneHistoryInContext)
response = conversation.message(workspace_id=workspace_id,
message_input=conversation_payload['input'],
context=conversation_payload['context'])
print(json.dumps(response, indent=2))
# synchronous call to conversation with tone included in the context
invokeToneConversation(payload, maintainToneHistoryInContext)
|
[] |
[] |
[
"CONVERSATION_PASSWORD",
"TONE_ANALYZER_PASSWORD",
"TONE_ANALYZER_USERNAME",
"WORKSPACE_ID",
"CONVERSATION_USERNAME"
] |
[]
|
["CONVERSATION_PASSWORD", "TONE_ANALYZER_PASSWORD", "TONE_ANALYZER_USERNAME", "WORKSPACE_ID", "CONVERSATION_USERNAME"]
|
python
| 5 | 0 | |
script/hassfest/requirements.py
|
"""Validate requirements."""
from __future__ import annotations
from collections import deque
import json
import operator
import os
import re
import subprocess
import sys
from awesomeversion import AwesomeVersion, AwesomeVersionStrategy
from stdlib_list import stdlib_list
from tqdm import tqdm
from homeassistant.const import REQUIRED_PYTHON_VER
import homeassistant.util.package as pkg_util
from script.gen_requirements_all import COMMENT_REQUIREMENTS
from .model import Config, Integration
IGNORE_PACKAGES = {
commented.lower().replace("_", "-") for commented in COMMENT_REQUIREMENTS
}
PACKAGE_REGEX = re.compile(r"^(?:--.+\s)?([-_\.\w\d]+).*==.+$")
PIP_REGEX = re.compile(r"^(--.+\s)?([-_\.\w\d]+.*(?:==|>=|<=|~=|!=|<|>|===)?.*$)")
SUPPORTED_PYTHON_TUPLES = [
REQUIRED_PYTHON_VER[:2],
tuple(map(operator.add, REQUIRED_PYTHON_VER, (0, 1, 0)))[:2],
]
SUPPORTED_PYTHON_VERSIONS = [
".".join(map(str, version_tuple)) for version_tuple in SUPPORTED_PYTHON_TUPLES
]
STD_LIBS = {version: set(stdlib_list(version)) for version in SUPPORTED_PYTHON_VERSIONS}
PIPDEPTREE_CACHE = None
IGNORE_VIOLATIONS = {
# Still has standard library requirements.
"acmeda",
"blink",
"ezviz",
"hdmi_cec",
"juicenet",
"lupusec",
"rainbird",
"slide",
"suez_water",
}
def normalize_package_name(requirement: str) -> str:
"""Return a normalized package name from a requirement string."""
match = PACKAGE_REGEX.search(requirement)
if not match:
return ""
# pipdeptree needs lowercase and dash instead of underscore as separator
package = match.group(1).lower().replace("_", "-")
return package
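    # illustrative: normalize_package_name("stdlib_list==0.8.0") -> "stdlib-list",
    # while a requirement without a pinned "==" version fails the regex and yields ""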
def validate(integrations: dict[str, Integration], config: Config):
"""Handle requirements for integrations."""
# Check if we are doing format-only validation.
if not config.requirements:
for integration in integrations.values():
validate_requirements_format(integration)
return
ensure_cache()
# check for incompatible requirements
disable_tqdm = config.specific_integrations or os.environ.get("CI", False)
for integration in tqdm(integrations.values(), disable=disable_tqdm):
if not integration.manifest:
continue
validate_requirements(integration)
def validate_requirements_format(integration: Integration) -> bool:
"""Validate requirements format.
Returns if valid.
"""
start_errors = len(integration.errors)
for req in integration.requirements:
if " " in req:
integration.add_error(
"requirements",
f'Requirement "{req}" contains a space',
)
continue
pkg, sep, version = req.partition("==")
if not sep and integration.core:
integration.add_error(
"requirements",
f'Requirement {req} need to be pinned "<pkg name>==<version>".',
)
continue
if AwesomeVersion(version).strategy == AwesomeVersionStrategy.UNKNOWN:
integration.add_error(
"requirements",
f"Unable to parse package version ({version}) for {pkg}.",
)
continue
return len(integration.errors) == start_errors
def validate_requirements(integration: Integration):
"""Validate requirements."""
if not validate_requirements_format(integration):
return
# Some integrations have not been fixed yet so are allowed to have violations.
if integration.domain in IGNORE_VIOLATIONS:
return
integration_requirements = set()
integration_packages = set()
for req in integration.requirements:
package = normalize_package_name(req)
if not package:
integration.add_error(
"requirements",
f"Failed to normalize package name from requirement {req}",
)
return
if package in IGNORE_PACKAGES:
continue
integration_requirements.add(req)
integration_packages.add(package)
if integration.disabled:
return
install_ok = install_requirements(integration, integration_requirements)
if not install_ok:
return
all_integration_requirements = get_requirements(integration, integration_packages)
if integration_requirements and not all_integration_requirements:
integration.add_error(
"requirements",
f"Failed to resolve requirements {integration_requirements}",
)
return
# Check for requirements incompatible with standard library.
for version, std_libs in STD_LIBS.items():
for req in all_integration_requirements:
if req in std_libs:
integration.add_error(
"requirements",
f"Package {req} is not compatible with Python {version} standard library",
)
def ensure_cache():
"""Ensure we have a cache of pipdeptree.
{
"flake8-docstring": {
"key": "flake8-docstrings",
"package_name": "flake8-docstrings",
"installed_version": "1.5.0"
"dependencies": {"flake8"}
}
}
"""
global PIPDEPTREE_CACHE
if PIPDEPTREE_CACHE is not None:
return
cache = {}
for item in json.loads(
subprocess.run(
["pipdeptree", "-w", "silence", "--json"],
check=True,
capture_output=True,
text=True,
).stdout
):
cache[item["package"]["key"]] = {
**item["package"],
"dependencies": {dep["key"] for dep in item["dependencies"]},
}
PIPDEPTREE_CACHE = cache
def get_requirements(integration: Integration, packages: set[str]) -> set[str]:
"""Return all (recursively) requirements for an integration."""
ensure_cache()
all_requirements = set()
to_check = deque(packages)
while to_check:
package = to_check.popleft()
if package in all_requirements:
continue
all_requirements.add(package)
item = PIPDEPTREE_CACHE.get(package)
if item is None:
# Only warn if direct dependencies could not be resolved
if package in packages:
integration.add_error(
"requirements", f"Failed to resolve requirements for {package}"
)
continue
to_check.extend(item["dependencies"])
return all_requirements
def install_requirements(integration: Integration, requirements: set[str]) -> bool:
"""Install integration requirements.
Return True if successful.
"""
global PIPDEPTREE_CACHE
ensure_cache()
for req in requirements:
match = PIP_REGEX.search(req)
if not match:
integration.add_error(
"requirements",
f"Failed to parse requirement {req} before installation",
)
continue
install_args = match.group(1)
requirement_arg = match.group(2)
is_installed = False
normalized = normalize_package_name(requirement_arg)
if normalized and "==" in requirement_arg:
ver = requirement_arg.split("==")[-1]
item = PIPDEPTREE_CACHE.get(normalized)
is_installed = item and item["installed_version"] == ver
if not is_installed:
try:
is_installed = pkg_util.is_installed(req)
except ValueError:
is_installed = False
if is_installed:
continue
args = [sys.executable, "-m", "pip", "install", "--quiet"]
if install_args:
args.append(install_args)
args.append(requirement_arg)
try:
result = subprocess.run(args, check=True, capture_output=True, text=True)
except subprocess.SubprocessError:
integration.add_error(
"requirements",
f"Requirement {req} failed to install",
)
else:
# Clear the pipdeptree cache if something got installed
if "Successfully installed" in result.stdout:
PIPDEPTREE_CACHE = None
if integration.errors:
return False
return True
|
[] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
python
| 1 | 0 | |
tests/communityid_test.py
|
#! /usr/bin/env python
"""
Unit & functional tests for the Community ID package. Run with something like:
python -m unittest communityid_test
nose2 -C --coverage ../communityid --coverage-report term-missing communityid_test
You can also invoke this file directly.
"""
import os
import socket
import struct
import subprocess
import sys
import unittest
try:
import pylint.epylint
except ImportError:
pass # Pity!
LOCAL_DIR=os.path.dirname(__file__)
MODULE_DIR=os.path.abspath(os.path.join(LOCAL_DIR, '..'))
sys.path.insert(0, MODULE_DIR)
import communityid
import communityid.compat
class TestCommunityID(unittest.TestCase):
def setUp(self):
self.cids = [
communityid.CommunityID(),
communityid.CommunityID(use_base64=False),
communityid.CommunityID(seed=1),
]
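        # note: each flow-tuple test below lists three expected IDs, lining up
        # with these three configurations in order (base64 default, hex, seed=1)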
def assertEqualID(self, cft, correct_results):
"""
Helper for ID string correctness assertion. cft is a
        communityid.FlowTuple.
"""
# Create a list of tuples, each containing a CommunityID
# instance as first member, and the expected result as the
# second:
cid_result_pairs = zip(self.cids, correct_results)
for cid, correct_res in cid_result_pairs:
res = cid.calc(cft)
self.assertEqual(res, correct_res,
msg='%s: %s result is %s, should be %s, err: %s'
% (cid, cft, res, correct_res, cid.get_error()))
def verify_full_tuples(self, tuples, high_level_func, proto_num, af_family):
"""
Verifies for each of the provided flow tuples and expected
Community ID strings that the computation produces the
expected result, trying the various supported types for the
flow tuple coordinates.
"""
for tpl in tuples:
# Using the convenience wrapper:
cft = high_level_func(tpl[0], tpl[1], tpl[2], tpl[3])
self.assertEqualID(cft, tpl[4:])
# Using specific protocol number:
cft = communityid.FlowTuple(proto_num, tpl[0], tpl[1], tpl[2], tpl[3])
self.assertEqualID(cft, tpl[4:])
# Using packed NBO, as when grabbing from a packet header:
cft = communityid.FlowTuple(
proto_num,
socket.inet_pton(af_family, tpl[0]),
socket.inet_pton(af_family, tpl[1]),
struct.pack('!H', tpl[2]),
struct.pack('!H', tpl[3]))
self.assertEqualID(cft, tpl[4:])
# Using a mix, ewww.
cft = communityid.FlowTuple(
proto_num,
socket.inet_pton(af_family, tpl[0]),
socket.inet_pton(af_family, tpl[1]),
tpl[2], tpl[3])
self.assertEqualID(cft, tpl[4:])
# Using Python 3.3+'s ipaddress types or their 2.x
# backport:
try:
cft = communityid.FlowTuple(
proto_num,
communityid.compat.ip_address(tpl[0]),
communityid.compat.ip_address(tpl[1]),
tpl[2], tpl[3])
self.assertEqualID(cft, tpl[4:])
except RuntimeError:
pass
def verify_short_tuples(self, tuples, high_level_func, proto_num, af_family):
"""
Similar to verify_full_tuples, but for the IP-only tuple scenario.
"""
for tpl in tuples:
# Using the convenience wrapper:
cft = high_level_func(tpl[0], tpl[1], proto_num)
self.assertEqualID(cft, tpl[2:])
# Using specific protocol number:
cft = communityid.FlowTuple(proto_num, tpl[0], tpl[1])
self.assertEqualID(cft, tpl[2:])
# Using packed NBO, as when grabbing from a packet header:
cft = communityid.FlowTuple(
proto_num,
socket.inet_pton(af_family, tpl[0]),
socket.inet_pton(af_family, tpl[1]))
self.assertEqualID(cft, tpl[2:])
# Using a mix, ewww.
cft = communityid.FlowTuple(
proto_num, tpl[0], socket.inet_pton(af_family, tpl[1]))
self.assertEqualID(cft, tpl[2:])
# Using Python 3.3+'s ipaddress types or their 2.x
# backport:
try:
cft = communityid.FlowTuple(
proto_num,
communityid.compat.ip_address(tpl[0]),
communityid.compat.ip_address(tpl[1]))
self.assertEqualID(cft, tpl[2:])
except RuntimeError:
pass
# All of the following tests would be tidier with the DDT module,
# but I'm reluctant to add third-party dependencies for
# testing. --cpk
def test_icmp(self):
self.verify_full_tuples(
[
['192.168.0.89', '192.168.0.1', 8, 0,
'1:X0snYXpgwiv9TZtqg64sgzUn6Dk=',
'1:5f4b27617a60c22bfd4d9b6a83ae2c833527e839',
'1:03g6IloqVBdcZlPyX8r0hgoE7kA='],
['192.168.0.1', '192.168.0.89', 0, 8,
'1:X0snYXpgwiv9TZtqg64sgzUn6Dk=',
'1:5f4b27617a60c22bfd4d9b6a83ae2c833527e839',
'1:03g6IloqVBdcZlPyX8r0hgoE7kA='],
# This is correct: message type 20 (experimental) isn't
# one we consider directional, so the message code ends up
# in the hash computation, and thus two different IDs result:
['192.168.0.89', '192.168.0.1', 20, 0,
'1:3o2RFccXzUgjl7zDpqmY7yJi8rI=',
'1:de8d9115c717cd482397bcc3a6a998ef2262f2b2',
'1:lCXHHxavE1Vq3oX9NH5ladQg02o='],
['192.168.0.89', '192.168.0.1', 20, 1,
'1:tz/fHIDUHs19NkixVVoOZywde+I=',
'1:b73fdf1c80d41ecd7d3648b1555a0e672c1d7be2',
'1:Ie3wmFyxiEyikbsbcO03d2nh+PM='],
# Therefore the following does _not_ get treated as the
# reverse direction, but _does_ get treated the same as
# the first two tuples, because for message type 0 the
# code is currently ignored.
['192.168.0.1', '192.168.0.89', 0, 20,
'1:X0snYXpgwiv9TZtqg64sgzUn6Dk=',
'1:5f4b27617a60c22bfd4d9b6a83ae2c833527e839',
'1:03g6IloqVBdcZlPyX8r0hgoE7kA='],
],
communityid.FlowTuple.make_icmp,
communityid.PROTO_ICMP,
socket.AF_INET)
def test_icmp6(self):
self.verify_full_tuples(
[
['fe80::200:86ff:fe05:80da', 'fe80::260:97ff:fe07:69ea', 135, 0,
'1:dGHyGvjMfljg6Bppwm3bg0LO8TY=',
'1:7461f21af8cc7e58e0e81a69c26ddb8342cef136',
'1:kHa1FhMYIT6Ym2Vm2AOtoOARDzY='],
['fe80::260:97ff:fe07:69ea', 'fe80::200:86ff:fe05:80da', 136, 0,
'1:dGHyGvjMfljg6Bppwm3bg0LO8TY=',
'1:7461f21af8cc7e58e0e81a69c26ddb8342cef136',
'1:kHa1FhMYIT6Ym2Vm2AOtoOARDzY='],
['3ffe:507:0:1:260:97ff:fe07:69ea', '3ffe:507:0:1:200:86ff:fe05:80da', 3, 0,
'1:NdobDX8PQNJbAyfkWxhtL2Pqp5w=',
'1:35da1b0d7f0f40d25b0327e45b186d2f63eaa79c',
'1:OlOWx9psIbBFi7lOCw/4MhlKR9M='],
['3ffe:507:0:1:200:86ff:fe05:80da', '3ffe:507:0:1:260:97ff:fe07:69ea', 3, 0,
'1:/OGBt9BN1ofenrmSPWYicpij2Vc=',
'1:fce181b7d04dd687de9eb9923d66227298a3d957',
'1:Ij4ZxnC87/MXzhOjvH2vHu7LRmE='],
],
communityid.FlowTuple.make_icmp6,
communityid.PROTO_ICMP6,
socket.AF_INET6)
def test_sctp(self):
self.verify_full_tuples(
[
['192.168.170.8', '192.168.170.56', 7, 80,
'1:jQgCxbku+pNGw8WPbEc/TS/uTpQ=',
'1:8d0802c5b92efa9346c3c58f6c473f4d2fee4e94',
'1:Y1/0jQg6e+I3ZwZZ9LP65DNbTXU='],
['192.168.170.56', '192.168.170.8', 80, 7,
'1:jQgCxbku+pNGw8WPbEc/TS/uTpQ=',
'1:8d0802c5b92efa9346c3c58f6c473f4d2fee4e94',
'1:Y1/0jQg6e+I3ZwZZ9LP65DNbTXU='],
],
communityid.FlowTuple.make_sctp,
communityid.PROTO_SCTP,
socket.AF_INET)
def test_tcp(self):
self.verify_full_tuples(
[
['128.232.110.120', '66.35.250.204', 34855, 80,
'1:LQU9qZlK+B5F3KDmev6m5PMibrg=',
'1:2d053da9994af81e45dca0e67afea6e4f3226eb8',
'1:3V71V58M3Ksw/yuFALMcW0LAHvc='],
['66.35.250.204', '128.232.110.120', 80, 34855,
'1:LQU9qZlK+B5F3KDmev6m5PMibrg=',
'1:2d053da9994af81e45dca0e67afea6e4f3226eb8',
'1:3V71V58M3Ksw/yuFALMcW0LAHvc='],
# Verify https://github.com/corelight/pycommunityid/issues/3
['10.0.0.1', '10.0.0.2', 10, 11569,
'1:SXBGMX1lBOwhhoDrZynfROxnhnM=',
'1:497046317d6504ec218680eb6729df44ec678673',
'1:HmBRGR+fUyXF4t8WEtal7Y0gEAo='],
],
communityid.FlowTuple.make_tcp,
communityid.PROTO_TCP,
socket.AF_INET)
def test_udp(self):
self.verify_full_tuples(
[
['192.168.1.52', '8.8.8.8', 54585, 53,
'1:d/FP5EW3wiY1vCndhwleRRKHowQ=',
'1:77f14fe445b7c22635bc29dd87095e451287a304',
'1:Q9We8WO3piVF8yEQBNJF4uiSVrI='],
['8.8.8.8', '192.168.1.52', 53, 54585,
'1:d/FP5EW3wiY1vCndhwleRRKHowQ=',
'1:77f14fe445b7c22635bc29dd87095e451287a304',
'1:Q9We8WO3piVF8yEQBNJF4uiSVrI='],
],
communityid.FlowTuple.make_udp,
communityid.PROTO_UDP,
socket.AF_INET)
def test_ip(self):
self.verify_short_tuples(
[
['10.1.24.4', '10.1.12.1',
'1:/nQI4Rh/TtY3mf0R2gJFBkVlgS4=',
'1:fe7408e1187f4ed63799fd11da0245064565812e',
'1:BK3BVW3U2eemuwVQVN3zd/GULno='],
['10.1.12.1', '10.1.24.4',
'1:/nQI4Rh/TtY3mf0R2gJFBkVlgS4=',
'1:fe7408e1187f4ed63799fd11da0245064565812e',
'1:BK3BVW3U2eemuwVQVN3zd/GULno='],
],
communityid.FlowTuple.make_ip,
46, socket.AF_INET)
def test_inputs(self):
# Need protocol
with self.assertRaises(communityid.FlowTupleError):
tpl = communityid.FlowTuple(
None, '1.2.3.4', '5.6.7.8')
# Need both IP addresses
with self.assertRaises(communityid.FlowTupleError):
tpl = communityid.FlowTuple(
communityid.PROTO_TCP, '1.2.3.4', None)
with self.assertRaises(communityid.FlowTupleError):
tpl = communityid.FlowTuple(
communityid.PROTO_TCP, None, '5.6.7.8')
# Need parseable IP addresses
with self.assertRaises(communityid.FlowTupleError):
tpl = communityid.FlowTuple(
communityid.PROTO_TCP, 'ohdear.com', '5.6.7.8')
with self.assertRaises(communityid.FlowTupleError):
tpl = communityid.FlowTuple(
communityid.PROTO_TCP, '1.2.3.4', 'ohdear.com')
# Need two valid ports
with self.assertRaises(communityid.FlowTupleError):
tpl = communityid.FlowTuple(
communityid.PROTO_TCP, '1.2.3.4', '5.6.7.8', 23, None)
with self.assertRaises(communityid.FlowTupleError):
tpl = communityid.FlowTuple(
communityid.PROTO_TCP, '1.2.3.4', '5.6.7.8', None, 23)
with self.assertRaises(communityid.FlowTupleError):
tpl = communityid.FlowTuple(
communityid.PROTO_TCP, '1.2.3.4', '5.6.7.8', "23/tcp", 23)
with self.assertRaises(communityid.FlowTupleError):
tpl = communityid.FlowTuple(
communityid.PROTO_TCP, '1.2.3.4', '5.6.7.8', 23, "23/tcp")
# Need ports with port-enabled protocol
with self.assertRaises(communityid.FlowTupleError):
tpl = communityid.FlowTuple(
communityid.PROTO_TCP, '1.2.3.4', '5.6.7.8')
@unittest.skipIf(sys.version_info[0] < 3, 'not supported in Python 2.x')
def test_inputs_py3(self):
# Python 3 allows us to distinguish strings and byte sequences,
# and the following test only applies to it.
with self.assertRaises(communityid.FlowTupleError):
tpl = communityid.FlowTuple(
communityid.PROTO_TCP, '1.2.3.4', '5.6.7.8', 23, "80")
def test_get_proto(self):
self.assertEqual(communityid.get_proto(23), 23)
self.assertEqual(communityid.get_proto("23"), 23)
self.assertEqual(communityid.get_proto("tcp"), 6)
self.assertEqual(communityid.get_proto("TCP"), 6)
self.assertEqual(communityid.get_proto("23/tcp"), None)
class LintCommunityID(unittest.TestCase):
def setUp(self):
if 'pylint.epylint' not in sys.modules:
self.skipTest('pylint module not available')
def test_linting(self):
rcfile = os.path.join(LOCAL_DIR, 'pylint.rc')
(out, _) = pylint.epylint.py_run('communityid --rcfile=' + rcfile, return_std=True)
for line in out.getvalue().splitlines():
if line.find('Your code has been') > 0:
print('\n' + line.strip())
break
self.assertTrue(out.getvalue().find(' error ') < 0,
msg='Pylint error: ' + out.getvalue())
class TestCommands(unittest.TestCase):
def setUp(self):
# Adjust the environment so it prioritizes our local module
# tree. This also makes the tests work before the module is
# installed.
self.env = os.environ.copy()
try:
ppath = self.env['PYTHONPATH']
ppath = MODULE_DIR + os.pathsep + ppath
except KeyError:
ppath = MODULE_DIR
self.env['PYTHONPATH'] = ppath
def _scriptpath(self, scriptname):
return os.path.abspath(os.path.join(LOCAL_DIR, '..', 'scripts', scriptname))
def _testfilepath(self, testfile):
return os.path.abspath(os.path.join(LOCAL_DIR, testfile))
def test_communityid(self):
out = subprocess.check_output(
[self._scriptpath('community-id'), 'tcp', '10.0.0.1', '10.0.0.2', '10', '20'],
env=self.env)
self.assertEqual(out, b'1:9j2Dzwrw7T9E+IZi4b4IVT66HBI=\n')
def _check_output_community_id_pcap(self, args):
try:
args = [self._scriptpath('community-id-pcap')] + args
return subprocess.check_output(args, env=self.env)
except subprocess.CalledProcessError as err:
if err.output.find(b'This needs the dpkt Python module') < 0:
raise
self.skipTest("This test requires dpkt")
def test_communityid_pcap(self):
# This only works if we have dpkt
out = self._check_output_community_id_pcap([self._testfilepath('tcp.pcap')])
first_line = out.decode('ascii').split('\n')[0].strip()
self.assertEqual(first_line, '1071580904.891921 | 1:LQU9qZlK+B5F3KDmev6m5PMibrg= | 128.232.110.120 66.35.250.204 6 34855 80')
def test_communityid_pcap_json(self):
out = self._check_output_community_id_pcap(['--json', self._testfilepath('tcp.pcap')])
self.assertEqual(out, b'[{"proto": 6, "saddr": "128.232.110.120", "daddr": "66.35.250.204", "sport": 34855, "dport": 80, "communityid": "1:LQU9qZlK+B5F3KDmev6m5PMibrg="}, {"proto": 6, "saddr": "66.35.250.204", "daddr": "128.232.110.120", "sport": 80, "dport": 34855, "communityid": "1:LQU9qZlK+B5F3KDmev6m5PMibrg="}]\n')
def test_communityid_tcpdump(self):
# This uses subprocess.check_output(..., input=...) which was added in 3.4:
if sys.version_info < (3, 4):
self.skipTest('Needs Python 3.4 or greater')
out = subprocess.check_output(
[self._scriptpath('community-id-tcpdump')], input=b'1071580904.891921 IP 128.232.110.120.34855 > 66.35.250.204.80: Flags [S], seq 3201037957, win 5840, options [mss 1460,sackOK,TS val 87269134 ecr 0,nop,wscale 0], length 0',
env=self.env)
first_line = out.decode('ascii').split('\n')[0].strip()
self.assertEqual(first_line, '1071580904.891921 IP 1:LQU9qZlK+B5F3KDmev6m5PMibrg= 128.232.110.120:34855 > 66.35.250.204.80: Flags [S], seq 3201037957, win 5840, options [mss 1460,sackOK,TS val 87269134 ecr 0,nop,wscale 0], length 0')
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/test/java/com/galiglobal/jreview/ListCommandTest.java
|
package com.galiglobal.jreview;
import org.junit.jupiter.api.Test;
import picocli.CommandLine;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class ListCommandTest {
private String username = System.getenv("JREVIEW_USERNAME");
private String token = System.getenv("JREVIEW_TOKEN");
@Test
public void testListSuccessful() throws IOException, InterruptedException {
PrintStream oldOut = System.out;
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try {
System.setOut(new PrintStream(baos));
int exitCode = new CommandLine(new ListCommand()).execute("-u ", username, "-t ", token,
"-r", "origin");
String expected = String.format("" +
"#6 Test 2 active Antón María Rodriguez Yuste" + "\n" +
"#5 Test 1 active Antón María Rodriguez Yuste" + "\n"
);
assertEquals(expected.trim(), baos.toString().trim());
assertEquals(0, exitCode);
} finally {
System.setOut(oldOut);
}
}
}
|
[
"\"JREVIEW_USERNAME\"",
"\"JREVIEW_TOKEN\""
] |
[] |
[
"JREVIEW_TOKEN",
"JREVIEW_USERNAME"
] |
[]
|
["JREVIEW_TOKEN", "JREVIEW_USERNAME"]
|
java
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'school_management_system.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/fixtures/app2/services/hello.py
|
#!/usr/bin/python
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
import os
# We can access the port the HTTP server has to listen on via the TASK_PORT env variable
PORT_NUMBER = int(os.environ['TASK_PORT'])
class myHandler(BaseHTTPRequestHandler):
def do_POST(self):
self.send_response(200)
self.send_header('Content-type','application/json')
self.end_headers()
# EXECUTE APPLICATION LOGIC HERE AND RETURN JSON
content_len = int(self.headers.getheader('content-length', 0))
post_body = self.rfile.read(content_len)
self.wfile.write('{"success" : true, "app": "python"}')
server = HTTPServer(('', PORT_NUMBER), myHandler)
server.serve_forever()
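# Illustrative only: how this fixture is typically exercised once TASK_PORT is
# set (the port value and the curl invocation below are examples, not part of
# the fixture itself):
#
#   $ TASK_PORT=8080 python hello.py &
#   $ curl -s -X POST -d '{}' http://localhost:8080/
#   {"success" : true, "app": "python"}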
|
[] |
[] |
[
"TASK_PORT"
] |
[]
|
["TASK_PORT"]
|
python
| 1 | 0 | |
tests/test_token_envrionment.py
|
"""unit test for token environemnt"""
from __future__ import annotations
import os
from wechaty_puppet_service.config import get_token
def test_service_token():
token = 'your-self-token'
os.environ['WECHATY_PUPPET_SERVICE_TOKEN'] = token
assert get_token() == token
def test_upper_token_name():
token = 'your-self-token'
os.environ['TOKEN'] = token
assert get_token() == token
def test_lower_token_name():
token = 'your-self-token'
os.environ['token'] = token
assert get_token() == token
def test_none_token():
del os.environ['WECHATY_PUPPET_SERVICE_TOKEN']
del os.environ['TOKEN']
del os.environ['token']
assert get_token() is None
|
[] |
[] |
[
"WECHATY_PUPPET_SERVICE_TOKEN",
"TOKEN",
"token"
] |
[]
|
["WECHATY_PUPPET_SERVICE_TOKEN", "TOKEN", "token"]
|
python
| 3 | 0 | |
auditing/datacollectors/TTNCollector.py
|
import os
import websocket
import threading
import json
import requests
from datetime import datetime, timedelta
import dateutil.parser
from time import sleep
import auditing.datacollectors.utils.PhyParser as phy_parser
from auditing.datacollectors.BaseCollector import BaseCollector
from auditing.datacollectors.utils.PacketPersistence import save, save_parsing_error, save_login_error, \
notify_test_event
account_login_url = os.environ[
'ACCOUNT_LOGIN_URL'] if 'ACCOUNT_LOGIN_URL' in os.environ else 'https://account.thethingsnetwork.org/api/v2/users/login'
login_url = os.environ['LOGIN_URL'] if 'LOGIN_URL' in os.environ else 'https://console.thethingsnetwork.org/login'
access_token_url = os.environ[
'ACCESS_TOKEN_URL'] if 'ACCESS_TOKEN_URL' in os.environ else 'https://console.thethingsnetwork.org/refresh'
ws_url = os.environ[
'WS_URL'] if 'WS_URL' in os.environ else 'wss://console.thethingsnetwork.org/api/events/644/lta0xryg/websocket?version=v2.6.11'
class TTNCollector(BaseCollector):
"""
This collector establishes a connection to a thethingsnetwork.com account and
retrieves data from the https://console.thethingsnetwork.org/gateways/eui-THEGATEWAYID/traffic
endpoint using websockets.
The steps to retrieve gateway payloads:
1- Get the access token
2- Using the access token and the Gateway ID (see below for its format), subscribe
to the web socket.
3- Handle messages with the on_message() function.
There are 5 kinds of messages:
* gateway downlink and gateway uplink: this is, uplink and downlink data messages
(PHYpayload) as well as radio metadata.
* join request and join accept
* gateway status: it provides the location of the gateway.
About the functioning of this collector:
1- It's instantiated in the Orchestrator and started by executing the connect()
method.
2- In connect(), 2 threads are launched:
a- one for the WS socket connection, where messages are processed. In case we
receive a disconnection message from the server, the refresh token thread is
stopped and the connect() method is executed again.
b- and the other thread is for refreshing the access token every N minutes. In
the case where it is not possible to send the new access token to the web server,
and after 3 consecutive failed attempts, this thread stops the WS thread and
executes the connect() method.
Some considerations about this endpoint / websocket:
* It provides messages with the same amount of information as the packet_forwarder,
which means that NO application data (such as direct associations between
message/devEui, app_name, gw_name, etc.) is handled.
* Sometimes it happens that the access token is retrieved but the web
service refuses to accept it. In this situation, we manually restart the
WS and open a new connection.
About the Gateway ID format, it can be:
* Legacy packet-forwarder format: a string matching the pattern 'eui-aabbccddeeff0011',
that is, 'eui-' followed by 8 bytes in hex format (16 characters), or,
* TTN format: a lowercase alphanumeric string separated by hyphens.
(A commented validation sketch for both formats follows this docstring.)
"""
def __init__(self, data_collector_id, organization_id, user, password, gateway_id, verified):
super().__init__(data_collector_id=data_collector_id, organization_id=organization_id, verified=verified)
self.user = user
self.password = password
self.gateway_id = gateway_id
self.ws = None
self.session = None
self.last_seen = None
self.manually_disconnected = None
# The data sent to the MQTT queue, to be written by the packet writer. It must have at least one MQ message
self.packet_writter_message = self.init_packet_writter_message()
# Dict containing location
self.location = dict()
self.being_tested = False
self.ws_thread = None
self.refresh_token_thread = None
def connect(self):
super(TTNCollector, self).connect()
self.session = self.login(self.user, self.password)
if self.session:
self.connected = "CONNECTED"
self.manually_disconnected = None
data_access = self.fetch_access_token(self.session)
access_token = data_access.get('access_token')
expires = data_access.get('expires')
self.ws = websocket.WebSocketApp(ws_url,
on_message=lambda ws, msg: self.on_message(ws, msg),
on_error=lambda ws, msg: self.on_error(ws, msg),
on_close=lambda ws: self.on_close(ws))
self.log.debug('WebSocket app initialized')
self.ws.access_token = access_token
self.ws.gateway = self.gateway_id
self.ws.organization_id = self.organization_id
self.ws.data_collector_id = self.data_collector_id
self.ws.on_open = lambda ws: self.on_open(ws)
self.ws.user_data = self
self.ws.is_closed = False
self.ws.packet_writter_message = self.packet_writter_message
self.ws.location = self.location
self.ws_thread = threading.Thread(target=self.ws.run_forever, kwargs={'ping_interval': 20})
self.ws_thread.daemon = True
self.ws_thread.start()
self.refresh_token_thread = threading.Thread(target=self.schedule_refresh_token, args=(self.ws, self.session, expires))
self.refresh_token_thread.daemon = True
self.refresh_token_thread.start()
else:
if self.being_tested:
notify_test_event(self.data_collector_id, 'ERROR', 'Login failed')
self.stop_testing = True
else:
save_login_error(self.data_collector_id)
def disconnect(self):
self.manually_disconnected = True
if self.being_tested:
self.log.info("Stopping test connection to DataCollector ID {0}".format(self.data_collector_id))
else:
self.log.info("Manually disconnected to gw: {}".format(self.gateway_id))
try:
if self.ws:
self.ws.close()
except Exception as exc:
self.log.error("Error closing socket: " + str(exc))
super(TTNCollector, self).disconnect()
def verify_payload(self, msg):
# If we managed to log in to TTN, then we are sure we're receiving TTN messages,
# so the verification code below is commented out.
return True
# if not self.has_to_parse:
# self.log.debug('message does not include physical payload')
# return True # NOT SURE if this should be True or False
# phyPayload = msg.get('payload', None)
# if not phyPayload:
# self.log.error("Payload not present in message")
# return False
# try:
# phy_parser.setPHYPayload(phyPayload)
# return True
# except Exception as e:
# self.log.error(f'Error parsing physical payload: {e}')
# return False
def on_message(self, ws, raw_message):
if self.being_tested:
return
# The content of many messages is just an 'h'; we don't want to print that.
if len(raw_message) > 1:
self.log.debug("Message: {}".format(raw_message))
else:
self.log.debug('Message len <= 1, skipping')
return
# Retry after a disconnection; stop the token-refreshing thread first.
if '[200,"disconnected"]' in raw_message:
self.log.info(f"DataCollector {self.data_collector_id}: Disconnected by server. Reconnecting.")
ws.close()
ws.is_closed = True
self.log.debug(f"DataCollector {self.data_collector_id}: Joining refresh token thread.")
self.refresh_token_thread.join()
self.log.debug(f"DataCollector {self.data_collector_id}: Refresh token thread joined.")
self.connect()
# Remove data format stuff
message = raw_message.replace('\\"', '"')
origin_message = message
self.has_to_parse = False
if 'gateway downlink' in message:
self.has_to_parse = True
message = message[20:-2]
elif 'gateway uplink' in message:
self.has_to_parse = True
message = message[18:-2]
elif 'gateway join request' in message:
self.has_to_parse = True
message = message[24:-2]
elif 'gateway join accept' in message:
self.has_to_parse = True
message = message[23:-2]
if not self.verified:
# TTN collectors only verify the physical payload, which is only parsed if has_to_parse is True
if not self.verify_message(message):
self.log.debug("Collector is not yet verified, skipping message\n")
return
# message processing
try:
if 'gateway status' in message and 'location' in message:
# Check if the location is given in this message. If so, save it and add it in subsequent messages
message = message[18:-2].replace('\\"', '"')
try:
status_message = json.loads(message)
ws.location['longitude'] = status_message.get('status').get('location').get('longitude')
ws.location['latitude'] = status_message.get('status').get('location').get('latitude')
ws.location['altitude'] = status_message.get('status').get('location').get('altitude')
except Exception as e:
self.log.error(f"Error when fetching location in TTNCollector: {str(e)} Message: {raw_message}" )
message = message.replace('\\"', '"')
# Save the message that originates the packet
ws.packet_writter_message['messages'].append(
{
'topic': None,
'message': origin_message[0:4096],
'data_collector_id': ws.data_collector_id
}
)
self.last_seen = datetime.now()
if self.has_to_parse:
message = json.loads(message)
packet = phy_parser.setPHYPayload(message.get('payload'))
packet['chan'] = None
packet['stat'] = None
packet['lsnr'] = message.get('snr', None)
packet['rssi'] = message.get('rssi', None)
packet['tmst'] = datetime.timestamp(dateutil.parser.parse(message.get('timestamp', None))) * 1000
packet['rfch'] = message.get('rfch', None)
packet['freq'] = message.get('frequency', None)
packet['modu'] = None
packet['datr'] = None
packet['codr'] = message.get('coding_rate', None)
packet['size'] = None
packet['data'] = message.get('payload')
if len(ws.location) > 0:
packet['latitude'] = ws.location['latitude']
packet['longitude'] = ws.location['longitude']
packet['altitude'] = ws.location['altitude']
# Reset location
ws.location = {}
packet['app_name'] = None
packet['dev_name'] = None
gw = ws.gateway
packet['gateway'] = gw.replace('eui-', '') if gw else None
packet['seqn'] = None
packet['opts'] = None
packet['port'] = None
packet['date'] = datetime.now().__str__()
packet['dev_eui'] = message.get('dev_eui')
packet['data_collector_id'] = ws.data_collector_id
packet['organization_id'] = ws.organization_id
ws.packet_writter_message['packet'] = packet
# Save the packet
save(ws.packet_writter_message, ws.data_collector_id)
self.log.debug(f'Message received from TTN saved in DB: {ws.packet_writter_message}.')
# Reset this variable
ws.packet_writter_message = self.init_packet_writter_message()
except Exception as e:
self.log.error(f"Error creating Packet in TTNCollector ID {ws.data_collector_id}: {str(e)} Message: {raw_message}")
save_parsing_error(ws.data_collector_id, raw_message)
def on_error(self, ws, error):
# If this connection is a test, send the event
if self.being_tested:
notify_test_event(self.data_collector_id, 'ERROR', str(error))
self.log.error(f"Error testing DataCollector ID {self.data_collector_id}: {str(error)}")
self.stop_testing = True
return
else:
self.log.error(f"Error ws: {str(error)}")
def on_close(self, ws): # similar to on_disconnect
ws.close()
ws.is_closed = True
self.log.info(f"Disconnected to gw: {ws.gateway_id}")
def on_open(self, ws): # similar to on_connect
# If this connection is a test, activate the flag and emit the event
if self.being_tested:
notify_test_event(self.data_collector_id, 'SUCCESS', 'Connection successful')
self.stop_testing = True
return
ws.send('["gateway:' + ws.gateway + '"]')
ws.send('["token:' + ws.access_token + '"]')
self.connected = "CONNECTED"
ws.is_closed = False
self.log.info(f"Connected to GW: {ws.gateway}" )
def login(self, user, password):
ses = requests.Session()
ses.headers['Content-type'] = 'application/json'
res = ses.post(account_login_url, data=json.dumps({"username": user, "password": password}))
ses.get(login_url)
return ses if res.status_code == 200 else None
def fetch_access_token(self, ses):
res = ses.get(access_token_url, timeout=30)
return res.json()
def schedule_refresh_token(self, ws, session, first_expires):
expires = first_expires
connection_attempts = 0
expire_dt = None
while not ws.is_closed:
if expire_dt is not None and expire_dt > datetime.now():
sleep(30)
continue
if expires:
expire_dt = datetime.fromtimestamp((expires / 1000) - 900)  # Converted from ms to seconds and subtracted 15 min
self.log.info(f"expires: {str(expires)}")
self.log.debug(f"DataCollector {self.data_collector_id}: Refresh token in {(expire_dt - datetime.now()).seconds} seconds")
self.log.debug(f"WS is closed: {str(ws.is_closed)}")
if first_expires:
first_expires = None
continue
try:
data_access = self.fetch_access_token(session)
access_token = data_access.get('access_token')
expires = data_access.get('expires')
ws.access_token = access_token
ws.send('["token:' + access_token + '"]')
connection_attempts = 0
except Exception as exc:
self.log.error(f'error fetching access token: {str(exc)}')
expires = None
expire_dt = None
connection_attempts += 1
if connection_attempts >= 3:
self.log.info(f"DataCollector {self.data_collector_id}: Stopping websocket")
self.ws.close()
self.ws_thread.join()
self.ws = None
self.log.info(f"DataCollector {self.data_collector_id}: Reconnecting websocket")
self.connect()
self.log.info(f"DataCollector {self.data_collector_id}: Stop token refresh")
|
[] |
[] |
[
"ACCESS_TOKEN_URL",
"self.logIN_URL",
"ACCOUNT_self.logIN_URL",
"WS_URL"
] |
[]
|
["ACCESS_TOKEN_URL", "self.logIN_URL", "ACCOUNT_self.logIN_URL", "WS_URL"]
|
python
| 4 | 0 | |
k8s/k8s.go
|
package k8s
import (
"context"
"fmt"
"os"
v1 "k8s.io/api/core/v1"
networking "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// Init initializes the k8s client.
func Init() error {
switch os.Getenv("KUBERNETES_BACKEND") {
default:
config, err := rest.InClusterConfig()
if err != nil {
return err
}
k8sClient, err := kubernetes.NewForConfig(config)
if err != nil {
return err
}
client = &clusterClient{k8sClient}
case "fs":
kubeFS := os.Getenv("KUBERNETES_FS")
if kubeFS == "" {
return fmt.Errorf("KUBERNETES_FS required")
}
var err error
client, err = newFSClient(kubeFS)
return err
case "local":
k8sClient, err := kubernetes.NewForConfig(&rest.Config{
Host: "127.0.0.1:8001",
})
if err != nil {
return err
}
client = &clusterClient{k8sClient}
}
return nil
}
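// The sketch below is illustrative only (the caller-side import of this
// package and the testdata path are assumptions): it shows how Init is meant
// to be driven purely by environment variables, with "fs" reading objects
// from the directory named in KUBERNETES_FS, "local" talking to a
// `kubectl proxy` on 127.0.0.1:8001, and any other value falling back to the
// in-cluster config.
//
//	os.Setenv("KUBERNETES_BACKEND", "fs")
//	os.Setenv("KUBERNETES_FS", "./testdata/k8s")
//	if err := k8s.Init(); err != nil {
//		log.Fatal(err)
//	}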
var client interface {
WatchIngresses(ctx context.Context, namespace string) (watch.Interface, error)
GetServices(ctx context.Context, namespace string) ([]v1.Service, error)
WatchServices(ctx context.Context, namespace string) (watch.Interface, error)
GetIngresses(ctx context.Context, namespace string) ([]networking.Ingress, error)
GetSecrets(ctx context.Context, namespace string) ([]v1.Secret, error)
WatchSecrets(ctx context.Context, namespace string) (watch.Interface, error)
GetEndpoints(ctx context.Context, namespace string) ([]v1.Endpoints, error)
WatchEndpoints(ctx context.Context, namespace string) (watch.Interface, error)
}
// WatchIngresses watches ingresses for given namespace
func WatchIngresses(ctx context.Context, namespace string) (watch.Interface, error) {
return client.WatchIngresses(ctx, namespace)
}
// GetServices lists all services for the given namespace
func GetServices(ctx context.Context, namespace string) ([]v1.Service, error) {
return client.GetServices(ctx, namespace)
}
// WatchServices watches services
func WatchServices(ctx context.Context, namespace string) (watch.Interface, error) {
return client.WatchServices(ctx, namespace)
}
// GetIngresses lists all ingresses for given namespace
func GetIngresses(ctx context.Context, namespace string) ([]networking.Ingress, error) {
return client.GetIngresses(ctx, namespace)
}
// GetSecrets lists all secrets for the given namespace
func GetSecrets(ctx context.Context, namespace string) ([]v1.Secret, error) {
return client.GetSecrets(ctx, namespace)
}
// WatchSecrets watches secrets for given namespace
func WatchSecrets(ctx context.Context, namespace string) (watch.Interface, error) {
return client.WatchSecrets(ctx, namespace)
}
// GetEndpoints lists all endpoints
func GetEndpoints(ctx context.Context, namespace string) ([]v1.Endpoints, error) {
return client.GetEndpoints(ctx, namespace)
}
// WatchEndpoints watches endpoints
func WatchEndpoints(ctx context.Context, namespace string) (watch.Interface, error) {
return client.WatchEndpoints(ctx, namespace)
}
|
[
"\"KUBERNETES_BACKEND\"",
"\"KUBERNETES_FS\""
] |
[] |
[
"KUBERNETES_BACKEND",
"KUBERNETES_FS"
] |
[]
|
["KUBERNETES_BACKEND", "KUBERNETES_FS"]
|
go
| 2 | 0 | |
vendor/github.com/eximchain/go-ethereum/internal/debug/flags.go
|
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package debug
import (
"fmt"
"io"
"net/http"
_ "net/http/pprof"
"os"
"runtime"
"github.com/eximchain/go-ethereum/log"
"github.com/eximchain/go-ethereum/log/term"
"github.com/eximchain/go-ethereum/metrics"
"github.com/eximchain/go-ethereum/metrics/exp"
"github.com/fjl/memsize/memsizeui"
colorable "github.com/mattn/go-colorable"
"gopkg.in/urfave/cli.v1"
)
var Memsize memsizeui.Handler
var (
verbosityFlag = cli.IntFlag{
Name: "verbosity",
Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail",
Value: 3,
}
vmoduleFlag = cli.StringFlag{
Name: "vmodule",
Usage: "Per-module verbosity: comma-separated list of <pattern>=<level> (e.g. eth/*=5,p2p=4)",
Value: "",
}
backtraceAtFlag = cli.StringFlag{
Name: "backtrace",
Usage: "Request a stack trace at a specific logging statement (e.g. \"block.go:271\")",
Value: "",
}
debugFlag = cli.BoolFlag{
Name: "debug",
Usage: "Prepends log messages with call-site location (file and line number)",
}
pprofFlag = cli.BoolFlag{
Name: "pprof",
Usage: "Enable the pprof HTTP server",
}
pprofPortFlag = cli.IntFlag{
Name: "pprofport",
Usage: "pprof HTTP server listening port",
Value: 6060,
}
pprofAddrFlag = cli.StringFlag{
Name: "pprofaddr",
Usage: "pprof HTTP server listening interface",
Value: "127.0.0.1",
}
memprofilerateFlag = cli.IntFlag{
Name: "memprofilerate",
Usage: "Turn on memory profiling with the given rate",
Value: runtime.MemProfileRate,
}
blockprofilerateFlag = cli.IntFlag{
Name: "blockprofilerate",
Usage: "Turn on block profiling with the given rate",
}
cpuprofileFlag = cli.StringFlag{
Name: "cpuprofile",
Usage: "Write CPU profile to the given file",
}
traceFlag = cli.StringFlag{
Name: "trace",
Usage: "Write execution trace to the given file",
}
)
// Flags holds all command-line flags required for debugging.
var Flags = []cli.Flag{
verbosityFlag, vmoduleFlag, backtraceAtFlag, debugFlag,
pprofFlag, pprofAddrFlag, pprofPortFlag,
memprofilerateFlag, blockprofilerateFlag, cpuprofileFlag, traceFlag,
}
var (
ostream log.Handler
glogger *log.GlogHandler
)
func init() {
usecolor := term.IsTty(os.Stderr.Fd()) && os.Getenv("TERM") != "dumb"
output := io.Writer(os.Stderr)
if usecolor {
output = colorable.NewColorableStderr()
}
ostream = log.StreamHandler(output, log.TerminalFormat(usecolor))
glogger = log.NewGlogHandler(ostream)
}
// Setup initializes profiling and logging based on the CLI flags.
// It should be called as early as possible in the program.
func Setup(ctx *cli.Context, logdir string) error {
// logging
log.PrintOrigins(ctx.GlobalBool(debugFlag.Name))
if logdir != "" {
rfh, err := log.RotatingFileHandler(
logdir,
262144,
log.JSONFormatOrderedEx(false, true),
)
if err != nil {
return err
}
glogger.SetHandler(log.MultiHandler(ostream, rfh))
}
glogger.Verbosity(log.Lvl(ctx.GlobalInt(verbosityFlag.Name)))
glogger.Vmodule(ctx.GlobalString(vmoduleFlag.Name))
glogger.BacktraceAt(ctx.GlobalString(backtraceAtFlag.Name))
log.Root().SetHandler(glogger)
// profiling, tracing
runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name)
Handler.SetBlockProfileRate(ctx.GlobalInt(blockprofilerateFlag.Name))
if traceFile := ctx.GlobalString(traceFlag.Name); traceFile != "" {
if err := Handler.StartGoTrace(traceFile); err != nil {
return err
}
}
if cpuFile := ctx.GlobalString(cpuprofileFlag.Name); cpuFile != "" {
if err := Handler.StartCPUProfile(cpuFile); err != nil {
return err
}
}
// pprof server
if ctx.GlobalBool(pprofFlag.Name) {
address := fmt.Sprintf("%s:%d", ctx.GlobalString(pprofAddrFlag.Name), ctx.GlobalInt(pprofPortFlag.Name))
StartPProf(address)
}
return nil
}
func StartPProf(address string) {
// Hook go-metrics into expvar on any /debug/metrics request, load all vars
// from the registry into expvar, and execute regular expvar handler.
exp.Exp(metrics.DefaultRegistry)
http.Handle("/memsize/", http.StripPrefix("/memsize", &Memsize))
log.Info("Starting pprof server", "addr", fmt.Sprintf("http://%s/debug/pprof", address))
go func() {
if err := http.ListenAndServe(address, nil); err != nil {
log.Error("Failure in running pprof server", "err", err)
}
}()
}
// Exit stops all running profiles, flushing their output to the
// respective file.
func Exit() {
Handler.StopCPUProfile()
Handler.StopGoTrace()
}
|
[
"\"TERM\""
] |
[] |
[
"TERM"
] |
[]
|
["TERM"]
|
go
| 1 | 0 | |
scraper_utils.py
|
import json
import pathlib
import re
import time
import bs4
import os
import data_models
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
GOOGLE_CHROME_PATH = os.environ.get('GOOGLE_CHROME_BIN')
CHROMEDRIVER_PATH = os.environ.get('CHROMEDRIVER_PATH')
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-dev-shm-usage')
browser = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH,
chrome_options=chrome_options)
def get_program_info(url: str):
browser.get(url)
el = browser.find_element_by_id('student-status')
for option in el.find_elements_by_tag_name('option'):
if option.text == 'Attending / Recently Graduated from a Canadian Secondary School':
option.click() # select() in earlier versions of webdriver
break
el = browser.find_element_by_id('provinces')
for option in el.find_elements_by_tag_name('option'):
if option.text == 'Ontario':
option.click() # select() in earlier versions of webdriver
break
element = WebDriverWait(browser, 10).until(
ec.presence_of_element_located(
(By.ID, 'submit'))
)
browser.execute_script("arguments[0].scrollIntoView();", element)
element.click()
browser.implicitly_wait(1)
prog_info = browser.find_element_by_id('program-1').text
prog_infos = prog_info.split('\n')
# print(prog_infos)
name = prog_infos[0].split('Program ')[1].strip()
province = prog_infos[1].split('Province')[1].strip()
ouac_code = prog_infos[2].split('OUAC Code: ')[1].strip()
degrees = [degree.strip() for degree in
prog_infos[3].split('Degrees ')[1].split(',')]
coop_option = False if 'no' in prog_infos[4].split('Co-op/Internship: ')[
1].strip().lower() else True
req_courses = browser.find_element_by_xpath(
'//*[@id="program-1"]/table/tbody/tr[8]/td/ul[1]').text.strip().split(
'\n')
try:
admission_range = browser.find_element_by_xpath(
'//*[@id="program-1"]/table/tbody/tr[12]/td').text.strip()
except:
admission_range = browser.find_element_by_xpath(
'//*[@id="program-1"]/table/tbody/tr[11]/td').text.strip()
try:
enrolment = int(re.findall(r'\d+', browser.find_element_by_xpath(
'//*[@id="program-1"]/table/tbody/tr[16]/td').text.strip())[0])
except:
enrolment = int(re.findall(r'\d+', browser.find_element_by_xpath(
'//*[@id="program-1"]/table/tbody/tr[15]/td').text.strip())[0])
return data_models.ScrapingData(name, province,
'McMaster University', ouac_code,
degrees, coop_option, req_courses,
admission_range,
enrolment)
def remove_consecutive_duplicates(s):
if len(s) < 2:
return s
if s[0] == '_' and s[0] == s[1]:
return remove_consecutive_duplicates(s[1:])
else:
return s[0]+remove_consecutive_duplicates(s[1:])
def legal_name(name: str) -> str:
valids = list("abcdefghijklmnopqrstuvwxyz1234567890")
name_to_return = []
for char in name:
if char in valids:
name_to_return.append(char)
else:
name_to_return.append('_')
name = "".join(name_to_return)
return remove_consecutive_duplicates(name)
def fetch_programs(url: str):
programs = requests.get(url).text
soup = bs4.BeautifulSoup(programs, features="html.parser")
program_divs = soup.find_all("div", {"class": "row row-eq-height center-content"})[0].find_all('div')
programs = []
for program_div in program_divs:
try:
time.sleep(1)
programs.append(data_models.Program(
program_div.find('a').text,
program_div.find('a').get('href'),
get_program_info(program_div.find('a').get('href'))
))
except Exception as e:
print(f'Error on website: {program_div.find("a").get("href")}')
return programs
def mcmaster_programs():
return fetch_programs('https://future.mcmaster.ca/programs/')
# progs = mcmaster_programs()
# for prog in progs:
# output_file = f'programs/{legal_name(prog.name.lower())}_program.json'
# with open(output_file, 'w') as outfile:
# json.dump(prog.__dict__(), outfile)
path = pathlib.Path('programs')
files = os.listdir(path)
for file in files:
os.rename(f'{path}/{file}', f'{path}/{file.replace("_program", "")}')
|
[] |
[] |
[
"GOOGLE_CHROME_BIN",
"CHROMEDRIVER_PATH"
] |
[]
|
["GOOGLE_CHROME_BIN", "CHROMEDRIVER_PATH"]
|
python
| 2 | 0 | |
tutorials/mnist_tutorial_defences_gaussian_augmentation.py
|
#coding=utf-8
# Copyright 2017 - 2018 Baidu Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FGSM tutorial on mnist using advbox tool.
FGSM is a non-targeted attack, while FGSMT is a targeted attack.
"""
import sys
import os
sys.path.append("..")
import logging
#logging.basicConfig(level=logging.INFO,format="%(filename)s[line:%(lineno)d] %(levelname)s %(message)s")
logger=logging.getLogger(__name__)
import numpy as np
import paddle.fluid as fluid
import paddle.v2 as paddle
from advbox.adversary import Adversary
from advbox.attacks.gradient_method import FGSM_static
from advbox.models.paddle import PaddleModel
from tutorials.mnist_model import mnist_cnn_model
# The WITH_GPU environment variable dynamically controls whether GPU resources are used;
# this is especially handy when developing on a Mac but running on a GPU server:
# leave it unset on the Mac and run `export WITH_GPU=1` on the GPU server.
with_gpu = os.getenv('WITH_GPU', '0') != '0'
def main(use_cuda):
"""
Advbox demo which demonstrate how to use advbox.
"""
TOTAL_NUM = 500
IMG_NAME = 'img'
LABEL_NAME = 'label'
img = fluid.layers.data(name=IMG_NAME, shape=[1, 28, 28], dtype='float32')
# gradient should flow
img.stop_gradient = False
label = fluid.layers.data(name=LABEL_NAME, shape=[1], dtype='int64')
logits = mnist_cnn_model(img)
cost = fluid.layers.cross_entropy(input=logits, label=label)
avg_cost = fluid.layers.mean(x=cost)
# Choose CPU or GPU resources depending on the configuration
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
BATCH_SIZE = 1
test_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.test(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
fluid.io.load_params(
exe, "./mnist-gad/", main_program=fluid.default_main_program())
# advbox demo
m = PaddleModel(
fluid.default_main_program(),
IMG_NAME,
LABEL_NAME,
logits.name,
avg_cost.name, (-1, 1),
channel_axis=1)
# Use static FGSM (epsilon stays fixed)
attack = FGSM_static(m)
attack_config = {"epsilon": 0.01}
# use test data to generate adversarial examples
total_count = 0
fooling_count = 0
for data in test_reader():
total_count += 1
adversary = Adversary(data[0][0], data[0][1])
# FGSM non-targeted attack
adversary = attack(adversary, **attack_config)
if adversary.is_successful():
fooling_count += 1
#print(
# 'attack success, original_label=%d, adversarial_label=%d, count=%d'
# % (data[0][1], adversary.adversarial_label, total_count))
else:
logger.info('attack failed, original_label=%d, count=%d' %
(data[0][1], total_count))
if total_count >= TOTAL_NUM:
print(
"[TEST_DATASET]: fooling_count=%d, total_count=%d, fooling_rate=%f"
% (fooling_count, total_count,
float(fooling_count) / total_count))
break
print("fgsm attack done with GaussianAugmentationDefence")
# Attack the model that has not been hardened
fluid.io.load_params(
exe, "./mnist/", main_program=fluid.default_main_program())
# advbox demo
m = PaddleModel(
fluid.default_main_program(),
IMG_NAME,
LABEL_NAME,
logits.name,
avg_cost.name, (-1, 1),
channel_axis=1)
# Use static FGSM (epsilon stays fixed)
attack = FGSM_static(m)
attack_config = {"epsilon": 0.01}
# use test data to generate adversarial examples
total_count = 0
fooling_count = 0
for data in test_reader():
total_count += 1
adversary = Adversary(data[0][0], data[0][1])
# FGSM non-targeted attack
adversary = attack(adversary, **attack_config)
if adversary.is_successful():
fooling_count += 1
#print(
# 'attack success, original_label=%d, adversarial_label=%d, count=%d'
# % (data[0][1], adversary.adversarial_label, total_count))
else:
logger.info('attack failed, original_label=%d, count=%d' %
(data[0][1], total_count))
if total_count >= TOTAL_NUM:
print(
"[TEST_DATASET]: fooling_count=%d, total_count=%d, fooling_rate=%f"
% (fooling_count, total_count,
float(fooling_count) / total_count))
break
print("fgsm attack done without any defence")
if __name__ == '__main__':
main(use_cuda=with_gpu)
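# Illustrative usage only (the working directory is an assumption based on the
# imports above, which expect the repository's tutorials/ layout):
#
#   python mnist_tutorial_defences_gaussian_augmentation.py              # CPU
#   WITH_GPU=1 python mnist_tutorial_defences_gaussian_augmentation.py   # GPU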
|
[] |
[] |
[
"WITH_GPU"
] |
[]
|
["WITH_GPU"]
|
python
| 1 | 0 | |
pkg/subctl/cmd/cloud/rhos/rhos.go
|
/*
SPDX-License-Identifier: Apache-2.0
Copyright Contributors to the Submariner project.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package rhos provides common functionality to run cloud prepare/cleanup on RHOS Clusters.
package rhos
import (
"encoding/json"
"os"
"path/filepath"
"github.com/gophercloud/utils/openstack/clientconfig"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/submariner-io/admiral/pkg/util"
"github.com/submariner-io/cloud-prepare/pkg/api"
"github.com/submariner-io/cloud-prepare/pkg/k8s"
"github.com/submariner-io/cloud-prepare/pkg/ocp"
"github.com/submariner-io/cloud-prepare/pkg/rhos"
"github.com/submariner-io/submariner-operator/internal/exit"
"github.com/submariner-io/submariner-operator/internal/restconfig"
cloudutils "github.com/submariner-io/submariner-operator/pkg/subctl/cmd/cloud/utils"
"github.com/submariner-io/submariner-operator/pkg/subctl/cmd/utils"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
)
const (
infraIDFlag = "infra-id"
regionFlag = "region"
projectIDFlag = "project-id"
cloudEntryFlag = "cloud-entry"
)
var (
infraID string
region string
projectID string
ocpMetadataFile string
cloudEntry string
)
// AddRHOSFlags adds basic flags needed by RHOS.
func AddRHOSFlags(command *cobra.Command) {
command.Flags().StringVar(&infraID, infraIDFlag, "", "RHOS infra ID")
command.Flags().StringVar(&region, regionFlag, "", "RHOS region")
command.Flags().StringVar(&projectID, projectIDFlag, "", "RHOS project ID")
command.Flags().StringVar(&ocpMetadataFile, "ocp-metadata", "",
"OCP metadata.json file (or the directory containing it) from which to read the RHOS infra ID "+
"and region from (takes precedence over the specific flags)")
command.Flags().StringVar(&cloudEntry, cloudEntryFlag, "", "the cloud entry to use")
}
// RunOnRHOS runs the given function on RHOS, supplying it with a cloud instance connected to RHOS and a reporter that writes to CLI.
// The function makes sure that infraID and region are specified, and extracts the credentials from a secret in order to connect to RHOS.
func RunOnRHOS(restConfigProducer restconfig.Producer, gwInstanceType string, dedicatedGWNodes bool,
function func(cloud api.Cloud, gwDeployer api.GatewayDeployer,
reporter api.Reporter) error) error {
if ocpMetadataFile != "" {
err := initializeFlagsFromOCPMetadata(ocpMetadataFile)
region = os.Getenv("OS_REGION_NAME")
utils.ExitOnError("Failed to read RHOS Cluster information from OCP metadata file", err)
} else {
utils.ExpectFlag(infraIDFlag, infraID)
utils.ExpectFlag(regionFlag, region)
utils.ExpectFlag(projectIDFlag, projectID)
}
reporter := cloudutils.NewStatusReporter()
reporter.Started("Retrieving RHOS credentials from your RHOS configuration")
// Using RHOS default "openstack", if not specified
if cloudEntry == "" {
cloudEntry = "openstack"
}
opts := &clientconfig.ClientOpts{
Cloud: cloudEntry,
}
providerClient, err := clientconfig.AuthenticatedClient(opts)
utils.ExitOnError("Failed to initialize a RHOS Client", err)
k8sConfig, err := restConfigProducer.ForCluster()
utils.ExitOnError("Failed to initialize a Kubernetes config", err)
clientSet, err := kubernetes.NewForConfig(k8sConfig)
utils.ExitOnError("Failed to create Kubernetes client", err)
k8sClientSet := k8s.NewInterface(clientSet)
restMapper, err := util.BuildRestMapper(k8sConfig)
exit.OnErrorWithMessage(err, "Failed to create restmapper")
dynamicClient, err := dynamic.NewForConfig(k8sConfig)
exit.OnErrorWithMessage(err, "Failed to create dynamic client")
cloudInfo := rhos.CloudInfo{
Client: providerClient,
InfraID: infraID,
Region: region,
K8sClient: k8sClientSet,
}
rhosCloud := rhos.NewCloud(cloudInfo)
msDeployer := ocp.NewK8sMachinesetDeployer(restMapper, dynamicClient)
gwDeployer := rhos.NewOcpGatewayDeployer(cloudInfo, msDeployer, projectID, gwInstanceType,
"", cloudEntry, dedicatedGWNodes)
utils.ExitOnError("Failed to initialize a GatewayDeployer config", err)
return function(rhosCloud, gwDeployer, reporter)
}
func initializeFlagsFromOCPMetadata(metadataFile string) error {
fileInfo, err := os.Stat(metadataFile)
if err != nil {
return errors.Wrapf(err, "failed to stat file %q", metadataFile)
}
if fileInfo.IsDir() {
metadataFile = filepath.Join(metadataFile, "metadata.json")
}
data, err := os.ReadFile(metadataFile)
if err != nil {
return errors.Wrapf(err, "error reading file %q", metadataFile)
}
var metadata struct {
InfraID string `json:"infraID"`
RHOS struct {
ProjectID string `json:"projectID"`
} `json:"rhos"`
}
err = json.Unmarshal(data, &metadata)
if err != nil {
return errors.Wrap(err, "error unmarshalling data")
}
infraID = metadata.InfraID
projectID = metadata.RHOS.ProjectID
return nil
}
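// For illustration, a metadata.json fragment of the shape the function above
// expects (all values are made up):
//
//	{
//	  "infraID": "mycluster-x7k2p",
//	  "rhos": {
//	    "projectID": "0123456789abcdef0123456789abcdef"
//	  }
//	}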
|
[
"\"OS_REGION_NAME\""
] |
[] |
[
"OS_REGION_NAME"
] |
[]
|
["OS_REGION_NAME"]
|
go
| 1 | 0 | |
cli/command/context/options.go
|
package context
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/context"
"github.com/docker/cli/cli/context/docker"
"github.com/docker/cli/cli/context/kubernetes"
"github.com/docker/cli/cli/context/store"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/homedir"
"github.com/pkg/errors"
)
const (
keyFrom = "from"
keyHost = "host"
keyCA = "ca"
keyCert = "cert"
keyKey = "key"
keySkipTLSVerify = "skip-tls-verify"
keyKubeconfig = "config-file"
keyKubecontext = "context-override"
keyKubenamespace = "namespace-override"
)
type configKeyDescription struct {
name string
description string
}
var (
allowedDockerConfigKeys = map[string]struct{}{
keyFrom: {},
keyHost: {},
keyCA: {},
keyCert: {},
keyKey: {},
keySkipTLSVerify: {},
}
allowedKubernetesConfigKeys = map[string]struct{}{
keyFrom: {},
keyKubeconfig: {},
keyKubecontext: {},
keyKubenamespace: {},
}
dockerConfigKeysDescriptions = []configKeyDescription{
{
name: keyFrom,
description: "Copy named context's Docker endpoint configuration",
},
{
name: keyHost,
description: "Docker endpoint on which to connect",
},
{
name: keyCA,
description: "Trust certs signed only by this CA",
},
{
name: keyCert,
description: "Path to TLS certificate file",
},
{
name: keyKey,
description: "Path to TLS key file",
},
{
name: keySkipTLSVerify,
description: "Skip TLS certificate validation",
},
}
kubernetesConfigKeysDescriptions = []configKeyDescription{
{
name: keyFrom,
description: "Copy named context's Kubernetes endpoint configuration",
},
{
name: keyKubeconfig,
description: "Path to a Kubernetes config file",
},
{
name: keyKubecontext,
description: "Overrides the context set in the kubernetes config file",
},
{
name: keyKubenamespace,
description: "Overrides the namespace set in the kubernetes config file",
},
}
)
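// For illustration only: these keys are typically supplied to
// `docker context create` as comma-separated key=value pairs through the
// --docker and --kubernetes flags (the host, paths and context name below are
// placeholders, not values this package defines):
//
//	docker context create my-ctx \
//	  --docker "host=tcp://10.0.0.5:2376,ca=~/ca.pem,cert=~/cert.pem,key=~/key.pem" \
//	  --kubernetes "config-file=/home/me/.kube/config,namespace-override=dev"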
func parseBool(config map[string]string, name string) (bool, error) {
strVal, ok := config[name]
if !ok {
return false, nil
}
res, err := strconv.ParseBool(strVal)
return res, errors.Wrap(err, name)
}
func validateConfig(config map[string]string, allowedKeys map[string]struct{}) error {
var errs []string
for k := range config {
if _, ok := allowedKeys[k]; !ok {
errs = append(errs, fmt.Sprintf("%s: unrecognized config key", k))
}
}
if len(errs) == 0 {
return nil
}
return errors.New(strings.Join(errs, "\n"))
}
func getDockerEndpoint(dockerCli command.Cli, config map[string]string) (docker.Endpoint, error) {
if err := validateConfig(config, allowedDockerConfigKeys); err != nil {
return docker.Endpoint{}, err
}
if contextName, ok := config[keyFrom]; ok {
metadata, err := dockerCli.ContextStore().GetContextMetadata(contextName)
if err != nil {
return docker.Endpoint{}, err
}
if ep, ok := metadata.Endpoints[docker.DockerEndpoint].(docker.EndpointMeta); ok {
return docker.Endpoint{EndpointMeta: ep}, nil
}
return docker.Endpoint{}, errors.Errorf("unable to get endpoint from context %q", contextName)
}
tlsData, err := context.TLSDataFromFiles(config[keyCA], config[keyCert], config[keyKey])
if err != nil {
return docker.Endpoint{}, err
}
skipTLSVerify, err := parseBool(config, keySkipTLSVerify)
if err != nil {
return docker.Endpoint{}, err
}
ep := docker.Endpoint{
EndpointMeta: docker.EndpointMeta{
Host: config[keyHost],
SkipTLSVerify: skipTLSVerify,
},
TLSData: tlsData,
}
// try to resolve a docker client, validating the configuration
opts, err := ep.ClientOpts()
if err != nil {
return docker.Endpoint{}, errors.Wrap(err, "invalid docker endpoint options")
}
if _, err := client.NewClientWithOpts(opts...); err != nil {
return docker.Endpoint{}, errors.Wrap(err, "unable to apply docker endpoint options")
}
return ep, nil
}
func getDockerEndpointMetadataAndTLS(dockerCli command.Cli, config map[string]string) (docker.EndpointMeta, *store.EndpointTLSData, error) {
ep, err := getDockerEndpoint(dockerCli, config)
if err != nil {
return docker.EndpointMeta{}, nil, err
}
return ep.EndpointMeta, ep.TLSData.ToStoreTLSData(), nil
}
func getKubernetesEndpoint(dockerCli command.Cli, config map[string]string) (*kubernetes.Endpoint, error) {
if err := validateConfig(config, allowedKubernetesConfigKeys); err != nil {
return nil, err
}
if len(config) == 0 {
return nil, nil
}
if contextName, ok := config[keyFrom]; ok {
ctxMeta, err := dockerCli.ContextStore().GetContextMetadata(contextName)
if err != nil {
return nil, err
}
endpointMeta := kubernetes.EndpointFromContext(ctxMeta)
if endpointMeta != nil {
res, err := endpointMeta.WithTLSData(dockerCli.ContextStore(), dockerCli.CurrentContext())
if err != nil {
return nil, err
}
return &res, nil
}
// fallback to env-based kubeconfig
kubeconfig := os.Getenv("KUBECONFIG")
if kubeconfig == "" {
kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
}
ep, err := kubernetes.FromKubeConfig(kubeconfig, "", "")
if err != nil {
return nil, err
}
return &ep, nil
}
if config[keyKubeconfig] != "" {
ep, err := kubernetes.FromKubeConfig(config[keyKubeconfig], config[keyKubecontext], config[keyKubenamespace])
if err != nil {
return nil, err
}
return &ep, nil
}
return nil, nil
}
func getKubernetesEndpointMetadataAndTLS(dockerCli command.Cli, config map[string]string) (*kubernetes.EndpointMeta, *store.EndpointTLSData, error) {
ep, err := getKubernetesEndpoint(dockerCli, config)
if err != nil {
return nil, nil, err
}
if ep == nil {
return nil, nil, err
}
return &ep.EndpointMeta, ep.TLSData.ToStoreTLSData(), nil
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
api/python/trips.py
|
import sys
import simplejson as json
import datetime
import decimal
import mariadb
import os
import flask
from flask import request
from flask import Blueprint
from dotenv import load_dotenv
load_dotenv()
trips = Blueprint('trips', __name__)
config = {
'host': os.getenv("DB_HOST"),
'port': int(os.getenv("DB_PORT")),
'user': os.getenv("DB_USER"),
'password': os.getenv("DB_PASS"),
'ssl': True
}
def converter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
@trips.route('/api/trips', methods=['GET'])
def index():
conn = mariadb.connect(**config)
cur = conn.cursor()
query = "select \
t.fl_num, \
a.airline, \
t.carrier airline_code, \
t.fl_date, \
t.origin, \
t.dest, \
f.dep_time, \
f.arr_time, \
fh.delayed_pct, \
fh.avg_delay \
from \
travel.trips tr inner join \
travel.tickets t on tr.ticket_id = t.id inner join \
travel.airlines a on t.carrier = a.iata_code, \
(select * from travel.flights where year >= 2020) f, \
(select \
a.avg_delay, \
round(100 * (a.`delayed` / a.volume), 2) delayed_pct, \
round(100 * (a.cancelled / a.volume), 2) cancelled_pct, \
a.carrier, \
a.day, \
a.month \
from \
(select \
count(*) volume, \
sum(case when dep_delay > 0 then 1 else 0 end) `delayed`, \
sum(cancelled) cancelled, \
avg(dep_delay) avg_delay, \
carrier, \
month, \
day \
from \
travel_history.flights \
where \
year >= 2014 and \
month in (select month(fl_date) from travel.trips tr inner join travel.tickets t on tr.ticket_id = t.id) and \
day in (select day(fl_date) from travel.trips tr inner join travel.tickets t on tr.ticket_id = t.id) \
group by \
day, \
month, \
carrier) a) fh \
where \
t.carrier = f.carrier and \
t.fl_date = f.fl_date and \
t.fl_num = f.fl_num and \
t.carrier = fh.carrier and \
fh.month = month(t.fl_date) and \
fh.day = day(t.fl_date)"
cur.execute(query)
row_headers=[x[0] for x in cur.description]
rv = cur.fetchall()
json_data=[]
for result in rv:
json_data.append(dict(zip(row_headers,result)))
if len(json_data) > 0:
json_data = analyzeResults(json_data)
return json.dumps(json_data, default=converter), 200, {'ContentType':'application/json'}
def analyzeResults(json_data):
#TODO: Replace placeholder with (location based) weather API results
for item in json_data:
precip_probability = .2
wind_speed = 10
weather_score = 5 - 5 * (precip_probability + (wind_speed/100))
historical_score = round(5 * ((100 - item['delayed_pct'])/100), 1)
overall_score = round((decimal.Decimal(weather_score) + decimal.Decimal(historical_score)) / 2, 1)
weather_delay_multiplier = round((precip_probability + (wind_speed/100)) * 5, 3)
projected_delay = round(decimal.Decimal(weather_delay_multiplier) * item['avg_delay'], 0)
assessment = {
'overall_score': overall_score,
'historical_score': historical_score,
'historical_delay_percentage': item['delayed_pct'],
'weather_score': weather_score,
'weather_delay_multiplier': weather_delay_multiplier,
'projected_delay': projected_delay
}
item['assessment'] = assessment
forecast = {
'description': "Clear",
'icon': "clear-day",
'temp_low': "55°F",
'temp_high': "55°F",
'precip_probability': precip_probability,
'wind_speed': wind_speed
}
item['forecast'] = forecast
return json_data
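# Worked example of the scoring above (numbers are illustrative): with
# precip_probability=0.2 and wind_speed=10, weather_score = 5 - 5*(0.2 + 0.1)
# = 3.5 and weather_delay_multiplier = 5*(0.2 + 0.1) = 1.5. A flight with
# delayed_pct=20 gets historical_score = 5*(80/100) = 4.0, so overall_score =
# (3.5 + 4.0)/2 ≈ 3.8 after rounding, and an avg_delay of 15 minutes yields a
# projected_delay of roughly 1.5 * 15 ≈ 22 minutes.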
|
[] |
[] |
[
"DB_PORT",
"DB_USER",
"DB_HOST",
"DB_PASS"
] |
[]
|
["DB_PORT", "DB_USER", "DB_HOST", "DB_PASS"]
|
python
| 4 | 0 | |
config.docker.py
|
import binascii
import os
SDM_META_READ_KEY = binascii.unhexlify(os.environ.get("SDM_META_READ_KEY", "00000000000000000000000000000000"))
SDM_FILE_READ_KEY = binascii.unhexlify(os.environ.get("SDM_FILE_READ_KEY", "00000000000000000000000000000000"))
ENC_PICC_DATA_PARAM = os.environ.get("ENC_PICC_DATA_PARAM", "picc_data")
ENC_FILE_DATA_PARAM = os.environ.get("ENC_FILE_DATA_PARAM", "enc")
UID_PARAM = os.environ.get("UID_PARAM", "uid")
CTR_PARAM = os.environ.get("CTR_PARAM", "ctr")
SDMMAC_PARAM = os.environ.get("SDMMAC_PARAM", "cmac")
REQUIRE_LRP = (os.environ.get("REQUIRE_LRP", "0") == "1")
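# Illustrative only (the image name and key values are placeholders): when
# running the Docker image, each setting above can be overridden through -e
# flags, and anything left unset falls back to the defaults shown:
#
#   docker run \
#     -e SDM_META_READ_KEY=00112233445566778899aabbccddeeff \
#     -e SDMMAC_PARAM=cmac \
#     -e REQUIRE_LRP=1 \
#     sdm-backend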
|
[] |
[] |
[
"ENC_FILE_DATA_PARAM",
"ENC_PICC_DATA_PARAM",
"UID_PARAM",
"REQUIRE_LRP",
"SDM_FILE_READ_KEY",
"CTR_PARAM",
"SDMMAC_PARAM",
"SDM_META_READ_KEY"
] |
[]
|
["ENC_FILE_DATA_PARAM", "ENC_PICC_DATA_PARAM", "UID_PARAM", "REQUIRE_LRP", "SDM_FILE_READ_KEY", "CTR_PARAM", "SDMMAC_PARAM", "SDM_META_READ_KEY"]
|
python
| 8 | 0 | |
backend/app/auth.py
|
import secrets
import pymysql
import redis
import hashlib
import os
N_BYTES = 32
DEV_MODE = os.environ.get("DEV_MODE", True)
if DEV_MODE == True:
from .config import *
else:
from config import *
if DEV_MODE == True:
USER_DB_CONFIG = USER_DB_DEV_CONFIG
else:
USER_DB_CONFIG = USER_DB_PROD_CONFIG
def check_for_no_sql_injection(string: str) -> bool:
prohibited_symbols = [")", "(", ";", "DROP"]
for symbol in prohibited_symbols:
if symbol in string:
return False
return True
def generate_access_token():
return secrets.token_hex(N_BYTES)
def check_user(user_name: str, access_token: str, db) -> bool:
# request from db (redis) a token
access_token_true = request_token(user_name, db)
#print(type(access_token_true))
# compare two tokens securely
if secrets.compare_digest(access_token, access_token_true) == True:
# if tokens match, return True
return True
# if tokens do not match, return False
return False
def check_user_access_token_exists(user_name:str,db) -> bool:
try:
access_token = db.get(user_name)
if access_token == None:
return False
else:
return True
except:
return False
def create_token(user_name, db) -> str:
new_token = generate_access_token()
db.set(user_name, new_token)
return new_token
def request_token(user_name: str, db) -> str:
token = db.get(user_name)
if token == None:
return "NULL"
return token
def deauthorise_user(user_name:str,access_token:str, db) -> str:
if check_user_access_token_exists(user_name, db) == True and check_user(user_name, access_token, db) == True:
try:
db.delete(user_name)
return "SUCCESS"
except:
return "ERR"
else:
return "NOT_EXISTS"
def calculate_hash(string):
return hashlib.sha256(string.encode()).hexdigest()
def create_new_user(user_name:str, password:str, email: str) -> str:
connection = pymysql.connect(**USER_DB_CONFIG)
hashed_password = calculate_hash(password)
out = "ERROR"
if user_in_database(user_name) == True:
return "EXISTS"
try:
with connection.cursor() as cursor:
query = f"INSERT INTO MAIN (userName, userPassword, email) VALUES('{user_name}', '{hashed_password}', '{email}')"
cursor.execute(query)
connection.commit()
out = "SUCCESS"
except:
print("An error in call to 'user_in_database' occupied")
finally:
connection.close()
return out
def authorise_user(user_name:str, user_pass:str) -> str:
if user_with_password_in_database(user_name, user_pass) == True:
return "SUCCESS"
else:
return "ERROR"
def verify_password(user_name: str, password: str) -> bool:
connection = pymysql.connect(**USER_DB_CONFIG)
hashed_password = calculate_hash(password)
try:
with connection.cursor() as cursor:
# Create a new record
query = f"SELECT 1 FROM MAIN WHERE userName = {user_name} AND userPassword={hashed_password}"
print(query)
cursor.execute(query)
result = cursor.fetchone()
connection.close()
print("QUERY RESULT IN verify_password, ",result)
print("QUERY RESULT LENGTH IN verify_password, ", len(result))
return len(result) > 0
except:
print("An error in call to 'user_in_database' occupied")
connection.close()
return False
def user_in_database(user_name:str) -> bool:
out = False
connection = pymysql.connect(**USER_DB_CONFIG)
try:
with connection.cursor() as cursor:
# Create a new record
query = f"SELECT 1 FROM MAIN WHERE userName = '{user_name}'"
cursor.execute(query)
result = cursor.fetchone()
if result != None:
out = True
except:
print("An error in call to 'user_in_database' occupied")
return False
finally:
connection.close()
return out
def userId_by_userName(userName:str) -> int:
connection = pymysql.connect(**USER_DB_CONFIG)
out = -1
try:
with connection.cursor() as cursor:
# Create a new record
query = f"SELECT * FROM MAIN WHERE userName = '{userName}'"
cursor.execute(query)
result = cursor.fetchone()
out = result[0]
print(result)
except:
print("An error in call to 'userId_by_userName' occupied")
finally:
connection.close()
return out
def user_with_password_in_database(user_name:str, password: str) -> bool:
out = False
connection = pymysql.connect(**USER_DB_CONFIG)
hashedPassword = calculate_hash(password)
try:
with connection.cursor() as cursor:
# Create a new record
query = f"SELECT * FROM MAIN WHERE userName = '{user_name}' AND userPassword='{hashedPassword}'"
cursor.execute(query)
result = cursor.fetchone()
if result != None:
out = True
except:
print("An error in call to 'user_with_password_in_database' occupied")
return False
finally:
connection.close()
return out
def check_if_user_admin(userName:str) -> bool:
if not user_in_database(userName):
return False
connection = pymysql.connect(**USER_DB_CONFIG)
result = "FUCK"
try:
with connection.cursor() as cursor:
print("CURSOR CREATED")
# Create a new record
query = f"SELECT verified FROM MAIN WHERE userName='{userName}'"
print(query)
cursor.execute(query)
result = cursor.fetchone()
print("RESULT", result)
connection.close()
verified = result[-1]
print("QUERY RESULT IN check_if_user_admin, ",result[-1])
print("QUERY RESULT LENGTH IN verify_password, ", len(result))
return verified == 1
except:
print("An error in call to 'check_if_user_admin' occupied")
print("RESULT", result)
connection.close()
return False
if __name__ == "__main__":
#database = redis.Redis(**REDIS_CONFIG)
#new_user_1 = "alex"
#new_user_2 = "max"
#token_1 = create_token(new_user_1, database)
#token_2 = create_token(new_user_2, database)
#print(os.environ.get("USER_DB_PASS"))
#database.delete(new_user_2)
#print(check_user_access_token_exists(new_user_2, database) )
print(userId_by_userName("alex"))
|
[] |
[] |
[
"USER_DB_PASS",
"DEV_MODE"
] |
[]
|
["USER_DB_PASS", "DEV_MODE"]
|
python
| 2 | 0 | |
tools/run_tests/run_performance_tests.py
|
#!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run performance tests locally or remotely."""
from __future__ import print_function
import argparse
import collections
import itertools
import json
import multiprocessing
import os
import pipes
import re
import subprocess
import sys
import tempfile
import time
import traceback
import uuid
import six
import performance.scenario_config as scenario_config
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_REMOTE_HOST_USERNAME = 'jenkins'
class QpsWorkerJob:
"""Encapsulates a qps worker server job."""
def __init__(self, spec, language, host_and_port, perf_file_base_name=None):
self._spec = spec
self.language = language
self.host_and_port = host_and_port
self._job = None
self.perf_file_base_name = perf_file_base_name
def start(self):
self._job = jobset.Job(
self._spec, newline_on_success=True, travis=True, add_env={})
def is_running(self):
"""Polls a job and returns True if given job is still running."""
return self._job and self._job.state() == jobset._RUNNING
def kill(self):
if self._job:
self._job.kill()
self._job = None
def create_qpsworker_job(language,
shortname=None,
port=10000,
remote_host=None,
perf_cmd=None):
cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port])
if remote_host:
host_and_port = '%s:%s' % (remote_host, port)
else:
host_and_port = 'localhost:%s' % port
perf_file_base_name = None
if perf_cmd:
perf_file_base_name = '%s-%s' % (host_and_port, shortname)
# specify -o output file so perf.data gets collected when worker stopped
cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name
] + cmdline
worker_timeout = 3 * 60
if remote_host:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
ssh_cmd = ['ssh']
cmdline = ['timeout', '%s' % (worker_timeout + 30)] + cmdline
ssh_cmd.extend([
str(user_at_host),
'cd ~/performance_workspace/grpc/ && python tools/run_tests/start_port_server.py && %s'
% ' '.join(cmdline)
])
cmdline = ssh_cmd
jobspec = jobset.JobSpec(
cmdline=cmdline,
shortname=shortname,
timeout_seconds=worker_timeout, # workers get restarted after each scenario
verbose_success=True)
return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
def create_scenario_jobspec(scenario_json,
workers,
remote_host=None,
bq_result_table=None,
server_cpu_load=0):
"""Runs one scenario using QPS driver."""
# setting QPS_WORKERS env variable here makes sure it works with SSH too.
cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
if bq_result_table:
cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
cmd += 'tools/run_tests/performance/run_qps_driver.sh '
cmd += '--scenarios_json=%s ' % pipes.quote(
json.dumps({
'scenarios': [scenario_json]
}))
cmd += '--scenario_result_file=scenario_result.json '
if server_cpu_load != 0:
cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
if remote_host:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
user_at_host, pipes.quote(cmd))
return jobset.JobSpec(
cmdline=[cmd],
shortname='qps_json_driver.%s' % scenario_json['name'],
timeout_seconds=12 * 60,
shell=True,
verbose_success=True)
def create_quit_jobspec(workers, remote_host=None):
"""Runs quit using QPS driver."""
# setting QPS_WORKERS env variable here makes sure it works with SSH too.
cmd = 'QPS_WORKERS="%s" bins/opt/qps_json_driver --quit' % ','.join(
w.host_and_port for w in workers)
if remote_host:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
user_at_host, pipes.quote(cmd))
return jobset.JobSpec(
cmdline=[cmd],
shortname='qps_json_driver.quit',
timeout_seconds=3 * 60,
shell=True,
verbose_success=True)
def create_netperf_jobspec(server_host='localhost',
client_host=None,
bq_result_table=None):
"""Runs netperf benchmark."""
cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
if bq_result_table:
cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
if client_host:
# If netperf is running remotely, the env variables populated by Jenkins
# won't be available on the client, but we need them for uploading results
# to BigQuery.
jenkins_job_name = os.getenv('JOB_NAME')
if jenkins_job_name:
cmd += 'JOB_NAME="%s" ' % jenkins_job_name
jenkins_build_number = os.getenv('BUILD_NUMBER')
if jenkins_build_number:
cmd += 'BUILD_NUMBER="%s" ' % jenkins_build_number
cmd += 'tools/run_tests/performance/run_netperf.sh'
if client_host:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, client_host)
cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
user_at_host, pipes.quote(cmd))
return jobset.JobSpec(
cmdline=[cmd],
shortname='netperf',
timeout_seconds=60,
shell=True,
verbose_success=True)
def archive_repo(languages):
"""Archives local version of repo including submodules."""
cmdline = ['tar', '-cf', '../grpc.tar', '../grpc/']
if 'java' in languages:
cmdline.append('../grpc-java')
if 'go' in languages:
cmdline.append('../grpc-go')
archive_job = jobset.JobSpec(
cmdline=cmdline, shortname='archive_repo', timeout_seconds=3 * 60)
jobset.message('START', 'Archiving local repository.', do_newline=True)
num_failures, _ = jobset.run(
[archive_job], newline_on_success=True, maxjobs=1)
if num_failures == 0:
jobset.message(
'SUCCESS',
'Archive with local repository created successfully.',
do_newline=True)
else:
jobset.message(
'FAILED', 'Failed to archive local repository.', do_newline=True)
sys.exit(1)
def prepare_remote_hosts(hosts, prepare_local=False):
"""Prepares remote hosts (and maybe prepare localhost as well)."""
prepare_timeout = 10 * 60
prepare_jobs = []
for host in hosts:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
prepare_jobs.append(
jobset.JobSpec(
cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
shortname='remote_host_prepare.%s' % host,
environ={'USER_AT_HOST': user_at_host},
timeout_seconds=prepare_timeout))
if prepare_local:
# Prepare localhost as well
prepare_jobs.append(
jobset.JobSpec(
cmdline=['tools/run_tests/performance/kill_workers.sh'],
shortname='local_prepare',
timeout_seconds=prepare_timeout))
jobset.message('START', 'Preparing hosts.', do_newline=True)
num_failures, _ = jobset.run(
prepare_jobs, newline_on_success=True, maxjobs=10)
if num_failures == 0:
jobset.message(
'SUCCESS', 'Prepare step completed successfully.', do_newline=True)
else:
jobset.message(
'FAILED', 'Failed to prepare remote hosts.', do_newline=True)
sys.exit(1)
def build_on_remote_hosts(hosts,
languages=scenario_config.LANGUAGES.keys(),
build_local=False):
"""Builds performance worker on remote hosts (and maybe also locally)."""
build_timeout = 15 * 60
# Kokoro VMs (which are local only) do not have caching, so they need more time to build
local_build_timeout = 30 * 60
build_jobs = []
for host in hosts:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
build_jobs.append(
jobset.JobSpec(
cmdline=['tools/run_tests/performance/remote_host_build.sh'] +
languages,
shortname='remote_host_build.%s' % host,
environ={'USER_AT_HOST': user_at_host,
'CONFIG': 'opt'},
timeout_seconds=build_timeout))
if build_local:
# Build locally as well
build_jobs.append(
jobset.JobSpec(
cmdline=['tools/run_tests/performance/build_performance.sh'] +
languages,
shortname='local_build',
environ={'CONFIG': 'opt'},
timeout_seconds=local_build_timeout))
jobset.message('START', 'Building.', do_newline=True)
num_failures, _ = jobset.run(
build_jobs, newline_on_success=True, maxjobs=10)
if num_failures == 0:
jobset.message('SUCCESS', 'Built successfully.', do_newline=True)
else:
jobset.message('FAILED', 'Build failed.', do_newline=True)
sys.exit(1)
def create_qpsworkers(languages, worker_hosts, perf_cmd=None):
"""Creates QPS workers (but does not start them)."""
if not worker_hosts:
# run two workers locally (for each language)
workers = [(None, 10000), (None, 10010)]
elif len(worker_hosts) == 1:
# run two workers on the remote host (for each language)
workers = [(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
else:
# run one worker per each remote host (for each language)
workers = [(worker_host, 10000) for worker_host in worker_hosts]
return [
create_qpsworker_job(
language,
shortname='qps_worker_%s_%s' % (language, worker_idx),
port=worker[1] + language.worker_port_offset(),
remote_host=worker[0],
perf_cmd=perf_cmd)
for language in languages for worker_idx, worker in enumerate(workers)
]
def perf_report_processor_job(worker_host, perf_base_name, output_filename,
flame_graph_reports):
print('Creating perf report collection job for %s' % worker_host)
cmd = ''
if worker_host != 'localhost':
user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%stools/run_tests/performance/process_remote_perf_flamegraphs.sh" % (
user_at_host, output_filename, flame_graph_reports, perf_base_name)
else:
cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%stools/run_tests/performance/process_local_perf_flamegraphs.sh" % (
output_filename, flame_graph_reports, perf_base_name)
return jobset.JobSpec(
cmdline=cmd,
timeout_seconds=3 * 60,
shell=True,
verbose_success=True,
shortname='process perf report')
Scenario = collections.namedtuple('Scenario', 'jobspec workers name')
def create_scenarios(languages,
workers_by_lang,
remote_host=None,
regex='.*',
category='all',
bq_result_table=None,
netperf=False,
netperf_hosts=[],
server_cpu_load=0):
"""Create jobspecs for scenarios to run."""
all_workers = [
worker for workers in workers_by_lang.values() for worker in workers
]
scenarios = []
_NO_WORKERS = []
if netperf:
if not netperf_hosts:
netperf_server = 'localhost'
netperf_client = None
elif len(netperf_hosts) == 1:
netperf_server = netperf_hosts[0]
netperf_client = netperf_hosts[0]
else:
netperf_server = netperf_hosts[0]
netperf_client = netperf_hosts[1]
scenarios.append(
Scenario(
create_netperf_jobspec(
server_host=netperf_server,
client_host=netperf_client,
bq_result_table=bq_result_table), _NO_WORKERS, 'netperf'))
for language in languages:
for scenario_json in language.scenarios():
if re.search(regex, scenario_json['name']):
categories = scenario_json.get('CATEGORIES',
['scalable', 'smoketest'])
if category in categories or category == 'all':
workers = workers_by_lang[str(language)][:]
# 'SERVER_LANGUAGE' is an indicator for this script to pick
# a server in different language.
custom_server_lang = scenario_json.get('SERVER_LANGUAGE',
None)
custom_client_lang = scenario_json.get('CLIENT_LANGUAGE',
None)
scenario_json = scenario_config.remove_nonproto_fields(
scenario_json)
if custom_server_lang and custom_client_lang:
raise Exception(
'Cannot set both custom CLIENT_LANGUAGE and SERVER_LANGUAGE '
'in the same scenario')
if custom_server_lang:
if not workers_by_lang.get(custom_server_lang, []):
print('Warning: Skipping scenario %s as' %
scenario_json['name'])
print(
'SERVER_LANGUAGE is set to %s yet the language has '
'not been selected with -l' %
custom_server_lang)
continue
for idx in range(0, scenario_json['num_servers']):
# replace first X workers by workers of a different language
workers[idx] = workers_by_lang[custom_server_lang][
idx]
if custom_client_lang:
if not workers_by_lang.get(custom_client_lang, []):
print('Warning: Skipping scenario %s as' %
scenario_json['name'])
print(
'CLIENT_LANGUAGE is set to %s yet the language has '
'not been selected with -l' %
custom_client_lang)
continue
for idx in range(scenario_json['num_servers'],
len(workers)):
# replace all client workers by workers of a different language,
# leave num_server workers as they are server workers.
workers[idx] = workers_by_lang[custom_client_lang][
idx]
scenario = Scenario(
create_scenario_jobspec(
scenario_json, [w.host_and_port for w in workers],
remote_host=remote_host,
bq_result_table=bq_result_table,
server_cpu_load=server_cpu_load), workers,
scenario_json['name'])
scenarios.append(scenario)
return scenarios
def finish_qps_workers(jobs, qpsworker_jobs):
"""Waits for given jobs to finish and eventually kills them."""
retries = 0
num_killed = 0
while any(job.is_running() for job in jobs):
for job in qpsworker_jobs:
if job.is_running():
print('QPS worker "%s" is still running.' % job.host_and_port)
if retries > 10:
print('Killing all QPS workers.')
for job in jobs:
job.kill()
num_killed += 1
retries += 1
time.sleep(3)
print('All QPS workers finished.')
return num_killed
profile_output_files = []
# Collect perf text reports and flamegraphs if perf_cmd was used
# Note the base names of perf text reports are used when creating and processing
# perf data. The scenario name makes the output name unique in the final
# perf reports directory.
# Also, the perf profiles need to be fetched and processed after each scenario
# in order to avoid clobbering the output files.
def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name,
flame_graph_reports):
perf_report_jobs = []
global profile_output_files
for host_and_port in hosts_and_base_names:
perf_base_name = hosts_and_base_names[host_and_port]
output_filename = '%s-%s' % (scenario_name, perf_base_name)
# from the base filename, create .svg output filename
host = host_and_port.split(':')[0]
profile_output_files.append('%s.svg' % output_filename)
perf_report_jobs.append(
perf_report_processor_job(host, perf_base_name, output_filename,
flame_graph_reports))
jobset.message(
'START', 'Collecting perf reports from qps workers', do_newline=True)
failures, _ = jobset.run(
perf_report_jobs, newline_on_success=True, maxjobs=1)
jobset.message(
'END', 'Collecting perf reports from qps workers', do_newline=True)
return failures
def main():
argp = argparse.ArgumentParser(description='Run performance tests.')
argp.add_argument(
'-l',
'--language',
choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
nargs='+',
required=True,
help='Languages to benchmark.')
argp.add_argument(
'--remote_driver_host',
default=None,
help='Run QPS driver on given host. By default, QPS driver is run locally.'
)
argp.add_argument(
'--remote_worker_host',
nargs='+',
default=[],
help='Worker hosts where to start QPS workers.')
argp.add_argument(
'--dry_run',
default=False,
action='store_const',
const=True,
help='Just list scenarios to be run, but don\'t run them.')
argp.add_argument(
'-r',
'--regex',
default='.*',
type=str,
help='Regex to select scenarios to run.')
argp.add_argument(
'--bq_result_table',
default=None,
type=str,
help='Bigquery "dataset.table" to upload results to.')
argp.add_argument(
'--category',
choices=['smoketest', 'all', 'scalable', 'sweep'],
default='all',
help='Select a category of tests to run.')
argp.add_argument(
'--netperf',
default=False,
action='store_const',
const=True,
help='Run netperf benchmark as one of the scenarios.')
argp.add_argument(
'--server_cpu_load',
default=0,
type=int,
help='Select a targeted server cpu load to run. 0 means ignore this flag'
)
argp.add_argument(
'-x',
'--xml_report',
default='report.xml',
type=str,
help='Name of XML report file to generate.')
argp.add_argument(
'--perf_args',
help=('Example usage: "--perf_args=record -F 99 -g". '
'Wrap QPS workers in a perf command '
'with the arguments to perf specified here. '
'".svg" flame graph profiles will be '
'created for each Qps Worker on each scenario. '
'Files will output to "<repo_root>/<args.flame_graph_reports>" '
'directory. Output files from running the worker '
'under perf are saved in the repo root where it is run. '
'Note that the perf "-g" flag is necessary for '
'flame graphs generation to work (assuming the binary '
'being profiled uses frame pointers, check out '
'"--call-graph dwarf" option using libunwind otherwise.) '
'Also note that the entire "--perf_args=<arg(s)>" must '
'be wrapped in quotes as in the example usage. '
'If the "--perg_args" is unspecified, "perf" will '
'not be used at all. '
'See http://www.brendangregg.com/perf.html '
'for more general perf examples.'))
argp.add_argument(
'--skip_generate_flamegraphs',
default=False,
action='store_const',
const=True,
help=('Turn flame graph generation off. '
'May be useful if "perf_args" arguments do not make sense for '
'generating flamegraphs (e.g., "--perf_args=stat ...")'))
argp.add_argument(
'-f',
'--flame_graph_reports',
default='perf_reports',
type=str,
help='Name of directory to output flame graph profiles to, if any are created.'
)
argp.add_argument(
'-u',
'--remote_host_username',
default='',
type=str,
help='Use a username that isn\'t "Jenkins" to SSH into remote workers.')
args = argp.parse_args()
global _REMOTE_HOST_USERNAME
if args.remote_host_username:
_REMOTE_HOST_USERNAME = args.remote_host_username
languages = set(
scenario_config.LANGUAGES[l]
for l in itertools.chain.from_iterable(
six.iterkeys(scenario_config.LANGUAGES) if x == 'all' else [x]
for x in args.language))
# Put together set of remote hosts where to run and build
remote_hosts = set()
if args.remote_worker_host:
for host in args.remote_worker_host:
remote_hosts.add(host)
if args.remote_driver_host:
remote_hosts.add(args.remote_driver_host)
if not args.dry_run:
if remote_hosts:
archive_repo(languages=[str(l) for l in languages])
prepare_remote_hosts(remote_hosts, prepare_local=True)
else:
prepare_remote_hosts([], prepare_local=True)
build_local = False
if not args.remote_driver_host:
build_local = True
if not args.dry_run:
build_on_remote_hosts(
remote_hosts,
languages=[str(l) for l in languages],
build_local=build_local)
perf_cmd = None
if args.perf_args:
print('Running workers under perf profiler')
# Expect /usr/bin/perf to be installed here, as is usual
perf_cmd = ['/usr/bin/perf']
perf_cmd.extend(re.split(r'\s+', args.perf_args))
qpsworker_jobs = create_qpsworkers(
languages, args.remote_worker_host, perf_cmd=perf_cmd)
# get list of worker addresses for each language.
workers_by_lang = dict([(str(language), []) for language in languages])
for job in qpsworker_jobs:
workers_by_lang[str(job.language)].append(job)
scenarios = create_scenarios(
languages,
workers_by_lang=workers_by_lang,
remote_host=args.remote_driver_host,
regex=args.regex,
category=args.category,
bq_result_table=args.bq_result_table,
netperf=args.netperf,
netperf_hosts=args.remote_worker_host,
server_cpu_load=args.server_cpu_load)
if not scenarios:
raise Exception('No scenarios to run')
total_scenario_failures = 0
qps_workers_killed = 0
merged_resultset = {}
perf_report_failures = 0
for scenario in scenarios:
if args.dry_run:
print(scenario.name)
else:
scenario_failures = 0
try:
for worker in scenario.workers:
worker.start()
jobs = [scenario.jobspec]
if scenario.workers:
jobs.append(
create_quit_jobspec(
scenario.workers,
remote_host=args.remote_driver_host))
scenario_failures, resultset = jobset.run(
jobs, newline_on_success=True, maxjobs=1)
total_scenario_failures += scenario_failures
merged_resultset = dict(
itertools.chain(
six.iteritems(merged_resultset),
six.iteritems(resultset)))
finally:
# Consider qps workers that need to be killed as failures
qps_workers_killed += finish_qps_workers(scenario.workers,
qpsworker_jobs)
if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
workers_and_base_names = {}
for worker in scenario.workers:
if not worker.perf_file_base_name:
raise Exception(
'using perf but perf report filename is unspecified')
workers_and_base_names[
worker.host_and_port] = worker.perf_file_base_name
perf_report_failures += run_collect_perf_profile_jobs(
workers_and_base_names, scenario.name,
args.flame_graph_reports)
# Still write the index.html even if some scenarios failed.
# 'profile_output_files' will only have names for scenarios that passed
if perf_cmd and not args.skip_generate_flamegraphs:
# write the index file to the output dir, with all profiles from all scenarios/workers
report_utils.render_perf_profiling_results(
'%s/index.html' % args.flame_graph_reports, profile_output_files)
report_utils.render_junit_xml_report(
merged_resultset, args.xml_report, suite_name='benchmarks')
if total_scenario_failures > 0 or qps_workers_killed > 0:
print('%s scenarios failed and %s qps worker jobs killed' %
(total_scenario_failures, qps_workers_killed))
sys.exit(1)
if perf_report_failures > 0:
print('%s perf profile collection jobs failed' % perf_report_failures)
sys.exit(1)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"JOB_NAME",
"BUILD_NUMBER"
] |
[]
|
["JOB_NAME", "BUILD_NUMBER"]
|
python
| 2 | 0 | |
walltalkie/wsgi.py
|
"""
WSGI config for walltalkie project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "walltalkie.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
reader/reader.go
|
package reader
import (
"math"
"os"
"runtime"
"strconv"
"strings"
"sync"
osmcache "imposm3/cache"
"imposm3/element"
"imposm3/geom/geos"
"imposm3/geom/limit"
"imposm3/logging"
"imposm3/mapping"
"imposm3/parser/pbf"
"imposm3/proj"
"imposm3/stats"
"imposm3/util"
)
var log = logging.NewLogger("reader")
var skipCoords, skipNodes, skipWays bool
var nParser, nWays, nRels, nNodes, nCoords int64
func init() {
if os.Getenv("IMPOSM_SKIP_COORDS") != "" {
skipCoords = true
}
if os.Getenv("IMPOSM_SKIP_NODES") != "" {
skipNodes = true
}
if os.Getenv("IMPOSM_SKIP_WAYS") != "" {
skipWays = true
}
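// Illustrative note (comment added for clarity): IMPOSM_READ_PROCS is read as
// colon-separated counts "parser:relations:ways:nodes", e.g. "4:1:2:2"; the
// coords reader count reuses the nodes value below.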
if procConf := os.Getenv("IMPOSM_READ_PROCS"); procConf != "" {
parts := strings.Split(procConf, ":")
nParser, _ = strconv.ParseInt(parts[0], 10, 32)
nRels, _ = strconv.ParseInt(parts[1], 10, 32)
nWays, _ = strconv.ParseInt(parts[2], 10, 32)
nNodes, _ = strconv.ParseInt(parts[3], 10, 32)
nCoords, _ = strconv.ParseInt(parts[3], 10, 32)
} else {
nParser, nRels, nWays, nNodes, nCoords = readersForCpus(runtime.NumCPU())
}
}
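// readersForCpus splits the available CPUs into parser and writer goroutine
// counts; e.g. (illustrative) 8 CPUs -> 6 parsers and 2 each for relations,
// ways, nodes and coords.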
func readersForCpus(cpus int) (int64, int64, int64, int64, int64) {
cpuf := float64(cpus)
return int64(math.Ceil(cpuf * 0.75)), int64(math.Ceil(cpuf * 0.25)), int64(math.Ceil(cpuf * 0.25)), int64(math.Ceil(cpuf * 0.25)), int64(math.Ceil(cpuf * 0.25))
}
func ReadPbf(cache *osmcache.OSMCache, progress *stats.Statistics,
tagmapping *mapping.Mapping, pbfFile *pbf.Pbf,
limiter *limit.Limiter,
) {
nodes := make(chan []element.Node, 4)
coords := make(chan []element.Node, 4)
ways := make(chan []element.Way, 4)
relations := make(chan []element.Relation, 4)
withLimiter := false
if limiter != nil {
withLimiter = true
}
if pbfFile.Header.Time.Unix() != 0 {
log.Printf("reading %s with data till %v", pbfFile.Filename, pbfFile.Header.Time.Local())
}
parser := pbf.NewParser(pbfFile, coords, nodes, ways, relations)
coordsSynced := make(chan bool)
coordsSync := util.NewSyncPoint(int(nCoords+nNodes), func() {
coordsSynced <- true
})
parser.NotifyWays(func() {
for i := 0; int64(i) < nCoords; i++ {
coords <- nil
}
for i := 0; int64(i) < nNodes; i++ {
nodes <- nil
}
<-coordsSynced
})
waysSynced := make(chan bool)
waysSync := util.NewSyncPoint(int(nWays), func() {
waysSynced <- true
})
parser.NotifyRelations(func() {
for i := 0; int64(i) < nWays; i++ {
ways <- nil
}
<-waysSynced
})
parser.Start()
waitWriter := sync.WaitGroup{}
for i := 0; int64(i) < nWays; i++ {
waitWriter.Add(1)
go func() {
var skip, hit int
m := tagmapping.WayTagFilter()
for ws := range ways {
if ws == nil {
waysSync.Sync()
continue
}
if skipWays {
continue
}
for i := range ws {
m.Filter(&ws[i].Tags)
if withLimiter {
if !cache.Coords.FirstRefIsCached(ws[i].Refs) {
ws[i].Id = osmcache.SKIP
skip += 1
} else {
hit += 1
}
}
}
cache.Ways.PutWays(ws)
progress.AddWays(len(ws))
}
waitWriter.Done()
}()
}
for i := 0; int64(i) < nRels; i++ {
waitWriter.Add(1)
go func() {
var skip, hit int
m := tagmapping.RelationTagFilter()
for rels := range relations {
numWithTags := 0
for i := range rels {
m.Filter(&rels[i].Tags)
if len(rels[i].Tags) > 0 {
numWithTags += 1
}
if withLimiter {
if !cache.Ways.FirstMemberIsCached(rels[i].Members) {
skip += 1
rels[i].Id = osmcache.SKIP
} else {
hit += 1
}
}
}
cache.Relations.PutRelations(rels)
progress.AddRelations(numWithTags)
}
waitWriter.Done()
}()
}
for i := 0; int64(i) < nCoords; i++ {
waitWriter.Add(1)
go func() {
var skip, hit int
g := geos.NewGeos()
defer g.Finish()
for nds := range coords {
if nds == nil {
coordsSync.Sync()
continue
}
if withLimiter {
for i := range nds {
nd := element.Node{Long: nds[i].Long, Lat: nds[i].Lat}
proj.NodeToMerc(&nd)
if !limiter.IntersectsBuffer(g, nd.Long, nd.Lat) {
skip += 1
nds[i].Id = osmcache.SKIP
} else {
hit += 1
}
}
}
cache.Coords.PutCoords(nds)
progress.AddCoords(len(nds))
}
waitWriter.Done()
}()
}
for i := 0; int64(i) < nNodes; i++ {
waitWriter.Add(1)
go func() {
g := geos.NewGeos()
defer g.Finish()
m := tagmapping.NodeTagFilter()
for nds := range nodes {
if nds == nil {
coordsSync.Sync()
continue
}
numWithTags := 0
for i := range nds {
m.Filter(&nds[i].Tags)
if len(nds[i].Tags) > 0 {
numWithTags += 1
}
if withLimiter {
nd := element.Node{Long: nds[i].Long, Lat: nds[i].Lat}
proj.NodeToMerc(&nd)
if !limiter.IntersectsBuffer(g, nd.Long, nd.Lat) {
nds[i].Id = osmcache.SKIP
}
}
}
cache.Nodes.PutNodes(nds)
progress.AddNodes(numWithTags)
}
waitWriter.Done()
}()
}
parser.Close()
close(relations)
close(ways)
close(nodes)
close(coords)
waitWriter.Wait()
}
|
[
"\"IMPOSM_SKIP_COORDS\"",
"\"IMPOSM_SKIP_NODES\"",
"\"IMPOSM_SKIP_WAYS\"",
"\"IMPOSM_READ_PROCS\""
] |
[] |
[
"IMPOSM_SKIP_COORDS",
"IMPOSM_SKIP_NODES",
"IMPOSM_READ_PROCS",
"IMPOSM_SKIP_WAYS"
] |
[]
|
["IMPOSM_SKIP_COORDS", "IMPOSM_SKIP_NODES", "IMPOSM_READ_PROCS", "IMPOSM_SKIP_WAYS"]
|
go
| 4 | 0 | |
customer_profile_test.go
|
package AuthorizeCIM
import (
"math/rand"
"os"
"testing"
"time"
)
var newCustomerProfileId string
var newCustomerPaymentId string
var newCustomerShippingId string
var newSecondCustomerProfileId string
func init() {
rand.Seed(time.Now().UnixNano())
}
func TestSetAPIInfo(t *testing.T) {
apiName := os.Getenv("AUTHORIZE_ID")
apiKey := os.Getenv("AUTHORIZE_TRANSACTION_KEY")
// apiMode := os.Getenv("AUTHORIZE_API_ENVIRONMENT")
SetAPIInfo(apiName, apiKey, "test")
t.Log("API Info Set")
}
func TestIsConnected(t *testing.T) {
authenticated, err := IsConnected()
if err != nil {
t.Fail()
}
if !authenticated {
t.Fail()
}
}
func TestCreateCustomerProfile(t *testing.T) {
customer := Profile{
MerchantCustomerID: RandomNumber(1000, 9999),
Email: "info@" + RandomString(8) + ".com",
PaymentProfiles: &PaymentProfiles{
CustomerType: "individual",
Payment: Payment{
CreditCard: CreditCard{
CardNumber: "4007000000027",
ExpirationDate: "10/26",
//CardCode: "384",
},
},
},
}
response, err := customer.CreateProfile()
if err != nil {
t.Fail()
}
if response.Ok() {
newCustomerProfileId = response.CustomerProfileID
t.Log("New Customer Profile Created #", response.CustomerProfileID)
} else {
t.Fail()
t.Log(response.ErrorMessage())
}
}
func TestGetProfileIds(t *testing.T) {
profiles, _ := GetProfileIds()
for _, p := range profiles {
t.Log("Profile ID #", p)
}
if len(profiles) == 0 {
t.Fail()
}
t.Log(profiles)
}
func TestUpdateCustomerProfile(t *testing.T) {
customer := Profile{
MerchantCustomerID: newCustomerProfileId,
CustomerProfileId: newCustomerProfileId,
Description: "Updated Account",
Email: "[email protected]",
}
response, err := customer.UpdateProfile()
if err != nil {
t.Fail()
}
if response.Ok() {
t.Log("Customer Profile was Updated")
} else {
t.Log(response.ErrorMessage())
t.Fail()
}
}
func TestCreateCustomerPaymentProfile(t *testing.T) {
paymentProfile := CustomerPaymentProfile{
CustomerProfileID: newCustomerProfileId,
PaymentProfile: PaymentProfile{
BillTo: &BillTo{
FirstName: "okokk",
LastName: "okok",
Address: "1111 white ct",
City: "los angeles",
Country: "USA",
PhoneNumber: "8885555555",
},
Payment: &Payment{
CreditCard: CreditCard{
CardNumber: "5424000000000015",
ExpirationDate: "04/22",
},
},
DefaultPaymentProfile: "true",
},
}
response, err := paymentProfile.Add()
if err != nil {
t.Fail()
}
if response.Ok() {
newCustomerPaymentId = response.CustomerPaymentProfileID
t.Log("Created new Payment Profile #", response.CustomerPaymentProfileID, "for Customer ID: ", response.CustomerProfileId)
} else {
t.Log(response.ErrorMessage())
t.Fail()
}
}
func TestGetCustomerPaymentProfile(t *testing.T) {
customer := Customer{
ID: newCustomerProfileId,
}
response, err := customer.Info()
if err != nil {
t.Fail()
}
paymentProfiles := response.PaymentProfiles()
t.Log("Customer Payment Profiles", paymentProfiles)
if len(paymentProfiles) == 0 {
t.Fail()
}
}
func TestGetCustomerPaymentProfileList(t *testing.T) {
profileIds, err := GetPaymentProfileIds("2020-03", "cardsExpiringInMonth")
if err != nil {
t.Fail()
}
t.Log(profileIds)
}
func TestValidateCustomerPaymentProfile(t *testing.T) {
customerProfile := Customer{
ID: newCustomerProfileId,
PaymentID: newCustomerPaymentId,
}
response, err := customerProfile.Validate()
if err != nil {
t.Fail()
}
if response.Ok() {
t.Log("Customer Payment Profile is VALID")
} else {
t.Log(response.ErrorMessage())
t.Fail()
}
}
func TestUpdateCustomerPaymentProfile(t *testing.T) {
customer := Profile{
CustomerProfileId: newCustomerProfileId,
PaymentProfileId: newCustomerPaymentId,
Description: "Updated Account",
Email: "info@" + RandomString(8) + ".com",
PaymentProfiles: &PaymentProfiles{
Payment: Payment{
CreditCard: CreditCard{
CardNumber: "4007000000027",
ExpirationDate: "01/26",
},
},
BillTo: &BillTo{
FirstName: "newname",
LastName: "golang",
Address: "2841 purple ct",
City: "los angeles",
State: "CA",
Country: "USA",
PhoneNumber: "8885555555",
},
},
}
response, err := customer.UpdatePaymentProfile()
if err != nil {
t.Fail()
}
if response.Ok() {
t.Log("Customer Payment Profile was Updated")
} else {
t.Log(response.ErrorMessage())
t.Fail()
}
}
func TestCreateCustomerShippingProfile(t *testing.T) {
customer := Profile{
MerchantCustomerID: "86437",
CustomerProfileId: newCustomerProfileId,
Email: "info@" + RandomString(8) + ".com",
Shipping: &Address{
FirstName: "My",
LastName: "Name",
Company: "none",
Address: "1111 yellow ave.",
City: "Los Angeles",
State: "CA",
Zip: "92039",
Country: "USA",
PhoneNumber: "8885555555",
},
}
response, err := customer.CreateShipping()
if err != nil {
t.Fail()
}
if response.Ok() {
newCustomerShippingId = response.CustomerAddressID
t.Log("New Shipping Added: #", response.CustomerAddressID)
} else {
t.Log(response.ErrorMessage())
t.Fail()
}
}
func TestGetCustomerShippingProfile(t *testing.T) {
customer := Customer{
ID: newCustomerProfileId,
}
response, err := customer.Info()
if err != nil {
t.Fail()
}
shippingProfiles := response.ShippingProfiles()
t.Log("Customer Shipping Profiles", shippingProfiles)
if shippingProfiles[0].Zip != "92039" {
t.Fail()
}
}
func TestUpdateCustomerShippingProfile(t *testing.T) {
customer := Profile{
CustomerProfileId: newCustomerProfileId,
CustomerAddressId: newCustomerShippingId,
Shipping: &Address{
FirstName: "My",
LastName: "Name",
Company: "none",
Address: "1111 yellow ave.",
City: "Los Angeles",
State: "CA",
Zip: "92039",
Country: "USA",
PhoneNumber: "8885555555",
},
}
response, err := customer.UpdateShippingProfile()
if err != nil {
t.Fail()
}
if response.Ok() {
t.Log("Shipping Address Profile was updated")
} else {
t.Log(response.ErrorMessage())
t.Fail()
}
}
func TestAcceptProfilePage(t *testing.T) {
}
func TestCreateCustomerProfileFromTransaction(t *testing.T) {
}
func TestCreateSubscriptionCustomerProfile(t *testing.T) {
amount := RandomNumber(5, 99) + "." + RandomNumber(10, 99)
subscription := Subscription{
Name: "New Customer Profile Subscription",
Amount: amount,
//TrialAmount: "0.00",
PaymentSchedule: &PaymentSchedule{
StartDate: CurrentDate(),
TotalOccurrences: "9999",
//TrialOccurrences: "0",
Interval: IntervalMonthly(),
},
Profile: &CustomerProfiler{
CustomerProfileID: newCustomerProfileId,
CustomerPaymentProfileID: newCustomerPaymentId,
CustomerShippingProfileID: newCustomerShippingId,
},
}
response, err := subscription.Charge()
if err != nil {
t.Fail()
}
if response.Approved() {
newSubscriptionId = response.SubscriptionID
t.Log("Customer #", response.CustomerProfileId(), " Created a New Subscription: ", response.SubscriptionID)
} else {
t.Log(response.ErrorMessage(), "\n")
t.Fail()
}
}
func TestGetCustomerProfile(t *testing.T) {
customer := Customer{
ID: newCustomerProfileId,
}
response, err := customer.Info()
if err != nil {
t.Fail()
}
paymentProfiles := response.PaymentProfiles()
shippingProfiles := response.ShippingProfiles()
subscriptions := response.Subscriptions()
t.Log("Customer Profile", response)
t.Log("Customer Payment Profiles", paymentProfiles)
t.Log("Customer Shipping Profiles", shippingProfiles)
t.Log("Customer Subscription IDs", subscriptions)
}
|
[
"\"AUTHORIZE_ID\"",
"\"AUTHORIZE_TRANSACTION_KEY\"",
"\"AUTHORIZE_API_ENVIRONMENT\""
] |
[] |
[
"AUTHORIZE_ID",
"AUTHORIZE_TRANSACTION_KEY",
"AUTHORIZE_API_ENVIRONMENT"
] |
[]
|
["AUTHORIZE_ID", "AUTHORIZE_TRANSACTION_KEY", "AUTHORIZE_API_ENVIRONMENT"]
|
go
| 3 | 0 | |
tunnel/proxy/proxy.go
|
package proxy
import (
"fmt"
"net/http"
"net/http/httputil"
"net/url"
"os"
)
var (
token = os.Getenv("TOKEN")
)
func Send(w http.ResponseWriter, r *http.Request) {
// check the auth token
x := r.Header.Get("Micro-Token")
if x != token {
http.Error(w, "unauthorized", 401)
return
}
// don't forward the token
r.Header.Del("Micro-Token")
// get the endpoint
u := r.Header.Get("Micro-Endpoint")
if len(u) == 0 {
return
}
// delete the endpoint header
r.Header.Del("Micro-Endpoint")
// parse the request url
uri, _ := url.Parse(u)
r.Host = uri.Host
r.URL.Host = uri.Host
r.URL.Scheme = uri.Scheme
// reverse proxy the request
fmt.Printf("Proxying request to: %v", uri.String())
// proxy the request
proxy := httputil.NewSingleHostReverseProxy(r.URL)
proxy.ServeHTTP(w, r)
}
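// Illustrative usage sketch (comments only, not part of the original file):
// the handler can be mounted on the default mux and exercised with the two
// headers it expects; the listen address and endpoint below are assumptions.
//
//	http.HandleFunc("/", proxy.Send)
//	log.Fatal(http.ListenAndServe(":8080", nil))
//	// curl -H "Micro-Token: $TOKEN" -H "Micro-Endpoint: https://example.com/api" localhost:8080/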
|
[
"\"TOKEN\""
] |
[] |
[
"TOKEN"
] |
[]
|
["TOKEN"]
|
go
| 1 | 0 | |
wildcard/wsgi/django.wsgi
|
import logging
import os
import sys
import django.core.handlers.wsgi
from django.conf import settings
# Add this file path to sys.path in order to import settings
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '../..'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'wildcard.settings'
sys.stdout = sys.stderr
DEBUG = False
application = django.core.handlers.wsgi.WSGIHandler()
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
src/batou/environment.py
|
import glob
import json
import os
import os.path
import sys
from configparser import RawConfigParser
from importlib_metadata import entry_points
import batou.c
import batou.utils
import batou.vfs
from batou import (
ComponentLoadingError,
ConfigurationError,
CycleErrorDetected,
DuplicateHostError,
DuplicateHostMapping,
InvalidIPAddressError,
MissingComponent,
MissingEnvironment,
MultipleEnvironmentConfigs,
NonConvergingWorkingSet,
SuperfluousComponentSection,
SuperfluousSection,
UnknownComponentConfigurationError,
UnsatisfiedResources,
UnusedResources,
)
from batou._output import output
from batou.component import RootComponent
from batou.repository import Repository
from batou.utils import CycleError, cmd
from .component import load_components_from_file
from .host import Host, LocalHost, RemoteHost
from .resources import Resources
from .secrets import add_secrets_to_environment
class ConfigSection(dict):
def as_list(self, option):
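# Illustrative behaviour (comment added for clarity, not in the original file):
#   'app, db'  -> ['app', 'db']
#   'app\ndb'  -> ['app', 'db']
#   'app'      -> ['app']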
result = self[option]
if "," in result:
result = [x.strip() for x in result.split(",")]
elif "\n" in result:
result = (x.strip() for x in result.split("\n"))
result = [x for x in result if x]
else:
result = [result]
return result
class Config(object):
def __init__(self, path):
config = RawConfigParser()
config.optionxform = lambda s: s
if path: # Test support
config.read(path)
self.config = config
def __contains__(self, section):
return self.config.has_section(section)
def __getitem__(self, section):
if section not in self:
raise KeyError(section)
return ConfigSection((x, self.config.get(section, x))
for x in self.config.options(section))
def __iter__(self):
return iter(self.config.sections())
def get(self, section, default=None):
try:
return self[section]
except KeyError:
return default
class Environment(object):
"""An environment assigns components to hosts and provides
environment-specific configuration for components.
"""
service_user = None
host_domain = None
branch = None
connect_method = None
update_method = None
platform = None
vfs_sandbox = None
timeout = None
target_directory = None
jobs = None
repository_url = None
repository_root = None
provision_rebuild = False
host_factory = Host
def __init__(self,
name,
timeout=None,
platform=None,
basedir=".",
provision_rebuild=False):
self.name = name
self.hosts = {}
self.resources = Resources()
self.overrides = {}
self.secret_data = set()
self.exceptions = []
self.timeout = timeout
self.platform = platform
self.provision_rebuild = provision_rebuild
self.hostname_mapping = {}
# These are the component classes, decorated with their
# name.
self.components = {}
# These are the components assigned to hosts.
self.root_components = []
self.base_dir = os.path.abspath(basedir)
self.workdir_base = os.path.join(self.base_dir, "work")
# Additional secrets files as placed in secrets/<env>-<name>
self.secret_files = {}
self.provisioners = {}
def _environment_path(self, path='.'):
return os.path.abspath(
os.path.join(self.base_dir, 'environments', self.name, path))
def _ensure_environment_dir(self):
if not os.path.isdir(self._environment_path()):
os.makedirs(self._environment_path())
def load(self):
batou.utils.resolve_override.clear()
batou.utils.resolve_v6_override.clear()
existing_configs = []
for candidate in [
"environments/{}.cfg", "environments/{}/environment.cfg"]:
candidate = os.path.join(self.base_dir,
candidate.format(self.name))
if os.path.isfile(candidate):
existing_configs.append(candidate)
if not existing_configs:
raise MissingEnvironment(self)
elif len(existing_configs) > 1:
raise MultipleEnvironmentConfigs(self, existing_configs)
config_file = existing_configs[0]
mapping_file = self._environment_path('hostmap.json')
if os.path.exists(mapping_file):
with open(mapping_file, 'r') as f:
for k, v in json.load(f).items():
if k in self.hostname_mapping:
raise DuplicateHostMapping(k, v,
self.hostname_mapping[k])
self.hostname_mapping[k] = v
# Scan all components
for filename in sorted(
glob.glob(
os.path.join(self.base_dir, "components/*/component.py"))):
try:
self.components.update(load_components_from_file(filename))
except Exception as e:
self.exceptions.append(ComponentLoadingError(filename, e))
config = Config(config_file)
self.load_environment(config)
self.load_provisioners(config)
self.load_hosts(config)
self.load_resolver(config)
# load overrides
for section in config:
if section.startswith("host:"):
continue
if section.startswith('provisioner:'):
continue
if not section.startswith("component:"):
if section not in ["hosts", "environment", "vfs", "resolver"]:
self.exceptions.append(SuperfluousSection(section))
continue
root_name = section.replace("component:", "")
if root_name not in self.components:
self.exceptions.append(SuperfluousComponentSection(root_name))
continue
self.overrides.setdefault(root_name, {})
self.overrides[root_name].update(config[section])
self.repository = Repository.from_environment(self)
# The deployment base is the path relative to the
# repository where batou is located (with ./batou,
# ./environments, and ./components)
if self.connect_method == "local":
self.target_directory = self.repository.root
self.deployment_base = os.path.relpath(self.base_dir,
self.repository.root)
def load_secrets(self):
add_secrets_to_environment(self)
def load_environment(self, config):
environment = config.get("environment", {})
for key in [
"service_user",
"host_domain",
"target_directory",
"connect_method",
"update_method",
"branch",
"platform",
"timeout",
"repository_url",
"repository_root",
"jobs", ]:
if key not in environment:
continue
if getattr(self, key) is not None:
# Avoid overriding early changes that have already been
# applied, e.g. by tests.
continue
setattr(self, key, environment[key])
self._set_defaults()
if "vfs" in config:
sandbox = config["vfs"]["sandbox"]
sandbox = getattr(batou.vfs, sandbox)(self, config["vfs"])
self.vfs_sandbox = sandbox
if self.connect_method == "local":
self.host_factory = LocalHost
else:
self.host_factory = RemoteHost
def load_resolver(self, config):
resolver = config.get("resolver", {})
self._resolve_override = v4 = {}
self._resolve_v6_override = v6 = {}
for key, value in list(resolver.items()):
for ip in value.splitlines():
ip = ip.strip()
if not ip:
continue
if "." in ip:
v4[key] = ip
elif ":" in ip:
v6[key] = ip
else:
self.exceptions.append(InvalidIPAddressError(ip))
batou.utils.resolve_override.update(v4)
batou.utils.resolve_v6_override.update(v6)
def load_provisioners(self, config):
self.provisioners = {}
for section in config:
if not section.startswith('provisioner:'):
continue
name = section.replace('provisioner:', '')
method = config[section]['method']
factory = entry_points(group='batou.provisioners')[method].load()
provisioner = factory.from_config_section(name, config[section])
provisioner.rebuild = self.provision_rebuild
self.provisioners[name] = provisioner
def load_hosts(self, config):
self._load_hosts_single_section(config)
self._load_hosts_multi_section(config)
if self.hostname_mapping:
self._ensure_environment_dir()
mapping_file = self._environment_path('hostmap.json')
with open(mapping_file, 'w') as f:
json.dump(self.hostname_mapping, f)
def _load_hosts_single_section(self, config):
for literal_hostname in config.get("hosts", {}):
hostname = literal_hostname.lstrip("!")
host = self.host_factory(
hostname,
self,
config={
'ignore':
'True' if literal_hostname.startswith("!") else 'False'})
self.hosts[host.name] = host
self._load_host_components(
host, config["hosts"].as_list(literal_hostname))
def _load_hosts_multi_section(self, config):
for section in config:
if not section.startswith("host:"):
continue
hostname = section.replace("host:", "", 1)
host = self.host_factory(hostname, self, config[section])
# The name can now have been remapped.
if host.name in self.hosts:
self.exceptions.append(DuplicateHostError(host.name))
self.hosts[host.name] = host
self._load_host_components(host,
config[section].as_list("components"))
def _load_host_components(self, host, component_list):
components = parse_host_components(component_list)
for component, settings in list(components.items()):
try:
self.add_root(component, host, settings["features"],
settings["ignore"])
except KeyError:
self.exceptions.append(MissingComponent(component, host.name))
def _set_defaults(self):
if self.update_method is None:
self.update_method = "rsync"
if self.connect_method is None:
self.connect_method = "ssh"
if self.target_directory is None:
self.target_directory = "~/deployment"
if self.platform is None and self.host_domain:
self.platform = self.host_domain
if self.timeout is None:
self.timeout = 3
else:
self.timeout = int(self.timeout)
# API to instrument environment config loading
def get_host(self, hostname):
return self.hosts[self.hostname_mapping.get(hostname, hostname)]
def add_root(self, component_name, host, features=(), ignore=False):
compdef = self.components[component_name]
root = RootComponent(
name=compdef.name,
environment=self,
host=host,
features=features,
ignore=ignore,
factory=compdef.factory,
defdir=compdef.defdir,
workdir=os.path.join(self.workdir_base, compdef.name))
self.root_components.append(root)
return root
def get_root(self, component_name, host):
for root in self.root_components:
if root.host == host and root.name == component_name:
return root
raise KeyError("Component {} not configured for host {}".format(
component_name, host.name))
def prepare_connect(self):
if self.connect_method == "vagrant":
output.step("vagrant", "Ensuring machines are up ...")
cmd("vagrant up")
elif self.connect_method == "kitchen":
output.step("kitchen", "Ensuring machines are up ...")
for fqdn in self.hosts:
cmd("kitchen create {}".format(fqdn))
if "BATOU_POST_KITCHEN_CREATE_CMD" in os.environ:
cmd("kitchen exec -c '{}'".format(
os.environ["BATOU_POST_KITCHEN_CREATE_CMD"]))
# Deployment API (implements the configure-verify-update cycle)
def configure(self):
"""Configure all root components.
Monitor the dependencies between resources and try to reach a stable
order.
"""
working_set = set(self.root_components)
previous_working_sets = []
exceptions = []
order = []
root_dependencies = None
while working_set:
exceptions = []
previous_working_sets.append(working_set.copy())
retry = set()
self.resources.dirty_dependencies.clear()
for root in working_set:
try:
self.resources.reset_component_resources(root)
root.overrides = self.overrides.get(root.name, {})
root.prepare()
except ConfigurationError as e:
# A known exception which we can report gracefully later.
exceptions.append(e)
retry.add(root)
except Exception as e:
# An unknown exception which we have to work harder
# to report gracefully.
ex_type, ex, tb = sys.exc_info()
exceptions.append(
UnknownComponentConfigurationError(root, e, tb))
retry.add(root)
retry.update(self.resources.dirty_dependencies)
retry.update(self.resources.unsatisfied_components)
# Try to find a valid order of the components. If we can't then we
# have detected a dependency cycle and need to stop.
root_dependencies = self.root_dependencies()
try:
order = batou.utils.topological_sort(
batou.utils.revert_graph(root_dependencies))
except CycleError as e:
exceptions.append(CycleErrorDetected(e))
if retry in previous_working_sets:
# If any resources were required, now is the time to report
# them.
if self.resources.unsatisfied:
exceptions.append(
UnsatisfiedResources(
self.resources.unsatisfied_keys_and_components))
# We did not manage to improve on our last working set, so we
# give up.
exceptions.append(NonConvergingWorkingSet(retry))
break
working_set = retry
# We managed to converge on a working set. However, some resource were
# provided but never used. We're rather picky here and report this as
# an error.
if self.resources.unused:
exceptions.append(UnusedResources(self.resources.unused))
for root in order:
root.log_finish_configure()
self.exceptions.extend(exceptions)
if self.exceptions:
# We just raise here to support a reasonable flow
# for our caller. We expect him to look at our exceptions
# attribute anyway.
raise self.exceptions[0]
def root_dependencies(self, host=None):
"""Return all roots (host/component) with their direct dependencies.
This can be used as a "todo" list where all things that have no
dependencies left can be started to be worked on.
"""
dependencies = self.resources.get_dependency_graph()
# Complete the graph with components that do not have any dependencies
# (yet)
for root in self.root_components:
if root not in dependencies:
dependencies[root] = set()
if host is not None:
for root in list(dependencies):
if root.host.fqdn != host:
del dependencies[root]
return dependencies
def map(self, path):
if self.vfs_sandbox:
return self.vfs_sandbox.map(path)
return path
def components_for(self, host):
"""Return component names for given host name"""
result = {}
for component in self.root_components:
if component.host is host:
result[component.name] = component
return result
def _host_data(self):
host_data = {}
for hostname, host in self.hosts.items():
host_data[hostname] = host.data
return host_data
def parse_host_components(components):
"""Parse a component list as given in an environment config for a host
into a dict of dicts:
{'name': {'features': [], 'ignore': False}}
If one component is ignored, then the whole set of component features
is ignored.
Expected syntax:
[!]component[:feature], component[:feature]
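Example (illustrative, not from the original docstring):
['app:web', '!db'] ->
{'app': {'features': ['web'], 'ignore': False},
'db': {'features': [], 'ignore': True}}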
"""
result = {}
for name in components:
name = name.strip()
if ":" in name:
name, feature = name.split(":", 1)
else:
feature = None
ignore = name.startswith("!")
name = name.lstrip("!")
result.setdefault(name, {"features": [], "ignore": False})
result[name]["ignore"] |= ignore
if feature:
result[name]["features"].append(feature)
return result
|
[] |
[] |
[
"BATOU_POST_KITCHEN_CREATE_CMD"
] |
[]
|
["BATOU_POST_KITCHEN_CREATE_CMD"]
|
python
| 1 | 0 | |
demo/demo_appapi_ranking.py
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
import sys
from pixivpy_async import AppPixivAPI
sys.dont_write_bytecode = True
_USERNAME = "userbay"
_PASSWORD = "UserPay"
_TOKEN = "uXooTT7xz9v4mflnZqJUO7po9W5ciouhKrIDnI2Dv3c"
async def appapi_ranking(aapi):
json_result = await aapi.illust_ranking('day_male')
# print(json_result)
illust = json_result.illusts[0]
print(">>> %s, origin url: %s" % (illust.title, illust.image_urls['large']))
# get next page
next_qs = aapi.parse_qs(json_result.next_url)
json_result = await aapi.illust_ranking(**next_qs)
# print(json_result)
illust = json_result.illusts[0]
print(">>> %s, origin url: %s" % (illust.title, illust.image_urls['large']))
# 'week' ranking for the week leading up to 2016-07-15
json_result = await aapi.illust_ranking('week', date='2016-07-15')
# print(json_result)
illust = json_result.illusts[0]
print(">>> %s, origin url: %s" % (illust.title, illust.image_urls['large']))
async def _login(aapi):
# await aapi.login(_USERNAME, _PASSWORD)
await aapi.login(refresh_token=_TOKEN)
async def _main(aapi):
await _login(aapi)
await asyncio.gather(
appapi_ranking(aapi)
)
def main():
loop = asyncio.get_event_loop()
loop.run_until_complete(_main(AppPixivAPI()))
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
test/functional/test_framework/test_framework.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Finalcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import configparser
from enum import Enum
import argparse
import logging
import os
import pdb
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
from typing import List
from .address import ADDRESS_BCRT1_P2WSH_OP_TRUE
from .authproxy import JSONRPCException
from . import coverage
from .p2p import NetworkThread
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
get_datadir_path,
initialize_datadir,
p2p_port,
wait_until_helper,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
TMPDIR_PREFIX = "finalcoin_func_test_"
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class FinalcoinTestMetaClass(type):
"""Metaclass for FinalcoinTestFramework.
Ensures that any attempt to register a subclass of `FinalcoinTestFramework`
adheres to a standard whereby the subclass overrides `set_test_params` and
`run_test` but DOES NOT override either `__init__` or `main`. If any of
those standards are violated, a ``TypeError`` is raised."""
def __new__(cls, clsname, bases, dct):
if not clsname == 'FinalcoinTestFramework':
if not ('run_test' in dct and 'set_test_params' in dct):
raise TypeError("FinalcoinTestFramework subclasses must override "
"'run_test' and 'set_test_params'")
if '__init__' in dct or 'main' in dct:
raise TypeError("FinalcoinTestFramework subclasses may not override "
"'__init__' or 'main'")
return super().__new__(cls, clsname, bases, dct)
class FinalcoinTestFramework(metaclass=FinalcoinTestMetaClass):
"""Base class for a finalcoin test script.
Individual finalcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
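# Minimal illustrative subclass (comment sketch, not part of this file):
#
#   class ExampleTest(FinalcoinTestFramework):
#       def set_test_params(self):
#           self.num_nodes = 1
#       def run_test(self):
#           self.log.info("Node 0 block count: %d" % self.nodes[0].getblockcount())
#
#   if __name__ == '__main__':
#       ExampleTest().main()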
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.chain: str = 'regtest'
self.setup_clean_chain: bool = False
self.nodes: List[TestNode] = []
self.network_thread = None
self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond
self.supports_cli = True
self.bind_to_localhost_only = True
self.parse_args()
self.disable_syscall_sandbox = self.options.nosandbox
self.default_wallet_name = "default_wallet" if self.options.descriptors else ""
self.wallet_data_filename = "wallet.dat"
# Optional list of wallet names that can be set in set_test_params to
# create and import keys to. If unset, default is len(nodes) *
# [default_wallet_name]. If wallet names are None, wallet creation is
# skipped. If list is truncated, wallet creation is skipped and keys
# are not imported.
self.wallet_names = None
# By default the wallet is not required. Set to true by skip_if_no_wallet().
# When False, we ignore wallet_names regardless of what it is.
self.requires_wallet = False
# Disable ThreadOpenConnections by default, so that adding entries to
# addrman will not result in automatic connections to them.
self.disable_autoconnect = True
self.set_test_params()
assert self.wallet_names is None or len(self.wallet_names) <= self.num_nodes
if self.options.timeout_factor == 0:
self.options.timeout_factor = 99999
self.rpc_timeout = int(self.rpc_timeout * self.options.timeout_factor) # optionally, increase timeout by a factor
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
try:
self.setup()
self.run_test()
except JSONRPCException:
self.log.exception("JSONRPC error")
self.success = TestStatus.FAILED
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
self.success = TestStatus.SKIPPED
except AssertionError:
self.log.exception("Assertion failed")
self.success = TestStatus.FAILED
except KeyError:
self.log.exception("Key error")
self.success = TestStatus.FAILED
except subprocess.CalledProcessError as e:
self.log.exception("Called Process failed with '{}'".format(e.output))
self.success = TestStatus.FAILED
except Exception:
self.log.exception("Unexpected exception caught during testing")
self.success = TestStatus.FAILED
except KeyboardInterrupt:
self.log.warning("Exiting after keyboard interrupt")
self.success = TestStatus.FAILED
finally:
exit_code = self.shutdown()
sys.exit(exit_code)
def parse_args(self):
previous_releases_path = os.getenv("PREVIOUS_RELEASES_DIR") or os.getcwd() + "/releases"
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave finalcoinds and test.* datadir on exit or error")
parser.add_argument("--nosandbox", dest="nosandbox", default=False, action="store_true",
help="Don't use the syscall sandbox")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop finalcoinds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--previous-releases", dest="prev_releases", action="store_true",
default=os.path.isdir(previous_releases_path) and bool(os.listdir(previous_releases_path)),
help="Force test of previous releases (default: %(default)s)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile",
default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use finalcoin-cli instead of RPC for all commands")
parser.add_argument("--perf", dest="perf", default=False, action="store_true",
help="profile running nodes with perf for the duration of the test")
parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true",
help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required")
parser.add_argument("--randomseed", type=int,
help="set a random seed for deterministically reproducing a previous test run")
parser.add_argument('--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. Setting it to 0 disables all timeouts')
group = parser.add_mutually_exclusive_group()
group.add_argument("--descriptors", action='store_const', const=True,
help="Run test using a descriptor wallet", dest='descriptors')
group.add_argument("--legacy-wallet", action='store_const', const=False,
help="Run test using legacy wallets", dest='descriptors')
self.add_options(parser)
        # Running TestShell in a Jupyter notebook causes an additional -f argument to be passed.
        # To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument.
# source: https://stackoverflow.com/questions/48796169/how-to-fix-ipykernel-launcher-py-error-unrecognized-arguments-in-jupyter/56349168#56349168
parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1")
self.options = parser.parse_args()
self.options.previous_releases_path = previous_releases_path
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.config = config
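        # Determine the default wallet type when neither --descriptors nor --legacy-wallet was
        # given. Net effect of the logic below (summary): --descriptors -> True,
        # --legacy-wallet -> False; with neither flag, False if BDB is compiled, True if only
        # sqlite is compiled, and None if no wallet backend is compiled at all.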
if self.options.descriptors is None:
# Prefer BDB unless it isn't available
if self.is_bdb_compiled():
self.options.descriptors = False
elif self.is_sqlite_compiled():
self.options.descriptors = True
else:
                # If neither is compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter.
                # It still needs to exist and be None, however, for the tests to run.
self.options.descriptors = None
def setup(self):
"""Call this method to start up the test framework object with options set."""
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = self.config
fname_finalcoind = os.path.join(
config["environment"]["BUILDDIR"],
"src",
"finalcoind" + config["environment"]["EXEEXT"],
)
fname_finalcoincli = os.path.join(
config["environment"]["BUILDDIR"],
"src",
"finalcoin-cli" + config["environment"]["EXEEXT"],
)
self.options.finalcoind = os.getenv("FINALCOIND", default=fname_finalcoind)
self.options.finalcoincli = os.getenv("FINALCOINCLI", default=fname_finalcoincli)
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'), os.environ['PATH']
])
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
# Seed the PRNG. Note that test runs are reproducible if and only if
# a single thread accesses the PRNG. For more information, see
# https://docs.python.org/3/library/random.html#notes-on-reproducibility.
# The network thread shouldn't access random. If we need to change the
# network thread to access randomness, it should instantiate its own
# random.Random object.
seed = self.options.randomseed
if seed is None:
seed = random.randrange(sys.maxsize)
else:
self.log.debug("User supplied random seed {}".format(seed))
random.seed(seed)
self.log.debug("PRNG seed is: {}".format(seed))
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
if self.options.usecli:
if not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.skip_if_no_cli()
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.success = TestStatus.PASSED
def shutdown(self):
"""Call this method to shut down the test framework object."""
if self.success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: finalcoinds were not stopped and may still be running")
should_clean_up = (
not self.options.nocleanup and
not self.options.noshutdown and
self.success != TestStatus.FAILED and
not self.options.perf
)
if should_clean_up:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
elif self.options.perf:
self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
cleanup_tree_on_exit = False
else:
self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
cleanup_tree_on_exit = False
if self.success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif self.success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("")
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
self.log.error("")
self.log.error("If this failure happened unexpectedly or intermittently, please file a bug and provide a link or upload of the combined log.")
self.log.error(self.config['environment']['PACKAGE_BUGREPORT'])
self.log.error("")
exit_code = TEST_EXIT_FAILED
# Logging.shutdown will not remove stream- and filehandlers, so we must
# do it explicitly. Handlers are removed so the next test run can apply
# different log handler settings.
# See: https://docs.python.org/3/library/logging.html#logging.shutdown
for h in list(self.log.handlers):
h.flush()
h.close()
self.log.removeHandler(h)
rpc_logger = logging.getLogger("FinalcoinRPC")
for h in list(rpc_logger.handlers):
h.flush()
rpc_logger.removeHandler(h)
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
self.nodes.clear()
return exit_code
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must override this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
#
# Topology looks like this:
# node0 <-- node1 <-- node2 <-- node3
#
# If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
# ensure block propagation, all nodes will establish outgoing connections toward node0.
# See fPreferredDownload in net_processing.
#
# If further outbound connections are needed, they can be added at the beginning of the test with e.g.
# self.connect_nodes(1, 2)
for i in range(self.num_nodes - 1):
self.connect_nodes(i + 1, i)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = [[]] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
if self.requires_wallet:
self.import_deterministic_coinbase_privkeys()
if not self.setup_clean_chain:
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# To ensure that all nodes are out of IBD, the most recent block
# must have a timestamp not too old (see IsInitialBlockDownload()).
self.log.debug('Generate a block with current time')
block_hash = self.generate(self.nodes[0], 1)[0]
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
for n in self.nodes:
n.submitblock(block)
chain_info = n.getblockchaininfo()
assert_equal(chain_info["blocks"], 200)
assert_equal(chain_info["initialblockdownload"], False)
def import_deterministic_coinbase_privkeys(self):
for i in range(self.num_nodes):
self.init_wallet(i)
def init_wallet(self, i):
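        # Wallet selection (descriptive note): use the default wallet name when the test did not
        # set self.wallet_names; otherwise use the i-th entry, or skip wallet creation entirely
        # (wallet_name stays False) for nodes beyond the end of the list.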
wallet_name = self.default_wallet_name if self.wallet_names is None else self.wallet_names[i] if i < len(self.wallet_names) else False
if wallet_name is not False:
n = self.nodes[i]
if wallet_name is not None:
n.createwallet(wallet_name=wallet_name, descriptors=self.options.descriptors, load_on_startup=True)
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes: int, extra_args=None, *, rpchost=None, binary=None, binary_cli=None, versions=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
set_test_params()."""
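        # The helper below maps an internal version number onto a previous-release binary path.
        # Worked example (illustrative): version=219900 decomposes to 0.21.99.0, the trailing
        # '.0' is stripped, and the result is <previous_releases_path>/v0.21.99/bin/<bin_name>.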
def get_bin_from_version(version, bin_name, bin_default):
if not version:
return bin_default
return os.path.join(
self.options.previous_releases_path,
re.sub(
r'\.0$',
'', # remove trailing .0 for point releases
'v{}.{}.{}.{}'.format(
(version % 100000000) // 1000000,
(version % 1000000) // 10000,
(version % 10000) // 100,
(version % 100) // 1,
),
),
'bin',
bin_name,
)
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if versions is None:
versions = [None] * num_nodes
if self.is_syscall_sandbox_compiled() and not self.disable_syscall_sandbox:
for i in range(len(extra_args)):
if versions[i] is None or versions[i] >= 219900:
extra_args[i] = extra_args[i] + ["-sandbox=log-and-abort"]
if binary is None:
binary = [get_bin_from_version(v, 'finalcoind', self.options.finalcoind) for v in versions]
if binary_cli is None:
binary_cli = [get_bin_from_version(v, 'finalcoin-cli', self.options.finalcoincli) for v in versions]
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(versions), num_nodes)
assert_equal(len(binary), num_nodes)
assert_equal(len(binary_cli), num_nodes)
for i in range(num_nodes):
test_node_i = TestNode(
i,
get_datadir_path(self.options.tmpdir, i),
chain=self.chain,
rpchost=rpchost,
timewait=self.rpc_timeout,
timeout_factor=self.options.timeout_factor,
finalcoind=binary[i],
finalcoin_cli=binary_cli[i],
version=versions[i],
coverage_dir=self.options.coveragedir,
cwd=self.options.tmpdir,
extra_conf=extra_confs[i],
extra_args=extra_args[i],
use_cli=self.options.usecli,
start_perf=self.options.perf,
use_valgrind=self.options.valgrind,
descriptors=self.options.descriptors,
)
self.nodes.append(test_node_i)
if not test_node_i.version_is_at_least(170000):
# adjust conf for pre 17
conf_file = test_node_i.finalcoinconf
with open(conf_file, 'r', encoding='utf8') as conf:
conf_data = conf.read()
with open(conf_file, 'w', encoding='utf8') as conf:
conf.write(conf_data.replace('[regtest]', ''))
def start_node(self, i, *args, **kwargs):
"""Start a finalcoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple finalcoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr='', wait=0):
"""Stop a finalcoind test node"""
self.nodes[i].stop_node(expected_stderr, wait=wait)
def stop_nodes(self, wait=0):
"""Stop multiple finalcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node(wait=wait, wait_until_stopped=False)
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def connect_nodes(self, a, b):
from_connection = self.nodes[a]
to_connection = self.nodes[b]
ip_port = "127.0.0.1:" + str(p2p_port(b))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
# See comments in net_processing:
# * Must have a version message before anything else
# * Must have a verack message before anything else
wait_until_helper(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
wait_until_helper(lambda: all(peer['version'] != 0 for peer in to_connection.getpeerinfo()))
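        # A verack message carries no payload, so the bytes received for it equal the fixed
        # 24-byte P2P message header (4 magic + 12 command + 4 length + 4 checksum).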
wait_until_helper(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo()))
wait_until_helper(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in to_connection.getpeerinfo()))
def disconnect_nodes(self, a, b):
def disconnect_nodes_helper(from_connection, node_num):
def get_peer_ids():
result = []
for peer in from_connection.getpeerinfo():
if "testnode{}".format(node_num) in peer['subver']:
result.append(peer['id'])
return result
peer_ids = get_peer_ids()
if not peer_ids:
self.log.warning("disconnect_nodes: {} and {} were not connected".format(
from_connection.index,
node_num,
))
return
for peer_id in peer_ids:
try:
from_connection.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until_helper(lambda: not get_peer_ids(), timeout=5)
disconnect_nodes_helper(self.nodes[a], b)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
self.disconnect_nodes(1, 2)
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
self.connect_nodes(1, 2)
self.sync_all()
def generate(self, generator, *args, **kwargs):
blocks = generator.generate(*args, **kwargs)
return blocks
def generateblock(self, generator, *args, **kwargs):
blocks = generator.generateblock(*args, **kwargs)
return blocks
def generatetoaddress(self, generator, *args, **kwargs):
blocks = generator.generatetoaddress(*args, **kwargs)
return blocks
def generatetodescriptor(self, generator, *args, **kwargs):
blocks = generator.generatetodescriptor(*args, **kwargs)
return blocks
def sync_blocks(self, nodes=None, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
        sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
rpc_connections = nodes or self.nodes
timeout = int(timeout * self.options.timeout_factor)
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
# Check that each peer has at least one connection
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
time.sleep(wait)
raise AssertionError("Block sync timed out after {}s:{}".format(
timeout,
"".join("\n {!r}".format(b) for b in best_hash),
))
def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
rpc_connections = nodes or self.nodes
timeout = int(timeout * self.options.timeout_factor)
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
# Check that each peer has at least one connection
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
time.sleep(wait)
raise AssertionError("Mempool sync timed out after {}s:{}".format(
timeout,
"".join("\n {!r}".format(m) for m in pool),
))
def sync_all(self, nodes=None):
self.sync_blocks(nodes)
self.sync_mempools(nodes)
def wait_until(self, test_function, timeout=60):
return wait_until_helper(test_function, timeout=timeout, timeout_factor=self.options.timeout_factor)
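    # Illustrative use of the helper above (the predicate is hypothetical): a test can block on an
    # arbitrary condition with, e.g., self.wait_until(lambda: node.getblockcount() >= 200).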
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stdout. By default this logs messages at INFO level and above, but the level can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
        # The user can provide the log level as a number or a string (e.g. DEBUG). loglevel was parsed as a string, so try to convert it to an int.
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
        # Format logs the same as finalcoind's debug.log with microsecond precision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("FinalcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 199-block-long chain
Afterward, create num_nodes copies from the cache."""
CACHE_NODE_ID = 0 # Use node 0 to create the cache for all other nodes
cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
assert self.num_nodes <= MAX_NODES
if not os.path.isdir(cache_node_dir):
self.log.debug("Creating cache directory {}".format(cache_node_dir))
initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain, self.disable_autoconnect)
self.nodes.append(
TestNode(
CACHE_NODE_ID,
cache_node_dir,
chain=self.chain,
extra_conf=["bind=127.0.0.1"],
extra_args=['-disablewallet'],
rpchost=None,
timewait=self.rpc_timeout,
timeout_factor=self.options.timeout_factor,
finalcoind=self.options.finalcoind,
finalcoin_cli=self.options.finalcoincli,
coverage_dir=None,
cwd=self.options.tmpdir,
descriptors=self.options.descriptors,
))
self.start_node(CACHE_NODE_ID)
cache_node = self.nodes[CACHE_NODE_ID]
# Wait for RPC connections to be ready
cache_node.wait_for_rpc_connection()
# Set a time in the past, so that blocks don't end up in the future
cache_node.setmocktime(cache_node.getblockheader(cache_node.getbestblockhash())['time'])
            # Create a 199-block-long chain; each of the first 3 addresses
            # gets 25 mature blocks and 25 immature.
            # The 4th address gets 25 mature and only 24 immature blocks so that the very last
            # block in the cache is not too old (i.e. does not have an old tip age).
# This is needed so that we are out of IBD when the test starts,
# see the tip age check in IsInitialBlockDownload().
gen_addresses = [k.address for k in TestNode.PRIV_KEYS][:3] + [ADDRESS_BCRT1_P2WSH_OP_TRUE]
assert_equal(len(gen_addresses), 4)
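            # 8 generation rounds: 7 rounds of 25 blocks plus a final round of 24 blocks gives
            # 7 * 25 + 24 = 199 blocks, rotating through the 4 addresses above.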
for i in range(8):
self.generatetoaddress(
cache_node,
nblocks=25 if i != 7 else 24,
address=gen_addresses[i % len(gen_addresses)],
)
assert_equal(cache_node.getblockchaininfo()["blocks"], 199)
# Shut it down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
def cache_path(*paths):
return os.path.join(cache_node_dir, self.chain, *paths)
os.rmdir(cache_path('wallets')) # Remove empty wallets dir
for entry in os.listdir(cache_path()):
if entry not in ['chainstate', 'blocks', 'indexes']: # Only indexes, chainstate and blocks folders
os.remove(cache_path(entry))
for i in range(self.num_nodes):
self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(cache_node_dir, to_dir)
initialize_datadir(self.options.tmpdir, i, self.chain, self.disable_autoconnect) # Overwrite port/rpcport in finalcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i, self.chain, self.disable_autoconnect)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_finalcoind_zmq(self):
"""Skip the running test if finalcoind has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("finalcoind has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
self.requires_wallet = True
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
if self.options.descriptors:
self.skip_if_no_sqlite()
else:
self.skip_if_no_bdb()
def skip_if_no_sqlite(self):
"""Skip the running test if sqlite has not been compiled."""
if not self.is_sqlite_compiled():
raise SkipTest("sqlite has not been compiled.")
def skip_if_no_bdb(self):
"""Skip the running test if BDB has not been compiled."""
if not self.is_bdb_compiled():
raise SkipTest("BDB has not been compiled.")
def skip_if_no_wallet_tool(self):
"""Skip the running test if finalcoin-wallet has not been compiled."""
if not self.is_wallet_tool_compiled():
raise SkipTest("finalcoin-wallet has not been compiled")
def skip_if_no_cli(self):
"""Skip the running test if finalcoin-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("finalcoin-cli has not been compiled.")
def skip_if_no_previous_releases(self):
"""Skip the running test if previous releases are not available."""
if not self.has_previous_releases():
raise SkipTest("previous releases not available or disabled")
def has_previous_releases(self):
"""Checks whether previous releases are present and enabled."""
if not os.path.isdir(self.options.previous_releases_path):
if self.options.prev_releases:
raise AssertionError("Force test of previous releases but releases missing: {}".format(
self.options.previous_releases_path))
return self.options.prev_releases
def skip_if_no_external_signer(self):
"""Skip the running test if external signer support has not been compiled."""
if not self.is_external_signer_compiled():
raise SkipTest("external signer support has not been compiled.")
def is_cli_compiled(self):
"""Checks whether finalcoin-cli was compiled."""
return self.config["components"].getboolean("ENABLE_CLI")
def is_external_signer_compiled(self):
"""Checks whether external signer support was compiled."""
return self.config["components"].getboolean("ENABLE_EXTERNAL_SIGNER")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
return self.config["components"].getboolean("ENABLE_WALLET")
def is_wallet_tool_compiled(self):
"""Checks whether finalcoin-wallet was compiled."""
return self.config["components"].getboolean("ENABLE_WALLET_TOOL")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
return self.config["components"].getboolean("ENABLE_ZMQ")
def is_sqlite_compiled(self):
"""Checks whether the wallet module was compiled with Sqlite support."""
return self.config["components"].getboolean("USE_SQLITE")
def is_bdb_compiled(self):
"""Checks whether the wallet module was compiled with BDB support."""
return self.config["components"].getboolean("USE_BDB")
def is_syscall_sandbox_compiled(self):
"""Checks whether the syscall sandbox was compiled."""
return self.config["components"].getboolean("ENABLE_SYSCALL_SANDBOX")
|
[] |
[] |
[
"PREVIOUS_RELEASES_DIR",
"FINALCOINCLI",
"PATH",
"FINALCOIND"
] |
[]
|
["PREVIOUS_RELEASES_DIR", "FINALCOINCLI", "PATH", "FINALCOIND"]
|
python
| 4 | 0 | |
students/K33401/Do_Thien/Lr2/django_project_lr2/django_project_lr2/asgi.py
|
"""
ASGI config for django_project_lr2 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_project_lr2.settings')
application = get_asgi_application()
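# Illustrative note (not part of the original file): an ASGI server is pointed at this module's
# ``application`` object, e.g. `uvicorn django_project_lr2.asgi:application`.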
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
|
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.mapreduce;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.token.TokenUtil;
import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.StringUtils;
import com.codahale.metrics.MetricRegistry;
/**
* Utility for {@link TableMapper} and {@link TableReducer}
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
@InterfaceAudience.Public
public class TableMapReduceUtil {
private static final Log LOG = LogFactory.getLog(TableMapReduceUtil.class);
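  // Illustrative usage sketch (table names, MyMapper/MyReducer and the key/value classes are
  // hypothetical, not part of this file): a typical read-process-write job is wired up with
  //   Scan scan = new Scan();
  //   TableMapReduceUtil.initTableMapperJob("input_table", scan, MyMapper.class,
  //       Text.class, IntWritable.class, job);
  //   TableMapReduceUtil.initTableReducerJob("output_table", MyReducer.class, job);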
/**
* Use this before submitting a TableMap job. It will appropriately set up
* the job.
*
* @param table The table name to read from.
* @param scan The scan instance with the columns, time range etc.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is
* carrying all necessary HBase configuration.
* @throws IOException When setting up the details fails.
*/
public static void initTableMapperJob(String table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job)
throws IOException {
initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass,
job, true);
}
/**
* Use this before submitting a TableMap job. It will appropriately set up
* the job.
*
* @param table The table name to read from.
* @param scan The scan instance with the columns, time range etc.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is
* carrying all necessary HBase configuration.
* @throws IOException When setting up the details fails.
*/
public static void initTableMapperJob(TableName table,
Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass,
Job job) throws IOException {
initTableMapperJob(table.getNameAsString(),
scan,
mapper,
outputKeyClass,
outputValueClass,
job,
true);
}
/**
* Use this before submitting a TableMap job. It will appropriately set up
* the job.
*
* @param table Binary representation of the table name to read from.
* @param scan The scan instance with the columns, time range etc.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is
* carrying all necessary HBase configuration.
* @throws IOException When setting up the details fails.
*/
public static void initTableMapperJob(byte[] table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job)
throws IOException {
initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass,
job, true);
}
/**
* Use this before submitting a TableMap job. It will appropriately set up
* the job.
*
* @param table The table name to read from.
* @param scan The scan instance with the columns, time range etc.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is
* carrying all necessary HBase configuration.
* @param addDependencyJars upload HBase jars and jars for any of the configured
* job classes via the distributed cache (tmpjars).
* @throws IOException When setting up the details fails.
*/
public static void initTableMapperJob(String table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars, Class<? extends InputFormat> inputFormatClass)
throws IOException {
initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job,
addDependencyJars, true, inputFormatClass);
}
/**
* Use this before submitting a TableMap job. It will appropriately set up
* the job.
*
* @param table The table name to read from.
* @param scan The scan instance with the columns, time range etc.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is
* carrying all necessary HBase configuration.
* @param addDependencyJars upload HBase jars and jars for any of the configured
* job classes via the distributed cache (tmpjars).
* @param initCredentials whether to initialize hbase auth credentials for the job
* @param inputFormatClass the input format
* @throws IOException When setting up the details fails.
*/
public static void initTableMapperJob(String table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars, boolean initCredentials,
Class<? extends InputFormat> inputFormatClass)
throws IOException {
job.setInputFormatClass(inputFormatClass);
if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass);
if (outputKeyClass != null) job.setMapOutputKeyClass(outputKeyClass);
job.setMapperClass(mapper);
if (Put.class.equals(outputValueClass)) {
job.setCombinerClass(PutCombiner.class);
}
Configuration conf = job.getConfiguration();
HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
conf.set(TableInputFormat.INPUT_TABLE, table);
conf.set(TableInputFormat.SCAN, convertScanToString(scan));
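    // Register HBase's serializations for Mutation, Result and KeyValue alongside whatever
    // serializations are already configured, so map output values round-trip correctly.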
conf.setStrings("io.serializations", conf.get("io.serializations"),
MutationSerialization.class.getName(), ResultSerialization.class.getName(),
KeyValueSerialization.class.getName());
if (addDependencyJars) {
addDependencyJars(job);
}
if (initCredentials) {
initCredentials(job);
}
}
/**
* Use this before submitting a TableMap job. It will appropriately set up
* the job.
*
* @param table Binary representation of the table name to read from.
* @param scan The scan instance with the columns, time range etc.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is
* carrying all necessary HBase configuration.
* @param addDependencyJars upload HBase jars and jars for any of the configured
* job classes via the distributed cache (tmpjars).
* @param inputFormatClass The class of the input format
* @throws IOException When setting up the details fails.
*/
public static void initTableMapperJob(byte[] table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars, Class<? extends InputFormat> inputFormatClass)
throws IOException {
initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass,
outputValueClass, job, addDependencyJars, inputFormatClass);
}
/**
* Use this before submitting a TableMap job. It will appropriately set up
* the job.
*
* @param table Binary representation of the table name to read from.
* @param scan The scan instance with the columns, time range etc.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is
* carrying all necessary HBase configuration.
* @param addDependencyJars upload HBase jars and jars for any of the configured
* job classes via the distributed cache (tmpjars).
* @throws IOException When setting up the details fails.
*/
public static void initTableMapperJob(byte[] table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars)
throws IOException {
initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass,
outputValueClass, job, addDependencyJars, TableInputFormat.class);
}
/**
* Use this before submitting a TableMap job. It will appropriately set up
* the job.
*
* @param table The table name to read from.
* @param scan The scan instance with the columns, time range etc.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is
* carrying all necessary HBase configuration.
* @param addDependencyJars upload HBase jars and jars for any of the configured
* job classes via the distributed cache (tmpjars).
* @throws IOException When setting up the details fails.
*/
public static void initTableMapperJob(String table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars)
throws IOException {
initTableMapperJob(table, scan, mapper, outputKeyClass,
outputValueClass, job, addDependencyJars, TableInputFormat.class);
}
/**
* Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on
* direct memory will likely cause the map tasks to OOM when opening the region. This
* is done here instead of in TableSnapshotRegionRecordReader in case an advanced user
* wants to override this behavior in their job.
*/
public static void resetCacheConfig(Configuration conf) {
conf.setFloat(
HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0f);
conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
}
/**
* Sets up the job for reading from one or more table snapshots, with one or more scans
* per snapshot.
   * It bypasses hbase servers and reads directly from snapshot files.
*
* @param snapshotScans map of snapshot name to scans on that snapshot.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is
* carrying all necessary HBase configuration.
* @param addDependencyJars upload HBase jars and jars for any of the configured
* job classes via the distributed cache (tmpjars).
*/
public static void initMultiTableSnapshotMapperJob(Map<String, Collection<Scan>> snapshotScans,
Class<? extends TableMapper> mapper, Class<?> outputKeyClass, Class<?> outputValueClass,
Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException {
MultiTableSnapshotInputFormat.setInput(job.getConfiguration(), snapshotScans, tmpRestoreDir);
job.setInputFormatClass(MultiTableSnapshotInputFormat.class);
if (outputValueClass != null) {
job.setMapOutputValueClass(outputValueClass);
}
if (outputKeyClass != null) {
job.setMapOutputKeyClass(outputKeyClass);
}
job.setMapperClass(mapper);
Configuration conf = job.getConfiguration();
HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
if (addDependencyJars) {
addDependencyJars(job);
addDependencyJarsForClasses(job.getConfiguration(), MetricRegistry.class);
}
resetCacheConfig(job.getConfiguration());
}
/**
* Sets up the job for reading from a table snapshot. It bypasses hbase servers
   * and reads directly from snapshot files.
*
* @param snapshotName The name of the snapshot (of a table) to read from.
* @param scan The scan instance with the columns, time range etc.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is
* carrying all necessary HBase configuration.
* @param addDependencyJars upload HBase jars and jars for any of the configured
* job classes via the distributed cache (tmpjars).
*
   * @param tmpRestoreDir a temporary directory to copy the snapshot files into. The current user should
   * have write permissions to this directory, and it should not be a subdirectory of rootdir.
   * After the job is finished, the restore directory can be deleted.
* @throws IOException When setting up the details fails.
* @see TableSnapshotInputFormat
*/
public static void initTableSnapshotMapperJob(String snapshotName, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars, Path tmpRestoreDir)
throws IOException {
TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir);
initTableMapperJob(snapshotName, scan, mapper, outputKeyClass,
outputValueClass, job, addDependencyJars, false, TableSnapshotInputFormat.class);
resetCacheConfig(job.getConfiguration());
}
/**
* Use this before submitting a Multi TableMap job. It will appropriately set
* up the job.
*
* @param scans The list of {@link Scan} objects to read from.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is carrying
* all necessary HBase configuration.
* @throws IOException When setting up the details fails.
*/
public static void initTableMapperJob(List<Scan> scans,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job) throws IOException {
initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job,
true);
}
/**
* Use this before submitting a Multi TableMap job. It will appropriately set
* up the job.
*
* @param scans The list of {@link Scan} objects to read from.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is carrying
* all necessary HBase configuration.
* @param addDependencyJars upload HBase jars and jars for any of the
* configured job classes via the distributed cache (tmpjars).
* @throws IOException When setting up the details fails.
*/
public static void initTableMapperJob(List<Scan> scans,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars) throws IOException {
initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job,
addDependencyJars, true);
}
/**
* Use this before submitting a Multi TableMap job. It will appropriately set
* up the job.
*
* @param scans The list of {@link Scan} objects to read from.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is carrying
* all necessary HBase configuration.
* @param addDependencyJars upload HBase jars and jars for any of the
* configured job classes via the distributed cache (tmpjars).
* @param initCredentials whether to initialize hbase auth credentials for the job
* @throws IOException When setting up the details fails.
*/
public static void initTableMapperJob(List<Scan> scans,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars,
boolean initCredentials) throws IOException {
job.setInputFormatClass(MultiTableInputFormat.class);
if (outputValueClass != null) {
job.setMapOutputValueClass(outputValueClass);
}
if (outputKeyClass != null) {
job.setMapOutputKeyClass(outputKeyClass);
}
job.setMapperClass(mapper);
Configuration conf = job.getConfiguration();
HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
List<String> scanStrings = new ArrayList<>();
for (Scan scan : scans) {
scanStrings.add(convertScanToString(scan));
}
job.getConfiguration().setStrings(MultiTableInputFormat.SCANS,
scanStrings.toArray(new String[scanStrings.size()]));
if (addDependencyJars) {
addDependencyJars(job);
}
if (initCredentials) {
initCredentials(job);
}
}
public static void initCredentials(Job job) throws IOException {
UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
if (userProvider.isHadoopSecurityEnabled()) {
// propagate delegation related props from launcher job to MR job
if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
job.getConfiguration().set("mapreduce.job.credentials.binary",
System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
}
}
if (userProvider.isHBaseSecurityEnabled()) {
try {
// init credentials for remote cluster
String quorumAddress = job.getConfiguration().get(TableOutputFormat.QUORUM_ADDRESS);
User user = userProvider.getCurrent();
if (quorumAddress != null) {
Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(),
quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX);
Connection peerConn = ConnectionFactory.createConnection(peerConf);
try {
TokenUtil.addTokenForJob(peerConn, user, job);
} finally {
peerConn.close();
}
}
Connection conn = ConnectionFactory.createConnection(job.getConfiguration());
try {
TokenUtil.addTokenForJob(conn, user, job);
} finally {
conn.close();
}
} catch (InterruptedException ie) {
LOG.info("Interrupted obtaining user authentication token");
Thread.currentThread().interrupt();
}
}
}
/**
* Obtain an authentication token, for the specified cluster, on behalf of the current user
* and add it to the credentials for the given map reduce job.
*
* The quorumAddress is the key to the ZK ensemble, which contains:
* hbase.zookeeper.quorum, hbase.zookeeper.client.port and
* zookeeper.znode.parent
*
* @param job The job that requires the permission.
   * @param quorumAddress string that contains the 3 required configurations
* @throws IOException When the authentication token cannot be obtained.
* @deprecated Since 1.2.0, use {@link #initCredentialsForCluster(Job, Configuration)} instead.
*/
@Deprecated
public static void initCredentialsForCluster(Job job, String quorumAddress)
throws IOException {
Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(),
quorumAddress);
initCredentialsForCluster(job, peerConf);
}
/**
* Obtain an authentication token, for the specified cluster, on behalf of the current user
* and add it to the credentials for the given map reduce job.
*
* @param job The job that requires the permission.
* @param conf The configuration to use in connecting to the peer cluster
* @throws IOException When the authentication token cannot be obtained.
*/
public static void initCredentialsForCluster(Job job, Configuration conf)
throws IOException {
UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
if (userProvider.isHBaseSecurityEnabled()) {
try {
Connection peerConn = ConnectionFactory.createConnection(conf);
try {
TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
} finally {
peerConn.close();
}
} catch (InterruptedException e) {
LOG.info("Interrupted obtaining user authentication token");
Thread.interrupted();
}
}
}
/**
* Writes the given scan into a Base64 encoded string.
*
* @param scan The scan to write out.
* @return The scan saved in a Base64 encoded string.
* @throws IOException When writing the scan fails.
*/
public static String convertScanToString(Scan scan) throws IOException {
ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
return Base64.encodeBytes(proto.toByteArray());
}
/**
* Converts the given Base64 string back into a Scan instance.
*
* @param base64 The scan details.
* @return The newly created Scan instance.
* @throws IOException When reading the scan instance fails.
*/
public static Scan convertStringToScan(String base64) throws IOException {
byte [] decoded = Base64.decode(base64);
return ProtobufUtil.toScan(ClientProtos.Scan.parseFrom(decoded));
}
/**
* Use this before submitting a TableReduce job. It will
* appropriately set up the JobConf.
*
* @param table The output table.
* @param reducer The reducer class to use.
* @param job The current job to adjust.
* @throws IOException When determining the region count fails.
*/
public static void initTableReducerJob(String table,
Class<? extends TableReducer> reducer, Job job)
throws IOException {
initTableReducerJob(table, reducer, job, null);
}
/**
* Use this before submitting a TableReduce job. It will
* appropriately set up the JobConf.
*
* @param table The output table.
* @param reducer The reducer class to use.
* @param job The current job to adjust.
* @param partitioner Partitioner to use. Pass <code>null</code> to use
* default partitioner.
* @throws IOException When determining the region count fails.
*/
public static void initTableReducerJob(String table,
Class<? extends TableReducer> reducer, Job job,
Class partitioner) throws IOException {
initTableReducerJob(table, reducer, job, partitioner, null, null, null);
}
/**
* Use this before submitting a TableReduce job. It will
* appropriately set up the JobConf.
*
* @param table The output table.
* @param reducer The reducer class to use.
* @param job The current job to adjust. Make sure the passed job is
* carrying all necessary HBase configuration.
* @param partitioner Partitioner to use. Pass <code>null</code> to use
* default partitioner.
* @param quorumAddress Distant cluster to write to; default is null for
* output to the cluster that is designated in <code>hbase-site.xml</code>.
* Set this String to the zookeeper ensemble of an alternate remote cluster
   * when you would have the reduce write to a cluster other than the
* default; e.g. copying tables between clusters, the source would be
* designated by <code>hbase-site.xml</code> and this param would have the
* ensemble address of the remote cluster. The format to pass is particular.
* Pass <code> <hbase.zookeeper.quorum>:<
* hbase.zookeeper.client.port>:<zookeeper.znode.parent>
* </code> such as <code>server,server2,server3:2181:/hbase</code>.
* @param serverClass redefined hbase.regionserver.class
* @param serverImpl redefined hbase.regionserver.impl
* @throws IOException When determining the region count fails.
*/
public static void initTableReducerJob(String table,
Class<? extends TableReducer> reducer, Job job,
Class partitioner, String quorumAddress, String serverClass,
String serverImpl) throws IOException {
initTableReducerJob(table, reducer, job, partitioner, quorumAddress,
serverClass, serverImpl, true);
}
/**
* Use this before submitting a TableReduce job. It will
* appropriately set up the JobConf.
*
* @param table The output table.
* @param reducer The reducer class to use.
* @param job The current job to adjust. Make sure the passed job is
* carrying all necessary HBase configuration.
* @param partitioner Partitioner to use. Pass <code>null</code> to use
* default partitioner.
* @param quorumAddress Distant cluster to write to; default is null for
* output to the cluster that is designated in <code>hbase-site.xml</code>.
* Set this String to the zookeeper ensemble of an alternate remote cluster
   * when you would have the reduce write to a cluster other than the
* default; e.g. copying tables between clusters, the source would be
* designated by <code>hbase-site.xml</code> and this param would have the
* ensemble address of the remote cluster. The format to pass is particular.
* Pass <code> <hbase.zookeeper.quorum>:<
* hbase.zookeeper.client.port>:<zookeeper.znode.parent>
* </code> such as <code>server,server2,server3:2181:/hbase</code>.
* @param serverClass redefined hbase.regionserver.class
* @param serverImpl redefined hbase.regionserver.impl
* @param addDependencyJars upload HBase jars and jars for any of the configured
* job classes via the distributed cache (tmpjars).
* @throws IOException When determining the region count fails.
*/
public static void initTableReducerJob(String table,
Class<? extends TableReducer> reducer, Job job,
Class partitioner, String quorumAddress, String serverClass,
String serverImpl, boolean addDependencyJars) throws IOException {
Configuration conf = job.getConfiguration();
HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
job.setOutputFormatClass(TableOutputFormat.class);
if (reducer != null) job.setReducerClass(reducer);
conf.set(TableOutputFormat.OUTPUT_TABLE, table);
conf.setStrings("io.serializations", conf.get("io.serializations"),
MutationSerialization.class.getName(), ResultSerialization.class.getName());
// If passed a quorum/ensemble address, pass it on to TableOutputFormat.
if (quorumAddress != null) {
// Calling this will validate the format
ZKConfig.validateClusterKey(quorumAddress);
conf.set(TableOutputFormat.QUORUM_ADDRESS,quorumAddress);
}
if (serverClass != null && serverImpl != null) {
conf.set(TableOutputFormat.REGION_SERVER_CLASS, serverClass);
conf.set(TableOutputFormat.REGION_SERVER_IMPL, serverImpl);
}
job.setOutputKeyClass(ImmutableBytesWritable.class);
job.setOutputValueClass(Writable.class);
if (partitioner == HRegionPartitioner.class) {
job.setPartitionerClass(HRegionPartitioner.class);
int regions = MetaTableAccessor.getRegionCount(conf, TableName.valueOf(table));
if (job.getNumReduceTasks() > regions) {
job.setNumReduceTasks(regions);
}
} else if (partitioner != null) {
job.setPartitionerClass(partitioner);
}
if (addDependencyJars) {
addDependencyJars(job);
}
initCredentials(job);
}
/**
* Ensures that the given number of reduce tasks for the given job
* configuration does not exceed the number of regions for the given table.
*
* @param table The table to get the region count for.
* @param job The current job to adjust.
* @throws IOException When retrieving the table details fails.
*/
public static void limitNumReduceTasks(String table, Job job)
throws IOException {
int regions =
MetaTableAccessor.getRegionCount(job.getConfiguration(), TableName.valueOf(table));
if (job.getNumReduceTasks() > regions)
job.setNumReduceTasks(regions);
}
/**
* Sets the number of reduce tasks for the given job configuration to the
* number of regions the given table has.
*
* @param table The table to get the region count for.
* @param job The current job to adjust.
* @throws IOException When retrieving the table details fails.
*/
public static void setNumReduceTasks(String table, Job job)
throws IOException {
job.setNumReduceTasks(MetaTableAccessor.getRegionCount(job.getConfiguration(),
TableName.valueOf(table)));
}
/**
* Sets the number of rows to return and cache with each scanner iteration.
* Higher caching values will enable faster mapreduce jobs at the expense of
* requiring more heap to contain the cached rows.
*
* @param job The current job to adjust.
* @param batchSize The number of rows to return in batch with each scanner
* iteration.
*/
public static void setScannerCaching(Job job, int batchSize) {
job.getConfiguration().setInt("hbase.client.scanner.caching", batchSize);
}
/**
* Add HBase and its dependencies (only) to the job configuration.
* <p>
* This is intended as a low-level API, facilitating code reuse between this
   * class and its mapred counterpart. It is also of use to external tools that
* need to build a MapReduce job that interacts with HBase but want
* fine-grained control over the jars shipped to the cluster.
* </p>
* @param conf The Configuration object to extend with dependencies.
* @see org.apache.hadoop.hbase.mapred.TableMapReduceUtil
* @see <a href="https://issues.apache.org/jira/browse/PIG-3285">PIG-3285</a>
*/
public static void addHBaseDependencyJars(Configuration conf) throws IOException {
// PrefixTreeCodec is part of the hbase-prefix-tree module. If not included in MR jobs jar
// dependencies, MR jobs that write encoded hfiles will fail.
    // We use reflection here to prevent a circular module dependency.
// TODO - if we extract the MR into a module, make it depend on hbase-prefix-tree.
Class prefixTreeCodecClass = null;
try {
prefixTreeCodecClass =
Class.forName("org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeCodec");
} catch (ClassNotFoundException e) {
// this will show up in unit tests but should not show in real deployments
LOG.warn("The hbase-prefix-tree module jar containing PrefixTreeCodec is not present." +
" Continuing without it.");
}
addDependencyJarsForClasses(conf,
// explicitly pull a class from each module
org.apache.hadoop.hbase.HConstants.class, // hbase-common
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.class, // hbase-protocol
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.class, // hbase-protocol-shaded
org.apache.hadoop.hbase.client.Put.class, // hbase-client
org.apache.hadoop.hbase.CompatibilityFactory.class, // hbase-hadoop-compat
org.apache.hadoop.hbase.mapreduce.JobUtil.class, // hbase-hadoop2-compat
org.apache.hadoop.hbase.mapreduce.TableMapper.class, // hbase-server
org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class, // hbase-metrics
org.apache.hadoop.hbase.metrics.Snapshot.class, // hbase-metrics-api
prefixTreeCodecClass, // hbase-prefix-tree (if null will be skipped)
// pull necessary dependencies
org.apache.zookeeper.ZooKeeper.class,
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel.class,
com.google.protobuf.Message.class,
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists.class,
org.apache.htrace.Trace.class,
com.codahale.metrics.MetricRegistry.class);
}
/**
* Returns a classpath string built from the content of the "tmpjars" value in {@code conf}.
* Also exposed to shell scripts via `bin/hbase mapredcp`.
*/
public static String buildDependencyClasspath(Configuration conf) {
if (conf == null) {
throw new IllegalArgumentException("Must provide a configuration object.");
}
Set<String> paths = new HashSet<>(conf.getStringCollection("tmpjars"));
if (paths.isEmpty()) {
throw new IllegalArgumentException("Configuration contains no tmpjars.");
}
StringBuilder sb = new StringBuilder();
for (String s : paths) {
// entries can take the form 'file:/path/to/file.jar'.
int idx = s.indexOf(":");
if (idx != -1) s = s.substring(idx + 1);
if (sb.length() > 0) sb.append(File.pathSeparator);
sb.append(s);
}
return sb.toString();
}
/**
* Add the HBase dependency jars as well as jars for any of the configured
* job classes to the job configuration, so that JobClient will ship them
* to the cluster and add them to the DistributedCache.
*/
public static void addDependencyJars(Job job) throws IOException {
addHBaseDependencyJars(job.getConfiguration());
try {
addDependencyJarsForClasses(job.getConfiguration(),
// when making changes here, consider also mapred.TableMapReduceUtil
// pull job classes
job.getMapOutputKeyClass(),
job.getMapOutputValueClass(),
job.getInputFormatClass(),
job.getOutputKeyClass(),
job.getOutputValueClass(),
job.getOutputFormatClass(),
job.getPartitionerClass(),
job.getCombinerClass());
} catch (ClassNotFoundException e) {
throw new IOException(e);
}
}
/**
* Add the jars containing the given classes to the job's configuration
* such that JobClient will ship them to the cluster and add them to
* the DistributedCache.
* @deprecated rely on {@link #addDependencyJars(Job)} instead.
*/
@Deprecated
public static void addDependencyJars(Configuration conf,
Class<?>... classes) throws IOException {
LOG.warn("The addDependencyJars(Configuration, Class<?>...) method has been deprecated since it"
+ " is easy to use incorrectly. Most users should rely on addDependencyJars(Job) " +
"instead. See HBASE-8386 for more details.");
addDependencyJarsForClasses(conf, classes);
}
/**
* Add the jars containing the given classes to the job's configuration
* such that JobClient will ship them to the cluster and add them to
* the DistributedCache.
*
   * N.B. this method adds at most one jar per class given. If more than one jar is
   * available containing a class with the same name as a given class, we do not define
   * which of those jars will be chosen.
*
* @param conf The Hadoop Configuration to modify
* @param classes will add just those dependencies needed to find the given classes
* @throws IOException if an underlying library call fails.
*/
@InterfaceAudience.Private
public static void addDependencyJarsForClasses(Configuration conf,
Class<?>... classes) throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
Set<String> jars = new HashSet<>();
// Add jars that are already in the tmpjars variable
jars.addAll(conf.getStringCollection("tmpjars"));
    // Add jars as we find them to a map of contents to jar name, so that we can avoid
// creating new jars for classes that have already been packaged.
Map<String, String> packagedClasses = new HashMap<>();
// Add jars containing the specified classes
for (Class<?> clazz : classes) {
if (clazz == null) continue;
Path path = findOrCreateJar(clazz, localFs, packagedClasses);
if (path == null) {
LOG.warn("Could not find jar for class " + clazz +
" in order to ship it to the cluster.");
continue;
}
if (!localFs.exists(path)) {
LOG.warn("Could not validate jar file " + path + " for class "
+ clazz);
continue;
}
jars.add(path.toString());
}
if (jars.isEmpty()) return;
conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[jars.size()])));
}
/**
* Finds the Jar for a class or creates it if it doesn't exist. If the class is in
* a directory in the classpath, it creates a Jar on the fly with the
* contents of the directory and returns the path to that Jar. If a Jar is
* created, it is created in the system temporary directory. Otherwise,
* returns an existing jar that contains a class of the same name. Maintains
* a mapping from jar contents to the tmp jar created.
* @param my_class the class to find.
* @param fs the FileSystem with which to qualify the returned path.
* @param packagedClasses a map of class name to path.
* @return a jar file that contains the class.
* @throws IOException
*/
private static Path findOrCreateJar(Class<?> my_class, FileSystem fs,
Map<String, String> packagedClasses)
throws IOException {
// attempt to locate an existing jar for the class.
String jar = findContainingJar(my_class, packagedClasses);
if (null == jar || jar.isEmpty()) {
jar = getJar(my_class);
updateMap(jar, packagedClasses);
}
if (null == jar || jar.isEmpty()) {
return null;
}
LOG.debug(String.format("For class %s, using jar %s", my_class.getName(), jar));
return new Path(jar).makeQualified(fs);
}
/**
* Add entries to <code>packagedClasses</code> corresponding to class files
* contained in <code>jar</code>.
   * @param jar The jar whose contents to list.
* @param packagedClasses map[class -> jar]
*/
private static void updateMap(String jar, Map<String, String> packagedClasses) throws IOException {
if (null == jar || jar.isEmpty()) {
return;
}
ZipFile zip = null;
try {
zip = new ZipFile(jar);
for (Enumeration<? extends ZipEntry> iter = zip.entries(); iter.hasMoreElements();) {
ZipEntry entry = iter.nextElement();
if (entry.getName().endsWith("class")) {
packagedClasses.put(entry.getName(), jar);
}
}
} finally {
if (null != zip) zip.close();
}
}
/**
* Find a jar that contains a class of the same name, if any. It will return
* a jar file, even if that is not the first thing on the class path that
* has a class with the same name. Looks first on the classpath and then in
* the <code>packagedClasses</code> map.
* @param my_class the class to find.
* @return a jar file that contains the class, or null.
* @throws IOException
*/
private static String findContainingJar(Class<?> my_class, Map<String, String> packagedClasses)
throws IOException {
ClassLoader loader = my_class.getClassLoader();
String class_file = my_class.getName().replaceAll("\\.", "/") + ".class";
if (loader != null) {
// first search the classpath
for (Enumeration<URL> itr = loader.getResources(class_file); itr.hasMoreElements();) {
URL url = itr.nextElement();
if ("jar".equals(url.getProtocol())) {
String toReturn = url.getPath();
if (toReturn.startsWith("file:")) {
toReturn = toReturn.substring("file:".length());
}
// URLDecoder is a misnamed class, since it actually decodes
// x-www-form-urlencoded MIME type rather than actual
// URL encoding (which the file path has). Therefore it would
// decode +s to ' 's which is incorrect (spaces are actually
// either unencoded or encoded as "%20"). Replace +s first, so
// that they are kept sacred during the decoding process.
toReturn = toReturn.replaceAll("\\+", "%2B");
toReturn = URLDecoder.decode(toReturn, "UTF-8");
return toReturn.replaceAll("!.*$", "");
}
}
}
// now look in any jars we've packaged using JarFinder. Returns null when
// no jar is found.
return packagedClasses.get(class_file);
}
/**
* Invoke 'getJar' on a custom JarFinder implementation. Useful for some job
* configuration contexts (HBASE-8140) and also for testing on MRv2.
   * Checks whether HADOOP-9426 is available.
* @param my_class the class to find.
* @return a jar file that contains the class, or null.
*/
private static String getJar(Class<?> my_class) {
String ret = null;
try {
ret = JarFinder.getJar(my_class);
} catch (Exception e) {
// toss all other exceptions, related to reflection failure
throw new RuntimeException("getJar invocation failed.", e);
}
return ret;
}
}
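# A minimal Python sketch (not part of the HBase source above) of the transformation that
# buildDependencyClasspath performs: strip the URI scheme from each "tmpjars" entry and
# join the results with the platform path separator. The example value is made up, and the
# Java version also deduplicates entries via a Set, which this sketch omits.
import os

def build_dependency_classpath(tmpjars):
    paths = [entry for entry in tmpjars.split(",") if entry]
    if not paths:
        raise ValueError("Configuration contains no tmpjars.")
    # Drop everything up to and including the first ':' (e.g. the "file:" scheme).
    stripped = [p.split(":", 1)[1] if ":" in p else p for p in paths]
    return os.pathsep.join(stripped)

# build_dependency_classpath("file:/tmp/a.jar,file:/tmp/b.jar") -> "/tmp/a.jar:/tmp/b.jar" on Linux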
|
[
"\"HADOOP_TOKEN_FILE_LOCATION\"",
"\"HADOOP_TOKEN_FILE_LOCATION\""
] |
[] |
[
"HADOOP_TOKEN_FILE_LOCATION"
] |
[]
|
["HADOOP_TOKEN_FILE_LOCATION"]
|
java
| 1 | 0 | |
twitter/twitter/utility.py
|
import tweepy
import os
from dotenv import load_dotenv
load_dotenv()
consumer_key = os.getenv("consumer_key")
consumer_secret = os.getenv("consumer_secret")
access_token = os.getenv("access_token")
access_token_secret = os.getenv("access_token_secrete")
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
public_tweets = api.home_timeline()
for tweet in public_tweets:
    print(tweet._json)
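# A minimal sketch, not part of the original utility: os.getenv returns None for any key
# missing from the .env file, so tweepy only fails later with a less obvious error. The
# require_env helper below is hypothetical and simply fails fast with a clear message; the
# variable names (including the misspelled "access_token_secrete") match the code above.
import os

def require_env(name):
    value = os.getenv(name)
    if value is None:
        raise RuntimeError("missing required environment variable: %s" % name)
    return value

# e.g. consumer_key = require_env("consumer_key")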
|
[] |
[] |
[
"access_token_secrete",
"consumer_key",
"access_token",
"consumer_secret"
] |
[]
|
["access_token_secrete", "consumer_key", "access_token", "consumer_secret"]
|
python
| 4 | 0 | |
tests/regressiontests/admin_views/models.py
|
# -*- coding: utf-8 -*-
import datetime
import tempfile
import os
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.core.files.storage import FileSystemStorage
from django.db import models
class Section(models.Model):
"""
A simple section that links to articles, to test linking to related items
in admin views.
"""
name = models.CharField(max_length=100)
class Article(models.Model):
"""
A simple article to test admin views. Test backwards compatibility.
"""
title = models.CharField(max_length=100)
content = models.TextField()
date = models.DateTimeField()
section = models.ForeignKey(Section, null=True, blank=True)
def __unicode__(self):
return self.title
def model_year(self):
return self.date.year
model_year.admin_order_field = 'date'
model_year.short_description = ''
class Book(models.Model):
"""
A simple book that has chapters.
"""
name = models.CharField(max_length=100, verbose_name=u'¿Name?')
def __unicode__(self):
return self.name
class Promo(models.Model):
name = models.CharField(max_length=100, verbose_name=u'¿Name?')
book = models.ForeignKey(Book)
def __unicode__(self):
return self.name
class Chapter(models.Model):
title = models.CharField(max_length=100, verbose_name=u'¿Title?')
content = models.TextField()
book = models.ForeignKey(Book)
def __unicode__(self):
return self.title
class Meta:
# Use a utf-8 bytestring to ensure it works (see #11710)
verbose_name = '¿Chapter?'
class ChapterXtra1(models.Model):
chap = models.OneToOneField(Chapter, verbose_name=u'¿Chap?')
xtra = models.CharField(max_length=100, verbose_name=u'¿Xtra?')
def __unicode__(self):
return u'¿Xtra1: %s' % self.xtra
class ChapterXtra2(models.Model):
chap = models.OneToOneField(Chapter, verbose_name=u'¿Chap?')
xtra = models.CharField(max_length=100, verbose_name=u'¿Xtra?')
def __unicode__(self):
return u'¿Xtra2: %s' % self.xtra
class RowLevelChangePermissionModel(models.Model):
name = models.CharField(max_length=100, blank=True)
class CustomArticle(models.Model):
content = models.TextField()
date = models.DateTimeField()
class ModelWithStringPrimaryKey(models.Model):
id = models.CharField(max_length=255, primary_key=True)
def __unicode__(self):
return self.id
class Color(models.Model):
value = models.CharField(max_length=10)
warm = models.BooleanField()
def __unicode__(self):
return self.value
# we replicate Color to register with another ModelAdmin
class Color2(Color):
class Meta:
proxy = True
class Thing(models.Model):
title = models.CharField(max_length=20)
color = models.ForeignKey(Color, limit_choices_to={'warm': True})
pub_date = models.DateField(blank=True, null=True)
def __unicode__(self):
return self.title
class Actor(models.Model):
name = models.CharField(max_length=50)
age = models.IntegerField()
def __unicode__(self):
return self.name
class Inquisition(models.Model):
expected = models.BooleanField()
leader = models.ForeignKey(Actor)
country = models.CharField(max_length=20)
def __unicode__(self):
return u"by %s from %s" % (self.leader, self.country)
class Sketch(models.Model):
title = models.CharField(max_length=100)
inquisition = models.ForeignKey(Inquisition, limit_choices_to={'leader__name': 'Palin',
'leader__age': 27,
'expected': False,
})
def __unicode__(self):
return self.title
class Fabric(models.Model):
NG_CHOICES = (
('Textured', (
('x', 'Horizontal'),
('y', 'Vertical'),
)
),
('plain', 'Smooth'),
)
surface = models.CharField(max_length=20, choices=NG_CHOICES)
class Person(models.Model):
GENDER_CHOICES = (
(1, "Male"),
(2, "Female"),
)
name = models.CharField(max_length=100)
gender = models.IntegerField(choices=GENDER_CHOICES)
age = models.IntegerField(default=21)
alive = models.BooleanField()
def __unicode__(self):
return self.name
class Persona(models.Model):
"""
A simple persona associated with accounts, to test inlining of related
accounts which inherit from a common accounts class.
"""
name = models.CharField(blank=False, max_length=80)
def __unicode__(self):
return self.name
class Account(models.Model):
"""
A simple, generic account encapsulating the information shared by all
types of accounts.
"""
username = models.CharField(blank=False, max_length=80)
persona = models.ForeignKey(Persona, related_name="accounts")
servicename = u'generic service'
def __unicode__(self):
return "%s: %s" % (self.servicename, self.username)
class FooAccount(Account):
"""A service-specific account of type Foo."""
servicename = u'foo'
class BarAccount(Account):
"""A service-specific account of type Bar."""
servicename = u'bar'
class Subscriber(models.Model):
name = models.CharField(blank=False, max_length=80)
email = models.EmailField(blank=False, max_length=175)
def __unicode__(self):
return "%s (%s)" % (self.name, self.email)
class ExternalSubscriber(Subscriber):
pass
class OldSubscriber(Subscriber):
pass
class Media(models.Model):
name = models.CharField(max_length=60)
class Podcast(Media):
release_date = models.DateField()
class Meta:
ordering = ('release_date',) # overridden in PodcastAdmin
class Vodcast(Media):
media = models.OneToOneField(Media, primary_key=True, parent_link=True)
released = models.BooleanField(default=False)
class Parent(models.Model):
name = models.CharField(max_length=128)
class Child(models.Model):
parent = models.ForeignKey(Parent, editable=False)
name = models.CharField(max_length=30, blank=True)
class EmptyModel(models.Model):
def __unicode__(self):
return "Primary key = %s" % self.id
temp_storage = FileSystemStorage(tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR']))
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class Gallery(models.Model):
name = models.CharField(max_length=100)
class Picture(models.Model):
name = models.CharField(max_length=100)
image = models.FileField(storage=temp_storage, upload_to='test_upload')
gallery = models.ForeignKey(Gallery, related_name="pictures")
class Language(models.Model):
iso = models.CharField(max_length=5, primary_key=True)
name = models.CharField(max_length=50)
english_name = models.CharField(max_length=50)
shortlist = models.BooleanField(default=False)
class Meta:
ordering = ('iso',)
# a base class for Recommender and Recommendation
class Title(models.Model):
pass
class TitleTranslation(models.Model):
title = models.ForeignKey(Title)
text = models.CharField(max_length=100)
class Recommender(Title):
pass
class Recommendation(Title):
recommender = models.ForeignKey(Recommender)
class Collector(models.Model):
name = models.CharField(max_length=100)
class Widget(models.Model):
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class DooHickey(models.Model):
code = models.CharField(max_length=10, primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Grommet(models.Model):
code = models.AutoField(primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Whatsit(models.Model):
index = models.IntegerField(primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Doodad(models.Model):
name = models.CharField(max_length=100)
class FancyDoodad(Doodad):
owner = models.ForeignKey(Collector)
expensive = models.BooleanField(default=True)
class Category(models.Model):
collector = models.ForeignKey(Collector)
order = models.PositiveIntegerField()
class Meta:
ordering = ('order',)
def __unicode__(self):
return u'%s:o%s' % (self.id, self.order)
class Link(models.Model):
posted = models.DateField(
default=lambda: datetime.date.today() - datetime.timedelta(days=7)
)
url = models.URLField()
post = models.ForeignKey("Post")
class PrePopulatedPost(models.Model):
title = models.CharField(max_length=100)
published = models.BooleanField()
slug = models.SlugField()
class PrePopulatedSubPost(models.Model):
post = models.ForeignKey(PrePopulatedPost)
subtitle = models.CharField(max_length=100)
subslug = models.SlugField()
class Post(models.Model):
title = models.CharField(max_length=100, help_text="Some help text for the title (with unicode ŠĐĆŽćžšđ)")
content = models.TextField(help_text="Some help text for the content (with unicode ŠĐĆŽćžšđ)")
posted = models.DateField(
default=datetime.date.today,
help_text="Some help text for the date (with unicode ŠĐĆŽćžšđ)"
)
public = models.NullBooleanField()
def awesomeness_level(self):
return "Very awesome."
class Gadget(models.Model):
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Villain(models.Model):
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class SuperVillain(Villain):
pass
class FunkyTag(models.Model):
"Because we all know there's only one real use case for GFKs."
name = models.CharField(max_length=25)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
def __unicode__(self):
return self.name
class Plot(models.Model):
name = models.CharField(max_length=100)
team_leader = models.ForeignKey(Villain, related_name='lead_plots')
contact = models.ForeignKey(Villain, related_name='contact_plots')
tags = generic.GenericRelation(FunkyTag)
def __unicode__(self):
return self.name
class PlotDetails(models.Model):
details = models.CharField(max_length=100)
plot = models.OneToOneField(Plot)
def __unicode__(self):
return self.details
class SecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
villain = models.ForeignKey(Villain)
def __unicode__(self):
return self.location
class SuperSecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
supervillain = models.ForeignKey(SuperVillain)
def __unicode__(self):
return self.location
class CyclicOne(models.Model):
name = models.CharField(max_length=25)
two = models.ForeignKey('CyclicTwo')
def __unicode__(self):
return self.name
class CyclicTwo(models.Model):
name = models.CharField(max_length=25)
one = models.ForeignKey(CyclicOne)
def __unicode__(self):
return self.name
class Topping(models.Model):
name = models.CharField(max_length=20)
class Pizza(models.Model):
name = models.CharField(max_length=20)
toppings = models.ManyToManyField('Topping')
class Album(models.Model):
owner = models.ForeignKey(User)
title = models.CharField(max_length=30)
class Employee(Person):
code = models.CharField(max_length=20)
class WorkHour(models.Model):
datum = models.DateField()
employee = models.ForeignKey(Employee)
class Question(models.Model):
question = models.CharField(max_length=20)
class Answer(models.Model):
question = models.ForeignKey(Question, on_delete=models.PROTECT)
answer = models.CharField(max_length=20)
def __unicode__(self):
return self.answer
class Reservation(models.Model):
start_date = models.DateTimeField()
price = models.IntegerField()
DRIVER_CHOICES = (
(u'bill', 'Bill G'),
(u'steve', 'Steve J'),
)
RESTAURANT_CHOICES = (
(u'indian', u'A Taste of India'),
(u'thai', u'Thai Pography'),
(u'pizza', u'Pizza Mama'),
)
class FoodDelivery(models.Model):
reference = models.CharField(max_length=100)
driver = models.CharField(max_length=100, choices=DRIVER_CHOICES, blank=True)
restaurant = models.CharField(max_length=100, choices=RESTAURANT_CHOICES, blank=True)
class Meta:
unique_together = (("driver", "restaurant"),)
class Paper(models.Model):
title = models.CharField(max_length=30)
author = models.CharField(max_length=30, blank=True, null=True)
class CoverLetter(models.Model):
author = models.CharField(max_length=30)
date_written = models.DateField(null=True, blank=True)
def __unicode__(self):
return self.author
class Story(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class OtherStory(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class ComplexSortedPerson(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
is_employee = models.NullBooleanField()
class PrePopulatedPostLargeSlug(models.Model):
"""
Regression test for #15938: a large max_length for the slugfield must not
be localized in prepopulated_fields_js.html or it might end up breaking
the javascript (ie, using THOUSAND_SEPARATOR ends up with maxLength=1,000)
"""
title = models.CharField(max_length=100)
published = models.BooleanField()
slug = models.SlugField(max_length=1000)
class AdminOrderedField(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedModelMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
def some_order(self):
return self.order
some_order.admin_order_field = 'order'
class AdminOrderedAdminMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedCallable(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class Report(models.Model):
title = models.CharField(max_length=100)
def __unicode__(self):
return self.title
class MainPrepopulated(models.Model):
name = models.CharField(max_length=100)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField()
slug2 = models.SlugField()
class RelatedPrepopulated(models.Model):
parent = models.ForeignKey(MainPrepopulated)
name = models.CharField(max_length=75)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField(max_length=50)
slug2 = models.SlugField(max_length=60)
class UnorderedObject(models.Model):
"""
Model without any defined `Meta.ordering`.
Refs #16819.
"""
name = models.CharField(max_length=255)
bool = models.BooleanField(default=True)
# Models for #23329
class ReferencedByParent(models.Model):
name = models.CharField(max_length=20, unique=True)
class ParentWithFK(models.Model):
fk = models.ForeignKey(
ReferencedByParent, to_field='name', related_name='hidden+',
)
class ChildOfReferer(ParentWithFK):
pass
class M2MReference(models.Model):
ref = models.ManyToManyField('self')
# Models for #23431
class ReferencedByInline(models.Model):
name = models.CharField(max_length=20, unique=True)
class InlineReference(models.Model):
fk = models.ForeignKey(
ReferencedByInline, to_field='name', related_name='hidden+',
)
class InlineReferer(models.Model):
refs = models.ManyToManyField(InlineReference)
# Models for #23604
class Recipe(models.Model):
pass
class Ingredient(models.Model):
recipes = models.ManyToManyField(Recipe)
# Model for #23839
class NotReferenced(models.Model):
# Don't point any FK at this model.
pass
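# A small sketch, not part of Django's test suite: temp_storage above indexes os.environ
# directly, so importing this module raises KeyError when DJANGO_TEST_TEMP_DIR is unset.
# The helper below (hypothetical, with an illustrative fallback) shows the more forgiving
# os.environ.get(...) form for comparison.
import os
import tempfile

def _example_test_temp_dir():
    # Prefer the runner-provided directory; otherwise fall back to a fresh temp directory.
    return os.environ.get('DJANGO_TEST_TEMP_DIR') or tempfile.mkdtemp()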
|
[] |
[] |
[
"DJANGO_TEST_TEMP_DIR"
] |
[]
|
["DJANGO_TEST_TEMP_DIR"]
|
python
| 1 | 0 | |
iam/oauth.go
|
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package iam
import (
"errors"
"log"
"net/http"
"net/url"
"os"
"github.com/Azure-Samples/azure-sdk-for-go-samples/helpers"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
)
const (
samplesAppID = "bee3737f-b06f-444f-b3c3-5b0f3fce46ea"
azCLIclientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46"
)
var (
// for service principal and device
clientID string
oauthConfig *adal.OAuthConfig
armToken adal.OAuthTokenProvider
batchToken adal.OAuthTokenProvider
graphToken adal.OAuthTokenProvider
// for service principal
subscriptionID string
tenantID string
clientSecret string
	// UseCLIclientID sets whether the Azure CLI client ID should be used for device authentication
UseCLIclientID bool
)
// OAuthGrantType specifies which grant type to use.
type OAuthGrantType int
const (
// OAuthGrantTypeServicePrincipal for client credentials flow
OAuthGrantTypeServicePrincipal OAuthGrantType = iota
// OAuthGrantTypeDeviceFlow for device-auth flow
OAuthGrantTypeDeviceFlow
)
func init() {
err := parseArgs()
if err != nil {
log.Fatalf("failed to parse args: %s\n", err)
}
}
func parseArgs() error {
err := helpers.LoadEnvVars()
if err != nil {
return err
}
tenantID = os.Getenv("AZ_TENANT_ID")
if tenantID == "" {
log.Println("tenant id missing")
}
clientID = os.Getenv("AZ_CLIENT_ID")
if clientID == "" {
log.Println("client id missing")
}
clientSecret = os.Getenv("AZ_CLIENT_SECRET")
if clientSecret == "" {
log.Println("client secret missing")
}
if !(len(tenantID) > 0) || !(len(clientID) > 0) || !(len(clientSecret) > 0) {
return errors.New("tenant id, client id, and client secret must be specified via env var or flags")
}
oauthConfig, err = adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID)
return err
}
// ClientID gets the client ID
func ClientID() string {
return clientID
}
// TenantID gets the tenant ID
func TenantID() string {
return tenantID
}
// ClientSecret gets the client secret
func ClientSecret() string {
return clientSecret
}
// AuthGrantType returns what kind of authentication is going to be used: device flow or service principal
func AuthGrantType() OAuthGrantType {
if helpers.DeviceFlow() {
return OAuthGrantTypeDeviceFlow
}
return OAuthGrantTypeServicePrincipal
}
// GetResourceManagementToken gets an OAuth token for managing resources using the specified grant type.
func GetResourceManagementToken(grantType OAuthGrantType) (adal.OAuthTokenProvider, error) {
if armToken != nil {
return armToken, nil
}
token, err := getToken(grantType, azure.PublicCloud.ResourceManagerEndpoint)
if err == nil {
armToken = token
}
return token, err
}
const batchManagementEndpoint = "https://batch.core.windows.net/"
// GetBatchToken gets an OAuth token for Azure batch using the specified grant type.
func GetBatchToken(grantType OAuthGrantType) (adal.OAuthTokenProvider, error) {
if batchToken != nil {
return batchToken, nil
}
token, err := getToken(grantType, batchManagementEndpoint)
if err == nil {
batchToken = token
}
return token, err
}
// GetGraphToken gets an OAuth token for the graphrbac API using the specified grant type.
func GetGraphToken(grantType OAuthGrantType) (adal.OAuthTokenProvider, error) {
if graphToken != nil {
return graphToken, nil
}
token, err := getToken(grantType, azure.PublicCloud.GraphEndpoint)
if err == nil {
graphToken = token
}
return token, err
}
func getToken(grantType OAuthGrantType, endpoint string) (token adal.OAuthTokenProvider, err error) {
switch grantType {
case OAuthGrantTypeServicePrincipal:
token, err = getServicePrincipalToken(endpoint)
case OAuthGrantTypeDeviceFlow:
token, err = getDeviceToken(endpoint)
default:
log.Fatalln("invalid token type specified")
}
return
}
func getServicePrincipalToken(endpoint string) (adal.OAuthTokenProvider, error) {
return adal.NewServicePrincipalToken(
*oauthConfig,
clientID,
clientSecret,
endpoint)
}
func getDeviceToken(endpoint string) (adal.OAuthTokenProvider, error) {
sender := &http.Client{}
cliID := samplesAppID
if UseCLIclientID {
cliID = azCLIclientID
}
code, err := adal.InitiateDeviceAuth(
sender,
*oauthConfig,
cliID, // clientID
endpoint)
if err != nil {
log.Fatalf("%s: %v\n", "failed to initiate device auth", err)
}
log.Println(*code.Message)
return adal.WaitForUserCompletion(sender, code)
}
// GetKeyvaultToken gets an authorizer for the keyvault dataplane
func GetKeyvaultToken(grantType OAuthGrantType) (authorizer autorest.Authorizer, err error) {
	config, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID)
	if err != nil {
		return
	}
	updatedAuthorizeEndpoint, err := url.Parse("https://login.windows.net/" + tenantID + "/oauth2/token")
	if err != nil {
		return
	}
	config.AuthorizeEndpoint = *updatedAuthorizeEndpoint
switch grantType {
case OAuthGrantTypeServicePrincipal:
spt, err := adal.NewServicePrincipalToken(
*config,
clientID,
clientSecret,
"https://vault.azure.net")
if err != nil {
return authorizer, err
}
authorizer = autorest.NewBearerAuthorizer(spt)
case OAuthGrantTypeDeviceFlow:
sender := &http.Client{}
code, err := adal.InitiateDeviceAuth(
sender,
*config,
samplesAppID, // clientID
"https://vault.azure.net")
if err != nil {
log.Fatalf("%s: %v\n", "failed to initiate device auth", err)
}
log.Println(*code.Message)
spt, err := adal.WaitForUserCompletion(sender, code)
if err != nil {
return authorizer, err
}
authorizer = autorest.NewBearerAuthorizer(spt)
default:
log.Fatalln("invalid token type specified")
}
return
}
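# For comparison only, a Python sketch (not part of this Go sample) of the same fail-fast
# shape as parseArgs above: read the three AZ_* settings and report the missing ones in a
# single error. The function name and the use of ValueError are assumptions made for the
# illustration.
import os

def parse_az_args():
    names = ("AZ_TENANT_ID", "AZ_CLIENT_ID", "AZ_CLIENT_SECRET")
    values = {name: os.getenv(name, "") for name in names}
    missing = [name for name in names if not values[name]]
    if missing:
        raise ValueError("missing required settings: " + ", ".join(missing))
    return values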
|
[
"\"AZ_TENANT_ID\"",
"\"AZ_CLIENT_ID\"",
"\"AZ_CLIENT_SECRET\""
] |
[] |
[
"AZ_TENANT_ID",
"AZ_CLIENT_SECRET",
"AZ_CLIENT_ID"
] |
[]
|
["AZ_TENANT_ID", "AZ_CLIENT_SECRET", "AZ_CLIENT_ID"]
|
go
| 3 | 0 | |
conf/conf_test.go
|
package conf
import (
"fmt"
"io/ioutil"
"math/rand"
"net"
"os"
"path/filepath"
"reflect"
"testing"
"time"
"github.com/spf13/viper"
)
var confData = []byte(`
GoVersion: 1.9.2
Version: 1.0.0
BuildDate: 20180428
Service:
Address: 10.0.0.0/8
HTTP:
Host: 127.0.0.1
Port: 8080
Mode: test
RPC:
Host: 127.0.0.1
Port: 9552
Log:
Level: error
Format: json
`)
func initConfig() *configuration {
config := &configuration{}
viper.SetConfigType("yaml")
filename := fmt.Sprintf("conf_test%04d.yml", rand.Intn(9999))
err := ioutil.WriteFile(filename, confData, 0664)
if err != nil {
fmt.Printf("write config file failed:%s", err)
}
//parse struct tag
c := configuration{}
t := reflect.TypeOf(c)
v := reflect.ValueOf(c)
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if v.Field(i).Type().Kind() != reflect.Struct {
key := field.Name
value := field.Tag.Get(tagName)
//set default value
viper.SetDefault(key, value)
//log.Printf("key is: %v,value is: %v\n", key, value)
} else {
structField := v.Field(i).Type()
for j := 0; j < structField.NumField(); j++ {
key := structField.Field(j).Name
values := structField.Field(j).Tag.Get(tagName)
viper.SetDefault(key, values)
}
continue
}
}
// parse config
file := must(os.Open(filename)).(*os.File)
defer file.Close()
defer os.Remove(filename)
must(nil, viper.ReadConfig(file))
must(nil, viper.Unmarshal(config))
return config
}
type configuration struct {
GoVersion string
Version string
BuildDate string
Service struct {
Address string
}
HTTP struct {
Host string
Port int
Mode string
}
RPC struct {
Host string
Port int
}
Log struct {
Level string
Format string
}
}
func TestInitConfig(t *testing.T) {
config := initConfig()
expected := &configuration{}
expected.Service.Address = "10.0.0.0/8"
expected.HTTP.Host = "127.0.0.1"
expected.HTTP.Port = 8080
expected.HTTP.Mode = "test"
expected.Log.Format = "json"
expected.Log.Level = "error"
expected.GoVersion = "1.9.2"
expected.Version = "1.0.0"
expected.BuildDate = "20180428"
expected.RPC.Host = "127.0.0.1"
expected.RPC.Port = 9552
if !reflect.DeepEqual(config, expected) {
t.Error("Expected value is not equal to the actual value obtained")
}
}
func TestSetDefault(t *testing.T) {
viper.SetDefault("key", 100)
if viper.GetInt("key") != 100 {
t.Error("set default(key) error")
}
viper.SetDefault("rpc.user", "admin")
if viper.GetString("rpc.user") != "admin" {
t.Error("set default(rpc.user) error")
}
}
func TestCopyFile(t *testing.T) {
nameSRC := "conf.txt"
nameDES := "copy_conf.txt"
content := "hello,copernicus"
data := []byte(content)
err := ioutil.WriteFile(nameSRC, data, 0644)
if err != nil {
t.Errorf("write conf file failed: %s\n ", err)
}
defer os.Remove(nameSRC)
writeNum, err := CopyFile(nameSRC, nameDES)
if err != nil {
t.Errorf("copy file failed: %s\n", err)
}
	readData, err := ioutil.ReadFile(nameDES)
	if err != nil {
		t.Errorf("read copied file failed: %s\n", err)
	}
	if int64(len(readData)) != writeNum {
		t.Errorf("copied file size %d does not match written size %d\n", len(readData), writeNum)
	}
defer os.Remove(nameDES)
}
func TestExistDataDir(t *testing.T) {
fileTrue := "conf.txt"
fileFalse := "confNo.txt"
fileTrue, err := ioutil.TempDir("", fileTrue)
if err != nil {
t.Fatalf("generate temp db path failed: %s\n", err)
}
defer os.Remove(fileTrue)
if !FileExists(fileTrue) {
t.Errorf("the fileTrue file should exist!")
}
if FileExists(fileFalse) {
t.Errorf("the fileFalse file shouldn't exist!")
}
}
type defaultArgs struct {
dataDir string
testNet bool
regTestNet bool
whiteList []*net.IPNet
UtxoHashStartHeight int32
UtxoHashEndHeight int32
}
func getDefaultConfiguration(args defaultArgs) *Configuration {
dataDir := args.dataDir
testNet := args.testNet
regTestNet := args.regTestNet
whiteList := args.whiteList
defaultDataDir := AppDataDir(defaultDataDirname, false)
return &Configuration{
DataDir: dataDir,
RPC: struct {
RPCListeners []string
RPCUser string
RPCPass string
RPCLimitUser string
RPCLimitPass string
RPCCert string `default:""`
RPCKey string
RPCMaxClients int
RPCMaxWebsockets int
RPCMaxConcurrentReqs int
RPCQuirks bool
}{
RPCCert: filepath.Join(defaultDataDir, "rpc.cert"),
RPCKey: filepath.Join(defaultDataDir, "rpc.key"),
},
Mempool: struct {
MinFeeRate int64 //
LimitAncestorCount int // Default for -limitancestorcount, max number of in-mempool ancestors
LimitAncestorSize int // Default for -limitancestorsize, maximum kilobytes of tx + all in-mempool ancestors
LimitDescendantCount int // Default for -limitdescendantcount, max number of in-mempool descendants
LimitDescendantSize int // Default for -limitdescendantsize, maximum kilobytes of in-mempool descendants
MaxPoolSize int64 `default:"300000000"` // Default for MaxPoolSize, maximum megabytes of mempool memory usage
MaxPoolExpiry int // Default for -mempoolexpiry, expiration time for mempool transactions in hours
CheckFrequency uint64 `default:"0"`
}{
MaxPoolSize: 300000000,
CheckFrequency: 0,
},
P2PNet: struct {
ListenAddrs []string `validate:"require" default:"1234"`
MaxPeers int `default:"128"`
TargetOutbound int `default:"8"`
ConnectPeersOnStart []string
DisableBanning bool `default:"true"`
BanThreshold uint32
TestNet bool
RegTest bool `default:"false"`
SimNet bool
DisableListen bool `default:"true"`
BlocksOnly bool `default:"false"` //Do not accept transactions from remote peers.
BanDuration time.Duration // How long to ban misbehaving peers
Proxy string // Connect via SOCKS5 proxy (eg. 127.0.0.1:9050)
UserAgentComments []string // Comment to add to the user agent -- See BIP 14 for more information.
DisableDNSSeed bool //Disable DNS seeding for peers
DisableRPC bool `default:"false"`
DisableTLS bool `default:"false"`
Whitelists []*net.IPNet
NoOnion bool `default:"true"` // Disable connecting to tor hidden services
Upnp bool `default:"false"` // Use UPnP to map our listening port outside of NAT
ExternalIPs []string // Add an ip to the list of local addresses we claim to listen on to peers
//AddCheckpoints []model.Checkpoint
}{
ListenAddrs: []string{"1234"},
MaxPeers: 128,
TargetOutbound: 8,
DisableBanning: true,
DisableListen: true,
BlocksOnly: false,
DisableRPC: false,
Upnp: false,
DisableTLS: false,
NoOnion: true,
TestNet: testNet,
RegTest: regTestNet,
Whitelists: whiteList,
},
Protocol: struct {
NoPeerBloomFilters bool `default:"true"`
DisableCheckpoints bool `default:"true"`
}{NoPeerBloomFilters: true, DisableCheckpoints: true},
Script: struct {
AcceptDataCarrier bool `default:"true"`
MaxDatacarrierBytes uint `default:"223"`
IsBareMultiSigStd bool `default:"true"`
//use promiscuousMempoolFlags to make more or less check of script, the type of value is uint
PromiscuousMempoolFlags string
Par int `default:"32"`
}{
AcceptDataCarrier: true,
MaxDatacarrierBytes: 223,
IsBareMultiSigStd: true,
PromiscuousMempoolFlags: "",
Par: 32,
},
TxOut: struct {
DustRelayFee int64 `default:"83"`
}{DustRelayFee: 83},
Chain: struct {
AssumeValid string
UtxoHashStartHeight int32 `default:"-1"`
UtxoHashEndHeight int32 `default:"-1"`
}{
AssumeValid: "",
UtxoHashStartHeight: args.UtxoHashStartHeight,
UtxoHashEndHeight: args.UtxoHashEndHeight,
},
Mining: struct {
BlockMinTxFee int64 // default DefaultBlockMinTxFee
BlockMaxSize uint64 // default DefaultMaxGeneratedBlockSize
BlockVersion int32 `default:"-1"`
Strategy string `default:"ancestorfeerate"` // option:ancestorfee/ancestorfeerate
}{
BlockVersion: -1,
Strategy: "ancestorfeerate",
},
PProf: struct {
IP string `default:"localhost"`
Port string `default:"6060"`
}{IP: "localhost", Port: "6060"},
AddrMgr: struct {
SimNet bool
ConnectPeers []string
}{SimNet: false},
BlockIndex: struct{ CheckBlockIndex bool }{CheckBlockIndex: regTestNet},
}
}
func createTmpFile() {
confFile := os.Getenv("GOPATH") + "/src/" + defaultProjectDir + "/conf/"
CopyFile(confFile+"bitcoincash.yml", confFile+"bitcoincash.yml.tmp")
os.Remove(confFile + "bitcoincash.yml")
f, err := os.Create(confFile + "bitcoincash.yml")
if err != nil {
fmt.Println(err)
}
defer f.Close()
}
func revert() {
confFile := os.Getenv("GOPATH") + "/src/" + defaultProjectDir + "/conf/"
os.Remove(confFile + "bitcoincash.yml")
CopyFile(confFile+"bitcoincash.yml.tmp", confFile+"bitcoincash.yml")
os.Remove(confFile + "bitcoincash.yml.tmp")
}
func createNet(nets []string) []*net.IPNet {
	netResult := make([]*net.IPNet, 0)
for _, addr := range nets {
_, ipnet, err := net.ParseCIDR(addr)
if err != nil {
ip := net.ParseIP(addr)
if ip == nil {
continue
}
var bits int
if ip.To4() == nil {
bits = 128
} else {
bits = 32
}
ipnet = &net.IPNet{
IP: ip,
Mask: net.CIDRMask(bits, bits),
}
}
		netResult = append(netResult, ipnet)
}
	return netResult
}
func TestInitConfig2(t *testing.T) {
tests := []struct {
in []string
want *Configuration
}{
{[]string{"--datadir=/tmp/Coper"},
getDefaultConfiguration(defaultArgs{
dataDir: "/tmp/Coper",
testNet: false,
regTestNet: false,
whiteList: nil,
UtxoHashStartHeight: -1,
UtxoHashEndHeight: -1,
})},
{[]string{"--datadir=/tmp/Coper", "--testnet"},
getDefaultConfiguration(defaultArgs{
dataDir: "/tmp/Coper/testnet",
testNet: true,
regTestNet: false,
whiteList: nil,
UtxoHashStartHeight: -1,
UtxoHashEndHeight: -1,
})},
{[]string{"--datadir=/tmp/Coper", "--regtest"},
getDefaultConfiguration(defaultArgs{
dataDir: "/tmp/Coper/regtest",
testNet: false,
regTestNet: true,
whiteList: nil,
UtxoHashStartHeight: -1,
UtxoHashEndHeight: -1,
})},
{[]string{"--datadir=/tmp/Coper", "--whitelist=127.0.0.1/24"},
getDefaultConfiguration(defaultArgs{
dataDir: "/tmp/Coper",
testNet: false,
regTestNet: false,
whiteList: createNet([]string{"127.0.0.1/24"}),
UtxoHashStartHeight: -1,
UtxoHashEndHeight: -1,
})},
{[]string{"--datadir=/tmp/Coper", "--whitelist="},
getDefaultConfiguration(defaultArgs{
dataDir: "/tmp/Coper",
testNet: false,
regTestNet: false,
whiteList: createNet([]string{""}),
UtxoHashStartHeight: -1,
UtxoHashEndHeight: -1,
})},
{[]string{"--datadir=/tmp/Coper", "--whitelist=127.0.0.1"},
getDefaultConfiguration(defaultArgs{
dataDir: "/tmp/Coper",
testNet: false,
regTestNet: false,
whiteList: createNet([]string{"127.0.0.1"}),
UtxoHashStartHeight: -1,
UtxoHashEndHeight: -1,
})},
{[]string{"--datadir=/tmp/Coper", "--utxohashstartheight=0", "--utxohashendheight=1"},
getDefaultConfiguration(defaultArgs{
dataDir: "/tmp/Coper",
testNet: false,
regTestNet: false,
whiteList: nil,
UtxoHashStartHeight: 0,
UtxoHashEndHeight: 1,
})},
}
createTmpFile()
defer os.RemoveAll("/tmp/Coper")
defer revert()
for i, v := range tests {
value := v
result := InitConfig(value.in)
if !reflect.DeepEqual(result, value.want) {
			t.Errorf("case %d: result does not match the expected configuration", i)
}
}
}
func TestSetUnitTestDataDir(t *testing.T) {
args := []string{"--testnet"}
Cfg = InitConfig(args)
testDir, err := SetUnitTestDataDir(Cfg)
if err != nil {
fmt.Printf("Error: %s", err)
os.Exit(1)
}
t.Logf("generated file path is: %v", testDir)
defer os.RemoveAll(testDir)
_, err = os.Stat(testDir)
if err != nil && os.IsNotExist(err) {
t.Errorf("SetUnitTestDataDir implementation error:%v", err)
}
}
|
[
"\"GOPATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
acceptance-tests/apps/mysqlapp/main.go
|
package main
import (
"fmt"
"log"
"mysqlapp/internal/app"
"mysqlapp/internal/credentials"
"net/http"
"os"
)
func main() {
log.Println("Starting.")
log.Println("Reading credentials.")
creds, err := credentials.Read()
if err != nil {
panic(err)
}
port := port()
log.Printf("Listening on port: %s", port)
http.Handle("/", app.App(creds))
	log.Fatal(http.ListenAndServe(port, nil))
}
func port() string {
if port := os.Getenv("PORT"); port != "" {
return fmt.Sprintf(":%s", port)
}
return ":8080"
}
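# Cross-language illustration only (not part of this Go app): the port() helper above is
# the common "environment variable with a default" pattern; the Python equivalent is a
# one-liner using os.getenv's default argument.
import os

def port():
    # Honour PORT when set, otherwise default to 8080, mirroring the Go helper above.
    return ":" + os.getenv("PORT", "8080")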
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
optimade-python-tools/tests/server/test_config.py
|
# pylint: disable=protected-access,pointless-statement,relative-beyond-top-level
import json
import os
from pathlib import Path
def test_env_variable():
"""Set OPTIMADE_DEBUG environment variable and check CONFIG picks up on it correctly"""
from optimade.server.config import ServerConfig
org_env_var = os.getenv("OPTIMADE_DEBUG")
try:
os.environ["OPTIMADE_DEBUG"] = "true"
CONFIG = ServerConfig()
assert CONFIG.debug
os.environ.pop("OPTIMADE_DEBUG", None)
CONFIG = ServerConfig()
assert not CONFIG.debug
finally:
if org_env_var is not None:
os.environ["OPTIMADE_DEBUG"] = org_env_var
else:
assert os.getenv("OPTIMADE_DEBUG") is None
def test_default_config_path(top_dir):
"""Make sure the default config path works
Expected default config path: PATH/TO/USER/HOMEDIR/.optimade.json
"""
from optimade.server.config import ServerConfig
org_env_var = os.getenv("OPTIMADE_CONFIG_FILE")
with open(top_dir.joinpath("tests/test_config.json"), "r") as config_file:
config = json.load(config_file)
different_base_url = "http://something_you_will_never_think_of.com"
config["base_url"] = different_base_url
# Try-finally to make sure we don't overwrite possible existing `.optimade.json`.
# As well as restoring OPTIMADE_CONFIG_FILE environment variable
default_config_file = Path.home().joinpath(".optimade.json")
restore = False
CONFIG = None
if default_config_file.exists():
restore = True
with open(default_config_file, "rb") as original_file:
original_file_content = original_file.read()
try:
# Unset OPTIMADE_CONFIG_FILE environment variable
os.environ.pop("OPTIMADE_CONFIG_FILE", None)
assert os.getenv("OPTIMADE_CONFIG_FILE") is None
with open(default_config_file, "w") as default_file:
json.dump(config, default_file)
CONFIG = ServerConfig()
assert CONFIG.base_url == different_base_url, (
f"\nDumped file content:\n{config}.\n\nLoaded CONFIG:\n{CONFIG}",
)
finally:
if CONFIG is not None:
del CONFIG
if restore:
with open(default_config_file, "wb") as original_file:
original_file.write(original_file_content)
elif default_config_file.exists():
os.remove(default_config_file)
if org_env_var is None:
assert os.getenv("OPTIMADE_CONFIG_FILE") is None
else:
os.environ["OPTIMADE_CONFIG_FILE"] = org_env_var
def test_debug_is_respected_when_off(both_clients):
"""Make sure traceback is toggleable according to debug mode - here OFF
TODO: This should be moved to a separate test file that tests the exception handlers.
"""
from optimade.server.config import CONFIG
org_value = CONFIG.debug
try:
if CONFIG.debug:
CONFIG.debug = False
response = both_clients.get("/non/existent/path")
assert (
response.status_code == 404
), f"Request should have failed, but didn't: {response.json()}"
response = response.json()
assert "data" not in response
assert "meta" in response
assert f"_{CONFIG.provider.prefix}_traceback" not in response["meta"]
finally:
CONFIG.debug = org_value
def test_debug_is_respected_when_on(both_clients):
"""Make sure traceback is toggleable according to debug mode - here ON
TODO: This should be moved to a separate test file that tests the exception handlers.
"""
from optimade.server.config import CONFIG
org_value = CONFIG.debug
try:
CONFIG.debug = True
response = both_clients.get("/non/existent/path")
assert (
response.status_code == 404
), f"Request should have failed, but didn't: {response.json()}"
response = response.json()
assert "data" not in response
assert "meta" in response
assert f"_{CONFIG.provider.prefix}_traceback" in response["meta"]
finally:
CONFIG.debug = org_value
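# A reusable sketch of the set/pop/restore dance both environment-variable tests above
# perform by hand inside try/finally blocks. temp_env is a hypothetical helper, not part of
# optimade-python-tools; it restores the variable's original state on exit.
import os
from contextlib import contextmanager

@contextmanager
def temp_env(name, value):
    original = os.getenv(name)
    try:
        if value is None:
            os.environ.pop(name, None)
        else:
            os.environ[name] = value
        yield
    finally:
        if original is None:
            os.environ.pop(name, None)
        else:
            os.environ[name] = original

# e.g. with temp_env("OPTIMADE_DEBUG", "true"): assert ServerConfig().debug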
|
[] |
[] |
[
"OPTIMADE_DEBUG",
"OPTIMADE_CONFIG_FILE"
] |
[]
|
["OPTIMADE_DEBUG", "OPTIMADE_CONFIG_FILE"]
|
python
| 2 | 0 | |
malcolm/modules/system/controllers/ProcessController.py
|
import os
import subprocess
from collections import OrderedDict
import cothread
from annotypes import Anno
from malcolm import __version__
from malcolm.core import (
Alarm,
AlarmSeverity,
BadValueError,
ProcessStartHook,
ProcessStopHook,
StringMeta,
Widget,
)
from malcolm.modules import builtin, ca
from malcolm.modules.ca.util import catools
from ..parts.dirparsepart import DirParsePart
from ..parts.iociconpart import IocIconPart
def await_ioc_start(stats, prefix):
cothread.Yield()
pid_rbv = catools.caget("%s:PID" % prefix, timeout=5)
if int(pid_rbv) != os.getpid():
raise BadValueError(
"Got back different PID: "
+ "is there another system instance on the machine?"
)
catools.caput(
"%s:YAML:PATH" % prefix, stats["yaml_path"], datatype=catools.DBR_CHAR_STR
)
catools.caput(
"%s:PYMALCOLM:PATH" % prefix,
stats["pymalcolm_path"],
datatype=catools.DBR_CHAR_STR,
)
def start_ioc(stats, prefix):
db_macros = "prefix='%s'" % prefix
try:
epics_base = os.environ["EPICS_BASE"]
except KeyError:
raise BadValueError("EPICS base not defined in environment")
softIoc_bin = epics_base + "/bin/linux-x86_64/softIoc"
for key, value in stats.items():
db_macros += ",%s='%s'" % (key, value)
root = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
db_template = os.path.join(root, "db", "system.template")
ioc = subprocess.Popen(
[softIoc_bin, "-m", db_macros, "-d", db_template],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
)
cothread.Spawn(await_ioc_start, stats, prefix)
return ioc
with Anno("prefix for self.system PVs"):
APvPrefix = str
with Anno("space-separated list of IOCs to monitor"):
AIocList = str
class ProcessController(builtin.controllers.ManagerController):
def __init__(
self,
mri: builtin.controllers.AMri,
prefix: APvPrefix,
config_dir: builtin.controllers.AConfigDir,
ioc_list: AIocList = "",
) -> None:
super().__init__(mri, config_dir)
self.ioc = None
self.ioc_blocks: OrderedDict = OrderedDict()
self.prefix = prefix
self.bl_iocs = ioc_list.split(" ")
if self.bl_iocs[-1] == "":
self.bl_iocs = self.bl_iocs[:-1]
self.stats = dict()
# TODO: the following stuff is all Linux-specific....
sys_call_bytes = (
open("/proc/%s/cmdline" % os.getpid(), "rb").read().split(b"\0")
)
sys_call = [el.decode("utf-8") for el in sys_call_bytes]
self.stats["pymalcolm_path"] = os.path.abspath(sys_call[1])
self.stats["yaml_path"] = os.path.abspath(sys_call[2])
self.stats["yaml_ver"] = self.parse_yaml_version(
self.stats["yaml_path"], "/dls_sw/work", "/dls_sw/prod"
)
self.stats["pymalcolm_ver"] = __version__
hostname = os.uname()[1]
self.stats["kernel"] = "%s %s" % (os.uname()[0], os.uname()[2])
self.stats["hostname"] = (
hostname if len(hostname) < 39 else hostname[:35] + "..."
)
self.stats["pid"] = str(os.getpid())
self.pymalcolm_path = StringMeta(
"Path to pymalcolm executable", tags=[Widget.MULTILINETEXTUPDATE.tag()]
).create_attribute_model(self.stats["pymalcolm_path"])
self.pymalcolm_ver = StringMeta(
"Version of pymalcolm executable", tags=[Widget.TEXTUPDATE.tag()]
).create_attribute_model(self.stats["pymalcolm_ver"])
self.yaml_path = StringMeta(
"Path to yaml configuration file", tags=[Widget.MULTILINETEXTUPDATE.tag()]
).create_attribute_model(self.stats["yaml_path"])
self.yaml_ver = StringMeta(
"version of yaml configuration file", tags=[Widget.TEXTUPDATE.tag()]
).create_attribute_model(self.stats["yaml_ver"])
self.hostname = StringMeta(
"Name of host machine", tags=[Widget.TEXTUPDATE.tag()]
).create_attribute_model(self.stats["hostname"])
self.kernel = StringMeta(
"Kernel of host machine", tags=[Widget.TEXTUPDATE.tag()]
).create_attribute_model(self.stats["kernel"])
self.pid = StringMeta(
"process ID of pymalcolm instance", tags=[Widget.TEXTUPDATE.tag()]
).create_attribute_model(self.stats["pid"])
self.field_registry.add_attribute_model("pymalcolmPath", self.pymalcolm_path)
self.field_registry.add_attribute_model("pymalcolmVer", self.pymalcolm_ver)
self.field_registry.add_attribute_model("yamlPath", self.yaml_path)
self.field_registry.add_attribute_model("yamlVer", self.yaml_ver)
self.field_registry.add_attribute_model("hostname", self.hostname)
self.field_registry.add_attribute_model("kernel", self.kernel)
self.field_registry.add_attribute_model("pid", self.pid)
if self.stats["yaml_ver"] in ["work", "unknown"]:
message = "Non-prod YAML config"
alarm = Alarm(message=message, severity=AlarmSeverity.MINOR_ALARM)
self.update_health("", builtin.infos.HealthInfo(alarm))
self.register_hooked(ProcessStartHook, self.init)
self.register_hooked(ProcessStopHook, self.stop_ioc)
def init(self):
if self.ioc is None:
self.ioc = start_ioc(self.stats, self.prefix)
self.get_ioc_list()
super().init()
msg = (
"""\
pymalcolm %(pymalcolm_ver)s started
Path: %(pymalcolm_path)s
Yaml: %(yaml_path)s"""
% self.stats
)
self._run_git_cmd("commit", "--allow-empty", "-m", msg)
def set_default_layout(self):
name = []
mri = []
x = []
y = []
visible = []
for part_name in self.parts.keys():
if isinstance(self.parts[part_name], builtin.parts.ChildPart):
visible += [True]
x += [0]
y += [0]
name += [part_name]
mri += [self.parts[part_name].mri]
self.set_layout(builtin.util.LayoutTable(name, mri, x, y, visible))
def stop_ioc(self):
if self.ioc is not None:
self.ioc.terminate()
self.ioc = None
def get_ioc_list(self):
ioc_controllers = []
for ioc in self.bl_iocs:
ioc_controller = make_ioc_status(ioc)
ioc_controllers += [ioc_controller]
self.process.add_controllers(ioc_controllers)
for ioc in self.bl_iocs:
self.add_part(builtin.parts.ChildPart(name=ioc, mri=ioc + ":STATUS"))
def parse_yaml_version(self, file_path, work_area, prod_area):
ver = "unknown"
if file_path.startswith(work_area):
ver = "work"
elif file_path.startswith(prod_area):
ver = self._run_git_cmd(
"describe", "--tags", "--exact-match", cwd=os.path.split(file_path)[0]
)
if ver is None:
return "Prod (unknown version)"
ver = ver.strip(b"\n").decode("utf-8")
return ver
def make_ioc_status(ioc):
controller = builtin.controllers.StatefulController(ioc + ":STATUS")
controller.add_part(
ca.parts.CAStringPart(
name="epicsVersion",
description="EPICS version",
rbv=(ioc + ":EPICS_VERS"),
throw=False,
)
)
controller.add_part(
IocIconPart(ioc, (os.path.split(__file__)[0] + "/../icons/epics-logo.svg"))
)
controller.add_part(DirParsePart(ioc, ioc))
controller.add_part(
ca.parts.CAActionPart(
"restartIoc",
description="restart IOC via procServ",
pv=(ioc + ":RESTART"),
throw=False,
)
)
return controller
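# A generic sketch (not part of malcolm) of the lookup pattern start_ioc uses above, where
# a missing EPICS_BASE becomes a readable error instead of a bare KeyError. The helper name
# and the use of RuntimeError instead of BadValueError are assumptions for the illustration.
import os

def require_path_from_env(name, suffix=""):
    try:
        base = os.environ[name]
    except KeyError:
        raise RuntimeError("%s not defined in environment" % name)
    return base + suffix

# e.g. soft_ioc = require_path_from_env("EPICS_BASE", "/bin/linux-x86_64/softIoc")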
|
[] |
[] |
[
"EPICS_BASE"
] |
[]
|
["EPICS_BASE"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app.settings')
try:
from django.core.management import execute_from_command_line
from django.core.management.commands.runserver import Command as runserver
runserver.default_port = "5050"
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
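# A short sketch (not part of the Django template) of what os.environ.setdefault above
# does: it only writes DJANGO_SETTINGS_MODULE when the variable is absent, so a value
# exported by the shell always wins. EXAMPLE_SETTING is a throwaway name for illustration.
import os

os.environ["EXAMPLE_SETTING"] = "from-shell"          # pretend the shell exported it
os.environ.setdefault("EXAMPLE_SETTING", "fallback")  # no effect: the existing value is kept
assert os.environ["EXAMPLE_SETTING"] == "from-shell"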
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
Apestratos/wsgi.py
|
"""
WSGI config for Apestratos project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Apestratos.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
Docker-Compose2UML_nonokawa2019/docker-compose2uml/target/classes/Lib/site-packages/win32/Demos/SystemParametersInfo.py
|
import win32gui, win32con, win32api, time, os, glob
## some of these tests will fail for systems prior to XP
for pname in(
## Set actions all take an unsigned int in pvParam
"SPI_GETMOUSESPEED", "SPI_GETACTIVEWNDTRKTIMEOUT", "SPI_GETCARETWIDTH",
"SPI_GETFOREGROUNDFLASHCOUNT", "SPI_GETFOREGROUNDLOCKTIMEOUT",
## Set actions all take an unsigned int in uiParam
"SPI_GETWHEELSCROLLLINES", "SPI_GETKEYBOARDDELAY",
"SPI_GETKEYBOARDSPEED",
"SPI_GETMOUSEHOVERHEIGHT", "SPI_GETMOUSEHOVERWIDTH",
"SPI_GETMOUSEHOVERTIME", "SPI_GETSCREENSAVETIMEOUT", "SPI_GETMENUSHOWDELAY",
"SPI_GETLOWPOWERTIMEOUT", "SPI_GETPOWEROFFTIMEOUT", "SPI_GETBORDER",
## below are winxp only:
"SPI_GETFONTSMOOTHINGCONTRAST", "SPI_GETFONTSMOOTHINGTYPE", "SPI_GETFOCUSBORDERHEIGHT",
"SPI_GETFOCUSBORDERWIDTH", "SPI_GETMOUSECLICKLOCKTIME"):
print pname
cget=getattr(win32con,pname)
cset=getattr(win32con,pname.replace('_GET','_SET'))
orig_value=win32gui.SystemParametersInfo(cget)
print '\toriginal setting:',orig_value
win32gui.SystemParametersInfo(cset, orig_value+1)
new_value=win32gui.SystemParametersInfo(cget)
print '\tnew value:',new_value
# On Vista, some of these values seem to be ignored. So only "fail" if
# the new value isn't what we set or the original
if new_value!=orig_value+1:
assert new_value == orig_value
print "Strange - setting %s seems to have been ignored" % (pname,)
win32gui.SystemParametersInfo(cset, orig_value)
assert win32gui.SystemParametersInfo(cget)==orig_value
# these take a boolean value in pvParam
# change to opposite, confirm that it was changed and change back
for pname in ("SPI_GETFLATMENU","SPI_GETDROPSHADOW","SPI_GETKEYBOARDCUES","SPI_GETMENUFADE",
"SPI_GETCOMBOBOXANIMATION", "SPI_GETCURSORSHADOW", "SPI_GETGRADIENTCAPTIONS", "SPI_GETHOTTRACKING",
"SPI_GETLISTBOXSMOOTHSCROLLING", "SPI_GETMENUANIMATION", "SPI_GETSELECTIONFADE",
"SPI_GETTOOLTIPANIMATION", "SPI_GETTOOLTIPFADE", "SPI_GETUIEFFECTS", "SPI_GETACTIVEWINDOWTRACKING",
"SPI_GETACTIVEWNDTRKZORDER"):
print pname
cget=getattr(win32con,pname)
cset=getattr(win32con,pname.replace('_GET','_SET'))
orig_value=win32gui.SystemParametersInfo(cget)
print orig_value
win32gui.SystemParametersInfo(cset, not orig_value)
new_value=win32gui.SystemParametersInfo(cget)
print new_value
assert orig_value!=new_value
win32gui.SystemParametersInfo(cset, orig_value)
assert win32gui.SystemParametersInfo(cget)==orig_value
# these take a boolean in uiParam
# could combine with above section now that SystemParametersInfo only takes a single parameter
for pname in ("SPI_GETFONTSMOOTHING","SPI_GETICONTITLEWRAP","SPI_GETBEEP","SPI_GETBLOCKSENDINPUTRESETS",
"SPI_GETKEYBOARDPREF","SPI_GETSCREENSAVEACTIVE","SPI_GETMENUDROPALIGNMENT",
"SPI_GETDRAGFULLWINDOWS", "SPI_GETSHOWIMEUI"):
print pname
cget=getattr(win32con,pname)
cset=getattr(win32con,pname.replace('_GET','_SET'))
orig_value=win32gui.SystemParametersInfo(cget)
print orig_value
win32gui.SystemParametersInfo(cset, not orig_value)
new_value=win32gui.SystemParametersInfo(cget)
print new_value
assert orig_value!=new_value
win32gui.SystemParametersInfo(cset, orig_value)
assert win32gui.SystemParametersInfo(cget)==orig_value
print "SPI_GETICONTITLELOGFONT"
lf=win32gui.SystemParametersInfo(win32con.SPI_GETICONTITLELOGFONT)
orig_height=lf.lfHeight
orig_italic=lf.lfItalic
print 'Height:', orig_height, 'Italic:',orig_italic
lf.lfHeight+=2
lf.lfItalic=not lf.lfItalic
win32gui.SystemParametersInfo(win32con.SPI_SETICONTITLELOGFONT, lf)
new_lf=win32gui.SystemParametersInfo(win32con.SPI_GETICONTITLELOGFONT)
print 'New Height:', new_lf.lfHeight, 'New Italic:',new_lf.lfItalic
assert new_lf.lfHeight==orig_height+2
assert new_lf.lfItalic!=orig_italic
lf.lfHeight=orig_height
lf.lfItalic=orig_italic
win32gui.SystemParametersInfo(win32con.SPI_SETICONTITLELOGFONT, lf)
new_lf=win32gui.SystemParametersInfo(win32con.SPI_GETICONTITLELOGFONT)
assert new_lf.lfHeight==orig_height
assert new_lf.lfItalic==orig_italic
print "SPI_GETMOUSEHOVERWIDTH, SPI_GETMOUSEHOVERHEIGHT, SPI_GETMOUSEHOVERTIME"
w=win32gui.SystemParametersInfo(win32con.SPI_GETMOUSEHOVERWIDTH)
h=win32gui.SystemParametersInfo(win32con.SPI_GETMOUSEHOVERHEIGHT)
t=win32gui.SystemParametersInfo(win32con.SPI_GETMOUSEHOVERTIME)
print 'w,h,t:', w,h,t
win32gui.SystemParametersInfo(win32con.SPI_SETMOUSEHOVERWIDTH,w+1)
win32gui.SystemParametersInfo(win32con.SPI_SETMOUSEHOVERHEIGHT,h+2)
win32gui.SystemParametersInfo(win32con.SPI_SETMOUSEHOVERTIME,t+3)
new_w=win32gui.SystemParametersInfo(win32con.SPI_GETMOUSEHOVERWIDTH)
new_h=win32gui.SystemParametersInfo(win32con.SPI_GETMOUSEHOVERHEIGHT)
new_t=win32gui.SystemParametersInfo(win32con.SPI_GETMOUSEHOVERTIME)
print 'new w,h,t:', new_w, new_h, new_t
assert new_w==w+1
assert new_h==h+2
assert new_t==t+3
win32gui.SystemParametersInfo(win32con.SPI_SETMOUSEHOVERWIDTH,w)
win32gui.SystemParametersInfo(win32con.SPI_SETMOUSEHOVERHEIGHT,h)
win32gui.SystemParametersInfo(win32con.SPI_SETMOUSEHOVERTIME,t)
new_w=win32gui.SystemParametersInfo(win32con.SPI_GETMOUSEHOVERWIDTH)
new_h=win32gui.SystemParametersInfo(win32con.SPI_GETMOUSEHOVERHEIGHT)
new_t=win32gui.SystemParametersInfo(win32con.SPI_GETMOUSEHOVERTIME)
assert new_w==w
assert new_h==h
assert new_t==t
print "SPI_SETDOUBLECLKWIDTH, SPI_SETDOUBLECLKHEIGHT"
x=win32api.GetSystemMetrics(win32con.SM_CXDOUBLECLK)
y=win32api.GetSystemMetrics(win32con.SM_CYDOUBLECLK)
print 'x,y:', x, y
win32gui.SystemParametersInfo(win32con.SPI_SETDOUBLECLKWIDTH, x+1)
win32gui.SystemParametersInfo(win32con.SPI_SETDOUBLECLKHEIGHT, y+2)
new_x=win32api.GetSystemMetrics(win32con.SM_CXDOUBLECLK)
new_y=win32api.GetSystemMetrics(win32con.SM_CYDOUBLECLK)
print 'new x,y:', new_x, new_y
assert new_x==x+1
assert new_y==y+2
win32gui.SystemParametersInfo(win32con.SPI_SETDOUBLECLKWIDTH, x)
win32gui.SystemParametersInfo(win32con.SPI_SETDOUBLECLKHEIGHT, y)
new_x=win32api.GetSystemMetrics(win32con.SM_CXDOUBLECLK)
new_y=win32api.GetSystemMetrics(win32con.SM_CYDOUBLECLK)
assert new_x==x
assert new_y==y
print "SPI_SETDRAGWIDTH, SPI_SETDRAGHEIGHT"
dw=win32api.GetSystemMetrics(win32con.SM_CXDRAG)
dh=win32api.GetSystemMetrics(win32con.SM_CYDRAG)
print 'dw,dh:', dw, dh
win32gui.SystemParametersInfo(win32con.SPI_SETDRAGWIDTH,dw+1)
win32gui.SystemParametersInfo(win32con.SPI_SETDRAGHEIGHT,dh+2)
new_dw=win32api.GetSystemMetrics(win32con.SM_CXDRAG)
new_dh=win32api.GetSystemMetrics(win32con.SM_CYDRAG)
print 'new dw,dh:', new_dw, new_dh
assert new_dw==dw+1
assert new_dh==dh+2
win32gui.SystemParametersInfo(win32con.SPI_SETDRAGWIDTH,dw)
win32gui.SystemParametersInfo(win32con.SPI_SETDRAGHEIGHT,dh)
new_dw=win32api.GetSystemMetrics(win32con.SM_CXDRAG)
new_dh=win32api.GetSystemMetrics(win32con.SM_CYDRAG)
assert new_dw==dw
assert new_dh==dh
orig_wallpaper=win32gui.SystemParametersInfo(Action=win32con.SPI_GETDESKWALLPAPER)
print 'Original: ',orig_wallpaper
for bmp in glob.glob(os.path.join(os.environ['windir'],'*.bmp')):
print bmp
win32gui.SystemParametersInfo(win32con.SPI_SETDESKWALLPAPER, Param=bmp)
print win32gui.SystemParametersInfo(Action=win32con.SPI_GETDESKWALLPAPER)
time.sleep(1)
win32gui.SystemParametersInfo(win32con.SPI_SETDESKWALLPAPER, Param=orig_wallpaper)
|
[] |
[] |
[
"windir"
] |
[]
|
["windir"]
|
python
| 1 | 0 | |
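
The demo above applies the same round-trip to every SPI constant it lists: read the current value with the SPI_GET* action, write a changed value with the matching SPI_SET* action, verify the change, then restore the original. A minimal sketch of that get/flip/restore pattern, assuming a Windows host with pywin32 installed (it briefly flips the font-smoothing flag used in the demo and restores it):

import win32con
import win32gui

def toggle_and_restore(get_action, set_action):
    # Read the current boolean setting, flip it, confirm the change stuck,
    # then put the original value back.
    original = win32gui.SystemParametersInfo(get_action)
    win32gui.SystemParametersInfo(set_action, not original)
    assert win32gui.SystemParametersInfo(get_action) != original
    win32gui.SystemParametersInfo(set_action, original)
    assert win32gui.SystemParametersInfo(get_action) == original

toggle_and_restore(win32con.SPI_GETFONTSMOOTHING, win32con.SPI_SETFONTSMOOTHING)
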
cmd/cloudFoundryCreateService_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/validation"
"github.com/spf13/cobra"
)
type cloudFoundryCreateServiceOptions struct {
CfAPIEndpoint string `json:"cfApiEndpoint,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
CfOrg string `json:"cfOrg,omitempty"`
CfSpace string `json:"cfSpace,omitempty"`
CfService string `json:"cfService,omitempty"`
CfServicePlan string `json:"cfServicePlan,omitempty"`
CfServiceInstanceName string `json:"cfServiceInstanceName,omitempty"`
CfServiceBroker string `json:"cfServiceBroker,omitempty"`
CfCreateServiceConfig string `json:"cfCreateServiceConfig,omitempty"`
CfServiceTags string `json:"cfServiceTags,omitempty"`
ServiceManifest string `json:"serviceManifest,omitempty"`
ManifestVariables []string `json:"manifestVariables,omitempty"`
ManifestVariablesFiles []string `json:"manifestVariablesFiles,omitempty"`
}
// CloudFoundryCreateServiceCommand Creates one or multiple Services in Cloud Foundry
func CloudFoundryCreateServiceCommand() *cobra.Command {
const STEP_NAME = "cloudFoundryCreateService"
metadata := cloudFoundryCreateServiceMetadata()
var stepConfig cloudFoundryCreateServiceOptions
var startTime time.Time
var logCollector *log.CollectorHook
var splunkClient *splunk.Splunk
telemetryClient := &telemetry.Telemetry{}
var createCloudFoundryCreateServiceCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Creates one or multiple Services in Cloud Foundry",
Long: `Creates one or multiple Cloud Foundry Services in Cloud Foundry
Mandatory:
* Cloud Foundry API endpoint, Organization, Space and user are available
Please provide either of the following options:
* If you choose to create a single Service, the Service Instance Name, Service Plan and Service Broker of the Service to be created have to be available. You can set the optional ` + "`" + `cfCreateServiceConfig` + "`" + ` flag to configure the Service creation with your respective JSON configuration. The JSON configuration can either be an in-line JSON string or the path to a dedicated JSON configuration file containing the JSON configuration. If you choose a dedicated config file, you must store the file in the same folder as your ` + "`" + `Jenkinsfile` + "`" + ` that starts the Pipeline in order for the Pipeline to be able to find the file. The most favourable SCM is Git. If you want the service to be created from a particular broker you can set the optional ` + "`" + `cfServiceBroker` + "`" + ` flag. You can set user-provided tags for the Service creation using a flat list as the value for the optional ` + "`" + `cfServiceTags` + "`" + ` flag. The optional ` + "`" + `cfServiceBroker` + "`" + ` flag can be used when the service name is ambiguous.
* For creating one or multiple Cloud Foundry Services at once with the Cloud Foundry Create-Service-Push Plugin, use the optional ` + "`" + `serviceManifest` + "`" + ` flag. If you choose to set this flag, the Create-Service-Push Plugin will be used for all Service creations in this step and you will need to provide a ` + "`" + `serviceManifest.yml` + "`" + ` file. In that case, the above-described flags and options will not be used for the Service creations, since you chose to use the Create-Service-Push Plugin. Please see the examples below for more information on how to make use of the plugin with the appropriate step configuration. Additionally, the Plugin provides the option to make use of variable substitution for the Service creations. You can find further information regarding the functionality of the Cloud Foundry Create-Service-Push Plugin in the respective documentation: [Cloud Foundry Create-Service-Push Plugin](https://github.com/dawu415/CF-CLI-Create-Service-Push-Plugin)`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient = &splunk.Splunk{}
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
validation, err := validation.New(validation.WithJSONNamesForStructFields(), validation.WithPredefinedErrorMessages())
if err != nil {
return err
}
if err = validation.ValidateStruct(stepConfig); err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
stepTelemetryData := telemetry.CustomData{}
stepTelemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
stepTelemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
stepTelemetryData.ErrorCategory = log.GetErrorCategory().String()
stepTelemetryData.PiperCommitHash = GitCommit
telemetryClient.SetData(&stepTelemetryData)
telemetryClient.Send()
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Send(telemetryClient.GetData(), logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetryClient.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
cloudFoundryCreateService(stepConfig, &stepTelemetryData)
stepTelemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addCloudFoundryCreateServiceFlags(createCloudFoundryCreateServiceCmd, &stepConfig)
return createCloudFoundryCreateServiceCmd
}
func addCloudFoundryCreateServiceFlags(cmd *cobra.Command, stepConfig *cloudFoundryCreateServiceOptions) {
cmd.Flags().StringVar(&stepConfig.CfAPIEndpoint, "cfApiEndpoint", `https://api.cf.eu10.hana.ondemand.com`, "Cloud Foundry API endpoint")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User or E-Mail for CF")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for Cloud Foundry User")
cmd.Flags().StringVar(&stepConfig.CfOrg, "cfOrg", os.Getenv("PIPER_cfOrg"), "Cloud Foundry org")
cmd.Flags().StringVar(&stepConfig.CfSpace, "cfSpace", os.Getenv("PIPER_cfSpace"), "Cloud Foundry Space")
cmd.Flags().StringVar(&stepConfig.CfService, "cfService", os.Getenv("PIPER_cfService"), "Parameter for Cloud Foundry Service to be used for creating Cloud Foundry Service")
cmd.Flags().StringVar(&stepConfig.CfServicePlan, "cfServicePlan", os.Getenv("PIPER_cfServicePlan"), "Parameter for Cloud Foundry Service Plan to be used when creating a Cloud Foundry Service")
cmd.Flags().StringVar(&stepConfig.CfServiceInstanceName, "cfServiceInstanceName", os.Getenv("PIPER_cfServiceInstanceName"), "Parameter for naming the Service Instance when creating a Cloud Foundry Service")
cmd.Flags().StringVar(&stepConfig.CfServiceBroker, "cfServiceBroker", os.Getenv("PIPER_cfServiceBroker"), "Parameter for Service Broker to be used when creating a Cloud Foundry Service")
cmd.Flags().StringVar(&stepConfig.CfCreateServiceConfig, "cfCreateServiceConfig", os.Getenv("PIPER_cfCreateServiceConfig"), "Path to JSON file or JSON in-line string for a Cloud Foundry Service creation")
cmd.Flags().StringVar(&stepConfig.CfServiceTags, "cfServiceTags", os.Getenv("PIPER_cfServiceTags"), "Flat list of Tags to be used when creating a Cloud Foundry Service in a single string")
cmd.Flags().StringVar(&stepConfig.ServiceManifest, "serviceManifest", `service-manifest.yml`, "Path to Cloud Foundry Service Manifest in YAML format for multiple service creations that are being passed to a Create-Service-Push Cloud Foundry cli plugin")
cmd.Flags().StringSliceVar(&stepConfig.ManifestVariables, "manifestVariables", []string{}, "Defines a List of variables as key-value Map objects used for variable substitution within the file given by the Manifest. Defaults to an empty list, if not specified otherwise. This can be used to set variables like it is provided by `cf push --var key=value`. The order of the maps of variables given in the list is relevant in case there are conflicting variable names and values between maps contained within the list. In case of conflicts, the last specified map in the list will win. Though each map entry in the list can contain more than one key-value pair for variable substitution, it is recommended to stick to one entry per map, and rather declare more maps within the list. The reason is that if a map in the list contains more than one key-value entry, and the entries are conflicting, the conflict resolution behavior is undefined (since map entries have no sequence). Variables defined via `manifestVariables` always win over conflicting variables defined via any file given by `manifestVariablesFiles` - no matter what is declared before. This is the same behavior as can be observed when using `cf push --var` in combination with `cf push --vars-file`")
cmd.Flags().StringSliceVar(&stepConfig.ManifestVariablesFiles, "manifestVariablesFiles", []string{}, "Defines the manifest variables Yaml files to be used to replace variable references in manifest. This parameter is optional and will default to `manifest-variables.yml`. This can be used to set variable files like it is provided by `cf push --vars-file <file>`. If the manifest is present and so are all variable files, a variable substitution will be triggered that uses the `cfManifestSubstituteVariables` step before deployment. The format of variable references follows the Cloud Foundry standard in `https://docs.cloudfoundry.org/devguide/deploy-apps/manifest-attributes.html#variable-substitution`")
cmd.MarkFlagRequired("cfApiEndpoint")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("cfOrg")
cmd.MarkFlagRequired("cfSpace")
}
// retrieve step metadata
func cloudFoundryCreateServiceMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "cloudFoundryCreateService",
Aliases: []config.Alias{},
Description: "Creates one or multiple Services in Cloud Foundry",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "cfCredentialsId", Description: "Jenkins 'Username with password' credentials ID containing user and password to authenticate to the Cloud Foundry API.", Type: "jenkins", Aliases: []config.Alias{{Name: "cloudFoundry/credentialsId", Deprecated: false}}},
},
Resources: []config.StepResources{
{Name: "deployDescriptor", Type: "stash"},
},
Parameters: []config.StepParameters{
{
Name: "cfApiEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "cloudFoundry/apiEndpoint"}},
Default: `https://api.cf.eu10.hana.ondemand.com`,
},
{
Name: "username",
ResourceRef: []config.ResourceReference{
{
Name: "cfCredentialsId",
Param: "username",
Type: "secret",
},
{
Name: "cloudfoundryVaultSecretName",
Type: "vaultSecret",
Default: "cloudfoundry-$(org)-$(space)",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_username"),
},
{
Name: "password",
ResourceRef: []config.ResourceReference{
{
Name: "cfCredentialsId",
Param: "password",
Type: "secret",
},
{
Name: "cloudfoundryVaultSecretName",
Type: "vaultSecret",
Default: "cloudfoundry-$(org)-$(space)",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_password"),
},
{
Name: "cfOrg",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "cloudFoundry/org"}},
Default: os.Getenv("PIPER_cfOrg"),
},
{
Name: "cfSpace",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "cloudFoundry/space"}},
Default: os.Getenv("PIPER_cfSpace"),
},
{
Name: "cfService",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/service"}},
Default: os.Getenv("PIPER_cfService"),
},
{
Name: "cfServicePlan",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/servicePlan"}},
Default: os.Getenv("PIPER_cfServicePlan"),
},
{
Name: "cfServiceInstanceName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceInstanceName"}},
Default: os.Getenv("PIPER_cfServiceInstanceName"),
},
{
Name: "cfServiceBroker",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceBroker"}},
Default: os.Getenv("PIPER_cfServiceBroker"),
},
{
Name: "cfCreateServiceConfig",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/createServiceConfig"}},
Default: os.Getenv("PIPER_cfCreateServiceConfig"),
},
{
Name: "cfServiceTags",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceTags"}},
Default: os.Getenv("PIPER_cfServiceTags"),
},
{
Name: "serviceManifest",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceManifest"}, {Name: "cfServiceManifest"}},
Default: `service-manifest.yml`,
},
{
Name: "manifestVariables",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/manifestVariables"}, {Name: "cfManifestVariables"}},
Default: []string{},
},
{
Name: "manifestVariablesFiles",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/manifestVariablesFiles"}, {Name: "cfManifestVariablesFiles"}},
Default: []string{},
},
},
},
Containers: []config.Container{
{Name: "cf", Image: "ppiper/cf-cli:latest"},
},
},
}
return theMetaData
}
|
[
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_cfOrg\"",
"\"PIPER_cfSpace\"",
"\"PIPER_cfService\"",
"\"PIPER_cfServicePlan\"",
"\"PIPER_cfServiceInstanceName\"",
"\"PIPER_cfServiceBroker\"",
"\"PIPER_cfCreateServiceConfig\"",
"\"PIPER_cfServiceTags\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_cfOrg\"",
"\"PIPER_cfSpace\"",
"\"PIPER_cfService\"",
"\"PIPER_cfServicePlan\"",
"\"PIPER_cfServiceInstanceName\"",
"\"PIPER_cfServiceBroker\"",
"\"PIPER_cfCreateServiceConfig\"",
"\"PIPER_cfServiceTags\""
] |
[] |
[
"PIPER_cfServiceTags",
"PIPER_cfSpace",
"PIPER_cfServicePlan",
"PIPER_password",
"PIPER_cfService",
"PIPER_username",
"PIPER_cfServiceInstanceName",
"PIPER_cfServiceBroker",
"PIPER_cfOrg",
"PIPER_cfCreateServiceConfig"
] |
[]
|
["PIPER_cfServiceTags", "PIPER_cfSpace", "PIPER_cfServicePlan", "PIPER_password", "PIPER_cfService", "PIPER_username", "PIPER_cfServiceInstanceName", "PIPER_cfServiceBroker", "PIPER_cfOrg", "PIPER_cfCreateServiceConfig"]
|
go
| 10 | 0 | |
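
Most flags in the generated step above default to an environment variable lookup (os.Getenv("PIPER_<flag>")), so values can come from the pipeline environment and still be overridden on the command line. A rough Python analogue of that "flag default from environment" pattern, with argparse standing in for cobra (only the PIPER_username name is taken from the step; everything else here is illustrative):

import argparse
import os

parser = argparse.ArgumentParser(prog="cloudFoundryCreateService-sketch")
# The default comes from the environment, mirroring os.Getenv("PIPER_username")
# in the generated Go code; an explicit --username argument overrides it.
parser.add_argument("--username",
                    default=os.environ.get("PIPER_username", ""),
                    help="User or E-Mail for CF")

args = parser.parse_args([])  # no CLI value -> environment value (or empty string)
print("resolved username:", args.username)

args = parser.parse_args(["--username", "someone"])  # CLI value wins over the environment
print("resolved username:", args.username)
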
misago/users/tests/test_gender_profilefield.py
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils import six
from misago.admin.testutils import AdminTestCase
UserModel = get_user_model()
class GenderProfileFieldTests(AdminTestCase):
def setUp(self):
super(GenderProfileFieldTests, self).setUp()
self.test_link = reverse(
'misago:admin:users:accounts:edit',
kwargs={
'pk': self.user.pk,
},
)
def test_field_displays_in_admin(self):
"""field displays in admin"""
response = self.client.get(self.test_link)
self.assertContains(response, 'name="gender"')
def test_admin_clears_field(self):
"""admin form allows admins to clear field"""
self.user.profile_fields['gender'] = 'female'
self.user.save()
self.reload_user()
self.assertEqual(self.user.profile_fields['gender'], 'female')
response = self.client.post(
self.test_link,
data={
'username': 'Edited',
'rank': six.text_type(self.user.rank_id),
'roles': six.text_type(self.user.roles.all()[0].pk),
'email': '[email protected]',
'new_password': '',
'signature': '',
'is_signature_locked': '0',
'is_hiding_presence': '0',
'limits_private_thread_invites_to': '0',
'signature_lock_staff_message': '',
'signature_lock_user_message': '',
'subscribe_to_started_threads': '2',
'subscribe_to_replied_threads': '2',
}
)
self.assertEqual(response.status_code, 302)
self.reload_user()
self.assertEqual(self.user.profile_fields['gender'], '')
def test_admin_validates_field(self):
"""admin form allows admins to edit field"""
response = self.client.post(
self.test_link,
data={
'username': 'Edited',
'rank': six.text_type(self.user.rank_id),
'roles': six.text_type(self.user.roles.all()[0].pk),
'email': '[email protected]',
'gender': 'attackcopter',
'new_password': '',
'signature': '',
'is_signature_locked': '0',
'is_hiding_presence': '0',
'limits_private_thread_invites_to': '0',
'signature_lock_staff_message': '',
'signature_lock_user_message': '',
'subscribe_to_started_threads': '2',
'subscribe_to_replied_threads': '2',
}
)
self.assertContains(response, 'attackcopter is not one of the available choices.')
def test_admin_edits_field(self):
"""admin form allows admins to edit field"""
response = self.client.post(
self.test_link,
data={
'username': 'Edited',
'rank': six.text_type(self.user.rank_id),
'roles': six.text_type(self.user.roles.all()[0].pk),
'email': '[email protected]',
'gender': 'female',
'new_password': '',
'signature': '',
'is_signature_locked': '0',
'is_hiding_presence': '0',
'limits_private_thread_invites_to': '0',
'signature_lock_staff_message': '',
'signature_lock_user_message': '',
'subscribe_to_started_threads': '2',
'subscribe_to_replied_threads': '2',
}
)
self.assertEqual(response.status_code, 302)
self.reload_user()
self.assertEqual(self.user.profile_fields['gender'], 'female')
def test_admin_search_field(self):
"""admin users search searches this field"""
test_link = reverse('misago:admin:users:accounts:index')
response = self.client.get('{}?redirected=1&profilefields=female'.format(test_link))
self.assertContains(response, "No users matching search criteria have been found.")
# search by value
self.user.profile_fields['gender'] = 'female'
self.user.save()
response = self.client.get('{}?redirected=1&profilefields=female'.format(test_link))
self.assertNotContains(response, "No users matching search criteria have been found.")
# search by choice name
self.user.profile_fields['gender'] = 'secret'
self.user.save()
response = self.client.get('{}?redirected=1&profilefields=telling'.format(test_link))
self.assertNotContains(response, "No users matching search criteria have been found.")
def test_field_display(self):
"""field displays on user profile when filled in"""
test_link = reverse(
'misago:user-details',
kwargs={
'pk': self.user.pk,
'slug': self.user.slug,
},
)
response = self.client.get(test_link)
self.assertNotContains(response, 'Gender')
self.user.profile_fields['gender'] = 'secret'
self.user.save()
response = self.client.get(test_link)
self.assertContains(response, 'Gender')
self.assertContains(response, 'Not telling')
def test_field_outdated_hidden(self):
"""field with outdated value is hidden"""
test_link = reverse(
'misago:user-details',
kwargs={
'pk': self.user.pk,
'slug': self.user.slug,
},
)
response = self.client.get(test_link)
self.assertNotContains(response, 'Gender')
self.user.profile_fields['gender'] = 'not valid'
self.user.save()
response = self.client.get(test_link)
self.assertNotContains(response, 'Gender')
def test_field_display_json(self):
"""field is included in display json"""
test_link = reverse('misago:api:user-details', kwargs={'pk': self.user.pk})
response = self.client.get(test_link)
self.assertEqual(
response.json()['groups'],
[
{
'name': 'IP address',
'fields': [
{
'fieldname': 'join_ip',
'name': 'Join IP',
'text': '127.0.0.1',
},
{
'fieldname': 'last_ip',
'name': 'Last IP',
'text': '127.0.0.1',
},
],
},
]
)
self.user.profile_fields['gender'] = 'male'
self.user.save()
response = self.client.get(test_link)
self.assertEqual(
response.json()['groups'],
[
{
'name': 'Personal',
'fields': [
{
'fieldname': 'gender',
'name': 'Gender',
'text': 'Male',
}
],
},
{
'name': 'IP address',
'fields': [
{
'fieldname': 'join_ip',
'name': 'Join IP',
'text': '127.0.0.1',
},
{
'fieldname': 'last_ip',
'name': 'Last IP',
'text': '127.0.0.1',
},
],
},
]
)
def test_field_outdated_hidden_json(self):
"""field with outdated value is removed in display json"""
test_link = reverse('misago:api:user-details', kwargs={'pk': self.user.pk})
response = self.client.get(test_link)
self.assertEqual(
response.json()['groups'],
[
{
'name': 'IP address',
'fields': [
{
'fieldname': 'join_ip',
'name': 'Join IP',
'text': '127.0.0.1',
},
{
'fieldname': 'last_ip',
'name': 'Last IP',
'text': '127.0.0.1',
},
],
},
]
)
self.user.profile_fields['gender'] = 'invalid'
self.user.save()
response = self.client.get(test_link)
self.assertEqual(
response.json()['groups'],
[
{
'name': 'IP address',
'fields': [
{
'fieldname': 'join_ip',
'name': 'Join IP',
'text': '127.0.0.1',
},
{
'fieldname': 'last_ip',
'name': 'Last IP',
'text': '127.0.0.1',
},
],
},
]
)
def test_api_returns_field_json(self):
"""field json is returned from API"""
test_link = reverse('misago:api:user-edit-details', kwargs={'pk': self.user.pk})
response = self.client.get(test_link)
found_field = None
for group in response.json():
for field in group['fields']:
if field['fieldname'] == 'gender':
found_field = field
self.assertEqual(found_field, {
'fieldname': 'gender',
'label': 'Gender',
'help_text': None,
'input': {
'type': 'select',
'choices': [
{'label': 'Not specified', 'value': ''},
{'label': 'Not telling', 'value': 'secret'},
{'label': 'Female', 'value': 'female'},
{'label': 'Male', 'value': 'male'},
],
},
'initial': '',
})
def test_api_clears_field(self):
"""field can be cleared via api"""
test_link = reverse('misago:api:user-edit-details', kwargs={'pk': self.user.pk})
self.user.profile_fields['gender'] = 'secret'
self.user.save()
self.reload_user()
self.assertEqual(self.user.profile_fields['gender'], 'secret')
response = self.client.post(test_link, data={})
self.assertEqual(response.status_code, 200)
self.reload_user()
self.assertEqual(self.user.profile_fields['gender'], '')
def test_api_validates_field(self):
"""field can be edited via api"""
test_link = reverse('misago:api:user-edit-details', kwargs={'pk': self.user.pk})
response = self.client.post(test_link, data={'gender': 'attackhelicopter'})
self.assertContains(response, "attackhelicopter is not one of the available choices.", status_code=400)
def test_api_edits_field(self):
"""field can be edited via api"""
test_link = reverse('misago:api:user-edit-details', kwargs={'pk': self.user.pk})
response = self.client.post(test_link, data={'gender': 'female'})
self.assertEqual(response.status_code, 200)
self.reload_user()
self.assertEqual(self.user.profile_fields['gender'], 'female')
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
haros/extractor.py
|
#Copyright (c) 2017 Andre Santos
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
###############################################################################
# Imports
###############################################################################
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import next
from builtins import str
from builtins import range
from past.builtins import basestring
from builtins import object
from fnmatch import fnmatch
import itertools
import logging
from operator import attrgetter
import os
import re
import subprocess
from urllib.request import urlopen
from urllib.error import URLError
import xml.etree.ElementTree as ET
import yaml
from bonsai.model import (
CodeGlobalScope, CodeReference, CodeFunctionCall, pretty_str
)
from bonsai.cpp.model import (
CppEntity, CppFunctionCall, CppDefaultArgument, CppOperator, CppReference
)
from bonsai.analysis import (
CodeQuery, resolve_reference, resolve_expression, get_control_depth,
get_conditions, get_condition_paths, is_under_loop
)
try:
from bonsai.cpp.clang_parser import CppAstParser
except ImportError:
CppAstParser = None
from bonsai.py.py_parser import PyAstParser
from rospkg import RosPack, RosStack, ResourceNotFound
from xml.etree.cElementTree import ElementTree
from distutils.spawn import find_executable
from .cmake_parser import RosCMakeParser
from .launch_parser import LaunchParser, LaunchParserError
from .metamodel import (
Project, Repository, Package, SourceFile, Node, Person, SourceCondition,
AdvertiseCall, SubscribeCall, AdvertiseServiceCall,
ServiceClientCall, Location, GetParamCall, SetParamCall
)
from .util import cwd
###############################################################################
# Utility
###############################################################################
class LoggingObject(object):
log = logging.getLogger(__name__)
def findRosPackages(paths = None, as_stack = False):
"""
Find ROS packages inside folders.
:param paths: [list] of [str] File system path to search, [None] to use the ROS default search paths.
:param as_stack: [bool] Whether the paths point to stacks.
:returns: [dict] Dictionary of [str]package_name -> [str]package_path.
"""
ros_version = os.environ.get("ROS_VERSION")
if ros_version != "1":
# try ROS2 crawling with colcon if possible
        # (in ambiguous cases, we give preference to trying the ROS2 method first,
        # because ROS1 rospkg only produces misleading/incorrect information
        # when used in ROS2/mixed workspaces).
colcon = find_executable('colcon')
if colcon != None:
cmd = [colcon, 'list']
if paths != None:
cmd.extend(['--base-paths'])
cmd.extend(paths)
try:
pkglist = subprocess.check_output(cmd)
# format is <pkg_name>\t<pkg_path>\t<build_system>\n
pkglist = pkglist.split('\n')
pkgs = {}
for pkginfo in pkglist:
pkginfo_parts = pkginfo.split('\t')
if len(pkginfo_parts) < 2:
continue
if pkginfo_parts[0] in pkgs:
continue
pkgs[pkginfo_parts[0]] = pkginfo_parts[1]
return pkgs
except:
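                # colcon failed or returned unexpected output; fall through to
                # the ROS1 (rospkg/rosstack) lookup below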
pass
# ^ if colcon != None
# ^ if ros_version != "1"
# else: try the ROS1 way
ros = None
if as_stack:
ros = RosStack.get_instance(paths)
else:
ros = RosPack.get_instance(paths)
pkg_names = ros.list()
pkgs = {}
for pkg_name in pkg_names:
if pkg_name in pkgs:
continue
pkgs[pkg_name] = ros.get_path(pkg_name)
return pkgs
# ^ findRosPackages(paths)
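# Editor's usage sketch (not part of the original module): whichever crawler is
# used (colcon for ROS2, rospkg/rosstack for ROS1), findRosPackages() returns a
# plain {package_name: package_path} dict. The workspace path below is a
# hypothetical placeholder.
def _demo_find_ros_packages():
    for pkg_name, pkg_path in findRosPackages(["/path/to/ws/src"]).items():
        print("%s -> %s" % (pkg_name, pkg_path))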
_EMPTY_DICT = {}
_EMPTY_LIST = ()
###############################################################################
# Source Extractor
###############################################################################
class ProjectExtractor(LoggingObject):
def __init__(self, index_file, env = None, pkg_cache = None,
repo_cache = None, repo_path = None, distro_url = None,
require_repos = False, parse_nodes = False, node_cache = None):
self.log.debug("ProjectExtractor(%s, %s, %s)",
index_file, repo_path, distro_url)
self.index_file = index_file
self.repo_path = repo_path
self.distribution = distro_url
self.require_repos = require_repos
self.parse_nodes = parse_nodes
self.environment = env if not env is None else {}
self.package_cache = pkg_cache if not pkg_cache is None else {}
self.repo_cache = repo_cache if not repo_cache is None else {}
self.node_cache = node_cache if not node_cache is None else {}
self.project = None
self.packages = None
self.missing = None
self.repositories = None
self.configurations = None
self.node_specs = None
self.rules = None
self.analysis = None
self._extra_packages = set()
def index_source(self, settings=None):
self.log.debug("ProjectExtractor.index_source()")
self._setup()
settings.update_analysis_preferences(self.analysis)
self._load_user_repositories()
self._find_local_packages()
if self.missing and self.distribution:
self._load_distro_repositories()
self._find_local_packages()
self._topological_sort()
for name in self.missing:
self.log.warning("Could not find package " + name)
self._populate_packages_and_dependencies(settings=settings)
self._update_node_cache()
self._find_nodes(settings)
self._update_nodes_from_specs()
def _setup(self):
try:
with open(self.index_file, "r") as handle:
data = yaml.safe_load(handle)
except IOError as e:
data = {}
self.project = Project(data.get("project", "default"))
self.repositories = data.get("repositories", {})
self.packages = set(data.get("packages")
or list(findRosPackages(["."])))
self.missing = set(self.packages)
self.configurations = data.get("configurations", {})
self.node_specs = data.get("nodes", {})
self.project.node_specs = self.node_specs
self.rules = data.get("rules", {})
self.analysis = data.get("analysis", {})
for node_name in self.node_specs:
if not "/" in node_name:
raise ValueError("expected '<pkg>/<node>' in node specs")
pkg, exe = node_name.split("/")
self._extra_packages.add(pkg)
self.missing.update(self._extra_packages)
def _load_user_repositories(self):
self.log.info("Looking up user provided repositories.")
extractor = RepositoryExtractor()
for name, data in self.repositories.items():
repo = self.repo_cache.get(name)
if repo:
self.project.repositories.append(repo)
else:
extractor.load_from_user(name, data, project = self.project)
if self.repo_path:
try:
extractor.download(self.repo_path)
except RepositoryCloneError as e:
if self.require_repos:
raise e
else:
self.log.warning("Could not download all repositories.")
def _find_local_packages(self):
self.log.info("Looking for packages locally.")
cdir = os.path.abspath(".")
alt_paths = [self.repo_path, cdir] if self.repo_path else [cdir]
extractor = PackageExtractor(alt_paths = alt_paths)
extractor.refresh_package_cache()
found = []
for name in self.missing:
analyse = name in self.packages
pkg = self.package_cache.get(name)
if pkg:
self.project.packages.append(pkg)
found.append(name)
pkg._analyse = analyse
else:
pkg = extractor.find_package(name, project=self.project)
if pkg:
found.append(name)
pkg._analyse = analyse
self.missing.difference_update(found)
def _load_distro_repositories(self):
self.log.info("Looking up repositories from official distribution.")
try:
data = yaml.safe_load(urlopen(self.distribution).read())["repositories"]
except URLError as e:
self.log.warning("Could not download distribution data.")
return
extractor = RepositoryExtractor()
extractor.load_needed_from_distro(data, self.missing, self.project)
if self.repo_path:
try:
extractor.download(self.repo_path)
except RepositoryCloneError as e:
if self.require_repos:
raise e
else:
self.log.warning("Could not download all repositories.")
def _topological_sort(self):
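        # Kahn-style layering: a package is emitted into the current tier once
        # all of its in-project dependencies have been emitted in earlier
        # tiers; if a pass makes no progress, the remaining packages form a
        # dependency cycle and are all dumped into the current tier.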
dependencies = {}
pending = list(self.project.packages)
for pkg in self.project.packages:
pkg.topological_tier = -1
dependencies[pkg.id] = set(p for p in pkg.dependencies.packages
if p in self.packages)
tier = 1
emitted = []
while pending:
next_pending = []
next_emitted = []
for pkg in pending:
deps = dependencies[pkg.id]
deps.difference_update(emitted)
if deps:
next_pending.append(pkg)
else:
pkg.topological_tier = tier
next_emitted.append(pkg.name)
if not next_emitted:
# cyclic dependencies detected
self.log.warning("Cyclic dependencies: %s", next_pending)
for pkg in next_pending:
pkg.topological_tier = tier
next_pending = None
pending = next_pending
emitted = next_emitted
tier += 1
self.project.packages.sort(key = attrgetter("topological_tier", "id"))
def _populate_packages_and_dependencies(self, settings=None):
found = set()
extractor = PackageExtractor()
extractor.packages = self.project.packages
for pkg in self.project.packages:
found.add(pkg.name)
analysis_ignore = extractor._populate_package(
pkg, ignored_globs=settings.ignored_globs)
if settings is not None:
settings.ignored_lines.update(analysis_ignore)
deps = extractor._extra
extractor._extra = []
while deps:
pkg = deps.pop()
assert pkg.name not in found
pkg._analyse = False
found.add(pkg.name)
self.project.packages.append(pkg)
analysis_ignore = extractor._populate_package(
pkg, ignored_globs=settings.ignored_globs)
if settings is not None:
settings.ignored_lines.update(analysis_ignore)
deps.extend(extractor._extra)
extractor._extra = []
def _find_nodes(self, settings):
pkgs = {pkg.name: pkg for pkg in self.project.packages if pkg._analyse}
ws = settings.workspace
if not ws:
ws = settings.find_ros_workspace()
ws = os.path.abspath(ws)
if CppAstParser is None:
self.log.warning("C++ AST parser not found.")
extractor = NodeExtractor(pkgs, self.environment, ws = ws,
node_cache = self.node_cache,
parse_nodes = self.parse_nodes)
if self.parse_nodes and CppAstParser is not None:
if settings is None:
CppAstParser.set_library_path()
db_dir = os.path.join(extractor.workspace, "build")
if os.path.isfile(
os.path.join(db_dir, "compile_commands.json")):
CppAstParser.set_database(db_dir)
else:
                # use the C++ parser library file if given explicitly,
                # otherwise fall back to the library path
if settings.cpp_parser_lib_file:
CppAstParser.set_library_file(settings.cpp_parser_lib_file)
else:
CppAstParser.set_library_path(settings.cpp_parser_lib)
CppAstParser.set_standard_includes(settings.cpp_includes)
db_dir = settings.cpp_compile_db
if db_dir and os.path.isfile(
os.path.join(db_dir, "compile_commands.json")):
CppAstParser.set_database(settings.cpp_compile_db)
for pkg in self.project.packages:
if pkg._analyse and pkg.name not in self.package_cache:
extractor.find_nodes(pkg)
def _update_node_cache(self):
self.log.debug("Importing cached Nodes.")
data = [datum for datum in self.node_cache.values()]
self.node_cache = {}
empty_dict = {}
empty_list = ()
for datum in data:
try:
pkg = self._get_package(datum["package"])
source_files = self._get_files(pkg, datum["files"])
except ValueError as e:
# either a package or a file is no longer part of the analysis
self.log.debug("Cached node %s: %s", datum["name"], e)
continue
mtime = datum["timestamp"]
for sf in source_files:
if sf.timestamp > mtime:
# a file was modified, needs to be parsed again
break
else:
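                # none of the source files changed since the cache entry was
                # written, so the cached node description can be reused as-is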
node = Node(datum["name"], pkg, rosname = datum["rosname"],
nodelet = datum["nodelet"])
node.source_files = source_files
for p in datum["advertise"]:
node.advertise.append(self._pub_from_JSON(p))
for p in datum["subscribe"]:
node.subscribe.append(self._sub_from_JSON(p))
for p in datum["service"]:
node.service.append(self._srv_from_JSON(p))
for p in datum["client"]:
node.client.append(self._client_from_JSON(p))
for p in datum["readParam"]:
node.read_param.append(self._read_from_JSON(p))
for p in datum["writeParam"]:
node.write_param.append(self._write_from_JSON(p))
hpl = datum.get("hpl", empty_dict)
for p in hpl.get("properties", empty_list):
node.hpl_properties.append(p)
for a in hpl.get("assumptions", empty_list):
node.hpl_assumptions.append(a)
self.node_cache[node.node_name] = node
def _update_nodes_from_specs(self):
self.log.debug("Loading Nodes from specs.")
pkg_finder = PackageExtractor()
pkg_finder.packages.extend(self.project.packages)
nhm = NodeHints2(self.node_specs, pkg_finder=pkg_finder)
# nodes = dict(self.node_cache)
for pkg in self.project.packages:
for node in pkg.nodes:
node_type = node.node_name
if node_type not in self.node_cache:
self.log.debug(
"WARNING node %s is not in node cache!", node_type)
self.node_cache[node_type] = node
new_nodes = nhm.apply_to(self.node_cache, create=True)
for node in new_nodes:
assert node.node_name not in self.node_cache
self.node_cache[node.node_name] = node
node.package.nodes.append(node)
def _get_package(self, name):
for pkg in self.project.packages:
if pkg.name == name:
return pkg
raise ValueError("cannot find package: " + name)
def _get_files(self, pkg, filenames):
files = []
for filename in filenames:
found = False
for sf in pkg.source_files:
if sf.full_name == filename:
found = True
files.append(sf)
break
if not found:
raise ValueError("cannot find file: " + filename)
return files
def _pub_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return AdvertiseCall(datum["name"], datum["namespace"], datum["type"],
datum["queue"], latched=datum.get("latched", False),
control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs, location = l(datum["location"]))
def _sub_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return SubscribeCall(datum["name"], datum["namespace"], datum["type"],
datum["queue"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs, location = l(datum["location"]))
def _srv_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return AdvertiseServiceCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs,
location = l(datum["location"]))
def _client_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return ServiceClientCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs,
location = l(datum["location"]))
def _read_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return GetParamCall(datum["name"], datum["namespace"],
datum["type"], default_value=datum["default_value"],
control_depth=datum["depth"], repeats=datum["repeats"],
conditions=cs, location=l(datum["location"]))
def _write_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return SetParamCall(datum["name"], datum["namespace"],
datum["type"], value=datum["value"],
control_depth=datum["depth"], repeats=datum["repeats"],
conditions=cs, location=l(datum["location"]))
def _location_from_JSON(self, datum):
if datum is None:
return None
try:
pkg = self._get_package(datum["package"])
sf = None
filename = datum["file"]
if filename:
sf = self._get_files(pkg, [filename])[0]
except ValueError:
return None
return Location(pkg, file=sf, line=datum["line"], col=datum["column"],
fun=datum["function"], cls=datum["class"])
###############################################################################
# Repository Extractor
###############################################################################
class RepositoryCloneError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class RepositoryExtractor(LoggingObject):
def __init__(self):
self.repositories = []
self.declared_packages = set()
def load_from_user(self, name, data, project = None):
self.log.debug("RepositoryExtractor.from_user(%s, %s)", name, data)
repo = Repository(name, proj = project)
repo.status = "private"
repo.vcs = data["type"]
repo.url = data["url"]
repo.version = data["version"]
repo.declared_packages = data["packages"]
self.repositories.append(repo)
self.declared_packages.update(repo.declared_packages)
if project:
project.repositories.append(repo)
return repo
def load_from_distro(self, name, data, project = None):
self.log.debug("RepositoryExtractor.from_distro(%s, %s)", name, data)
if not "source" in data:
self.log.debug("There is no source in provided data.")
return
repo = Repository(name, proj = project)
repo.status = data.get("status")
src = data["source"]
repo.vcs = src["type"]
repo.url = src["url"]
repo.version = src["version"]
if "release" in data:
repo.declared_packages = data["release"].get("packages", [name])
self.repositories.append(repo)
self.declared_packages.update(repo.declared_packages)
if project:
project.repositories.append(repo)
return repo
def load_needed_from_distro(self, data, pkgs, project = None):
if not pkgs:
return True
remaining = set(pkgs)
for name, info in data.items():
if not "release" in info:
continue
for pkg in info["release"].get("packages", [name]):
try:
remaining.remove(pkg)
self.load_from_distro(name, info, project = project)
except KeyError as e:
pass
if not remaining:
break
return not remaining
def download(self, repo_path):
self.log.debug("RepositoryExtractor.download(%s)", repo_path)
for repo in self.repositories:
if not repo.url:
self.log.debug("%s has no URL to download from.", repo.id)
continue
path = os.path.join(repo_path, repo.name)
clone = False
if not os.path.exists(path):
os.makedirs(path)
clone = True
with cwd(path):
if repo.vcs == "git":
self._download_git(repo, path, clone)
elif repo.vcs == "hg":
self._download_hg(repo, path, clone)
elif repo.vcs == "svn":
self._download_svn(repo, path, clone)
return True
GIT_INIT = ("git", "init")
GIT_PULL = ("git", "pull")
GIT_COUNT = ("git", "rev-list", "HEAD", "--count")
def _download_git(self, repo, path, clone = False):
self.log.debug("RepositoryExtractor._download_git(%s)", path)
try:
if clone:
subprocess.check_call(self.GIT_INIT)
subprocess.check_call(["git", "remote",
"add", "-t", repo.version,
"-f", "origin", repo.url])
subprocess.check_call(["git", "checkout", repo.version])
else:
subprocess.check_call(self.GIT_PULL)
repo.path = path
repo.commits = int(subprocess.check_output(self.GIT_COUNT).rstrip())
except subprocess.CalledProcessError as e:
raise RepositoryCloneError("git error: " + str(e))
HG_PULL = ("hg", "pull")
HG_COUNT = ("hg", "id", "--num", "--rev", "tip")
def _download_hg(self, repo, path, clone = False):
self.log.debug("RepositoryExtractor._download_hg(%s)", path)
try:
if clone:
subprocess.check_call(["hg", "clone", repo.url,
"-r", repo.version])
else:
subprocess.check_call(self.HG_PULL)
repo.path = path
repo.commits = int(subprocess.check_output(self.HG_COUNT).rstrip())
except subprocess.CalledProcessError as e:
raise RepositoryCloneError("hg error: " + str(e))
SVN_FETCH = ("git", "svn", "fetch")
def _download_svn(self, repo, path, clone = False):
self.log.debug("RepositoryExtractor._download_svn(%s)", path)
try:
if clone:
if repo.version == "trunk":
version = repo.version
else:
version = "branches/" + repo.version
subprocess.check_call(["git", "svn", "clone",
"-T", version, repo.url])
else:
subprocess.check_call(self.SVN_FETCH)
            repo.path = path
            repo.commits = int(subprocess.check_output(self.GIT_COUNT).rstrip())
except subprocess.CalledProcessError as e:
raise RepositoryCloneError("git-svn error: " + str(e))
###############################################################################
# Package Extractor
###############################################################################
class PackageExtractor(LoggingObject):
def __init__(self, alt_paths = None):
self.packages = []
self.rospack_pkgs = None
self.rosstack_pkgs = None
self.alt_paths = alt_paths
self.altpack_pkgs = None
self.altstack_pkgs = None
self._pkg_cache = {}
self._extra = []
def refresh_package_cache(self):
self.rospack_pkgs = None
self.rosstack_pkgs = None
self.altpack_pkgs = None
self.altstack_pkgs = None
# To use with LaunchParser.
def get(self, pkg_id, populate=True):
self.log.debug("%s.get('%s')", type(self).__name__, pkg_id)
if pkg_id in self._pkg_cache:
return self._pkg_cache[pkg_id]
for pkg in self.packages:
if pkg.id == pkg_id:
self._pkg_cache[pkg_id] = pkg
return pkg
try:
assert pkg_id.startswith("package:")
pkg = self._find(pkg_id[8:], None)
self._pkg_cache[pkg_id] = pkg
self._extra.append(pkg)
pkg._analyse = False
if populate:
self._populate_package(pkg)
except (IOError, ET.ParseError, ResourceNotFound):
return None
return pkg
def find_package(self, name, project=None, analyse=True):
try:
pkg = self._find(name, project)
pkg._analyse = analyse
self.packages.append(pkg)
if project:
project.packages.append(pkg)
for repo in project.repositories:
if name in repo.declared_packages:
pkg.repository = repo
repo.packages.append(pkg)
break
# self._populate_package(pkg)
except (IOError, ET.ParseError, KeyError):
return None
return pkg
def find_package_at(self, dirpath, populate=True):
try:
manifest = os.path.join(dirpath, "package.xml")
pkg = PackageParser.parse(manifest)
if pkg.id in self._pkg_cache:
return self._pkg_cache[pkg.id]
else:
self._pkg_cache[pkg.id] = pkg
if pkg not in self._extra:
self._extra.append(pkg)
pkg._analyse = False
if populate:
self._populate_package(pkg)
except (IOError, ET.ParseError, KeyError):
return None
return pkg
def _find(self, name, project):
path = None
if self.alt_paths:
if self.altpack_pkgs == None:
self.altpack_pkgs = findRosPackages(paths=self.alt_paths, as_stack=False)
path = self.altpack_pkgs.get(name, None)
if (path == None):
if self.altstack_pkgs == None:
self.altstack_pkgs = findRosPackages(paths=self.alt_paths, as_stack=True)
path = self.altstack_pkgs.get(name, None)
if path is None:
if self.rospack_pkgs == None:
self.rospack_pkgs = findRosPackages(as_stack=False)
path = self.rospack_pkgs.get(name, None)
if path is None:
if self.rosstack_pkgs == None:
self.rosstack_pkgs = findRosPackages(as_stack=True)
path = self.rosstack_pkgs.get(name, None)
if path is None:
raise KeyError(name)
return PackageParser.parse(os.path.join(path, "package.xml"),
project = project)
EXCLUDED = (".git", "doc", "cmake", ".eggs", "__pycache__")
_START_GLOB = (os.path.sep, '*', '?', '[')
_BYTE_CODE = (".pyc", ".pyd", ".pyo")
def _populate_package(self, pkg, ignored_globs=None):
self.log.debug("PackageExtractor.populate(%s, %s)", pkg, ignored_globs)
if not pkg.path:
self.log.debug("Package %s has no path", pkg.name)
return
self.log.info("Indexing source files for package %s", pkg.name)
analysis_ignore = {}
#pkgs = {pkg.id: pkg for pkg in self.packages}
launch_parser = LaunchParser(pkgs=self)
prefix = len(pkg.path) + len(os.path.sep)
if ignored_globs is None:
ignored_globs = ()
else:
ignored_globs = list(ignored_globs)
for i in range(len(ignored_globs)):
c = ignored_globs[i][0]
if not c in self._START_GLOB:
ignored_globs[i] = '*/' + ignored_globs[i]
for root, subdirs, files in os.walk(pkg.path, topdown=True):
if 'COLCON_IGNORE' in files or 'AMENT_IGNORE' in files or 'CATKIN_IGNORE' in files:
del subdirs[:] # don't traverse into subdirectories
continue # skip
subdirs[:] = [d for d in subdirs if d not in self.EXCLUDED]
path = root[prefix:]
for filename in files:
self.log.debug("Found file %s at %s", filename, path)
source = SourceFile(filename, path, pkg)
self.log.debug("File language: %s", source.language)
sfn = os.path.join(pkg.name, source.full_name)
if source.language == "unknown":
if filename.endswith(self._BYTE_CODE):
self.log.debug("Python bytecode file %s was ignored",
sfn)
continue # skip this file
if any(fnmatch(sfn, pattern)
for pattern in ignored_globs):
self.log.debug(
"File %s was ignored due to glob pattern", sfn)
continue # skip this file
ignore = source.set_file_stats()
if any(v for v in ignore.values()):
analysis_ignore[source.id] = ignore
if pkg._analyse and source.language == "launch":
self.log.info("Parsing launch file: " + source.path)
try:
source.tree = launch_parser.parse(source.path)
except LaunchParserError as e:
self.log.warning("Parsing error in %s:\n%s",
source.path, str(e))
pkg.source_files.append(source)
pkg.size += source.size
pkg.lines += source.lines
pkg.sloc += source.sloc
return analysis_ignore
###############################################################################
# Package Parser
###############################################################################
class PackageParser(LoggingObject):
@staticmethod
def parse(pkg_file, project = None):
PackageParser.log.debug("PkgParser.parse(%s, %s)", pkg_file, project)
with open(pkg_file, "r") as handle:
root = ET.parse(handle).getroot()
name = root.find("name").text.strip()
package = Package(name, proj = project)
package.path = os.path.dirname(pkg_file)
PackageParser.log.info("Found package %s at %s", package, package.path)
PackageParser._parse_metadata(root, package)
PackageParser._parse_export(root, package)
PackageParser._parse_dependencies(root, package)
return package
@staticmethod
def _parse_metadata(xml, package):
package.description = (xml.find("description").text or "").strip()
for el in xml.findall("maintainer"):
name = (el.text or "?").strip()
email = el.get("email") or "[email protected]"
package.maintainers.add(Person(name, email))
for el in xml.findall("author"):
name = (el.text or "?").strip()
email = el.get("email") or "[email protected]"
package.authors.add(Person(name, email))
for el in xml.findall("license"):
package.licenses.add((el.text or "?").strip())
for el in xml.findall("url"):
value = el.get("type")
if value is None or value == "website":
if el.text:
package.website = el.text.strip()
elif value == "repository":
if el.text:
package.vcs_url = el.text.strip()
elif value == "bugtracker":
if el.text:
package.bug_url = el.text.strip()
el = xml.find("version")
if el is not None:
package.version = (el.text or "?").strip()
@staticmethod
def _parse_export(xml, package):
el = xml.find("export")
if not el is None:
package.is_metapackage = not el.find("metapackage") is None
if not el.find("nodelet") is None:
nodelets = el.find("nodelet").get("plugin")
nodelets = nodelets.replace("${prefix}", package.path)
with open(nodelets, "r") as handle:
xmltext = "<export>{}</export>".format(handle.read())
root = ET.fromstring(xmltext)
PackageParser.log.info("Found nodelets at %s", nodelets)
libs = []
for child in root:
if child.tag == "library":
libs.append(child)
else:
libs.extend(child.findall("library"))
for el in libs:
libname = el.get("path").rsplit(os.sep)[-1]
for cl in el.findall("class"):
nodelet = cl.get("type").split("::")[-1]
node = Node(libname, package, nodelet = nodelet)
package.nodes.append(node)
@staticmethod
def _parse_dependencies(xml, package):
sources = ["build_depend"]
if xml.get("format") == "2":
sources.extend(("depend", "build_export_depend", "exec_depend"))
else:
sources.append("run_depend")
for src in sources:
for el in xml.findall(src):
name = el.text.strip()
if name:
package.dependencies.packages.add(name)
###############################################################################
# Hard-coded Node Parser
###############################################################################
class HardcodedNodeParser(LoggingObject):
model_dir = None
distro = None
_cache = {}
@classmethod
def get(cls, pkg, node_type):
cls.log.debug("Fetching hard-coded node: (%s, %s, %s)",
pkg, node_type, cls.distro)
node_id = "node:" + pkg + "/" + node_type
if node_id in cls._cache:
cls.log.debug("Node already in cache.")
return cls._cache[node_id]
filename = os.path.join(cls.model_dir, pkg + ".yaml")
try:
with open(filename) as handle:
data = yaml.safe_load(handle)
except IOError as e:
cls.log.debug("YAML file not found: %s", filename)
return None
        if cls.distro not in data:
cls.log.debug("Package has no data for ROS %s.", cls.distro)
return None
        if node_type not in data[cls.distro]:
cls.log.debug("Node does not exist for ROS %s.", cls.distro)
return None
cls.log.debug("Building node from YAML data.")
pkg = Package(pkg)
pkg.path = "/tmp/" + pkg.name
node = cls._build_node(node_type, cls.distro, pkg, data)
cls._cache[node_id] = node
return node
@classmethod
def _build_node(cls, node_type, distro, pkg, data):
node_data = data[distro][node_type]
base = node_data.get("base")
if base:
node = cls._build_node(node_type, base, pkg, data)
else:
node = Node(node_type, pkg, rosname = node_data.get("rosname"),
nodelet = node_type if node_data["nodelet"] else None)
for datum in node_data.get("advertise", ()):
loc = cls._loc(pkg, datum)
pub = AdvertiseCall(datum["name"], datum["namespace"],
datum["type"], datum["queue"],
latched=datum.get("latched", False),
control_depth=datum["depth"],
repeats=datum["repeats"],
conditions=[SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.advertise.append(pub)
for datum in node_data.get("subscribe", ()):
loc = cls._loc(pkg, datum)
sub = SubscribeCall(datum["name"], datum["namespace"],
datum["type"], datum["queue"],
control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.subscribe.append(sub)
for datum in node_data.get("service", ()):
loc = cls._loc(pkg, datum)
srv = AdvertiseServiceCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.service.append(srv)
for datum in node_data.get("client", ()):
loc = cls._loc(pkg, datum)
cli = ServiceClientCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.client.append(cli)
for datum in node_data.get("readParam", ()):
loc = cls._loc(pkg, datum)
par = GetParamCall(datum["name"], datum["namespace"],
datum["type"], default_value=datum.get("default"),
control_depth=datum["depth"], repeats=datum["repeats"],
conditions=[SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.read_param.append(par)
for datum in node_data.get("writeParam", ()):
loc = cls._loc(pkg, datum)
par = SetParamCall(datum["name"], datum["namespace"],
datum["type"], value=datum.get("value"),
control_depth=datum["depth"], repeats=datum["repeats"],
conditions=[SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.write_param.append(par)
cls.log.debug("Hard-coded Node: " + str(node.to_JSON_object()))
return node
@classmethod
def _loc(cls, pkg, data):
loc = data.get("location")
if loc is None:
return None
p = loc.get("package")
if p is None or p != pkg.name:
return None
f = loc["file"]
for sf in pkg.source_files:
if sf.full_name == f:
f = sf
break
else:
parts = loc["file"].rsplit("/", 1)
if len(parts) == 1:
directory = ""
name = parts[0]
else:
assert len(parts) == 2
directory, name = parts
f = SourceFile(name, directory, pkg)
pkg.source_files.append(f)
return Location(pkg, file=f, line=loc["line"], col=loc["column"],
fun=loc.get("function"), cls=loc.get("class"))
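# Illustrative sketch of the YAML consumed by HardcodedNodeParser above. The
# file lives at "<model_dir>/<pkg>.yaml" and is keyed by ROS distro, then by
# node type; the distro, node, topic and parameter names below are hypothetical:
#
#   kinetic:
#     my_node:
#       rosname: my_node
#       nodelet: false
#       advertise:
#         - name: chatter
#           namespace: ''
#           type: std_msgs/String
#           queue: 10
#           depth: 0
#           repeats: false
#           conditions: []
#       readParam:
#         - name: rate
#           namespace: ''
#           type: double
#           default: 10.0
#           depth: 0
#           repeats: false
#           conditions: []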
###############################################################################
# Node Extractor
###############################################################################
class NodeExtractor(LoggingObject):
def __init__(self, pkgs, env, ws=None, node_cache=None, parse_nodes=False):
self.package = None
self.packages = pkgs
self.environment = env
self.workspace = ws
self.node_cache = node_cache
self.parse_nodes = parse_nodes
self.nodes = []
self.roscpp_extractor = None
self.rospy_extractor = None
def find_nodes(self, pkg):
self.log.debug("NodeExtractor.find_nodes(%s)", pkg)
self.package = pkg
srcdir = self.package.path[len(self.workspace):]
srcdir = os.path.join(self.workspace, srcdir.split(os.sep, 1)[0])
bindir = os.path.join(self.workspace, "build")
cmake_path = os.path.join(self.package.path, "CMakeLists.txt")
if os.path.isfile(cmake_path):
parser = RosCMakeParser(srcdir, bindir, pkgs = self.packages,
env = self.environment,
vars = self._default_variables())
parser.parse(cmake_path)
self._update_nodelets(parser.libraries)
self._register_nodes(parser.executables)
else:
# It may be normal for pure Python projects not to have a CMakeLists.txt
# Instead, search for python files with "def main():"
            pattern = re.compile(r'^def\s+main\s*\(.*\)\s*:')
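            # e.g. this matches "def main():" as well as "def main(argv=None):"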
for file in pkg.source_files:
if file.language != 'python':
continue # continue with next file
entry_point_found = False
with open(file.path) as f:
for line in f:
match = pattern.match(line)
if match is not None:
entry_point_found = True
break
                if not entry_point_found:
continue # continue with next file
# else: this is a python file with a 'main' function,
# so we consider it a node.
node = Node(file.full_name, pkg)
node.source_files.append(file)
self.nodes.append(node)
self.package.nodes.append(node)
if self.parse_nodes:
self._extract_primitives()
def _default_variables(self):
# TODO: clean up these hardcoded values
v = {}
v["catkin_INCLUDE_DIRS"] = os.path.join(self.workspace,
"devel/include")
v["Boost_INCLUDE_DIRS"] = "/usr/include/"
v["Eigen_INCLUDE_DIRS"] = "/usr/include/eigen3"
v["ImageMagick_INCLUDE_DIRS"] = "/usr/include/ImageMagick"
v["PROJECT_SOURCE_DIR"] = self.package.path
return v
def _get_file(self, path):
for sf in self.package.source_files:
if sf.path == path:
return sf
return None
def _update_nodelets(self, libraries):
lib_files = {}
for target in libraries.values():
files = []
for path in target.files:
sf = self._get_file(path)
if sf:
files.append(sf)
for link in target.links:
for path in link.files:
sf = self._get_file(path)
if sf:
files.append(sf)
lib_files[target.prefixed_name] = files
for nodelet in self.package.nodes:
if not nodelet.is_nodelet:
continue
if nodelet.name in lib_files:
nodelet.source_files = lib_files[nodelet.name]
def _register_nodes(self, executables):
for target in executables.values():
node = Node(target.output_name, self.package)
for path in target.files:
sf = self._get_file(path)
if sf:
node.source_files.append(sf)
for link in target.links:
for path in link.files:
sf = self._get_file(path)
if sf:
node.source_files.append(sf)
lang = node.language
if lang == "cpp" or lang == "python":
self.log.debug("register %s node: %s", lang, node.node_name)
self.nodes.append(node)
self.package.nodes.append(node)
else:
self.log.debug("CMake target is not a node: %s (%s) %s",
node.node_name, lang, node.source_files)
def _extract_primitives(self, force_when_cached=False):
self.roscpp_extractor = RoscppExtractor(self.package, self.workspace)
self.rospy_extractor = RospyExtractor(self.package, self.workspace)
for i in range(len(self.package.nodes)):
node = self.package.nodes[i]
self.log.debug("Extracting primitives for node %s", node.id)
if node.source_tree is not None:
self.log.debug("Node already has a source tree. Skipped.")
continue
if (node.node_name in self.node_cache) and not force_when_cached:
self.log.debug("Using Node %s from cache.", node.node_name)
node = self.node_cache[node.node_name]
assert node.package is self.package
self.package.nodes[i] = node
continue
node.source_tree = CodeGlobalScope()
node.advertise = []
node.subscribe = []
node.service = []
node.client = []
node.read_param = []
node.write_param = []
if not node.source_files:
self.log.warning("no source files for node " + node.id)
if node.language == "cpp" and CppAstParser is not None:
self.roscpp_extractor.extract(node)
elif node.language == "python":
self.rospy_extractor.extract(node)
else:
self.log.debug("Node written in %s.", node.language)
self.log.debug("Skipping parsing and primitive extraction.")
###############################################################################
# C++ Primitive Extractor
###############################################################################
class RoscppExtractor(LoggingObject):
def __init__(self, package, workspace):
self.package = package
self.workspace = workspace
def extract(self, node):
self.log.debug("Parsing C++ files for node %s", node.id)
parser = CppAstParser(workspace=self.workspace, logger=__name__)
for sf in node.source_files:
self.log.debug("Parsing C++ file %s", sf.path)
if parser.parse(sf.path) is None:
self.log.warning("no compile commands for " + sf.path)
node.source_tree = parser.global_scope
# ----- queries after parsing, since global scope is reused -----------
self._query_comm_primitives(node, parser.global_scope)
self._query_nh_param_primitives(node, parser.global_scope)
self._query_param_primitives(node, parser.global_scope)
def _query_comm_primitives(self, node, gs):
for call in CodeQuery(gs).all_calls.where_name("advertise").get():
if call.canonical_type != "ros::Publisher":
continue
self._on_publication(node,
self._resolve_node_handle(call.method_of), call)
for call in CodeQuery(gs).all_calls.where_name("subscribe").get():
if call.canonical_type != "ros::Subscriber":
continue
self._on_subscription(node,
self._resolve_node_handle(call.method_of), call)
for call in CodeQuery(gs).all_calls.where_name("advertiseService").get():
if call.canonical_type != "ros::ServiceServer":
continue
self._on_service(node,
self._resolve_node_handle(call.method_of), call)
for call in CodeQuery(gs).all_calls.where_name("serviceClient").get():
if call.canonical_type != "ros::ServiceClient":
continue
self._on_client(node,
self._resolve_node_handle(call.method_of), call)
self.log.debug("Looking for image_transport::SubscriberFilter calls.")
for call in CodeQuery(gs).all_calls.where_name("SubscriberFilter").get():
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
if isinstance(call.reference, str):
if not call.reference.startswith("c:@N@image_transport@S@SubscriberFilter"):
continue
if not "image_transport::SubscriberFilter" in call.canonical_type:
continue
n = call.arguments[0] if call.arguments else None
self._on_subscription(node, self._resolve_it_node_handle(n),
call, topic_pos = 1, queue_pos = 2,
msg_type = "sensor_msgs/Image")
self.log.debug("Looking for message_filters::Subscriber calls.")
for call in CodeQuery(gs).all_calls.where_name("Subscriber").get():
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
if isinstance(call.reference, str):
if not call.reference.startswith("c:@N@message_filters@S@Subscriber"):
continue
if not "message_filters::Subscriber" in call.canonical_type:
continue
n = call.arguments[0] if call.arguments else None
self._on_subscription(node, self._resolve_node_handle(n),
call, topic_pos = 1, queue_pos = 2)
self.log.debug("Looking for image_transport::Subscriber calls.")
for call in CodeQuery(gs).all_calls.where_name("subscribe").get():
if call.canonical_type != "image_transport::Subscriber":
continue
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
n = call.method_of if call.method_of else None
self._on_subscription(node, self._resolve_it_node_handle(n),
call, msg_type = "sensor_msgs/Image")
self.log.debug("Looking for image_transport::Publisher.")
for call in CodeQuery(gs).all_calls.where_name("advertise").get():
if call.canonical_type != "image_transport::Publisher":
continue
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
n = call.method_of if call.method_of else None
self._on_publication(node, self._resolve_it_node_handle(n),
call, msg_type = "sensor_msgs/Image")
def _query_nh_param_primitives(self, node, gs):
nh_prefix = "c:@N@ros@S@NodeHandle@"
gets = ("getParam", "getParamCached", "param")
reads = gets + ("hasParam", "searchParam")
for call in CodeQuery(gs).all_calls.where_name(reads).get():
if (call.full_name.startswith("ros::NodeHandle")
or (isinstance(call.reference, str)
and call.reference.startswith(nh_prefix))):
param_type = default_value = None
if call.name in gets:
param_type = self._extract_param_type(call.arguments[1])
if call.name == "param":
if len(call.arguments) > 2:
default_value = self._extract_param_value(
call, arg_pos=2)
elif len(call.arguments) == 2:
default_value = self._extract_param_value(
call, arg_pos=1)
self._on_read_param(node, self._resolve_node_handle(call),
call, param_type, default_value)
sets = ("setParam",)
writes = sets + ("deleteParam",)
for call in CodeQuery(gs).all_calls.where_name(writes).get():
if (call.full_name.startswith("ros::NodeHandle")
or (isinstance(call.reference, str)
and call.reference.startswith(nh_prefix))):
param_type = value = None
if len(call.arguments) >= 2 and call.name in sets:
param_type = self._extract_param_type(call.arguments[1])
value = self._extract_param_value(call, arg_pos=1)
self._on_write_param(node, self._resolve_node_handle(call),
call, param_type, value)
def _query_param_primitives(self, node, gs):
ros_prefix = "c:@N@ros@N@param@"
gets = ("get", "getCached", "param")
reads = gets + ("has",)
for call in CodeQuery(gs).all_calls.where_name(reads).get():
if (call.full_name.startswith("ros::param")
or (isinstance(call.reference, str)
and call.reference.startswith(ros_prefix))):
param_type = default_value = None
if call.name in gets:
param_type = self._extract_param_type(call.arguments[1])
if call.name == "param":
if len(call.arguments) > 2:
default_value = self._extract_param_value(
call, arg_pos=2)
elif len(call.arguments) == 2:
default_value = self._extract_param_value(
call, arg_pos=1)
self._on_read_param(node, "", call, param_type, default_value)
for call in (CodeQuery(gs).all_calls.where_name("search")
.where_result("bool").get()):
if (call.full_name.startswith("ros::param")
or (isinstance(call.reference, str)
and call.reference.startswith(ros_prefix))):
if len(call.arguments) > 2:
ns = resolve_expression(call.arguments[0])
if not isinstance(ns, basestring):
ns = "?"
else:
ns = "~"
self._on_read_param(node, ns, call, None, None)
sets = ("set",)
writes = sets + ("del",)
for call in CodeQuery(gs).all_calls.where_name(writes).get():
if (call.full_name.startswith("ros::param")
or (isinstance(call.reference, str)
and call.reference.startswith(ros_prefix))):
param_type = value = None
if len(call.arguments) >= 2 and call.name in sets:
param_type = self._extract_param_type(call.arguments[1])
value = self._extract_param_value(call, arg_pos=1)
self._on_write_param(node, "", call, param_type, value)
def _on_publication(self, node, ns, call, topic_pos=0, queue_pos=1,
msg_type=None, latch_pos=-1):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call, topic_pos=topic_pos)
msg_type = msg_type or self._extract_message_type(call)
queue_size = self._extract_queue_size(call, queue_pos=queue_pos)
latched = False
if len(call.arguments) >= 3 and len(call.arguments) > latch_pos:
latched = self._extract_latch(call, latch_pos)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
pub = AdvertiseCall(name, ns, msg_type, queue_size, latched=latched,
location=location, control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.advertise.append(pub)
self.log.debug("Found AdvertiseCall on %s/%s (%s)", ns, name, msg_type)
def _on_subscription(self, node, ns, call, topic_pos=0, queue_pos=1,
msg_type=None):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call, topic_pos=topic_pos)
msg_type = msg_type or self._extract_message_type(call)
queue_size = self._extract_queue_size(call, queue_pos=queue_pos)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
sub = SubscribeCall(name, ns, msg_type, queue_size, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.subscribe.append(sub)
self.log.debug("Found SubscribeCall on %s/%s (%s)", ns, name, msg_type)
def _on_service(self, node, ns, call):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call)
msg_type = self._extract_message_type(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
srv = AdvertiseServiceCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.service.append(srv)
self.log.debug("Found Service on %s/%s (%s)", ns, name, msg_type)
def _on_client(self, node, ns, call):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call)
msg_type = self._extract_message_type(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
cli = ServiceClientCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.client.append(cli)
self.log.debug("Found Client on %s/%s (%s)", ns, name, msg_type)
def _on_read_param(self, node, ns, call, param_type, default_value):
if len(call.arguments) < 1:
return
name = self._extract_topic(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
read = GetParamCall(name, ns, param_type,
default_value=default_value, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive = True))
node.read_param.append(read)
self.log.debug("Found Read on %s/%s (%s) (%s)",
ns, name, param_type, default_value)
def _on_write_param(self, node, ns, call, param_type, value):
if len(call.arguments) < 1:
return
name = self._extract_topic(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
wrt = SetParamCall(name, ns, param_type, value=value,
location=location, control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive = True))
node.write_param.append(wrt)
self.log.debug("Found Write on %s/%s (%s) (%s)",
ns, name, param_type, value)
def _condition_location(self, condition_obj, sf):
if sf is not None:
if sf.path != condition_obj.file:
self.log.debug(("condition Location: files do not match: "
"'%s', '%s'"), sf.path, condition_obj.file)
if condition_obj.file.startswith(self.package.path):
for sf2 in self.package.source_files:
if sf2.path == condition_obj.file:
sf = sf2
break
self.log.debug("Location: found correct file")
return Location(self.package, file=sf, line=condition_obj.line,
col=condition_obj.column, fun=condition_obj.function.name)
def _call_location(self, call):
try:
source_file = next(
sf
for sf in self.package.source_files
if sf.path == call.file)
except StopIteration:
source_file = None
function = call.function
if function:
function = function.name
return Location(self.package, file=source_file,
line=call.line, col=call.column, fun=function)
def _resolve_it_node_handle(self, value):
value = resolve_expression(value)
if (isinstance(value, CppFunctionCall)
and value.name == "ImageTransport"):
return self._resolve_node_handle(value.arguments[0])
return "?"
def _resolve_node_handle(self, call):
ns = "?"
node_handle = getattr(call, 'method_of', None) or call
if getattr(node_handle, 'name', None) == 'operator->':
node_handle = node_handle.arguments[0]
node_handle_def = None
if isinstance(node_handle, CppReference):
node_handle_def = resolve_reference(node_handle)
elif isinstance(node_handle, CppDefaultArgument):
return ''
# A function needs to be called to create a NodeHandle (constructors
# are functions)
if isinstance(node_handle_def, CppFunctionCall):
# node_handle_def is a call to the constructor
if node_handle_def.name == 'NodeHandle':
args = node_handle_def.arguments
# Copy constructor
if len(args) == 1:
parent = args[0]
if isinstance(parent, CppFunctionCall):
if parent.name == 'getNodeHandle':
return ''
elif parent.name == 'getPrivateNodeHandle':
return '~'
return self._resolve_node_handle(parent)
                # All other constructors have at least two arguments. The third
                # is never meaningful.
                # If a parent NodeHandle is passed, it is the first argument.
                # If a namespace argument is passed, it is either the first or
                # the second parameter. Only the first has an empty default value.
prefix = ''
if isinstance(args[0], basestring):
ns = args[0]
elif isinstance(args[0], CppDefaultArgument):
ns = ''
elif isinstance(args[1], basestring):
prefix = self._resolve_node_handle(args[0])
ns = args[1]
else:
ns = "?"
if prefix:
ns = prefix + "/" + ns
elif node_handle_def.name == 'getNodeHandle':
ns = ''
elif node_handle_def.name == 'getPrivateNodeHandle':
ns = '~'
elif isinstance(node_handle_def, CppDefaultArgument):
ns = ''
return ns
def _extract_topic(self, call, topic_pos=0):
name = resolve_expression(call.arguments[topic_pos])
if not isinstance(name, basestring):
name = "?"
return name or "?"
def _extract_message_type(self, call):
if call.template:
template = call.template[0]
std_alloc = re.search("_<std::allocator<void>", template)
if std_alloc is not None:
template = template[:std_alloc.start()]
#assert re.match(r"\w+::\w+$", template)
if not re.match(r"\w+::\w+$", template):
self.log.debug("Weird message type: " + repr(template))
return template.replace("::", "/")
if (call.name not in ("subscribe", "advertiseService")
and 'NodeHandle' not in call.full_name):
return "?"
callback = (call.arguments[2]
if call.name == "subscribe"
else call.arguments[1])
while isinstance(callback, CppOperator):
callback = callback.arguments[0]
type_string = callback.result
try:
type_string = type_string.split(None, 1)[1]
except IndexError:
type_string = type_string.strip()
if type_string.startswith("(*)"):
type_string = type_string[3:]
if type_string[0] == "(" and type_string[-1] == ")":
type_string = type_string[1:-1]
if call.name == "advertiseService":
type_string = type_string.split(", ")[0]
is_const = type_string.startswith("const ")
if is_const:
type_string = type_string[6:]
is_ref = type_string.endswith(" &")
if is_ref:
type_string = type_string[:-2]
is_ptr = type_string.endswith("::ConstPtr")
if is_ptr:
type_string = type_string[:-10]
else:
is_ptr = type_string.endswith("ConstPtr")
if is_ptr:
type_string = type_string[:-8]
if type_string.endswith("::Request"):
type_string = type_string[:-9]
if type_string.startswith("boost::function"):
type_string = type_string[52:-25]
type_string = type_string.replace("::", "/")
if re.match(r"\w+/\w+$", type_string):
return type_string
return "?"
def _extract_action(self, call):
name = "?"
if "SimpleActionServer" in call.canonical_type and len(call.arguments) > 2:
arg = call.arguments[1]
if not isinstance(arg, basestring):
arg = resolve_expression(arg)
if isinstance(arg, basestring):
name = arg.split()[-1].replace("'", "")
elif "SimpleActionClient" in call.canonical_type and len(call.arguments) > 1:
if isinstance(call.arguments[0], basestring):
name = call.arguments[0]
return name
def _extract_action_type(self, call):
type_string = call.template[0]
return type_string.replace("::", "/")
def _extract_queue_size(self, call, queue_pos=1):
queue_size = resolve_expression(call.arguments[queue_pos])
if isinstance(queue_size, (int, float)):
return queue_size
return None
def _extract_latch(self, call, latch_pos):
expr = call.arguments[latch_pos]
self.log.debug("extract latched publisher from {!r}".format(expr))
if isinstance(expr, CppDefaultArgument):
self.log.debug("latch is default: false")
return False
latch = resolve_expression(expr)
self.log.debug("resolve latch expr returns {!r}".format(latch))
if not isinstance(latch, bool):
return None
return latch
def _extract_param_type(self, value):
self.log.debug("extract param type from {}".format(repr(value)))
if value is True or value is False:
return "bool"
if isinstance(value, int):
return "int"
if isinstance(value, float):
return "double"
if isinstance(value, basestring):
return "str"
cpp_type = getattr(value, "result", None)
if cpp_type:
self.log.debug("param type from C++ type {}".format(repr(cpp_type)))
if cpp_type == "std::string" or cpp_type == "char *":
return "str"
if cpp_type == "int":
return "int"
if cpp_type == "double":
return "double"
if cpp_type == "bool":
return "bool"
return "yaml" if cpp_type else None
def _extract_param_value(self, call, arg_pos=1):
self.log.debug("extract_param_value({!r}, pos={})".format(
call.arguments, arg_pos))
if len(call.arguments) <= arg_pos:
self.log.debug("Failed to extract param value: not enough arguments")
return None
value = resolve_expression(call.arguments[arg_pos])
if isinstance(value, CppEntity):
self.log.debug("Failed to extract param value: " + repr(value))
return None
return value
###############################################################################
# Python Primitive Extractor
###############################################################################
class RospyExtractor(LoggingObject):
queue_size_pos = {
'publisher': 6,
'subscriber': 4,
}
rospy_names = {
'publication': ('Publisher',),
'subscription': ('Subscriber',),
'service-def': ('Service',),
'service-call': ('ServiceProxy',),
}
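    # The queries in _query_comm_primitives below match the standard rospy API,
    # e.g. (topic, service and type names are illustrative):
    #   rospy.Publisher('chatter', String, queue_size=10)
    #   rospy.Subscriber('chatter', String, callback)
    #   rospy.Service('add_two_ints', AddTwoInts, handle_request)
    #   rospy.ServiceProxy('add_two_ints', AddTwoInts)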
@classmethod
def all_rospy_names(cls, type):
names = cls.rospy_names[type]
return tuple('rospy.' + name for name in names) + names
@staticmethod
def get_arg(call, pos, name):
try:
return next(
keyword.value
for keyword in call.named_args
if keyword.name == name)
except StopIteration:
try:
return call.arguments[pos]
except IndexError:
return None
@staticmethod
def invalid_call(call, n=1):
return (len(call.arguments) + len(call.named_args)
+ bool(call.star_args) + bool(call.kw_args)) <= n
@staticmethod
def split_ns_name(full_name):
if '/' in full_name:
ns, _, name = full_name.rpartition('/')
else:
ns, name = '', full_name
return ns, name
def _call_location(self, call):
try:
source_file = next(
sf
for sf in self.package.source_files
if sf.path == call.file)
except StopIteration:
            source_file = None
function = call.function
if function:
function = function.name
return Location(self.package, file=source_file, line=call.line,
fun=function)
@classmethod
def _extract_queue_size(cls, call):
pos = cls.queue_size_pos[call.name.lower()]
queue_size_arg = cls.get_arg(call, pos, 'queue_size')
try:
queue_size = resolve_expression(queue_size_arg)
assert(isinstance(queue_size, (int, float)))
return queue_size
except AssertionError:
return None
@classmethod
def _extract_message_type(cls, call, arg_name, msgs_imports, pkgs_imports, arg_pos=1):
        msg_type = cls.get_arg(call, arg_pos, arg_name)
# Very common case of calling type() on a message class
if isinstance(msg_type, CodeFunctionCall) and msg_type.name == 'type':
msg_type = msg_type.arguments[0].name
if isinstance(msg_type, CodeReference):
msg_type = resolve_reference(msg_type) or msg_type
if isinstance(msg_type, CodeReference):
if msg_type.field_of is None:
for pkg_name, msg_name in msgs_imports:
if msg_name == msg_type.name:
return pkg_name + "/" + msg_name
else:
maybe_pkg = msg_type.field_of
if isinstance(maybe_pkg, CodeReference):
pkg_name = maybe_pkg.name
if pkg_name in pkgs_imports:
return pkg_name + "/" + msg_type.name
return "?"
@classmethod
def _extract_topic(cls, call):
name = resolve_expression(cls.get_arg(call, 0, 'name'))
if not isinstance(name, basestring):
name = '?'
return cls.split_ns_name(name)
def _on_client(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'service_class', self.msgs_list, self.pkgs_list)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
cli = ServiceClientCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.client.append(cli)
self.log.debug("Found Client on %s/%s (%s)", ns, name, msg_type)
def _on_publication(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'data_class', self.msgs_list, self.pkgs_list)
queue_size = self._extract_queue_size(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
pub = AdvertiseCall(name, ns, msg_type, queue_size, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.advertise.append(pub)
self.log.debug("Found AdvertiseCall on %s/%s (%s)", ns, name, msg_type)
def _on_service(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'service_class', self.msgs_list, self.pkgs_list)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
srv = AdvertiseServiceCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.service.append(srv)
self.log.debug("Found Service on %s/%s (%s)", ns, name, msg_type)
def _on_subscription(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'data_class', self.msgs_list, self.pkgs_list)
queue_size = self._extract_queue_size(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
sub = SubscribeCall(name, ns, msg_type, queue_size, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.subscribe.append(sub)
self.log.debug("Found SubscribeCall on %s/%s (%s)", ns, name, msg_type)
def _query_comm_primitives(self, node, gs):
##################################
# Topics
##################################
publications = (CodeQuery(gs).all_calls
.where_name(('Publisher', 'rospy.Publisher'))
.get())
subscriptions = (CodeQuery(gs).all_calls
.where_name(('Subscriber', 'rospy.Subscriber'))
.get())
for call in publications:
self._on_publication(node, call)
for call in subscriptions:
self._on_subscription(node, call)
##################################
# Services
##################################
service_defs = (CodeQuery(gs).all_calls
.where_name(self.all_rospy_names('service-def'))
.get())
service_calls = (CodeQuery(gs).all_calls
.where_name(self.all_rospy_names('service-call'))
.get())
for call in service_defs:
self._on_service(node, call)
for call in service_calls:
self._on_client(node, call)
def _on_param_getter(self, node, call):
if self.invalid_call(call, n=0):
return
name = resolve_expression(self.get_arg(call, 0, 'param_name'))
if not isinstance(name, basestring):
name = '?'
ns, name = self.split_ns_name(name)
param_type = None
default_value = self.get_arg(call, 1, 'default')
if default_value is not None:
default_value = resolve_expression(default_value)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
getter = GetParamCall(name, ns, param_type,
default_value=default_value, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.read_param.append(getter)
self.log.debug("Found GetParamCall on %s/%s", ns, name)
def _on_param_setter(self, node, call):
if self.invalid_call(call):
return
name = resolve_expression(self.get_arg(call, 0, 'param_name'))
if not isinstance(name, basestring):
name = '?'
ns, name = self.split_ns_name(name)
param_type = None
value = resolve_expression(self.get_arg(call, 1, 'param_value'))
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
setter = SetParamCall(name, ns, param_type, value=value,
location=location, control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.write_param.append(setter)
self.log.debug("Found SetParamCall on %s/%s", ns, name)
def _query_param_primitives(self, node, gs):
getters = (CodeQuery(gs).all_calls
.where_name(('get_param', 'rospy.get_param'))
.get())
setters = (CodeQuery(gs).all_calls
.where_name(('set_param', 'rospy.set_param'))
.get())
for call in getters:
self._on_param_getter(node, call)
for call in setters:
self._on_param_setter(node, call)
# FIXME: missing:
# rospy.has_param(param_name)
# rospy.delete_param(param_name)
def _setup_path(self):
setup_file = os.path.join(self.package.path, 'setup.py')
if not os.path.isfile(setup_file):
return []
parser = PyAstParser(workspace=self.package.path)
setup = parser.parse(setup_file)
setup_call = (CodeQuery(setup).all_calls
.where_name('generate_distutils_setup')
.get()
or
CodeQuery(setup).all_calls
.where_name('setup')
.get())[0]
package_dir = self.get_arg(setup_call, 0, 'package_dir')
if hasattr(package_dir, 'value'):
package_dir = {
keyword.name: keyword.value
for keyword in self.get_arg(setup_call, 0, 'package_dir').value
}
else:
src_path = os.path.join(self.package.path, 'src')
package_dir = {'': 'src'} if os.path.exists(src_path) else {}
root = package_dir.get('', '')
return [os.path.join(self.package.path, root)]
def __init__(self, package, workspace):
self.package = package
self.workspace = workspace
self.pythonpath = self._setup_path()
def extract(self, node):
self.log.debug("Parsing Python files for node %s", node.id)
self.log.debug("PyAstParser(pythonpath={!r}, workspace={!r})".format(
self.pythonpath, self.workspace))
parser = PyAstParser(pythonpath=self.pythonpath,
workspace=self.workspace)
for sf in node.source_files:
self.log.debug("Parsing Python file %s", sf.path)
if parser.parse(sf.path) is None:
self.log.warning("no compile commands for " + sf.path)
node.source_tree = parser.global_scope
        # In theory the imported names list should not be needed here; this is a
        # fix so that we can locate the complete description of ROS msg types
        # (i.e. "PkgName/MsgName").
self.msgs_list = []
self.pkgs_list = []
for imp_name in parser.imported_names_list:
s = str(imp_name)
if "msg" in s or "srv" in s:
ss = s.split(".")
if len(ss) < 2:
continue
if ss[-1] == "msg" or ss[-1] == "srv":
self.pkgs_list.append(ss[0])
elif ss[1] == "msg" or ss[1] == "srv":
self.msgs_list.append((ss[0], ss[2]))
else:
self.log.debug(("Python import with 'msg' or 'srv', "
"but unable to process it: ")
+ s)
# ----- queries after parsing, since global scope is reused -----------
self._query_comm_primitives(node, parser.global_scope)
self._query_param_primitives(node, parser.global_scope)
###############################################################################
# Node Hints
###############################################################################
class NodeHints2(LoggingObject):
# pkg/node:
# fix: (fix variables)
# advertise@1: name
# getParam@1: true
# advertise: (always adds)
# - full JSON spec
# - full JSON spec
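    # Illustrative sketch of a hints dict (package, node and topic names are
    # hypothetical, and the exact JSON spec fields are an assumption based on
    # the *Call constructors used elsewhere in this module):
    #
    #   hints = {
    #       "my_pkg/my_node": {
    #           "fix": {"advertise@1": "/camera/image_raw"},
    #           "advertise": [
    #               {"name": "status", "namespace": "", "type": "std_msgs/String",
    #                "queue": 10, "depth": 0, "repeats": False, "conditions": []}
    #           ]
    #       }
    #   }
    #   new_nodes = NodeHints2(hints, pkg_finder=finder).apply_to(nodes, create=True)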
def __init__(self, hints, pkg_finder=None):
if not isinstance(hints, dict):
raise ValueError("expected dict of hints, got " + repr(hints))
for key, value in hints.items():
if not isinstance(key, basestring) or key.count("/") != 1:
raise ValueError("expected 'pkg/node' key, found " + repr(key))
if not isinstance(value, dict):
raise ValueError("expected dict value, found " + repr(value))
self.hints = hints
self.pkg_finder = pkg_finder
def apply_to(self, nodes, create=False):
if not self.hints:
return []
nodes = self._list_to_dict(nodes)
if create and not self.pkg_finder:
raise ValueError("received create=True but no pkg_finder")
new_nodes = []
for node_type, node_hints in self.hints.items():
node = nodes.get(node_type)
if node is not None:
fix_hints = node_hints.get("fix", _EMPTY_DICT)
if not isinstance(fix_hints, dict):
raise ValueError("expected dict in {}:fix; got {!r}".format(
node_type, fix_hints))
self.log.info("Merging extracted Node with hints: " + node_type)
self.log.debug("node specs %s %s", node, node_hints)
node.resolve_variables(fix_hints)
elif create:
self.log.info("Creating new Node from hints: " + node_type)
self.log.debug("node specs %s %s", node_type, node_hints)
node = self._create(node_type, node_hints)
if node is not None:
new_nodes.append(node)
if node is not None:
self._add_primitives(node, node_hints)
hpl = node_hints.get("hpl", _EMPTY_DICT)
node.hpl_properties = list(hpl.get("properties", _EMPTY_LIST))
node.hpl_assumptions = list(hpl.get("assumptions", _EMPTY_LIST))
return new_nodes
def _create(self, node_type, hints):
pkg_name, exe = node_type.split("/")
pkg = self.pkg_finder.get("package:" + pkg_name)
if pkg is None:
self.log.error("Unable to find package: " + repr(pkg_name))
return None
rosname = hints.get("rosname")
nodelet_cls = hints.get("nodelet")
node = Node(exe, pkg, rosname=rosname, nodelet=nodelet_cls)
return node
def _add_primitives(self, node, hints):
for key, attr, cls in self._PRIMITIVES:
calls = getattr(node, attr)
for datum in hints.get(key, _EMPTY_LIST):
call = cls.from_JSON_specs(datum)
call.location = self._location_from_JSON(datum.get("location"))
calls.append(call)
_PRIMITIVES = (
("advertise", "advertise", AdvertiseCall),
("subscribe", "subscribe", SubscribeCall),
("advertiseService", "service", AdvertiseServiceCall),
("serviceClient", "client", ServiceClientCall),
("getParam", "read_param", GetParamCall),
("setParam", "write_param", SetParamCall)
)
def _list_to_dict(self, nodes):
if isinstance(nodes, dict):
return nodes
return {node.node_name: node for node in nodes}
# FIXME code duplication
def _location_from_JSON(self, datum):
if datum is None:
return None
pkg = self.pkg_finder.get("package:" + datum["package"])
if pkg is None:
self.log.error("Unable to find package: " + repr(datum["package"]))
return None
source_file = None
filename = datum["file"]
if filename:
try:
source_file = next(sf for sf in pkg.source_files
if sf.full_name == filename)
except StopIteration:
self.log.error("Unable to find file: '{}/{}'".format(
datum["package"], filename))
return Location(pkg, file=source_file,
line=datum.get("line", 1), col=datum.get("column", 1),
fun=datum.get("function"), cls=datum.get("class"))
|
[] |
[] |
["ROS_VERSION"] |
[]
|
["ROS_VERSION"]
|
python
| 1 | 0 | |
test/functional/test_framework/test_framework.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Wizblcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from collections import deque
import errno
from enum import Enum
import http.client
import logging
import optparse
import os
import pdb
import shutil
import subprocess
import sys
import tempfile
import time
import traceback
from .authproxy import JSONRPCException
from . import coverage
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_rpc_proxy,
initialize_datadir,
get_datadir_path,
log_filename,
p2p_port,
rpc_url,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
WIZBLCOIND_PROC_WAIT_TIMEOUT = 60
class WizblcoinTestFramework(object):
"""Base class for a wizblcoin test script.
Individual wizblcoin test scripts should subclass this class and override the following methods:
- __init__()
- add_options()
- setup_chain()
- setup_network()
- run_test()
The main() method should not be overridden.
This class also contains various public and private helper methods."""
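    # A minimal subclass sketch (ExampleTest and its body are illustrative only):
    #
    #   class ExampleTest(WizblcoinTestFramework):
    #       def __init__(self):
    #           super().__init__()
    #           self.num_nodes = 2
    #           self.setup_clean_chain = True
    #       def run_test(self):
    #           self.nodes[0].generate(10)
    #           self.sync_all()
    #
    #   if __name__ == '__main__':
    #       ExampleTest().main()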
# Methods to override in subclass test scripts.
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = []
self.wizblcoind_processes = {}
self.mocktime = 0
def add_options(self, parser):
pass
def setup_chain(self):
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
self._initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
def setup_network(self):
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
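        # With the default num_nodes=4 this yields:
        # node0 <-> node1 <-> node2 <-> node3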
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
def run_test(self):
raise NotImplementedError
# Main function. This should not be overridden by the subclass test scripts.
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave wizblcoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop wizblcoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
help="Source directory containing wizblcoind/wizblcoin-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
# Set up temp directory and start logging
if self.options.tmpdir:
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
self.log.info("Note: wizblcoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = [self.options.tmpdir + "/test_framework.log"]
filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for fn in filenames:
try:
with open(fn, 'r') as f:
print("From", fn, ":")
print("".join(deque(f, MAX_LINES_TO_PRINT)))
except OSError:
print("Opening file %s failed." % fn)
traceback.print_exc()
if success == TestStatus.PASSED:
self.log.info("Tests successful")
sys.exit(TEST_EXIT_PASSED)
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
sys.exit(TEST_EXIT_SKIPPED)
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
logging.shutdown()
sys.exit(TEST_EXIT_FAILED)
# Public helper methods. These can be accessed by the subclass test scripts.
def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
"""Start a wizblcoind and return RPC connection to it"""
datadir = os.path.join(dirname, "node" + str(i))
if binary is None:
binary = os.getenv("WIZBLCOIND", "wizblcoind")
args = [binary, "-datadir=" + datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(self.mocktime), "-uacomment=testnode%d" % i]
if extra_args is not None:
args.extend(extra_args)
self.wizblcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
self.log.debug("initialize_chain: wizblcoind started, waiting for RPC to come up")
self._wait_for_wizblcoind_start(self.wizblcoind_processes[i], datadir, i, rpchost)
self.log.debug("initialize_chain: RPC successfully started")
proxy = get_rpc_proxy(rpc_url(datadir, i, rpchost), i, timeout=timewait)
if self.options.coveragedir:
coverage.write_all_rpc_commands(self.options.coveragedir, proxy)
return proxy
def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Start multiple wizblcoinds, return RPC connections to them"""
if extra_args is None:
extra_args = [None] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(self.start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i]))
except:
# If one node failed to start, stop the others
# TODO: abusing self.nodes in this way is a little hacky.
# Eventually we should do a better job of tracking nodes
self.nodes.extend(rpcs)
self.stop_nodes()
self.nodes = []
raise
return rpcs
def stop_node(self, i):
"""Stop a wizblcoind test node"""
self.log.debug("Stopping node %d" % i)
try:
self.nodes[i].stop()
except http.client.CannotSendRequest as e:
self.log.exception("Unable to stop node")
return_code = self.wizblcoind_processes[i].wait(timeout=WIZBLCOIND_PROC_WAIT_TIMEOUT)
del self.wizblcoind_processes[i]
assert_equal(return_code, 0)
def stop_nodes(self):
"""Stop multiple wizblcoind test nodes"""
for i in range(len(self.nodes)):
self.stop_node(i)
assert not self.wizblcoind_processes.values() # All connections must be gone now
def assert_start_raises_init_error(self, i, dirname, extra_args=None, expected_msg=None):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, dirname, extra_args, stderr=log_stderr)
self.stop_node(i)
except Exception as e:
                assert 'wizblcoind exited' in str(e) # node must have shut down
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "wizblcoind should have exited with an error"
else:
assert_msg = "wizblcoind should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.wizblcoind_processes[i].wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
        For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1388534400 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel is parsed as a string, so convert it to an int if it is numeric
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as wizblcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("WizblcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self, test_dir, num_nodes, cachedir):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(cachedir, "node" + str(i))):
shutil.rmtree(os.path.join(cachedir, "node" + str(i)))
# Create cache directories, run wizblcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(cachedir, i)
args = [os.getenv("WIZBLCOIND", "wizblcoind"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.wizblcoind_processes[i] = subprocess.Popen(args)
self.log.debug("initialize_chain: wizblcoind started, waiting for RPC to come up")
self._wait_for_wizblcoind_start(self.wizblcoind_processes[i], datadir, i)
self.log.debug("initialize_chain: RPC successfully started")
self.nodes = []
for i in range(MAX_NODES):
try:
self.nodes.append(get_rpc_proxy(rpc_url(get_datadir_path(cachedir, i), i), i))
except:
self.log.exception("Error connecting to node %d" % i)
sys.exit(1)
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
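        # That is 2 rounds x 4 peers x 25 blocks = 200 blocks, one 10-minute
        # timestamp slot per block; enable_mocktime() offsets mocktime by
        # 201 * 10 * 60 seconds, which is what block_time rewinds below.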
self.enable_mocktime()
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 10 * 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(cachedir, i, "debug.log"))
os.remove(log_filename(cachedir, i, "db.log"))
os.remove(log_filename(cachedir, i, "peers.dat"))
os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join(cachedir, "node" + str(i))
to_dir = os.path.join(test_dir, "node" + str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in wizblcoin.conf
def _initialize_chain_clean(self, test_dir, num_nodes):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(num_nodes):
initialize_datadir(test_dir, i)
def _wait_for_wizblcoind_start(self, process, datadir, i, rpchost=None):
"""Wait for wizblcoind to start.
This means that RPC is accessible and fully initialized.
Raise an exception if wizblcoind exits during initialization."""
while True:
if process.poll() is not None:
raise Exception('wizblcoind exited with status %i during initialization' % process.returncode)
try:
                # Check whether the .cookie file has been created yet
rpc = get_rpc_proxy(rpc_url(datadir, i, rpchost), i, coveragedir=self.options.coveragedir)
rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
            except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; wizblcoind is still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(0.25)
class ComparisonTestFramework(WizblcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some wizblcoind binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("WIZBLCOIND", "wizblcoind"),
help="wizblcoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("WIZBLCOIND", "wizblcoind"),
help="wizblcoind binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']]*self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = self.start_nodes(
self.num_nodes, self.options.tmpdir, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
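# --- Editorial example (not part of the original file) ----------------------
# A minimal sketch of how a concrete test might build on the framework above.
# WizblcoinTestFramework, num_nodes, setup_clean_chain and self.nodes are all
# used by the code above; the run_test() hook is an assumption about how the
# test runner invokes a test, so the sketch is left commented out.
#
# class ExampleBlockCountTest(WizblcoinTestFramework):
#     def __init__(self):
#         super().__init__()
#         self.num_nodes = 1
#         self.setup_clean_chain = True  # start from an empty chain
#
#     def run_test(self):
#         assert self.nodes[0].getblockcount() == 0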
|
[] |
[] |
[
"PYTHON_DEBUG",
"PATH",
"WIZBLCOIND"
] |
[]
|
["PYTHON_DEBUG", "PATH", "WIZBLCOIND"]
|
python
| 3 | 0 | |
5_roots/setup_ext.py
|
# Needed to build the Cython extensions
from distutils.core import setup
from Cython.Build import cythonize
# Change to newest gcc on Darwin
import os
from sys import platform
if platform.lower() == 'darwin':
os.environ['CC'] = 'gcc-5'
# Do the build
setup(ext_modules=cythonize('givens.pyx'))
setup(ext_modules=cythonize('datafun.pyx'))
setup(ext_modules=cythonize('globvar.pyx'))
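# Editorial note (assumed typical usage, not part of the original file):
# compile the extensions in place with
#     python setup_ext.py build_ext --inplace
# `build_ext --inplace` is the standard distutils/setuptools command for
# building extension modules next to their sources.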
|
[] |
[] |
[
"CC"
] |
[]
|
["CC"]
|
python
| 1 | 0 | |
main.go
|
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"os"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
imagev1alpha1_reflect "github.com/fluxcd/image-reflector-controller/api/v1alpha1"
"github.com/fluxcd/pkg/runtime/events"
"github.com/fluxcd/pkg/runtime/logger"
"github.com/fluxcd/pkg/runtime/metrics"
"github.com/fluxcd/pkg/runtime/probes"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
imagev1alpha1_auto "github.com/fluxcd/image-automation-controller/api/v1alpha1"
"github.com/fluxcd/image-automation-controller/controllers"
// +kubebuilder:scaffold:imports
)
const controllerName = "image-automation-controller"
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = imagev1alpha1_auto.AddToScheme(scheme)
_ = imagev1alpha1_reflect.AddToScheme(scheme)
_ = sourcev1.AddToScheme(scheme)
// +kubebuilder:scaffold:scheme
}
func main() {
var (
metricsAddr string
eventsAddr string
healthAddr string
enableLeaderElection bool
logLevel string
logJSON bool
watchAllNamespaces bool
)
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&eventsAddr, "events-addr", "", "The address of the events receiver.")
flag.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
flag.StringVar(&logLevel, "log-level", "info", "Set logging level. Can be debug, info or error.")
flag.BoolVar(&logJSON, "log-json", false, "Set logging to JSON format.")
	flag.BoolVar(&watchAllNamespaces, "watch-all-namespaces", true,
		"Watch for custom resources in all namespaces; if set to false, only the runtime namespace is watched.")
flag.Parse()
ctrl.SetLogger(logger.NewLogger(logLevel, logJSON))
var eventRecorder *events.Recorder
if eventsAddr != "" {
if er, err := events.NewRecorder(eventsAddr, controllerName); err != nil {
setupLog.Error(err, "unable to create event recorder")
os.Exit(1)
} else {
eventRecorder = er
}
}
metricsRecorder := metrics.NewRecorder()
ctrlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...)
watchNamespace := ""
if !watchAllNamespaces {
watchNamespace = os.Getenv("RUNTIME_NAMESPACE")
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
HealthProbeBindAddress: healthAddr,
Port: 9443,
LeaderElection: enableLeaderElection,
LeaderElectionID: "79628f79.fluxcd.io",
Namespace: watchNamespace,
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
probes.SetupChecks(mgr, setupLog)
if err = (&controllers.ImageUpdateAutomationReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("ImageUpdateAutomation"),
Scheme: mgr.GetScheme(),
EventRecorder: mgr.GetEventRecorderFor(controllerName),
ExternalEventRecorder: eventRecorder,
MetricsRecorder: metricsRecorder,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ImageUpdateAutomation")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
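// Editorial note (illustrative, not part of the original file): a typical
// single-namespace invocation combines the flags defined above with the
// RUNTIME_NAMESPACE environment variable read in main(), e.g.
//
//	RUNTIME_NAMESPACE=flux-system image-automation-controller \
//	    --watch-all-namespaces=false --log-level=debug --enable-leader-election
//
// The binary name and namespace value are assumptions for illustration only.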
|
[
"\"RUNTIME_NAMESPACE\""
] |
[] |
[
"RUNTIME_NAMESPACE"
] |
[]
|
["RUNTIME_NAMESPACE"]
|
go
| 1 | 0 | |
client.go
|
package shopify
import (
"os"
"github.com/r0busta/graphql"
log "github.com/sirupsen/logrus"
graphqlclient "github.com/xiatechs/go-shopify-graphql/v4/graphql"
)
const (
shopifyAPIVersion = "2021-04"
)
type Client struct {
gql *graphql.Client
Product ProductService
Variant VariantService
Inventory InventoryService
Collection CollectionService
Order OrderService
Fulfillment FulfillmentService
Location LocationService
Metafield MetafieldService
PriceList PriceListService
BulkOperation BulkOperationService
}
type ListOptions struct {
Query string
First int
Last int
After string
Before string
Reverse bool
}
func NewDefaultClient() (shopClient *Client) {
apiKey := os.Getenv("STORE_API_KEY")
password := os.Getenv("STORE_PASSWORD")
storeName := os.Getenv("STORE_NAME")
if apiKey == "" || password == "" || storeName == "" {
log.Fatalln("Shopify app API Key and/or Password and/or Store Name not set")
}
shopClient = NewClient(apiKey, password, storeName)
return
}
// NewClient returns a new client based on the configuration arguments received.
// Most of this client's functionality is disabled because it has no test coverage:
// it may work on the current API version, but we cannot be sure without tests.
// Re-enabling a service should come with proper test coverage.
// Calling any of the disabled (commented-out below) functionality will likely result in a nil pointer dereference.
func NewClient(apiKey string, password string, storeName string) *Client {
c := &Client{gql: newShopifyGraphQLClient(apiKey, password, storeName)}
c.BulkOperation = &BulkOperationServiceOp{client: c}
// c.Product = &ProductServiceOp{client: c}
// c.Variant = &VariantServiceOp{client: c}
// c.Inventory = &InventoryServiceOp{client: c}
// c.Collection = &CollectionServiceOp{client: c}
// c.Order = &OrderServiceOp{client: c}
// c.Fulfillment = &FulfillmentServiceOp{client: c}
// c.Location = &LocationServiceOp{client: c}
// c.Metafield = &MetafieldServiceOp{client: c}
c.PriceList = &PriceListServiceOp{c.BulkOperation, c.gql}
return c
}
func newShopifyGraphQLClient(apiKey string, password string, storeName string) *graphql.Client {
opts := []graphqlclient.Option{
graphqlclient.WithVersion(shopifyAPIVersion),
graphqlclient.WithPrivateAppAuth(apiKey, password),
}
return graphqlclient.NewClient(storeName, opts...)
}
func (c *Client) GraphQLClient() *graphql.Client {
return c.gql
}
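// Editorial example (not part of the original file): constructing a client from
// explicit credentials and retrieving the underlying GraphQL client. Only
// NewClient and GraphQLClient defined above are used; the credential values are
// placeholders.
func exampleClientUsage() {
	c := NewClient("api-key", "api-password", "my-store")
	gql := c.GraphQLClient()
	_ = gql // hand this to code that issues raw GraphQL queries
}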
|
[
"\"STORE_API_KEY\"",
"\"STORE_PASSWORD\"",
"\"STORE_NAME\""
] |
[] |
[
"STORE_API_KEY",
"STORE_NAME",
"STORE_PASSWORD"
] |
[]
|
["STORE_API_KEY", "STORE_NAME", "STORE_PASSWORD"]
|
go
| 3 | 0 | |
contrib/gitian-build.py
|
#!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
def setup():
global args, workdir
programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget']
if args.kvm:
programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install docker', file=sys.stderr)
exit(1)
else:
programs += ['lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/GROW-Project/gitian.sigs.git'])
if not os.path.isdir('GROW-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/GROW-Project/GROW-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('GROW'):
subprocess.check_call(['git', 'clone', 'https://github.com/GROW-Project/GROW.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
exit(0)
def build():
global args, workdir
os.makedirs('GROW-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'http://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(['make', '-C', '../GROW/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'grow='+args.commit, '--url', 'grow='+args.url, '../GROW/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../GROW/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/grow-*.tar.gz build/out/src/grow-*.tar.gz ../GROW-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'grow='+args.commit, '--url', 'grow='+args.url, '../GROW/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../GROW/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/grow-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/grow-*.zip build/out/grow-*.exe ../GROW-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'grow='+args.commit, '--url', 'grow='+args.url, '../GROW/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../GROW/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/grow-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/grow-*.tar.gz build/out/grow-*.dmg ../GROW-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'config', 'user.signingkey', args.signer])
if args.linux:
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
if args.windows:
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
if args.macos:
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
if args.windows:
print('\nSigning ' + args.version + ' Windows')
subprocess.check_call('cp inputs/GROW-' + args.version + '-win-unsigned.tar.gz inputs/GROW-win-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../GROW/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../GROW/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call('mv build/out/grow-*win64-setup.exe ../GROW-binaries/'+args.version, shell=True)
subprocess.check_call('mv build/out/grow-*win32-setup.exe ../GROW-binaries/'+args.version, shell=True)
if args.macos:
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/GROW-' + args.version + '-osx-unsigned.tar.gz inputs/GROW-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../GROW/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../GROW/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/grow-osx-signed.dmg ../GROW-binaries/'+args.version+'/GROW-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Signed Sigs\n')
os.chdir('gitian.sigs')
if args.windows:
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
if args.macos:
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
subprocess.check_call(['git', 'commit', '-S', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
os.chdir(workdir)
def verify():
global args, workdir
os.chdir('gitian-builder')
if args.linux:
print('\nVerifying v'+args.version+' Linux\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../GROW/contrib/gitian-descriptors/gitian-linux.yml'])
if args.windows:
print('\nVerifying v'+args.version+' Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../GROW/contrib/gitian-descriptors/gitian-win.yml'])
if args.sign:
print('\nVerifying v'+args.version+' Signed Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../GROW/contrib/gitian-descriptors/gitian-win-signer.yml'])
if args.macos:
print('\nVerifying v'+args.version+' MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../GROW/contrib/gitian-descriptors/gitian-osx.yml'])
if args.sign:
print('\nVerifying v'+args.version+' Signed MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../GROW/contrib/gitian-descriptors/gitian-osx-signer.yml'])
os.chdir(workdir)
def main():
global args, workdir
parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/GROW-Project/GROW', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', help='GPG signer to sign each build assert file')
parser.add_argument('version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.buildsign:
args.build=True
args.sign=True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
    # Set the USE_LXC or USE_DOCKER environment variable to let gitian-builder know whether we use LXC or Docker
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if not 'GITIAN_HOST_IP' in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if not 'LXC_GUEST_IP' in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
script_name = os.path.basename(sys.argv[0])
# Signer and version shouldn't be empty
if args.signer == '':
print(script_name+': Missing signer.')
print('Try '+script_name+' --help for more information')
exit(1)
if args.version == '':
print(script_name+': Missing version.')
print('Try '+script_name+' --help for more information')
exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = ('' if args.commit else 'v') + args.version
if args.setup:
setup()
os.chdir('GROW')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
os.chdir('../gitian-builder/inputs/GROW')
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
verify()
if __name__ == '__main__':
main()
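# Editorial note (illustrative invocations; signer and version are placeholders):
#   ./gitian-build.py --setup mysigner 1.0.0      # one-time environment setup
#   ./gitian-build.py -b -o l mysigner 1.0.0      # unsigned Linux-only build
#   ./gitian-build.py --verify mysigner 1.0.0     # verify published signatures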
|
[] |
[] |
[
"USE_DOCKER",
"GITIAN_HOST_IP",
"USE_LXC",
"LXC_GUEST_IP"
] |
[]
|
["USE_DOCKER", "GITIAN_HOST_IP", "USE_LXC", "LXC_GUEST_IP"]
|
python
| 4 | 0 | |
Q2/simpleDatabaseDesign/simpleDatabaseDesign/asgi.py
|
"""
ASGI config for simpleDatabaseDesign project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'simpleDatabaseDesign.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
main.go
|
package main
import (
"flag"
"fmt"
"io"
"log"
"net"
"net/http"
"net/http/httputil"
"os"
"os/exec"
"os/signal"
"path"
"regexp"
"strings"
"syscall"
"time"
"github.com/dustin/go-follow"
"github.com/gorilla/websocket"
)
var (
address = flag.String("address", ":8080", "address to listen on")
repositories = flag.String("repositories", "scraperwiki/tang", "colon separated list of repositories to watch")
allowedPushers = flag.String("allowed-pushers", "drj11:pwaller", "list of people allowed")
uid = flag.Int("uid", 0, "uid to run as")
github_user, github_password string
allowedPushersSet = map[string]bool{}
	// Populated by `go install -ldflags '-X tangRev asdf -X tangDate asdf'`
tangRev, tangDate string
)
func init() {
flag.Parse()
for _, who := range strings.Split(*allowedPushers, ":") {
allowedPushersSet[who] = true
}
github_user = os.Getenv("GITHUB_USER")
github_password = os.Getenv("GITHUB_PASSWORD")
env := os.Environ()
os.Clearenv()
for _, e := range env {
if strings.HasPrefix(e, "GITHUB_") {
continue
}
split := strings.SplitN(e, "=", 2)
key, value := split[0], split[1]
os.Setenv(key, value)
}
}
func check(err error) {
if err != nil {
panic(err)
}
}
func ensureChildDeath() {
sid, err := syscall.Setsid()
if err != nil {
sid = os.Getpid()
}
shc := fmt.Sprintf("trap 'kill -TERM -%d' HUP; while : ; do sleep 0.1; done", sid)
c := exec.Command("bash", "-c", shc)
c.Stdout = os.Stdout
c.Stderr = os.Stderr
c.SysProcAttr = &syscall.SysProcAttr{
Pdeathsig: syscall.SIGHUP,
}
err = c.Start()
check(err)
log.Println("Started..")
go func() {
err = c.Wait()
log.Println("Exited sentinel..", err)
}()
}
func main() {
ensureChildDeath()
if tangRev == "" {
log.Println("tangRev and tangDate unavailable.")
log.Println("Use install-tang script if you want build date/version")
} else {
log.Println("Starting", tangRev[:4], "committed", tangDate)
}
// Get the socket quickly so we can drop privileges ASAP
listener, err := getListener(*address)
check(err)
// Must read exe before the executable is replaced by deployment
// Must also read exe link before Setuid since we lose the privilege of
// reading it.
exe, err := os.Readlink("/proc/self/exe")
check(err)
// Drop privileges immediately after getting socket
if *uid != 0 {
panic("setuid is not supported, see http://code.google.com/p/go/issues/detail?id=1435")
log.Println("Setting UID =", *uid)
err = syscall.Setuid(*uid)
check(err)
}
err = gitSetupCredentialHelper()
check(err)
// Start catching signals early.
sig := make(chan os.Signal)
signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
// Make somewhere to put our logs
err = os.MkdirAll("logs/", 0777)
check(err)
go ServeHTTP(listener)
// Set up github hooks
configureHooks()
go func() {
// Hack to let github know that the process started successfully
// (Since the previous one may have been killed)
infoURL := "http://services.scraperwiki.com/tang/"
s := GithubStatus{"success", infoURL, "Tang running"}
updateStatus("scraperwiki/tang", tangRev, s)
}()
// Tell the user how to quit
if IsTerminal(os.Stdin.Fd()) {
log.Println("Hello, terminal user. CTRL-D (EOF) to exit.")
go ExitOnEOF()
} else {
log.Println("Send me SIGQUIT to exit.")
}
// Wait for a signal listed in `signal.Notify(sig, ...)`
value := <-sig
signal.Stop(sig)
log.Printf("Received %v", value)
if value == syscall.SIGTERM {
return
}
// We've been instructed to exit.
log.Printf("Revision %v exiting, restarting...", (tangRev + "doge")[:4])
// TODO(pwaller) Don't exec before everything else has finished.
// OTOH, that means waiting for other cruft in the pipeline, which
// might cause a significant delay.
// Maybe the process we exec to can wait on the children?
// This is probably very tricky to get right without delaying the exec.
// How do we find our children? Might involve iterating through /proc.
err = syscall.Exec(exe, os.Args, gitCredentialsEnviron())
check(err)
}
// Set up github hooks so that it notifies us for any chances to repositories
// we care about
func configureHooks() {
if *repositories == "" {
return
}
// JSON payload for github
// http://developer.github.com/v3/repos/hooks/#json-http
json := `{
"name": "web",
"config": {"url": "http://services.scraperwiki.com/hook",
"content_type": "json"},
"events": ["push", "issues", "issue_comment",
"commit_comment", "create", "delete",
"pull_request", "pull_request_review_comment",
"gollum", "watch", "release", "fork", "member",
"public", "team_add", "status"],
"active": true
}`
// Each of the repositories listed on the command line
repos := strings.Split(*repositories, ":")
for _, repo := range repos {
response, resp, err := Github(json, "repos", repo, "hooks")
if err == ErrSkipGithubEndpoint {
continue
}
check(err)
switch resp.StatusCode {
default:
log.Print(response)
case 422:
log.Println("Already hooked for", repo)
}
}
}
// Since CTRL-C is used for a reload, it's nice to have a way to exit (CTRL-D).
func ExitOnEOF() {
func() {
buf := make([]byte, 64*1024)
for {
_, err := os.Stdin.Read(buf)
if err == io.EOF {
log.Println("EOF, bye!")
os.Exit(0)
}
}
}()
}
type WebsocketWriter struct {
*websocket.Conn
}
func (ww *WebsocketWriter) Write(data []byte) (n int, err error) {
err = ww.WriteMessage(websocket.BinaryMessage, data)
if err == nil {
n = len(data)
}
return
}
func LiveLogHandler(response http.ResponseWriter, req *http.Request) {
	ws, err := websocket.Upgrade(response, req, nil, 1024, 1024)
	if _, ok := err.(websocket.HandshakeError); ok {
		http.Error(response, "Not a websocket handshake", 400)
		return
	} else if err != nil {
		log.Println(err)
		return
	}
	// Defer Close only after a successful upgrade; on error ws may be nil.
	defer ws.Close()
stationaryFd, err := os.Open("/home/pwaller/test.log")
check(err)
defer stationaryFd.Close()
fd := follow.New(stationaryFd)
go func() {
var err error
// Wait until the other end closes the connection or sends
// a message.
t, m, err := ws.ReadMessage()
if err != io.EOF && err != io.ErrUnexpectedEOF {
log.Println("LiveLogHandler(): error reading msg: ", err)
}
_, _ = t, m
		// Close the follow descriptor, which causes Copy to terminate
_ = fd.Close()
}()
w := &WebsocketWriter{ws}
// Blocks until web connection is closed.
_, err = io.Copy(w, fd)
// log.Println("Err =", err, n)
}
func ServeHTTP(l net.Listener) {
// Expose logs directory
pwd, err := os.Getwd()
check(err)
logDir := path.Join(pwd, "logs")
logHandler := http.FileServer(http.Dir(logDir))
log.Println("Serving logs at", logDir)
handler := NewTangHandler()
handler.HandleFunc("/tang/", handleTang)
handler.HandleFunc("/tang/live/logs/", LiveLogHandler)
handler.Handle("/tang/logs/", http.StripPrefix("/tang/logs/", logHandler))
handler.HandleFunc("/hook", handleHook)
err = http.Serve(l, handler)
log.Fatal(err)
}
type TangHandler struct {
*http.ServeMux
requests chan<- Request
}
func NewTangHandler() *TangHandler {
requests := make(chan Request)
go ServerRouter(requests)
return &TangHandler{http.NewServeMux(), requests}
}
func (th *TangHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
log.Printf("Incoming request: %v %v", r.Host, r.URL)
if th.HandleQA(w, r) {
return
}
// Delegate
th.ServeMux.ServeHTTP(w, r)
}
// TODO(drj): add params here on the left of the branch.
// Matches hosts of the form <ref>.<repository>.qa.scraperwiki.com[:port].
var checkQA, _ = regexp.Compile(`^([^.]+)\.([^.]+)\.qa\.scraperwiki\.com(:\d+)?`)
func (th *TangHandler) HandleQA(w http.ResponseWriter, r *http.Request) (handled bool) {
pieces := checkQA.FindStringSubmatch(r.Host)
if pieces == nil {
return
}
handled = true
ref, repository := pieces[1], pieces[2]
_, _ = ref, repository
//fmt.Fprintf(w, "TODO, proxy for %v %v %v", r.Host, ref, repository)
serverChan := make(chan Server)
th.requests <- Request{r.Host, serverChan}
server := <-serverChan
_, err := server.ready()
if err != nil {
http.Error(w, fmt.Sprintf("TANG Error from server: %q",
err), 500)
return
}
p := httputil.NewSingleHostReverseProxy(server.url())
p.ServeHTTP(w, r)
return
}
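// Editorial note (derived from HandleQA above): a request whose Host matches
// <ref>.<repository>.qa.scraperwiki.com is resolved through the requests
// channel and reverse-proxied to the backend returned for that host; if the
// backend is not ready, the handler responds with HTTP 500 and the error.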
func handleTang(w http.ResponseWriter, r *http.Request) {
w.Header()["Content-Type"] = []string{"text/plain; charset=utf-8"}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `<!DOCTYPE html><style>html, body { font-type: sans; }</style><pre id="content"><pre>`)
for i := 0; i < 100; i++ {
fmt.Fprintf(w, "%d elephants\n", i)
w.(http.Flusher).Flush()
time.Sleep(100 * time.Millisecond)
}
// fmt.Fprintf(w, `<script>window.location = "http://duckduckgo.com";</script>`)
}
|
[
"\"GITHUB_USER\"",
"\"GITHUB_PASSWORD\""
] |
[] |
[
"GITHUB_PASSWORD",
"GITHUB_USER"
] |
[]
|
["GITHUB_PASSWORD", "GITHUB_USER"]
|
go
| 2 | 0 | |
soracom/generated/cmd/port_mappings_create.go
|
// Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"os"
"strings"
"github.com/spf13/cobra"
)
// PortMappingsCreateCmdDuration holds value of 'duration' option
var PortMappingsCreateCmdDuration float64
// PortMappingsCreateCmdTlsRequired holds value of 'tlsRequired' option
var PortMappingsCreateCmdTlsRequired bool
// PortMappingsCreateCmdBody holds contents of request body to be sent
var PortMappingsCreateCmdBody string
func init() {
PortMappingsCreateCmd.Flags().Float64Var(&PortMappingsCreateCmdDuration, "duration", 0, TRAPI(""))
PortMappingsCreateCmd.Flags().BoolVar(&PortMappingsCreateCmdTlsRequired, "tls-required", false, TRAPI(""))
PortMappingsCreateCmd.Flags().StringVar(&PortMappingsCreateCmdBody, "body", "", TRCLI("cli.common_params.body.short_help"))
PortMappingsCmd.AddCommand(PortMappingsCreateCmd)
}
// PortMappingsCreateCmd defines 'create' subcommand
var PortMappingsCreateCmd = &cobra.Command{
Use: "create",
Short: TRAPI("/port_mappings:post:summary"),
Long: TRAPI(`/port_mappings:post:description`),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
return fmt.Errorf("unexpected arguments passed => %v", args)
}
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectPortMappingsCreateCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectPortMappingsCreateCmdParams(ac *apiClient) (*apiParams, error) {
var body string
var parsedBody interface{}
var err error
body, err = buildBodyForPortMappingsCreateCmd()
if err != nil {
return nil, err
}
contentType := "application/json"
if contentType == "application/json" {
err = json.Unmarshal([]byte(body), &parsedBody)
if err != nil {
return nil, fmt.Errorf("invalid json format specified for `--body` parameter: %s", err)
}
}
return &apiParams{
method: "POST",
path: buildPathForPortMappingsCreateCmd("/port_mappings"),
query: buildQueryForPortMappingsCreateCmd(),
contentType: contentType,
body: body,
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForPortMappingsCreateCmd(path string) string {
return path
}
func buildQueryForPortMappingsCreateCmd() url.Values {
result := url.Values{}
return result
}
func buildBodyForPortMappingsCreateCmd() (string, error) {
var result map[string]interface{}
if PortMappingsCreateCmdBody != "" {
var b []byte
var err error
if strings.HasPrefix(PortMappingsCreateCmdBody, "@") {
fname := strings.TrimPrefix(PortMappingsCreateCmdBody, "@")
// #nosec
b, err = ioutil.ReadFile(fname)
} else if PortMappingsCreateCmdBody == "-" {
b, err = ioutil.ReadAll(os.Stdin)
} else {
b = []byte(PortMappingsCreateCmdBody)
}
if err != nil {
return "", err
}
err = json.Unmarshal(b, &result)
if err != nil {
return "", err
}
}
if result == nil {
result = make(map[string]interface{})
}
if PortMappingsCreateCmdDuration != 0 {
result["duration"] = PortMappingsCreateCmdDuration
}
if PortMappingsCreateCmdTlsRequired != false {
result["tlsRequired"] = PortMappingsCreateCmdTlsRequired
}
resultBytes, err := json.Marshal(result)
if err != nil {
return "", err
}
return string(resultBytes), nil
}
|
[
"\"SORACOM_VERBOSE\""
] |
[] |
[
"SORACOM_VERBOSE"
] |
[]
|
["SORACOM_VERBOSE"]
|
go
| 1 | 0 | |
setup.py
|
#!/usr/bin/env python
#
# Copyright 2017 Pixar Animation Studios
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
""" Configuration file for the OpenTimelineIO Python Package. """
import os
import sys
import unittest
from setuptools import setup
import setuptools.command.build_py
import distutils.version
import pip
# Add command to upload to PyPI
# Set TWINE_USERNAME and TWINE_PASSWORD variables
# PyCharm Check: Emulate terminal in output console
if sys.argv[-1] == 'up':
os.system('rmdir /S/Q build')
os.system('rmdir /S/Q dist')
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
sys.exit()
elif sys.argv[-1] == 'sdist':
os.system('rmdir /S/Q build')
os.system('rmdir /S/Q dist')
os.system('python setup.py sdist')
sys.exit()
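# Editorial note (assumed workflow, not part of the original file): export
# TWINE_USERNAME and TWINE_PASSWORD in the environment, then run
#     python setup.py up
# to rebuild and upload to PyPI, or
#     python setup.py sdist
# to produce a source distribution only.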
# Make sure the environment contains a sufficiently recent version of pip.
PIP_VERSION = pip.__version__
REQUIRED_PIP_VERSION = "6.0.0"
if (
distutils.version.StrictVersion(PIP_VERSION)
<= distutils.version.StrictVersion(REQUIRED_PIP_VERSION)
):
sys.stderr.write(
"Your pip version is: '{}', OpenTimelineIO requires at least "
"version '{}'. Please update pip by running:\n"
"pip install -U pip\n".format(
PIP_VERSION,
REQUIRED_PIP_VERSION,
)
)
sys.exit(1)
# Make sure the environment contains a sufficiently recent version of setuptools.
try:
import setuptools.version
SETUPTOOLS_VERSION = setuptools.version.__version__
except ImportError:
SETUPTOOLS_VERSION = setuptools.__version__
REQUIRED_SETUPTOOLS_VERSION = '20.5.0'
if (
distutils.version.StrictVersion(SETUPTOOLS_VERSION)
<= distutils.version.StrictVersion(REQUIRED_SETUPTOOLS_VERSION)
):
sys.stderr.write(
"Your setuptools version is: '{}', OpenTimelineIO requires at least "
"version '{}'. Please update setuptools by running:\n"
"pip install -U setuptools\n".format(
SETUPTOOLS_VERSION,
REQUIRED_SETUPTOOLS_VERSION,
)
)
sys.exit(1)
# check the python version first
if (
sys.version_info[0] < 2 or
(sys.version_info[0] == 2 and sys.version_info[1] < 7)
):
sys.exit(
'OpenTimelineIO requires python2.7 or greater, detected version:'
' {}.{}'.format(
sys.version_info[0],
sys.version_info[1]
)
)
# Metadata that gets stamped into the __init__ files during the build phase.
PROJECT_METADATA = {
"version": "0.11.0.dev2",
"author": 'Pixar Animation Studios',
"author_email": '[email protected]',
"license": 'Modified Apache 2.0 License',
}
METADATA_TEMPLATE = """
__version__ = "{version}"
__author__ = "{author}"
__author_email__ = "{author_email}"
__license__ = "{license}"
"""
def _append_version_info_to_init_scripts(build_lib):
"""Stamp PROJECT_METADATA into __init__ files."""
for module in [
"opentimelineio_py",
"opentimelineio_py_contrib",
# "opentimelineview",
]:
target_file = os.path.join(build_lib, module, "__init__.py")
source_file = os.path.join(
os.path.dirname(__file__),
module, "__init__.py"
)
# get the base data from the original file
with open(source_file, 'r') as fi:
src_data = fi.read()
# write that + the suffix to the target file
with open(target_file, 'w') as fo:
fo.write(src_data)
fo.write(METADATA_TEMPLATE.format(**PROJECT_METADATA))
class AddMetadataToInits(setuptools.command.build_py.build_py):
"""Stamps PROJECT_METADATA into __init__ files."""
def run(self):
setuptools.command.build_py.build_py.run(self)
if not self.dry_run:
_append_version_info_to_init_scripts(self.build_lib)
def test_otio():
"""Discovers and runs tests"""
try:
# Clear the environment of a preset media linker
del os.environ['OTIO_DEFAULT_MEDIA_LINKER']
except KeyError:
pass
return unittest.TestLoader().discover('tests')
# copied from first paragraph of README.md
LONG_DESCRIPTION = """
A copy of the source code from the last pure-Python release: https://github.com/PixarAnimationStudios/OpenTimelineIO/tree/last_pure_python
Why this older, pure-Python version of the repository exists: https://github.com/PixarAnimationStudios/OpenTimelineIO/issues/756
**No new features are added; the goal is simply to keep this version available!**
The following is the original document description:
Main web site: http://opentimeline.io/
Documentation: https://opentimelineio.readthedocs.io/
GitHub: https://github.com/PixarAnimationStudios/OpenTimelineIO
Discussion group: https://lists.aswf.io/g/otio-discussion
OpenTimelineIO is an interchange format and API for
editorial cut information. OTIO is not a container format for media, rather it
contains information about the order and length of cuts and references to
external media.
OTIO includes both a file format and an API for manipulating that format. It
also includes a plugin architecture for writing adapters to convert from/to
existing editorial timeline formats. It also implements a dependency- less
library for dealing strictly with time, opentime.
You can provide adapters for your video editing tool or pipeline as needed.
Each adapter allows for import/export between that proprietary tool and the
OpenTimelineIO format."""
setup(
name='OpenTimelineIO-Py',
description='Editorial interchange format and API',
long_description=LONG_DESCRIPTION,
url='http://opentimeline.io',
project_urls={
'Source':
'https://github.com/PixarAnimationStudios/OpenTimelineIO',
'Documentation':
'https://opentimelineio.readthedocs.io/',
'Issues':
'https://github.com/PixarAnimationStudios/OpenTimelineIO/issues',
},
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Multimedia :: Graphics',
'Topic :: Multimedia :: Video',
'Topic :: Multimedia :: Video :: Display',
'Topic :: Multimedia :: Video :: Non-Linear Editor',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: Other/Proprietary License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent',
'Natural Language :: English',
],
keywords='film tv editing editorial edit non-linear edl time',
platforms='any',
packages=[
'opentimelineio_py',
'opentimelineio_py.adapters',
'opentimelineio_py.algorithms',
'opentimelineio_py.core',
'opentimelineio_py.schema',
'opentimelineio_py.schemadef',
'opentimelineio_py.plugins',
'opentimelineio_py.console',
'opentimelineio_py_contrib',
'opentimelineio_py_contrib.adapters',
# 'opentimelineio_py_contrib.adapters.aaf_adapter',
# 'opentimelineview',
],
package_data={
'opentimelineio_py': [
'adapters/builtin_adapters.plugin_manifest.json',
],
'opentimelineio_py_contrib': [
'adapters/contrib_adapters.plugin_manifest.json',
]
},
install_requires=[
# 'pyaaf2==1.2.0'
],
entry_points={
'console_scripts': [
# 'otioview = opentimelineview.console:main',
'otiocat = opentimelineio_py.console.otiocat:main',
'otioconvert = opentimelineio_py.console.otioconvert:main',
'otiostat = opentimelineio_py.console.otiostat:main',
'otioautogen_serialized_schema_docs = opentimelineio_py.console.autogen_serialized_datamodel:main',
],
},
extras_require={
'dev': [
'flake8>=3.5',
'coverage>=4.5',
'tox>=3.0',
'urllib3>=1.24.3'
],
# 'view': [
# 'PySide2~=5.11'
# ]
},
test_suite='setup.test_otio',
tests_require=[
'mock;python_version<"3.3"',
],
# because we need to open() the adapters manifest, we aren't zip-safe
zip_safe=False,
# Use the code that wires the PROJECT_METADATA into the __init__ files.
cmdclass={'build_py': AddMetadataToInits},
# expand the project metadata dictionary to fill in those values
**PROJECT_METADATA
)
|
[] |
[] |
[
"OTIO_DEFAULT_MEDIA_LINKER"
] |
[]
|
["OTIO_DEFAULT_MEDIA_LINKER"]
|
python
| 1 | 0 | |
sympy/testing/pytest.py
|
"""py.test hacks to support XFAIL/XPASS"""
from __future__ import print_function, division
import sys
import functools
import os
import contextlib
import warnings
from sympy.core.compatibility import get_function_name
from sympy.utilities.exceptions import SymPyDeprecationWarning
ON_TRAVIS = os.getenv("TRAVIS_BUILD_NUMBER", None)
try:
import pytest
USE_PYTEST = getattr(sys, "_running_pytest", False)
except ImportError:
USE_PYTEST = False
if USE_PYTEST:
raises = pytest.raises
warns = pytest.warns
skip = pytest.skip
XFAIL = pytest.mark.xfail
SKIP = pytest.mark.skip
slow = pytest.mark.slow
nocache_fail = pytest.mark.nocache_fail
else:
# Not using pytest so define the things that would have been imported from
# there.
def raises(expectedException, code=None):
"""
Tests that ``code`` raises the exception ``expectedException``.
``code`` may be a callable, such as a lambda expression or function
name.
If ``code`` is not given or None, ``raises`` will return a context
manager for use in ``with`` statements; the code to execute then
comes from the scope of the ``with``.
``raises()`` does nothing if the callable raises the expected exception,
otherwise it raises an AssertionError.
Examples
========
>>> from sympy.testing.pytest import raises
>>> raises(ZeroDivisionError, lambda: 1/0)
>>> raises(ZeroDivisionError, lambda: 1/2)
Traceback (most recent call last):
...
Failed: DID NOT RAISE
>>> with raises(ZeroDivisionError):
... n = 1/0
>>> with raises(ZeroDivisionError):
... n = 1/2
Traceback (most recent call last):
...
Failed: DID NOT RAISE
Note that you cannot test multiple statements via
``with raises``:
>>> with raises(ZeroDivisionError):
... n = 1/0 # will execute and raise, aborting the ``with``
... n = 9999/0 # never executed
This is just what ``with`` is supposed to do: abort the
contained statement sequence at the first exception and let
the context manager deal with the exception.
To test multiple statements, you'll need a separate ``with``
for each:
>>> with raises(ZeroDivisionError):
... n = 1/0 # will execute and raise
>>> with raises(ZeroDivisionError):
... n = 9999/0 # will also execute and raise
"""
if code is None:
return RaisesContext(expectedException)
elif callable(code):
try:
code()
except expectedException:
return
raise Failed("DID NOT RAISE")
elif isinstance(code, str):
raise TypeError(
"'raises(xxx, \"code\")' has been phased out; "
"change 'raises(xxx, \"expression\")' "
"to 'raises(xxx, lambda: expression)', "
"'raises(xxx, \"statement\")' "
"to 'with raises(xxx): statement'"
)
else:
raise TypeError("raises() expects a callable for the 2nd argument.")
class RaisesContext(object):
def __init__(self, expectedException):
self.expectedException = expectedException
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
raise Failed("DID NOT RAISE")
return issubclass(exc_type, self.expectedException)
class XFail(Exception):
pass
class XPass(Exception):
pass
class Skipped(Exception):
pass
class Failed(Exception):
pass
def XFAIL(func):
def wrapper():
try:
func()
except Exception as e:
message = str(e)
if message != "Timeout":
raise XFail(get_function_name(func))
else:
raise Skipped("Timeout")
raise XPass(get_function_name(func))
wrapper = functools.update_wrapper(wrapper, func)
return wrapper
def skip(str):
raise Skipped(str)
def SKIP(reason):
"""Similar to ``skip()``, but this is a decorator. """
def wrapper(func):
def func_wrapper():
raise Skipped(reason)
func_wrapper = functools.update_wrapper(func_wrapper, func)
return func_wrapper
return wrapper
def slow(func):
func._slow = True
def func_wrapper():
func()
func_wrapper = functools.update_wrapper(func_wrapper, func)
func_wrapper.__wrapped__ = func
return func_wrapper
def nocache_fail(func):
"Dummy decorator for marking tests that fail when cache is disabled"
return func
@contextlib.contextmanager
def warns(warningcls, **kwargs):
"""Like raises but tests that warnings are emitted.
>>> from sympy.testing.pytest import warns
>>> import warnings
>>> with warns(UserWarning):
... warnings.warn('deprecated', UserWarning)
>>> with warns(UserWarning):
... pass
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type UserWarning\
was emitted. The list of emitted warnings is: [].
"""
match = kwargs.pop("match", "")
if kwargs:
raise TypeError("Invalid keyword arguments: %s" % kwargs)
# Absorbs all warnings in warnrec
with warnings.catch_warnings(record=True) as warnrec:
# Hide all warnings but make sure that our warning is emitted
warnings.simplefilter("ignore")
warnings.filterwarnings("always", match, warningcls)
# Now run the test
yield
# Raise if expected warning not found
if not any(issubclass(w.category, warningcls) for w in warnrec):
msg = (
"Failed: DID NOT WARN."
" No warnings of type %s was emitted."
" The list of emitted warnings is: %s."
) % (warningcls, [w.message for w in warnrec])
raise Failed(msg)
@contextlib.contextmanager
def warns_deprecated_sympy():
"""Shorthand for ``warns(SymPyDeprecationWarning)``
This is the recommended way to test that ``SymPyDeprecationWarning`` is
emitted for deprecated features in SymPy. To test for other warnings use
``warns``. To suppress warnings without asserting that they are emitted
use ``ignore_warnings``.
>>> from sympy.testing.pytest import warns_deprecated_sympy
>>> from sympy.utilities.exceptions import SymPyDeprecationWarning
>>> import warnings
>>> with warns_deprecated_sympy():
... SymPyDeprecationWarning("Don't use", feature="old thing",
... deprecated_since_version="1.0", issue=123).warn()
>>> with warns_deprecated_sympy():
... pass
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type \
SymPyDeprecationWarning was emitted. The list of emitted warnings is: [].
"""
with warns(SymPyDeprecationWarning):
yield
@contextlib.contextmanager
def ignore_warnings(warningcls):
"""Context manager to suppress warnings during tests.
This function is useful for suppressing warnings during tests. The warns
function should be used to assert that a warning is raised. The
ignore_warnings function is useful in situation when the warning is not
guaranteed to be raised (e.g. on importing a module) or if the warning
comes from third-party code.
When the warning is coming (reliably) from SymPy the warns function should
be preferred to ignore_warnings.
>>> from sympy.testing.pytest import ignore_warnings
>>> import warnings
Here's a warning:
>>> with warnings.catch_warnings(): # reset warnings in doctest
... warnings.simplefilter('error')
... warnings.warn('deprecated', UserWarning)
Traceback (most recent call last):
...
UserWarning: deprecated
Let's suppress it with ignore_warnings:
>>> with warnings.catch_warnings(): # reset warnings in doctest
... warnings.simplefilter('error')
... with ignore_warnings(UserWarning):
... warnings.warn('deprecated', UserWarning)
(No warning emitted)
"""
# Absorbs all warnings in warnrec
with warnings.catch_warnings(record=True) as warnrec:
# Make sure our warning doesn't get filtered
warnings.simplefilter("always", warningcls)
# Now run the test
yield
# Reissue any warnings that we aren't testing for
for w in warnrec:
if not issubclass(w.category, warningcls):
warnings.warn_explicit(w.message, w.category, w.filename, w.lineno)
|
[] |
[] |
[
"TRAVIS_BUILD_NUMBER"
] |
[]
|
["TRAVIS_BUILD_NUMBER"]
|
python
| 1 | 0 | |
server.go
|
package main
import (
"log"
"net/http"
"os"
"github.com/99designs/gqlgen/handler"
"github.com/pscn/go4graphql/api"
"github.com/pscn/go4graphql/graph"
)
const defaultPort = "3000"
func main() {
port := os.Getenv("PORT")
if port == "" {
port = defaultPort
}
http.Handle("/", handler.Playground("GraphQL playground", "/query"))
http.Handle("/query", handler.GraphQL(
graph.NewExecutableSchema(
graph.Config{Resolvers: api.NewResolver(true)}),
))
log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
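// Editorial note (illustrative): the listen port comes from the PORT
// environment variable and falls back to 3000, e.g.
//
//	PORT=8080 go run server.go
//
// then open http://localhost:8080/ for the GraphQL playground.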
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
backend/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_28976.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
internal/httputil/httpapi.go
|
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httputil
import (
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/http/httputil"
"os"
"strings"
"sync"
"time"
"github.com/getsentry/sentry-go"
"github.com/gorilla/mux"
"github.com/matrix-org/dendrite/clientapi/auth"
federationapiAPI "github.com/matrix-org/dendrite/federationapi/api"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
)
// BasicAuth is used for authorization on /metrics handlers
type BasicAuth struct {
Username string `yaml:"username"`
Password string `yaml:"password"`
}
// MakeAuthAPI turns a util.JSONRequestHandler function into an http.Handler which authenticates the request.
func MakeAuthAPI(
metricsName string, userAPI userapi.UserInternalAPI,
f func(*http.Request, *userapi.Device) util.JSONResponse,
) http.Handler {
h := func(req *http.Request) util.JSONResponse {
logger := util.GetLogger(req.Context())
device, err := auth.VerifyUserFromRequest(req, userAPI)
if err != nil {
logger.Debugf("VerifyUserFromRequest %s -> HTTP %d", req.RemoteAddr, err.Code)
return *err
}
// add the user ID to the logger
logger = logger.WithField("user_id", device.UserID)
req = req.WithContext(util.ContextWithLogger(req.Context(), logger))
// add the user to Sentry, if enabled
hub := sentry.GetHubFromContext(req.Context())
if hub != nil {
hub.Scope().SetTag("user_id", device.UserID)
hub.Scope().SetTag("device_id", device.ID)
}
defer func() {
if r := recover(); r != nil {
if hub != nil {
hub.CaptureException(fmt.Errorf("%s panicked", req.URL.Path))
}
// re-panic to return the 500
panic(r)
}
}()
jsonRes := f(req, device)
// do not log 4xx as errors as they are client fails, not server fails
if hub != nil && jsonRes.Code >= 500 {
hub.Scope().SetExtra("response", jsonRes)
hub.CaptureException(fmt.Errorf("%s returned HTTP %d", req.URL.Path, jsonRes.Code))
}
return jsonRes
}
return MakeExternalAPI(metricsName, h)
}
// MakeExternalAPI turns a util.JSONRequestHandler function into an http.Handler.
// This is used for APIs that are called from the internet.
func MakeExternalAPI(metricsName string, f func(*http.Request) util.JSONResponse) http.Handler {
	// TODO: We shouldn't be reading env vars directly here; inject the value instead.
// Refactor this when we split out config structs.
verbose := false
if os.Getenv("DENDRITE_TRACE_HTTP") == "1" {
verbose = true
}
h := util.MakeJSONAPI(util.NewJSONRequestHandler(f))
withSpan := func(w http.ResponseWriter, req *http.Request) {
nextWriter := w
if verbose {
logger := logrus.NewEntry(logrus.StandardLogger())
// Log outgoing response
rec := httptest.NewRecorder()
nextWriter = rec
defer func() {
resp := rec.Result()
dump, err := httputil.DumpResponse(resp, true)
if err != nil {
logger.Debugf("Failed to dump outgoing response: %s", err)
} else {
strSlice := strings.Split(string(dump), "\n")
for _, s := range strSlice {
logger.Debug(s)
}
}
// copy the response to the client
for hdr, vals := range resp.Header {
for _, val := range vals {
w.Header().Add(hdr, val)
}
}
w.WriteHeader(resp.StatusCode)
// discard errors as this is for debugging
_, _ = io.Copy(w, resp.Body)
_ = resp.Body.Close()
}()
// Log incoming request
dump, err := httputil.DumpRequest(req, true)
if err != nil {
logger.Debugf("Failed to dump incoming request: %s", err)
} else {
strSlice := strings.Split(string(dump), "\n")
for _, s := range strSlice {
logger.Debug(s)
}
}
}
span := opentracing.StartSpan(metricsName)
defer span.Finish()
req = req.WithContext(opentracing.ContextWithSpan(req.Context(), span))
h.ServeHTTP(nextWriter, req)
}
return http.HandlerFunc(withSpan)
}
// MakeHTMLAPI adds Span metrics to the HTML Handler function
// This is used to serve HTML alongside JSON error messages
func MakeHTMLAPI(metricsName string, f func(http.ResponseWriter, *http.Request) *util.JSONResponse) http.Handler {
withSpan := func(w http.ResponseWriter, req *http.Request) {
span := opentracing.StartSpan(metricsName)
defer span.Finish()
req = req.WithContext(opentracing.ContextWithSpan(req.Context(), span))
if err := f(w, req); err != nil {
h := util.MakeJSONAPI(util.NewJSONRequestHandler(func(req *http.Request) util.JSONResponse {
return *err
}))
h.ServeHTTP(w, req)
}
}
return promhttp.InstrumentHandlerCounter(
promauto.NewCounterVec(
prometheus.CounterOpts{
Name: metricsName,
Help: "Total number of http requests for HTML resources",
},
[]string{"code"},
),
http.HandlerFunc(withSpan),
)
}
// MakeInternalAPI turns a util.JSONRequestHandler function into an http.Handler.
// This is used for APIs that are internal to dendrite.
// If we are passed a tracing context in the request headers then we use that
// as the parent of any tracing spans we create.
func MakeInternalAPI(metricsName string, f func(*http.Request) util.JSONResponse) http.Handler {
h := util.MakeJSONAPI(util.NewJSONRequestHandler(f))
withSpan := func(w http.ResponseWriter, req *http.Request) {
carrier := opentracing.HTTPHeadersCarrier(req.Header)
tracer := opentracing.GlobalTracer()
clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
var span opentracing.Span
		if err == nil {
			// We extracted a span context from the request headers, so use it
			// as the parent of the span for this request.
			span = tracer.StartSpan(metricsName, ext.RPCServerOption(clientContext))
		} else {
			// No usable span context was supplied; start a fresh span.
			span = tracer.StartSpan(metricsName)
		}
defer span.Finish()
req = req.WithContext(opentracing.ContextWithSpan(req.Context(), span))
h.ServeHTTP(w, req)
}
return http.HandlerFunc(withSpan)
}
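// Hedged client-side sketch (assumed, not part of this file): a caller that
// wants the span created above to be parented on its own span can inject its
// context into the outgoing request headers before sending:
//
//	carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
//	_ = opentracing.GlobalTracer().Inject(span.Context(), opentracing.HTTPHeaders, carrier)
//
// tracer.Extract in MakeInternalAPI then picks that context back up.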
// MakeFedAPI makes an http.Handler that checks matrix federation authentication.
func MakeFedAPI(
metricsName string,
serverName gomatrixserverlib.ServerName,
keyRing gomatrixserverlib.JSONVerifier,
wakeup *FederationWakeups,
f func(*http.Request, *gomatrixserverlib.FederationRequest, map[string]string) util.JSONResponse,
) http.Handler {
h := func(req *http.Request) util.JSONResponse {
fedReq, errResp := gomatrixserverlib.VerifyHTTPRequest(
req, time.Now(), serverName, keyRing,
)
if fedReq == nil {
return errResp
}
// add the user to Sentry, if enabled
hub := sentry.GetHubFromContext(req.Context())
if hub != nil {
hub.Scope().SetTag("origin", string(fedReq.Origin()))
hub.Scope().SetTag("uri", fedReq.RequestURI())
}
defer func() {
if r := recover(); r != nil {
if hub != nil {
hub.CaptureException(fmt.Errorf("%s panicked", req.URL.Path))
}
// re-panic to return the 500
panic(r)
}
}()
go wakeup.Wakeup(req.Context(), fedReq.Origin())
vars, err := URLDecodeMapValues(mux.Vars(req))
if err != nil {
return util.MatrixErrorResponse(400, "M_UNRECOGNISED", "badly encoded query params")
}
jsonRes := f(req, fedReq, vars)
// do not log 4xx as errors as they are client fails, not server fails
if hub != nil && jsonRes.Code >= 500 {
hub.Scope().SetExtra("response", jsonRes)
hub.CaptureException(fmt.Errorf("%s returned HTTP %d", req.URL.Path, jsonRes.Code))
}
return jsonRes
}
return MakeExternalAPI(metricsName, h)
}
type FederationWakeups struct {
FsAPI federationapiAPI.FederationInternalAPI
origins sync.Map
}
func (f *FederationWakeups) Wakeup(ctx context.Context, origin gomatrixserverlib.ServerName) {
key, keyok := f.origins.Load(origin)
if keyok {
lastTime, ok := key.(time.Time)
if ok && time.Since(lastTime) < time.Minute {
return
}
}
aliveReq := federationapiAPI.PerformServersAliveRequest{
Servers: []gomatrixserverlib.ServerName{origin},
}
aliveRes := federationapiAPI.PerformServersAliveResponse{}
if err := f.FsAPI.PerformServersAlive(ctx, &aliveReq, &aliveRes); err != nil {
util.GetLogger(ctx).WithError(err).WithFields(logrus.Fields{
"origin": origin,
}).Warn("incoming federation request failed to notify server alive")
} else {
f.origins.Store(origin, time.Now())
}
}
// WrapHandlerInBasicAuth adds basic auth to a handler. Only used for /metrics
func WrapHandlerInBasicAuth(h http.Handler, b BasicAuth) http.HandlerFunc {
if b.Username == "" || b.Password == "" {
logrus.Warn("Metrics are exposed without protection. Make sure you set up protection at proxy level.")
}
return func(w http.ResponseWriter, r *http.Request) {
// Serve without authorization if either Username or Password is unset
if b.Username == "" || b.Password == "" {
h.ServeHTTP(w, r)
return
}
user, pass, ok := r.BasicAuth()
if !ok || user != b.Username || pass != b.Password {
http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
return
}
h.ServeHTTP(w, r)
}
}
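// Hedged usage sketch (wiring assumed, not from this file): the Prometheus
// endpoint is typically protected like
//
//	router.Handle("/metrics", WrapHandlerInBasicAuth(promhttp.Handler(), basicAuth))
//
// where basicAuth is a BasicAuth value populated from the server config.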
// WrapHandlerInCORS adds CORS headers to all responses, including all error
// responses.
// Handles OPTIONS requests directly.
func WrapHandlerInCORS(h http.Handler) http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, Authorization")
if r.Method == http.MethodOptions && r.Header.Get("Access-Control-Request-Method") != "" {
			// It's easiest just to always return a 200 OK for everything. Whether
			// this is technically correct is debatable, but in the end this is what
			// a lot of other implementations (including Synapse) do, and clients
			// are perfectly happy with it.
w.WriteHeader(http.StatusOK)
} else {
h.ServeHTTP(w, r)
}
})
}
|
[
"\"DENDRITE_TRACE_HTTP\""
] |
[] |
[
"DENDRITE_TRACE_HTTP"
] |
[]
|
["DENDRITE_TRACE_HTTP"]
|
go
| 1 | 0 | |
run.py
|
import os
import argparse
# Select GPU 0; this must be set before TensorFlow initialises CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
import source.neuralnet as nn
import source.datamanager as dman
import source.tf_process as tfp
import source.stamper as stamper
stamper.print_stamp()
def main():
srnet = nn.SRNET()
dataset = dman.DataSet()
sess = tf.compat.v1.InteractiveSession()
sess.run(tf.compat.v1.global_variables_initializer())
saver = tf.compat.v1.train.Saver()
tfp.training(sess=sess, neuralnet=srnet, saver=saver, dataset=dataset, epochs=FLAGS.epoch, batch_size=FLAGS.batch)
tfp.validation(sess=sess, neuralnet=srnet, saver=saver, dataset=dataset)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=5000, help='-')
parser.add_argument('--batch', type=int, default=16, help='-')
FLAGS, unparsed = parser.parse_known_args()
main()
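# Hedged usage note: the exact command is assumed from the argparse flags above.
#   python run.py --epoch 5000 --batch 16
# trains SRNET for 5000 epochs with batch size 16 and then runs validation;
# GPU 0 is selected via CUDA_VISIBLE_DEVICES at the top of this file.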
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
pug/dj/crawlnmine/management/__init__.py
|
"""Copied from https://github.com/django/django/blob/stable/1.7.x/django/core/management/__init__.py
"""
from __future__ import unicode_literals
import collections
from importlib import import_module
import os
import sys
import django
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import (BaseCommand, CommandError,
CommandParser, handle_default_options)
from django.core.management.color import color_style
from django.utils import lru_cache
from django.utils import six
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
Returns an empty list if no commands are defined.
"""
command_dir = os.path.join(management_dir, 'commands')
try:
return [f[:-3] for f in os.listdir(command_dir)
if not f.startswith('_') and f.endswith('.py')]
except OSError:
return []
def load_command_class(app_name, name):
"""
Given a command name and an application name, returns the Command
class instance. All errors raised by the import process
(ImportError, AttributeError) are allowed to propagate.
"""
module = import_module('%s.management.commands.%s' % (app_name, name))
return module.Command()
@lru_cache.lru_cache(maxsize=None)
def get_commands():
"""
Returns a dictionary mapping command names to their callback applications.
This works by looking for a management.commands package in django.core, and
in each installed application -- if a commands package exists, all commands
in that package are registered.
Core commands are always included. If a settings module has been
specified, user-defined commands will also be included.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls.
"""
commands = dict((name, 'pug.crawlnmine') for name in find_commands(__path__[0]))
if not settings.configured:
return commands
for app_config in reversed(list(apps.get_app_configs())):
path = os.path.join(app_config.path, 'management')
commands.update(dict((name, app_config.name) for name in find_commands(path)))
return commands
def call_command(name, *args, **options):
"""
Calls the given command, with the given options and args/kwargs.
This is the primary API you should use for calling specific commands.
Some examples:
call_command('syncdb')
call_command('shell', plain=True)
call_command('sqlall', 'myapp')
"""
# Load the command object.
try:
app_name = get_commands()[name]
except KeyError:
raise CommandError("Unknown command: %r" % name)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
command = app_name
else:
command = load_command_class(app_name, name)
# Simulate argument parsing to get the option defaults (see #10080 for details).
parser = command.create_parser('', name)
if command.use_argparse:
defaults = parser.parse_args(args=args)
defaults = dict(defaults._get_kwargs(), **options)
else:
# Legacy optparse method
defaults, _ = parser.parse_args(args=[])
defaults = dict(defaults.__dict__, **options)
return command.execute(*args, **defaults)
class ManagementUtility(object):
"""
    Encapsulates the logic of jira.py and the other pug/bin/* utilities.
A ManagementUtility has a number of commands, which can be manipulated
by editing the self.commands dictionary.
"""
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.prog_name = os.path.basename(self.argv[0])
self.settings_exception = None
def main_help_text(self, commands_only=False):
"""
Returns the script's main help text, as a string.
"""
if commands_only:
usage = sorted(get_commands().keys())
else:
usage = [
"",
"Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
"",
"Available subcommands:",
]
commands_dict = collections.defaultdict(lambda: [])
for name, app in six.iteritems(get_commands()):
if app == 'pug.crawlnmine':
app = 'pug'
else:
app = app.rpartition('.')[-1]
commands_dict[app].append(name)
style = color_style()
for app in sorted(commands_dict.keys()):
usage.append("")
usage.append(style.NOTICE("[%s]" % app))
for name in sorted(commands_dict[app]):
usage.append(" %s" % name)
# Output an extra note if settings are not properly configured
if self.settings_exception is not None:
usage.append(style.NOTICE(
"Note that only Django core commands are listed "
"as settings are not properly configured (error: %s)."
% self.settings_exception))
return '\n'.join(usage)
def fetch_command(self, subcommand):
"""
Tries to fetch the given subcommand, printing a message with the
appropriate command called from the command line (usually
"jira.py") if it can't be found.
"""
# Get commands outside of try block to prevent swallowing exceptions
commands = get_commands()
try:
app_name = commands[subcommand]
except KeyError:
# This might trigger ImproperlyConfigured (masked in get_commands)
settings.INSTALLED_APPS
sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" %
(subcommand, self.prog_name))
sys.exit(1)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, subcommand)
return klass
def autocomplete(self):
"""
Output completion suggestions for BASH.
        The output of this function is passed to BASH's `COMPREPLY` variable
        and treated as completion suggestions. `COMPREPLY` expects a
        space-separated string as the result.
        The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
        to get information about the cli input. Please refer to the BASH
        man-page for more information about these variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'DJANGO_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
curr = cwords[cword - 1]
except IndexError:
curr = ''
subcommands = list(get_commands()) + ['help']
options = [('--help', None)]
# subcommand
if cword == 1:
print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
# subcommand options
# special case: the 'help' subcommand has no options
elif cwords[0] in subcommands and cwords[0] != 'help':
subcommand_cls = self.fetch_command(cwords[0])
# special case: 'runfcgi' stores additional options as
# 'key=value' pairs
if cwords[0] == 'runfcgi':
from django.core.servers.fastcgi import FASTCGI_OPTIONS
options += [(k, 1) for k in FASTCGI_OPTIONS]
# special case: add the names of installed apps to options
elif cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear',
'sqlcustom', 'sqlindexes', 'sqlsequencereset', 'test'):
try:
app_configs = apps.get_app_configs()
# Get the last part of the dotted path as the app name.
options += [(app_config.label, 0) for app_config in app_configs]
except ImportError:
# Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
# user will find out once they execute the command.
pass
parser = subcommand_cls.create_parser('', cwords[0])
if subcommand_cls.use_argparse:
options += [(sorted(s_opt.option_strings)[0], s_opt.nargs != 0) for s_opt in
parser._actions if s_opt.option_strings]
else:
options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in
parser.option_list]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [opt for opt in options if opt[0] not in prev_opts]
# filter options by current input
options = sorted((k, v) for k, v in options if k.startswith(curr))
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
sys.exit(1)
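    # Hedged example of the completion protocol described in the docstring above
    # (shell values are illustrative): with the bash completion hook sourced, a
    # TAB press roughly amounts to
    #   DJANGO_AUTO_COMPLETE=1 COMP_WORDS="jira.py sql" COMP_CWORD=1 jira.py
    # which prints the registered subcommands starting with "sql" (if any) and exits.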
def execute(self):
"""
Given the command-line arguments, this figures out which subcommand is
being run, creates a parser appropriate to that command, and runs it.
"""
try:
subcommand = self.argv[1]
except IndexError:
subcommand = 'help' # Display help if no arguments were given.
# Preprocess options to extract --settings and --pythonpath.
# These options could affect the commands that are available, so they
# must be processed early.
parser = CommandParser(None, usage="%(prog)s subcommand [options] [args]", add_help=False)
parser.add_argument('--settings')
parser.add_argument('--pythonpath')
parser.add_argument('args', nargs='*') # catch-all
try:
options, args = parser.parse_known_args(self.argv[2:])
handle_default_options(options)
except CommandError:
pass # Ignore any option errors at this point.
no_settings_commands = [
'help', 'version', '--help', '--version', '-h',
'compilemessages', 'makemessages',
'startapp', 'startproject',
]
try:
settings.INSTALLED_APPS
except ImproperlyConfigured as exc:
self.settings_exception = exc
# A handful of built-in management commands work without settings.
# Load the default settings -- where INSTALLED_APPS is empty.
if subcommand in no_settings_commands:
settings.configure()
if settings.configured:
django.setup()
self.autocomplete()
if subcommand == 'help':
if '--commands' in args:
sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
elif len(options.args) < 1:
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0])
# Special cases for 'jira.py --version' and 'jira.py --help' to work.
elif subcommand == 'version' or self.argv[1:] == ['--version']:
sys.stdout.write(django.get_version() + '\n')
elif self.argv[1:] in (['--help'], ['-h']):
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(subcommand).run_from_argv(self.argv)
def execute_from_command_line(argv=None):
"""
A simple method that runs a ManagementUtility.
"""
utility = ManagementUtility(argv)
utility.execute()
|
[] |
[] |
[
"COMP_WORDS",
"COMP_CWORD"
] |
[]
|
["COMP_WORDS", "COMP_CWORD"]
|
python
| 2 | 0 | |
colorapp/src/gateway/main.go
|
package main
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"math"
"net"
"net/http"
"os"
"strings"
"sync"
"github.com/aws/aws-xray-sdk-go/xray"
"github.com/pkg/errors"
)
const defaultPort = "8080"
const defaultStage = "default"
const maxColors = 1000
var colors [maxColors]string
var colorsIdx int
var colorsMutext = &sync.Mutex{}
func getServerPort() string {
port := os.Getenv("SERVER_PORT")
if port != "" {
return port
}
return defaultPort
}
func getStage() string {
stage := os.Getenv("STAGE")
if stage != "" {
return stage
}
return defaultStage
}
func getColorTellerEndpoint() (string, error) {
colorTellerEndpoint := os.Getenv("COLOR_TELLER_ENDPOINT")
if colorTellerEndpoint == "" {
return "", errors.New("COLOR_TELLER_ENDPOINT is not set")
}
return colorTellerEndpoint, nil
}
func xrayEnabled() bool {
enabled := os.Getenv("ENABLE_ENVOY_XRAY_TRACING")
return enabled == "1"
}
type colorHandler struct{}
func (h *colorHandler) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
color, err := getColorFromColorTeller(request)
if err != nil {
log.Printf("Error fetching color, err: %s", err)
writer.WriteHeader(http.StatusInternalServerError)
writer.Write([]byte("500 - Unexpected Error"))
return
}
colorsMutext.Lock()
defer colorsMutext.Unlock()
addColor(color)
statsJson, err := json.Marshal(getRatios())
if err != nil {
fmt.Fprintf(writer, `{"color": "%s", "error": "%s"}`, color, err)
return
}
fmt.Fprintf(writer, `{"color": "%s", "stats": %s}`, color, statsJson)
}
func addColor(color string) {
colors[colorsIdx] = color
colorsIdx += 1
if colorsIdx >= maxColors {
colorsIdx = 0
}
}
func getRatios() map[string]float64 {
counts := make(map[string]int)
var total = 0
for _, c := range colors {
if c != "" {
counts[c] += 1
total += 1
}
}
ratios := make(map[string]float64)
for k, v := range counts {
ratio := float64(v) / float64(total)
ratios[k] = math.Round(ratio*100) / 100
}
return ratios
}
type clearColorStatsHandler struct{}
func (h *clearColorStatsHandler) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
colorsMutext.Lock()
defer colorsMutext.Unlock()
colorsIdx = 0
for i := range colors {
colors[i] = ""
}
fmt.Fprint(writer, "cleared")
}
func getColorFromColorTeller(request *http.Request) (string, error) {
colorTellerEndpoint, err := getColorTellerEndpoint()
if err != nil {
return "-n/a-", err
}
var client *http.Client
if xrayEnabled() {
client = xray.Client(&http.Client{})
} else {
client = &http.Client{}
}
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s", colorTellerEndpoint), nil)
if err != nil {
return "-n/a-", err
}
resp, err := client.Do(req.WithContext(request.Context()))
if err != nil {
return "-n/a-", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "-n/a-", err
}
color := strings.TrimSpace(string(body))
if len(color) < 1 {
return "-n/a-", errors.New("Empty response from colorTeller")
}
return color, nil
}
func getTCPEchoEndpoint() (string, error) {
tcpEchoEndpoint := os.Getenv("TCP_ECHO_ENDPOINT")
if tcpEchoEndpoint == "" {
return "", errors.New("TCP_ECHO_ENDPOINT is not set")
}
return tcpEchoEndpoint, nil
}
type tcpEchoHandler struct{}
func (h *tcpEchoHandler) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
endpoint, err := getTCPEchoEndpoint()
if err != nil {
writer.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(writer, "tcpecho endpoint is not set")
return
}
log.Printf("Dialing tcp endpoint %s", endpoint)
conn, err := net.Dial("tcp", endpoint)
if err != nil {
writer.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(writer, "Dial failed, err:%s", err.Error())
return
}
defer conn.Close()
strEcho := "Hello from gateway"
log.Printf("Writing '%s'", strEcho)
	_, err = fmt.Fprint(conn, strEcho)
if err != nil {
writer.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(writer, "Write to server failed, err:%s", err.Error())
return
}
reply, err := bufio.NewReader(conn).ReadString('\n')
if err != nil {
writer.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(writer, "Read from server failed, err:%s", err.Error())
return
}
fmt.Fprintf(writer, "Response from tcpecho server: %s", reply)
}
type pingHandler struct{}
func (h *pingHandler) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
log.Println("ping requested, reponding with HTTP 200")
writer.WriteHeader(http.StatusOK)
}
func main() {
log.Println("Starting server, listening on port " + getServerPort())
colorTellerEndpoint, err := getColorTellerEndpoint()
if err != nil {
log.Fatalln(err)
}
tcpEchoEndpoint, err := getTCPEchoEndpoint()
if err != nil {
log.Println(err)
}
log.Println("Using color-teller at " + colorTellerEndpoint)
log.Println("Using tcp-echo at " + tcpEchoEndpoint)
handlers := map[string]http.Handler {
"/color": &colorHandler{},
"/color/clear": &clearColorStatsHandler{},
"/tcpecho": &tcpEchoHandler{},
"/ping": &pingHandler{},
}
if xrayEnabled() {
log.Println("xray tracing enabled")
xraySegmentNamer := xray.NewFixedSegmentNamer(fmt.Sprintf("%s-gateway", getStage()))
for route, handler := range handlers {
handlers[route] = xray.Handler(xraySegmentNamer, handler)
}
}
for route, handler := range handlers {
http.Handle(route, handler)
}
log.Fatal(http.ListenAndServe(":"+getServerPort(), nil))
}
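// Hedged example environment for the getters above (values are illustrative;
// only the variable names come from this file):
//
//	SERVER_PORT=8080 STAGE=demo \
//	COLOR_TELLER_ENDPOINT=colorteller.local:9080 \
//	TCP_ECHO_ENDPOINT=tcpecho.local:2701 \
//	ENABLE_ENVOY_XRAY_TRACING=0 ./gateway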
|
[
"\"SERVER_PORT\"",
"\"STAGE\"",
"\"COLOR_TELLER_ENDPOINT\"",
"\"ENABLE_ENVOY_XRAY_TRACING\"",
"\"TCP_ECHO_ENDPOINT\""
] |
[] |
[
"COLOR_TELLER_ENDPOINT",
"SERVER_PORT",
"ENABLE_ENVOY_XRAY_TRACING",
"TCP_ECHO_ENDPOINT",
"STAGE"
] |
[]
|
["COLOR_TELLER_ENDPOINT", "SERVER_PORT", "ENABLE_ENVOY_XRAY_TRACING", "TCP_ECHO_ENDPOINT", "STAGE"]
|
go
| 5 | 0 | |
aries_cloudagent/commands/start.py
|
"""Entrypoint."""
import asyncio
import functools
import logging
import os
import signal
from argparse import ArgumentParser
from typing import Coroutine, Sequence
try:
import uvloop
except ImportError:
uvloop = None
from ..core.conductor import Conductor
from ..config import argparse as arg
from ..config.default_context import DefaultContextBuilder
from ..config.util import common_config
LOGGER = logging.getLogger(__name__)
async def start_app(conductor: Conductor):
"""Start up."""
await conductor.setup()
await conductor.start()
async def shutdown_app(conductor: Conductor):
"""Shut down."""
print("\nShutting down")
await conductor.stop()
def init_argument_parser(parser: ArgumentParser):
"""Initialize an argument parser with the module's arguments."""
return arg.load_argument_groups(parser, *arg.group.get_registered(arg.CAT_START))
def execute(argv: Sequence[str] = None):
"""Entrypoint."""
parser = ArgumentParser()
parser.prog += " start"
get_settings = init_argument_parser(parser)
args = parser.parse_args(argv)
settings = get_settings(args)
common_config(settings)
# Support WEBHOOK_URL environment variable
webhook_url = os.environ.get("WEBHOOK_URL")
if webhook_url:
webhook_urls = list(settings.get("admin.webhook_urls") or [])
webhook_urls.append(webhook_url)
settings["admin.webhook_urls"] = webhook_urls
# Create the Conductor instance
context_builder = DefaultContextBuilder(settings)
conductor = Conductor(context_builder)
# Run the application
if uvloop:
uvloop.install()
print("uvloop installed")
run_loop(start_app(conductor), shutdown_app(conductor))
def run_loop(startup: Coroutine, shutdown: Coroutine):
"""Execute the application, handling signals and ctrl-c."""
async def init(cleanup):
"""Perform startup, terminating if an exception occurs."""
try:
await startup
except Exception:
LOGGER.exception("Exception during startup:")
cleanup()
async def done():
"""Run shutdown and clean up any outstanding tasks."""
await shutdown
tasks = [
task
for task in asyncio.Task.all_tasks()
if task is not asyncio.Task.current_task()
]
for task in tasks:
task.cancel()
if tasks:
await asyncio.gather(*tasks, return_exceptions=True)
asyncio.get_event_loop().stop()
loop = asyncio.get_event_loop()
cleanup = functools.partial(asyncio.ensure_future, done(), loop=loop)
loop.add_signal_handler(signal.SIGTERM, cleanup)
asyncio.ensure_future(init(cleanup), loop=loop)
try:
loop.run_forever()
except KeyboardInterrupt:
loop.run_until_complete(done())
if __name__ == "__main__":
execute()
|
[] |
[] |
[
"WEBHOOK_URL"
] |
[]
|
["WEBHOOK_URL"]
|
python
| 1 | 0 | |
lvmd/vgservice_test.go
|
package lvmd
import (
"context"
"os"
"testing"
"time"
"github.com/cybozu-go/topolvm/lvmd/command"
"github.com/cybozu-go/topolvm/lvmd/proto"
"google.golang.org/grpc/metadata"
)
type mockWatchServer struct {
ch chan struct{}
ctx context.Context
}
func (s *mockWatchServer) Send(r *proto.WatchResponse) error {
s.ch <- struct{}{}
return nil
}
func (s *mockWatchServer) SetHeader(metadata.MD) error {
panic("implement me")
}
func (s *mockWatchServer) SendHeader(metadata.MD) error {
panic("implement me")
}
func (s *mockWatchServer) SetTrailer(metadata.MD) {
panic("implement me")
}
func (s *mockWatchServer) Context() context.Context {
return s.ctx
}
func (s *mockWatchServer) SendMsg(m interface{}) error {
panic("implement me")
}
func (s *mockWatchServer) RecvMsg(m interface{}) error {
panic("implement me")
}
func testWatch(t *testing.T, vg *command.VolumeGroup) {
ctx, cancel := context.WithCancel(context.Background())
vgService, notifier := NewVGService(vg, 1)
ch1 := make(chan struct{})
server1 := &mockWatchServer{
ctx: ctx,
ch: ch1,
}
done := make(chan struct{})
go func() {
vgService.Watch(&proto.Empty{}, server1)
done <- struct{}{}
}()
select {
case <-ch1:
case <-time.After(1 * time.Second):
t.Fatal("not received the first event")
}
notifier()
select {
case <-ch1:
case <-time.After(1 * time.Second):
t.Fatal("not received")
}
select {
case <-ch1:
t.Fatal("unexpected event")
default:
}
ch2 := make(chan struct{})
server2 := &mockWatchServer{
ctx: ctx,
ch: ch2,
}
go func() {
vgService.Watch(&proto.Empty{}, server2)
}()
notifier()
select {
case <-ch1:
case <-time.After(1 * time.Second):
t.Fatal("not received")
}
select {
case <-ch2:
case <-time.After(1 * time.Second):
t.Fatal("not received")
}
cancel()
select {
case <-done:
case <-time.After(1 * time.Second):
t.Fatal("not done")
}
}
func testVGService(t *testing.T, vg *command.VolumeGroup) {
vgService, _ := NewVGService(vg, 1)
res, err := vgService.GetLVList(context.Background(), &proto.Empty{})
if err != nil {
t.Fatal(err)
}
numVols1 := len(res.GetVolumes())
if numVols1 != 0 {
t.Errorf("numVolumes must be 0: %d", numVols1)
}
testtag := "testtag"
_, err = vg.CreateVolume("test1", 1<<30, []string{testtag})
if err != nil {
t.Fatal(err)
}
res, err = vgService.GetLVList(context.Background(), &proto.Empty{})
if err != nil {
t.Fatal(err)
}
numVols2 := len(res.GetVolumes())
if numVols2 != 1 {
t.Fatalf("numVolumes must be 1: %d", numVols2)
}
vol := res.GetVolumes()[0]
if vol.GetName() != "test1" {
t.Errorf(`Volume.Name != "test1": %s`, vol.GetName())
}
if vol.GetSizeGb() != 1 {
t.Errorf(`Volume.SizeGb != 1: %d`, vol.GetSizeGb())
}
if len(vol.GetTags()) != 1 {
t.Fatalf("number of tags must be 1")
}
if vol.GetTags()[0] != testtag {
t.Errorf(`Volume.Tags[0] != %s: %v`, testtag, vol.GetTags())
}
res2, err := vgService.GetFreeBytes(context.Background(), &proto.Empty{})
if err != nil {
t.Fatal(err)
}
freeBytes, err := vg.Free()
if err != nil {
t.Fatal(err)
}
expected := freeBytes - (1 << 30)
if res2.GetFreeBytes() != expected {
t.Errorf("Free bytes mismatch: %d", res2.GetFreeBytes())
}
}
func TestVGService(t *testing.T) {
uid := os.Getuid()
if uid != 0 {
t.Skip("run as root")
}
circleci := os.Getenv("CIRCLECI") == "true"
if circleci {
executorType := os.Getenv("CIRCLECI_EXECUTOR")
if executorType != "machine" {
t.Skip("run on machine executor")
}
}
vgName := "test_vgservice"
loop, err := MakeLoopbackVG(vgName)
if err != nil {
t.Fatal(err)
}
defer CleanLoopbackVG(loop, vgName)
vg, err := command.FindVolumeGroup(vgName)
if err != nil {
t.Fatal(err)
}
t.Run("VGService", func(t *testing.T) {
testVGService(t, vg)
})
t.Run("Watch", func(t *testing.T) {
testWatch(t, vg)
})
}
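// Hedged note on running these tests (command is assumed): they skip themselves
// unless run as root, e.g.
//
//	sudo -E go test -run TestVGService ./lvmd/
//
// and on CircleCI they additionally require the machine executor
// (CIRCLECI_EXECUTOR=machine), as checked in TestVGService above.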
|
[
"\"CIRCLECI\"",
"\"CIRCLECI_EXECUTOR\""
] |
[] |
[
"CIRCLECI_EXECUTOR",
"CIRCLECI"
] |
[]
|
["CIRCLECI_EXECUTOR", "CIRCLECI"]
|
go
| 2 | 0 |