filename (string, len 4–198) | content (string, len 25–939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, len 2–3.9k) | lang (string, 3 classes) | constargcount (float64, 0–129 or ⌀) | variableargcount (float64, 0 or ⌀) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
run_example.py | import models.SingleLayer_net as single_layer
import loss_functions.rank_based_loss as rbl
# import wandb
import torch
import utils.data_functions as df
import os
import json
import pandas as pd
import csv
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
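# pins this process to the GPU with index 3; adjust or remove for your machine.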
# wandb.init(project='example')
exp_name = 'example'
# wandb.run.name = exp_name
standardized_data = True
save_training_embeddings_to_plot = True
shuffle = False
drop_last = False
experiments_folder ="./example_data"
initial_embeddings_path = os.path.join(experiments_folder, 'Normalized_VGGish_embeddings_based_on_Training_Set')
train_initial_embeddings_path = os.path.join(initial_embeddings_path, 'train')
val_initial_embeddings_path = os.path.join(initial_embeddings_path, 'val')
test_initial_embeddings_path = os.path.join(initial_embeddings_path, 'test')
results_folder = os.path.join(experiments_folder, "results_"+exp_name)
checkpoints_folder = os.path.join(results_folder, "checkpoints")
if not os.path.exists(checkpoints_folder):
os.makedirs(checkpoints_folder)
if save_training_embeddings_to_plot:
if not os.path.exists(os.path.join(checkpoints_folder, "Embeddings_plot")):
os.mkdir(os.path.join(checkpoints_folder, "Embeddings_plot"))
train_df = pd.read_csv(os.path.join(experiments_folder, 'train.csv'), dtype = str)
val_df = pd.read_csv(os.path.join(experiments_folder, 'val.csv'), dtype = str)
test_df = pd.read_csv(os.path.join(experiments_folder, 'test.csv'), dtype = str)
configs = {"EMBEDDINGS_SIZE" : 128,
"output_EMBEDDINGS_SIZE" :3,
"EARLY_STOPPING_PTC" : 20,
"LR" : 1e-5,
"BATCH_SIZE" : 12,
"n_epochs" : 100,
}
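# EMBEDDINGS_SIZE matches the 128-d VGGish input embeddings and the model
# projects them to 3-d outputs; EARLY_STOPPING_PTC is presumably the
# early-stopping patience, in epochs.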
params = {'batch_size': configs["BATCH_SIZE"], 'shuffle': shuffle, 'drop_last': drop_last}
training_set = df.RankBasedLossHierarchicalLabelsEmbeddings(train_df, train_initial_embeddings_path, target_labels='hierarchical_labels')#,'species','taxon'])
training_generator = torch.utils.data.DataLoader(training_set, **params)
len_train = len(training_set)
validation_set = df.RankBasedLossHierarchicalLabelsEmbeddings(val_df , val_initial_embeddings_path, target_labels='hierarchical_labels')#,'species','taxon'])
params_val = {'batch_size': configs["BATCH_SIZE"], 'shuffle': False, 'drop_last': False}
validation_generator = torch.utils.data.DataLoader(validation_set, **params_val)
len_val = len(validation_set)
model = single_layer.SingleLayerHypersphereConstraint(configs)
# wandb.watch(model)
# wandb.config = configs
# wandb.config["architecture"] = "LinLayer_cosinedist"
# wandb.config["dataset"] = "TuT"
with open(os.path.join(results_folder, 'configs_dict'), "w") as c:
json.dump(configs, c)
checkpoint_name = rbl.train_RbL(model, training_generator, validation_generator,
checkpoints_folder, configs['EARLY_STOPPING_PTC'], save_training_embeddings_to_plot,
configs['n_epochs'], configs, distance='cosine',
number_of_ranks = 4)
print( "\nFinished training, will now use the checkpoint to generate embeddings for the test set:")
# Predict with checkpoint:
# if save_embeddings_to_plot:
if not os.path.exists(os.path.join(results_folder, "test_Embeddings_plot")):
os.mkdir(os.path.join(results_folder, "test_Embeddings_plot"))
test_set = df.RankBasedLossHierarchicalLabelsEmbeddings(test_df, test_initial_embeddings_path, target_labels = 'hierarchical_labels')
test_generator = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False)
len_test = len(test_set)
# load the checkpoint, configs and model
with open(os.path.join(results_folder, "configs_dict") )as c:
configs = json.load(c)
model = single_layer.SingleLayerHypersphereConstraint(configs)
model.load_state_dict(torch.load(checkpoint_name)["net_dict"])
sil_id, sil_species = rbl.predict(model, test_generator, configs, results_folder)
print("sil_fine level", sil_id)
print('sil_coarse level', sil_species)
with open(os.path.join(results_folder, 'silhouettes_on_test_set.csv'), 'w') as fout:
writer = csv.writer(fout)
writer.writerow(['sil_fine_level', str(sil_id)])
    writer.writerow(['sil_coarse_level', str(sil_species)]) | [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
mars/services/scheduling/worker/tests/test_workerslot.py | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import sys
import time
from typing import Tuple, Union
import psutil
import pytest
import pandas as pd
from ..... import oscar as mo
from .....oscar import ServerClosed
from .....oscar.errors import NoFreeSlot, SlotStateError
from .....oscar.backends.allocate_strategy import IdleLabel
from .....utils import get_next_port
from ...supervisor import GlobalSlotManagerActor
from ...worker import BandSlotManagerActor, BandSlotControlActor
class MockGlobalSlotManagerActor(mo.Actor):
def __init__(self):
self._result = None
@mo.extensible
def update_subtask_slots(self, band: Tuple, session_id: str, subtask_id: str, slots: int):
self._result = (band, session_id, subtask_id, slots)
def get_result(self):
return self._result
@pytest.fixture
async def actor_pool(request):
start_method = os.environ.get('POOL_START_METHOD', 'forkserver') \
if sys.platform != 'win32' else None
n_slots = request.param
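    # request.param is the slot count passed by each test's
    # @pytest.mark.parametrize('actor_pool', [...], indirect=True) marker.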
pool = await mo.create_actor_pool(
f'127.0.0.1:{get_next_port()}', n_process=n_slots,
labels=[None] + ['numa-0'] * n_slots,
subprocess_start_method=start_method)
async with pool:
global_slots_ref = await mo.create_actor(
MockGlobalSlotManagerActor, uid=GlobalSlotManagerActor.default_uid(),
address=pool.external_address)
slot_manager_ref = await mo.create_actor(
BandSlotManagerActor,
(pool.external_address, 'numa-0'), n_slots, global_slots_ref,
uid=BandSlotManagerActor.gen_uid('numa-0'),
address=pool.external_address)
try:
yield pool, slot_manager_ref
finally:
await slot_manager_ref.destroy()
ActorPoolType = Tuple[mo.MainActorPoolType, Union[BandSlotManagerActor, mo.ActorRef]]
class TaskActor(mo.Actor):
def __init__(self, call_logs, slot_id=0):
self._call_logs = call_logs
self._dispatch_ref = None
self._slot_id = slot_id
@classmethod
def gen_uid(cls, slot_id):
return f'{slot_id}_task_actor'
async def __post_create__(self):
self._dispatch_ref = await mo.actor_ref(
BandSlotManagerActor.gen_uid('numa-0'), address=self.address)
await self._dispatch_ref.register_slot.tell(self._slot_id, os.getpid())
async def queued_call(self, key, session_stid, delay):
try:
self._call_logs[key] = time.time()
await asyncio.sleep(delay)
finally:
if session_stid is not None:
await self._dispatch_ref.release_free_slot(self._slot_id, session_stid)
def get_call_logs(self):
return self._call_logs
@pytest.mark.asyncio
@pytest.mark.parametrize('actor_pool', [0], indirect=True)
async def test_slot_assign(actor_pool: ActorPoolType):
pool, slot_manager_ref = actor_pool
call_logs = dict()
group_size = 4
delay = 1
await asyncio.gather(*(
mo.create_actor(TaskActor, call_logs, slot_id=slot_id,
uid=TaskActor.gen_uid(slot_id), address=pool.external_address)
for slot_id in range(group_size)
))
assert len((await slot_manager_ref.dump_data()).free_slots) == group_size
async def task_fun(idx):
session_stid = ('session_id', f'subtask_id{idx}')
slot_id = await slot_manager_ref.acquire_free_slot(session_stid)
assert slot_id == await slot_manager_ref.get_subtask_slot(session_stid)
ref = await mo.actor_ref(uid=TaskActor.gen_uid(slot_id), address=pool.external_address)
await ref.queued_call(idx, session_stid, delay)
tasks = []
start_time = time.time()
for idx in range(group_size + 1):
tasks.append(asyncio.create_task(task_fun(idx)))
await asyncio.gather(*tasks)
log_series = pd.Series(call_logs).sort_index() - start_time
assert len(log_series) == group_size + 1
assert log_series.iloc[:group_size].max() < delay / 4
assert log_series.iloc[group_size:].min() > delay / 4
call_logs.clear()
tasks = []
start_time = time.time()
for idx in range(group_size * 2 + 1):
tasks.append(asyncio.create_task(task_fun(idx)))
await asyncio.sleep(delay / 10)
tasks[group_size].cancel()
await asyncio.wait(tasks)
with pytest.raises(asyncio.CancelledError):
tasks[group_size].result()
log_series = pd.Series(call_logs).sort_index() - start_time
assert len(log_series) == group_size * 2
assert log_series.iloc[:group_size].max() < delay / 4
assert log_series.iloc[group_size:].min() > delay / 4
@pytest.mark.asyncio
@pytest.mark.parametrize('actor_pool', [1], indirect=True)
async def test_slot_kill(actor_pool: ActorPoolType):
pool, slot_manager_ref = actor_pool
strategy = IdleLabel('numa-0', 'task_actor')
task_ref = await mo.create_actor(TaskActor, {},
allocate_strategy=strategy,
address=pool.external_address)
assert await mo.actor_ref(BandSlotControlActor.gen_uid('numa-0', 0),
address=pool.external_address)
delayed_task = asyncio.create_task(task_ref.queued_call('key', None, 10))
await asyncio.sleep(0.1)
# check if process hosting the actor is closed
kill_task = asyncio.create_task(slot_manager_ref.kill_slot(0))
await asyncio.sleep(0)
kill_task2 = asyncio.create_task(slot_manager_ref.kill_slot(0))
with pytest.raises(ServerClosed):
await delayed_task
# check if slot actor is restored
await kill_task
# check if secondary task makes no change
await kill_task2
assert await mo.actor_ref(BandSlotControlActor.gen_uid('numa-0', 0),
address=pool.external_address)
assert await mo.actor_ref(task_ref)
@pytest.mark.asyncio
@pytest.mark.parametrize('actor_pool', [3], indirect=True)
async def test_slot_restart(actor_pool: ActorPoolType):
pool, slot_manager_ref = actor_pool
strategy = IdleLabel('numa-0', 'task_actor')
task_refs = []
for idx in range(3):
ref = await mo.create_actor(
TaskActor, {}, slot_id=idx, allocate_strategy=strategy,
address=pool.external_address)
await ref.queued_call('idx', None, idx)
task_refs.append(ref)
await slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id1'))
slot_id2 = await slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id2'))
await slot_manager_ref.release_free_slot(slot_id2, ('session_id', 'subtask_id2'))
async def record_finish_time(coro):
await coro
return time.time()
restart_task1 = asyncio.create_task(record_finish_time(
slot_manager_ref.restart_free_slots()))
await asyncio.sleep(0)
restart_task2 = asyncio.create_task(record_finish_time(
slot_manager_ref.restart_free_slots()))
acquire_task = asyncio.create_task(record_finish_time(
slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id3'))))
await asyncio.gather(restart_task1, restart_task2, acquire_task)
# check only slots with running records are restarted
assert len(await task_refs[0].get_call_logs()) > 0
assert len(await task_refs[1].get_call_logs()) == 0
assert len(await task_refs[2].get_call_logs()) > 0
assert abs(restart_task1.result() - acquire_task.result()) < 0.1
@pytest.mark.asyncio
@pytest.mark.parametrize('actor_pool', [1], indirect=True)
async def test_report_usage(actor_pool: ActorPoolType):
pool, slot_manager_ref = actor_pool
await slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id'))
await asyncio.sleep(1.3)
global_slot_ref = await mo.actor_ref(uid=GlobalSlotManagerActor.default_uid(),
address=pool.external_address)
_band, session_id, subtask_id, slots = await global_slot_ref.get_result()
assert slots == pytest.approx(1.0)
assert session_id == 'session_id'
assert subtask_id == 'subtask_id'
@pytest.mark.asyncio
@pytest.mark.parametrize('actor_pool', [1], indirect=True)
async def test_slot_fault_tolerance(actor_pool: ActorPoolType):
pool, slot_manager_ref = actor_pool
# acquire -> slot restarted = can't acquire more.
slot_id = await slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id'))
await slot_manager_ref.register_slot(slot_id, os.getpid())
with pytest.raises(NoFreeSlot):
await slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id'), block=False)
await slot_manager_ref.release_free_slot(slot_id, ('session_id', 'subtask_id'))
# acquire -> release -> slot restarted = can only acquire once.
slot_id = await slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id2'))
await slot_manager_ref.release_free_slot(slot_id, ('session_id', 'subtask_id2'))
await slot_manager_ref.register_slot(slot_id, os.getpid())
await slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id2'))
with pytest.raises(NoFreeSlot):
await slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id2'), block=False)
await slot_manager_ref.release_free_slot(slot_id, ('session_id', 'subtask_id2'))
# acquire -> release -> acquire -> slot restarted = can't acquire more.
slot_id = await slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id3'))
await slot_manager_ref.release_free_slot(slot_id, ('session_id', 'subtask_id3'))
await slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id3'))
await slot_manager_ref.register_slot(slot_id, os.getpid())
with pytest.raises(NoFreeSlot):
await slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id3'), block=False)
await slot_manager_ref.release_free_slot(slot_id, ('session_id', 'subtask_id3'))
@pytest.mark.asyncio
@pytest.mark.parametrize('actor_pool', [1], indirect=True)
async def test_slot_exception(actor_pool: ActorPoolType):
pool, slot_manager_ref = actor_pool
# make sure the BandSlotControlActor has registered.
slot_id = await slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id'))
await slot_manager_ref.release_free_slot(slot_id, ('session_id', 'subtask_id'))
if sys.platform == 'win32':
with pytest.raises(ValueError):
await slot_manager_ref.register_slot(1, -1)
else:
with pytest.raises((psutil.AccessDenied, psutil.NoSuchProcess)):
await slot_manager_ref.register_slot(1, 0)
dump_data = await slot_manager_ref.dump_data()
# after the register_slot is correctly handled,
# we can assert 1 not in free slots.
assert 1 in dump_data.free_slots
slot_id = await slot_manager_ref.acquire_free_slot(('session_id', 'subtask_id'))
with pytest.raises(SlotStateError):
# release session_stid not matched the acquired value.
await slot_manager_ref.release_free_slot(slot_id, ('session_id', 'subtask_id1'))
dump_data = await slot_manager_ref.dump_data()
# the slot is not released.
assert slot_id not in dump_data.free_slots
not_acquired_slot = next(iter(dump_data.free_slots))
with pytest.raises(SlotStateError):
        await slot_manager_ref.release_free_slot(not_acquired_slot, ('session_id', 'subtask_id1'))
| [] | [] | ["POOL_START_METHOD"] | [] | ["POOL_START_METHOD"] | python | 1 | 0 | |
src/main/java/com/upscale/front/config/HerokuDatabaseConfiguration.java | package com.upscale.front.config;
import com.zaxxer.hikari.HikariDataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.jdbc.DataSourceBuilder;
import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.ApplicationContextException;
import org.springframework.context.annotation.*;
import javax.sql.DataSource;
@Configuration
@Profile(Constants.SPRING_PROFILE_HEROKU)
public class HerokuDatabaseConfiguration {
private final Logger log = LoggerFactory.getLogger(HerokuDatabaseConfiguration.class);
@Bean
@ConfigurationProperties(prefix = "spring.datasource.hikari")
public DataSource dataSource(DataSourceProperties dataSourceProperties) {
log.debug("Configuring Heroku Datasource");
String herokuUrl = System.getenv("JDBC_DATABASE_URL");
if (herokuUrl != null) {
return DataSourceBuilder
.create(dataSourceProperties.getClassLoader())
.type(HikariDataSource.class)
.url(herokuUrl)
.build();
} else {
throw new ApplicationContextException("Heroku database URL is not configured, you must set $JDBC_DATABASE_URL");
}
}
}
| ["JDBC_DATABASE_URL"] | [] | ["JDBC_DATABASE_URL"] | [] | ["JDBC_DATABASE_URL"] | java | 1 | 0 | |
benchmarks/run.py | import os
import taichi as ti
def get_benchmark_dir():
return os.path.join(ti.core.get_repo_dir(), 'benchmarks')
class Case:
def __init__(self, name, func):
self.name = name
self.func = func
self.records = {}
def __lt__(self, other):
return self.name < other.name
def __eq__(self, other):
return self.name == other.name
def run(self):
print(f'==> {self.name}:')
os.environ['TI_CURRENT_BENCHMARK'] = self.name
self.func()
class Suite:
def __init__(self, filename):
self.cases = []
print(filename)
self.name = filename[:-3]
loc = {}
exec(f'import {self.name} as suite', {}, loc)
suite = loc['suite']
case_keys = list(
sorted(filter(lambda x: x.startswith('benchmark_'), dir(suite))))
self.cases = [Case(k, getattr(suite, k)) for k in case_keys]
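        # every module-level attribute named benchmark_* becomes a Case,
        # run in sorted order.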
def run(self):
print(f'{self.name}:')
for case in sorted(self.cases):
case.run()
class TaichiBenchmark:
def __init__(self):
self.suites = []
benchmark_dir = get_benchmark_dir()
for f in map(os.path.basename, sorted(os.listdir(benchmark_dir))):
if f != 'run.py' and f.endswith('.py') and f[0] != '_':
self.suites.append(Suite(f))
def run(self):
output_dir = os.environ.get('TI_BENCHMARK_OUTPUT_DIR', '.')
filename = f'{output_dir}/benchmark.yml'
try:
with open(filename, 'r+') as f:
f.truncate() # clear the previous result
except FileNotFoundError:
pass
print("Running...")
for s in self.suites:
s.run()
b = TaichiBenchmark()
b.run()
| [] | [] | ["TI_CURRENT_BENCHMARK", "TI_BENCHMARK_OUTPUT_DIR"] | [] | ["TI_CURRENT_BENCHMARK", "TI_BENCHMARK_OUTPUT_DIR"] | python | 2 | 0 | |
BaseTools/Source/Python/GenFds/GenFds.py | ## @file
# generate flash image
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from optparse import OptionParser
import sys
import Common.LongFilePathOs as os
import linecache
import FdfParser
import Common.BuildToolError as BuildToolError
from GenFdsGlobalVariable import GenFdsGlobalVariable
from Workspace.WorkspaceDatabase import WorkspaceDatabase
from Workspace.BuildClassObject import PcdClassObject
from Workspace.BuildClassObject import ModuleBuildClassObject
import RuleComplexFile
from EfiSection import EfiSection
import StringIO
import Common.TargetTxtClassObject as TargetTxtClassObject
import Common.ToolDefClassObject as ToolDefClassObject
from Common.DataType import *
import Common.GlobalData as GlobalData
from Common import EdkLogger
from Common.String import *
from Common.Misc import DirCache, PathClass
from Common.Misc import SaveFileOnChange
from Common.Misc import ClearDuplicatedInf
from Common.Misc import GuidStructureStringToGuidString
from Common.BuildVersion import gBUILD_VERSION
from Common.MultipleWorkspace import MultipleWorkspace as mws
import FfsFileStatement
import glob
from struct import unpack
## Version and Copyright
versionNumber = "1.0" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + versionNumber
__copyright__ = "Copyright (c) 2007 - 2017, Intel Corporation All rights reserved."
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def main():
global Options
Options = myOptionParser()
global Workspace
Workspace = ""
ArchList = None
ReturnCode = 0
EdkLogger.Initialize()
try:
if Options.verbose != None:
EdkLogger.SetLevel(EdkLogger.VERBOSE)
GenFdsGlobalVariable.VerboseMode = True
if Options.FixedAddress != None:
GenFdsGlobalVariable.FixedLoadAddress = True
if Options.quiet != None:
EdkLogger.SetLevel(EdkLogger.QUIET)
if Options.debug != None:
EdkLogger.SetLevel(Options.debug + 1)
GenFdsGlobalVariable.DebugLevel = Options.debug
else:
EdkLogger.SetLevel(EdkLogger.INFO)
if (Options.Workspace == None):
EdkLogger.error("GenFds", OPTION_MISSING, "WORKSPACE not defined",
ExtraData="Please use '-w' switch to pass it or set the WORKSPACE environment variable.")
elif not os.path.exists(Options.Workspace):
EdkLogger.error("GenFds", PARAMETER_INVALID, "WORKSPACE is invalid",
ExtraData="Please use '-w' switch to pass it or set the WORKSPACE environment variable.")
else:
Workspace = os.path.normcase(Options.Workspace)
GenFdsGlobalVariable.WorkSpaceDir = Workspace
if 'EDK_SOURCE' in os.environ.keys():
GenFdsGlobalVariable.EdkSourceDir = os.path.normcase(os.environ['EDK_SOURCE'])
if (Options.debug):
GenFdsGlobalVariable.VerboseLogger("Using Workspace:" + Workspace)
if Options.GenfdsMultiThread:
GenFdsGlobalVariable.EnableGenfdsMultiThread = True
os.chdir(GenFdsGlobalVariable.WorkSpaceDir)
# set multiple workspace
PackagesPath = os.getenv("PACKAGES_PATH")
mws.setWs(GenFdsGlobalVariable.WorkSpaceDir, PackagesPath)
if (Options.filename):
FdfFilename = Options.filename
FdfFilename = GenFdsGlobalVariable.ReplaceWorkspaceMacro(FdfFilename)
if FdfFilename[0:2] == '..':
FdfFilename = os.path.realpath(FdfFilename)
if not os.path.isabs(FdfFilename):
FdfFilename = mws.join(GenFdsGlobalVariable.WorkSpaceDir, FdfFilename)
if not os.path.exists(FdfFilename):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=FdfFilename)
GenFdsGlobalVariable.FdfFile = FdfFilename
GenFdsGlobalVariable.FdfFileTimeStamp = os.path.getmtime(FdfFilename)
else:
EdkLogger.error("GenFds", OPTION_MISSING, "Missing FDF filename")
if (Options.BuildTarget):
GenFdsGlobalVariable.TargetName = Options.BuildTarget
if (Options.ToolChain):
GenFdsGlobalVariable.ToolChainTag = Options.ToolChain
if (Options.activePlatform):
ActivePlatform = Options.activePlatform
ActivePlatform = GenFdsGlobalVariable.ReplaceWorkspaceMacro(ActivePlatform)
if ActivePlatform[0:2] == '..':
ActivePlatform = os.path.realpath(ActivePlatform)
if not os.path.isabs (ActivePlatform):
ActivePlatform = mws.join(GenFdsGlobalVariable.WorkSpaceDir, ActivePlatform)
if not os.path.exists(ActivePlatform) :
EdkLogger.error("GenFds", FILE_NOT_FOUND, "ActivePlatform doesn't exist!")
else:
EdkLogger.error("GenFds", OPTION_MISSING, "Missing active platform")
GlobalData.BuildOptionPcd = Options.OptionPcd if Options.OptionPcd else {}
GenFdsGlobalVariable.ActivePlatform = PathClass(NormPath(ActivePlatform))
if (Options.ConfDirectory):
# Get alternate Conf location, if it is absolute, then just use the absolute directory name
ConfDirectoryPath = os.path.normpath(Options.ConfDirectory)
if ConfDirectoryPath.startswith('"'):
ConfDirectoryPath = ConfDirectoryPath[1:]
if ConfDirectoryPath.endswith('"'):
ConfDirectoryPath = ConfDirectoryPath[:-1]
if not os.path.isabs(ConfDirectoryPath):
# Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
# This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
ConfDirectoryPath = os.path.join(GenFdsGlobalVariable.WorkSpaceDir, ConfDirectoryPath)
else:
if "CONF_PATH" in os.environ.keys():
ConfDirectoryPath = os.path.normcase(os.environ["CONF_PATH"])
else:
# Get standard WORKSPACE/Conf, use the absolute path to the WORKSPACE/Conf
ConfDirectoryPath = mws.join(GenFdsGlobalVariable.WorkSpaceDir, 'Conf')
GenFdsGlobalVariable.ConfDir = ConfDirectoryPath
if not GlobalData.gConfDirectory:
GlobalData.gConfDirectory = GenFdsGlobalVariable.ConfDir
BuildConfigurationFile = os.path.normpath(os.path.join(ConfDirectoryPath, "target.txt"))
if os.path.isfile(BuildConfigurationFile) == True:
TargetTxt = TargetTxtClassObject.TargetTxtClassObject()
TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)
# if no build target given in command line, get it from target.txt
if not GenFdsGlobalVariable.TargetName:
BuildTargetList = TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET]
if len(BuildTargetList) != 1:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID, ExtraData="Only allows one instance for Target.")
GenFdsGlobalVariable.TargetName = BuildTargetList[0]
# if no tool chain given in command line, get it from target.txt
if not GenFdsGlobalVariable.ToolChainTag:
ToolChainList = TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
if ToolChainList == None or len(ToolChainList) == 0:
EdkLogger.error("GenFds", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.")
if len(ToolChainList) != 1:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID, ExtraData="Only allows one instance for ToolChain.")
GenFdsGlobalVariable.ToolChainTag = ToolChainList[0]
else:
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
#Set global flag for build mode
GlobalData.gIgnoreSource = Options.IgnoreSources
if Options.Macros:
for Pair in Options.Macros:
if Pair.startswith('"'):
Pair = Pair[1:]
if Pair.endswith('"'):
Pair = Pair[:-1]
List = Pair.split('=')
if len(List) == 2:
if not List[1].strip():
EdkLogger.error("GenFds", OPTION_VALUE_INVALID, ExtraData="No Value given for Macro %s" %List[0])
if List[0].strip() == "EFI_SOURCE":
GlobalData.gEfiSource = List[1].strip()
GlobalData.gGlobalDefines["EFI_SOURCE"] = GlobalData.gEfiSource
continue
elif List[0].strip() == "EDK_SOURCE":
GlobalData.gEdkSource = List[1].strip()
GlobalData.gGlobalDefines["EDK_SOURCE"] = GlobalData.gEdkSource
continue
elif List[0].strip() in ["WORKSPACE", "TARGET", "TOOLCHAIN"]:
GlobalData.gGlobalDefines[List[0].strip()] = List[1].strip()
else:
GlobalData.gCommandLineDefines[List[0].strip()] = List[1].strip()
else:
GlobalData.gCommandLineDefines[List[0].strip()] = "TRUE"
os.environ["WORKSPACE"] = Workspace
# Use the -t and -b option as gGlobalDefines's TOOLCHAIN and TARGET if they are not defined
if "TARGET" not in GlobalData.gGlobalDefines.keys():
GlobalData.gGlobalDefines["TARGET"] = GenFdsGlobalVariable.TargetName
if "TOOLCHAIN" not in GlobalData.gGlobalDefines.keys():
GlobalData.gGlobalDefines["TOOLCHAIN"] = GenFdsGlobalVariable.ToolChainTag
if "TOOL_CHAIN_TAG" not in GlobalData.gGlobalDefines.keys():
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = GenFdsGlobalVariable.ToolChainTag
"""call Workspace build create database"""
GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
BuildWorkSpace = WorkspaceDatabase(GlobalData.gDatabasePath)
BuildWorkSpace.InitDatabase()
#
# Get files real name in workspace dir
#
GlobalData.gAllFiles = DirCache(Workspace)
GlobalData.gWorkspace = Workspace
if (Options.archList) :
ArchList = Options.archList.split(',')
else:
# EdkLogger.error("GenFds", OPTION_MISSING, "Missing build ARCH")
ArchList = BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, 'COMMON', Options.BuildTarget, Options.ToolChain].SupArchList
TargetArchList = set(BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, 'COMMON', Options.BuildTarget, Options.ToolChain].SupArchList) & set(ArchList)
if len(TargetArchList) == 0:
EdkLogger.error("GenFds", GENFDS_ERROR, "Target ARCH %s not in platform supported ARCH %s" % (str(ArchList), str(BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, 'COMMON'].SupArchList)))
for Arch in ArchList:
GenFdsGlobalVariable.OutputDirFromDscDict[Arch] = NormPath(BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, Options.BuildTarget, Options.ToolChain].OutputDirectory)
GenFdsGlobalVariable.PlatformName = BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, Options.BuildTarget, Options.ToolChain].PlatformName
if (Options.outputDir):
OutputDirFromCommandLine = GenFdsGlobalVariable.ReplaceWorkspaceMacro(Options.outputDir)
if not os.path.isabs (OutputDirFromCommandLine):
OutputDirFromCommandLine = os.path.join(GenFdsGlobalVariable.WorkSpaceDir, OutputDirFromCommandLine)
for Arch in ArchList:
GenFdsGlobalVariable.OutputDirDict[Arch] = OutputDirFromCommandLine
else:
for Arch in ArchList:
GenFdsGlobalVariable.OutputDirDict[Arch] = os.path.join(GenFdsGlobalVariable.OutputDirFromDscDict[Arch], GenFdsGlobalVariable.TargetName + '_' + GenFdsGlobalVariable.ToolChainTag)
for Key in GenFdsGlobalVariable.OutputDirDict:
OutputDir = GenFdsGlobalVariable.OutputDirDict[Key]
if OutputDir[0:2] == '..':
OutputDir = os.path.realpath(OutputDir)
if OutputDir[1] != ':':
OutputDir = os.path.join (GenFdsGlobalVariable.WorkSpaceDir, OutputDir)
if not os.path.exists(OutputDir):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=OutputDir)
GenFdsGlobalVariable.OutputDirDict[Key] = OutputDir
""" Parse Fdf file, has to place after build Workspace as FDF may contain macros from DSC file """
FdfParserObj = FdfParser.FdfParser(FdfFilename)
FdfParserObj.ParseFile()
if FdfParserObj.CycleReferenceCheck():
EdkLogger.error("GenFds", FORMAT_NOT_SUPPORTED, "Cycle Reference Detected in FDF file")
if (Options.uiFdName) :
if Options.uiFdName.upper() in FdfParserObj.Profile.FdDict.keys():
GenFds.OnlyGenerateThisFd = Options.uiFdName
else:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID,
"No such an FD in FDF file: %s" % Options.uiFdName)
if (Options.uiFvName) :
if Options.uiFvName.upper() in FdfParserObj.Profile.FvDict.keys():
GenFds.OnlyGenerateThisFv = Options.uiFvName
else:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID,
"No such an FV in FDF file: %s" % Options.uiFvName)
if (Options.uiCapName) :
if Options.uiCapName.upper() in FdfParserObj.Profile.CapsuleDict.keys():
GenFds.OnlyGenerateThisCap = Options.uiCapName
else:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID,
"No such a Capsule in FDF file: %s" % Options.uiCapName)
GenFdsGlobalVariable.WorkSpace = BuildWorkSpace
if ArchList != None:
GenFdsGlobalVariable.ArchList = ArchList
# Dsc Build Data will handle Pcd Settings from CommandLine.
"""Modify images from build output if the feature of loading driver at fixed address is on."""
if GenFdsGlobalVariable.FixedLoadAddress:
GenFds.PreprocessImage(BuildWorkSpace, GenFdsGlobalVariable.ActivePlatform)
# Record the FV Region info that may specific in the FD
if FdfParserObj.Profile.FvDict and FdfParserObj.Profile.FdDict:
for Fv in FdfParserObj.Profile.FvDict:
FvObj = FdfParserObj.Profile.FvDict[Fv]
for Fd in FdfParserObj.Profile.FdDict:
FdObj = FdfParserObj.Profile.FdDict[Fd]
for RegionObj in FdObj.RegionList:
if RegionObj.RegionType != 'FV':
continue
for RegionData in RegionObj.RegionDataList:
if FvObj.UiFvName.upper() == RegionData.upper():
if FvObj.FvRegionInFD:
if FvObj.FvRegionInFD != RegionObj.Size:
EdkLogger.error("GenFds", FORMAT_INVALID, "The FV %s's region is specified in multiple FD with different value." %FvObj.UiFvName)
else:
FvObj.FvRegionInFD = RegionObj.Size
RegionObj.BlockInfoOfRegion(FdObj.BlockSizeList, FvObj)
"""Call GenFds"""
GenFds.GenFd('', FdfParserObj, BuildWorkSpace, ArchList)
"""Generate GUID cross reference file"""
GenFds.GenerateGuidXRefFile(BuildWorkSpace, ArchList, FdfParserObj)
"""Display FV space info."""
GenFds.DisplayFvSpaceInfo(FdfParserObj)
except FdfParser.Warning, X:
EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
ReturnCode = FORMAT_INVALID
except FatalError, X:
if Options.debug != None:
import traceback
EdkLogger.quiet(traceback.format_exc())
ReturnCode = X.args[0]
except:
import traceback
EdkLogger.error(
"\nPython",
CODE_ERROR,
"Tools code failure",
ExtraData="Please send email to [email protected] for help, attaching following call stack trace!\n",
RaiseError=False
)
EdkLogger.quiet(traceback.format_exc())
ReturnCode = CODE_ERROR
finally:
ClearDuplicatedInf()
return ReturnCode
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
if option not in gParamCheck:
setattr(parser.values, option.dest, value)
gParamCheck.append(option)
else:
parser.error("Option %s only allows one instance in command line!" % option)
## FindExtendTool()
#
# Find location of tools to process data
#
# @param KeyStringList Filter for inputs of section generation
# @param CurrentArchList Arch list
# @param NameGuid The Guid name
#
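# tools_def.txt keys split as TARGET_TOOLCHAIN_ARCH_TOOLCODE_ATTRIBUTE; entries
# whose ATTRIBUTE is GUID map a tool code to the GUID handled by that tool, and
# the matching *_PATH / *_FLAGS entries supply its location and options (e.g. a
# hypothetical DEBUG_MYTOOLS_IA32_VPDTOOL_GUID paired with
# DEBUG_MYTOOLS_IA32_VPDTOOL_PATH).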
def FindExtendTool(KeyStringList, CurrentArchList, NameGuid):
ToolDb = ToolDefClassObject.ToolDefDict(GenFdsGlobalVariable.ConfDir).ToolsDefTxtDatabase
# if user not specify filter, try to deduce it from global data.
if KeyStringList == None or KeyStringList == []:
Target = GenFdsGlobalVariable.TargetName
ToolChain = GenFdsGlobalVariable.ToolChainTag
if ToolChain not in ToolDb['TOOL_CHAIN_TAG']:
EdkLogger.error("GenFds", GENFDS_ERROR, "Can not find external tool because tool tag %s is not defined in tools_def.txt!" % ToolChain)
KeyStringList = [Target + '_' + ToolChain + '_' + CurrentArchList[0]]
for Arch in CurrentArchList:
if Target + '_' + ToolChain + '_' + Arch not in KeyStringList:
KeyStringList.append(Target + '_' + ToolChain + '_' + Arch)
if GenFdsGlobalVariable.GuidToolDefinition:
if NameGuid in GenFdsGlobalVariable.GuidToolDefinition.keys():
return GenFdsGlobalVariable.GuidToolDefinition[NameGuid]
ToolDefinition = ToolDefClassObject.ToolDefDict(GenFdsGlobalVariable.ConfDir).ToolsDefTxtDictionary
ToolPathTmp = None
ToolOption = None
ToolPathKey = None
ToolOptionKey = None
KeyList = None
for ToolDef in ToolDefinition.items():
if NameGuid == ToolDef[1]:
KeyList = ToolDef[0].split('_')
Key = KeyList[0] + \
'_' + \
KeyList[1] + \
'_' + \
KeyList[2]
if Key in KeyStringList and KeyList[4] == 'GUID':
ToolPathKey = Key + '_' + KeyList[3] + '_PATH'
ToolOptionKey = Key + '_' + KeyList[3] + '_FLAGS'
ToolPath = ToolDefinition.get(ToolPathKey)
ToolOption = ToolDefinition.get(ToolOptionKey)
if ToolPathTmp == None:
ToolPathTmp = ToolPath
else:
if ToolPathTmp != ToolPath:
EdkLogger.error("GenFds", GENFDS_ERROR, "Don't know which tool to use, %s or %s ?" % (ToolPathTmp, ToolPath))
BuildOption = {}
for Arch in CurrentArchList:
Platform = GenFdsGlobalVariable.WorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
# key is (ToolChainFamily, ToolChain, CodeBase)
for item in Platform.BuildOptions:
if '_PATH' in item[1] or '_FLAGS' in item[1] or '_GUID' in item[1]:
if not item[0] or (item[0] and GenFdsGlobalVariable.ToolChainFamily== item[0]):
if item[1] not in BuildOption:
BuildOption[item[1]] = Platform.BuildOptions[item]
if BuildOption:
ToolList = [TAB_TOD_DEFINES_TARGET, TAB_TOD_DEFINES_TOOL_CHAIN_TAG, TAB_TOD_DEFINES_TARGET_ARCH]
for Index in range(2, -1, -1):
for Key in dict(BuildOption):
List = Key.split('_')
if List[Index] == '*':
for String in ToolDb[ToolList[Index]]:
if String in [Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]:
List[Index] = String
NewKey = '%s_%s_%s_%s_%s' % tuple(List)
if NewKey not in BuildOption:
BuildOption[NewKey] = BuildOption[Key]
continue
del BuildOption[Key]
elif List[Index] not in ToolDb[ToolList[Index]]:
del BuildOption[Key]
if BuildOption:
if not KeyList:
for Op in BuildOption:
if NameGuid == BuildOption[Op]:
KeyList = Op.split('_')
Key = KeyList[0] + '_' + KeyList[1] +'_' + KeyList[2]
if Key in KeyStringList and KeyList[4] == 'GUID':
ToolPathKey = Key + '_' + KeyList[3] + '_PATH'
ToolOptionKey = Key + '_' + KeyList[3] + '_FLAGS'
if ToolPathKey in BuildOption.keys():
ToolPathTmp = BuildOption.get(ToolPathKey)
if ToolOptionKey in BuildOption.keys():
ToolOption = BuildOption.get(ToolOptionKey)
GenFdsGlobalVariable.GuidToolDefinition[NameGuid] = (ToolPathTmp, ToolOption)
return ToolPathTmp, ToolOption
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Opt A optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def myOptionParser():
usage = "%prog [options] -f input_file -a arch_list -b build_target -p active_platform -t tool_chain_tag -D \"MacroName [= MacroValue]\""
Parser = OptionParser(usage=usage, description=__copyright__, version="%prog " + str(versionNumber))
Parser.add_option("-f", "--file", dest="filename", type="string", help="Name of FDF file to convert", action="callback", callback=SingleCheckCallback)
Parser.add_option("-a", "--arch", dest="archList", help="comma separated list containing one or more of: IA32, X64, IPF, ARM, AARCH64 or EBC which should be built, overrides target.txt?s TARGET_ARCH")
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed.")
Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
Parser.add_option("-p", "--platform", type="string", dest="activePlatform", help="Set the ACTIVE_PLATFORM, overrides target.txt ACTIVE_PLATFORM setting.",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-w", "--workspace", type="string", dest="Workspace", default=os.environ.get('WORKSPACE'), help="Set the WORKSPACE",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-o", "--outputDir", type="string", dest="outputDir", help="Name of Build Output directory",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-r", "--rom_image", dest="uiFdName", help="Build the image using the [FD] section named by FdUiName.")
Parser.add_option("-i", "--FvImage", dest="uiFvName", help="Build the FV image using the [FV] section named by UiFvName")
Parser.add_option("-C", "--CapsuleImage", dest="uiCapName", help="Build the Capsule image using the [Capsule] section named by UiCapName")
Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Set the build TARGET, overrides target.txt TARGET setting.",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-t", "--tagname", type="string", dest="ToolChain", help="Using the tools: TOOL_CHAIN_TAG name to build the platform.",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
Parser.add_option("-s", "--specifyaddress", dest="FixedAddress", action="store_true", type=None, help="Specify driver load address.")
Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
Parser.add_option("--pcd", action="append", dest="OptionPcd", help="Set PCD value by command line. Format: \"PcdName=Value\" ")
Parser.add_option("--genfds-multi-thread", action="store_true", dest="GenfdsMultiThread", default=False, help="Enable GenFds multi thread to generate ffs file.")
(Options, args) = Parser.parse_args()
return Options
## The class implementing the EDK2 flash image generation process
#
# This process includes:
# 1. Collect workspace information, includes platform and module information
# 2. Call methods of Fd class to generate FD
# 3. Call methods of Fv class to generate FV that not belong to FD
#
class GenFds :
FdfParsef = None
# FvName, FdName, CapName in FDF, Image file name
ImageBinDict = {}
OnlyGenerateThisFd = None
OnlyGenerateThisFv = None
OnlyGenerateThisCap = None
## GenFd()
#
# @param OutputDir Output directory
# @param FdfParser FDF contents parser
# @param Workspace The directory of workspace
# @param ArchList The Arch list of platform
#
def GenFd (OutputDir, FdfParser, WorkSpace, ArchList):
GenFdsGlobalVariable.SetDir ('', FdfParser, WorkSpace, ArchList)
GenFdsGlobalVariable.VerboseLogger(" Generate all Fd images and their required FV and Capsule images!")
if GenFds.OnlyGenerateThisCap != None and GenFds.OnlyGenerateThisCap.upper() in GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict.keys():
CapsuleObj = GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict.get(GenFds.OnlyGenerateThisCap.upper())
if CapsuleObj != None:
CapsuleObj.GenCapsule()
return
if GenFds.OnlyGenerateThisFd != None and GenFds.OnlyGenerateThisFd.upper() in GenFdsGlobalVariable.FdfParser.Profile.FdDict.keys():
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict.get(GenFds.OnlyGenerateThisFd.upper())
if FdObj != None:
FdObj.GenFd()
return
elif GenFds.OnlyGenerateThisFd == None and GenFds.OnlyGenerateThisFv == None:
for FdName in GenFdsGlobalVariable.FdfParser.Profile.FdDict.keys():
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict[FdName]
FdObj.GenFd()
GenFdsGlobalVariable.VerboseLogger("\n Generate other FV images! ")
if GenFds.OnlyGenerateThisFv != None and GenFds.OnlyGenerateThisFv.upper() in GenFdsGlobalVariable.FdfParser.Profile.FvDict.keys():
FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict.get(GenFds.OnlyGenerateThisFv.upper())
if FvObj != None:
Buffer = StringIO.StringIO()
FvObj.AddToBuffer(Buffer)
Buffer.close()
return
elif GenFds.OnlyGenerateThisFv == None:
for FvName in GenFdsGlobalVariable.FdfParser.Profile.FvDict.keys():
Buffer = StringIO.StringIO('')
FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict[FvName]
FvObj.AddToBuffer(Buffer)
Buffer.close()
if GenFds.OnlyGenerateThisFv == None and GenFds.OnlyGenerateThisFd == None and GenFds.OnlyGenerateThisCap == None:
if GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict != {}:
GenFdsGlobalVariable.VerboseLogger("\n Generate other Capsule images!")
for CapsuleName in GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict.keys():
CapsuleObj = GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict[CapsuleName]
CapsuleObj.GenCapsule()
if GenFdsGlobalVariable.FdfParser.Profile.OptRomDict != {}:
GenFdsGlobalVariable.VerboseLogger("\n Generate all Option ROM!")
for DriverName in GenFdsGlobalVariable.FdfParser.Profile.OptRomDict.keys():
OptRomObj = GenFdsGlobalVariable.FdfParser.Profile.OptRomDict[DriverName]
OptRomObj.AddToBuffer(None)
@staticmethod
def GenFfsMakefile(OutputDir, FdfParser, WorkSpace, ArchList, GlobalData):
GenFdsGlobalVariable.SetEnv(FdfParser, WorkSpace, ArchList, GlobalData)
for FdName in GenFdsGlobalVariable.FdfParser.Profile.FdDict.keys():
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict[FdName]
FdObj.GenFd(Flag=True)
for FvName in GenFdsGlobalVariable.FdfParser.Profile.FvDict.keys():
FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict[FvName]
FvObj.AddToBuffer(Buffer=None, Flag=True)
if GenFdsGlobalVariable.FdfParser.Profile.OptRomDict != {}:
for DriverName in GenFdsGlobalVariable.FdfParser.Profile.OptRomDict.keys():
OptRomObj = GenFdsGlobalVariable.FdfParser.Profile.OptRomDict[DriverName]
OptRomObj.AddToBuffer(Buffer=None, Flag=True)
return GenFdsGlobalVariable.FfsCmdDict
## GetFvBlockSize()
#
# @param FvObj Whose block size to get
# @retval int Block size value
#
def GetFvBlockSize(FvObj):
DefaultBlockSize = 0x1
FdObj = None
if GenFds.OnlyGenerateThisFd != None and GenFds.OnlyGenerateThisFd.upper() in GenFdsGlobalVariable.FdfParser.Profile.FdDict.keys():
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict[GenFds.OnlyGenerateThisFd.upper()]
if FdObj == None:
for ElementFd in GenFdsGlobalVariable.FdfParser.Profile.FdDict.values():
for ElementRegion in ElementFd.RegionList:
if ElementRegion.RegionType == 'FV':
for ElementRegionData in ElementRegion.RegionDataList:
if ElementRegionData != None and ElementRegionData.upper() == FvObj.UiFvName:
if FvObj.BlockSizeList != []:
return FvObj.BlockSizeList[0][0]
else:
return ElementRegion.BlockSizeOfRegion(ElementFd.BlockSizeList)
if FvObj.BlockSizeList != []:
return FvObj.BlockSizeList[0][0]
return DefaultBlockSize
else:
for ElementRegion in FdObj.RegionList:
if ElementRegion.RegionType == 'FV':
for ElementRegionData in ElementRegion.RegionDataList:
if ElementRegionData != None and ElementRegionData.upper() == FvObj.UiFvName:
if FvObj.BlockSizeList != []:
return FvObj.BlockSizeList[0][0]
else:
return ElementRegion.BlockSizeOfRegion(ElementFd.BlockSizeList)
return DefaultBlockSize
## DisplayFvSpaceInfo()
#
# @param FvObj Whose block size to get
# @retval None
#
def DisplayFvSpaceInfo(FdfParser):
FvSpaceInfoList = []
MaxFvNameLength = 0
for FvName in FdfParser.Profile.FvDict:
if len(FvName) > MaxFvNameLength:
MaxFvNameLength = len(FvName)
FvSpaceInfoFileName = os.path.join(GenFdsGlobalVariable.FvDir, FvName.upper() + '.Fv.map')
if os.path.exists(FvSpaceInfoFileName):
FileLinesList = linecache.getlines(FvSpaceInfoFileName)
TotalFound = False
Total = ''
UsedFound = False
Used = ''
FreeFound = False
Free = ''
for Line in FileLinesList:
NameValue = Line.split('=')
if len(NameValue) == 2:
if NameValue[0].strip() == 'EFI_FV_TOTAL_SIZE':
TotalFound = True
Total = NameValue[1].strip()
if NameValue[0].strip() == 'EFI_FV_TAKEN_SIZE':
UsedFound = True
Used = NameValue[1].strip()
if NameValue[0].strip() == 'EFI_FV_SPACE_SIZE':
FreeFound = True
Free = NameValue[1].strip()
if TotalFound and UsedFound and FreeFound:
FvSpaceInfoList.append((FvName, Total, Used, Free))
GenFdsGlobalVariable.InfLogger('\nFV Space Information')
for FvSpaceInfo in FvSpaceInfoList:
Name = FvSpaceInfo[0]
TotalSizeValue = long(FvSpaceInfo[1], 0)
UsedSizeValue = long(FvSpaceInfo[2], 0)
FreeSizeValue = long(FvSpaceInfo[3], 0)
if UsedSizeValue == TotalSizeValue:
Percentage = '100'
else:
Percentage = str((UsedSizeValue + 0.0) / TotalSizeValue)[0:4].lstrip('0.')
GenFdsGlobalVariable.InfLogger(Name + ' ' + '[' + Percentage + '%Full] ' + str(TotalSizeValue) + ' total, ' + str(UsedSizeValue) + ' used, ' + str(FreeSizeValue) + ' free')
## PreprocessImage()
#
# @param BuildDb Database from build meta data files
# @param DscFile modules from dsc file will be preprocessed
# @retval None
#
def PreprocessImage(BuildDb, DscFile):
PcdDict = BuildDb.BuildObject[DscFile, 'COMMON', GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag].Pcds
PcdValue = ''
for Key in PcdDict:
PcdObj = PcdDict[Key]
if PcdObj.TokenCName == 'PcdBsBaseAddress':
PcdValue = PcdObj.DefaultValue
break
if PcdValue == '':
return
Int64PcdValue = long(PcdValue, 0)
if Int64PcdValue == 0 or Int64PcdValue < -1:
return
TopAddress = 0
if Int64PcdValue > 0:
TopAddress = Int64PcdValue
ModuleDict = BuildDb.BuildObject[DscFile, 'COMMON', GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag].Modules
for Key in ModuleDict:
ModuleObj = BuildDb.BuildObject[Key, 'COMMON', GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
print ModuleObj.BaseName + ' ' + ModuleObj.ModuleType
def GenerateGuidXRefFile(BuildDb, ArchList, FdfParserObj):
GuidXRefFileName = os.path.join(GenFdsGlobalVariable.FvDir, "Guid.xref")
GuidXRefFile = StringIO.StringIO('')
GuidDict = {}
ModuleList = []
FileGuidList = []
for Arch in ArchList:
PlatformDataBase = BuildDb.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
for ModuleFile in PlatformDataBase.Modules:
Module = BuildDb.BuildObject[ModuleFile, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
if Module in ModuleList:
continue
else:
ModuleList.append(Module)
GuidXRefFile.write("%s %s\n" % (Module.Guid, Module.BaseName))
for key, item in Module.Protocols.items():
GuidDict[key] = item
for key, item in Module.Guids.items():
GuidDict[key] = item
for key, item in Module.Ppis.items():
GuidDict[key] = item
for FvName in FdfParserObj.Profile.FvDict:
for FfsObj in FdfParserObj.Profile.FvDict[FvName].FfsList:
if not isinstance(FfsObj, FfsFileStatement.FileStatement):
InfPath = PathClass(NormPath(mws.join(GenFdsGlobalVariable.WorkSpaceDir, FfsObj.InfFileName)))
FdfModule = BuildDb.BuildObject[InfPath, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
if FdfModule in ModuleList:
continue
else:
ModuleList.append(FdfModule)
GuidXRefFile.write("%s %s\n" % (FdfModule.Guid, FdfModule.BaseName))
for key, item in FdfModule.Protocols.items():
GuidDict[key] = item
for key, item in FdfModule.Guids.items():
GuidDict[key] = item
for key, item in FdfModule.Ppis.items():
GuidDict[key] = item
else:
FileStatementGuid = FfsObj.NameGuid
if FileStatementGuid in FileGuidList:
continue
else:
FileGuidList.append(FileStatementGuid)
Name = []
FfsPath = os.path.join(GenFdsGlobalVariable.FvDir, 'Ffs')
FfsPath = glob.glob(os.path.join(FfsPath, FileStatementGuid) + '*')
if not FfsPath:
continue
if not os.path.exists(FfsPath[0]):
continue
MatchDict = {}
ReFileEnds = re.compile('\S+(.ui)$|\S+(fv.sec.txt)$|\S+(.pe32.txt)$|\S+(.te.txt)$|\S+(.pic.txt)$|\S+(.raw.txt)$|\S+(.ffs.txt)$')
FileList = os.listdir(FfsPath[0])
for File in FileList:
Match = ReFileEnds.search(File)
if Match:
for Index in range(1, 8):
if Match.group(Index) and Match.group(Index) in MatchDict:
MatchDict[Match.group(Index)].append(File)
elif Match.group(Index):
MatchDict[Match.group(Index)] = [File]
if not MatchDict:
continue
if '.ui' in MatchDict:
for File in MatchDict['.ui']:
with open(os.path.join(FfsPath[0], File), 'rb') as F:
F.read()
length = F.tell()
F.seek(4)
TmpStr = unpack('%dh' % ((length - 4) / 2), F.read())
Name = ''.join([chr(c) for c in TmpStr[:-1]])
else:
FileList = []
if 'fv.sec.txt' in MatchDict:
FileList = MatchDict['fv.sec.txt']
elif '.pe32.txt' in MatchDict:
FileList = MatchDict['.pe32.txt']
elif '.te.txt' in MatchDict:
FileList = MatchDict['.te.txt']
elif '.pic.txt' in MatchDict:
FileList = MatchDict['.pic.txt']
elif '.raw.txt' in MatchDict:
FileList = MatchDict['.raw.txt']
elif '.ffs.txt' in MatchDict:
FileList = MatchDict['.ffs.txt']
else:
pass
for File in FileList:
with open(os.path.join(FfsPath[0], File), 'r') as F:
Name.append((F.read().split()[-1]))
if not Name:
continue
Name = ' '.join(Name) if type(Name) == type([]) else Name
GuidXRefFile.write("%s %s\n" %(FileStatementGuid, Name))
# Append GUIDs, Protocols, and PPIs to the Xref file
GuidXRefFile.write("\n")
for key, item in GuidDict.items():
GuidXRefFile.write("%s %s\n" % (GuidStructureStringToGuidString(item).upper(), key))
if GuidXRefFile.getvalue():
SaveFileOnChange(GuidXRefFileName, GuidXRefFile.getvalue(), False)
GenFdsGlobalVariable.InfLogger("\nGUID cross reference file can be found at %s" % GuidXRefFileName)
elif os.path.exists(GuidXRefFileName):
os.remove(GuidXRefFileName)
GuidXRefFile.close()
##Define GenFd as static function
GenFd = staticmethod(GenFd)
GetFvBlockSize = staticmethod(GetFvBlockSize)
DisplayFvSpaceInfo = staticmethod(DisplayFvSpaceInfo)
PreprocessImage = staticmethod(PreprocessImage)
GenerateGuidXRefFile = staticmethod(GenerateGuidXRefFile)
if __name__ == '__main__':
r = main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127: r = 1
    sys.exit(r)
| [] | [] | ["EDK_SOURCE", "WORKSPACE", "CONF_PATH", "PACKAGES_PATH"] | [] | ["EDK_SOURCE", "WORKSPACE", "CONF_PATH", "PACKAGES_PATH"] | python | 4 | 0 | |
cmd/channel_dispatcher/main.go | /*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"os"
"knative.dev/pkg/injection"
"knative.dev/pkg/injection/sharedmain"
"knative.dev/pkg/signals"
"knative.dev/eventing-natss/pkg/reconciler/dispatcher/natss"
)
const component = "natsschannel-dispatcher"
func main() {
ctx := signals.NewContext()
ns := os.Getenv("NAMESPACE")
if ns != "" {
ctx = injection.WithNamespaceScope(ctx, ns)
}
sharedmain.MainWithContext(ctx, component, natss.NewController)
}
| ["NAMESPACE"] | [] | ["NAMESPACE"] | [] | ["NAMESPACE"] | go | 1 | 0 | |
seisflows/system/multicore.py |
import os
import sys
import numpy as np
from os.path import abspath, basename, join
from subprocess import Popen
from time import sleep
from seisflows.tools import unix
from seisflows.tools.tools import call, findpath, nproc, saveobj
from seisflows.config import ParameterError, custom_import
PAR = sys.modules['seisflows_parameters']
PATH = sys.modules['seisflows_paths']
class multicore(custom_import('system', 'serial')):
""" An interface through which to submit workflows, run tasks in serial or
parallel, and perform other system functions.
By hiding environment details behind a python interface layer, these
classes provide a consistent command set across different computing
environments.
For important additional information, please see
http://seisflows.readthedocs.org/en/latest/manual/manual.html#system-configuration
"""
def check(self):
""" Checks parameters and paths
"""
super(multicore, self).check()
# number of tasks
if 'NTASK' not in PAR:
raise ParameterError(PAR, 'NTASK')
# number of cores per task
if 'NPROC' not in PAR:
raise ParameterError(PAR, 'NPROC')
# number of available cores
if 'NPROCMAX' not in PAR:
setattr(PAR, 'NPROCMAX', nproc())
# maximum number of concurrent tasks
if 'NTASKMAX' not in PAR:
            setattr(PAR, 'NTASKMAX', PAR.NPROCMAX // PAR.NPROC)
# assertions
assert PAR.NPROC <= PAR.NPROCMAX
def run(self, classname, method, hosts='all', **kwargs):
""" Executes the following task:
classname.method(*args, **kwargs)
"""
self.checkpoint()
self.save_kwargs(classname, method, kwargs)
if hosts == 'all':
running_tasks = dict()
queued_tasks = list(range(PAR.NTASK))
# implements "work queue" pattern
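            # at most NTASKMAX subprocesses run concurrently: queued task ids
            # are launched while capacity remains, and finished processes
            # (poll() returns an exit code) are reaped from running_tasks.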
while queued_tasks or running_tasks:
# launch queued tasks
while len(queued_tasks) > 0 and \
len(running_tasks) < PAR.NTASKMAX:
i = queued_tasks.pop(0)
p = self._launch(classname, method, taskid=i)
running_tasks[i] = p
sleep(0.1)
# checks status of running tasks
                for i, p in list(running_tasks.items()):
                    if p.poll() is not None:
running_tasks.pop(i)
if running_tasks:
sleep(0.1)
print('')
elif hosts == 'head':
os.environ['SEISFLOWS_TASKID'] = str(0)
func = getattr(__import__('seisflows_'+classname), method)
func(**kwargs)
else:
raise KeyError('Bad keyword argument: system.run: hosts')
### private methods
def _launch(self, classname, method, taskid=0):
env = list(os.environ.copy().items())
env += [['SEISFLOWS_TASKID', str(taskid)]]
self.progress(taskid)
p = Popen(
findpath('seisflows.system') +'/'+ 'wrappers/run '
+ PATH.OUTPUT + ' '
+ classname + ' '
+ method,
shell=True,
env=dict(env))
return p
def save_kwargs(self, classname, method, kwargs):
kwargspath = join(PATH.OUTPUT, 'kwargs')
kwargsfile = join(kwargspath, classname+'_'+method+'.p')
unix.mkdir(kwargspath)
        saveobj(kwargsfile, kwargs)
| [] | [] | ["SEISFLOWS_TASKID"] | [] | ["SEISFLOWS_TASKID"] | python | 1 | 0 | |
IPython/core/shellapp.py | # encoding: utf-8
"""
A mixin for :class:`~IPython.core.application.Application` classes that
launch InteractiveShell instances, load extensions, etc.
Authors
-------
* Min Ragan-Kelley
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
import glob
import os
import sys
from IPython.config.application import boolean_flag
from IPython.config.configurable import Configurable
from IPython.config.loader import Config
from IPython.core import pylabtools
from IPython.utils import py3compat
from IPython.utils.contexts import preserve_keys
from IPython.utils.path import filefind
from IPython.utils.traitlets import (
Unicode, Instance, List, Bool, CaselessStrEnum, Dict
)
from IPython.lib.inputhook import guis
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
gui_keys = tuple(sorted([key for key in guis if key is not None]))
backend_keys = sorted(pylabtools.backends.keys())
backend_keys.insert(0, 'auto')
shell_flags = {}
addflag = lambda *args: shell_flags.update(boolean_flag(*args))
addflag('autoindent', 'InteractiveShell.autoindent',
'Turn on autoindenting.', 'Turn off autoindenting.'
)
addflag('automagic', 'InteractiveShell.automagic',
"""Turn on the auto calling of magic commands. Type %%magic at the
IPython prompt for more information.""",
'Turn off the auto calling of magic commands.'
)
addflag('pdb', 'InteractiveShell.pdb',
"Enable auto calling the pdb debugger after every exception.",
"Disable auto calling the pdb debugger after every exception."
)
# pydb flag doesn't do any config, as core.debugger switches on import,
# which is before parsing. This just allows the flag to be passed.
shell_flags.update(dict(
pydb=({},
"""Use the third party 'pydb' package as debugger, instead of pdb.
Requires that pydb is installed."""
)
))
addflag('pprint', 'PlainTextFormatter.pprint',
"Enable auto pretty printing of results.",
"Disable auto pretty printing of results."
)
addflag('color-info', 'InteractiveShell.color_info',
"""IPython can display information about objects via a set of func-
tions, and optionally can use colors for this, syntax highlighting
source code and various other elements. However, because this
information is passed through a pager (like 'less') and many pagers get
confused with color codes, this option is off by default. You can test
it and turn it on permanently in your ipython_config.py file if it
works with your system. The magic function %%color_info allows you to
toggle this interactively for testing.""",
"Disable using colors for info related things."
)
addflag('deep-reload', 'InteractiveShell.deep_reload',
"""Enable deep (recursive) reloading by default. IPython can use the
deep_reload module which reloads changes in modules recursively (it
replaces the reload() function, so you don't need to change anything to
use it). deep_reload() forces a full reload of modules whose code may
have changed, which the default reload() function does not. When
deep_reload is off, IPython will use the normal reload(), but
deep_reload will still be available as dreload(). This feature is off
by default [which means that you have both normal reload() and
dreload()].""",
"Disable deep (recursive) reloading by default."
)
nosep_config = Config()
nosep_config.InteractiveShell.separate_in = ''
nosep_config.InteractiveShell.separate_out = ''
nosep_config.InteractiveShell.separate_out2 = ''
shell_flags['nosep'] = (nosep_config, "Eliminate all spacing between prompts.")
shell_flags['pylab'] = (
{'InteractiveShellApp': {'pylab': 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""
)
shell_flags['matplotlib'] = (
{'InteractiveShellApp': {'matplotlib': 'auto'}},
"""Configure matplotlib for interactive use with
the default matplotlib backend."""
)
# it's possible we don't want short aliases for *all* of these:
shell_aliases = dict(
autocall='InteractiveShell.autocall',
colors='InteractiveShell.colors',
logfile='InteractiveShell.logfile',
logappend='InteractiveShell.logappend',
c='InteractiveShellApp.code_to_run',
m='InteractiveShellApp.module_to_run',
ext='InteractiveShellApp.extra_extension',
gui='InteractiveShellApp.gui',
pylab='InteractiveShellApp.pylab',
matplotlib='InteractiveShellApp.matplotlib',
)
shell_aliases['cache-size'] = 'InteractiveShell.cache_size'
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
class InteractiveShellApp(Configurable):
"""A Mixin for applications that start InteractiveShell instances.
Provides configurables for loading extensions and executing files
as part of configuring a Shell environment.
The following methods should be called by the :meth:`initialize` method
of the subclass:
- :meth:`init_path`
- :meth:`init_shell` (to be implemented by the subclass)
- :meth:`init_gui_pylab`
- :meth:`init_extensions`
- :meth:`init_code`
"""
extensions = List(Unicode, config=True,
help="A list of dotted module names of IPython extensions to load."
)
extra_extension = Unicode('', config=True,
help="dotted module name of an IPython extension to load."
)
def _extra_extension_changed(self, name, old, new):
if new:
# add to self.extensions
self.extensions.append(new)
# Extensions that are always loaded (not configurable)
default_extensions = List(Unicode, [u'storemagic'], config=False)
hide_initial_ns = Bool(True, config=True,
help="""Should variables loaded at startup (by startup files, exec_lines, etc.)
be hidden from tools like %who?"""
)
exec_files = List(Unicode, config=True,
help="""List of files to run at IPython startup."""
)
exec_PYTHONSTARTUP = Bool(True, config=True,
help="""Run the file referenced by the PYTHONSTARTUP environment
variable at IPython startup."""
)
file_to_run = Unicode('', config=True,
help="""A file to be run""")
exec_lines = List(Unicode, config=True,
help="""lines of code to run at IPython startup."""
)
code_to_run = Unicode('', config=True,
help="Execute the given command string."
)
module_to_run = Unicode('', config=True,
help="Run the module as a script."
)
gui = CaselessStrEnum(gui_keys, config=True,
help="Enable GUI event loop integration with any of {0}.".format(
gui_keys)
)
matplotlib = CaselessStrEnum(backend_keys,
config=True,
help="""Configure matplotlib for interactive use with
the default matplotlib backend."""
)
pylab = CaselessStrEnum(backend_keys,
config=True,
help="""Pre-load matplotlib and numpy for interactive use,
selecting a particular matplotlib backend and loop integration.
"""
)
pylab_import_all = Bool(True, config=True,
help="""If true, IPython will populate the user namespace with numpy, pylab, etc.
and an ``import *`` is done from numpy and pylab, when using pylab mode.
When False, pylab mode should not import any names into the user namespace.
"""
)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
user_ns = Instance(dict, args=None, allow_none=True)
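# Propagate reassignment of user_ns on the app to the running shell.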
def _user_ns_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_ns = new
self.shell.init_user_ns()
def init_path(self):
"""Add current working directory, '', to sys.path"""
if sys.path[0] != '':
sys.path.insert(0, '')
def init_shell(self):
raise NotImplementedError("Override in subclasses")
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
enable = False
shell = self.shell
if self.pylab:
enable = lambda key: shell.enable_pylab(
key, import_all=self.pylab_import_all)
key = self.pylab
elif self.matplotlib:
enable = shell.enable_matplotlib
key = self.matplotlib
elif self.gui:
enable = shell.enable_gui
key = self.gui
if not enable:
return
try:
r = enable(key)
except ImportError:
self.log.warn(
"Eventloop or matplotlib integration failed. Is matplotlib installed?")
self.shell.showtraceback()
return
except Exception:
self.log.warn("GUI event loop or pylab initialization failed")
self.shell.showtraceback()
return
if isinstance(r, tuple):
gui, backend = r[:2]
self.log.info("Enabling GUI event loop integration, "
"eventloop=%s, matplotlib=%s", gui, backend)
if key == "auto":
print("Using matplotlib backend: %s" % backend)
else:
gui = r
self.log.info("Enabling GUI event loop integration, "
"eventloop=%s", gui)
def init_extensions(self):
"""Load all IPython extensions in IPythonApp.extensions.
This uses the :meth:`ExtensionManager.load_extensions` to load all
the extensions listed in ``self.extensions``.
"""
try:
self.log.debug("Loading IPython extensions...")
extensions = self.default_extensions + self.extensions
for ext in extensions:
try:
self.log.info("Loading IPython extension: %s" % ext)
self.shell.extension_manager.load_extension(ext)
except:
self.log.warn("Error in loading extension: %s" % ext +
"\nCheck your config files in %s" % self.profile_dir.location
)
self.shell.showtraceback()
except:
self.log.warn("Unknown error in loading extensions:")
self.shell.showtraceback()
def init_code(self):
"""run the pre-flight code, specified via exec_lines"""
self._run_startup_files()
self._run_exec_lines()
self._run_exec_files()
# Hide variables defined here from %who etc.
if self.hide_initial_ns:
self.shell.user_ns_hidden.update(self.shell.user_ns)
# command-line execution (ipython -i script.py, ipython -m module)
# should *not* be excluded from %whos
self._run_cmd_line_code()
self._run_module()
# flush output, so it won't be attached to the first cell
sys.stdout.flush()
sys.stderr.flush()
def _run_exec_lines(self):
"""Run lines of code in IPythonApp.exec_lines in the user's namespace."""
if not self.exec_lines:
return
try:
self.log.debug("Running code from IPythonApp.exec_lines...")
for line in self.exec_lines:
try:
self.log.info("Running code in user namespace: %s" %
line)
self.shell.run_cell(line, store_history=False)
except:
self.log.warn("Error in executing line in user "
"namespace: %s" % line)
self.shell.showtraceback()
except:
self.log.warn("Unknown error in handling IPythonApp.exec_lines:")
self.shell.showtraceback()
def _exec_file(self, fname):
try:
full_filename = filefind(fname, [u'.', self.ipython_dir])
except IOError:
self.log.warn("File not found: %r" % fname)
return
# Make sure that the running script gets a proper sys.argv as if it
# were run from a system shell.
save_argv = sys.argv
sys.argv = [full_filename] + self.extra_args[1:]
# protect sys.argv from potential unicode strings on Python 2:
if not py3compat.PY3:
sys.argv = [py3compat.cast_bytes(a) for a in sys.argv]
try:
if os.path.isfile(full_filename):
self.log.info("Running file in user namespace: %s" %
full_filename)
# Ensure that __file__ is always defined to match Python
# behavior.
with preserve_keys(self.shell.user_ns, '__file__'):
self.shell.user_ns['__file__'] = fname
if full_filename.endswith('.ipy'):
self.shell.safe_execfile_ipy(full_filename)
else:
# default to python, even without extension
self.shell.safe_execfile(full_filename,
self.shell.user_ns)
finally:
sys.argv = save_argv
def _run_startup_files(self):
"""Run files from profile startup directory"""
startup_dir = self.profile_dir.startup_dir
startup_files = []
if self.exec_PYTHONSTARTUP and not (self.file_to_run or self.code_to_run or self.module_to_run):
if os.environ.get('PYTHONSTARTUP', False):
startup_files.append(os.environ['PYTHONSTARTUP'])
startup_files += glob.glob(os.path.join(startup_dir, '*.py'))
startup_files += glob.glob(os.path.join(startup_dir, '*.ipy'))
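# All collected files, including the PYTHONSTARTUP file when present, are executed below in sorted filename order.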
if not startup_files:
return
self.log.debug("Running startup files from %s...", startup_dir)
try:
for fname in sorted(startup_files):
self._exec_file(fname)
except:
self.log.warn("Unknown error in handling startup files:")
self.shell.showtraceback()
def _run_exec_files(self):
"""Run files from IPythonApp.exec_files"""
if not self.exec_files:
return
self.log.debug("Running files in IPythonApp.exec_files...")
try:
for fname in self.exec_files:
self._exec_file(fname)
except:
self.log.warn("Unknown error in handling IPythonApp.exec_files:")
self.shell.showtraceback()
def _run_cmd_line_code(self):
"""Run code or file specified at the command-line"""
if self.code_to_run:
line = self.code_to_run
try:
self.log.info("Running code given at command line (c=): %s" %
line)
self.shell.run_cell(line, store_history=False)
except:
self.log.warn("Error in executing line in user namespace: %s" %
line)
self.shell.showtraceback()
# Like Python itself, ignore the second if the first of these is
# present
elif self.file_to_run:
fname = self.file_to_run
try:
self._exec_file(fname)
except:
self.log.warn("Error in executing file in user namespace: %s" %
fname)
self.shell.showtraceback()
def _run_module(self):
"""Run module specified at the command-line."""
if self.module_to_run:
# Make sure that the module gets a proper sys.argv as if it were
# run using `python -m`.
save_argv = sys.argv
sys.argv = [sys.executable] + self.extra_args
try:
self.shell.safe_run_module(self.module_to_run,
self.shell.user_ns)
finally:
sys.argv = save_argv
| [] | [] | [
"PYTHONSTARTUP"
] | [] | ["PYTHONSTARTUP"] | python | 1 | 0 | |
examples/mnist/jax_mnist_train.py | import os
import argparse
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.training import train_state
import optax
import grn
class Model(nn.Module):
training: bool
@nn.compact
def __call__(self, x):
x = x.reshape((len(x), -1)) # flatten
x = nn.Dense(features=128)(x)
x = nn.relu(x)
x = nn.Dropout(0.2)(x, deterministic=not self.training)
x = nn.Dense(features=10)(x)
return x
@grn.job()
def train_job(epochs: int, batch_size: int) -> dict:
# Force JAX to not preallocate 90% of the GPU.
# Instead use a dynamic growth policy.
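# (Setting the variable here still takes effect because the XLA backend is initialized lazily, on first use.)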
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE'] = "false"
# Normally we would prefer to use tensorflow_datasets, but
# we already have tensorflow installed for this example.
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = jnp.float32(x_train) / 255.0
x_test = jnp.float32(x_test) / 255.0
def cross_entropy_loss(log_scores, labels):
one_hot = jax.nn.one_hot(labels, num_classes=10)
return -jnp.mean(jnp.sum(one_hot * log_scores, axis=-1))
def cross_entropy_from_logits_loss(logits, labels):
return cross_entropy_loss(nn.log_softmax(logits), labels)
def compute_metrics(logits, labels) -> dict:
loss = cross_entropy_from_logits_loss(logits, labels)
accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)
return dict(
loss=loss,
accuracy=accuracy)
def create_train_state(rng, dropout_rng, learning_rate=1e-3):
model = Model(training=True)
params = model.init(
{'params': rng, 'dropout': dropout_rng},
jnp.ones([1, 28, 28, 1]))['params']
optimizer = optax.adam(learning_rate)
return train_state.TrainState.create(
apply_fn=model.apply,
params=params,
tx=optimizer)
@jax.jit
def train_step(state, batch, dropout_rng):
def loss_func(params):
logits = Model(training=True).apply(
{'params': params}, batch[0],
rngs={'dropout': dropout_rng})
loss = cross_entropy_from_logits_loss(logits, labels=batch[1])
return loss, logits
grad_func = jax.value_and_grad(loss_func, has_aux=True)
(_, logits), grads = grad_func(state.params)
state = state.apply_gradients(grads=grads)
metrics = compute_metrics(logits, labels=batch[1])
return state, metrics
@jax.jit
def eval_step(params, batch):
logits = Model(training=False).apply({'params': params}, batch[0])
return compute_metrics(logits, labels=batch[1])
def train_epoch(state, batch_size, rng, dropout_rng):
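# Shuffle example indices every epoch and drop the tail so each batch has exactly batch_size examples.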
train_ds_size = len(x_train)
steps_per_epoch = train_ds_size // batch_size
perms = jax.random.permutation(rng, train_ds_size)
perms = perms[:steps_per_epoch * batch_size]
perms = perms.reshape((steps_per_epoch, batch_size))
batch_metrics = []
for perm in perms:
batch = (x_train[perm], y_train[perm])
state, metrics = train_step(state, batch, dropout_rng)
batch_metrics.append(metrics)
return state
def eval_model(params, x, y):
metrics = eval_step(params, (x, y))
metrics = jax.device_get(metrics)
summary = jax.tree_map(lambda x: x.item(), metrics)
return summary['loss'], summary['accuracy']
rng = jax.random.PRNGKey(0)
rng, init_rng, init_dropout_rng = jax.random.split(rng, 3)
state = create_train_state(init_rng, init_dropout_rng, learning_rate=1e-3)
del init_rng
# epochs and batch_size are taken from the job's keyword arguments above.
for epoch in range(epochs):
# Create separate PRNG key for shuffling data.
rng, input_rng, dropout_rng = jax.random.split(rng, 3)
state = train_epoch(state, batch_size, input_rng, dropout_rng)
train_loss, train_acc = eval_model(state.params, x_train, y_train)
test_loss, test_acc = eval_model(state.params, x_test, y_test)
print(
f'Epoch {epoch + 1} / {epochs} | '
f'train loss {train_loss}, acc {train_acc} | '
f'test loss {test_loss}, acc {test_acc}')
# Return the trained parameters as a dict, matching the declared return type.
return jax.device_get({'params': state.params})
@train_job.profile
def _():
print('I am a special version of the job used for profiling.')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--batch-size', type=int, default=128)
args = parser.parse_args()
trained_params = train_job(
epochs=args.epochs,
batch_size=args.batch_size)
| [] | [] | [
"XLA_PYTHON_CLIENT_PREALLOCATE"
] | [] | ["XLA_PYTHON_CLIENT_PREALLOCATE"] | python | 1 | 0 | |
common/patreon/poller.go | package patreon
import (
"context"
"github.com/jonas747/patreon-go"
"github.com/jonas747/yagpdb/common"
"github.com/mediocregopher/radix.v3"
"github.com/sirupsen/logrus"
"golang.org/x/oauth2"
"os"
"strconv"
"sync"
"time"
)
type Poller struct {
mu sync.RWMutex
config *oauth2.Config
token *oauth2.Token
client *patreon.Client
activePatrons []*Patron
}
func Run() {
accessToken := os.Getenv("YAGPDB_PATREON_API_ACCESS_TOKEN")
refreshToken := os.Getenv("YAGPDB_PATREON_API_REFRESH_TOKEN")
clientID := os.Getenv("YAGPDB_PATREON_API_CLIENT_ID")
clientSecret := os.Getenv("YAGPDB_PATREON_API_CLIENT_SECRET")
if accessToken == "" || clientID == "" || clientSecret == "" {
PatreonDisabled(nil, "Missing one of YAGPDB_PATREON_API_ACCESS_TOKEN, YAGPDB_PATREON_API_CLIENT_ID, YAGPDB_PATREON_API_CLIENT_SECRET")
return
}
var storedRefreshToken string
common.RedisPool.Do(radix.Cmd(&storedRefreshToken, "GET", "patreon_refresh_token"))
config := &oauth2.Config{
ClientID: clientID,
ClientSecret: clientSecret,
Endpoint: oauth2.Endpoint{
AuthURL: patreon.AuthorizationURL,
TokenURL: patreon.AccessTokenURL,
},
Scopes: []string{"users", "pledges-to-me", "my-campaign"},
}
token := &oauth2.Token{
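// AccessToken is presumably left empty on purpose: combined with the already-expired Expiry below, it makes the oauth2 client refresh immediately using the refresh token.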
AccessToken: "",
RefreshToken: refreshToken,
// Must be non-nil, otherwise token will not be expired
Expiry: time.Now().Add(-24 * time.Hour),
}
tc := oauth2.NewClient(context.Background(), &TokenSourceSaver{inner: config.TokenSource(context.Background(), token)})
// Either use the token provided in the env vars or a cached one in redis
pClient := patreon.NewClient(tc)
user, err := pClient.FetchUser()
if err != nil {
if storedRefreshToken == "" {
PatreonDisabled(err, "Failed fetching current user with env var refresh token, no refresh token stored in redis.")
return
}
logrus.WithError(err).Warn("Patreon: Failed fetching current user with env var refresh token, trying stored token")
tCop := *token
tCop.RefreshToken = storedRefreshToken
tc = oauth2.NewClient(context.Background(), &TokenSourceSaver{inner: config.TokenSource(context.Background(), &tCop)})
pClient = patreon.NewClient(tc)
user, err = pClient.FetchUser()
if err != nil {
PatreonDisabled(err, "Unable to fetch user with redis patreon token.")
return
}
}
poller := &Poller{
config: config,
token: token,
client: pClient,
}
ActivePoller = poller
logrus.Info("Patreon integration activated as ", user.Data.ID, ": ", user.Data.Attributes.FullName)
go poller.Run()
}
func PatreonDisabled(err error, reason string) {
l := logrus.NewEntry(logrus.StandardLogger())
if err != nil {
l = l.WithError(err)
}
l.Warn("Not starting patreon integration, also means that premium statuses wont update. " + reason)
}
func (p *Poller) Run() {
ticker := time.NewTicker(time.Minute)
for {
p.Poll()
<-ticker.C
}
}
func (p *Poller) Poll() {
// Get your campaign data
campaignResponse, err := p.client.FetchCampaign()
if err != nil {
logrus.WithError(err).Error("Patreon: Failed fetching campaign")
return
}
if len(campaignResponse.Data) == 0 {
logrus.Error("Patreon: campaign response contained no campaigns")
return
}
campaignId := campaignResponse.Data[0].ID
cursor := ""
page := 1
patrons := make([]*Patron, 0, 25)
for {
pledgesResponse, err := p.client.FetchPledges(campaignId,
patreon.WithPageSize(25),
patreon.WithCursor(cursor))
if err != nil {
logrus.WithError(err).Error("Patreon: Failed fetching pledges")
return
}
// Get all the users in an easy-to-lookup way
users := make(map[string]*patreon.User)
for _, item := range pledgesResponse.Included.Items {
u, ok := item.(*patreon.User)
if !ok {
continue
}
users[u.ID] = u
}
// Loop over the pledges to get e.g. their amount and user name
for _, pledge := range pledgesResponse.Data {
if !pledge.Attributes.DeclinedSince.Time.IsZero() {
continue
}
user, ok := users[pledge.Relationships.Patron.Data.ID]
if !ok {
continue
}
patron := &Patron{
AmountCents: pledge.Attributes.AmountCents,
Avatar: user.Attributes.ImageURL,
}
if user.Attributes.Vanity != "" {
patron.Name = user.Attributes.Vanity
} else {
patron.Name = user.Attributes.FirstName
}
if user.Attributes.SocialConnections.Discord != nil && user.Attributes.SocialConnections.Discord.UserID != "" {
discordID, _ := strconv.ParseInt(user.Attributes.SocialConnections.Discord.UserID, 10, 64)
patron.DiscordID = discordID
}
patrons = append(patrons, patron)
// fmt.Printf("%s is pledging %d cents, Discord: %d\r\n", patron.Name, patron.AmountCents, patron.DiscordID)
}
// Get the link to the next page of pledges
nextLink := pledgesResponse.Links.Next
if nextLink == "" {
break
}
cursor = nextLink
page++
}
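// The bot owner is always appended as a synthetic patron entry.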
patrons = append(patrons, &Patron{
DiscordID: common.Conf.Owner,
Name: "Owner",
AmountCents: 10000,
})
// Swap in the new slice; this doesn't mutate previously returned slices, so we don't have to copy on each request
p.mu.Lock()
p.activePatrons = patrons
p.mu.Unlock()
}
func (p *Poller) GetPatrons() (patrons []*Patron) {
p.mu.RLock()
patrons = p.activePatrons
p.mu.RUnlock()
return
}
type TokenSourceSaver struct {
inner oauth2.TokenSource
lastRefreshToken string
}
func (t *TokenSourceSaver) Token() (*oauth2.Token, error) {
tk, err := t.inner.Token()
if err == nil {
if t.lastRefreshToken != tk.RefreshToken {
logrus.Info("Patreon: New refresh token")
common.RedisPool.Do(radix.Cmd(nil, "SET", "patreon_refresh_token", tk.RefreshToken))
t.lastRefreshToken = tk.RefreshToken
}
}
return tk, err
}
| [
"\"YAGPDB_PATREON_API_ACCESS_TOKEN\"",
"\"YAGPDB_PATREON_API_REFRESH_TOKEN\"",
"\"YAGPDB_PATREON_API_CLIENT_ID\"",
"\"YAGPDB_PATREON_API_CLIENT_SECRET\""
] | [] | [
"YAGPDB_PATREON_API_CLIENT_ID",
"YAGPDB_PATREON_API_ACCESS_TOKEN",
"YAGPDB_PATREON_API_REFRESH_TOKEN",
"YAGPDB_PATREON_API_CLIENT_SECRET"
] | [] | ["YAGPDB_PATREON_API_CLIENT_ID", "YAGPDB_PATREON_API_ACCESS_TOKEN", "YAGPDB_PATREON_API_REFRESH_TOKEN", "YAGPDB_PATREON_API_CLIENT_SECRET"] | go | 4 | 0 | |
vendor/github.com/rancher/go-rancher/client/common.go | package client
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"regexp"
"time"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
)
const (
SELF = "self"
COLLECTION = "collection"
)
var (
debug = false
dialer = &websocket.Dialer{}
)
type ClientOpts struct {
Url string
AccessKey string
SecretKey string
Timeout time.Duration
}
type ApiError struct {
StatusCode int
Url string
Msg string
Status string
Body string
}
func (e *ApiError) Error() string {
return e.Msg
}
func IsNotFound(err error) bool {
apiError, ok := err.(*ApiError)
if !ok {
return false
}
return apiError.StatusCode == http.StatusNotFound
}
func newApiError(resp *http.Response, url string) *ApiError {
contents, err := ioutil.ReadAll(resp.Body)
var body string
if err != nil {
body = "Unreadable body."
} else {
body = string(contents)
}
data := map[string]interface{}{}
if json.Unmarshal(contents, &data) == nil {
delete(data, "id")
delete(data, "links")
delete(data, "actions")
delete(data, "type")
delete(data, "status")
buf := &bytes.Buffer{}
for k, v := range data {
if v == nil {
continue
}
if buf.Len() > 0 {
buf.WriteString(", ")
}
fmt.Fprintf(buf, "%s=%v", k, v)
}
body = buf.String()
}
formattedMsg := fmt.Sprintf("Bad response statusCode [%d]. Status [%s]. Body: [%s] from [%s]",
resp.StatusCode, resp.Status, body, url)
return &ApiError{
Url: url,
Msg: formattedMsg,
StatusCode: resp.StatusCode,
Status: resp.Status,
Body: body,
}
}
func contains(array []string, item string) bool {
for _, check := range array {
if check == item {
return true
}
}
return false
}
func appendFilters(urlString string, filters map[string]interface{}) (string, error) {
if len(filters) == 0 {
return urlString, nil
}
u, err := url.Parse(urlString)
if err != nil {
return "", err
}
q := u.Query()
for k, v := range filters {
if l, ok := v.([]string); ok {
for _, v := range l {
q.Add(k, v)
}
} else {
q.Add(k, fmt.Sprintf("%v", v))
}
}
u.RawQuery = q.Encode()
return u.String(), nil
}
func setupRancherBaseClient(rancherClient *RancherBaseClientImpl, opts *ClientOpts) error {
if opts.Timeout == 0 {
opts.Timeout = time.Second * 10
}
client := &http.Client{Timeout: opts.Timeout}
req, err := http.NewRequest("GET", opts.Url, nil)
if err != nil {
return err
}
req.SetBasicAuth(opts.AccessKey, opts.SecretKey)
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return newApiError(resp, opts.Url)
}
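// The API advertises its schema collection in the X-API-Schemas header; fetch it if it differs from the bootstrap URL.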
schemasUrls := resp.Header.Get("X-API-Schemas")
if len(schemasUrls) == 0 {
return errors.New("Failed to find schema at [" + opts.Url + "]")
}
if schemasUrls != opts.Url {
req, err = http.NewRequest("GET", schemasUrls, nil)
if err != nil {
return err
}
req.SetBasicAuth(opts.AccessKey, opts.SecretKey)
resp, err = client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return newApiError(resp, opts.Url)
}
}
var schemas Schemas
bytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
err = json.Unmarshal(bytes, &schemas)
if err != nil {
return err
}
rancherClient.Opts = opts
rancherClient.Schemas = &schemas
for _, schema := range schemas.Data {
rancherClient.Types[schema.Id] = schema
}
return nil
}
func NewListOpts() *ListOpts {
return &ListOpts{
Filters: map[string]interface{}{},
}
}
func (rancherClient *RancherBaseClientImpl) setupRequest(req *http.Request) {
req.SetBasicAuth(rancherClient.Opts.AccessKey, rancherClient.Opts.SecretKey)
}
func (rancherClient *RancherBaseClientImpl) newHttpClient() *http.Client {
if rancherClient.Opts.Timeout == 0 {
rancherClient.Opts.Timeout = time.Second * 10
}
return &http.Client{Timeout: rancherClient.Opts.Timeout}
}
func (rancherClient *RancherBaseClientImpl) doDelete(url string) error {
client := rancherClient.newHttpClient()
req, err := http.NewRequest("DELETE", url, nil)
if err != nil {
return err
}
rancherClient.setupRequest(req)
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
io.Copy(ioutil.Discard, resp.Body)
if resp.StatusCode >= 300 {
return newApiError(resp, url)
}
return nil
}
func (rancherClient *RancherBaseClientImpl) Websocket(url string, headers map[string][]string) (*websocket.Conn, *http.Response, error) {
return dialer.Dial(url, http.Header(headers))
}
func (rancherClient *RancherBaseClientImpl) doGet(url string, opts *ListOpts, respObject interface{}) error {
if opts == nil {
opts = NewListOpts()
}
url, err := appendFilters(url, opts.Filters)
if err != nil {
return err
}
if debug {
fmt.Println("GET " + url)
}
client := rancherClient.newHttpClient()
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
rancherClient.setupRequest(req)
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return newApiError(resp, url)
}
byteContent, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if debug {
fmt.Println("Response <= " + string(byteContent))
}
if err := json.Unmarshal(byteContent, respObject); err != nil {
return errors.Wrap(err, fmt.Sprintf("Failed to parse: %s", byteContent))
}
return nil
}
func (rancherClient *RancherBaseClientImpl) List(schemaType string, opts *ListOpts, respObject interface{}) error {
return rancherClient.doList(schemaType, opts, respObject)
}
func (rancherClient *RancherBaseClientImpl) doList(schemaType string, opts *ListOpts, respObject interface{}) error {
schema, ok := rancherClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.CollectionMethods, "GET") {
return errors.New("Resource type [" + schemaType + "] is not listable")
}
collectionUrl, ok := schema.Links[COLLECTION]
if !ok {
return errors.New("Failed to find collection URL for [" + schemaType + "]")
}
return rancherClient.doGet(collectionUrl, opts, respObject)
}
func (rancherClient *RancherBaseClientImpl) Post(url string, createObj interface{}, respObject interface{}) error {
return rancherClient.doModify("POST", url, createObj, respObject)
}
func (rancherClient *RancherBaseClientImpl) GetLink(resource Resource, link string, respObject interface{}) error {
url := resource.Links[link]
if url == "" {
return fmt.Errorf("Failed to find link: %s", link)
}
return rancherClient.doGet(url, &ListOpts{}, respObject)
}
func (rancherClient *RancherBaseClientImpl) doModify(method string, url string, createObj interface{}, respObject interface{}) error {
bodyContent, err := json.Marshal(createObj)
if err != nil {
return err
}
if debug {
fmt.Println(method + " " + url)
fmt.Println("Request => " + string(bodyContent))
}
client := rancherClient.newHttpClient()
req, err := http.NewRequest(method, url, bytes.NewBuffer(bodyContent))
if err != nil {
return err
}
rancherClient.setupRequest(req)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Content-Length", string(len(bodyContent)))
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode >= 300 {
return newApiError(resp, url)
}
byteContent, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if len(byteContent) > 0 {
if debug {
fmt.Println("Response <= " + string(byteContent))
}
return json.Unmarshal(byteContent, respObject)
}
return nil
}
func (rancherClient *RancherBaseClientImpl) Create(schemaType string, createObj interface{}, respObject interface{}) error {
return rancherClient.doCreate(schemaType, createObj, respObject)
}
func (rancherClient *RancherBaseClientImpl) doCreate(schemaType string, createObj interface{}, respObject interface{}) error {
if createObj == nil {
createObj = map[string]string{}
}
if respObject == nil {
respObject = &map[string]interface{}{}
}
schema, ok := rancherClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.CollectionMethods, "POST") {
return errors.New("Resource type [" + schemaType + "] is not creatable")
}
var collectionUrl string
collectionUrl, ok = schema.Links[COLLECTION]
if !ok {
// return errors.New("Failed to find collection URL for [" + schemaType + "]")
// This is a hack to address https://github.com/rancher/cattle/issues/254
re := regexp.MustCompile("schemas.*")
collectionUrl = re.ReplaceAllString(schema.Links[SELF], schema.PluralName)
}
return rancherClient.doModify("POST", collectionUrl, createObj, respObject)
}
func (rancherClient *RancherBaseClientImpl) Update(schemaType string, existing *Resource, updates interface{}, respObject interface{}) error {
return rancherClient.doUpdate(schemaType, existing, updates, respObject)
}
func (rancherClient *RancherBaseClientImpl) doUpdate(schemaType string, existing *Resource, updates interface{}, respObject interface{}) error {
if existing == nil {
return errors.New("Existing object is nil")
}
selfUrl, ok := existing.Links[SELF]
if !ok {
return errors.New(fmt.Sprintf("Failed to find self URL of [%v]", existing))
}
if updates == nil {
updates = map[string]string{}
}
if respObject == nil {
respObject = &map[string]interface{}{}
}
schema, ok := rancherClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.ResourceMethods, "PUT") {
return errors.New("Resource type [" + schemaType + "] is not updatable")
}
return rancherClient.doModify("PUT", selfUrl, updates, respObject)
}
func (rancherClient *RancherBaseClientImpl) ById(schemaType string, id string, respObject interface{}) error {
return rancherClient.doById(schemaType, id, respObject)
}
func (rancherClient *RancherBaseClientImpl) doById(schemaType string, id string, respObject interface{}) error {
schema, ok := rancherClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.ResourceMethods, "GET") {
return errors.New("Resource type [" + schemaType + "] can not be looked up by ID")
}
collectionUrl, ok := schema.Links[COLLECTION]
if !ok {
return errors.New("Failed to find collection URL for [" + schemaType + "]")
}
err := rancherClient.doGet(collectionUrl+"/"+id, nil, respObject)
//TODO check for 404 and return nil, nil
return err
}
func (rancherClient *RancherBaseClientImpl) Delete(existing *Resource) error {
if existing == nil {
return nil
}
return rancherClient.doResourceDelete(existing.Type, existing)
}
func (rancherClient *RancherBaseClientImpl) doResourceDelete(schemaType string, existing *Resource) error {
schema, ok := rancherClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.ResourceMethods, "DELETE") {
return errors.New("Resource type [" + schemaType + "] can not be deleted")
}
selfUrl, ok := existing.Links[SELF]
if !ok {
return errors.New(fmt.Sprintf("Failed to find self URL of [%v]", existing))
}
return rancherClient.doDelete(selfUrl)
}
func (rancherClient *RancherBaseClientImpl) Reload(existing *Resource, output interface{}) error {
selfUrl, ok := existing.Links[SELF]
if !ok {
return errors.New(fmt.Sprintf("Failed to find self URL of [%v]", existing))
}
return rancherClient.doGet(selfUrl, NewListOpts(), output)
}
func (rancherClient *RancherBaseClientImpl) Action(schemaType string, action string,
existing *Resource, inputObject, respObject interface{}) error {
return rancherClient.doAction(schemaType, action, existing, inputObject, respObject)
}
func (rancherClient *RancherBaseClientImpl) doAction(schemaType string, action string,
existing *Resource, inputObject, respObject interface{}) error {
if existing == nil {
return errors.New("Existing object is nil")
}
actionUrl, ok := existing.Actions[action]
if !ok {
return errors.New(fmt.Sprintf("Action [%v] not available on [%v]", action, existing))
}
_, ok = rancherClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
var input io.Reader
if inputObject != nil {
bodyContent, err := json.Marshal(inputObject)
if err != nil {
return err
}
if debug {
fmt.Println("Request => " + string(bodyContent))
}
input = bytes.NewBuffer(bodyContent)
}
client := rancherClient.newHttpClient()
req, err := http.NewRequest("POST", actionUrl, input)
if err != nil {
return err
}
rancherClient.setupRequest(req)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Content-Length", "0")
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode >= 300 {
return newApiError(resp, actionUrl)
}
byteContent, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if debug {
fmt.Println("Response <= " + string(byteContent))
}
return json.Unmarshal(byteContent, respObject)
}
func init() {
debug = os.Getenv("RANCHER_CLIENT_DEBUG") == "true"
if debug {
fmt.Println("Rancher client debug on")
}
}
| [
"\"RANCHER_CLIENT_DEBUG\""
] | [] | [
"RANCHER_CLIENT_DEBUG"
] | [] | ["RANCHER_CLIENT_DEBUG"] | go | 1 | 0 | |
tests/pkg/utils/utils.go | // Copyright (c) 2021 Red Hat, Inc.
// Copyright Contributors to the Open Cluster Management project
package utils
import (
"context"
"encoding/json"
"fmt"
"os"
"os/user"
"path/filepath"
"strings"
"github.com/ghodss/yaml"
"github.com/prometheus/common/log"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/version"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
func NewUnversionedRestClient(url, kubeconfig, ctx string) *rest.RESTClient {
klog.V(5).Infof("Create unversionedRestClient for url %s using kubeconfig path %s\n", url, kubeconfig)
config, err := LoadConfig(url, kubeconfig, ctx)
if err != nil {
panic(err)
}
oldNegotiatedSerializer := config.NegotiatedSerializer
config.NegotiatedSerializer = unstructuredscheme.NewUnstructuredNegotiatedSerializer()
kubeRESTClient, err := rest.UnversionedRESTClientFor(config)
// restore cfg before leaving
defer func(cfg *rest.Config) { cfg.NegotiatedSerializer = oldNegotiatedSerializer }(config)
if err != nil {
panic(err)
}
return kubeRESTClient
}
func NewKubeClient(url, kubeconfig, ctx string) kubernetes.Interface {
klog.V(5).Infof("Create kubeclient for url %s using kubeconfig path %s\n", url, kubeconfig)
config, err := LoadConfig(url, kubeconfig, ctx)
if err != nil {
panic(err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err)
}
return clientset
}
func NewKubeClientDynamic(url, kubeconfig, ctx string) dynamic.Interface {
klog.V(5).Infof("Create kubeclient dynamic for url %s using kubeconfig path %s\n", url, kubeconfig)
config, err := LoadConfig(url, kubeconfig, ctx)
if err != nil {
panic(err)
}
clientset, err := dynamic.NewForConfig(config)
if err != nil {
panic(err)
}
return clientset
}
func NewKubeClientAPIExtension(url, kubeconfig, ctx string) apiextensionsclientset.Interface {
klog.V(5).Infof("Create kubeclient apiextension for url %s using kubeconfig path %s\n", url, kubeconfig)
config, err := LoadConfig(url, kubeconfig, ctx)
if err != nil {
panic(err)
}
clientset, err := apiextensionsclientset.NewForConfig(config)
if err != nil {
panic(err)
}
return clientset
}
// func NewKubeClientDiscovery(url, kubeconfig, ctx string) *discovery.DiscoveryClient {
// klog.V(5).Infof("Create kubeclient discovery for url %s using kubeconfig path %s\n", url, kubeconfig)
// config, err := LoadConfig(url, kubeconfig, ctx)
// if err != nil {
// panic(err)
// }
// clientset, err := discovery.NewDiscoveryClientForConfig(config)
// if err != nil {
// panic(err)
// }
// return clientset
// }
func CreateMCOTestingRBAC(opt TestOptions) error {
// create new service account and new clusterrolebinding and bind the serviceaccount to cluster-admin clusterrole
// then the bearer token can be retrieved from the secret of created serviceaccount
mcoTestingCRBName := "mco-e2e-testing-crb"
mcoTestingSAName := "mco-e2e-testing-sa"
mcoTestingCRB := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: mcoTestingCRBName,
Labels: map[string]string{
"app": "mco-e2e-testing",
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: "cluster-admin",
APIGroup: "rbac.authorization.k8s.io",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: mcoTestingSAName,
Namespace: MCO_NAMESPACE,
},
},
}
if err := CreateCRB(opt, true, mcoTestingCRB); err != nil {
return fmt.Errorf("failed to create clusterrolebing for %s: %v", mcoTestingCRB.GetName(), err)
}
mcoTestingSA := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: mcoTestingSAName,
Namespace: MCO_NAMESPACE,
},
}
if err := CreateSA(opt, true, MCO_NAMESPACE, mcoTestingSA); err != nil {
return fmt.Errorf("failed to create serviceaccount for %s: %v", mcoTestingSA.GetName(), err)
}
return nil
}
func DeleteMCOTestingRBAC(opt TestOptions) error {
// delete the created service account and clusterrolebinding
mcoTestingCRBName := "mco-e2e-testing-crb"
mcoTestingSAName := "mco-e2e-testing-sa"
if err := DeleteCRB(opt, true, mcoTestingCRBName); err != nil {
return err
}
if err := DeleteSA(opt, true, MCO_NAMESPACE, mcoTestingSAName); err != nil {
return err
}
return nil
}
func FetchBearerToken(opt TestOptions) (string, error) {
config, err := LoadConfig(
opt.HubCluster.MasterURL,
opt.KubeConfig,
opt.HubCluster.KubeContext)
if err != nil {
return "", err
}
if config.BearerToken != "" {
return config.BearerToken, nil
}
clientKube := NewKubeClient(opt.HubCluster.MasterURL, opt.KubeConfig, opt.HubCluster.KubeContext)
secretList, err := clientKube.CoreV1().Secrets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{FieldSelector: "type=kubernetes.io/service-account-token"})
if err != nil {
return "", err
}
for _, secret := range secretList.Items {
if secret.GetObjectMeta() != nil && len(secret.GetObjectMeta().GetAnnotations()) > 0 {
annos := secret.GetObjectMeta().GetAnnotations()
sa, saExists := annos["kubernetes.io/service-account.name"]
_, createByExists := annos["kubernetes.io/created-by"]
if saExists && !createByExists && sa == "mco-e2e-testing-sa" {
data := secret.Data
if token, ok := data["token"]; ok {
return string(token), nil
}
}
}
}
return "", fmt.Errorf("failed to get bearer token")
}
func LoadConfig(url, kubeconfig, ctx string) (*rest.Config, error) {
if kubeconfig == "" {
kubeconfig = os.Getenv("KUBECONFIG")
}
klog.V(5).Infof("Kubeconfig path %s\n", kubeconfig)
// If we have an explicit indication of where the kubernetes config lives, read that.
if kubeconfig != "" {
if ctx == "" {
// klog.V(5).Infof("clientcmd.BuildConfigFromFlags with %s and %s", url, kubeconfig)
return clientcmd.BuildConfigFromFlags(url, kubeconfig)
} else {
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig},
&clientcmd.ConfigOverrides{
CurrentContext: ctx,
}).ClientConfig()
}
}
// If not, try the in-cluster config.
if c, err := rest.InClusterConfig(); err == nil {
// log.Print("incluster\n")
return c, nil
}
// If no in-cluster config, try the default location in the user's home directory.
if usr, err := user.Current(); err == nil {
klog.V(5).Infof("clientcmd.BuildConfigFromFlags for url %s using %s\n", url, filepath.Join(usr.HomeDir, ".kube", "config"))
if c, err := clientcmd.BuildConfigFromFlags(url, filepath.Join(usr.HomeDir, ".kube", "config")); err == nil {
return c, nil
}
}
return nil, fmt.Errorf("could not create a valid kubeconfig")
}
// Apply applies a multi-resource YAML file to the cluster described by url, kubeconfig and ctx.
// url - the URL of the cluster
// kubeconfig - the kubeconfig file which contains the ctx
// ctx - the context to use
// yamlB - a byte array containing the resources file
func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error {
yamls := strings.Split(string(yamlB), "---")
// yamlFiles is an []string
for _, f := range yamls {
if len(strings.TrimSpace(f)) == 0 {
continue
}
obj := &unstructured.Unstructured{}
klog.V(5).Infof("obj:%v\n", obj.Object)
err := yaml.Unmarshal([]byte(f), obj)
if err != nil {
return err
}
var kind string
if v, ok := obj.Object["kind"]; !ok {
return fmt.Errorf("kind attribute not found in %s", f)
} else {
kind = v.(string)
}
klog.V(5).Infof("kind: %s\n", kind)
var apiVersion string
if v, ok := obj.Object["apiVersion"]; !ok {
return fmt.Errorf("apiVersion attribute not found in %s", f)
} else {
apiVersion = v.(string)
}
klog.V(5).Infof("apiVersion: %s\n", apiVersion)
clientKube := NewKubeClient(url, kubeconfig, ctx)
clientAPIExtension := NewKubeClientAPIExtension(url, kubeconfig, ctx)
// now use switch over the type of the object
// and match each type-case
switch kind {
case "CustomResourceDefinition":
klog.V(5).Infof("Install CRD: %s\n", f)
obj := &apiextensionsv1beta1.CustomResourceDefinition{}
err = yaml.Unmarshal([]byte(f), obj)
if err != nil {
return err
}
existingObject, errGet := clientAPIExtension.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), obj.Name, metav1.GetOptions{})
if errGet != nil {
_, err = clientAPIExtension.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), obj, metav1.CreateOptions{})
} else {
existingObject.Spec = obj.Spec
klog.Warningf("CRD %s already exists, updating!", existingObject.Name)
_, err = clientAPIExtension.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), existingObject, metav1.UpdateOptions{})
}
case "Namespace":
klog.V(5).Infof("Install %s: %s\n", kind, f)
obj := &corev1.Namespace{}
err = yaml.Unmarshal([]byte(f), obj)
if err != nil {
return err
}
existingObject, errGet := clientKube.CoreV1().Namespaces().Get(context.TODO(), obj.Name, metav1.GetOptions{})
if errGet != nil {
_, err = clientKube.CoreV1().Namespaces().Create(context.TODO(), obj, metav1.CreateOptions{})
} else {
obj.ObjectMeta = existingObject.ObjectMeta
klog.Warningf("%s %s already exists, updating!", obj.Kind, obj.Name)
_, err = clientKube.CoreV1().Namespaces().Update(context.TODO(), existingObject, metav1.UpdateOptions{})
}
case "ServiceAccount":
klog.V(5).Infof("Install %s: %s\n", kind, f)
obj := &corev1.ServiceAccount{}
err = yaml.Unmarshal([]byte(f), obj)
if err != nil {
return err
}
existingObject, errGet := clientKube.CoreV1().ServiceAccounts(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{})
if errGet != nil {
_, err = clientKube.CoreV1().ServiceAccounts(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
} else {
obj.ObjectMeta = existingObject.ObjectMeta
klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name)
_, err = clientKube.CoreV1().ServiceAccounts(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{})
}
case "ClusterRoleBinding":
klog.V(5).Infof("Install %s: %s\n", kind, f)
obj := &rbacv1.ClusterRoleBinding{}
err = yaml.Unmarshal([]byte(f), obj)
if err != nil {
return err
}
existingObject, errGet := clientKube.RbacV1().ClusterRoleBindings().Get(context.TODO(), obj.Name, metav1.GetOptions{})
if errGet != nil {
_, err = clientKube.RbacV1().ClusterRoleBindings().Create(context.TODO(), obj, metav1.CreateOptions{})
} else {
obj.ObjectMeta = existingObject.ObjectMeta
klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name)
_, err = clientKube.RbacV1().ClusterRoleBindings().Update(context.TODO(), obj, metav1.UpdateOptions{})
}
case "Secret":
klog.V(5).Infof("Install %s: %s\n", kind, f)
obj := &corev1.Secret{}
err = yaml.Unmarshal([]byte(f), obj)
if err != nil {
return err
}
existingObject, errGet := clientKube.CoreV1().Secrets(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{})
if errGet != nil {
_, err = clientKube.CoreV1().Secrets(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
} else {
obj.ObjectMeta = existingObject.ObjectMeta
klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name)
_, err = clientKube.CoreV1().Secrets(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{})
}
case "ConfigMap":
klog.V(5).Infof("Install %s: %s\n", kind, f)
obj := &corev1.ConfigMap{}
err = yaml.Unmarshal([]byte(f), obj)
if err != nil {
return err
}
existingObject, errGet := clientKube.CoreV1().ConfigMaps(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{})
if errGet != nil {
_, err = clientKube.CoreV1().ConfigMaps(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
} else {
obj.ObjectMeta = existingObject.ObjectMeta
klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name)
_, err = clientKube.CoreV1().ConfigMaps(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{})
}
case "Service":
klog.V(5).Infof("Install %s: %s\n", kind, f)
obj := &corev1.Service{}
err = yaml.Unmarshal([]byte(f), obj)
if err != nil {
return err
}
existingObject, errGet := clientKube.CoreV1().Services(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{})
if errGet != nil {
_, err = clientKube.CoreV1().Services(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
} else {
obj.ObjectMeta = existingObject.ObjectMeta
klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name)
_, err = clientKube.CoreV1().Services(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{})
}
case "PersistentVolumeClaim":
klog.V(5).Infof("Install %s: %s\n", kind, f)
obj := &corev1.PersistentVolumeClaim{}
err = yaml.Unmarshal([]byte(f), obj)
if err != nil {
return err
}
existingObject, errGet := clientKube.CoreV1().PersistentVolumeClaims(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{})
if errGet != nil {
_, err = clientKube.CoreV1().PersistentVolumeClaims(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
} else {
obj.ObjectMeta = existingObject.ObjectMeta
klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name)
_, err = clientKube.CoreV1().PersistentVolumeClaims(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{})
}
case "Deployment":
klog.V(5).Infof("Install %s: %s\n", kind, f)
obj := &appsv1.Deployment{}
err = yaml.Unmarshal([]byte(f), obj)
if err != nil {
return err
}
existingObject, errGet := clientKube.AppsV1().Deployments(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{})
if errGet != nil {
_, err = clientKube.AppsV1().Deployments(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
} else {
obj.ObjectMeta = existingObject.ObjectMeta
klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name)
_, err = clientKube.AppsV1().Deployments(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{})
}
case "LimitRange":
klog.V(5).Infof("Install %s: %s\n", kind, f)
obj := &corev1.LimitRange{}
err = yaml.Unmarshal([]byte(f), obj)
if err != nil {
return err
}
existingObject, errGet := clientKube.CoreV1().LimitRanges(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{})
if errGet != nil {
_, err = clientKube.CoreV1().LimitRanges(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
} else {
obj.ObjectMeta = existingObject.ObjectMeta
klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name)
_, err = clientKube.CoreV1().LimitRanges(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{})
}
case "ResourceQuota":
klog.V(5).Infof("Install %s: %s\n", kind, f)
obj := &corev1.ResourceQuota{}
err = yaml.Unmarshal([]byte(f), obj)
if err != nil {
return err
}
existingObject, errGet := clientKube.CoreV1().ResourceQuotas(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{})
if errGet != nil {
_, err = clientKube.CoreV1().ResourceQuotas(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
} else {
obj.ObjectMeta = existingObject.ObjectMeta
klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name)
_, err = clientKube.CoreV1().ResourceQuotas(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{})
}
default:
switch kind {
case "MultiClusterObservability":
klog.V(5).Infof("Install MultiClusterObservability: %s\n", f)
default:
return fmt.Errorf("resource %s not supported", kind)
}
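// Default to the v1beta2 GVR; fall back to v1beta1 when the manifest declares the older apiVersion.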
gvr := NewMCOGVRV1BETA2()
if apiVersion == "observability.open-cluster-management.io/v1beta1" {
gvr = NewMCOGVRV1BETA1()
}
// url string, kubeconfig string, ctx string
opt := TestOptions{
HubCluster: Cluster{
MasterURL: url,
KubeContext: ctx,
},
KubeConfig: kubeconfig,
}
clientDynamic := NewKubeClientDynamic(url, kubeconfig, ctx)
if ns := obj.GetNamespace(); ns != "" {
existingObject, errGet := clientDynamic.Resource(gvr).Namespace(ns).Get(context.TODO(), obj.GetName(), metav1.GetOptions{})
if errGet != nil {
if ips, err := GetPullSecret(opt); err == nil {
obj.Object["spec"].(map[string]interface{})["imagePullSecret"] = ips
}
_, err = clientDynamic.Resource(gvr).Namespace(ns).Create(context.TODO(), obj, metav1.CreateOptions{})
} else {
obj.Object["metadata"] = existingObject.Object["metadata"]
klog.Warningf("%s %s/%s already exists, updating!", obj.GetKind(), obj.GetNamespace(), obj.GetName())
_, err = clientDynamic.Resource(gvr).Namespace(ns).Update(context.TODO(), obj, metav1.UpdateOptions{})
}
} else {
existingObject, errGet := clientDynamic.Resource(gvr).Get(context.TODO(), obj.GetName(), metav1.GetOptions{})
if errGet != nil {
if ips, err := GetPullSecret(opt); err == nil {
obj.Object["spec"].(map[string]interface{})["imagePullSecret"] = ips
}
_, err = clientDynamic.Resource(gvr).Create(context.TODO(), obj, metav1.CreateOptions{})
} else {
obj.Object["metadata"] = existingObject.Object["metadata"]
klog.Warningf("%s %s already exists, updating!", obj.GetKind(), obj.GetName())
_, err = clientDynamic.Resource(gvr).Update(context.TODO(), obj, metav1.UpdateOptions{})
}
}
}
if err != nil {
return err
}
}
return nil
}
// StatusContainsTypeEqualTo checks whether u has a status condition whose type equals typeString
func StatusContainsTypeEqualTo(u *unstructured.Unstructured, typeString string) bool {
if u != nil {
if v, ok := u.Object["status"]; ok {
status := v.(map[string]interface{})
if v, ok := status["conditions"]; ok {
conditions := v.([]interface{})
for _, v := range conditions {
condition := v.(map[string]interface{})
if v, ok := condition["type"]; ok {
if v.(string) == typeString {
return true
}
}
}
}
}
}
return false
}
//GetCluster returns the first cluster with a given tag
func GetCluster(tag string, clusters []Cluster) *Cluster {
for _, cluster := range clusters {
if tagged, ok := cluster.Tags[tag]; ok {
if tagged {
return &cluster
}
}
}
return nil
}
//GetClusters returns all clusters with a given tag
func GetClusters(tag string, clusters []Cluster) []*Cluster {
filteredClusters := make([]*Cluster, 0)
for i, cluster := range clusters {
if tagged, ok := cluster.Tags[tag]; ok {
if tagged {
filteredClusters = append(filteredClusters, &clusters[i])
}
}
}
return filteredClusters
}
func HaveServerResources(c Cluster, kubeconfig string, expectedAPIGroups []string) error {
clientAPIExtension := NewKubeClientAPIExtension(c.MasterURL, kubeconfig, c.KubeContext)
clientDiscovery := clientAPIExtension.Discovery()
for _, apiGroup := range expectedAPIGroups {
klog.V(1).Infof("Check if %s exists", apiGroup)
_, err := clientDiscovery.ServerResourcesForGroupVersion(apiGroup)
if err != nil {
klog.V(1).Infof("Error while retrieving server resource %s: %s", apiGroup, err.Error())
return err
}
}
return nil
}
func HaveCRDs(c Cluster, kubeconfig string, expectedCRDs []string) error {
clientAPIExtension := NewKubeClientAPIExtension(c.MasterURL, kubeconfig, c.KubeContext)
clientAPIExtensionV1beta1 := clientAPIExtension.ApiextensionsV1beta1()
for _, crd := range expectedCRDs {
klog.V(1).Infof("Check if %s exists", crd)
_, err := clientAPIExtensionV1beta1.CustomResourceDefinitions().Get(context.TODO(), crd, metav1.GetOptions{})
if err != nil {
klog.V(1).Infof("Error while retrieving crd %s: %s", crd, err.Error())
return err
}
}
return nil
}
func HaveDeploymentsInNamespace(c Cluster, kubeconfig string, namespace string, expectedDeploymentNames []string) error {
client := NewKubeClient(c.MasterURL, kubeconfig, c.KubeContext)
versionInfo, err := client.Discovery().ServerVersion()
if err != nil {
return err
}
klog.V(1).Infof("Server version info: %v", versionInfo)
deployments := client.AppsV1().Deployments(namespace)
for _, deploymentName := range expectedDeploymentNames {
klog.V(1).Infof("Check if deployment %s exists", deploymentName)
deployment, err := deployments.Get(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {
klog.V(1).Infof("Error while retrieving deployment %s: %s", deploymentName, err.Error())
return err
}
if deployment.Status.Replicas != deployment.Status.ReadyReplicas {
err = fmt.Errorf("%s: Expect %d but got %d Ready replicas",
deploymentName,
deployment.Status.Replicas,
deployment.Status.ReadyReplicas)
klog.Errorln(err)
return err
}
for _, condition := range deployment.Status.Conditions {
if condition.Reason == "MinimumReplicasAvailable" {
if condition.Status != corev1.ConditionTrue {
err = fmt.Errorf("%s: Expect %s but got %s",
deploymentName,
condition.Status,
corev1.ConditionTrue)
klog.Errorln(err)
return err
}
}
}
}
return nil
}
func GetKubeVersion(client *rest.RESTClient) version.Info {
kubeVersion := version.Info{}
versionBody, err := client.Get().AbsPath("/version").Do(context.TODO()).Raw()
if err != nil {
log.Error(err, "fail to GET /version")
return version.Info{}
}
err = json.Unmarshal(versionBody, &kubeVersion)
if err != nil {
log.Error(fmt.Errorf("fail to Unmarshal, got '%s': %v", string(versionBody), err), "")
return version.Info{}
}
return kubeVersion
}
func IsOpenshift(client *rest.RESTClient) bool {
//check whether the cluster is openshift or not for openshift version 3.11 and before
_, err := client.Get().AbsPath("/version/openshift").Do(context.TODO()).Raw()
if err == nil {
klog.V(5).Info("Found openshift version from /version/openshift")
return true
}
//check whether the cluster is openshift or not for openshift version 4.1
_, err = client.Get().AbsPath("/apis/config.openshift.io/v1/clusterversions").Do(context.TODO()).Raw()
if err == nil {
klog.V(5).Info("Found openshift version from /apis/config.openshift.io/v1/clusterversions")
return true
}
klog.V(5).Infof("fail to GET openshift version, assuming not OpenShift: %s", err.Error())
return false
}
// IntegrityChecking checks to ensure all required conditions are met when completing the specs
func IntegrityChecking(opt TestOptions) error {
return CheckMCOComponents(opt)
}
// GetPullSecret checks the secret from MCH CR and return the secret name
func GetPullSecret(opt TestOptions) (string, error) {
clientDynamic := NewKubeClientDynamic(
opt.HubCluster.MasterURL,
opt.KubeConfig,
opt.HubCluster.KubeContext)
mchList, err := clientDynamic.Resource(NewOCMMultiClusterHubGVR()).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return "", err
}
if len(mchList.Items) == 0 {
return "", fmt.Errorf("can not find the MCH operator CR in the cluster")
}
mchName := mchList.Items[0].GetName()
mchNs := mchList.Items[0].GetNamespace()
getMCH, err := clientDynamic.Resource(NewOCMMultiClusterHubGVR()).Namespace(mchNs).Get(context.TODO(), mchName, metav1.GetOptions{})
if err != nil {
return "", err
}
spec := getMCH.Object["spec"].(map[string]interface{})
if _, ok := spec["imagePullSecret"]; !ok {
return "", fmt.Errorf("can not find imagePullSecret in MCH CR")
}
ips := spec["imagePullSecret"].(string)
return ips, nil
}
| [
"\"KUBECONFIG\""
] | [] | [
"KUBECONFIG"
] | [] | ["KUBECONFIG"] | go | 1 | 0 | |
molecule/upgrade/tests/test_default.py | # Copyright 2020 Carnegie Mellon University.
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING
# INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON
# UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS
# TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE
# OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE
# MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND
# WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
# Released under a MIT (SEI)-style license, please see license.txt or contact
# [email protected] for full terms.
# [DISTRIBUTION STATEMENT A] This material has been approved for public release
# and unlimited distribution. Please see Copyright notice for non-US
# Government use and distribution.
# CERT is registered in the U.S. Patent and Trademark Office by Carnegie Mellon
# University.
# This Software includes and/or makes use of the following Third-Party Software
# subject to its own license:
# 1. ansible (https://github.com/ansible/ansible/tree/devel/licenses) Copyright
# 2019 Red Hat, Inc.
# 2. molecule
# (https://github.com/ansible-community/molecule/blob/master/LICENSE) Copyright
# 2018 Red Hat, Inc.
# 3. testinfra (https://github.com/philpep/testinfra/blob/master/LICENSE)
# Copyright 2020 Philippe Pepiot.
# DM20-0509
import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize('svc', [
'yaf'
])
def test_svc(host, svc):
service = host.service(svc)
assert service.is_running
# assert service.is_enabled
def test_yaf_version(host):
version = "2.12.1"
command = """PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/lib/pkgconfig pkg-config \
--modversion libyaf"""
cmd = host.run(command)
assert version in cmd.stdout
| [] | [] | [
"MOLECULE_INVENTORY_FILE"
] | [] | ["MOLECULE_INVENTORY_FILE"] | python | 1 | 0 | |
tests/scalenet_test.py | from __future__ import absolute_import, print_function
import unittest
import os
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import regularizers
from niftynet.network.scalenet import ScaleNet
from tests.niftynet_testcase import NiftyNetTestCase
@unittest.skipIf(os.environ.get('QUICKTEST', "").lower() == "true", 'Skipping slow tests')
class ScaleNetTest(NiftyNetTestCase):
def test_3d_shape(self):
input_shape = (2, 32, 32, 32, 4)
x = tf.ones(input_shape)
scalenet_layer = ScaleNet(num_classes=5)
out = scalenet_layer(x, is_training=True)
print(scalenet_layer.num_trainable_params())
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
out = sess.run(out)
self.assertAllClose((2, 32, 32, 32, 5), out.shape)
def test_2d_shape(self):
input_shape = (2, 32, 32, 4)
x = tf.ones(input_shape)
scalenet_layer = ScaleNet(num_classes=5)
out = scalenet_layer(x, is_training=True)
print(scalenet_layer.num_trainable_params())
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
out = sess.run(out)
self.assertAllClose((2, 32, 32, 5), out.shape)
def test_3d_reg_shape(self):
input_shape = (2, 32, 32, 32, 4)
x = tf.ones(input_shape)
scalenet_layer = ScaleNet(num_classes=5,
w_regularizer=regularizers.l2_regularizer(
0.3))
out = scalenet_layer(x, is_training=True)
print(scalenet_layer.num_trainable_params())
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
out = sess.run(out)
self.assertAllClose((2, 32, 32, 32, 5), out.shape)
def test_2d_reg_shape(self):
input_shape = (2, 32, 32, 4)
x = tf.ones(input_shape)
scalenet_layer = ScaleNet(num_classes=5,
w_regularizer=regularizers.l2_regularizer(
0.3))
out = scalenet_layer(x, is_training=True)
print(scalenet_layer.num_trainable_params())
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
out = sess.run(out)
self.assertAllClose((2, 32, 32, 5), out.shape)
if __name__ == "__main__":
tf.test.main()
| [] | [] | [
"QUICKTEST"
] | [] | ["QUICKTEST"] | python | 1 | 0 | |
python/ray/tests/test_advanced_2.py | # coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import sys
import time
import numpy as np
import pytest
import ray
import ray.cluster_utils
import ray.test_utils
from ray.test_utils import RayTestTimeoutException
logger = logging.getLogger(__name__)
def test_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 2
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_multi_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 2
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_gpu_ids(shutdown_only):
num_gpus = 3
ray.init(num_cpus=num_gpus, num_gpus=num_gpus)
def get_gpu_ids(num_gpus_per_worker):
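        # Ray sets CUDA_VISIBLE_DEVICES inside each worker to exactly the GPU
        # ids assigned to the task; the assertions below verify that.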
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == num_gpus_per_worker
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))
f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))
f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.2)
return os.getpid()
start_time = time.time()
while True:
num_workers_started = len(
set(ray.get([f.remote() for _ in range(num_gpus)])))
if num_workers_started == num_gpus:
break
if time.time() > start_time + 10:
raise RayTestTimeoutException(
"Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
ray.get([f1.remote() for _ in range(10)])
ray.get([f2.remote() for _ in range(10)])
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
# We should be able to execute a task that requires 0 CPU resources.
@ray.remote(num_cpus=0)
def f():
return 1
ray.get(f.remote())
# We should be able to create an actor that requires 0 CPU resources.
@ray.remote(num_cpus=0)
class Actor(object):
def method(self):
pass
a = Actor.remote()
x = a.method.remote()
ray.get(x)
def test_zero_cpus_actor(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(address=cluster.address)
node_id = ray.worker.global_worker.node.unique_id
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.node.unique_id
# Make sure tasks and actors run on the remote raylet.
a = Foo.remote()
assert ray.get(a.method.remote()) != node_id
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
@ray.remote(num_gpus=0.5)
class Foo1(object):
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
foos = [Foo1.remote() for _ in range(6)]
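    # With 3 GPUs and 0.5 GPU per actor, six actors fit exactly, so each GPU
    # id should be handed out to two actors.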
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2(object):
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
# Make sure that we cannot create an actor that requires 0.7 of the
# custom resource. TODO(rkn): Re-enable this once ray.wait is
# implemented.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
    # resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_raylets(ray_start_cluster):
# This test will define a bunch of tasks that can only be assigned to
# specific raylets, and we will check that they are assigned
# to the correct raylets.
cluster = ray_start_cluster
cluster.add_node(num_cpus=11, num_gpus=0)
cluster.add_node(num_cpus=5, num_gpus=5)
cluster.add_node(num_cpus=10, num_gpus=1)
ray.init(address=cluster.address)
cluster.wait_for_nodes()
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and raylets (at least right now), this can be
# used to identify which raylet the task was assigned to.
# This must be run on the zeroth raylet.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the first raylet.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the second raylet.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.node.plasma_store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the first or second raylet.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the zeroth or second raylet.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.node.plasma_store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
client_table = ray.nodes()
store_names = []
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 0
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 5
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 1
]
assert len(store_names) == 3
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert (result in [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
ray.init(address=cluster.address)
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.node.unique_id
# The f tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(500)]))) == 2
node_id = ray.worker.global_worker.node.unique_id
# The g tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != node_id
# Make sure that resource bookkeeping works when a task that uses a
# custom resources gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_node_id_resource(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3)
cluster.add_node(num_cpus=3)
ray.init(address=cluster.address)
local_node = ray.state.current_node_id()
# Note that these will have the same IP in the test cluster
assert len(ray.state.node_ids()) == 2
assert local_node in ray.state.node_ids()
@ray.remote(resources={local_node: 1})
def f():
return ray.state.current_node_id()
# Check the node id resource is automatically usable for scheduling.
assert ray.get(f.remote()) == ray.state.current_node_id()
def test_two_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 1,
"CustomResource2": 2
})
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 3,
"CustomResource2": 4
})
ray.init(address=cluster.address)
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
# The f and g tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(500)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(500)]))) == 2
node_id = ray.worker.global_worker.node.unique_id
# The h tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != node_id
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {
str(i): total_resources[str(i)]
for i in permuted_resources
}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
# TODO: 5 retry attempts may be too little for Travis and we may need to
# increase it if this test begins to be flaky on Travis.
def test_zero_capacity_deletion_semantics(shutdown_only):
ray.init(num_cpus=2, num_gpus=1, resources={"test_resource": 1})
def test():
resources = ray.available_resources()
MAX_RETRY_ATTEMPTS = 5
retry_count = 0
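        # memory, object_store_memory and the per-node resources are always
        # reported by ray.available_resources(), so strip them before checking
        # that everything else has been fully consumed.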
del resources["memory"]
del resources["object_store_memory"]
for key in list(resources.keys()):
if key.startswith("node:"):
del resources[key]
while resources and retry_count < MAX_RETRY_ATTEMPTS:
time.sleep(0.1)
resources = ray.available_resources()
retry_count += 1
if retry_count >= MAX_RETRY_ATTEMPTS:
raise RuntimeError(
"Resources were available even after five retries.", resources)
return resources
function = ray.remote(
num_cpus=2, num_gpus=1, resources={"test_resource": 1})(test)
cluster_resources = ray.get(function.remote())
# All cluster resources should be utilized and
# cluster_resources must be empty
assert cluster_resources == {}
@pytest.fixture
def save_gpu_ids_shutdown_only():
    # Record the current value of this environment variable so that we can
# reset it after the test.
original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
# Reset the environment variable.
if original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def test_specific_gpus(save_gpu_ids_shutdown_only):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(ray_start_regular):
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.get(object_ids)
@ray.remote
def h(i):
# Each instance of g submits and blocks on the result of another
# remote task using ray.wait.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.wait(object_ids, num_returns=len(object_ids))
ray.get([h.remote(i) for i in range(4)])
@ray.remote
def _sleep(i):
time.sleep(0.01)
return (i)
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes some time to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def test_max_call_tasks(ray_start_regular):
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.test_utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
assert pid1 == pid2
ray.test_utils.wait_for_pid_to_exit(pid1)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
internal/tests/kvtest.go | // Copyright 2017-2019 Lei Ni ([email protected])
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package tests contains various helper functions and modules used in tests.
This package is internally used by Dragonboat; applications are not expected
to import this package.
*/
package tests
import (
"crypto/md5"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"path/filepath"
"sync"
"time"
"github.com/golang/protobuf/proto"
"github.com/lni/dragonboat/internal/tests/kvpb"
"github.com/lni/dragonboat/internal/utils/random"
"github.com/lni/dragonboat/statemachine"
)
// random delays
func generateRandomDelay() {
v := rand.Uint64()
if v%10000 == 0 {
time.Sleep(300 * time.Millisecond)
} else if v%1000 == 0 {
time.Sleep(100 * time.Millisecond)
} else if v%100 == 0 {
time.Sleep(10 * time.Millisecond)
} else if v%20 == 0 {
time.Sleep(2 * time.Millisecond)
}
}
func getLargeRandomDelay() uint64 {
	// in IO error injection tests, we don't want such delays
ioei := os.Getenv("IOEI")
if len(ioei) > 0 {
return 0
}
v := rand.Uint64() % 100
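	// weighted delay in ms: 1% of calls get 30s, 9% get 1s, 20% get 500ms,
	// 20% get 100ms, and the remaining half get 50ms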
if v == 0 {
return 30 * 1000
}
if v < 10 {
return 1 * 1000
}
if v < 30 {
return 500
}
if v < 50 {
return 100
}
return 50
}
// KVTest is an in-memory key-value store struct used for testing purposes.
// Note that both keys and values are supposed to be valid UTF-8 strings.
type KVTest struct {
ClusterID uint64 `json:"-"`
NodeID uint64 `json:"-"`
KVStore map[string]string `json:"KVStore"`
Count uint64 `json:"Count"`
Junk []byte `json:"Junk"`
closed bool
aborted bool `json:"-"`
externalFileTest bool
pbkvPool *sync.Pool
}
// NewKVTest creates and returns a new KVTest object.
func NewKVTest(clusterID uint64, nodeID uint64) statemachine.IStateMachine {
fmt.Println("kvtest with stoppable snapshot created")
s := &KVTest{
KVStore: make(map[string]string),
ClusterID: clusterID,
NodeID: nodeID,
Junk: make([]byte, 3*1024),
}
v := os.Getenv("EXTERNALFILETEST")
s.externalFileTest = len(v) > 0
fmt.Printf("junk data inserted, external file test %t\n", s.externalFileTest)
// write some junk data consistent across the cluster
for i := 0; i < len(s.Junk); i++ {
s.Junk[i] = 2
}
s.pbkvPool = &sync.Pool{
New: func() interface{} {
return &kvpb.PBKV{}
},
}
return s
}
// Lookup performs a local lookup for the specified data.
func (s *KVTest) Lookup(key []byte) []byte {
if s.closed {
panic("lookup called after Close()")
}
if s.aborted {
panic("Lookup() called after abort set to true")
}
v, ok := s.KVStore[string(key)]
generateRandomDelay()
if ok {
return []byte(v)
}
return []byte("")
}
// Update updates the object using the specified committed raft entry.
func (s *KVTest) Update(data []byte) uint64 {
s.Count++
if s.aborted {
panic("update() called after abort set to true")
}
if s.closed {
panic("update called after Close()")
}
generateRandomDelay()
dataKv := s.pbkvPool.Get().(*kvpb.PBKV)
err := proto.Unmarshal(data, dataKv)
if err != nil {
panic(err)
}
s.updateStore(dataKv.GetKey(), dataKv.GetVal())
s.pbkvPool.Put(dataKv)
return uint64(len(data))
}
func (s *KVTest) saveExternalFile(fileCollection statemachine.ISnapshotFileCollection) {
dir, err := os.Getwd()
if err != nil {
panic(err)
}
rn := random.LockGuardedRand.Uint64()
fn := fmt.Sprintf("external-%d-%d-%d-%d.data",
s.ClusterID, s.NodeID, s.Count, rn)
fp := filepath.Join(dir, fn)
f, err := os.Create(fp)
if err != nil {
panic(err)
}
content := fmt.Sprintf("external-test-data-%d", s.Count)
_, err = f.Write([]byte(content))
if err != nil {
panic(err)
}
if err = f.Close(); err != nil {
panic(err)
}
fmt.Printf("adding an external file, path %s", fp)
fileCollection.AddFile(1, fp, []byte(content))
}
func checkExternalFile(files []statemachine.SnapshotFile, clusterID uint64) {
if len(files) != 1 {
panic("snapshot external file missing")
}
fr := files[0]
if fr.FileID != 1 {
panic("FileID value not expected")
}
wcontent := string(fr.Metadata)
content, err := ioutil.ReadFile(fr.Filepath)
if err != nil {
panic(err)
}
if string(content) != wcontent {
panic(fmt.Sprintf("unexpected external file content got %s, want %s, fp %s",
string(content), wcontent, fr.Filepath))
}
log.Printf("external file check done")
}
// SaveSnapshot saves the current object state into a snapshot using the
// specified io.Writer object.
func (s *KVTest) SaveSnapshot(w io.Writer,
fileCollection statemachine.ISnapshotFileCollection,
done <-chan struct{}) (uint64, error) {
if s.closed {
panic("save snapshot called after Close()")
}
if s.externalFileTest {
s.saveExternalFile(fileCollection)
}
delay := getLargeRandomDelay()
fmt.Printf("random delay %d ms\n", delay)
for delay > 0 {
delay -= 10
time.Sleep(10 * time.Millisecond)
select {
case <-done:
return 0, statemachine.ErrSnapshotStopped
default:
}
}
data, err := json.Marshal(s)
if err != nil {
panic(err)
}
n, err := w.Write(data)
if err != nil {
return 0, err
}
if n != len(data) {
panic("didn't write the whole data buf")
}
return uint64(len(data)), nil
}
// RecoverFromSnapshot recovers the state using the provided snapshot.
func (s *KVTest) RecoverFromSnapshot(r io.Reader,
files []statemachine.SnapshotFile,
done <-chan struct{}) error {
if s.closed {
panic("recover from snapshot called after Close()")
}
if s.externalFileTest {
checkExternalFile(files, s.ClusterID)
}
delay := getLargeRandomDelay()
fmt.Printf("random delay %d ms\n", delay)
for delay > 0 {
delay -= 10
time.Sleep(10 * time.Millisecond)
select {
case <-done:
s.aborted = true
return statemachine.ErrSnapshotStopped
default:
}
}
var store KVTest
data, err := ioutil.ReadAll(r)
if err != nil {
return err
}
if err := json.Unmarshal(data, &store); err != nil {
return err
}
if store.aborted {
panic("snapshot image contains aborted==true")
}
s.KVStore = store.KVStore
s.Count = store.Count
s.Junk = store.Junk
return nil
}
// Close closes the IStateMachine instance
func (s *KVTest) Close() {
s.closed = true
log.Printf("%d:%dKVStore has been closed", s.ClusterID, s.NodeID)
}
// GetHash returns a uint64 representing the current object state.
func (s *KVTest) GetHash() uint64 {
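	// The hash is the first 8 bytes (little-endian) of the md5 digest of the
	// JSON-serialized state, so replicas with identical state agree on it.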
data, err := json.Marshal(s)
if err != nil {
panic(err)
}
hash := md5.New()
if _, err = hash.Write(data); err != nil {
panic(err)
}
md5sum := hash.Sum(nil)
return binary.LittleEndian.Uint64(md5sum[:8])
}
func (s *KVTest) updateStore(key string, value string) {
s.KVStore[key] = value
}
| [
"\"IOEI\"",
"\"EXTERNALFILETEST\""
] | [] | [
"IOEI",
"EXTERNALFILETEST"
] | [] | ["IOEI", "EXTERNALFILETEST"] | go | 2 | 0 | |
examples/suppression/GetSpecificBlock.java | import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sendgrid.*;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
//////////////////////////////////////////////////////////////////
// Retrieve a specific block
// GET /suppression/blocks/{email}
public class GetSpecificBlock {
public static void main(String[] args) throws IOException {
try {
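      // Assumes a valid API key is exported in the SENDGRID_API_KEY
      // environment variable.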
SendGrid sg = new SendGrid(System.getenv("SENDGRID_API_KEY"));
Request request = new Request();
request.setMethod(Method.GET);
request.setEndpoint("suppression/blocks/{email}");
Response response = sg.api(request);
System.out.println(response.getStatusCode());
System.out.println(response.getBody());
System.out.println(response.getHeaders());
} catch (IOException ex) {
throw ex;
}
}
} | [
"\"SENDGRID_API_KEY\""
] | [] | [
"SENDGRID_API_KEY"
] | [] | ["SENDGRID_API_KEY"] | java | 1 | 0 | |
tests/k8s/test_k8s.py | # kind version has to be bumped to v0.11.1 since pytest-kind is just using v0.10.0 which does not work on ubuntu in ci
import pytest
import os
import asyncio
import yaml
from pytest_kind import cluster
from docarray import DocumentArray
from jina import Executor, Flow, Document, requests
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.deployments.config.k8s import K8sDeploymentConfig
from jina.parsers import set_deployment_parser
from jina.serve.networking import K8sGrpcConnectionPool
cluster.KIND_VERSION = 'v0.11.1'
async def create_all_flow_deployments_and_wait_ready(
flow_dump_path,
namespace,
api_client,
app_client,
core_client,
deployment_replicas_expected,
logger,
):
from kubernetes import utils
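    # Kubernetes namespace names must be lowercase RFC 1123 labels, so
    # normalize the name before creating the Namespace object.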
namespace = namespace.lower()
namespace_object = {
'apiVersion': 'v1',
'kind': 'Namespace',
'metadata': {'name': f'{namespace}'},
}
try:
logger.info(f'create Namespace {namespace}')
utils.create_from_dict(api_client, namespace_object)
except:
pass
while True:
ns_items = core_client.list_namespace().items
if any(item.metadata.name == namespace for item in ns_items):
logger.info(f'created Namespace {namespace}')
break
logger.info(f'waiting for Namespace {namespace}')
await asyncio.sleep(1.0)
deployment_set = set(os.listdir(flow_dump_path))
for deployment_name in deployment_set:
file_set = set(os.listdir(os.path.join(flow_dump_path, deployment_name)))
for file in file_set:
try:
utils.create_from_yaml(
api_client,
yaml_file=os.path.join(flow_dump_path, deployment_name, file),
namespace=namespace,
)
except Exception as e:
# some objects are not successfully created since they exist from previous files
logger.info(
                    f'Did not create resource from {file} for deployment {deployment_name} due to {e}'
)
pass
# wait for all the pods to be up
expected_deployments = sum(deployment_replicas_expected.values())
while True:
namespaced_pods = core_client.list_namespaced_pod(namespace)
if (
namespaced_pods.items is not None
and len(namespaced_pods.items) == expected_deployments
):
break
logger.info(
f'Waiting for all {expected_deployments} Deployments to be created, only got {len(namespaced_pods.items) if namespaced_pods.items is not None else None}'
)
await asyncio.sleep(1.0)
# wait for all the pods to be up
resp = app_client.list_namespaced_deployment(namespace=namespace)
deployment_names = set([item.metadata.name for item in resp.items])
assert deployment_names == set(deployment_replicas_expected.keys())
while len(deployment_names) > 0:
deployments_ready = []
for deployment_name in deployment_names:
api_response = app_client.read_namespaced_deployment(
name=deployment_name, namespace=namespace
)
expected_num_replicas = deployment_replicas_expected[deployment_name]
if (
api_response.status.ready_replicas is not None
and api_response.status.ready_replicas == expected_num_replicas
):
deployments_ready.append(deployment_name)
for deployment_name in deployments_ready:
deployment_names.remove(deployment_name)
logger.info(f'Waiting for {deployment_names} to be ready')
await asyncio.sleep(1.0)
async def run_test(flow, core_client, namespace, endpoint, n_docs=10, request_size=100):
# start port forwarding
from jina.clients import Client
gateway_pod_name = (
core_client.list_namespaced_pod(
namespace=namespace, label_selector='app=gateway'
)
.items[0]
.metadata.name
)
config_path = os.environ['KUBECONFIG']
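    # portforward needs an explicit kubeconfig path; this assumes KUBECONFIG
    # is set in the test environment.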
import portforward
with portforward.forward(
namespace, gateway_pod_name, flow.port_expose, flow.port_expose, config_path
):
client_kwargs = dict(
host='localhost',
port=flow.port_expose,
return_responses=True,
asyncio=True,
)
client_kwargs.update(flow._common_kwargs)
client = Client(**client_kwargs)
client.show_progress = True
responses = []
async for resp in client.post(
endpoint,
inputs=[Document() for _ in range(n_docs)],
request_size=request_size,
):
responses.append(resp)
return responses
@pytest.fixture()
def k8s_flow_with_sharding(docker_images, polling):
flow = Flow(name='test-flow-with-sharding', port_expose=9090, protocol='http').add(
name='test_executor',
shards=2,
replicas=2,
uses=f'docker://{docker_images[0]}',
uses_after=f'docker://{docker_images[1]}',
polling=polling,
)
return flow
@pytest.fixture
def k8s_flow_configmap(docker_images):
flow = Flow(name='k8s-flow-configmap', port_expose=9090, protocol='http').add(
name='test_executor',
uses=f'docker://{docker_images[0]}',
env={'k1': 'v1', 'k2': 'v2'},
)
return flow
@pytest.fixture
def k8s_flow_gpu(docker_images):
flow = Flow(name='k8s-flow-gpu', port_expose=9090, protocol='http').add(
name='test_executor',
uses=f'docker://{docker_images[0]}',
gpus=1,
)
return flow
@pytest.fixture
def k8s_flow_with_reload_executor(docker_images):
flow = Flow(name='test-flow-with-reload', port_expose=9090, protocol='http').add(
name='test_executor',
replicas=2,
uses_with={'argument': 'value1'},
uses=f'docker://{docker_images[0]}',
)
return flow
@pytest.fixture
def k8s_flow_scale(docker_images, shards):
DEFAULT_REPLICAS = 2
flow = Flow(name='test-flow-scale', port_expose=9090, protocol='http').add(
name='test_executor',
shards=shards,
replicas=DEFAULT_REPLICAS,
)
return flow
@pytest.fixture
def k8s_flow_with_needs(docker_images):
flow = (
Flow(
name='test-flow-with-needs',
port_expose=9090,
protocol='http',
)
.add(
name='segmenter',
uses=f'docker://{docker_images[0]}',
)
.add(
name='textencoder',
uses=f'docker://{docker_images[0]}',
needs='segmenter',
)
.add(
name='imageencoder',
uses=f'docker://{docker_images[0]}',
needs='segmenter',
)
.add(
name='merger',
uses_before=f'docker://{docker_images[1]}',
needs=['imageencoder', 'textencoder'],
)
)
return flow
@pytest.mark.asyncio
@pytest.mark.timeout(3600)
@pytest.mark.parametrize('k8s_connection_pool', [True, False])
@pytest.mark.parametrize(
'docker_images',
[['test-executor', 'executor-merger', 'jinaai/jina']],
indirect=True,
)
async def test_flow_with_needs(
logger, k8s_connection_pool, k8s_flow_with_needs, tmpdir
):
dump_path = os.path.join(str(tmpdir), 'test-flow-with-needs')
namespace = f'test-flow-with-needs-{k8s_connection_pool}'.lower()
k8s_flow_with_needs.to_k8s_yaml(
dump_path, k8s_namespace=namespace, k8s_connection_pool=k8s_connection_pool
)
from kubernetes import client
api_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=api_client)
app_client = client.AppsV1Api(api_client=api_client)
await create_all_flow_deployments_and_wait_ready(
dump_path,
namespace=namespace,
api_client=api_client,
app_client=app_client,
core_client=core_client,
deployment_replicas_expected={
'gateway': 1,
'segmenter-head': 1,
'segmenter': 1,
'textencoder-head': 1,
'textencoder': 1,
'imageencoder-head': 1,
'imageencoder': 1,
'merger-head': 1,
'merger': 1,
},
logger=logger,
)
resp = await run_test(
flow=k8s_flow_with_needs,
namespace=namespace,
core_client=core_client,
endpoint='/debug',
)
expected_traversed_executors = {
'segmenter',
'imageencoder',
'textencoder',
}
docs = resp[0].docs
assert len(docs) == 10
for doc in docs:
assert set(doc.tags['traversed-executors']) == expected_traversed_executors
core_client.delete_namespace(namespace)
@pytest.mark.timeout(3600)
@pytest.mark.asyncio
@pytest.mark.parametrize('k8s_connection_pool', [True, False])
@pytest.mark.parametrize(
'docker_images',
[['test-executor', 'executor-merger', 'jinaai/jina']],
indirect=True,
)
@pytest.mark.parametrize('polling', ['ANY', 'ALL'])
async def test_flow_with_sharding(
k8s_flow_with_sharding, k8s_connection_pool, polling, tmpdir, logger
):
dump_path = os.path.join(str(tmpdir), 'test-flow-with-sharding')
namespace = f'test-flow-with-sharding-{polling}-{k8s_connection_pool}'.lower()
k8s_flow_with_sharding.to_k8s_yaml(
dump_path, k8s_namespace=namespace, k8s_connection_pool=k8s_connection_pool
)
from kubernetes import client
api_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=api_client)
app_client = client.AppsV1Api(api_client=api_client)
await create_all_flow_deployments_and_wait_ready(
dump_path,
namespace=namespace,
api_client=api_client,
app_client=app_client,
core_client=core_client,
deployment_replicas_expected={
'gateway': 1,
'test-executor-head': 1,
'test-executor-0': 2,
'test-executor-1': 2,
},
logger=logger,
)
resp = await run_test(
flow=k8s_flow_with_sharding,
namespace=namespace,
core_client=core_client,
endpoint='/debug',
)
core_client.delete_namespace(namespace)
docs = resp[0].docs
assert len(docs) == 10
for doc in docs:
if polling == 'ALL':
assert set(doc.tags['traversed-executors']) == {
'test_executor-0',
'test_executor-1',
}
assert set(doc.tags['shard_id']) == {0, 1}
assert doc.tags['parallel'] == [2, 2]
assert doc.tags['shards'] == [2, 2]
else:
assert len(set(doc.tags['traversed-executors'])) == 1
assert set(doc.tags['traversed-executors']) == {'test_executor-0'} or set(
doc.tags['traversed-executors']
) == {'test_executor-1'}
assert len(set(doc.tags['shard_id'])) == 1
assert 0 in set(doc.tags['shard_id']) or 1 in set(doc.tags['shard_id'])
assert doc.tags['parallel'] == [2]
assert doc.tags['shards'] == [2]
@pytest.mark.timeout(3600)
@pytest.mark.asyncio
@pytest.mark.parametrize('k8s_connection_pool', [True, False])
@pytest.mark.parametrize(
'docker_images', [['test-executor', 'jinaai/jina']], indirect=True
)
async def test_flow_with_configmap(
k8s_flow_configmap, k8s_connection_pool, docker_images, tmpdir, logger
):
dump_path = os.path.join(str(tmpdir), 'test-flow-with-configmap')
namespace = f'test-flow-with-configmap-{k8s_connection_pool}'.lower()
k8s_flow_configmap.to_k8s_yaml(
dump_path, k8s_namespace=namespace, k8s_connection_pool=k8s_connection_pool
)
from kubernetes import client
api_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=api_client)
app_client = client.AppsV1Api(api_client=api_client)
await create_all_flow_deployments_and_wait_ready(
dump_path,
namespace=namespace,
api_client=api_client,
app_client=app_client,
core_client=core_client,
deployment_replicas_expected={
'gateway': 1,
'test-executor-head': 1,
'test-executor': 1,
},
logger=logger,
)
resp = await run_test(
flow=k8s_flow_configmap,
namespace=namespace,
core_client=core_client,
endpoint='/env',
)
docs = resp[0].docs
assert len(docs) == 10
for doc in docs:
assert doc.tags['JINA_LOG_LEVEL'] == 'INFO'
assert doc.tags['k1'] == 'v1'
assert doc.tags['k2'] == 'v2'
assert doc.tags['env'] == {'k1': 'v1', 'k2': 'v2'}
core_client.delete_namespace(namespace)
@pytest.mark.timeout(3600)
@pytest.mark.asyncio
@pytest.mark.skip('Need to config gpu host.')
@pytest.mark.parametrize(
'docker_images', [['test-executor', 'jinaai/jina']], indirect=True
)
async def test_flow_with_gpu(k8s_flow_gpu, docker_images, tmpdir, logger):
dump_path = os.path.join(str(tmpdir), 'test-flow-with-gpu')
namespace = f'test-flow-with-gpu'
k8s_flow_gpu.to_k8s_yaml(dump_path, k8s_namespace=namespace)
from kubernetes import client
api_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=api_client)
app_client = client.AppsV1Api(api_client=api_client)
await create_all_flow_deployments_and_wait_ready(
dump_path,
namespace=namespace,
api_client=api_client,
app_client=app_client,
core_client=core_client,
deployment_replicas_expected={
'gateway': 1,
'test-executor-head': 1,
'test-executor': 1,
},
logger=logger,
)
resp = await run_test(
flow=k8s_flow_gpu,
namespace=namespace,
core_client=core_client,
endpoint='/cuda',
)
docs = resp[0].docs
assert len(docs) == 10
for doc in docs:
assert doc.tags['resources']['limits'] == {'nvidia.com/gpu:': 1}
core_client.delete_namespace(namespace)
@pytest.mark.timeout(3600)
@pytest.mark.asyncio
@pytest.mark.parametrize(
'docker_images', [['reload-executor', 'jinaai/jina']], indirect=True
)
async def test_rolling_update_simple(
k8s_flow_with_reload_executor, docker_images, tmpdir, logger
):
dump_path = os.path.join(str(tmpdir), 'test-flow-with-reload')
namespace = f'test-flow-with-reload-executor'
k8s_flow_with_reload_executor.to_k8s_yaml(dump_path, k8s_namespace=namespace)
from kubernetes import client
api_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=api_client)
app_client = client.AppsV1Api(api_client=api_client)
await create_all_flow_deployments_and_wait_ready(
dump_path,
namespace=namespace,
api_client=api_client,
app_client=app_client,
core_client=core_client,
deployment_replicas_expected={
'gateway': 1,
'test-executor-head': 1,
'test-executor': 2,
},
logger=logger,
)
resp = await run_test(
flow=k8s_flow_with_reload_executor,
namespace=namespace,
core_client=core_client,
endpoint='/exec',
)
docs = resp[0].docs
assert len(docs) == 10
for doc in docs:
assert doc.tags['argument'] == 'value1'
import yaml
with open(os.path.join(dump_path, 'test_executor', 'test-executor.yml')) as f:
yml_document_all = list(yaml.safe_load_all(f))
yml_deployment = yml_document_all[-1]
container_args = yml_deployment['spec']['template']['spec']['containers'][0]['args']
container_args[container_args.index('--uses-with') + 1] = '{"argument": "value2"}'
yml_deployment['spec']['template']['spec']['containers'][0]['args'] = container_args
app_client.patch_namespaced_deployment(
name='test-executor', namespace=namespace, body=yml_deployment
)
await asyncio.sleep(10.0)
resp = await run_test(
flow=k8s_flow_with_reload_executor,
namespace=namespace,
core_client=core_client,
endpoint='/index',
)
docs = resp[0].docs
assert len(docs) == 10
for doc in docs:
assert doc.tags['argument'] == 'value2'
core_client.delete_namespace(namespace)
@pytest.mark.asyncio
@pytest.mark.timeout(3600)
@pytest.mark.parametrize('k8s_connection_pool', [True, False])
@pytest.mark.parametrize(
'docker_images',
[['test-executor', 'jinaai/jina']],
indirect=True,
)
async def test_flow_with_workspace(logger, k8s_connection_pool, docker_images, tmpdir):
flow = Flow(name='k8s_flow-with_workspace', port_expose=9090, protocol='http').add(
name='test_executor',
uses=f'docker://{docker_images[0]}',
workspace='/shared',
)
dump_path = os.path.join(str(tmpdir), 'test-flow-with-workspace')
namespace = f'test-flow-with-workspace-{k8s_connection_pool}'.lower()
flow.to_k8s_yaml(dump_path, k8s_namespace=namespace)
from kubernetes import client
api_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=api_client)
app_client = client.AppsV1Api(api_client=api_client)
await create_all_flow_deployments_and_wait_ready(
dump_path,
namespace=namespace,
api_client=api_client,
app_client=app_client,
core_client=core_client,
deployment_replicas_expected={
'gateway': 1,
'test-executor-head': 1,
'test-executor': 1,
},
logger=logger,
)
resp = await run_test(
flow=flow,
namespace=namespace,
core_client=core_client,
endpoint='/workspace',
)
docs = resp[0].docs
assert len(docs) == 10
for doc in docs:
assert doc.tags['workspace'] == '/shared/TestExecutor/0'
core_client.delete_namespace(namespace)
@pytest.mark.asyncio
@pytest.mark.timeout(3600)
@pytest.mark.parametrize('k8s_connection_pool', [True, False])
@pytest.mark.parametrize(
'docker_images',
[['test-executor', 'jinaai/jina']],
indirect=True,
)
async def test_flow_connection_pool(logger, k8s_connection_pool, docker_images, tmpdir):
flow = Flow(name='k8s_flow_connection_pool', port_expose=9090, protocol='http').add(
name='test_executor',
replicas=2,
uses=f'docker://{docker_images[0]}',
)
dump_path = os.path.join(str(tmpdir), 'test-flow-connection-pool')
namespace = f'test-flow-connection-pool-{k8s_connection_pool}'.lower()
flow.to_k8s_yaml(
dump_path, k8s_namespace=namespace, k8s_connection_pool=k8s_connection_pool
)
from kubernetes import client
api_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=api_client)
app_client = client.AppsV1Api(api_client=api_client)
await create_all_flow_deployments_and_wait_ready(
dump_path,
namespace=namespace,
api_client=api_client,
app_client=app_client,
core_client=core_client,
deployment_replicas_expected={
'gateway': 1,
'test-executor-head': 1,
'test-executor': 2,
},
logger=logger,
)
resp = await run_test(
flow=flow,
namespace=namespace,
core_client=core_client,
endpoint='/debug',
n_docs=100,
request_size=5,
)
core_client.delete_namespace(namespace)
visited = set()
for r in resp:
for doc in r.docs:
visited.add(doc.tags['hostname'])
if k8s_connection_pool:
assert len(visited) == 2
else:
assert len(visited) == 1
@pytest.mark.asyncio
@pytest.mark.timeout(3600)
@pytest.mark.parametrize(
'docker_images',
[['jinaai/jina']],
indirect=True,
)
async def test_flow_with_external_native_deployment(logger, docker_images, tmpdir):
class DocReplaceExecutor(Executor):
@requests
def add(self, **kwargs):
return DocumentArray(
[Document(text='executor was here') for _ in range(100)]
)
args = set_deployment_parser().parse_args(['--uses', 'DocReplaceExecutor'])
with Deployment(args) as external_deployment:
flow = Flow(name='k8s_flow-with_external_deployment', port_expose=9090).add(
name='external_executor',
external=True,
host=f'172.17.0.1',
port_in=external_deployment.head_port_in,
)
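        # 172.17.0.1 is the default docker0 bridge address, which lets pods
        # inside the kind cluster reach the Deployment running on the host.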
namespace = 'test-flow-with-external-deployment'
dump_path = os.path.join(str(tmpdir), namespace)
flow.to_k8s_yaml(dump_path, k8s_namespace=namespace)
from kubernetes import client
api_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=api_client)
app_client = client.AppsV1Api(api_client=api_client)
await create_all_flow_deployments_and_wait_ready(
dump_path,
namespace=namespace,
api_client=api_client,
app_client=app_client,
core_client=core_client,
deployment_replicas_expected={
'gateway': 1,
},
logger=logger,
)
resp = await run_test(
flow=flow,
namespace=namespace,
core_client=core_client,
endpoint='/',
)
docs = resp[0].docs
assert len(docs) == 100
for doc in docs:
assert doc.text == 'executor was here'
core_client.delete_namespace(namespace)
@pytest.mark.asyncio
@pytest.mark.timeout(3600)
@pytest.mark.parametrize(
'docker_images',
[['test-executor', 'jinaai/jina']],
indirect=True,
)
async def test_flow_with_external_k8s_deployment(logger, docker_images, tmpdir):
namespace = 'test-flow-with-external-k8s-deployment'
from kubernetes import client
api_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=api_client)
app_client = client.AppsV1Api(api_client=api_client)
await _create_external_deployment(api_client, app_client, docker_images, tmpdir)
flow = Flow(name='k8s_flow-with_external_deployment', port_expose=9090).add(
name='external_executor',
external=True,
host='external-deployment-head.external-deployment-ns.svc',
port_in=K8sGrpcConnectionPool.K8S_PORT_IN,
)
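    # The host is the in-cluster DNS name (<service>.<namespace>.svc) of the
    # head Service created by _create_external_deployment below.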
dump_path = os.path.join(str(tmpdir), namespace)
flow.to_k8s_yaml(dump_path, k8s_namespace=namespace)
await create_all_flow_deployments_and_wait_ready(
dump_path,
namespace=namespace,
api_client=api_client,
app_client=app_client,
core_client=core_client,
deployment_replicas_expected={
'gateway': 1,
},
logger=logger,
)
resp = await run_test(
flow=flow,
namespace=namespace,
core_client=core_client,
endpoint='/workspace',
)
docs = resp[0].docs
for doc in docs:
assert 'workspace' in doc.tags
async def _create_external_deployment(api_client, app_client, docker_images, tmpdir):
namespace = 'external-deployment-ns'
args = set_deployment_parser().parse_args(
['--uses', f'docker://{docker_images[0]}', '--name', 'external-deployment']
)
external_deployment_config = K8sDeploymentConfig(args=args, k8s_namespace=namespace)
configs = external_deployment_config.to_k8s_yaml()
deployment_base = os.path.join(tmpdir, 'external-deployment')
filenames = []
for name, k8s_objects in configs:
filename = os.path.join(deployment_base, f'{name}.yml')
os.makedirs(deployment_base, exist_ok=True)
with open(filename, 'w+') as fp:
filenames.append(filename)
for i, k8s_object in enumerate(k8s_objects):
yaml.dump(k8s_object, fp)
if i < len(k8s_objects) - 1:
fp.write('---\n')
from kubernetes import utils
namespace_object = {
'apiVersion': 'v1',
'kind': 'Namespace',
'metadata': {'name': f'{namespace}'},
}
try:
utils.create_from_dict(api_client, namespace_object)
except:
pass
for filename in filenames:
try:
utils.create_from_yaml(
api_client,
yaml_file=filename,
namespace=namespace,
)
except:
pass
await asyncio.sleep(1.0)
| [] | [] | [
"KUBECONFIG"
] | [] | ["KUBECONFIG"] | python | 1 | 0 | |
sumo/tests/complex/tutorial/traci_pedestrian_crossing/runner.py | #!/usr/bin/env python
"""
@file runner.py
@author Lena Kalleske
@author Daniel Krajzewicz
@author Michael Behrisch
@author Jakob Erdmann
@date 2009-03-26
@version $Id$
Tutorial for traffic light control via the TraCI interface.
This scenario models a pedestrian crossing which switches on demand.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2009-2017 DLR/TS, Germany
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import optparse
import subprocess
import random
# the directory in which this script resides
THISDIR = os.path.dirname(__file__)
# we need to import python modules from the $SUMO_HOME/tools directory
# If the environment variable SUMO_HOME is not set, try to locate the python
# modules relative to this script
try:
# tutorial in tests
sys.path.append(os.path.join(THISDIR, '..', '..', '..', '..', "tools"))
sys.path.append(os.path.join(os.environ.get("SUMO_HOME", os.path.join(
THISDIR, "..", "..", "..")), "tools")) # tutorial in docs
import traci
from sumolib import checkBinary # noqa
import randomTrips
except ImportError:
sys.exit(
"please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')")
# minimum green time for the vehicles
MIN_GREEN_TIME = 15
# the first phase in tls plan. see 'pedcrossing.tll.xml'
VEHICLE_GREEN_PHASE = 0
# the id of the traffic light (there is only one). This is identical to the
# id of the controlled intersection (by default)
TLSID = 'C'
# pedestrian edges at the controlled intersection
WALKINGAREAS = [':C_w0', ':C_w1']
CROSSINGS = [':C_c0']
def run():
"""execute the TraCI control loop"""
# track the duration for which the green phase of the vehicles has been
# active
greenTimeSoFar = 0
# whether the pedestrian button has been pressed
activeRequest = False
# main loop. do something every simulation step until no more vehicles are
# loaded or running
while traci.simulation.getMinExpectedNumber() > 0:
traci.simulationStep()
        # decide whether there is a waiting pedestrian and switch if the green
# phase for the vehicles exceeds its minimum duration
if not activeRequest:
activeRequest = checkWaitingPersons()
if traci.trafficlights.getPhase(TLSID) == VEHICLE_GREEN_PHASE:
greenTimeSoFar += 1
if greenTimeSoFar > MIN_GREEN_TIME:
# check whether someone has pushed the button
if activeRequest:
# switch to the next phase
traci.trafficlights.setPhase(
TLSID, VEHICLE_GREEN_PHASE + 1)
# reset state
activeRequest = False
greenTimeSoFar = 0
sys.stdout.flush()
traci.close()
def checkWaitingPersons():
"""check whether a person has requested to cross the street"""
# check both sides of the crossing
for edge in WALKINGAREAS:
peds = traci.edge.getLastStepPersonIDs(edge)
# check who is waiting at the crossing
# we assume that pedestrians push the button upon
# standing still for 1s
for ped in peds:
if (traci.person.getWaitingTime(ped) == 1 and
traci.person.getNextEdge(ped) in CROSSINGS):
print("%s pushes the button" % ped)
return True
return False
def get_options():
"""define options for this script and interpret the command line"""
optParser = optparse.OptionParser()
optParser.add_option("--nogui", action="store_true",
default=False, help="run the commandline version of sumo")
options, args = optParser.parse_args()
return options
# this is the main entry point of this script
if __name__ == "__main__":
    # check whether to run with or without GUI
options = get_options()
# this script has been called from the command line. It will start sumo as a
# server, then connect and run
if options.nogui:
sumoBinary = checkBinary('sumo')
else:
sumoBinary = checkBinary('sumo-gui')
net = 'pedcrossing.net.xml'
# build the multi-modal network from plain xml inputs
subprocess.call([checkBinary('netconvert'),
'-c', os.path.join('data', 'pedcrossing.netccfg'),
'--output-file', net],
stdout=sys.stdout, stderr=sys.stderr)
# generate the pedestrians for this simulation
randomTrips.main(randomTrips.get_options([
'--net-file', net,
'--output-trip-file', 'pedestrians.trip.xml',
'--seed', '42', # make runs reproducible
'--pedestrians',
'--prefix', 'ped',
# prevent trips that start and end on the same edge
'--min-distance', '1',
'--trip-attributes', 'departPos="random" arrivalPos="random"',
'--binomial', '4',
'--period', '35']))
# this is the normal way of using traci. sumo is started as a
# subprocess and then the python script connects and runs
traci.start([sumoBinary, '-c', os.path.join('data', 'run.sumocfg')])
run()
| [] | [] | [
"SUMO_HOME"
] | [] | ["SUMO_HOME"] | python | 1 | 0 | |
pkg/kubernetes/client/client.go | /*
Copyright 2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
"fmt"
"os"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/openebs/jiva-operator/pkg/apis"
jv "github.com/openebs/jiva-operator/pkg/apis/openebs/v1alpha1"
"github.com/openebs/jiva-operator/pkg/jivavolume"
analytics "github.com/openebs/jiva-operator/pkg/usage"
"github.com/openebs/jiva-operator/pkg/utils"
env "github.com/openebs/lib-csi/pkg/common/env"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
"k8s.io/cloud-provider/volume/helpers"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
const (
defaultNS = "openebs"
defaultSizeBytes = 5 * helpers.GiB
// pvcNameKey holds the name of the PVC which is passed as a parameter
// in CreateVolume request
pvcNameKey = "csi.storage.k8s.io/pvc/name"
// OpenEBSNamespace is the environment variable to get openebs namespace
// This environment variable is set via kubernetes downward API
OpenEBSNamespace = "OPENEBS_NAMESPACE"
)
var (
// openebsNamespace is the namespace where jiva operator is deployed
openebsNamespace string
)
// Client is the wrapper over the k8s client that will be used by
// jiva-operator to interface with etcd
type Client struct {
cfg *rest.Config
client client.Client
}
// New creates a new client object using the given config
func New(config *rest.Config) (*Client, error) {
c := &Client{
cfg: config,
}
err := c.Set()
if err != nil {
return c, err
}
return c, nil
}
// Set sets the client using the config
func (cl *Client) Set() error {
c, err := client.New(cl.cfg, client.Options{})
if err != nil {
return err
}
cl.client = c
return nil
}
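// Illustrative usage (a sketch, not part of this package's API surface):
// building the client from in-cluster config with the standard client-go
// helper; error handling elided for brevity.
//
//   cfg, err := rest.InClusterConfig()
//   cl, err := New(cfg)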
// RegisterAPI registers the API scheme in the client using the manager.
// This function needs to be called only once per client object.
func (cl *Client) RegisterAPI(opts manager.Options) error {
mgr, err := manager.New(cl.cfg, opts)
if err != nil {
return err
}
// Setup Scheme for all resources
if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
return err
}
return nil
}
// GetJivaVolume gets the instance of the JivaVolume CR.
func (cl *Client) GetJivaVolume(name string) (*jv.JivaVolume, error) {
instance, err := cl.ListJivaVolume(name)
if err != nil {
logrus.Errorf("Failed to get JivaVolume CR: %v, err: %v", name, err)
return nil, status.Errorf(codes.Internal, "Failed to get JivaVolume CR: {%v}, err: {%v}", name, err)
}
if len(instance.Items) == 0 {
return nil, status.Errorf(codes.NotFound, "Failed to get JivaVolume CR: {%v}", name)
}
return &instance.Items[0], nil
}
// UpdateJivaVolume updates the JivaVolume CR.
func (cl *Client) UpdateJivaVolume(cr *jv.JivaVolume) (bool, error) {
err := cl.client.Update(context.TODO(), cr)
if err != nil {
if k8serrors.IsConflict(err) {
return true, err
}
logrus.Errorf("Failed to update JivaVolume CR: {%v}, err: {%v}", cr.Name, err)
return false, err
}
return false, nil
}
func getDefaultLabels(pv string) map[string]string {
return map[string]string{
"openebs.io/persistent-volume": pv,
"openebs.io/component": "jiva-volume",
}
}
func getdefaultAnnotations(policy string) map[string]string {
annotations := map[string]string{}
if policy != "" {
annotations["openebs.io/volume-policy"] = policy
}
return annotations
}
// CreateJivaVolume checks whether the JivaVolume CR already exists and creates one
// if it doesn't exist.
func (cl *Client) CreateJivaVolume(req *csi.CreateVolumeRequest) (string, error) {
var (
sizeBytes int64
accessType string
)
name := utils.StripName(req.GetName())
policyName := req.GetParameters()["policy"]
pvcName := req.GetParameters()[pvcNameKey]
ns := os.Getenv("OPENEBS_NAMESPACE")
if req.GetCapacityRange() == nil {
logrus.Warningf("CreateVolume: capacity range is nil, provisioning with default size: {%v (bytes)}", defaultSizeBytes)
sizeBytes = defaultSizeBytes
} else {
sizeBytes = req.GetCapacityRange().RequiredBytes
}
size := resource.NewQuantity(sizeBytes, resource.BinarySI)
volSizeGiB, err := helpers.RoundUpToGiB(*size)
if err != nil {
return "", status.Errorf(codes.Internal, "Failed to round up volume size, err: %v", err)
}
capacity := fmt.Sprintf("%dGi", volSizeGiB)
caps := req.GetVolumeCapabilities()
for _, cap := range caps {
switch cap.GetAccessType().(type) {
case *csi.VolumeCapability_Block:
accessType = "block"
case *csi.VolumeCapability_Mount:
accessType = "mount"
}
}
jiva := jivavolume.New().WithKindAndAPIVersion("JivaVolume", "openebs.io/v1alpha1").
WithNameAndNamespace(name, ns).
WithAnnotations(getdefaultAnnotations(policyName)).
WithLabels(getDefaultLabels(name)).
WithPV(name).
WithCapacity(capacity).
WithAccessType(accessType).
WithVersionDetails()
if jiva.Errs != nil {
return "", status.Errorf(codes.Internal, "Failed to build JivaVolume CR, err: {%v}", jiva.Errs)
}
obj := jiva.Instance()
objExists := &jv.JivaVolume{}
err = cl.client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: ns}, objExists)
if err != nil && k8serrors.IsNotFound(err) {
logrus.Infof("Creating a new JivaVolume CR {name: %v, namespace: %v}", name, ns)
err = cl.client.Create(context.TODO(), obj)
if err != nil {
return "", status.Errorf(codes.Internal, "Failed to create JivaVolume CR, err: {%v}", err)
}
SendEventOrIgnore(pvcName, name, size.String(), "", "jiva-csi", analytics.VolumeProvision)
return name, nil
} else if err != nil {
return "", status.Errorf(codes.Internal, "Failed to get the JivaVolume details, err: {%v}", err)
}
if objExists.Spec.Capacity != obj.Spec.Capacity {
return "", status.Errorf(codes.AlreadyExists, "Failed to create JivaVolume CR, volume with different size already exists")
}
return name, nil
}
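// A note on the flow above: CreateJivaVolume is effectively idempotent. If a
// JivaVolume CR with the same name already exists, the call succeeds as long
// as the requested capacity matches, and a mismatched size returns
// codes.AlreadyExists, which lines up with how the CSI spec treats repeated
// CreateVolume requests.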
// ListJivaVolume returns the list of JivaVolume resources
func (cl *Client) ListJivaVolume(volumeID string) (*jv.JivaVolumeList, error) {
volumeID = utils.StripName(volumeID)
obj := &jv.JivaVolumeList{}
opts := []client.ListOption{
client.MatchingLabels(getDefaultLabels(volumeID)),
}
if err := cl.client.List(context.TODO(), obj, opts...); err != nil {
return nil, err
}
return obj, nil
}
// GetJivaVolumeResource returns the JivaVolume resource matching the given volume ID.
func (cl *Client) GetJivaVolumeResource(volumeID string) (*jv.JivaVolume, error) {
volumeID = utils.StripName(volumeID)
obj := &jv.JivaVolume{}
if err := cl.client.Get(context.TODO(), types.NamespacedName{Name: volumeID, Namespace: GetOpenEBSNamespace()}, obj); err != nil {
return nil, err
}
return obj, nil
}
// ListJivaVolumeWithOpts returns the list of JivaVolume resources
func (cl *Client) ListJivaVolumeWithOpts(opts map[string]string) (*jv.JivaVolumeList, error) {
obj := &jv.JivaVolumeList{}
options := []client.ListOption{
client.MatchingLabels(opts),
}
if err := cl.client.List(context.TODO(), obj, options...); err != nil {
return nil, err
}
return obj, nil
}
// DeleteJivaVolume deletes the JivaVolume CR.
func (cl *Client) DeleteJivaVolume(volumeID string) error {
obj, err := cl.ListJivaVolume(volumeID)
if err != nil {
return err
}
if len(obj.Items) == 0 {
logrus.Warningf("DeleteVolume: JivaVolume: {%v}, not found, ignore deletion...", volumeID)
return nil
}
logrus.Debugf("DeleteVolume: object: {%+v}", obj)
instance := obj.Items[0].DeepCopy()
if err := cl.client.Delete(context.TODO(), instance); err != nil {
return err
}
return nil
}
// GetNode gets the node which satisfies the topology info
func (cl *Client) GetNode(nodeName string) (*corev1.Node, error) {
node := &corev1.Node{}
if err := cl.client.Get(context.TODO(), types.NamespacedName{Name: nodeName, Namespace: ""}, node); err != nil {
return node, err
}
return node, nil
}
// GetOpenEBSNamespace returns namespace where
// jiva operator is running
func GetOpenEBSNamespace() string {
if openebsNamespace == "" {
openebsNamespace = env.Get(OpenEBSNamespace)
}
return openebsNamespace
}
// SendEventOrIgnore sends anonymous jiva volume provision/delete events.
func SendEventOrIgnore(pvcName, pvName, capacity, replicaCount, stgType, method string) {
if env.Truthy(analytics.OpenEBSEnableAnalytics) {
analytics.New().Build().ApplicationBuilder().
SetVolumeType(stgType, method).
SetDocumentTitle(pvName).
SetCampaignName(pvcName).
SetLabel(analytics.EventLabelCapacity).
SetReplicaCount(replicaCount, method).
SetCategory(method).
SetVolumeCapacity(capacity).Send()
}
}
| ["\"OPENEBS_NAMESPACE\""] | [] | ["OPENEBS_NAMESPACE"] | [] | ["OPENEBS_NAMESPACE"] | go | 1 | 0 |
microservices/svc/multiplier/conf.py | import os
MULTIPLIER_ADDRESS = os.environ.get('MULTIPLIER_ADDRESS', '0.0.0.0:5050')
IP_ADDRESS = MULTIPLIER_ADDRESS.split(':')[0]
PORT = int(MULTIPLIER_ADDRESS.split(':')[1])
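# Example (illustrative): overriding the bind address via the environment;
# the entry-point name here is an assumption:
#   MULTIPLIER_ADDRESS=127.0.0.1:6000 python server.py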
| [] | [] | ["MULTIPLIER_ADDRESS"] | [] | ["MULTIPLIER_ADDRESS"] | python | 1 | 0 |
util/src/main/java/com/psddev/dari/util/JspUtils.java | package com.psddev.dari.util;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.Reader;
import java.io.UnsupportedEncodingException;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Pattern;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpServletResponseWrapper;
import javax.servlet.jsp.JspFactory;
import javax.xml.bind.DatatypeConverter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** JSP utility methods. */
public final class JspUtils {
private static final Pattern ABSOLUTE_URI_PATTERN = Pattern.compile("(?i)[a-z][-a-z0-9+.]*:.*");
private static final Logger LOGGER = LoggerFactory.getLogger(JspUtils.class);
private static final String WEB_INF_DIRECTORY = "WEB-INF/";
private static final String ATTRIBUTE_PREFIX = JspUtils.class.getName() + ".";
private static final String EMBEDDED_SETTINGS_ATTRIBUTE = ATTRIBUTE_PREFIX + "embeddedSettings";
private static final String EMBEDDED_CONTEXT_PATHS = ATTRIBUTE_PREFIX + "embeddedContextPaths";
private static final String HEADER_RESPONSE_ATTRIBUTE = ATTRIBUTE_PREFIX + "headerResponse";
private static final String ID_ATTRIBUTE = ATTRIBUTE_PREFIX + "id";
private static final String IS_FINISHED_ATTRIBUTE = ATTRIBUTE_PREFIX + "isFinished";
/**
* Controls access using the basic authentication scheme described
* in <a href="http://tools.ietf.org/html/rfc2617">RFC 2617</a>.
* Note that if running in non-{@linkplain Settings#isProduction
* production environment} and the given {@code username} and
* {@code password} is blank, this method will return {@code true}.
* Typical use looks like:
*
* <p><blockquote><pre>
* if (!JspUtils.authenticateBasic(request, response, realm, username, password)) {
* return; // Should not send anything else to the response.
* }
* </pre></blockquote>
*
* @deprecated Use {@link #getBasicCredentials} and {@link #setBasicAuthenticationHeader} instead.
*/
@Deprecated
public static boolean authenticateBasic(
HttpServletRequest request,
HttpServletResponse response,
String realm,
String username,
String password) {
if (ObjectUtils.isBlank(username) &&
ObjectUtils.isBlank(password) &&
!Settings.isProduction()) {
return true;
}
String[] credentials = getBasicCredentials(request);
if (credentials != null &&
credentials[0].equals(username) &&
credentials[1].equals(password)) {
return true;
} else {
setBasicAuthenticationHeader(response, realm);
return false;
}
}
/**
* Returns the basic access authentication credentials from the
* {@code Authorization} header.
*
* @param request Can't be {@code null}.
* @return {@code null} if not found, or a 2-element string array.
*/
public static String[] getBasicCredentials(HttpServletRequest request) {
String header = request.getHeader("Authorization");
if (!ObjectUtils.isBlank(header)) {
int spaceAt = header.indexOf(' ');
if (spaceAt > -1 && "Basic".equals(header.substring(0, spaceAt))) {
String encoding = request.getCharacterEncoding();
if (ObjectUtils.isBlank(encoding)) {
encoding = "UTF-8";
}
String decoded = null;
try {
byte[] decodedBytes = DatatypeConverter.parseBase64Binary(header.substring(spaceAt + 1));
decoded = new String(decodedBytes, encoding);
} catch (IllegalArgumentException error) {
// Not a valid Base64 string, so just ignore the header
// value.
} catch (UnsupportedEncodingException error) {
throw new IllegalStateException(error);
}
if (!ObjectUtils.isBlank(decoded)) {
int colonAt = decoded.indexOf(':');
if (colonAt > -1) {
return new String[] {
decoded.substring(0, colonAt),
decoded.substring(colonAt + 1) };
}
}
}
}
return null;
}
/**
* Sets the {@code WWW-Authenticate} header requiring basic access
* authentication in the given {@code realm}.
*
* @param response Can't be {@code null}.
* @param realm May be {@code null}.
*/
public static void setBasicAuthenticationHeader(HttpServletResponse response, String realm) {
StringBuilder header = new StringBuilder();
header.append("Basic realm=\"");
if (realm != null) {
header.append(realm.replace("\"", "\\\""));
}
header.append('"');
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
response.setHeader("WWW-Authenticate", header.toString());
}
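// An illustrative sketch of the non-deprecated replacement for
// authenticateBasic (isValid is a hypothetical application-supplied check,
// not part of this class):
//
//   String[] credentials = JspUtils.getBasicCredentials(request);
//   if (credentials == null || !isValid(credentials[0], credentials[1])) {
//       JspUtils.setBasicAuthenticationHeader(response, "My Realm");
//       return; // don't write anything else to the response
//   }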
/**
* Creates a cookie signature based on the given {@code name},
* {@code value}, and {@code timestamp}.
*/
private static String createCookieSignature(
String name,
String value,
long timestamp) {
try {
MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
sha1.update(name.getBytes(StringUtils.UTF_8));
sha1.update(value.getBytes(StringUtils.UTF_8));
sha1.update(Long.valueOf(timestamp).byteValue());
sha1.update(Settings.getSecret().getBytes(StringUtils.UTF_8));
return StringUtils.hex(sha1.digest());
} catch (NoSuchAlgorithmException error) {
throw new IllegalStateException("Can't hash using SHA-1!", error);
}
}
/**
* Creates a new unique ID that can be used to identify anything
* within the given {@code request}.
*
* @see #getId
*/
public static String createId(ServletRequest request) {
String id = "i" + UUID.randomUUID().toString().replace("-", "");
request.setAttribute(ID_ATTRIBUTE, id);
return id;
}
/**
* Signals the given {@code request} that it's finished
* and shouldn't process anything further.
*
* @see #isFinished
*/
public static void finish(ServletRequest request) {
request.setAttribute(IS_FINISHED_ATTRIBUTE, Boolean.TRUE);
}
/**
* Forwards to the resource at the given {@code path} modified by the
* given {@code parameters}.
*/
public static void forward(
ServletRequest request,
ServletResponse response,
String path,
Object... parameters)
throws IOException, ServletException {
Map<String, Object> old = setAttributes(request, parameters);
try {
request.getRequestDispatcher(path).forward(request, response);
} finally {
setAttributesWithMap(request, old);
}
}
/**
* Returns the absolute version of the given {@code path}. The return
* value includes the context path and is meant for final display
* (e.g. an HTML page).
*
* @param path If empty, existing query parameters from the given
* {@code request} are added in addition to the given {@code parameters}.
*
* @see #resolvePath
*/
public static String getAbsolutePath(
HttpServletRequest request,
String path,
Object... parameters) {
return getEmbeddedAbsolutePath(null, request, path, parameters);
}
/** Returns the absolute version of the given {@code url}. */
public static String getAbsoluteUrl(
HttpServletRequest request,
String url,
Object... parameters) {
return getEmbeddedAbsoluteUrl(null, request, url, parameters);
}
/**
* Returns the cookie with the given {@code name} from the given
* {@code request}.
*/
public static Cookie getCookie(HttpServletRequest request, String name) {
Cookie[] cookies = request.getCookies();
if (cookies != null) {
for (Cookie cookie : cookies) {
if (cookie.getName().equals(name)) {
return cookie;
}
}
}
return null;
}
/** Returns the current context path of the given {@code request}. */
public static String getCurrentContextPath(HttpServletRequest request) {
return isIncluded(request) ?
(String) request.getAttribute("javax.servlet.include.context_path") :
request.getContextPath();
}
/** Returns the current path info of the given {@code request}. */
public static String getCurrentPathInfo(HttpServletRequest request) {
return isIncluded(request) ?
(String) request.getAttribute("javax.servlet.include.path_info") :
request.getPathInfo();
}
/** Returns the current query string of the given {@code request}. */
public static String getCurrentQueryString(HttpServletRequest request) {
return isIncluded(request) ?
(String) request.getAttribute("javax.servlet.include.query_string") :
request.getQueryString();
}
/** Returns the current servlet path of the given {@code request}. */
public static String getCurrentServletPath(HttpServletRequest request) {
return isIncluded(request) ?
(String) request.getAttribute("javax.servlet.include.servlet_path") :
request.getServletPath();
}
/** Returns the exception that the given {@code request} is handling. */
public static Throwable getErrorException(HttpServletRequest request) {
return (Throwable) request.getAttribute("javax.servlet.error.exception");
}
/**
* Returns the URI that caused the error currently being handled
* by the given {@code request}.
*/
public static String getErrorRequestUri(ServletRequest request) {
return (String) request.getAttribute("javax.servlet.error.request_uri");
}
/**
* Use {@link #getErrorRequestUri(ServletRequest)} instead.
*/
public static String getErrorRequestUri(HttpServletRequest request) {
return getErrorRequestUri((ServletRequest) request);
}
/**
* Returns the first proxy header value, which may be a
* comma-separated list of values.
*/
private static String getFirstProxyHeader(String header) {
if (ObjectUtils.isBlank(header)) {
return null;
} else {
int commaAt = header.indexOf(',');
if (commaAt > -1) {
header = header.substring(0, commaAt);
}
}
return header;
}
/**
* Returns the servlet response associated with the given
* {@code request} that can be used to write the headers.
*
* @see #setHeaderResponse
* @see HeaderResponseFilter
*/
public static ServletResponse getHeaderResponse(
ServletRequest request,
ServletResponse response) {
ServletResponse headerResponse = (ServletResponse) request.getAttribute(HEADER_RESPONSE_ATTRIBUTE);
return headerResponse != null ? headerResponse : response;
}
/**
* Returns the host ({@code X-Forwarded-Host} or {@code Host} header which
* may include the port number) from the given {@code request}.
*
* @param request Can't be {@code null}.
* @return Never {@code null}.
*/
public static String getHost(HttpServletRequest request) {
String host = getFirstProxyHeader(request.getHeader("X-Forwarded-Host"));
return host != null ? host : request.getHeader("Host");
}
/**
* Returns the host name (no port number) from the given {@code request}.
*
* @param request Can't be {@code null}.
* @return Never {@code null}.
*/
public static String getHostname(HttpServletRequest request) {
String host = getHost(request);
int colonAt = host.lastIndexOf(':');
return colonAt > -1 ? host.substring(0, colonAt) : host;
}
/** Returns the host URL from the given {@code request}. */
public static String getHostUrl(HttpServletRequest request) {
return request.getScheme() + "://" + getHost(request);
}
/** Returns the protocol relative host URL from the given {@code request}. */
public static String getProtocolRelativeHostUrl(HttpServletRequest request) {
return "//" + getHost(request);
}
/**
* Returns the unique ID last created within the given
* {@code request}.
*
* @see #createId
*/
public static String getId(ServletRequest request) {
Object id = request.getAttribute(ID_ATTRIBUTE);
if (!(id instanceof String)) {
throw new IllegalStateException("Unique ID was never created!");
}
return (String) id;
}
/** Returns the original context path of the given {@code request}. */
public static String getOriginalContextPath(HttpServletRequest request) {
return isForwarded(request) ?
(String) request.getAttribute("javax.servlet.forward.context_path") :
request.getContextPath();
}
/** Returns the original path info of the given {@code request}. */
public static String getOriginalPathInfo(HttpServletRequest request) {
return isForwarded(request) ?
(String) request.getAttribute("javax.servlet.forward.path_info") :
request.getPathInfo();
}
/** Returns the original query string of the given {@code request}. */
public static String getOriginalQueryString(HttpServletRequest request) {
return isForwarded(request) ?
(String) request.getAttribute("javax.servlet.forward.query_string") :
request.getQueryString();
}
/** Returns the original servlet path of the given {@code request}. */
public static String getOriginalServletPath(HttpServletRequest request) {
return isForwarded(request) ?
(String) request.getAttribute("javax.servlet.forward.servlet_path") :
request.getServletPath();
}
/**
* Returns the IP address of the client that made the given
* {@code request}.
*/
public static String getRemoteAddress(HttpServletRequest request) {
String address = getFirstProxyHeader(request.getHeader("X-Forwarded-For"));
return address != null ? address : request.getRemoteAddr();
}
/**
* Returns the value of the signed cookie with the given {@code name}.
*
* @see #setSignedCookie
*/
public static String getSignedCookie(
HttpServletRequest request,
String name) {
return getSignedCookieWithExpiry(request, name, 0);
}
/**
* Returns the value of the signed cookie with the given {@code name} so
* long as the given {@code expirationDuration} has not been exceeded. A
* zero or negative {@code expirationDuration} signifies that the cookie
* does not expire.
*
* @see #setSignedCookie
*/
public static String getSignedCookieWithExpiry(
HttpServletRequest request,
String name,
long expirationDuration) {
Cookie cookie = getCookie(request, name);
if (cookie == null) {
return null;
} else {
return unsignCookieWithExpiry(name, cookie.getValue(), expirationDuration);
}
}
/**
* Returns the writer associated with the given {@code response}.
*
* <p>Unlike the {@link ServletResponse#getWriter}, this method won't
* ever throw {@link IllegalStateException}.</p>
*/
public static PrintWriter getWriter(HttpServletResponse response) throws IOException {
try {
return response.getWriter();
} catch (IllegalStateException error) {
return new PrintWriter(new OutputStreamWriter(response.getOutputStream(), StringUtils.UTF_8));
}
}
/**
* {@linkplain javax.servlet.RequestDispatcher#include Includes} the
* resource at the given {@code path} and writes its output to the
* given {@code writer}. The given {@code attributes} are set on the
* request before execution, and any overridden values are restored
* before this method returns.
*
* @return {@code false} if servlet processing shouldn't continue
* any further (e.g. for when the resource redirects to a different
* page).
*/
public static boolean include(
HttpServletRequest request,
HttpServletResponse response,
Writer writer,
String path,
Object... attributes)
throws IOException, ServletException {
return includeEmbedded(null, request, response, writer, path, attributes);
}
/**
* Returns {@code true} if the given {@code request} is made with
* Ajax.
*/
public static boolean isAjaxRequest(HttpServletRequest request) {
return "XMLHttpRequest".equals(request.getHeader("X-Requested-With"));
}
/**
* Returns {@code true} if the given {@code request} and
* {@code response} has finished and shouldn't be processed
* any further.
*
* @see #finish
*/
public static boolean isFinished(
ServletRequest request,
ServletResponse response) {
return (response instanceof HttpServletResponse &&
((HttpServletResponse) response).containsHeader("Location")) ||
request.getAttribute(IS_FINISHED_ATTRIBUTE) != null;
}
/**
* Returns {@code true} if the given {@code request} is a form
* post.
*/
public static boolean isFormPost(HttpServletRequest request) {
return "POST".equals(request.getMethod());
}
/**
* Returns {@code true} if the given {@code request} is currently
* handling an error.
*/
public static boolean isError(ServletRequest request) {
return getErrorRequestUri(request) != null;
}
/**
* Use {@link #isError(ServletRequest)} instead.
*/
public static boolean isError(HttpServletRequest request) {
return isError((ServletRequest) request);
}
/**
* Returns {@code true} if the given {@code request} is forwarded from
* another.
*/
public static boolean isForwarded(ServletRequest request) {
return request.getAttribute("javax.servlet.forward.context_path") != null;
}
/**
* Returns {@code true} if the given {@code request} is included from
* another.
*/
public static boolean isIncluded(ServletRequest request) {
return request.getAttribute("javax.servlet.include.context_path") != null;
}
/**
* Returns {@code true} if the given {@code request} is secure. This method
* checks:
*
* <ul>
* <li>{@link ServletRequest#isSecure}</li>
* <li>{@code X-Forwarded-Proto} header</li>
* <li>{@code HTTPS} environment variable</li>
* </ul>
*/
public static boolean isSecure(HttpServletRequest request) {
return request.isSecure() ||
"https".equalsIgnoreCase(request.getHeader("X-Forwarded-Proto")) ||
System.getenv("HTTPS") != null;
}
/** @deprecated Use {@link #isSecure} instead. */
@Deprecated
public static boolean isSecureRequest(HttpServletRequest request) {
return isSecure(request);
}
/**
* Proxies the given {@code request} and {@code response} to the given
* {@code url} and writes the result to the given {@code writer}. Note
* that this method requires a HttpServletRequest implementation that
* allows re-reading the request content.
*
* @see ReusableRequestFilter
*/
public static void proxy(
HttpServletRequest request,
HttpServletResponse response,
Writer writer,
Object url,
Object... parameters)
throws IOException {
String method = request.getMethod();
String urlString = JspUtils.getAbsolutePath(request, url.toString(), parameters);
InputStream requestStream = null;
Reader requestReader = null;
BufferedOutputStream connectionOut = null;
BufferedInputStream connectionIn = null;
HttpURLConnection connection;
try {
connection = (HttpURLConnection) new URL(urlString).openConnection();
connection.setRequestMethod(method);
connection.setDoOutput(true);
connection.setDoInput(true);
connection.setUseCaches(true);
for (Enumeration<?> e = request.getHeaderNames(); e.hasMoreElements();) {
String headerName = e.nextElement().toString();
String headerValue = request.getHeader(headerName);
connection.setRequestProperty(headerName, headerValue);
}
connection.connect();
if ("POST".equalsIgnoreCase(method)) {
connectionOut = new BufferedOutputStream(connection.getOutputStream());
// first try to get the input stream
try {
requestStream = request.getInputStream();
int data;
while ((data = requestStream.read()) != -1) {
connectionOut.write(data);
}
} catch (IllegalStateException e1) {
// stream is unavailable, try the reader
try {
requestReader = request.getReader();
int data;
while ((data = requestReader.read()) != -1) {
connectionOut.write(data);
}
} catch (IllegalArgumentException e2) {
// oh well, we tried
}
}
connectionOut.flush();
}
connectionIn = new BufferedInputStream(connection.getInputStream());
int data;
while ((data = connectionIn.read()) != -1) {
writer.write(data);
}
connection.disconnect();
} finally {
if (requestStream != null) {
requestStream.close();
}
if (connectionIn != null) {
connectionIn.close();
}
if (connectionOut != null) {
connectionOut.close();
}
}
}
/**
* Redirects to the given {@code path} modified by the given
* {@code parameters} using the {@value
* javax.servlet.http.HttpServletResponse#SC_FOUND} status code.
*/
public static void redirect(
HttpServletRequest request,
HttpServletResponse response,
Object path,
Object... parameters)
throws IOException {
redirectEmbedded(null, request, response, path, parameters);
}
/**
* Redirects to the given {@code path} modified by the given
* {@code parameters} using the {@value
* javax.servlet.http.HttpServletResponse#SC_MOVED_PERMANENTLY}
* status code.
*/
public static void redirectPermanently(
HttpServletRequest request,
HttpServletResponse response,
Object path,
Object... parameters)
throws IOException {
redirectEmbeddedPermanently(null, request, response, path, parameters);
}
/**
* Resolves the given {@code path} in context of the given
* {@code request}. The return value includes the {@linkplain
* #getEmbeddedContextPath embedded context path}.
*
* <p>The following list details the different behaviors based on the
* given {@code path}:
*
* <ul>
* <li>If {@code null} or empty, returns the servlet path.
* <li>If it starts with {@code /}, prefixes the given {@code path}
* with the {@linkplain #getEmbeddedContextPath embedded context path}
* and returns it.
* <li>If it looks like an absolute URI, returns the given {@code path}
* without any changes.
* <li>Otherwise, resolves the path in context of the {@linkplain
* #getCurrentServletPath current servlet path} and returns it.
* </ul>
*
* @param context If {@code null}, the embedded context path won't
* be detected. This is only to support legacy APIs, and new
* applications shouldn't depend on this behavior.
*
* @see #getAbsolutePath(ServletContext, HttpServletRequest, String, Object...)
*/
public static String resolvePath(
ServletContext context,
HttpServletRequest request,
String path) {
if (path == null || path.isEmpty()) {
return request.getServletPath();
} else if (path.startsWith("/")) {
if (context != null) {
return getEmbeddedContextPath(context, request) + path;
} else {
return path;
}
} else if (ABSOLUTE_URI_PATTERN.matcher(path).matches()) {
return path;
} else {
try {
URI currentPath = new URI(getCurrentServletPath(request));
return currentPath.resolve(path).toString();
} catch (URISyntaxException ex) {
return path;
}
}
}
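// Illustrative behavior of resolvePath, assuming the current servlet path is
// "/dir/page.jsp" and the embedded context path resolves to "":
//
//   resolvePath(ctx, req, null)          -> "/dir/page.jsp"
//   resolvePath(ctx, req, "/other.jsp")  -> "/other.jsp"
//   resolvePath(ctx, req, "other.jsp")   -> "/dir/other.jsp"
//   resolvePath(ctx, req, "http://x/y")  -> "http://x/y"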
/**
* Sets all given {@code attributes} in the given {@code request}
* and returns the previously set values.
*/
public static Map<String, Object> setAttributes(
ServletRequest request,
Object... attributes) {
Map<String, Object> old = new HashMap<String, Object>();
for (int i = 0, length = attributes.length; i < length; i += 2) {
String key = String.valueOf(attributes[i]);
old.put(key, request.getAttribute(key));
request.setAttribute(key, i + 1 < length ? attributes[i + 1] : null);
}
return old;
}
/**
* Sets all given {@code attributes} in the given {@code request}
* and returns the previously set values.
*/
public static Map<String, Object> setAttributesWithMap(
ServletRequest request,
Map<String, Object> attributes) {
Map<String, Object> old = new HashMap<String, Object>();
for (Map.Entry<String, Object> e : attributes.entrySet()) {
String key = e.getKey();
old.put(key, request.getAttribute(key));
request.setAttribute(key, e.getValue());
}
return old;
}
/**
* Sets the servlet response that can be used to write the
* headers on the given {@code request}.
*
* <p>SRV.8.3 in the Java servlet specification states (emphasis added):
*
* <p><blockquote>The {@code include} method of the
* {@code RequestDispatcher} interface may be called at any time.
* The target servlet of the {@code include} method has access to all
* aspects of the request object, but its use of the response object
* is more limited.
*
* <br><br>It can only write information to the
* {@code ServletOutputStream} or {@code Writer} of the response
* object and commit a response by writing content past the end of
* the response buffer, or by explicitly calling the {@code flushBuffer}
* method of the {@code ServletResponse} interface. <strong>It cannot
* set headers or call any method that affects the headers of the
* response. Any attempt to do so must be ignored.</strong></blockquote>
*
* <p>This method should be used in the parent request so that the
* included page still has access to set the headers through the
* parent response object.
*
* @see #getHeaderResponse
* @see HeaderResponseFilter
*/
public static void setHeaderResponse(
ServletRequest request,
ServletResponse response) {
request.setAttribute(HEADER_RESPONSE_ATTRIBUTE, response);
}
/**
* Signs the given {@code cookie} and sets it in the given
* {@code response}.
*
* @see #getSignedCookie
*/
public static void setSignedCookie(
HttpServletResponse response,
Cookie cookie) {
cookie.setValue(signCookie(cookie.getName(), cookie.getValue()));
response.addCookie(cookie);
}
/**
* Signs the given {@code unsignedValue} that's associated to a
* cookie with the given {@code name}.
*/
public static String signCookie(String name, String unsignedValue) {
long timestamp = System.currentTimeMillis();
return unsignedValue +
"|" + timestamp +
"|" + createCookieSignature(name, unsignedValue, timestamp);
}
/**
* Unsigns the given {@code signedValue} that's associated to a
* cookie with the given {@code name}.
*/
public static String unsignCookie(String name, String signedValue) {
return unsignCookieWithExpiry(name, signedValue, 0);
}
/**
* Unsigns the given {@code signedValue} that's associated to a
* cookie with the given {@code name} so long as the
* given {@code expirationDuration} has not been exceeded. A zero or
* negative {@code expirationDuration} signifies that the cookie does not
* expire.
*/
public static String unsignCookieWithExpiry(String name, String signedValue, long expirationDuration) {
String[] parts = StringUtils.split(signedValue, "\\|");
if (parts.length != 3) {
LOGGER.debug("Not a valid signed cookie! {}", signedValue);
return null;
}
String unsignedValue = parts[0];
long timestamp = ObjectUtils.to(long.class, parts[1]);
String signature = parts[2];
String signatureCheck = createCookieSignature(name, unsignedValue, timestamp);
if (!signatureCheck.equals(signature)) {
LOGGER.debug("Failed signature! {} != {}", signatureCheck, signature);
return null;
}
long expiration = System.currentTimeMillis() - expirationDuration;
if (expirationDuration > 0 && timestamp < expiration) {
LOGGER.debug("Signature expired! {} < {}", timestamp, expiration);
return null;
}
return unsignedValue;
}
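// Illustrative round trip for the signing helpers above (cookie name and
// value are arbitrary):
//
//   Cookie cookie = new Cookie("session", "user123");
//   JspUtils.setSignedCookie(response, cookie);
//   // stored value now looks like "user123|<timestamp>|<signature>"
//   String value = JspUtils.getSignedCookie(request, "session");
//   // "user123", or null if the signature or expiry check fails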
// --- Embedded web application ---
/**
* Returns the context path of the embedded web application associated
* with the given {@code context} and {@code path}. This is detected
* by checking for the existence of {@code WEB-INF} in the common parent
* directory.
*/
public static String getEmbeddedContextPath(ServletContext context, String path) {
@SuppressWarnings("unchecked")
Map<String, String> contextPaths = (Map<String, String>) context.getAttribute(EMBEDDED_CONTEXT_PATHS);
if (contextPaths == null) {
contextPaths = new ConcurrentHashMap<String, String>();
context.setAttribute(EMBEDDED_CONTEXT_PATHS, contextPaths);
}
String contextPath = contextPaths.get(path);
if (contextPath == null) {
try {
URI pathUri = new URI(path).resolve("./");
while (context.getResource(pathUri.resolve(WEB_INF_DIRECTORY).toString()) == null &&
pathUri.toString().length() > 1) {
pathUri = pathUri.resolve("../");
}
String pathString = pathUri.toString();
contextPath = pathString.substring(0, pathString.length() - 1);
} catch (MalformedURLException error) {
// Default context path if the given path is malformed.
} catch (URISyntaxException error) {
// Default context path if the resolved URI is malformed.
}
if (contextPath == null) {
contextPath = "";
}
contextPaths.put(path, contextPath);
}
return contextPath;
}
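// Illustrative results, assuming a WEB-INF directory exists at
// "/app/WEB-INF/" in the servlet context:
//
//   getEmbeddedContextPath(context, "/app/sub/page.jsp") -> "/app"
//   getEmbeddedServletPath(context, "/app/sub/page.jsp") -> "/sub/page.jsp"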
/**
* Returns the servlet path {@linkplain #getEmbeddedContextPath in
* context of} the embedded web application associated with the given
* {@code context} and {@code path}.
*/
public static String getEmbeddedServletPath(ServletContext context, String path) {
String contextPath = getEmbeddedContextPath(context, path);
return path.substring(contextPath.length());
}
/**
* Returns the settings for all embedded web applications associated
* with the given {@code context}, keyed by their {@linkplain
* #getEmbeddedContextPath context paths}.
*/
public static Map<String, Properties> getEmbeddedSettings(ServletContext context) {
@SuppressWarnings("unchecked")
Map<String, Properties> all = (Map<String, Properties>) context.getAttribute(EMBEDDED_SETTINGS_ATTRIBUTE);
if (all == null) {
all = new CompactMap<String, Properties>();
addEmbeddedSettings(context, all, "/" + JspUtils.WEB_INF_DIRECTORY, "/");
context.setAttribute(EMBEDDED_SETTINGS_ATTRIBUTE, all);
}
return all;
}
private static void addEmbeddedSettings(
ServletContext context,
Map<String, Properties> all,
String suffix,
String path) {
@SuppressWarnings("unchecked")
Set<String> subPaths = (Set<String>) context.getResourcePaths(path);
if (subPaths == null) {
return;
}
for (String subPath : subPaths) {
if (subPath.endsWith(suffix)) {
Properties properties = new Properties();
String file = subPath + "classes/settings.properties";
InputStream input = context.getResourceAsStream(file);
if (input != null) {
try {
try {
properties.load(input);
all.put(subPath.substring(0, subPath.length() - suffix.length()), properties);
} finally {
input.close();
}
} catch (IOException error) {
LOGGER.warn(String.format(
"Can't read [%s] settings file!", file),
error);
}
}
} else if (subPath.endsWith("/")) {
addEmbeddedSettings(context, all, suffix, subPath);
}
}
}
/**
* Returns the absolute version of the given {@code path}. The return
* value includes the context path and is meant for final display
* (e.g. an HTML page).
*
* @param path If empty, existing query parameters from the given
* {@code request} are added in addition to the given {@code parameters}.
*
* @see #resolvePath
*/
public static String getEmbeddedAbsolutePath(
ServletContext context,
HttpServletRequest request,
String path,
Object... parameters) {
String resolved = resolvePath(context, request, path);
if (path != null && path.isEmpty()) {
String queryString = request.getQueryString();
if (queryString != null && queryString.length() > 0) {
resolved += "?" + StringUtils.replaceAll(
queryString,
"([?&])_[^=]*=[^&]*(?:&|$)", "$1",
"&$", "");
}
}
return StringUtils.addQueryParameters(
request.getContextPath() + resolved,
parameters);
}
/** Returns the absolute version of the given {@code url}. */
public static String getEmbeddedAbsoluteUrl(
ServletContext context,
HttpServletRequest request,
String url,
Object... parameters) {
return getHostUrl(request) + getAbsolutePath(context, request, url, parameters);
}
/** Returns the absolute protocol relative version of the given {@code url}. */
public static String getEmbeddedAbsoluteProtocolRelativeUrl(
ServletContext context,
HttpServletRequest request,
String url,
Object... parameters) {
return getProtocolRelativeHostUrl(request) + getAbsolutePath(context, request, url, parameters);
}
/**
* {@linkplain javax.servlet.RequestDispatcher#include Includes} the
* resource at the given {@code path} and writes its output to the
* given {@code writer}. The given {@code attributes} are set on the
* request before execution, and any overridden values are restored
* before this method returns.
*
* @return {@code false} if servlet processing shouldn't continue
* any further (e.g. for when the resource redirects to a different
* page).
*/
public static boolean includeEmbedded(
ServletContext context,
HttpServletRequest request,
HttpServletResponse response,
Writer writer,
String path,
Object... attributes)
throws IOException, ServletException {
Map<String, Object> old = setAttributes(request, attributes);
try {
path = resolvePath(context, request, path);
response = new IncludedHttpServletResponse(response, writer);
request.getRequestDispatcher(path).include(request, response);
} finally {
setAttributesWithMap(request, old);
}
return isFinished(request, response);
}
/**
* {@linkplain #include Included JSPs} need to inherit the writer
* from the parent response for correct rendering.
*/
private static class IncludedHttpServletResponse extends HttpServletResponseWrapper {
private final PrintWriter writer;
public IncludedHttpServletResponse(
HttpServletResponse response,
Writer writer) {
super(response);
this.writer = writer instanceof PrintWriter ?
(PrintWriter) writer :
new PrintWriter(writer);
}
// --- HttpServletResponseWrapper support ---
@Override
public PrintWriter getWriter() throws IOException {
return writer;
}
}
/**
* Redirects to the given {@code path} modified by the given
* {@code parameters} using the {@value
* javax.servlet.http.HttpServletResponse#SC_FOUND} status code.
*/
public static void redirectEmbedded(
ServletContext context,
HttpServletRequest request,
HttpServletResponse response,
Object path,
Object... parameters)
throws IOException {
redirectEmbeddedWithStatus(context, request, response, HttpServletResponse.SC_FOUND, path, parameters);
}
/**
* Redirects to the given {@code path} modified by the given
* {@code parameters} using the {@value
* javax.servlet.http.HttpServletResponse#SC_MOVED_PERMANENTLY}
* status code.
*/
public static void redirectEmbeddedPermanently(
ServletContext context,
HttpServletRequest request,
HttpServletResponse response,
Object path,
Object... parameters)
throws IOException {
redirectEmbeddedWithStatus(context, request, response, HttpServletResponse.SC_MOVED_PERMANENTLY, path, parameters);
}
/**
* Redirects to the given {@code path} modified by the given
* {@code parameters} using the given {@code status} code.
*/
private static void redirectEmbeddedWithStatus(
ServletContext context,
HttpServletRequest request,
HttpServletResponse response,
int status,
Object path,
Object... parameters)
throws IOException {
response = (HttpServletResponse) getHeaderResponse(request, response);
response.setStatus(status);
response.setHeader("Location", response.encodeRedirectURL(getAbsolutePath(context, request, path == null ? null : path.toString(), parameters)));
}
/**
* Wraps the default JSP factory using an instance of the given
* {@code wrapperClass}.
*/
public static void wrapDefaultJspFactory(Class<? extends JspFactoryWrapper> wrapperClass) {
JspFactory factory = JspFactory.getDefaultFactory();
for (JspFactory f = factory; f instanceof JspFactoryWrapper; f = ((JspFactoryWrapper) f).getDelegate()) {
if (wrapperClass.isInstance(f)) {
return;
}
}
JspFactoryWrapper wrapper = TypeDefinition.getInstance(wrapperClass).newInstance();
wrapper.setDelegate(factory);
JspFactory.setDefaultFactory(wrapper);
}
/**
* Unwraps the default JSP factory and restores the original default if
* it's an instance of the given {@code wrapperClass}.
*/
public static void unwrapDefaultJspFactory(Class<? extends JspFactoryWrapper> wrapperClass) {
JspFactory factory = JspFactory.getDefaultFactory();
if (factory instanceof JspFactoryWrapper) {
JspFactory.setDefaultFactory(((JspFactoryWrapper) factory).getDelegate());
}
}
// --- Deprecated ---
/** @deprecated Use {@link #getEmbeddedAbsolutePath} instead. */
@Deprecated
public static String getAbsolutePath(
ServletContext context,
HttpServletRequest request,
String path,
Object... parameters) {
return getEmbeddedAbsolutePath(context, request, path, parameters);
}
/** @deprecated Use {@link #getEmbeddedAbsoluteUrl} instead. */
@Deprecated
public static String getAbsoluteUrl(
ServletContext context,
HttpServletRequest request,
String url,
Object... parameters) {
return getEmbeddedAbsoluteUrl(context, request, url, parameters);
}
/** @deprecated Use {@link #getEmbeddedContextPath(ServletContext, String)} instead. */
@Deprecated
public static String getEmbeddedContextPath(ServletContext context, HttpServletRequest request) {
return getEmbeddedContextPath(context, getCurrentServletPath(request));
}
/** @deprecated Use {@link #getEmbeddedServletPath(ServletContext, String)} instead. */
@Deprecated
public static String getEmbeddedServletPath(ServletContext context, HttpServletRequest request) {
return getEmbeddedServletPath(context, getCurrentServletPath(request));
}
/** @deprecated Use {@link #getHost} instead. */
@Deprecated
public static String getFullyQualifiedDomain(HttpServletRequest request) {
return getHost(request);
}
/** @deprecated Use {@link #getAbsoluteUrl} instead. */
@Deprecated
public static String getFullyQualifiedUrl(HttpServletRequest request) {
String url = getAbsoluteUrl(request, "/");
return url.substring(0, url.length() - 1);
}
/** @deprecated Use {@link #getAbsolutePath} instead. */
@Deprecated
public static String getUrl(
HttpServletRequest request,
String path,
Object... parameters) {
return getAbsolutePath(request, path, parameters);
}
/** @deprecated Use {@link #includeEmbedded} instead. */
@Deprecated
public static boolean include(
ServletContext context,
HttpServletRequest request,
HttpServletResponse response,
Writer writer,
String path,
Object... attributes)
throws IOException, ServletException {
return includeEmbedded(context, request, response, writer, path, attributes);
}
/** @deprecated Use {@link #redirectEmbedded} instead. */
@Deprecated
public static void redirect(
ServletContext context,
HttpServletRequest request,
HttpServletResponse response,
Object path,
Object... parameters)
throws IOException {
redirectEmbedded(context, request, response, path, parameters);
}
/** @deprecated Use {@link #redirectEmbeddedPermanently} instead. */
@Deprecated
public static void redirectPermanently(
ServletContext context,
HttpServletRequest request,
HttpServletResponse response,
Object path,
Object... parameters)
throws IOException {
redirectEmbeddedPermanently(context, request, response, path, parameters);
}
}
| ["\"HTTPS\""] | [] | ["HTTPS"] | [] | ["HTTPS"] | java | 1 | 0 |
database_api.py | from pymongo import MongoClient
from dotenv import load_dotenv
import logging
import os
import certifi
load_dotenv()
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
"""
This file creates a DatabaseAPI object that handles all things related to the database.
This might include querying from or inserting information into the database.
Logic regarding the database should go into this file.
"""
class DatabaseAPI:
def __init__(self):
self.connect()
self.db_test = self.client.test
self.db = self.client.course_selection
def connect(self):
self.client = MongoClient(os.getenv("MONGO"), tlsCAFile=certifi.where())
self.db_admin = self.client.admin
logger.info(f"MongoDB server status: {self.db_admin.command('serverStatus')}")
def get_all_test(self):
# note that the return value needs to be jsonify-able, which means all
# fields have to be a str or an int/float; values of type ObjectId, which
# Mongo adds by default, would cause an error, so the second dict passed
# to find() specifies which fields to return and which to leave out
logger.info("Querying for all people in test database.")
try:
ret = list(self.db_test.people.find({}, {"_id": 0}))
except Exception as e:
logger.error(
f"Failed to query for all people in test database with error {e}"
)
ret = None
return ret
def close(self):
self.client.close()
if __name__ == "__main__":
# a basic example of how to use, can remove later
db = DatabaseAPI()
logger.info(db.get_all_test())
db.close()
| [] | [] | ["MONGO"] | [] | ["MONGO"] | python | 1 | 0 |
flod_admin_frontend/flod_admin_frontend/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from flask import Flask
from webassets.loaders import PythonLoader
from flask.ext.assets import Environment, Bundle
app = Flask(__name__)
app.debug = os.environ.get('DEBUG') == 'True'
assets = Environment(app)
assets.debug = app.debug
bundles = PythonLoader('assetbundle').load_bundles()
for name, bundle in bundles.iteritems():
assets.register(name, bundle)
from flod_admin_frontend import views, proxy, matrikkel_proxy
def check_environment(app):
if 'AUTH_TOKEN_SECRET' not in os.environ:
raise EnvironmentError(('Environment variable AUTH_TOKEN_SECRET must '
'be set'))
check_environment(app)
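# Example (illustrative): startup fails fast without AUTH_TOKEN_SECRET, so a
# typical invocation looks like this (the runner module name is an assumption):
#   AUTH_TOKEN_SECRET=... DEBUG=True python runserver.py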
| [] | [] | ["DEBUG"] | [] | ["DEBUG"] | python | 1 | 0 |
test/extended/router/weighted.go | package router
import (
"encoding/csv"
"fmt"
"net/http"
"os"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/origin/test/extended/util"
)
var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
defer g.GinkgoRecover()
var (
configPath = exutil.FixturePath("testdata", "weighted-router.yaml")
oc = exutil.NewCLI("weighted-router", exutil.KubeConfigPath())
)
g.BeforeEach(func() {
imagePrefix := os.Getenv("OS_IMAGE_PREFIX")
if len(imagePrefix) == 0 {
imagePrefix = "openshift/origin"
}
err := oc.AsAdmin().Run("new-app").Args("-f", configPath, "-p", "IMAGE="+imagePrefix+"-haproxy-router").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
})
g.Describe("The HAProxy router", func() {
g.It("should serve a route that points to two services and respect weights", func() {
defer func() {
if g.CurrentGinkgoTestDescription().Failed {
dumpWeightedRouterLogs(oc, g.CurrentGinkgoTestDescription().FullTestText)
}
}()
ns := oc.KubeFramework().Namespace.Name
execPodName := exutil.CreateExecPodOrFail(oc.AdminKubeClient().Core(), ns, "execpod")
defer func() { oc.AdminKubeClient().Core().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
g.By(fmt.Sprintf("creating a weighted router from a config file %q", configPath))
var routerIP string
err := wait.Poll(time.Second, changeTimeoutSeconds*time.Second, func() (bool, error) {
pod, err := oc.KubeFramework().ClientSet.Core().Pods(oc.KubeFramework().Namespace.Name).Get("weighted-router", metav1.GetOptions{})
if err != nil {
return false, err
}
if len(pod.Status.PodIP) == 0 {
return false, nil
}
routerIP = pod.Status.PodIP
return true, nil
})
o.Expect(err).NotTo(o.HaveOccurred())
// router expected to listen on port 80
routerURL := fmt.Sprintf("http://%s", routerIP)
g.By("waiting for the healthz endpoint to respond")
healthzURI := fmt.Sprintf("http://%s:1936/healthz", routerIP)
err = waitForRouterOKResponseExec(ns, execPodName, healthzURI, routerIP, changeTimeoutSeconds)
o.Expect(err).NotTo(o.HaveOccurred())
host := "weighted.example.com"
times := 100
g.By(fmt.Sprintf("checking that %d requests go through successfully", times))
// wait for the request to stabilize
err = waitForRouterOKResponseExec(ns, execPodName, routerURL, "weighted.example.com", changeTimeoutSeconds)
o.Expect(err).NotTo(o.HaveOccurred())
// all requests should now succeed
err = expectRouteStatusCodeRepeatedExec(ns, execPodName, routerURL, "weighted.example.com", http.StatusOK, times)
o.Expect(err).NotTo(o.HaveOccurred())
g.By(fmt.Sprintf("checking that there are three weighted backends in the router stats"))
var trafficValues []string
err = wait.PollImmediate(100*time.Millisecond, changeTimeoutSeconds*time.Second, func() (bool, error) {
statsURL := fmt.Sprintf("http://%s:1936/;csv", routerIP)
stats, err := getAuthenticatedRouteURLViaPod(ns, execPodName, statsURL, host, "admin", "password")
o.Expect(err).NotTo(o.HaveOccurred())
trafficValues, err = parseStats(stats, "weightedroute", 7)
o.Expect(err).NotTo(o.HaveOccurred())
return len(trafficValues) == 3, nil
})
o.Expect(err).NotTo(o.HaveOccurred())
trafficEP1, err := strconv.Atoi(trafficValues[0])
o.Expect(err).NotTo(o.HaveOccurred())
trafficEP2, err := strconv.Atoi(trafficValues[1])
o.Expect(err).NotTo(o.HaveOccurred())
weightedRatio := float32(trafficEP1) / float32(trafficEP2)
if weightedRatio < 5 && weightedRatio > 0.2 {
e2e.Failf("Unexpected weighted ratio for incoming traffic: %v (%d/%d)", weightedRatio, trafficEP1, trafficEP2)
}
g.By(fmt.Sprintf("checking that zero weights are also respected by the router"))
host = "zeroweight.example.com"
err = expectRouteStatusCodeExec(ns, execPodName, routerURL, host, http.StatusServiceUnavailable)
o.Expect(err).NotTo(o.HaveOccurred())
})
})
})
func parseStats(stats string, backendSubstr string, statsField int) ([]string, error) {
r := csv.NewReader(strings.NewReader(stats))
records, err := r.ReadAll()
if err != nil {
return nil, err
}
fieldValues := make([]string, 0)
for _, rec := range records {
if strings.Contains(rec[0], backendSubstr) && !strings.Contains(rec[1], "BACKEND") {
fieldValues = append(fieldValues, rec[statsField])
}
}
return fieldValues, nil
}
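// Illustrative input for parseStats: HAProxy's ";csv" stats output has one
// row per proxy/server pair, and statsField 7 selects the "stot" (total
// sessions) column in the layout assumed here; exact columns can vary by
// HAProxy version. A trimmed example row:
//
//   # pxname,svname,qcur,qmax,scur,smax,slim,stot,...
//   be_http:ns:weightedroute,pod:ep-1,0,0,0,1,,66,...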
func dumpWeightedRouterLogs(oc *exutil.CLI, name string) {
log, _ := e2e.GetPodLogs(oc.AdminKubeClient(), oc.KubeFramework().Namespace.Name, "weighted-router", "router")
e2e.Logf("Weighted Router test %s logs:\n %s", name, log)
}
| ["\"OS_IMAGE_PREFIX\""] | [] | ["OS_IMAGE_PREFIX"] | [] | ["OS_IMAGE_PREFIX"] | go | 1 | 0 |
pkg/cmd/joblog/joblog.go | package joblog
import (
"context"
"fmt"
"io"
"os"
"sort"
"strings"
"time"
"github.com/jenkins-x-plugins/jx-admin/pkg/bootjobs"
"github.com/jenkins-x-plugins/jx-admin/pkg/rootcmd"
"github.com/jenkins-x/jx-helpers/v3/pkg/cobras/helper"
"github.com/jenkins-x/jx-helpers/v3/pkg/cobras/templates"
"github.com/jenkins-x/jx-helpers/v3/pkg/input"
"github.com/jenkins-x/jx-helpers/v3/pkg/input/inputfactory"
"github.com/jenkins-x/jx-helpers/v3/pkg/kube"
"github.com/jenkins-x/jx-helpers/v3/pkg/kube/jobs"
"github.com/jenkins-x/jx-helpers/v3/pkg/kube/podlogs"
"github.com/jenkins-x/jx-helpers/v3/pkg/kube/pods"
"github.com/jenkins-x/jx-helpers/v3/pkg/options"
"github.com/jenkins-x/jx-helpers/v3/pkg/stringhelpers"
"github.com/jenkins-x/jx-helpers/v3/pkg/termcolor"
"github.com/jenkins-x/jx-kube-client/v3/pkg/kubeclient"
logger "github.com/jenkins-x/jx-logging/v3/pkg/log"
"github.com/pkg/errors"
"github.com/spf13/cobra"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// Options contains the command line arguments for this command
type Options struct {
options.BaseOptions
Namespace string
JobSelector string
GitOperatorSelector string
ContainerName string
CommitSHA string
Duration time.Duration
PollPeriod time.Duration
NoTail bool
ShaMode bool
WaitMode bool
ErrOut io.Writer
Out io.Writer
KubeClient kubernetes.Interface
Input input.Interface
timeEnd time.Time
podStatusMap map[string]string
}
var (
info = termcolor.ColorInfo
cmdLong = templates.LongDesc(`
Views the boot Job logs in the cluster
`)
cmdExample = templates.Examples(`
* views the current boot logs
` + bashExample("log") + `
`)
)
// bashExample returns markdown for a bash script expression
func bashExample(cli string) string {
return fmt.Sprintf("\n```bash \n%s %s\n```\n", rootcmd.BinaryName, cli)
}
// NewCmdJobLog creates the new command
func NewCmdJobLog() (*cobra.Command, *Options) {
options := &Options{}
command := &cobra.Command{
Use: "log",
Short: "views the boot Job logs in the cluster",
Aliases: []string{"logs"},
Long: cmdLong,
Example: cmdExample,
Run: func(command *cobra.Command, args []string) {
err := options.Run()
helper.CheckErr(err)
},
}
command.Flags().StringVarP(&options.Namespace, "namespace", "n", "", "the namespace where the boot jobs run. If not specified it will look in: jx-git-operator and jx")
command.Flags().StringVarP(&options.JobSelector, "selector", "s", "app=jx-boot", "the selector of the boot Job pods")
command.Flags().StringVarP(&options.GitOperatorSelector, "git-operator-selector", "g", "app=jx-git-operator", "the selector of the git operator pod")
command.Flags().StringVarP(&options.ContainerName, "container", "c", "job", "the name of the container in the boot Job to log")
command.Flags().StringVarP(&options.CommitSHA, "commit-sha", "", "", "the git commit SHA of the git repository to query the boot Job for")
command.Flags().BoolVarP(&options.WaitMode, "wait", "w", false, "wait for the next active Job to start")
command.Flags().BoolVarP(&options.ShaMode, "sha-mode", "", false, "if --commit-sha is not specified then default the git commit SHA from $ and fail if it could not be found")
command.Flags().DurationVarP(&options.Duration, "duration", "d", time.Minute*30, "how long to wait for a Job to be active and a Pod to be ready")
command.Flags().DurationVarP(&options.PollPeriod, "poll", "", time.Second*1, "duration between polls for an active Job or Pod")
options.BaseOptions.AddBaseFlags(command)
return command, options
}
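// Illustrative CLI usage (the binary name comes from rootcmd.BinaryName, so
// the invocations below are assumptions):
//
//   jx-admin log                     # tail the latest boot Job
//   jx-admin log --wait              # wait for the next boot Job to start
//   jx-admin log --commit-sha <sha>  # logs for the Job built from a commit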
func (o *Options) Run() error {
err := o.Validate()
if err != nil {
return err
}
client := o.KubeClient
selector := o.JobSelector
containerName := o.ContainerName
ns, err := bootjobs.FindGitOperatorNamespace(client, o.Namespace)
if err != nil {
return errors.Wrapf(err, "failed to find the git operator namespace")
}
jobs, err := bootjobs.GetSortedJobs(client, ns, selector, o.CommitSHA)
if err != nil {
return errors.Wrapf(err, "failed to get jobs")
}
if !o.WaitMode && len(jobs) <= 1 {
if len(jobs) == 0 {
o.WaitMode = true
} else {
j := jobs[0]
if j.Status.Active > 0 {
o.WaitMode = true
}
}
}
if o.WaitMode {
err = o.waitForGitOperator(client, ns, selector)
if err != nil {
return errors.Wrapf(err, "failed to wait for git operator")
}
return o.waitForActiveJob(client, ns, selector, info, containerName)
}
return o.pickJobToLog(client, ns, selector, jobs)
}
func (o *Options) waitForGitOperator(client kubernetes.Interface, ns, selector string) error {
o.timeEnd = time.Now().Add(o.Duration)
logger.Logger().Infof("waiting for the Git Operator to be ready in namespace %s...", info(ns))
goPod, err := pods.WaitForPodSelectorToBeReady(client, ns, o.GitOperatorSelector, o.Duration)
if err != nil {
return errors.Wrapf(err, "failed waiting for the git operator pod to be ready in namespace %s with selector %s", ns, o.GitOperatorSelector)
}
if goPod == nil {
logger.Logger().Infof(`Could not find the git operator.
Are you sure you have installed the git operator?
See: https://jenkins-x.io/docs/v3/guides/operator/
`)
return errors.Wrapf(err, "no git operator pod to be ready in namespace %s with selector %s", ns, o.GitOperatorSelector)
}
logger.Logger().Infof("the Git Operator is running in pod %s\n\n", info(goPod.Name))
if o.CommitSHA != "" {
logger.Logger().Infof("waiting for boot Job pod with selector %s in namespace %s for commit SHA %s...", info(selector), info(ns), info(o.CommitSHA))
} else {
logger.Logger().Infof("waiting for boot Job pod with selector %s in namespace %s...", info(selector), info(ns))
}
return nil
}
func (o *Options) waitForActiveJob(client kubernetes.Interface, ns string, selector string, info func(a ...interface{}) string, containerName string) error {
job, err := o.waitForLatestJob(client, ns, selector)
if err != nil {
return errors.Wrapf(err, "failed to wait for active Job in namespace %s with selector %v", ns, selector)
}
logger.Logger().Infof("waiting for Job %s to complete...", info(job.Name))
return o.viewActiveJobLog(client, ns, selector, containerName, job)
}
func (o *Options) viewActiveJobLog(client kubernetes.Interface, ns string, selector string, containerName string, job *batchv1.Job) error {
var foundPods []string
for {
complete, pod, err := o.waitForJobCompleteOrPodRunning(client, ns, selector, job.Name)
if err != nil {
return err
}
if complete {
return nil
}
if pod == nil {
return errors.Errorf("No pod found for namespace %s with selector %v", ns, selector)
}
if time.Now().After(o.timeEnd) {
return errors.Errorf("timed out after waiting for duration %s", o.Duration.String())
}
		// let's verify the container name
err = verifyContainerName(pod, containerName)
if err != nil {
return err
}
podName := pod.Name
if stringhelpers.StringArrayIndex(foundPods, podName) < 0 {
foundPods = append(foundPods, podName)
}
logger.Logger().Infof("\ntailing boot Job pod %s\n\n", info(podName))
err = podlogs.TailLogs(ns, podName, containerName, o.ErrOut, o.Out)
if err != nil {
logger.Logger().Warnf("failed to tail log: %s", err.Error())
}
pod, err = client.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "failed to get pod %s in namespace %s", podName, ns)
}
if pods.IsPodCompleted(pod) {
if pods.IsPodSucceeded(pod) {
logger.Logger().Infof("boot Job pod %s has %s", info(podName), info("Succeeded"))
} else {
logger.Logger().Infof("boot Job pod %s has %s", info(podName), termcolor.ColorError(string(pod.Status.Phase)))
}
} else if pod.DeletionTimestamp != nil {
logger.Logger().Infof("boot Job pod %s is %s", info(podName), termcolor.ColorWarning("Terminating"))
}
}
}
func (o *Options) viewJobLog(client kubernetes.Interface, ns string, selector string, containerName string, job *batchv1.Job) error {
opts := metav1.ListOptions{
LabelSelector: "job-name=" + job.Name,
}
podList, err := client.CoreV1().Pods(ns).List(context.TODO(), opts)
if err != nil && apierrors.IsNotFound(err) {
err = nil
}
if err != nil {
return errors.Wrapf(err, "failed to list pods in namespace %s with selector %s", ns, selector)
}
var answer error
for i := range podList.Items {
pod := &podList.Items[i]
		// let's verify the container name
err = verifyContainerName(pod, containerName)
if err != nil {
return err
}
// wait for a pod to be running, ready or completed
condition := func(pod *v1.Pod) bool {
return pods.IsPodReady(pod) || pods.IsPodCompleted(pod) || pod.Status.Phase == corev1.PodRunning
}
err = pods.WaitforPodNameCondition(client, ns, pod.Name, o.Duration, condition)
if err != nil {
return errors.Wrapf(err, "failed to wait for pod %s to be running", pod.Name)
}
podName := pod.Name
logger.Logger().Infof("\ntailing boot Job pod %s\n\n", info(podName))
err = podlogs.TailLogs(ns, podName, containerName, o.ErrOut, o.Out)
if err != nil {
logger.Logger().Warnf("failed to tail log: %s", err.Error())
}
pod, err = client.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "failed to get pod %s in namespace %s", podName, ns)
}
if pods.IsPodCompleted(pod) {
if pods.IsPodSucceeded(pod) {
logger.Logger().Infof("boot Job pod %s has %s", info(podName), info("Succeeded"))
} else {
logger.Logger().Infof("boot Job pod %s has %s", info(podName), termcolor.ColorError(string(pod.Status.Phase)))
if answer == nil {
answer = errors.Errorf("boot Job pod %s has %s", podName, string(pod.Status.Phase))
}
}
} else if pod.DeletionTimestamp != nil {
logger.Logger().Infof("boot Job pod %s is %s", info(podName), termcolor.ColorWarning("Terminating"))
}
}
return answer
}
// Validate verifies the settings are correct and we can lazy create any required resources
func (o *Options) Validate() error {
if o.NoTail {
return nil
}
if o.ErrOut == nil {
o.ErrOut = os.Stderr
}
if o.Out == nil {
o.Out = os.Stdout
}
if o.ShaMode && o.CommitSHA == "" {
o.CommitSHA = os.Getenv("PULL_BASE_SHA")
if o.ShaMode && o.CommitSHA == "" {
return errors.Errorf("you have specified --sha-mode but no $PULL_BASE_SHA is defined or --commit-sha option supplied")
}
}
var err error
o.KubeClient, err = kube.LazyCreateKubeClientWithMandatory(o.KubeClient, true)
if err != nil {
return errors.Wrapf(err, "failed to create kubernetes client")
}
if o.Namespace == "" {
o.Namespace, err = kubeclient.CurrentNamespace()
if err != nil {
return errors.Wrapf(err, "failed to detect current namespace. Try supply --namespace")
}
}
if o.Input == nil {
o.Input = inputfactory.NewInput(&o.BaseOptions)
}
return nil
}
func (o *Options) waitForLatestJob(client kubernetes.Interface, ns, selector string) (*batchv1.Job, error) {
for {
job, err := o.getLatestJob(client, ns, selector)
if err != nil {
return nil, errors.Wrapf(err, "failed to ")
}
if job != nil {
if o.CommitSHA != "" || !jobs.IsJobFinished(job) {
return job, nil
}
}
if time.Now().After(o.timeEnd) {
return nil, errors.Errorf("timed out after waiting for duration %s", o.Duration.String())
}
time.Sleep(o.PollPeriod)
}
}
func (o *Options) waitForJobCompleteOrPodRunning(client kubernetes.Interface, ns, selector, jobName string) (bool, *corev1.Pod, error) {
if o.podStatusMap == nil {
o.podStatusMap = map[string]string{}
}
for {
complete, job, err := o.checkIfJobComplete(client, ns, jobName)
if err != nil {
return false, nil, errors.Wrapf(err, "failed to check for Job %s complete", jobName)
}
if complete {
if job != nil && !jobs.IsJobSucceeded(job) {
return true, nil, errors.Errorf("job %s failed", jobName)
}
return true, nil, nil
}
pod, err := pods.GetReadyPodForSelector(client, ns, selector)
if err != nil {
return false, pod, errors.Wrapf(err, "failed to query ready pod in namespace %s with selector %s", ns, selector)
}
if pod != nil {
status := pods.PodStatus(pod)
if o.podStatusMap[pod.Name] != status && !pods.IsPodCompleted(pod) && pod.DeletionTimestamp == nil {
logger.Logger().Infof("pod %s has status %s", termcolor.ColorInfo(pod.Name), termcolor.ColorInfo(status))
o.podStatusMap[pod.Name] = status
}
if pod.Status.Phase == v1.PodRunning || pods.IsPodReady(pod) {
return false, pod, nil
}
}
if time.Now().After(o.timeEnd) {
return false, nil, errors.Errorf("timed out after waiting for duration %s", o.Duration.String())
}
time.Sleep(o.PollPeriod)
}
}
func (o *Options) getLatestJob(client kubernetes.Interface, ns, selector string) (*batchv1.Job, error) {
jobList, err := client.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{
LabelSelector: selector,
})
if err != nil && !apierrors.IsNotFound(err) {
return nil, errors.Wrapf(err, "failed to list jobList in namespace %s selector %s", ns, selector)
}
if len(jobList.Items) == 0 {
return nil, nil
}
if o.CommitSHA != "" {
for i := 0; i < len(jobList.Items); i++ {
job := &jobList.Items[i]
labels := job.Labels
if labels != nil {
if o.CommitSHA == labels[bootjobs.LabelCommitSHA] {
return job, nil
}
}
}
return nil, nil
}
	// let's find the newest job...
latest := jobList.Items[0]
for i := 1; i < len(jobList.Items); i++ {
job := jobList.Items[i]
if job.CreationTimestamp.After(latest.CreationTimestamp.Time) {
latest = job
}
}
return &latest, nil
}
func (o *Options) checkIfJobComplete(client kubernetes.Interface, ns, name string) (bool, *batchv1.Job, error) {
job, err := client.BatchV1().Jobs(ns).Get(context.TODO(), name, metav1.GetOptions{})
if job == nil || err != nil {
return false, nil, errors.Wrapf(err, "failed to list jobList in namespace %s name %s", ns, name)
}
if jobs.IsJobFinished(job) {
if jobs.IsJobSucceeded(job) {
logger.Logger().Infof("boot Job %s has %s", info(job.Name), info("Succeeded"))
return true, job, nil
}
logger.Logger().Infof("boot Job %s has %s", info(job.Name), termcolor.ColorError("Failed"))
return true, job, nil
}
logger.Logger().Debugf("boot Job %s is not completed yet", info(job.Name))
return false, job, nil
}
func (o *Options) pickJobToLog(client kubernetes.Interface, ns string, selector string, jobs []batchv1.Job) error {
var names []string
m := map[string]*batchv1.Job{}
for i := range jobs {
j := &jobs[i]
name := toJobName(j, len(jobs)-i)
m[name] = j
names = append(names, name)
}
name, err := o.Input.PickNameWithDefault(names, "select the Job to view:", "", "select which boot Job you wish to log")
if err != nil {
return errors.Wrapf(err, "failed to pick a boot job name")
}
if name == "" {
return errors.Errorf("no boot Jobs to view. Try add --active to wait for the next boot job")
}
job := m[name]
if job == nil {
return errors.Errorf("cannot find Job %s", name)
}
return o.viewJobLog(client, ns, selector, o.ContainerName, job)
}
func toJobName(j *batchv1.Job, number int) string {
status := JobStatus(j)
	d := time.Since(j.CreationTimestamp.Time).Round(time.Minute)
return fmt.Sprintf("#%d started %s %s", number, d.String(), status)
}
func JobStatus(j *batchv1.Job) string {
if jobs.IsJobSucceeded(j) {
return "Succeeded"
}
if jobs.IsJobFinished(j) {
return "Failed"
}
if j.Status.Active > 0 {
return "Running"
}
return "Pending"
}
func verifyContainerName(pod *corev1.Pod, name string) error {
var names []string
for i := range pod.Spec.Containers {
if pod.Spec.Containers[i].Name == name {
return nil
}
names = append(names, pod.Spec.Containers[i].Name)
}
sort.Strings(names)
return errors.Errorf("invalid container name %s for pod %s. Available names: %s", name, pod.Name, strings.Join(names, ", "))
}
| [
"\"PULL_BASE_SHA\""
] | [] | [
"PULL_BASE_SHA"
] | [] | ["PULL_BASE_SHA"] | go | 1 | 0 | |
pkg/resources/container.go | package resources
import (
"os"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
hawtiov1alpha1 "github.com/hawtio/hawtio-operator/pkg/apis/hawtio/v1alpha1"
)
const containerPortName = "https"
func newContainer(hawtio *hawtiov1alpha1.Hawtio, envVars []corev1.EnvVar, imageRepository string) corev1.Container {
container := corev1.Container{
Name: hawtio.Name + "-container",
Image: getImageFor(hawtio.Spec.Version, imageRepository),
Env: envVars,
ReadinessProbe: &corev1.Probe{
InitialDelaySeconds: 5,
TimeoutSeconds: 1,
PeriodSeconds: 5,
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Port: intstr.FromString(containerPortName),
Path: "/online",
Scheme: "HTTPS",
},
},
},
LivenessProbe: &corev1.Probe{
TimeoutSeconds: 1,
PeriodSeconds: 10,
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Port: intstr.FromString(containerPortName),
Path: "/online",
Scheme: "HTTPS",
},
},
},
Ports: []corev1.ContainerPort{
{
Name: containerPortName,
ContainerPort: 8443,
Protocol: "TCP",
},
},
Resources: hawtio.Spec.Resources,
}
return container
}
// TODO(): replace this function with the update code committed in PR #23
func getImageFor(version string, imageRepository string) string {
tag := "latest"
if len(version) > 0 {
tag = version
}
repository := os.Getenv("IMAGE_REPOSITORY")
if repository == "" {
if imageRepository != "" {
repository = imageRepository
} else {
repository = "docker.io/hawtio/online"
}
}
return repository + ":" + tag
}
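// Illustrative only (not part of the original source): getImageFor resolution
// order for some assumed inputs:
//
//	IMAGE_REPOSITORY=quay.io/hawtio/online, version=""        -> "quay.io/hawtio/online:latest"
//	no env var, version="1.7.1", imageRepository=""           -> "docker.io/hawtio/online:1.7.1"
//	no env var, version="", imageRepository="example/online"  -> "example/online:latest"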
| [
"\"IMAGE_REPOSITORY\""
] | [] | [
"IMAGE_REPOSITORY"
] | [] | ["IMAGE_REPOSITORY"] | go | 1 | 0 | |
webserver/pkgpkr/webservice/views.py | """
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class
RECOMMENDER_SERVICE = RecommenderService()
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
"""
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
"""
return render(request,
"webservice/index.html",
{'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request):
"""
Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
"""
return render(request, "webservice/about.html")
def login(request):
""" Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Create keys if not yet there!
if not request.session.get('github_token'):
request.session['github_token'] = None # To keep API token
        request.session['github_info'] = None  # To keep user info (e.g. name, avatar url)
# For Selenium testing
if os.environ.get('SELENIUM_TEST') == '1':
assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
request.session['github_token'] = os.environ.get('GH_TOKEN')
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# Redirect to attempt Github Auth
return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
def callback(request):
"""
GitHub redirect here, then retrieves token for API
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Get code supplied by github
code = request.GET.get('code')
# Payload to fetch
payload = {'client_id': GITHUB_CLIENT_ID,
'client_secret': GITHUB_CLIENT_SECRET,
'code': code}
headers = {"accept": "application/json"}
# Call github to get token
res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
data=payload,
headers=headers)
# Set token
request.session['github_token'] = res.json()['access_token']
# Call for user info and store in sessions (to be used for UI)
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
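# Illustrative only (not part of the original source): a successful token
# exchange response from GITHUB_OATH_ACCESS_TOKEN_PATH looks roughly like
#   {"access_token": "gho_xxx", "scope": "repo", "token_type": "bearer"}
# which is why res.json()['access_token'] is read above.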
def logout(request):
"""
    Logs the user out but keeps the GitHub OAuth authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Flush the session
request.session['github_token'] = None
request.session['github_info'] = None
return HttpResponseRedirect(reverse("index"))
def repositories(request):
"""
Get full list (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
"""
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Get all repos
repos_per_language = github_util.get_repositories(request.session['github_token'])
combined_repos = dict()
for language, repos in repos_per_language.items():
for repo in repos:
# Skip if repo has no dependencies
if not repo['object']:
continue
# Updated Date
date_time = repo['updatedAt']
# Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
date = date_time.split('T')[0]
repo['date'] = date
            # Convert string to encoded URL e.g. hello/world -> hello%2Fworld
repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])
repo['language'] = language
# Get dependencies if any, remember if at least some dependencies found
if parse_dependencies(repo['object']['text'], language, True):
combined_repos[repo['nameWithOwner']] = repo
return render(request, "webservice/repositories.html", {
'repos': combined_repos.values()
})
def recommendations(request, name):
"""
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
# Process for DEMO run
if request.method == 'POST':
language = request.POST.get('language')
language = language.lower()
dependencies = request.POST.get('dependencies')
dependencies = dependencies.strip(',')
if language not in SUPPORTED_LANGUAGES.keys():
return HttpResponse(f'Demo language {language} not supported', status=404)
request.session['dependencies'] = dependencies
request.session['language'] = language
branch_name = None
branch_names = None
# If GET it means it's not a DEMO POST call with manual dependencies inputs
else:
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get branch names and language (ONLY) for the repo, no need for dependencies yet
_, branch_names, language = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
return render(request, "webservice/recommendations.html", {
'repository_name': repo_name,
'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
'branch_names': branch_names,
'current_branch': branch_name,
'language': language
})
def recommendations_json(request, name):
"""
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
if name == DEMO_REPO_INPUT_NAME:
dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
request.session.get('language'))
        # Set to None (also allows hiding the branch selector)
branch_name = None
else:
if not request.session.get('github_token'):
return HttpResponse('Unauthorized', status=401)
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get dependencies for current repo, and branch names for the repo
dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
# Get predictions
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Setup data to be returned
data = {
'repository_name': repo_name,
'current_branch': branch_name,
'data': recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
@csrf_exempt
def recommendations_service_api(request):
"""
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
"""
if request.method == 'POST':
# Fetch JSON
try:
json_data = json.loads(request.body) # request.raw_post_data w/ Django < 1.4
except json.JSONDecodeError:
return HttpResponseBadRequest("Could not parse JSON.")
# Fetch non-optional keys
try:
dependencies = json_data['dependencies']
language = json_data['language'].lower()
except KeyError:
return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
except AttributeError as e:
return HttpResponseBadRequest(f'Error casting language to lower(): {e}')
# Assure proper inputs
if not isinstance(dependencies, list) or not dependencies:
return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')
# Convert comma separated dependencies into proper expected format
if language == PYTHON:
dependencies = '\n'.join(dependencies)
elif language == JAVASCRIPT:
# Converts e.g ["lodash:4.17.15","react:16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
dependencies = ','.join(formatted_dependencies_list)
else:
return HttpResponseBadRequest(f"Language not supported: [{language}].")
# Parse dependencies
dependencies = github_util.parse_dependencies(dependencies, language)
# Get recommendation all or cutoff if limit specified
if 'max_recommendations' in json_data:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
json_data['max_recommendations'])
else:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Convert the output tuples into list of dictionaries with names to return back
output_recommended_dependencies = []
for recommended_dependency in recommended_dependencies:
d = dict()
d['forPackage'] = recommended_dependency[0]
d['recommendedPackage'] = recommended_dependency[1]
d['url'] = recommended_dependency[2]
d['pkgpkrScore'] = recommended_dependency[3]
d['absoluteTrendScore'] = recommended_dependency[4]
d['relativeTrendScore'] = recommended_dependency[5]
d['boundedPopularityScore'] = recommended_dependency[6]
d['boundedSimilarityScore'] = recommended_dependency[7]
d['categories'] = recommended_dependency[8]
d['displayDate'] = recommended_dependency[9]
d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
output_recommended_dependencies.append(d)
# Setup data to be returned
data = {
'language': language,
'recommended_dependencies': output_recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
return HttpResponseNotAllowed(['POST'])
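# Illustrative only (not part of the original source): an example POST body for
# recommendations_service_api, assuming the JavaScript ecosystem:
#   {
#     "language": "javascript",
#     "dependencies": ["[email protected]", "[email protected]"],
#     "max_recommendations": 10
#   }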
| [] | [] | [
"GH_TOKEN",
"SELENIUM_TEST"
] | [] | ["GH_TOKEN", "SELENIUM_TEST"] | python | 2 | 0 | |
provider/serverities_test.go | package provider
import (
"context"
"fmt"
"os"
"strings"
"testing"
"github.com/firehydrant/terraform-provider-firehydrant/firehydrant"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
func TestAccSeverities(t *testing.T) {
rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
resource.Test(t, resource.TestCase{
PreCheck: func() { testFireHydrantIsSetup(t) },
ProviderFactories: defaultProviderFactories(),
CheckDestroy: testSeverityDoesNotExist("firehydrant_severity.terraform-acceptance-test-severity"),
Steps: []resource.TestStep{
{
Config: testSeverityConfig(rName),
Check: resource.ComposeTestCheckFunc(
testSeverityExists("firehydrant_severity.terraform-acceptance-test-severity"),
resource.TestCheckResourceAttr("firehydrant_severity.terraform-acceptance-test-severity", "slug", strings.ToUpper(rName)),
),
},
			// TODO(bobbytables): Updating severities in Terraform is currently problematic because FireHydrant
			// uses slugs as IDs, and those slugs are stored in Terraform state as the resource ID. An update can
			// change a slug, but Terraform won't update the resource with the new slug as its ID, so a slug
			// update is technically not possible in Terraform against FireHydrant.
// {
// Config: testSeverityConfig(rNameUpdated),
// Check: resource.ComposeTestCheckFunc(
// testSeverityExists("firehydrant_severity.terraform-acceptance-test-severity"),
// resource.TestCheckResourceAttr("firehydrant_severity.terraform-acceptance-test-severity", "slug", strings.ToUpper(rNameUpdated)),
// ),
// },
},
})
}
const testSeverityConfigTemplate = `
resource "firehydrant_severity" "terraform-acceptance-test-severity" {
slug = "%s"
}
`
func testSeverityConfig(rName string) string {
return fmt.Sprintf(testSeverityConfigTemplate, strings.ToUpper(rName))
}
func testSeverityExists(resourceName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[resourceName]
if !ok {
return fmt.Errorf("Not found: %s", resourceName)
}
if rs.Primary.ID == "" {
return fmt.Errorf("ID was not set")
}
c, err := firehydrant.NewRestClient(os.Getenv("FIREHYDRANT_API_KEY"))
if err != nil {
return err
}
svc, err := c.GetSeverity(context.TODO(), rs.Primary.ID)
if err != nil {
return err
}
if expected, got := rs.Primary.Attributes["slug"], svc.Slug; expected != got {
return fmt.Errorf("Expected slug %s, got %s", expected, got)
}
if expected, got := rs.Primary.Attributes["description"], svc.Description; expected != got {
return fmt.Errorf("Expected description %s, got %s", expected, got)
}
return nil
}
}
func testSeverityDoesNotExist(resourceName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs := s.RootModule().Resources[resourceName]
if rs.Primary.ID == "" {
return fmt.Errorf("ID was not set")
}
_, err := firehydrant.NewRestClient(os.Getenv("FIREHYDRANT_API_KEY"))
if err != nil {
return err
}
		// TODO: Archiving doesn't hide severities from the details endpoint
// svc, err := c.GetSeverity(context.TODO(), rs.Primary.ID)
// if svc != nil {
// return fmt.Errorf("The severity existed, when it should not")
// }
// if _, isNotFound := err.(firehydrant.NotFound); !isNotFound {
// return err
// }
return nil
}
}
| [
"\"FIREHYDRANT_API_KEY\"",
"\"FIREHYDRANT_API_KEY\""
] | [] | [
"FIREHYDRANT_API_KEY"
] | [] | ["FIREHYDRANT_API_KEY"] | go | 1 | 0 | |
src/cd.go | package src
import (
"bytes"
"fmt"
"strconv"
"text/template"
"time"
"github.com/golang/protobuf/proto"
"github.com/revan730/clipper-cd-worker/types"
commonTypes "github.com/revan730/clipper-common/types"
)
func renderManifestTemplate(manifest string, params types.ManifestValues) (string, error) {
tpl := template.New("k8s manifest user's template")
tpl, err := tpl.Parse(manifest)
if err != nil {
return "", err
}
var buf bytes.Buffer
err = tpl.Execute(&buf, params)
if err != nil {
return "", err
}
return buf.String(), nil
}
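// Illustrative only (not part of the original source): a minimal manifest
// template accepted by renderManifestTemplate references the ManifestValues
// fields by name, e.g.:
//
//	apiVersion: apps/v1
//	kind: Deployment
//	metadata:
//	  name: {{ .Name }}
//	spec:
//	  replicas: {{ .Replicas }}
//	  template:
//	    spec:
//	      containers:
//	        - name: {{ .Name }}
//	          image: {{ .Image }}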
func (w *Worker) recordRevision(d types.Deployment, stdout string) error {
revision := &types.Revision{
DeploymentID: d.ID,
ArtifactID: d.ArtifactID,
Date: time.Now(),
Stdout: stdout,
Replicas: d.Replicas,
}
return w.databaseClient.CreateRevision(revision)
}
// updateDeploymentImage calls kubectl to change the deployment image, using a
// lock to synchronize update operations on the deployment
func (w *Worker) updateDeploymentImage(dep types.Deployment, artifactID int64) {
artifact, err := w.ciClient.GetBuildArtifactByID(artifactID)
if err != nil {
w.log.Error("Failed to get build artifact", err)
return
}
w.log.Info("Got artifact with url " + artifact.Name)
lockRes := strconv.FormatInt(dep.ID, 10)
err = w.distLock.Lock(lockRes)
if err != nil {
w.log.Error("Failed to acquire deployment lock", err)
return
}
changedOk, stdout := w.kubectl.ChangeImage(dep.K8SName, artifact.Name)
err = w.distLock.Unlock(lockRes)
if err != nil {
w.log.Error("Failed to release deployment lock", err)
}
dep.ArtifactID = artifact.ID
err = w.recordRevision(dep, stdout)
if err != nil {
w.log.Error("Failed to record revision", err)
}
	if changedOk {
err = w.databaseClient.SaveDeployment(&dep)
if err != nil {
w.log.Error("Failed to save deployment to db", err)
return
}
}
}
// scaleDeployment calls kubectl to scale the deployment, using a
// lock to synchronize update operations on the deployment
func (w *Worker) scaleDeployment(dep types.Deployment, replicas int64) {
lockRes := strconv.FormatInt(dep.ID, 10)
err := w.distLock.Lock(lockRes)
if err != nil {
w.log.Error("Failed to acquire deployment lock", err)
return
}
scaledOk, stdout := w.kubectl.ScaleDeployment(dep.K8SName, replicas)
err = w.distLock.Unlock(lockRes)
if err != nil {
w.log.Error("Failed to release deployment lock", err)
}
dep.Replicas = replicas
err = w.recordRevision(dep, stdout)
if err != nil {
w.log.Error("Failed to record revision", err)
}
	if scaledOk {
err = w.databaseClient.SaveDeployment(&dep)
if err != nil {
w.log.Error("Failed to save deployment to db", err)
return
}
}
}
// executeCDJob rolls new image onto k8s deployment
func (w *Worker) executeCDJob(CDJob commonTypes.CDJob) {
w.log.Info("Got CD job message")
deployments, err := w.databaseClient.FindDeploymentsByRepo(CDJob.RepoID)
if err != nil {
w.log.Error("Failed to execute CD job", err)
return
}
for _, dep := range deployments {
go w.updateDeploymentImage(dep, CDJob.ArtifactID)
}
}
func (w *Worker) updateImageFromProto(d types.Deployment) {
// Find deployment in database and call image update
deployment, err := w.databaseClient.FindDeployment(d.ID)
if err != nil {
w.log.Error("Failed to find deployment", err)
return
}
go w.updateDeploymentImage(*deployment, d.ArtifactID)
}
func (w *Worker) scaleFromProto(d types.Deployment) {
// Find deployment in database and call deployment scale
deployment, err := w.databaseClient.FindDeployment(d.ID)
if err != nil {
w.log.Error("Failed to find deployment", err)
return
}
go w.scaleDeployment(*deployment, d.Replicas)
}
// deleteDeployment removes deployment from k8s
func (w *Worker) deleteDeployment(d types.Deployment) bool {
artifact, err := w.ciClient.GetBuildArtifactByID(d.ArtifactID)
if err != nil {
w.log.Error("Failed to get build artifact", err)
return false
}
manifestVals := types.ManifestValues{
Name: d.K8SName,
Image: artifact.Name,
Replicas: d.Replicas,
}
manifest, err := renderManifestTemplate(d.Manifest, manifestVals)
if err != nil {
w.log.Error("Failed to render manifest template", err)
return false
}
ok, stdout := w.kubectl.DeleteDeployment(manifest)
	if !ok {
w.log.Info("Failed to delete deployment: " + stdout)
}
return ok
}
// initDeployment creates new deployment in k8s using manifest and
// provided image url
func (w *Worker) initDeployment(d types.Deployment) {
w.log.Info("Initializing new deployment")
artifact, err := w.ciClient.GetBuildArtifactByID(d.ArtifactID)
if err != nil {
w.log.Error("Failed to get build artifact", err)
return
}
manifestVals := types.ManifestValues{
Name: d.K8SName,
Image: artifact.Name,
Replicas: d.Replicas,
}
manifest, err := renderManifestTemplate(d.Manifest, manifestVals)
if err != nil {
w.log.Error("Failed to render manifest template", err)
return
}
fmt.Println("Manifest:\n" + manifest)
ok, stdout := w.kubectl.CreateDeployment(manifest)
	if !ok {
		fmt.Println("failed to create deployment")
	}
fmt.Println("stdout: " + stdout)
err = w.recordRevision(d, stdout)
if err != nil {
w.log.Error("Failed to write revision to db", err)
}
	if ok {
d.IsInitialized = true
err = w.databaseClient.SaveDeployment(&d)
if err != nil {
w.log.Error("Failed to update deployment db record", err)
}
}
}
func (w *Worker) reInitDeployment(d types.Deployment, manifest string) {
lockRes := strconv.FormatInt(d.ID, 10)
err := w.distLock.Lock(lockRes)
if err != nil {
w.log.Error("Failed to acquire deployment lock", err)
return
}
deleteOk := w.deleteDeployment(d)
	if deleteOk {
d.Manifest = manifest
w.initDeployment(d)
}
err = w.distLock.Unlock(lockRes)
if err != nil {
w.log.Error("Failed to release deployment lock", err)
}
}
func (w *Worker) updateManifestFromProto(d types.Deployment) {
	// Find deployment in database and re-initialize it with the new manifest
deployment, err := w.databaseClient.FindDeployment(d.ID)
if err != nil {
w.log.Error("Failed to find deployment", err)
return
}
go w.reInitDeployment(*deployment, d.Manifest)
}
func (w *Worker) deleteFromProto(d types.Deployment) {
	if !d.IsInitialized {
return
}
lockRes := strconv.FormatInt(d.ID, 10)
err := w.distLock.Lock(lockRes)
if err != nil {
w.log.Error("Failed to acquire deployment lock", err)
return
}
w.deleteDeployment(d)
err = w.distLock.Unlock(lockRes)
if err != nil {
w.log.Error("Failed to release deployment lock", err)
}
}
func (w *Worker) startConsuming() {
defer w.jobsQueue.Close()
blockMain := make(chan bool)
cdMsgsQueue, err := w.jobsQueue.MakeCDMsgChan()
if err != nil {
w.log.Fatal("Failed to create CD jobs channel", err)
}
initChan := w.apiServer.GetDepsChan()
changeImageChan := w.apiServer.GetImageChangeChan()
scaleChan := w.apiServer.GetScaleChan()
reInitChan := w.apiServer.GetReInitChan()
deleteChan := w.apiServer.GetDeleteChan()
go func() {
for {
select {
case m := <-cdMsgsQueue:
body := string(m)
w.log.Info("Received message from queue: " + body)
jobMsg := commonTypes.CDJob{}
err := proto.Unmarshal(m, &jobMsg)
if err != nil {
w.log.Error("Failed to unmarshal job message", err)
break
}
go w.executeCDJob(jobMsg)
case m := <-initChan:
w.log.Info("New deployment: " + m.K8SName)
go w.initDeployment(m)
case m := <-changeImageChan:
go w.updateImageFromProto(m)
case m := <-scaleChan:
go w.scaleFromProto(m)
case m := <-reInitChan:
go w.updateManifestFromProto(m)
case m := <-deleteChan:
go w.deleteFromProto(m)
}
}
}()
w.log.Info("Worker started")
<-blockMain
}
| [] | [] | [] | [] | [] | go | null | null | null |
gst/pipeline.go | // Package gst provides an easy API to create a GStreamer pipeline
package gst
/*
#cgo pkg-config: gstreamer-1.0 gstreamer-app-1.0
#include "gst.h"
*/
import "C"
import (
"fmt"
"os"
"strconv"
"strings"
"sync"
"unsafe"
"github.com/creamlab/ducksoup/types"
"github.com/google/uuid"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
// global state
var (
nvidiaEnabled bool
)
func init() {
nvidiaEnabled = strings.ToLower(os.Getenv("DS_NVIDIA")) == "true"
}
// Pipeline is a wrapper for a GStreamer pipeline and output track
type Pipeline struct {
mu sync.Mutex
id string // same as local/output track id
join types.JoinPayload
cPipeline *C.GstElement
audioOutput types.TrackWriter
videoOutput types.TrackWriter
filePrefix string
pliCallback func()
// stoppedCount=2 if audio and video have been stopped
stoppedCount int
// log
logger zerolog.Logger
}
func fileName(namespace string, prefix string, suffix string) string {
return namespace + "/" + prefix + "-" + suffix + ".mkv"
}
// API
func StartMainLoop() {
C.gstStartMainLoop()
}
// CreatePipeline creates a GStreamer pipeline
func CreatePipeline(join types.JoinPayload, filePrefix string) *Pipeline {
pipelineStr := newPipelineDef(join, filePrefix)
id := uuid.New().String()
cPipelineStr := C.CString(pipelineStr)
cId := C.CString(id)
defer C.free(unsafe.Pointer(cPipelineStr))
defer C.free(unsafe.Pointer(cId))
logger := log.With().
Str("context", "pipeline").
Str("namespace", join.Namespace).
Str("room", join.RoomId).
Str("user", join.UserId).
Str("pipeline", id).
Logger()
p := &Pipeline{
mu: sync.Mutex{},
id: id,
join: join,
cPipeline: C.gstParsePipeline(cPipelineStr, cId),
filePrefix: filePrefix,
stoppedCount: 0,
logger: logger,
}
p.logger.Info().Str("pipeline", pipelineStr).Msg("pipeline_created")
pipelineStoreSingleton.add(p)
return p
}
func (p *Pipeline) outputFiles() []string {
namespace := p.join.Namespace
hasFx := len(p.join.AudioFx) > 0 || len(p.join.VideoFx) > 0
if hasFx {
return []string{fileName(namespace, p.filePrefix, "dry"), fileName(namespace, p.filePrefix, "wet")}
} else {
return []string{fileName(namespace, p.filePrefix, "dry")}
}
}
func (p *Pipeline) PushRTP(kind string, buffer []byte) {
s := C.CString(kind + "_src")
defer C.free(unsafe.Pointer(s))
b := C.CBytes(buffer)
defer C.free(b)
C.gstPushBuffer(s, p.cPipeline, b, C.int(len(buffer)))
}
func (p *Pipeline) PushRTCP(kind string, buffer []byte) {
s := C.CString(kind + "_buffer")
defer C.free(unsafe.Pointer(s))
b := C.CBytes(buffer)
defer C.free(b)
	// RTCP forwarding is currently disabled
	//C.gstPushRTCPBuffer(s, p.cPipeline, b, C.int(len(buffer)))
}
func (p *Pipeline) BindTrack(kind string, t types.TrackWriter) (files []string) {
if kind == "audio" {
p.audioOutput = t
} else {
p.videoOutput = t
}
if p.audioOutput != nil && p.videoOutput != nil {
p.start()
files = p.outputFiles()
}
return
}
func (p *Pipeline) BindPLICallback(c func()) {
p.pliCallback = c
}
// start the GStreamer pipeline
func (p *Pipeline) start() {
genPLI := 1
if os.Getenv("DS_GST_DISABLE_PLI") == "true" {
genPLI = 0
}
C.gstStartPipeline(p.cPipeline, C.int(genPLI))
recording_prefix := fmt.Sprintf("%s/%s", p.join.Namespace, p.filePrefix)
p.logger.Info().Str("recording_prefix", recording_prefix).Msg("pipeline_started")
}
// stop the GStreamer pipeline
func (p *Pipeline) Stop() {
p.mu.Lock()
defer p.mu.Unlock()
p.stoppedCount += 1
if p.stoppedCount == 2 { // audio and video buffers from mixerSlice have been stopped
C.gstStopPipeline(p.cPipeline)
p.logger.Info().Msg("pipeline_stopped")
}
}
func (p *Pipeline) getPropInt(name string, prop string) int {
cName := C.CString(name)
cProp := C.CString(prop)
defer C.free(unsafe.Pointer(cName))
defer C.free(unsafe.Pointer(cProp))
return int(C.gstGetPropInt(p.cPipeline, cName, cProp))
}
func (p *Pipeline) setPropInt(name string, prop string, value int) {
// fx prefix needed (added during pipeline initialization)
cName := C.CString(name)
cProp := C.CString(prop)
cValue := C.int(value)
defer C.free(unsafe.Pointer(cName))
defer C.free(unsafe.Pointer(cProp))
C.gstSetPropInt(p.cPipeline, cName, cProp, cValue)
}
func (p *Pipeline) setPropFloat(name string, prop string, value float32) {
// fx prefix needed (added during pipeline initialization)
cName := C.CString(name)
cProp := C.CString(prop)
cValue := C.float(value)
defer C.free(unsafe.Pointer(cName))
defer C.free(unsafe.Pointer(cProp))
C.gstSetPropFloat(p.cPipeline, cName, cProp, cValue)
}
func (p *Pipeline) SetEncodingRate(kind string, value64 uint64) {
// see https://gstreamer.freedesktop.org/documentation/x264/index.html?gi-language=c#x264enc:bitrate
// see https://gstreamer.freedesktop.org/documentation/nvcodec/GstNvBaseEnc.html?gi-language=c#GstNvBaseEnc:bitrate
// see https://gstreamer.freedesktop.org/documentation/opus/opusenc.html?gi-language=c#opusenc:bitrate
value := int(value64)
prop := "bitrate"
if kind == "audio" {
p.setPropInt("audio_encoder_wet", prop, value)
} else {
names := []string{"video_encoder_dry", "video_encoder_wet"}
if p.join.VideoFormat == "VP8" {
// see https://gstreamer.freedesktop.org/documentation/vpx/GstVPXEnc.html?gi-language=c#GstVPXEnc:target-bitrate
prop = "target-bitrate"
} else if p.join.VideoFormat == "H264" {
// in kbit/s for x264enc and nvh264enc
value = value / 1000
}
for _, n := range names {
p.setPropInt(n, prop, value)
}
}
}
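// Illustrative only (not part of the original source): SetEncodingRate("video", 2000000)
// sets target-bitrate=2000000 (bit/s) on the VP8 encoders, but bitrate=2000
// (kbit/s) on the H264 encoders, matching the property units documented above.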
func (p *Pipeline) SetFxProp(name string, prop string, value float32) {
// fx prefix needed (added during pipeline initialization)
p.setPropFloat("client_"+name, prop, value)
}
func (p *Pipeline) GetFxProp(name string, prop string) float32 {
// fx prefix needed (added during pipeline initialization)
cName := C.CString("client_" + name)
cProp := C.CString(prop)
defer C.free(unsafe.Pointer(cName))
defer C.free(unsafe.Pointer(cProp))
return float32(C.gstGetPropFloat(p.cPipeline, cName, cProp))
}
func (p *Pipeline) SetFxPolyProp(name string, prop string, kind string, value string) {
cName := C.CString("client_" + name)
cProp := C.CString(prop)
defer C.free(unsafe.Pointer(cName))
defer C.free(unsafe.Pointer(cProp))
switch kind {
case "float":
if v, err := strconv.ParseFloat(value, 32); err == nil {
cValue := C.float(float32(v))
C.gstSetPropFloat(p.cPipeline, cName, cProp, cValue)
}
case "double":
if v, err := strconv.ParseFloat(value, 64); err == nil {
cValue := C.double(v)
C.gstSetPropDouble(p.cPipeline, cName, cProp, cValue)
}
case "int":
if v, err := strconv.ParseInt(value, 10, 32); err == nil {
cValue := C.int(int32(v))
C.gstSetPropInt(p.cPipeline, cName, cProp, cValue)
}
case "uint64":
if v, err := strconv.ParseInt(value, 10, 64); err == nil {
cValue := C.ulong(v)
C.gstSetPropUint64(p.cPipeline, cName, cProp, cValue)
}
}
}
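// Illustrative only (not part of the original source): assuming an effect named
// "fx0" with a float property "intensity", SetFxPolyProp("fx0", "intensity",
// "float", "0.75") parses the string and sets client_fx0.intensity to 0.75 on
// the underlying GStreamer element.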
| [
"\"DS_NVIDIA\"",
"\"DS_GST_DISABLE_PLI\""
] | [] | [
"DS_GST_DISABLE_PLI",
"DS_NVIDIA"
] | [] | ["DS_GST_DISABLE_PLI", "DS_NVIDIA"] | go | 2 | 0 | |
syne_tune/backend/sagemaker_backend/sagemaker_backend.py | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional
import boto3
from botocore.exceptions import ClientError
import numpy as np
from sagemaker import LocalSession, Session
from sagemaker.estimator import Framework
from syne_tune.backend.trial_backend import TrialBackend
from syne_tune.constants import ST_INSTANCE_TYPE, ST_INSTANCE_COUNT, ST_CHECKPOINT_DIR
from syne_tune.util import s3_experiment_path
from syne_tune.backend.trial_status import TrialResult, Status
from syne_tune.backend.sagemaker_backend.sagemaker_utils import (
sagemaker_search,
get_log,
sagemaker_fit,
metric_definitions_from_names,
add_syne_tune_dependency,
map_identifier_limited_length,
s3_copy_files_recursively,
s3_delete_files_recursively,
)
logger = logging.getLogger(__name__)
class SageMakerBackend(TrialBackend):
def __init__(
self,
sm_estimator: Framework,
metrics_names: Optional[List[str]] = None,
s3_path: Optional[str] = None,
delete_checkpoints: bool = False,
*args,
**sagemaker_fit_kwargs,
):
"""
:param sm_estimator: sagemaker estimator to be fitted
:param metrics_names: name of metrics passed to `report`, used to plot live curve in sagemaker (optional, only
used for visualization purpose)
:param s3_path: S3 base path used for checkpointing. The full path
also involves the tuner name and the trial_id
:param sagemaker_fit_kwargs: extra arguments that are passed to sagemaker.estimator.Framework when fitting the
job, for instance `{'train': 's3://my-data-bucket/path/to/my/training/data'}`
"""
assert (
not delete_checkpoints
), "delete_checkpoints=True not yet supported for SageMaker backend"
super(SageMakerBackend, self).__init__()
self.sm_estimator = sm_estimator
# edit the sagemaker estimator so that metrics of the user can be plotted over time by sagemaker and so that
# the report.py code is available
if metrics_names is None:
metrics_names = []
self.metrics_names = metrics_names
self.add_metric_definitions_to_sagemaker_estimator(metrics_names)
st_prefix = "st-"
if self.sm_estimator.base_job_name is None:
base_job_name = st_prefix
else:
base_job_name = st_prefix + self.sm_estimator.base_job_name
# Make sure len(base_job_name) <= 63
self.sm_estimator.base_job_name = map_identifier_limited_length(base_job_name)
add_syne_tune_dependency(self.sm_estimator)
self.job_id_mapping = {}
self.sagemaker_fit_kwargs = sagemaker_fit_kwargs
# we keep the list of jobs that were paused/stopped as Sagemaker training job status is not immediately changed
# after stopping a job.
self.paused_jobs = set()
self.stopped_jobs = set()
# Counts how often a trial has been resumed
self.resumed_counter = dict()
if s3_path is None:
s3_path = s3_experiment_path()
self.s3_path = s3_path.rstrip("/")
self.tuner_name = None
@property
def sm_client(self):
return boto3.client(service_name="sagemaker")
def add_metric_definitions_to_sagemaker_estimator(self, metrics_names: List[str]):
        # We add metric definitions corresponding to the metrics passed by `report` that the user wants to track;
        # this allows plotting live learning curves of metrics in the SageMaker console.
        # The reason we ask the user for metric names is that they must be known beforehand for live
        # plotting to work.
if self.sm_estimator.metric_definitions is None:
self.sm_estimator.metric_definitions = metric_definitions_from_names(
metrics_names
)
else:
self.sm_estimator.metric_definitions = (
self.sm_estimator.metric_definitions
+ metric_definitions_from_names(self.metrics_names)
)
if len(self.sm_estimator.metric_definitions) > 40:
logger.warning(
"Sagemaker only supports 40 metrics for learning curve visualization, keeping only the first 40"
)
self.sm_estimator.metric_definitions = self.sm_estimator.metric_definitions[
:40
]
def _all_trial_results(self, trial_ids: List[int]) -> List[TrialResult]:
res = sagemaker_search(
trial_ids_and_names=[
(jobid, self.job_id_mapping[jobid]) for jobid in trial_ids
],
sm_client=self.sm_client,
)
        # overrides the status returned by SageMaker, as the stopping decision may not have been propagated yet.
for trial_res in res:
if trial_res.trial_id in self.paused_jobs:
trial_res.status = Status.paused
if trial_res.trial_id in self.stopped_jobs:
trial_res.status = Status.stopped
return res
    @staticmethod
    def _numpy_serialize(config: Dict) -> Dict:
        # convert numpy scalars to native Python types so the dict is JSON serializable
        def np_encoder(obj):
            if isinstance(obj, np.generic):
                return obj.item()
        return json.loads(json.dumps(config, default=np_encoder))
def _checkpoint_s3_uri_for_trial(self, trial_id: int) -> str:
res_path = self.s3_path
if self.tuner_name is not None:
res_path = f"{res_path}/{self.tuner_name}"
return f"{res_path}/{str(trial_id)}/checkpoints/"
def _schedule(self, trial_id: int, config: Dict):
config[ST_CHECKPOINT_DIR] = "/opt/ml/checkpoints"
hyperparameters = config.copy()
        # This passes the instance type and instance count to the training function in SageMaker as
        # hyperparameters with the reserved names `st_instance_type` and `st_instance_count`.
        # We pass them as hyperparameters because this information is hard to obtain efficiently from
        # inside the SageMaker training script (it is not exposed, for instance, as SageMaker environment
        # variables). This allows us to: 1) measure cost in the worker; 2) tune instance_type and
        # instance_count by having `st_instance_type` or `st_instance_count` in the config space.
        # TODO once we have a multiobjective scheduler, we should add an example on how to tune instance-type/count.
if ST_INSTANCE_TYPE not in hyperparameters:
hyperparameters[ST_INSTANCE_TYPE] = self.sm_estimator.instance_type
else:
self.sm_estimator.instance_type = hyperparameters[ST_INSTANCE_TYPE]
if ST_INSTANCE_COUNT not in hyperparameters:
hyperparameters[ST_INSTANCE_COUNT] = self.sm_estimator.instance_count
else:
self.sm_estimator.instance_count = hyperparameters[ST_INSTANCE_COUNT]
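        # Illustrative only (not part of the original source): a config such as
        # {"lr": 0.01, "st_instance_type": "ml.m5.xlarge"} overrides the
        # estimator's instance type above, while st_instance_count falls back
        # to the estimator's default and is added to the hyperparameters.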
if self.sm_estimator.instance_type != "local":
checkpoint_s3_uri = self._checkpoint_s3_uri_for_trial(trial_id)
logging.info(
f"Trial {trial_id} will checkpoint results to {checkpoint_s3_uri}."
)
else:
# checkpointing is not supported in local mode. When using local mode with remote tuner (for instance for
# debugging), results are not stored.
checkpoint_s3_uri = None
# Once a trial gets resumed, the running job number has to feature in
# the SM job_name
job_name = f"{self.tuner_name}-{trial_id}"
job_running_number = self.resumed_counter.get(trial_id, 0)
if job_running_number > 0:
job_name += f"-{job_running_number}"
jobname = sagemaker_fit(
sm_estimator=self.sm_estimator,
# the encoder fixes json error "TypeError: Object of type 'int64' is not JSON serializable"
hyperparameters=self._numpy_serialize(hyperparameters),
checkpoint_s3_uri=checkpoint_s3_uri,
job_name=job_name,
**self.sagemaker_fit_kwargs,
)
logger.info(f"scheduled {jobname} for trial-id {trial_id}")
self.job_id_mapping[trial_id] = jobname
def _pause_trial(self, trial_id: int):
self._stop_trial_job(trial_id)
self.paused_jobs.add(trial_id)
def _stop_trial(self, trial_id: int):
training_job_name = self.job_id_mapping[trial_id]
logger.info(f"stopping {trial_id} ({training_job_name})")
self._stop_trial_job(trial_id)
self.stopped_jobs.add(trial_id)
def _stop_trial_job(self, trial_id: int):
training_job_name = self.job_id_mapping[trial_id]
try:
self.sm_client.stop_training_job(TrainingJobName=training_job_name)
except ClientError:
# the scheduler may have decided to stop a job that finished already
pass
def _resume_trial(self, trial_id: int):
assert (
trial_id in self.paused_jobs
), f"Try to resume trial {trial_id} that was not paused before."
self.paused_jobs.remove(trial_id)
if trial_id in self.resumed_counter:
self.resumed_counter[trial_id] += 1
else:
self.resumed_counter[trial_id] = 1
def stdout(self, trial_id: int) -> List[str]:
return get_log(self.job_id_mapping[trial_id])
def stderr(self, trial_id: int) -> List[str]:
return get_log(self.job_id_mapping[trial_id])
@property
def source_dir(self) -> Optional[str]:
return self.sm_estimator.source_dir
def set_entrypoint(self, entry_point: str):
self.sm_estimator.entry_point = entry_point
def entrypoint_path(self) -> Path:
return Path(self.sm_estimator.entry_point)
def __getstate__(self):
        # don't store the SageMaker client, which cannot be serialized; we could remove it by changing our
        # interface to take the kwargs/args of the SageMaker Framework in the constructor of this class
        # (those would be serializable), plus the class itself (for instance PyTorch)
self.sm_estimator.sagemaker_session = None
self.sm_estimator.latest_training_job = None
self.sm_estimator.jobs = []
return self.__dict__
def __setstate__(self, state):
self.__dict__ = state
self.initialize_sagemaker_session()
# adjust the dependencies when running Sagemaker backend on sagemaker with remote launcher
# since they are in a different path
is_running_on_sagemaker = "SM_OUTPUT_DIR" in os.environ
if is_running_on_sagemaker:
# todo support dependencies on Sagemaker estimator, one way would be to ship them with the remote
# dependencies
self.sm_estimator.dependencies = [
Path(dep).name for dep in self.sm_estimator.dependencies
]
def initialize_sagemaker_session(self):
if boto3.Session().region_name is None:
# avoids error "Must setup local AWS configuration with a region supported by SageMaker."
# in case no region is explicitely configured
os.environ["AWS_DEFAULT_REGION"] = "us-west-2"
if self.sm_estimator.instance_type in ("local", "local_gpu"):
if (
self.sm_estimator.instance_type == "local_gpu"
and self.sm_estimator.instance_count > 1
):
raise RuntimeError("Distributed Training in Local GPU is not supported")
self.sm_estimator.sagemaker_session = LocalSession()
else:
self.sm_estimator.sagemaker_session = Session()
def copy_checkpoint(self, src_trial_id: int, tgt_trial_id: int):
s3_source_path = self._checkpoint_s3_uri_for_trial(src_trial_id)
s3_target_path = self._checkpoint_s3_uri_for_trial(tgt_trial_id)
logger.info(
f"Copying checkpoint files from {s3_source_path} to " + s3_target_path
)
result = s3_copy_files_recursively(s3_source_path, s3_target_path)
num_action_calls = result["num_action_calls"]
if num_action_calls == 0:
logger.info(f"No checkpoint files found at {s3_source_path}")
else:
num_successful_action_calls = result["num_successful_action_calls"]
assert num_successful_action_calls == num_action_calls, (
f"{num_successful_action_calls} files copied successfully, "
+ f"{num_action_calls - num_successful_action_calls} failures. "
+ "Error:\n"
+ result["first_error_message"]
)
def delete_checkpoint(self, trial_id: int):
s3_path = self._checkpoint_s3_uri_for_trial(trial_id)
result = s3_delete_files_recursively(s3_path)
num_action_calls = result["num_action_calls"]
if num_action_calls > 0:
num_successful_action_calls = result["num_successful_action_calls"]
if num_successful_action_calls == num_action_calls:
logger.info(
f"Deleted {num_action_calls} checkpoint files from {s3_path}"
)
else:
logger.warning(
f"Successfully deleted {num_successful_action_calls} "
f"checkpoint files from {s3_path}, but failed to delete "
f"{num_action_calls - num_successful_action_calls} files. "
"Error:\n" + result["first_error_message"]
)
def set_path(
self, results_root: Optional[str] = None, tuner_name: Optional[str] = None
):
# we use the tuner-name to set the checkpoint directory
self.tuner_name = tuner_name
| [] | [] | [
"AWS_DEFAULT_REGION"
] | [] | ["AWS_DEFAULT_REGION"] | python | 1 | 0 | |
chief/main.go | package main
import (
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
machinery "github.com/RichardKnop/machinery/v1"
"github.com/RichardKnop/machinery/v1/backends/result"
machineryConfig "github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/tasks"
"github.com/ghodss/yaml"
"github.com/google/uuid"
"github.com/urfave/cli"
validator "gopkg.in/go-playground/validator.v9"
)
var (
app *cli.App
configPath string
server *machinery.Server
irgshConfig IrgshConfig
)
type Submission struct {
TaskUUID string `json:"taskUUID"`
Timestamp time.Time `json:"timestamp"`
SourceURL string `json:"sourceUrl"`
PackageURL string `json:"packageUrl"`
Tarball string `json:"tarball"`
IsExperimental bool `json:"isExperimental"`
}
type ArtifactsPayloadResponse struct {
Data []string `json:"data"`
}
type SubmitPayloadResponse struct {
PipelineId string `json:"pipelineId"`
Jobs []string `json:"jobs,omitempty"`
}
func main() {
log.SetFlags(log.LstdFlags | log.Lshortfile)
// Load config
configPath = os.Getenv("IRGSH_CONFIG_PATH")
if len(configPath) == 0 {
configPath = "/etc/irgsh/config.yml"
}
irgshConfig = IrgshConfig{}
yamlFile, err := ioutil.ReadFile(configPath)
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
err = yaml.Unmarshal(yamlFile, &irgshConfig)
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
validate := validator.New()
err = validate.Struct(irgshConfig.Chief)
if err != nil {
log.Fatal(err.Error())
os.Exit(1)
}
app = cli.NewApp()
app.Name = "irgsh-go"
app.Usage = "irgsh-go distributed packager"
app.Author = "BlankOn Developer"
app.Email = "[email protected]"
app.Version = "IRGSH_GO_VERSION"
app.Action = func(c *cli.Context) error {
server, err = machinery.NewServer(
&machineryConfig.Config{
Broker: irgshConfig.Redis,
ResultBackend: irgshConfig.Redis,
DefaultQueue: "irgsh",
},
)
if err != nil {
fmt.Println("Could not create server : " + err.Error())
}
serve()
return nil
}
app.Run(os.Args)
}
func serve() {
http.HandleFunc("/", IndexHandler)
http.HandleFunc("/api/v1/artifacts", ArtifactsHandler)
http.HandleFunc("/api/v1/submit", PackageSubmitHandler)
http.HandleFunc("/api/v1/status", BuildStatusHandler)
http.HandleFunc("/api/v1/artifact-upload", artifactUploadHandler())
http.HandleFunc("/api/v1/log-upload", logUploadHandler())
http.HandleFunc("/api/v1/build-iso", BuildISOHandler)
artifactFs := http.FileServer(http.Dir(irgshConfig.Chief.Workdir + "/artifacts"))
http.Handle("/artifacts/", http.StripPrefix("/artifacts/", artifactFs))
logFs := http.FileServer(http.Dir(irgshConfig.Chief.Workdir + "/logs"))
http.Handle("/logs/", http.StripPrefix("/logs/", logFs))
log.Println("irgsh-go chief now live on port 8080")
log.Fatal(http.ListenAndServe(":8080", nil))
}
func IndexHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "irgsh-chief "+app.Version)
}
func ArtifactsHandler(w http.ResponseWriter, r *http.Request) {
files, err := filepath.Glob(irgshConfig.Chief.Workdir + "/artifacts/*")
if err != nil {
fmt.Println(err.Error())
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "500")
}
artifacts := []string{}
for _, a := range files {
artifacts = append(artifacts, strings.Split(a, "artifacts/")[1])
}
// TODO pagination
payload := ArtifactsPayloadResponse{Data: artifacts}
jsonStr, _ := json.Marshal(payload)
fmt.Fprintf(w, string(jsonStr))
}
func PackageSubmitHandler(w http.ResponseWriter, r *http.Request) {
submission := Submission{}
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&submission)
if err != nil {
fmt.Println(err.Error())
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(w, "400")
return
}
submission.Timestamp = time.Now()
submission.TaskUUID = submission.Timestamp.Format("2006-01-02-150405") + "_" + uuid.New().String()
// Verifying the signature against current gpg keyring
// TODO generic wrapper for auth check
tarballB64 := submission.Tarball
buff, err := base64.StdEncoding.DecodeString(tarballB64)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "500")
return
}
cmdStr := "mkdir -p " + irgshConfig.Chief.Workdir + "/submissions/" + submission.TaskUUID
fmt.Println(cmdStr)
cmd := exec.Command("bash", "-c", cmdStr)
err = cmd.Run()
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "500")
return
}
path := irgshConfig.Chief.Workdir + "/submissions/" + submission.TaskUUID + "/" + submission.TaskUUID + ".tar.gz"
fmt.Println(path)
err = ioutil.WriteFile(path, buff, 0744)
if err != nil {
fmt.Println(err.Error())
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "500")
return
}
cmdStr = "cd " + irgshConfig.Chief.Workdir + "/submissions/" + submission.TaskUUID
cmdStr += " && tar -xvf " + submission.TaskUUID + ".tar.gz && rm -f " + submission.TaskUUID + ".tar.gz"
fmt.Println(cmdStr)
err = exec.Command("bash", "-c", cmdStr).Run()
if err != nil {
fmt.Println(err.Error())
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "500")
return
}
cmdStr = "cd " + irgshConfig.Chief.Workdir + "/submissions/" + submission.TaskUUID + " && "
// TODO This gnupg path should be configurable with config.yml
cmdStr += "GNUPGHOME=/var/lib/irgsh/gnupg gpg --verify *.dsc"
fmt.Println(cmdStr)
err = exec.Command("bash", "-c", cmdStr).Run()
if err != nil {
fmt.Println(err.Error())
w.WriteHeader(http.StatusUnauthorized)
fmt.Fprintf(w, "401 Unauthorized")
return
}
jsonStr, err := json.Marshal(submission)
if err != nil {
fmt.Println(err.Error())
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(w, "400")
return
}
buildSignature := tasks.Signature{
Name: "build",
UUID: submission.TaskUUID,
Args: []tasks.Arg{
{
Type: "string",
Value: string(jsonStr),
},
},
}
repoSignature := tasks.Signature{
Name: "repo",
UUID: submission.TaskUUID,
}
chain, _ := tasks.NewChain(&buildSignature, &repoSignature)
_, err = server.SendChain(chain)
if err != nil {
fmt.Println("Could not send chain : " + err.Error())
}
payload := SubmitPayloadResponse{PipelineId: submission.TaskUUID}
jsonStr, _ = json.Marshal(payload)
fmt.Fprint(w, string(jsonStr))
}
func BuildStatusHandler(w http.ResponseWriter, r *http.Request) {
keys, ok := r.URL.Query()["uuid"]
if !ok {
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(w, "403")
return
}
UUID := keys[0]
buildSignature := tasks.Signature{
Name: "build",
UUID: UUID,
Args: []tasks.Arg{
{
Type: "string",
Value: "xyz",
},
},
}
// Recreate the AsyncResult instance using the signature and server.backend
car := result.NewAsyncResult(&buildSignature, server.GetBackend())
car.Touch()
taskState := car.GetState()
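// Illustrative response shape (state names come from the machinery backend):
//   { "pipelineId": "2006-01-02-150405_<uuid>", "state": "SUCCESS" }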
res := fmt.Sprintf("{ \"pipelineId\": %q, \"state\": %q }", taskState.TaskUUID, taskState.State)
fmt.Fprint(w, res)
}
func artifactUploadHandler() http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
keys, ok := r.URL.Query()["id"]
if !ok || len(keys[0]) < 1 {
log.Println("Url Param 'uuid' is missing")
w.WriteHeader(http.StatusBadRequest)
return
}
id := keys[0]
targetPath := irgshConfig.Chief.Workdir + "/artifacts"
// parse and validate file and post parameters
file, _, err := r.FormFile("uploadFile")
if err != nil {
log.Println(err.Error())
w.WriteHeader(http.StatusBadRequest)
return
}
defer file.Close()
fileBytes, err := ioutil.ReadAll(file)
if err != nil {
log.Println(err.Error())
w.WriteHeader(http.StatusBadRequest)
return
}
// check file type, detectcontenttype only needs the first 512 bytes
filetype := http.DetectContentType(fileBytes)
switch filetype {
case "application/gzip", "application/x-gzip":
break
default:
log.Println("File upload rejected: should be a compressed tar.gz file.")
w.WriteHeader(http.StatusBadRequest)
return
}
fileName := id + ".tar.gz"
newPath := filepath.Join(targetPath, fileName)
// write file
newFile, err := os.Create(newPath)
if err != nil {
log.Println(err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
defer newFile.Close()
if _, err := newFile.Write(fileBytes); err != nil {
log.Println(err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
if err := newFile.Close(); err != nil {
log.Println(err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
// TODO should be in JSON string
w.WriteHeader(http.StatusOK)
})
}
func logUploadHandler() http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
keys, ok := r.URL.Query()["id"]
if !ok || len(keys[0]) < 1 {
log.Println("Url Param 'id' is missing")
w.WriteHeader(http.StatusBadRequest)
return
}
id := keys[0]
keys, ok = r.URL.Query()["type"]
if !ok || len(keys[0]) < 1 {
log.Println("Url Param 'type' is missing")
w.WriteHeader(http.StatusBadRequest)
return
}
logType := keys[0]
targetPath := irgshConfig.Chief.Workdir + "/logs"
// parse and validate file and post parameters
file, _, err := r.FormFile("uploadFile")
if err != nil {
log.Println(err.Error())
w.WriteHeader(http.StatusBadRequest)
return
}
defer file.Close()
fileBytes, err := ioutil.ReadAll(file)
if err != nil {
log.Println(err.Error())
w.WriteHeader(http.StatusBadRequest)
return
}
// check file type, detectcontenttype only needs the first 512 bytes
filetype := strings.Split(http.DetectContentType(fileBytes), ";")[0]
switch filetype {
case "text/plain":
break
default:
log.Println("File upload rejected: should be a plain text log file.")
w.WriteHeader(http.StatusBadRequest)
return
}
fileName := id + "." + logType + ".log"
newPath := filepath.Join(targetPath, fileName)
// write file
newFile, err := os.Create(newPath)
if err != nil {
log.Println(err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
defer newFile.Close()
if _, err := newFile.Write(fileBytes); err != nil {
log.Println(err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
if err := newFile.Close(); err != nil {
log.Println(err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
// TODO should be in JSON string
w.WriteHeader(http.StatusOK)
})
}
func BuildISOHandler(w http.ResponseWriter, r *http.Request) {
fmt.Println("iso")
signature := tasks.Signature{
Name: "iso",
UUID: uuid.New().String(),
Args: []tasks.Arg{
{
Type: "string",
Value: "iso-specific-value",
},
},
}
// TODO grab the asyncResult here
_, err := server.SendTask(&signature)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Println("Could not send task : " + err.Error())
fmt.Fprint(w, "500")
return
}
// TODO should be in JSON string
w.WriteHeader(http.StatusOK)
}
| [
"\"IRGSH_CONFIG_PATH\""
] | [] | [
"IRGSH_CONFIG_PATH"
] | [] | ["IRGSH_CONFIG_PATH"] | go | 1 | 0 | |
scripting/scripting_test.go | package scripting
import (
"context"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tychoish/jasper"
"github.com/tychoish/jasper/options"
"github.com/tychoish/jasper/testutil"
)
func isInPath(binary string) bool {
_, err := exec.LookPath(binary)
return err == nil
}
func evgTaskContains(subs string) bool {
return strings.Contains(os.Getenv("EVR_TASK_ID"), subs)
}
func makeScriptingEnv(ctx context.Context, t *testing.T, mgr jasper.Manager, opts options.ScriptingHarness) Harness {
se, err := NewHarness(mgr, opts)
require.NoError(t, err)
return se
}
func TestScriptingHarness(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
manager, err := jasper.NewSynchronizedManager(false)
require.NoError(t, err)
defer manager.Close(ctx)
tmpdir, err := ioutil.TempDir(testutil.BuildDirectory(), "scripting_tests")
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(tmpdir))
}()
type seTest struct {
Name string
Case func(*testing.T, options.ScriptingHarness)
}
for _, env := range []struct {
Name string
Supported bool
DefaultOptions options.ScriptingHarness
Tests []seTest
}{
{
Name: "Roswell",
Supported: isInPath("ros"),
DefaultOptions: &options.ScriptingRoswell{
Path: filepath.Join(tmpdir, "roswell"),
Lisp: "sbcl-bin",
Output: options.Output{},
},
Tests: []seTest{
{
Name: "Options",
Case: func(t *testing.T, opts options.ScriptingHarness) {
require.Equal(t, "ros", opts.Interpreter())
require.NotZero(t, opts.ID())
},
},
{
Name: "HelloWorldScript",
Case: func(t *testing.T, opts options.ScriptingHarness) {
se := makeScriptingEnv(ctx, t, manager, opts)
require.NoError(t, se.RunScript(ctx, `(defun main () (print "hello world"))`))
},
},
{
Name: "RunHelloWorld",
Case: func(t *testing.T, opts options.ScriptingHarness) {
se := makeScriptingEnv(ctx, t, manager, opts)
require.NoError(t, se.Run(ctx, []string{`(print "hello world")`}))
},
},
{
Name: "ScriptExitError",
Case: func(t *testing.T, opts options.ScriptingHarness) {
se := makeScriptingEnv(ctx, t, manager, opts)
require.Error(t, se.RunScript(ctx, `(sb-ext:exit :code 42)`))
},
},
},
},
{
Name: "Python3",
Supported: isInPath("python3") && !evgTaskContains("ubuntu"),
DefaultOptions: &options.ScriptingPython{
VirtualEnvPath: filepath.Join(tmpdir, "python3"),
LegacyPython: false,
InterpreterBinary: "python3",
Output: options.Output{},
},
Tests: []seTest{
{
Name: "Options",
Case: func(t *testing.T, opts options.ScriptingHarness) {
require.True(t, strings.HasSuffix(opts.Interpreter(), "python"))
require.NotZero(t, opts.ID())
},
},
{
Name: "HelloWorldScript",
Case: func(t *testing.T, opts options.ScriptingHarness) {
se := makeScriptingEnv(ctx, t, manager, opts)
require.NoError(t, se.RunScript(ctx, `print("hello world")`))
},
},
{
Name: "RunHelloWorld",
Case: func(t *testing.T, opts options.ScriptingHarness) {
se := makeScriptingEnv(ctx, t, manager, opts)
require.NoError(t, se.Run(ctx, []string{"-c", `print("hello world")`}))
},
},
{
Name: "ScriptExitError",
Case: func(t *testing.T, opts options.ScriptingHarness) {
se := makeScriptingEnv(ctx, t, manager, opts)
require.Error(t, se.RunScript(ctx, `exit(42)`))
},
},
},
},
{
Name: "Python2",
Supported: isInPath("python") && !evgTaskContains("windows"),
DefaultOptions: &options.ScriptingPython{
VirtualEnvPath: filepath.Join(tmpdir, "python2"),
LegacyPython: true,
InterpreterBinary: "python",
Packages: []string{"wheel"},
Output: options.Output{},
},
Tests: []seTest{
{
Name: "Options",
Case: func(t *testing.T, opts options.ScriptingHarness) {
require.True(t, strings.HasSuffix(opts.Interpreter(), "python"))
require.NotZero(t, opts.ID())
},
},
{
Name: "HelloWorldScript",
Case: func(t *testing.T, opts options.ScriptingHarness) {
se := makeScriptingEnv(ctx, t, manager, opts)
require.NoError(t, se.RunScript(ctx, `print("hello world")`))
},
},
{
Name: "RunHelloWorld",
Case: func(t *testing.T, opts options.ScriptingHarness) {
se := makeScriptingEnv(ctx, t, manager, opts)
require.NoError(t, se.Run(ctx, []string{"-c", `print("hello world")`}))
},
},
{
Name: "ScriptExitError",
Case: func(t *testing.T, opts options.ScriptingHarness) {
se := makeScriptingEnv(ctx, t, manager, opts)
require.Error(t, se.RunScript(ctx, `exit(42)`))
},
},
},
},
{
Name: "Golang",
Supported: isInPath("go"),
DefaultOptions: &options.ScriptingGolang{
Gopath: filepath.Join(tmpdir, "gopath"),
Goroot: runtime.GOROOT(),
Packages: []string{
"github.com/pkg/errors",
},
Output: options.Output{},
},
Tests: []seTest{
{
Name: "Options",
Case: func(t *testing.T, opts options.ScriptingHarness) {
require.True(t, strings.HasSuffix(opts.Interpreter(), "go"))
require.NotZero(t, opts.ID())
},
},
{
Name: "HelloWorldScript",
Case: func(t *testing.T, opts options.ScriptingHarness) {
se := makeScriptingEnv(ctx, t, manager, opts)
require.NoError(t, se.RunScript(ctx, `package main; import "fmt"; func main() { fmt.Println("Hello World")}`))
},
},
{
Name: "ScriptExitError",
Case: func(t *testing.T, opts options.ScriptingHarness) {
se := makeScriptingEnv(ctx, t, manager, opts)
require.Error(t, se.RunScript(ctx, `package main; import "os"; func main() { os.Exit(42) }`))
},
},
{
Name: "Dependencies",
Case: func(t *testing.T, opts options.ScriptingHarness) {
se := makeScriptingEnv(ctx, t, manager, opts)
tmpFile := filepath.Join(tmpdir, "fake_script.go")
require.NoError(t, ioutil.WriteFile(tmpFile, []byte(`package main; import ("fmt"; "github.com/pkg/errors"); func main() { fmt.Println(errors.New("error")) }`), 0755))
defer func() {
assert.NoError(t, os.Remove(tmpFile))
}()
err = se.Run(ctx, []string{tmpFile})
require.NoError(t, err)
},
},
{
Name: "RunFile",
Case: func(t *testing.T, opts options.ScriptingHarness) {
if runtime.GOOS == "windows" {
t.Skip("windows paths")
}
se := makeScriptingEnv(ctx, t, manager, opts)
tmpFile := filepath.Join(tmpdir, "fake_script.go")
require.NoError(t, ioutil.WriteFile(tmpFile, []byte(`package main; import "os"; func main() { os.Exit(0) }`), 0755))
defer func() {
assert.NoError(t, os.Remove(tmpFile))
}()
err = se.Run(ctx, []string{tmpFile})
require.NoError(t, err)
},
},
{
Name: "Build",
Case: func(t *testing.T, opts options.ScriptingHarness) {
if runtime.GOOS == "windows" {
t.Skip("windows paths")
}
se := makeScriptingEnv(ctx, t, manager, opts)
tmpFile := filepath.Join(tmpdir, "fake_script.go")
require.NoError(t, ioutil.WriteFile(tmpFile, []byte(`package main; import "os"; func main() { os.Exit(0) }`), 0755))
defer func() {
assert.NoError(t, os.Remove(tmpFile))
}()
_, err := se.Build(ctx, testutil.BuildDirectory(), []string{
"-o", filepath.Join(tmpdir, "fake_script"),
tmpFile,
})
require.NoError(t, err)
// verify the built binary, not the source file, exists
_, err = os.Stat(filepath.Join(tmpdir, "fake_script"))
require.NoError(t, err)
},
},
},
},
} {
t.Run(env.Name, func(t *testing.T) {
if !env.Supported {
t.Skipf("%s is not supported in the current system", env.Name)
return
}
require.NoError(t, env.DefaultOptions.Validate())
t.Run("Config", func(t *testing.T) {
start := time.Now()
se := makeScriptingEnv(ctx, t, manager, env.DefaultOptions)
require.NoError(t, se.Setup(ctx))
dur := time.Since(start)
require.NotNil(t, se)
t.Run("ID", func(t *testing.T) {
require.Equal(t, env.DefaultOptions.ID(), se.ID())
assert.Len(t, se.ID(), 40)
})
t.Run("Caching", func(t *testing.T) {
start := time.Now()
require.NoError(t, se.Setup(ctx))
assert.True(t, time.Since(start) < dur, "%s < %s",
time.Since(start), dur)
})
})
for _, test := range env.Tests {
t.Run(test.Name, func(t *testing.T) {
test.Case(t, env.DefaultOptions)
})
}
t.Run("Testing", func(t *testing.T) {
se := makeScriptingEnv(ctx, t, manager, env.DefaultOptions)
res, err := se.Test(ctx, tmpdir)
require.NoError(t, err)
require.Len(t, res, 0)
})
t.Run("Cleanup", func(t *testing.T) {
se := makeScriptingEnv(ctx, t, manager, env.DefaultOptions)
require.NoError(t, se.Cleanup(ctx))
})
})
}
}
| [
"\"EVR_TASK_ID\""
] | [] | [
"EVR_TASK_ID"
] | [] | ["EVR_TASK_ID"] | go | 1 | 0 | |
lib/mzbench_api_client.py |
from __future__ import print_function
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import json
import os
import sys
import re
import requests
import multipart
class MZBenchAPIException(Exception):
pass
def start(host, script_file, script_content,
node_commit = None, nodes = None, workers_per_node = None, deallocate_after_bench = None,
provision_nodes = None, provision_workers = None, benchmark_name = None,
cloud = None, tags = None, emails=[], includes=[], env={}, no_cert_check = False,
exclusive = None
):
"""Starts a bench
:param host: MZBench API server host with port
:type host: str
:param script_file: Scenario filename for dashboard
:type script_file: str or unicode
:param script_content: Scenario content to execute
:type script_content: str or unicode
:param node_commit: Commit or branch name for MZBench node, default is "master"
:type node_commit: str
:param nodes: Number of nodes to allocate or node list, 1 by default
:type nodes: int or list of strings
:param workers_per_node: Number of workers to start on one node
:type workers_per_node: int
:param deallocate_after_bench: Deallocate nodes after bench is over
:type deallocate_after_bench: "true" or "false"
:param provision_nodes: Install required software
:type provision_nodes: "true" or "false"
:param provision_workers: Install workers
:type provision_workers: "true" or "false"
:param benchmark_name: Set benchmark name
:type benchmark_name: str or unicode
:param cloud: Specify cloud provider to use
:type cloud: str or unicode
:param tags: Benchmark tags
:type tags: str
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
:param exclusive: Exclusive label
:type exclusive: str or unicode
:param emails: Emails to notify on bench results
:type emails: List of strings
:param env: Dictionary of environment variables to substitute
:type env: Dictionary
:returns: Operation status
:rtype: Dictionary
"""
import erl_utils
import bdl_utils
import math
script_utils = bdl_utils if bdl_utils.is_bdl_scenario(script_content) else erl_utils
script_terms = script_utils.convert(script_content, env)
includes = script_utils.get_includes(script_terms)
if workers_per_node is not None:
desired_num_nodes = int(math.ceil(float(script_utils.get_num_of_workers(script_terms))/float(workers_per_node)))
else:
desired_num_nodes = None
if nodes is not None:
if isinstance(nodes, int):
params = [('nodes', desired_num_nodes if desired_num_nodes is not None else nodes)]
else:
params = [('nodes', ','.join(nodes[:desired_num_nodes] if desired_num_nodes is not None else nodes))]
else:
params = [] if desired_num_nodes is None else [('nodes', desired_num_nodes)]
if deallocate_after_bench is not None:
params += [('deallocate_after_bench', deallocate_after_bench)]
if provision_nodes is not None:
params += [('provision_nodes', provision_nodes)]
if provision_workers is not None:
params += [('provision_workers', provision_workers)]
if benchmark_name is not None:
params += [('benchmark_name', benchmark_name)]
if cloud is not None:
params += [('cloud', cloud)]
if tags is not None:
params += [('tags', tags)]
if exclusive is not None:
params += [('exclusive', exclusive)]
if node_commit is not None:
params += [('node_commit', node_commit)]
params += [('email', email) for email in emails]
params += [(k, v) for k, v in env.items()]
files = [('bench',
{'filename': os.path.basename(script_file),
'content': script_content})]
for (incname, incurl) in includes:
script_dir = os.path.dirname(script_file)
if not re.search(r'^https?://', incurl, re.IGNORECASE):
filename = os.path.join(script_dir, incurl)
try:
with open(filename) as fi:
files.append(('include',
{'filename': incurl, 'content': fi.read()}))
except IOError as e:
print("Failed to get content for resource ({0}, {1}): {2}".format(
incname, incurl, e), file=sys.stderr)
raise
body, headers = multipart.encode_multipart({}, files)
return assert_successful_post(
host,
'/start',
params,
data=body, headers=headers, no_cert_check = no_cert_check)
def restart(host, bench_id, no_cert_check = False):
"""Creates a copy of a bench
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id to copy
:type bench_id: int
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
:returns: operation status
:rtype: dict
"""
return assert_successful_get(host, '/restart', {'id': bench_id}, no_cert_check = no_cert_check)
def log(host, bench_id, no_cert_check = False):
"""Outputs log for a bench
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
:returns: log
:rtype: generator of str
"""
for x in stream_lines(host, '/log', {'id': bench_id}, no_cert_check = no_cert_check):
yield x
def userlog(host, bench_id, no_cert_check = False):
"""Outputs user log for a bench
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
:returns: log
:rtype: generator of str
"""
for x in stream_lines(host, '/userlog', {'id': bench_id}, no_cert_check = no_cert_check):
yield x
def change_env(host, bench_id, env, no_cert_check = False):
"""Changes environment variables for existing benchmark on the fly
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
:param env: Dictionary of environment variables to substitute
:type env: Dictionary
"""
env['id'] = bench_id
return assert_successful_get(host, '/change_env', env, no_cert_check = no_cert_check)
def run_command(host, bench_id, pool, percent, bdl_command, no_cert_check = False):
"""Executes worker operation on a given percent of a pool on the fly
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:param pool: pool number from the top of the script, starting from 1
:type pool: int
:param percent: percent of workers 0 < percent <= 100
:type percent: int
:param bdl_command: BDL statement to be executed
:type bdl_command: str
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
"""
import bdl_utils
bdl_utils.convert("#!benchDL\n" + bdl_command, {}) # To check syntax
return assert_successful_get(
host,
'/run_command',
{'id': bench_id,
'pool': pool,
'percent': percent,
'command': bdl_command}, no_cert_check = no_cert_check)
def data(host, bench_id, no_cert_check = False):
"""Outputs CSV data for a bench
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
:returns: CSV data
:rtype: generator of str
"""
for x in stream_lines(host, '/data', {'id': bench_id}, no_cert_check = no_cert_check):
yield x
def status(host, bench_id, wait=False, no_cert_check = False):
"""Get bench status
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
:returns: benchmark status
:rtype: dict
"""
return assert_successful_get(
host,
'/status',
{'id': bench_id,
'wait': 'true' if wait else 'false'}, no_cert_check = no_cert_check)
def results(host, bench_id, wait=False, no_cert_check = False):
"""Get bench results
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
:returns: benchmark resulting metric values
:rtype: dict
"""
return assert_successful_get(
host,
'/results',
{'id': bench_id,
'wait': 'true' if wait else 'false'}, no_cert_check = no_cert_check)
def stop(host, bench_id, no_cert_check = False):
"""Stop a bench
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
:returns: operation status
:rtype: dict
"""
return assert_successful_get(
host,
'/stop',
{'id': bench_id}, no_cert_check = no_cert_check)
def clusters_info(host, no_cert_check = False):
"""Get info about currenlty allocated clusters
:param host: MZBench API server host with port
:type host: str
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
"""
return assert_successful_get(host, '/clusters_info', {}, no_cert_check = no_cert_check)
def deallocate_cluster(host, cluster_id, no_cert_check = False):
"""Deallocate cluster
:param host: MZBench API server host with port
:type host: str
:param cluster_id: id of target cluster
:type cluster_id: int
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
"""
return assert_successful_get(host, '/deallocate_cluster', {'id': cluster_id}, no_cert_check = no_cert_check)
def remove_cluster_info(host, cluster_id, no_cert_check = False):
"""Remove cluster record from the table of current allocated cluster info
:param host: MZBench API server host with port
:type host: str
:param cluster_id: id of target cluster
:type cluster_id: int
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
"""
return assert_successful_get(host, '/remove_cluster_info', {'id': cluster_id}, no_cert_check = no_cert_check)
def add_tags(host, bench_id, tags, no_cert_check = False):
"""Add tags to an existing benchmark
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:param tags: Tags to add
:type tags: str
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
"""
return assert_successful_get(host, '/add_tags', {'id': bench_id, 'tags': tags}, no_cert_check = no_cert_check)
def remove_tags(host, bench_id, tags, no_cert_check = False):
"""Remove tags from an existing benchmark
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:param tags: Tags to remove
:type tags: str
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
"""
return assert_successful_get(host, '/remove_tags', {'id': bench_id, 'tags': tags}, no_cert_check = no_cert_check)
def addproto(host):
if host.startswith("http://") or host.startswith("https://"):
return host
return "http://" + host
def stream_lines(host, endpoint, args, no_cert_check = False):
try:
response = requests.get(
addproto(host) + endpoint + '?' + urlencode(args),
stream=True, verify = not no_cert_check, headers=get_auth_headers(host))
for line in fast_iter_lines(response, chunk_size=1024):
try:
yield line
except ValueError:
print(line)
if response.status_code != 200:
raise MZBenchAPIException('Server call to {0} failed with code {1}'.format(endpoint, response.status_code))
except requests.exceptions.ConnectionError as e:
raise MZBenchAPIException('Connect to "{0}" failed with message: {1}'.format(host, e))
def fast_iter_lines(response, chunk_size=512):
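# Reassembles lines split across chunk boundaries: a trailing partial line is
# kept in 'pending' and prepended to the first line of the next chunk.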
pending = None
for chunk in response.iter_content(chunk_size=chunk_size):
lines = chunk.splitlines()
if pending is not None:
if lines:
lines[0] = pending + lines[0]
else:
lines.append(pending)
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
def assert_successful_request(perform_request):
def wrapped(*args, **kwargs):
try:
response = perform_request(*args, **kwargs)
if response.status_code == 200:
return response.json()
else:
try:
data = json.loads(response.text)
except:
raise MZBenchAPIException('Server call with arguments {0} failed with code {1} response body:\n{2}'.format(args, response.status_code, response.text))
if ('reason_code' in data and 'reason' in data):
raise MZBenchAPIException('Server call with arguments {0} failed with code {1} and reason: {2}\n{3}'.format(args, response.status_code, data['reason_code'], data['reason']))
else:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
io = StringIO()
json.dump(data, io, indent=4)
raise MZBenchAPIException('Server call with arguments {0} failed with code {1} response body:\n{2}'.format(args, response.status_code, io.getvalue()))
except requests.exceptions.ConnectionError as e:
raise MZBenchAPIException('Connect to "{0}" failed with message: {1}'.format(args[0], e))
return wrapped
@assert_successful_request
def assert_successful_get(host, endpoint, args, no_cert_check = False):
return requests.get(
addproto(host) + endpoint + '?' + urlencode(args),
verify=not no_cert_check, headers=get_auth_headers(host))
@assert_successful_request
def assert_successful_post(host, endpoint, args, data=None, headers=None, no_cert_check = False):
return requests.post(
addproto(host) + endpoint + '?' + urlencode(args),
data=data,
headers=add_auth_headers(headers, host),
verify=not no_cert_check)
def add_auth_headers(headers, host):
auth_headers = get_auth_headers(host)
if headers is None:
return auth_headers
if auth_headers is None:
return headers
headers.update(auth_headers)
return headers
def get_auth_headers(host):
token = read_token(host)
if token is None:
return None
return {"Authorization": "Bearer {}".format(token.rstrip(" \n\r"))}
def read_token(host):
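# Based on the parsing below, the token file holds one entry per line, either
# "host token" pairs or a single bare token, with '#' starting a comment, e.g.
# (illustrative):
#   # mzbench tokens
#   mzbench.example.com:4800 0123456789abcdef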
if 'MZBENCHTOKEN' in os.environ:
token_file = os.environ['MZBENCHTOKEN']
else:
token_file = os.path.expanduser("~/.config/mzbench/token")
if (not os.path.isfile(token_file)):
return None
with open(token_file) as f:
s = f.read()
for line in s.split('\n'):
line_no_comments = line.split('#', 1)[0]
strtokens = line_no_comments.split()
if len(strtokens) > 1 and host == strtokens[0]:
return strtokens[1]
if len(strtokens) == 1:
return line_no_comments
return None
| [] | [] | [
"MZBENCHTOKEN"
] | [] | ["MZBENCHTOKEN"] | python | 1 | 0 | |
python/profileLib.py | #!/usr/bin/env python3
import sys
import bz2
import re
import os
import subprocess
import hashlib
import pickle
import pathlib
from filelock import FileLock
from datetime import datetime
import tempfile
import csv
from copy import copy
LABEL_UNKNOWN = '_unknown'
LABEL_FOREIGN = '_foreign'
LABEL_KERNEL = '_kernel'
LABEL_UNSUPPORTED = '_unsupported'
cacheVersion = 'c0.2'
profileVersion = '0.5'
aggProfileVersion = 'agg0.9'
annProfileVersion = 'ann0.1'
unwindInline = True if 'UNWIND_INLINE' in os.environ and os.environ['UNWIND_INLINE'] == '1' else False
disableCache = True if 'DISABLE_CACHE' in os.environ and os.environ['DISABLE_CACHE'] == '1' else False
crossCompile = "" if 'CROSS_COMPILE' not in os.environ else os.environ['CROSS_COMPILE']
cacheFolder = str(pathlib.Path.home()) + "/.cache/pperf/" if 'PPERF_CACHE' not in os.environ else os.environ['PPERF_CACHE']
_toolchainVersion = None
class AGGSAMPLE:
time = 0
power = 1
energy = 2
samples = 3
execs = 4
label = 5
mappedSample = 6
class SAMPLE:
pc = 0 # int
binary = 1 # str
file = 2 # str
function = 3 # str
basicblock = 4 # str
line = 5 # int
instruction = 6 # str
meta = 7 # int
names = ['pc', 'binary', 'file', 'function', 'basicblock', 'line', 'instruction', 'meta']
invalid = [None, None, None, None, None, None, None]
class META:
normalInstruction = 0
branchInstruction = 1
branchTarget = 2
dynamicBranchTarget = 4
functionHead = 8
functionBack = 16
basicblockHead = 32
basicblockBack = 64
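# META values are bit flags combined with bitwise or; for example, the first
# instruction of a function is tagged META.functionHead | META.basicblockHead,
# exactly as done during objdump parsing below.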
def getToolchainVersion():
global _toolchainVersion
if _toolchainVersion is not None:
return _toolchainVersion
global crossCompile
addr2line = subprocess.run(f"{crossCompile}addr2line -v | head -n 1 | egrep -Eo '[0-9]+\.[0-9.]+$'", shell=True, stdout=subprocess.PIPE)
addr2line.check_returncode()
_toolchainVersion = crossCompile + addr2line.stdout.decode('utf-8').split('\n')[0]
return _toolchainVersion
def getElfArchitecture(elf: str):
readelf = subprocess.run(f'readelf -h {elf}', shell=True, stdout=subprocess.PIPE)
readelf.check_returncode()
for line in readelf.stdout.decode('utf-8').split('\n'):
line = line.strip()
if line.startswith('Machine:'):
return line.split(':', 1)[1].strip()
return None
def parseRange(stringRange):
result = []
for part in stringRange.split(','):
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
result.extend(range(a, b + 1))
else:
a = int(part)
result.append(a)
return result
class elfCache:
# Basic Block Reconstruction:
# currently requires support through dynamic branch analysis which
# provides a csv with dynamic branches and their targets to accurately
# reconstruct basic blocks
# AArch64 - Stable
# RISC-V - Experimental
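# The dynmap csv consumed below simply lists "<branch pc>,<target pc>" per
# line, in any base accepted by int(x, 0), e.g. (illustrative):
# 0x400b2c,0x400a00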
archBranches = {
'AArch64': {
# These instruction divert the control flow of the application
'all': {'b', 'b.eq', 'b.ne', 'b.cs', 'b.hs', 'b.cc', 'b.lo', 'b.mi', 'b.pl', 'b.vs', 'b.vc', 'b.hi', 'b.ls', 'b.ge', 'b.lt', 'b.gt', 'b.le', 'b.al', 'b.nv', 'bl', 'br', 'blr', 'svc', 'brk', 'ret', 'cbz', 'cbnz', 'tbnz'},
# These instructions are dynamic branches that can only divert control flow towards the head of a basicblock/function or after another branch instruction
'remote': {'svc', 'brk', 'blr', 'ret'},
},
'RISC-V': {
'all': {'j', 'jal', 'jr', 'jalr', 'ret', 'call', 'tail', 'bne', 'beq', 'blt', 'bltu', 'bge', 'bgeu', 'beqz', 'bnez', 'blez', 'bgez', 'bltz', 'bgtz', 'bgt', 'ble', 'bgtu', 'bleu', 'ecall', 'ebreak', 'scall', 'sbreak'},
'remote': {'ebreak', 'ecall', 'sbreak', 'scall', 'jalr', 'ret'},
}
}
caches = {}
cacheFiles = {}
def __init__(self):
global cacheFolder
if not os.path.isdir(cacheFolder):
os.makedirs(cacheFolder)
def getRawCache(self, name):
global cacheFolder
name = os.path.abspath(f'{cacheFolder}/{name}')
lock = FileLock(name + ".lock")
# If the lock is held this will stall
lock.acquire()
lock.release()
if os.path.isfile(name):
return pickle.load(open(name, mode="rb"))
else:
raise Exception(f'could not find requested elf cache {name}')
def getCacheFile(self, elf):
if elf in self.cacheFiles:
return self.cacheFiles[elf]
global cacheFolder
global unwindInline
hasher = hashlib.md5()
with open(elf, 'rb') as afile:
hasher.update(afile.read())
return os.path.abspath(f"{cacheFolder}/{os.path.basename(elf)}_{'i' if unwindInline else ''}{hasher.hexdigest()}")
def openOrCreateCache(self, elf: str):
global disableCache
if not self.cacheAvailable(elf):
if disableCache:
print("WARNING: cache disabled, constructing limited in memory cache", file=sys.stderr)
self.createCache(elf, basicblockReconstruction=False, includeSource=False, verbose=False)
else:
raise Exception(f'could not find cache for file {elf}, please create first or run with disabled cache')
def getSampleFromPC(self, elf: str, pc: int):
self.openOrCreateCache(elf)
if pc not in self.caches[elf]['cache']:
print(f"WARNING: 0x{pc:x} does not exist in {elf}", file=sys.stderr)
sample = copy(SAMPLE.invalid)
sample[SAMPLE.binary] = self.caches[elf]['name']
sample[SAMPLE.pc] = pc
return sample
else:
return self.caches[elf]['cache'][pc]
def cacheAvailable(self, elf: str, load=True):
if elf in self.caches:
return True
global disableCache
if disableCache:
return False
global cacheVersion
cacheFile = self.getCacheFile(elf)
lock = FileLock(cacheFile + ".lock")
# If the lock is held this will stall
lock.acquire()
lock.release()
if os.path.isfile(cacheFile):
cache = pickle.load(open(cacheFile, mode="rb"))
if 'version' not in cache or cache['version'] != cacheVersion:
raise Exception(f"wrong version of cache for {elf} located at {cacheFile}!")
if load:
self.cacheFiles[elf] = cacheFile
self.caches[elf] = cache
return True
else:
return False
def createCache(self, elf: str, name=None, sourceSearchPaths=[], dynmapfile=None, includeSource=True, basicblockReconstruction=True, verbose=True):
global cacheVersion
global crossCompile
global unwindInline
global disableCache
if name is None:
name = os.path.basename(elf)
if not disableCache:
cacheFile = self.getCacheFile(elf)
lock = FileLock(cacheFile + ".lock")
lock.acquire()
# Remove the cache if it already exists
if os.path.isfile(cacheFile):
os.remove(cacheFile)
try:
functionCounter = -1
cache = {
'version': cacheVersion,
'binary': os.path.basename(elf),
'name': name,
'arch': getElfArchitecture(elf),
'date': datetime.now(),
'toolchain': getToolchainVersion(),
'unwindInline': unwindInline,
'cache': {},
'source': {},
'asm': {},
}
if cache['arch'] not in self.archBranches and basicblockReconstruction:
basicblockReconstruction = False
if verbose:
print(f"WARNING: disabling basic block reconstruction due to unknown architecture {cache['arch']}")
sPreObjdump = f"{crossCompile}objdump -wh {elf}"
pPreObjdump = subprocess.Popen(sPreObjdump, shell=True, stdout=subprocess.PIPE, universal_newlines=True)
for sectionLine in pPreObjdump.stdout:
sectionLine = re.sub(r', ', ',', sectionLine)
sectionList = re.sub(r'[\t ]+', ' ', sectionLine).strip().split(' ')
# Check whether we write, allocate or execute a section
if len(sectionList) < 7 or 'CODE' not in sectionList[7]:
continue
section = sectionList[1]
# First step is creating an object dump of the elf file
# We disassemble much more than needed, however it is necessary for some profilers
sObjdump = f"{crossCompile}objdump -Dz --prefix-addresses -j {section} {elf}"
pObjdump = subprocess.Popen(sObjdump, shell=True, stdout=subprocess.PIPE, universal_newlines=True)
# Remove trailing additional data that begins with '//'
# sObjdump = re.sub('[ \t]+(// ).+\n','\n', sObjdump)
# Remove trailing additional data that begins with '#'
# sObjdump = re.sub('[ \t]+(# ).+\n','\n', sObjdump)
for line in pObjdump.stdout:
objdumpInstruction = re.compile('([0-9a-fA-F]+) (<.+?(\+0x[0-9a-fA-F]+)?> [^\t ]+)(\t[^<\t]+)?(.+)?')
funcOffset = re.compile('^<(.+?)(\+0x[0-9a-fA-F]+)?>$')
match0 = objdumpInstruction.match(line)
if match0:
# Instruction can be reliably splitted of the second match
funcAndInstr = match0.group(2).rstrip('\n').rsplit(' ', 1)
meta = META.normalInstruction
match1 = funcOffset.match(funcAndInstr[0])
if match1.group(2) is None:
meta |= META.functionHead | META.basicblockHead
functionCounter += 1
pc = int(match0.group(1), 16)
# match 3 the pc offset in the function, not used here
# match 4 are the function arguments
sample = [pc, name, None, match1.group(1), f'f{functionCounter}', None, funcAndInstr[1], meta]
asm = funcAndInstr[1]
if match0.group(4) is not None:
asm += match0.group(4).rstrip('\n')
if match0.group(5) is not None:
asm += match0.group(5).rstrip('\n')
cache['asm'][pc] = asm.strip()
cache['cache'][pc] = sample
# print(f'0x{pc:x}: {asm.strip()}')
pObjdump.stdout.close()
returnCode = pObjdump.wait()
if returnCode:
raise subprocess.CalledProcessError(returnCode, sObjdump)
pPreObjdump.stdout.close()
returnCode = pPreObjdump.wait()
if returnCode:
raise subprocess.CalledProcessError(returnCode, sPreObjdump)
if (len(cache['cache']) == 0):
raise Exception(f'Could not parse any instructions from {elf}')
# Second Step, correlate addresses to function/files
tmpfile, tmpfilename = tempfile.mkstemp()
try:
addr2lineDecode = re.compile('^(0x[0-9a-fA-F]+)\n(.+?)\n(.+)?:(([0-9]+)|(\?)).*$')
with os.fdopen(tmpfile, 'w') as tmp:
tmp.write('\n'.join(map(lambda x: f'0x{x:x}', cache['cache'].keys())) + '\n')
tmp.close()
# addr2line by default outputs the file/line where the instruction originates from
# the -i option lets it print the chain of inlining which might be multiple files/lines
# We are only intersted in one result per address. We want either the function the
# address ends up in or the function it came from (when inlined). It will return the origin
# without -i and when -i is passed, the last result will be always the function this
# address was inlined to. That means this option is logically inverted for this script
# as we only take the last result per address.
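# Each record returned by addr2line therefore has the shape (illustrative):
#   0x400b2c
#   main
#   /src/app.c:42
# which is what addr2lineDecode above matches against.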
pAddr2line = subprocess.run(f"{crossCompile}addr2line -Cafr{'i' if not unwindInline else ''} -e {elf} @{tmpfilename}", shell=True, stdout=subprocess.PIPE)
pAddr2line.check_returncode()
sAddr2line = pAddr2line.stdout.decode('utf-8').split("\n0x")
for entry in sAddr2line:
matchEntry = (entry if entry.startswith('0x') else '0x' + entry).split('\n')
while len(matchEntry) > 3 and len(matchEntry[-1]) == 0:
matchEntry.pop()
matchEntry = '\n'.join([matchEntry[0], matchEntry[-2], matchEntry[-1]])
match = addr2lineDecode.match(matchEntry)
if match:
iAddr = int(match.group(1), 16)
if iAddr not in cache['cache']:
raise Exception(f'Got an unknown address from addr2line: {match.group(1)}')
if match.group(3) is not None and len(match.group(3).strip('?')) != 0:
cache['cache'][iAddr][SAMPLE.file] = match.group(3)
if match.group(2) is not None and len(match.group(2).strip('?')) != 0:
# If we do source correlation save the absolute path for the moment
cache['cache'][iAddr][SAMPLE.function] = match.group(2)
if match.group(4) is not None and len(match.group(4).strip('?')) != 0 and int(match.group(4)) != 0:
cache['cache'][iAddr][SAMPLE.line] = int(match.group(4))
else:
raise Exception(f'Could not decode the following addr2line entry\n{entry}')
finally:
os.remove(tmpfilename)
# Third Step, read in source code
if includeSource:
# Those encondings will be tried
all_encodings = ['utf_8', 'latin_1', 'ascii', 'utf_16', 'utf_32', 'iso8859_2', 'utf_8_sig', 'utf_16_be', 'utf_16_le', 'utf_32_be', 'utf_32_le',
'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6', 'iso8859_7', 'iso8859_8', 'iso8859_9', 'iso8859_10', 'iso8859_11', 'iso8859_12',
'iso8859_13', 'iso8859_14', 'iso8859_15', 'iso8859_16']
for pc in cache['cache']:
if cache['cache'][pc][SAMPLE.file] is not None and cache['cache'][pc][SAMPLE.file] not in cache['source']:
targetFile = None
sourcePath = cache['cache'][pc][SAMPLE.file]
searchPath = pathlib.Path(sourcePath)
cache['source'][sourcePath] = None
if (os.path.isfile(sourcePath)):
targetFile = sourcePath
elif len(sourceSearchPaths) > 0:
if searchPath.is_absolute():
searchPath = pathlib.Path(*searchPath.parts[1:])
found = False
for search in sourceSearchPaths:
currentSearchPath = searchPath
while not found and len(currentSearchPath.parts) > 0:
if os.path.isfile(search / currentSearchPath):
targetFile = search / currentSearchPath
found = True
break
currentSearchPath = pathlib.Path(*currentSearchPath.parts[1:])
if found:
break
if targetFile is not None:
decoded = False
for enc in all_encodings:
try:
with open(targetFile, 'r', encoding=enc) as fp:
cache['source'][sourcePath] = []
for i, line in enumerate(fp):
cache['source'][sourcePath].append(line.strip('\r\n'))
# print(f"Opened file {targetFile} with encoding {enc}")
decoded = True
break
except Exception:
pass
if not decoded:
cache['source'][sourcePath] = None
raise Exception(f"could not decode source code {sourcePath}")
elif verbose:
print(f"WARNING: could not find source code for {os.path.basename(sourcePath)}", file=sys.stderr)
# Fourth Step, basic block reconstruction
if basicblockReconstruction:
# If a dynmap file is provided, read it in and add the dynamic branch information
dynmap = {}
if dynmapfile is None and os.path.isfile(elf + '.dynmap'):
dynmapfile = elf + '.dynmap'
if dynmapfile is not None and os.path.isfile(dynmapfile):
try:
with open(dynmapfile, "r") as fDynmap:
csvDynmap = csv.reader(fDynmap)
for row in csvDynmap:
try:
fromPc = int(row[0], 0)
toPc = int(row[1], 0)
except Exception:
continue
if fromPc not in dynmap:
dynmap[fromPc] = [toPc]
else:
dynmap[fromPc].append(toPc)
except Exception:
if verbose:
print(f"WARNING: could not read dynamic branch information from {dynmapfile}", file=sys.stderr)
# pcs = sorted(cache['cache'].keys())
unresolvedBranches = []
# First pass to identify branches
for pc in cache['cache']:
instruction = cache['cache'][pc][SAMPLE.instruction].lower()
if instruction in self.archBranches[cache['arch']]['all']:
cache['cache'][pc][SAMPLE.meta] |= META.branchInstruction
asm = cache['asm'][pc].split('\t')
if instruction not in self.archBranches[cache['arch']]['remote'] and len(asm) >= 2:
branched = False
for argument in reversed(re.split(', |,| ', asm[1])):
try:
branchTarget = int(argument.strip(), 16)
if branchTarget in cache['cache']:
cache['cache'][branchTarget][SAMPLE.meta] |= META.branchTarget
branched = True
break
except Exception:
pass
if not branched and verbose:
# Might be a branch that has dynmap information or comes from the plt
if pc not in dynmap and not (cache['cache'][pc][SAMPLE.function].endswith('.plt') or cache['cache'][pc][SAMPLE.function].endswith('@plt')):
unresolvedBranches.append(pc)
# Parse dynmap to complete informations
newBranchTargets = []
knownBranchTargets = []
for pc in dynmap:
if pc not in cache['cache']:
raise Exception(f'address 0x{pc:x} from dynamic branch information is unknown in file {elf}')
if not cache['cache'][pc][SAMPLE.meta] & META.branchInstruction:
raise Exception(f'dynamic branch information provided an unknown branch at 0x{pc:x} in file {elf}')
# With the exception above this is unnecessary
# cache['cache'][pc][SAMPLE.meta] |= META.branchInstruction
for target in dynmap[pc]:
if target not in cache['cache']:
raise Exception(f'target address 0x{target:x} from dynamic branch information is unknown in file {elf}')
if not cache['cache'][target][SAMPLE.meta] & META.branchTarget and not cache['cache'][target][SAMPLE.meta] & META.functionHead:
newBranchTargets.append(target)
else:
knownBranchTargets.append(target)
cache['cache'][target][SAMPLE.meta] |= META.dynamicBranchTarget
if verbose and len(newBranchTargets) > 0:
print(f"INFO: {len(newBranchTargets)} new branch targets were identified with dynamic branch information ({', '.join([f'0x{x:x}' for x in newBranchTargets])})", file=sys.stderr)
if verbose and len(knownBranchTargets) > 0:
print(f"INFO: {len(knownBranchTargets)} branch targets from dynamic branch information were already known ({', '.join([f'0x{x:x}' for x in knownBranchTargets])})", file=sys.stderr)
if verbose and len(unresolvedBranches) > 0:
print(f"WARNING: {len(unresolvedBranches)} dynamic branches might not be resolved! ({', '.join([f'0x{x:x}' for x in unresolvedBranches])})", file=sys.stderr)
# print('\n'.join([cache['asm'][x] for x in unresolvedBranches]))
# Second pass to resolve the basic blocks
basicblockCount = 0
prevPc = None
for pc in cache['cache']:
# If function head, we reset the basicblock counter to zero (functions are already a basicblock)
if cache['cache'][pc][SAMPLE.meta] & META.functionHead:
basicblockCount = 0
cache['cache'][pc][SAMPLE.meta] |= META.basicblockHead
if prevPc is not None:
cache['cache'][prevPc][SAMPLE.meta] |= META.functionBack | META.basicblockBack
# Else, if instruction is a branch target or the previous is a branch we increase the basicblock counter
elif (cache['cache'][pc][SAMPLE.meta] & META.branchTarget) or (cache['cache'][pc][SAMPLE.meta] & META.dynamicBranchTarget) or (prevPc is not None and cache['cache'][prevPc][SAMPLE.meta] & META.branchInstruction):
basicblockCount += 1
cache['cache'][pc][SAMPLE.meta] |= META.basicblockHead
if prevPc is not None:
cache['cache'][prevPc][SAMPLE.meta] |= META.basicblockBack
cache['cache'][pc][SAMPLE.basicblock] += f'b{basicblockCount}'
prevPc = pc
if not disableCache:
pickle.dump(cache, open(cacheFile, "wb"), pickle.HIGHEST_PROTOCOL)
self.caches[elf] = cache
finally:
if not disableCache:
lock.release()
class listmapper:
maps = {}
def __init__(self, mapping=None):
if mapping is not None:
self.addMaping(mapping)
def removeMapping(self, mapping):
if isinstance(mapping, list):
for m in mapping:
if not isinstance(m, int):
raise Exception('class listmapper must be used with integer maps')
if m in self.maps:
del self.maps[m]
elif not isinstance(mapping, int):
raise Exception('class listmapper must be used with integer maps')
elif mapping in self.maps:
del self.maps[mapping]
def addMaping(self, mapping):
if isinstance(mapping, list):
for m in mapping:
if not isinstance(m, int):
raise Exception('class listmapper must be used with integer maps')
if m not in self.maps:
self.maps[m] = []
elif not isinstance(mapping, int):
raise Exception('class listmapper must be used with integer maps')
elif mapping not in self.maps:
self.maps[mapping] = []
def mapValues(self, values: list):
mapped = []
for i, val in enumerate(values):
if i in self.maps:
if val not in self.maps[i]:
self.maps[i].append(val)
mapped.append(self.maps[i].index(val))
else:
mapped.append(val)
return mapped
def remapValues(self, values: list):
remapped = []
for i, val in enumerate(values):
if i in self.maps:
if not isinstance(val, int) or val >= len(self.maps[i]):
raise Exception(f'listmapper invalid remap request for value {val} in map {i}')
remapped.append(self.maps[i][val])
else:
remapped.append(val)
return remapped
def setMaps(self, maps: dict):
self.maps = maps
def retrieveMaps(self):
return self.maps
class sampleParser:
cache = elfCache()
# Mapper will compress the samples down to a numeric list
mapper = listmapper([SAMPLE.binary, SAMPLE.file, SAMPLE.function, SAMPLE.basicblock, SAMPLE.instruction])
cacheMap = {}
binaries = []
kallsyms = []
searchPaths = []
_localSampleCache = {}
def __init__(self):
pass
def addSearchPath(self, path):
if not isinstance(path, list):
path = [path]
for p in path:
if not os.path.isdir(p):
raise Exception(f"Not a directory '{path}'")
self.searchPaths.extend(path)
def loadVMMap(self, fromFile=False, fromBuffer=False):
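# Each vmmap line is expected to be "<start> <size> <label>" with start and
# size in hex, e.g. (illustrative): "aaaace550000 24000 busybox"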
if (not fromFile and not fromBuffer):
raise Exception("Not enough arguments")
if (fromFile and not os.path.isfile(fromFile)):
raise Exception(f"File '{fromFile}' not found")
if (fromFile):
if fromFile.endswith("bz2"):
fromBuffer = bz2.open(fromFile, 'rt').read()
else:
fromBuffer = open(fromFile, "r").read()
for line in fromBuffer.split("\n"):
if (len(line) > 2):
(addr, size, label,) = line.split(" ", 2)
addr = int(addr, 16)
size = int(size, 16)
found = False
static = False
for searchPath in self.searchPaths:
path = f"{searchPath}/{label}"
if (os.path.isfile(path)):
readelf = subprocess.run(f"readelf -h {path}", shell=True, stdout=subprocess.PIPE)
readelfsection = subprocess.run(f"readelf -lW {path} 2>/dev/null | awk '$0 ~ /LOAD.+ R.E 0x/ {{print $3\":\"$6}}'", shell=True, stdout=subprocess.PIPE)
try:
readelf.check_returncode()
readelfsection.check_returncode()
static = True if re.search("Type:[ ]+EXEC", readelf.stdout.decode('utf-8'), re.M) else False
offset = int(readelfsection.stdout.decode('utf-8').split('\n')[:-1][0].split(":")[0], 0)
found = True
break
except Exception:
pass
if found:
# Not seen so far but a binary could have multiple code sections which wouldn't work with that structure so far:
# print(f"Using offset {offset:x} for {label}")
self.binaries.append({
'binary': label,
'path': path,
'kernel': False,
'static': static,
'offset': offset,
'start': addr,
'size': size,
'end': addr + size
})
else:
raise Exception(f"Could not find {label}")
def loadKallsyms(self, fromFile=False, fromBuffer=False):
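# Expects /proc/kallsyms style input, "<address> <type> <name>", e.g.
# (illustrative): "ffff800010081000 T vectors"; only address and name are used.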
if (not fromFile and not fromBuffer):
raise Exception("Not enough arguments")
if (fromFile and not os.path.isfile(fromFile)):
raise Exception(f"File '{fromFile}' not found")
if (fromFile):
if fromFile.endswith("bz2"):
fromBuffer = bz2.open(fromFile, 'rt').read()
else:
fromBuffer = open(fromFile, "r").read()
for symbol in fromBuffer.split('\n'):
s = symbol.split(" ")
if len(s) >= 3:
self.kallsyms.append([int(s[0], 16), s[2]])
if len(self.kallsyms) <= 0:
return
kstart = self.kallsyms[0][0]
self.binaries.append({
'binary': '_kernel',
'path': '_kernel',
'kernel': True,
'static': False,
'start': kstart,
'offset': 0,
'size': self.kallsyms[-1][0] - kstart,
'end': self.kallsyms[-1][0]
})
self.kallsyms = [[x - kstart, y] for (x, y) in self.kallsyms]
self.kallsyms.reverse()
def isPCKnown(self, pc):
if self.getBinaryFromPC(pc) is False:
return False
return True
def getBinaryFromPC(self, pc):
for binary in self.binaries:
if (pc >= binary['start'] and pc <= binary['end']):
return binary
return False
def parsePC(self, pc):
if pc in self._localSampleCache:
return self._localSampleCache[pc]
binary = self.getBinaryFromPC(pc)
sample = None
if binary is not False:
# Static pc is used as is
# dynamic pc points into a virtual memory range which was mapped according to the vmmap
# binaries on e.g. x86 are typically mapped using an offset to the actual code section
# in the binary, meaning the sampled pc value must be adjusted by the offset for correlation
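# Illustrative example: a library mapped at start 0x7f0000000000 whose code
# section sits at file offset 0x1000 maps a sampled pc of 0x7f0000001234 to
# srcpc 0x2234 for the cache lookup below.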
srcpc = pc if binary['static'] else (pc - binary['start']) + binary['offset']
if binary['kernel']:
sample = copy(SAMPLE.invalid)
sample[SAMPLE.pc] = srcpc
sample[SAMPLE.binary] = binary['binary']
for f in self.kallsyms:
if f[0] <= srcpc:
sample[SAMPLE.function] = f[1]
break
else:
sample = self.cache.getSampleFromPC(binary['path'], srcpc)
if sample is not None:
if sample[SAMPLE.binary] not in self.cacheMap:
self.cacheMap[sample[SAMPLE.binary]] = os.path.basename(self.cache.getCacheFile(binary['path']))
if sample is None:
sample = copy(SAMPLE.invalid)
sample[SAMPLE.pc] = pc
result = self.mapper.mapValues(sample)
self._localSampleCache[pc] = result
return result
def parseFromSample(self, sample):
return self.mapper.remapValues(sample)
def getMaps(self):
return self.mapper.retrieveMaps()
def getCacheMap(self):
return self.cacheMap
def getName(self, binary):
self.cache.openOrCreateCache(binary)
return self.cache.caches[binary]['name']
class sampleFormatter():
mapper = listmapper()
def __init__(self, maps):
self.mapper.setMaps(maps)
def remapSample(self, sample):
return self.mapper.remapValues(sample)
def formatSample(self, sample, displayKeys=[SAMPLE.binary, SAMPLE.function], delimiter=":", labelNone='_unknown'):
for i, k in enumerate(displayKeys):
valid = True
if isinstance(k, str):
if k not in SAMPLE.names:
valid = False
else:
displayKeys[i] = SAMPLE.names.index(k)
elif isinstance(k, int):
if i < 0 or i >= len(SAMPLE.names):
valid = False
else:
valid = False
if not valid:
raise Exception(f'class sampleFormatter encountered unknown display key {k}')
return delimiter.join([str(labelNone) if sample[x] is None else
f"0x{sample[x]:x}" if x == SAMPLE.pc else
os.path.basename(sample[x]) if x == SAMPLE.file else
str(sample[x]) for x in displayKeys])
| [] | [] | [
"UNWIND_INLINE",
"DISABLE_CACHE",
"PPERF_CACHE",
"CROSS_COMPILE"
] | [] | ["UNWIND_INLINE", "DISABLE_CACHE", "PPERF_CACHE", "CROSS_COMPILE"] | python | 4 | 0 | |
edge/cmd/edgecore/app/server.go | package app
import (
"errors"
"fmt"
"os"
"github.com/mitchellh/go-ps"
"github.com/spf13/cobra"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/cli/globalflag"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
"github.com/kubeedge/beehive/pkg/core"
"github.com/kubeedge/kubeedge/common/constants"
"github.com/kubeedge/kubeedge/edge/cmd/edgecore/app/options"
"github.com/kubeedge/kubeedge/edge/pkg/common/dbm"
"github.com/kubeedge/kubeedge/edge/pkg/devicetwin"
"github.com/kubeedge/kubeedge/edge/pkg/edged"
"github.com/kubeedge/kubeedge/edge/pkg/edgehub"
"github.com/kubeedge/kubeedge/edge/pkg/edgestream"
"github.com/kubeedge/kubeedge/edge/pkg/eventbus"
"github.com/kubeedge/kubeedge/edge/pkg/metamanager"
"github.com/kubeedge/kubeedge/edge/pkg/servicebus"
"github.com/kubeedge/kubeedge/edge/test"
edgemesh "github.com/kubeedge/kubeedge/edgemesh/pkg"
"github.com/kubeedge/kubeedge/pkg/apis/componentconfig/edgecore/v1alpha1"
"github.com/kubeedge/kubeedge/pkg/apis/componentconfig/edgecore/v1alpha1/validation"
"github.com/kubeedge/kubeedge/pkg/util"
"github.com/kubeedge/kubeedge/pkg/util/flag"
"github.com/kubeedge/kubeedge/pkg/version"
"github.com/kubeedge/kubeedge/pkg/version/verflag"
)
// NewEdgeCoreCommand create edgecore cmd
func NewEdgeCoreCommand() *cobra.Command {
opts := options.NewEdgeCoreOptions()
cmd := &cobra.Command{
Use: "edgecore",
Long: `Edgecore is the core edge part of KubeEdge, which contains six modules: devicetwin, edged,
edgehub, eventbus, metamanager, and servicebus. DeviceTwin is responsible for storing device status
and syncing device status to the cloud. It also provides query interfaces for applications. Edged is an
agent that runs on edge nodes and manages containerized applications and devices. Edgehub is a web socket
client responsible for interacting with Cloud Service for the edge computing (like Edge Controller as in the KubeEdge
Architecture). This includes syncing cloud-side resource updates to the edge, and reporting
edge-side host and device status changes to the cloud. EventBus is an MQTT client that interacts with MQTT
servers (mosquitto), offering publish and subscribe capabilities to other components. MetaManager
is the message processor between edged and edgehub. It is also responsible for storing/retrieving metadata
to/from a lightweight database (SQLite). ServiceBus is an HTTP client to interact with HTTP servers (REST),
offering HTTP client capabilities to cloud components to reach HTTP servers running at the edge. `,
Run: func(cmd *cobra.Command, args []string) {
verflag.PrintAndExitIfRequested()
flag.PrintMinConfigAndExitIfRequested(v1alpha1.NewMinEdgeCoreConfig())
flag.PrintDefaultConfigAndExitIfRequested(v1alpha1.NewDefaultEdgeCoreConfig())
flag.PrintFlags(cmd.Flags())
if errs := opts.Validate(); len(errs) > 0 {
klog.Fatal(util.SpliceErrors(errs))
}
config, err := opts.Config()
if err != nil {
klog.Fatal(err)
}
if errs := validation.ValidateEdgeCoreConfiguration(config); len(errs) > 0 {
klog.Fatal(util.SpliceErrors(errs.ToAggregate().Errors()))
}
// To help debugging, immediately log version
klog.Infof("Version: %+v", version.Get())
// Check the running environment by default
checkEnv := os.Getenv("CHECK_EDGECORE_ENVIRONMENT")
if checkEnv != "false" {
// Check the running environment before starting edgecore
if err := environmentCheck(); err != nil {
klog.Fatal(fmt.Errorf("Failed to check the running environment: %v", err))
}
}
// get edge node local ip
if config.Modules.Edged.NodeIP == "" {
hostnameOverride, err := os.Hostname()
if err != nil {
hostnameOverride = constants.DefaultHostnameOverride
}
localIP, _ := util.GetLocalIP(hostnameOverride)
config.Modules.Edged.NodeIP = localIP
}
registerModules(config)
// start all modules
core.Run()
},
}
fs := cmd.Flags()
namedFs := opts.Flags()
flag.AddFlags(namedFs.FlagSet("global"))
verflag.AddFlags(namedFs.FlagSet("global"))
globalflag.AddGlobalFlags(namedFs.FlagSet("global"), cmd.Name())
for _, f := range namedFs.FlagSets {
fs.AddFlagSet(f)
}
usageFmt := "Usage:\n %s\n"
cols, _, _ := term.TerminalSize(cmd.OutOrStdout())
cmd.SetUsageFunc(func(cmd *cobra.Command) error {
fmt.Fprintf(cmd.OutOrStderr(), usageFmt, cmd.UseLine())
cliflag.PrintSections(cmd.OutOrStderr(), namedFs, cols)
return nil
})
cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n"+usageFmt, cmd.Long, cmd.UseLine())
cliflag.PrintSections(cmd.OutOrStdout(), namedFs, cols)
})
return cmd
}
// environmentCheck checks the environment before edgecore starts.
// If the check fails, it returns an error.
func environmentCheck() error {
processes, err := ps.Processes()
if err != nil {
return err
}
for _, process := range processes {
// if kubelet is running, return error
if process.Executable() == "kubelet" {
return errors.New("kubelet should not running on edge node when running edgecore")
}
// if kube-proxy is running, return error
if process.Executable() == "kube-proxy" {
return errors.New("kube-proxy should not running on edge node when running edgecore")
}
}
return nil
}
// registerModules registers all the modules started in edgecore
func registerModules(c *v1alpha1.EdgeCoreConfig) {
devicetwin.Register(c.Modules.DeviceTwin, c.Modules.Edged.HostnameOverride)
edged.Register(c.Modules.Edged)
edgehub.Register(c.Modules.EdgeHub, c.Modules.Edged.HostnameOverride)
eventbus.Register(c.Modules.EventBus, c.Modules.Edged.HostnameOverride)
edgemesh.Register(c.Modules.EdgeMesh)
metamanager.Register(c.Modules.MetaManager)
servicebus.Register(c.Modules.ServiceBus)
edgestream.Register(c.Modules.EdgeStream, c.Modules.Edged.HostnameOverride, c.Modules.Edged.NodeIP)
test.Register(c.Modules.DBTest)
	// Note: this must stay at the end; wait for all modules to register before executing
dbm.InitDBConfig(c.DataBase.DriverName, c.DataBase.AliasName, c.DataBase.DataSource)
}
| [
"\"CHECK_EDGECORE_ENVIRONMENT\""
] | [] | [
"CHECK_EDGECORE_ENVIRONMENT"
] | [] | ["CHECK_EDGECORE_ENVIRONMENT"] | go | 1 | 0 | |
flink-runtime/src/main/java/org/apache/flink/runtime/fs/hdfs/DistributedFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.fs.hdfs;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.URI;
import java.net.UnknownHostException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.GlobalConfiguration;
import org.apache.flink.core.fs.BlockLocation;
import org.apache.flink.core.fs.FSDataInputStream;
import org.apache.flink.core.fs.FSDataOutputStream;
import org.apache.flink.core.fs.FileStatus;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.util.InstantiationUtil;
import org.apache.hadoop.conf.Configuration;
/**
 * Concrete implementation of the {@link FileSystem} base class for the Hadoop Distributed File System. The
 * class is essentially a wrapper which encapsulates the original Hadoop HDFS API.
 */
public final class DistributedFileSystem extends FileSystem {
private static final Logger LOG = LoggerFactory.getLogger(DistributedFileSystem.class);
private static final String DEFAULT_HDFS_CLASS = "org.apache.hadoop.hdfs.DistributedFileSystem";
/**
* Configuration value name for the DFS implementation name. Usually not specified in hadoop configurations.
*/
private static final String HDFS_IMPLEMENTATION_KEY = "fs.hdfs.impl";
private final org.apache.hadoop.conf.Configuration conf;
private final org.apache.hadoop.fs.FileSystem fs;
/**
* Creates a new DistributedFileSystem object to access HDFS
*
* @throws IOException
	 *         thrown if the required HDFS classes cannot be instantiated
*/
public DistributedFileSystem() throws IOException {
// Create new Hadoop configuration object
this.conf = getHadoopConfiguration();
Class<? extends org.apache.hadoop.fs.FileSystem> fsClass = null;
// try to get the FileSystem implementation class Hadoop 2.0.0 style
{
LOG.debug("Trying to load HDFS class Hadoop 2.x style.");
Object fsHandle = null;
try {
Method newApi = org.apache.hadoop.fs.FileSystem.class.getMethod("getFileSystemClass", String.class, org.apache.hadoop.conf.Configuration.class);
fsHandle = newApi.invoke(null, "hdfs", conf);
} catch (Exception e) {
				// if we can't find the FileSystem class using the new API,
				// fsHandle will still be null; we assume we're running on an older Hadoop version
}
if (fsHandle != null) {
if (fsHandle instanceof Class && org.apache.hadoop.fs.FileSystem.class.isAssignableFrom((Class<?>) fsHandle)) {
fsClass = ((Class<?>) fsHandle).asSubclass(org.apache.hadoop.fs.FileSystem.class);
if (LOG.isDebugEnabled()) {
LOG.debug("Loaded '" + fsClass.getName() + "' as HDFS class.");
}
}
else {
LOG.debug("Unexpected return type from 'org.apache.hadoop.fs.FileSystem.getFileSystemClass(String, Configuration)'.");
throw new RuntimeException("The value returned from org.apache.hadoop.fs.FileSystem.getFileSystemClass(String, Configuration) is not a valid subclass of org.apache.hadoop.fs.FileSystem.");
}
}
}
// fall back to an older Hadoop version
if (fsClass == null)
{
// first of all, check for a user-defined hdfs class
if (LOG.isDebugEnabled()) {
LOG.debug("Falling back to loading HDFS class old Hadoop style. Looking for HDFS class configuration entry '"
+ HDFS_IMPLEMENTATION_KEY + "'.");
}
Class<?> classFromConfig = conf.getClass(HDFS_IMPLEMENTATION_KEY, null);
if (classFromConfig != null)
{
if (org.apache.hadoop.fs.FileSystem.class.isAssignableFrom(classFromConfig)) {
fsClass = classFromConfig.asSubclass(org.apache.hadoop.fs.FileSystem.class);
if (LOG.isDebugEnabled()) {
LOG.debug("Loaded HDFS class '" + fsClass.getName() + "' as specified in configuration.");
}
}
else {
if (LOG.isDebugEnabled()) {
LOG.debug("HDFS class specified by " + HDFS_IMPLEMENTATION_KEY + " is of wrong type.");
}
throw new IOException("HDFS class specified by " + HDFS_IMPLEMENTATION_KEY +
" cannot be cast to a FileSystem type.");
}
}
else {
// load the default HDFS class
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to load default HDFS implementation " + DEFAULT_HDFS_CLASS);
}
try {
Class <?> reflectedClass = Class.forName(DEFAULT_HDFS_CLASS);
if (org.apache.hadoop.fs.FileSystem.class.isAssignableFrom(reflectedClass)) {
fsClass = reflectedClass.asSubclass(org.apache.hadoop.fs.FileSystem.class);
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Default HDFS class is of wrong type.");
}
throw new IOException("The default HDFS class '" + DEFAULT_HDFS_CLASS +
"' cannot be cast to a FileSystem type.");
}
}
catch (ClassNotFoundException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Default HDFS class cannot be loaded.");
}
throw new IOException("No HDFS class has been configured and the default class '" +
DEFAULT_HDFS_CLASS + "' cannot be loaded.");
}
}
}
this.fs = instantiateFileSystem(fsClass);
}
	/**
	 * Returns a new Hadoop Configuration object using the path to the hadoop conf configured
	 * in the main configuration (flink-conf.yaml).
	 * This method is public because it is being used in the HadoopDataSource.
	 */
public static org.apache.hadoop.conf.Configuration getHadoopConfiguration() {
Configuration retConf = new org.apache.hadoop.conf.Configuration();
// We need to load both core-site.xml and hdfs-site.xml to determine the default fs path and
// the hdfs configuration
// Try to load HDFS configuration from Hadoop's own configuration files
// 1. approach: Flink configuration
final String hdfsDefaultPath = GlobalConfiguration.getString(ConfigConstants.HDFS_DEFAULT_CONFIG, null);
if (hdfsDefaultPath != null) {
retConf.addResource(new org.apache.hadoop.fs.Path(hdfsDefaultPath));
} else {
LOG.debug("Cannot find hdfs-default configuration file");
}
final String hdfsSitePath = GlobalConfiguration.getString(ConfigConstants.HDFS_SITE_CONFIG, null);
if (hdfsSitePath != null) {
retConf.addResource(new org.apache.hadoop.fs.Path(hdfsSitePath));
} else {
LOG.debug("Cannot find hdfs-site configuration file");
}
// 2. Approach environment variables
String[] possibleHadoopConfPaths = new String[4];
possibleHadoopConfPaths[0] = GlobalConfiguration.getString(ConfigConstants.PATH_HADOOP_CONFIG, null);
possibleHadoopConfPaths[1] = System.getenv("HADOOP_CONF_DIR");
if (System.getenv("HADOOP_HOME") != null) {
possibleHadoopConfPaths[2] = System.getenv("HADOOP_HOME")+"/conf";
possibleHadoopConfPaths[3] = System.getenv("HADOOP_HOME")+"/etc/hadoop"; // hadoop 2.2
}
for (String possibleHadoopConfPath : possibleHadoopConfPaths) {
if (possibleHadoopConfPath != null) {
if (new File(possibleHadoopConfPath).exists()) {
if (new File(possibleHadoopConfPath + "/core-site.xml").exists()) {
retConf.addResource(new org.apache.hadoop.fs.Path(possibleHadoopConfPath + "/core-site.xml"));
if (LOG.isDebugEnabled()) {
LOG.debug("Adding " + possibleHadoopConfPath + "/core-site.xml to hadoop configuration");
}
}
if (new File(possibleHadoopConfPath + "/hdfs-site.xml").exists()) {
retConf.addResource(new org.apache.hadoop.fs.Path(possibleHadoopConfPath + "/hdfs-site.xml"));
if (LOG.isDebugEnabled()) {
LOG.debug("Adding " + possibleHadoopConfPath + "/hdfs-site.xml to hadoop configuration");
}
}
}
}
}
return retConf;
}
private org.apache.hadoop.fs.FileSystem instantiateFileSystem(Class<? extends org.apache.hadoop.fs.FileSystem> fsClass)
throws IOException
{
try {
return fsClass.newInstance();
}
catch (ExceptionInInitializerError e) {
throw new IOException("The filesystem class '" + fsClass.getName() + "' throw an exception upon initialization.", e.getException());
}
catch (Throwable t) {
String errorMessage = InstantiationUtil.checkForInstantiationError(fsClass);
if (errorMessage != null) {
throw new IOException("The filesystem class '" + fsClass.getName() + "' cannot be instantiated: " + errorMessage);
} else {
throw new IOException("An error occurred while instantiating the filesystem class '" +
fsClass.getName() + "'.", t);
}
}
}
@Override
public Path getWorkingDirectory() {
return new Path(this.fs.getWorkingDirectory().toUri());
}
@Override
public URI getUri() {
return fs.getUri();
}
@Override
public void initialize(URI path) throws IOException {
// For HDFS we have to have an authority
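		// i.e. the URI should look like hdfs://namenode:port/path; without an
		// authority we fall back to the default FS from the Hadoop configuration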
if (path.getAuthority() == null) {
String configEntry = this.conf.get("fs.default.name", null);
if (configEntry == null) {
// fs.default.name deprecated as of hadoop 2.2.0 http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/DeprecatedProperties.html
configEntry = this.conf.get("fs.defaultFS", null);
}
if (LOG.isDebugEnabled()) {
LOG.debug("fs.defaultFS is set to " + configEntry);
}
if (configEntry == null) {
throw new IOException(getMissingAuthorityErrorPrefix(path) + "Either no default hdfs configuration was registered, " +
"or that configuration did not contain an entry for the default hdfs.");
} else {
try {
URI initURI = URI.create(configEntry);
if (initURI.getAuthority() == null) {
throw new IOException(getMissingAuthorityErrorPrefix(path) + "Either no default hdfs configuration was registered, " +
"or the provided configuration contains no valid hdfs namenode address (fs.default.name or fs.defaultFS) describing the hdfs namenode host and port.");
} else if (!initURI.getScheme().equalsIgnoreCase("hdfs")) {
throw new IOException(getMissingAuthorityErrorPrefix(path) + "Either no default hdfs configuration was registered, " +
"or the provided configuration describes a file system with scheme '" + initURI.getScheme() + "' other than the Hadoop Distributed File System (HDFS).");
} else {
try {
this.fs.initialize(initURI, this.conf);
}
catch (IOException e) {
throw new IOException(getMissingAuthorityErrorPrefix(path) +
"Could not initialize the file system connection with the given address of the HDFS NameNode: " + e.getMessage(), e);
}
}
}
catch (IllegalArgumentException e) {
throw new IOException(getMissingAuthorityErrorPrefix(path) +
"The configuration contains an invalid hdfs default name (fs.default.name or fs.defaultFS): " + configEntry);
}
}
}
else {
// Initialize HDFS
try {
this.fs.initialize(path, this.conf);
}
catch (UnknownHostException e) {
String message = "The HDFS NameNode host at '" + path.getAuthority()
+ "', specified by file path '" + path.toString() + "', cannot be resolved"
+ (e.getMessage() != null ? ": " + e.getMessage() : ".");
if (path.getPort() == -1) {
message += " Hint: Have you forgotten a slash? (correct URI would be 'hdfs:///" + path.getAuthority() + path.getPath() + "' ?)";
}
throw new IOException(message, e);
}
catch (Exception e) {
throw new IOException("The given file URI (" + path.toString() + ") points to the HDFS NameNode at "
+ path.getAuthority() + ", but the File System could not be initialized with that address"
+ (e.getMessage() != null ? ": " + e.getMessage() : "."), e);
}
}
}
private static String getMissingAuthorityErrorPrefix(URI path) {
return "The given HDFS file URI (" + path.toString() + ") did not describe the HDFS NameNode." +
" The attempt to use a default HDFS configuration, as specified in the '" + ConfigConstants.HDFS_DEFAULT_CONFIG + "' or '" +
ConfigConstants.HDFS_SITE_CONFIG + "' config parameter failed due to the following problem: ";
}
@Override
public FileStatus getFileStatus(final Path f) throws IOException {
org.apache.hadoop.fs.FileStatus status = this.fs.getFileStatus(new org.apache.hadoop.fs.Path(f.toString()));
return new DistributedFileStatus(status);
}
@Override
public BlockLocation[] getFileBlockLocations(final FileStatus file, final long start, final long len)
throws IOException
{
if (!(file instanceof DistributedFileStatus)) {
throw new IOException("file is not an instance of DistributedFileStatus");
}
final DistributedFileStatus f = (DistributedFileStatus) file;
final org.apache.hadoop.fs.BlockLocation[] blkLocations = fs.getFileBlockLocations(f.getInternalFileStatus(),
start, len);
// Wrap up HDFS specific block location objects
final DistributedBlockLocation[] distBlkLocations = new DistributedBlockLocation[blkLocations.length];
for (int i = 0; i < distBlkLocations.length; i++) {
distBlkLocations[i] = new DistributedBlockLocation(blkLocations[i]);
}
return distBlkLocations;
}
@Override
public FSDataInputStream open(final Path f, final int bufferSize) throws IOException {
final org.apache.hadoop.fs.FSDataInputStream fdis = this.fs.open(new org.apache.hadoop.fs.Path(f.toString()),
bufferSize);
return new DistributedDataInputStream(fdis);
}
@Override
public FSDataInputStream open(final Path f) throws IOException {
final org.apache.hadoop.fs.FSDataInputStream fdis = fs.open(new org.apache.hadoop.fs.Path(f.toString()));
return new DistributedDataInputStream(fdis);
}
@Override
public FSDataOutputStream create(final Path f, final boolean overwrite, final int bufferSize,
final short replication, final long blockSize)
throws IOException
{
final org.apache.hadoop.fs.FSDataOutputStream fdos = this.fs.create(
new org.apache.hadoop.fs.Path(f.toString()), overwrite, bufferSize, replication, blockSize);
return new DistributedDataOutputStream(fdos);
}
@Override
public FSDataOutputStream create(final Path f, final boolean overwrite) throws IOException {
final org.apache.hadoop.fs.FSDataOutputStream fsDataOutputStream = this.fs
.create(new org.apache.hadoop.fs.Path(f.toString()), overwrite);
return new DistributedDataOutputStream(fsDataOutputStream);
}
@Override
public boolean delete(final Path f, final boolean recursive) throws IOException {
return this.fs.delete(new org.apache.hadoop.fs.Path(f.toString()), recursive);
}
@Override
public FileStatus[] listStatus(final Path f) throws IOException {
final org.apache.hadoop.fs.FileStatus[] hadoopFiles = this.fs.listStatus(new org.apache.hadoop.fs.Path(f.toString()));
final FileStatus[] files = new FileStatus[hadoopFiles.length];
// Convert types
for (int i = 0; i < files.length; i++) {
files[i] = new DistributedFileStatus(hadoopFiles[i]);
}
return files;
}
@Override
public boolean mkdirs(final Path f) throws IOException {
return this.fs.mkdirs(new org.apache.hadoop.fs.Path(f.toString()));
}
@Override
public boolean rename(final Path src, final Path dst) throws IOException {
return this.fs.rename(new org.apache.hadoop.fs.Path(src.toString()),
new org.apache.hadoop.fs.Path(dst.toString()));
}
@SuppressWarnings("deprecation")
@Override
public long getDefaultBlockSize() {
return this.fs.getDefaultBlockSize();
}
@Override
public boolean isDistributedFS() {
return true;
}
}
| [
"\"HADOOP_CONF_DIR\"",
"\"HADOOP_HOME\"",
"\"HADOOP_HOME\"",
"\"HADOOP_HOME\""
] | [] | [
"HADOOP_HOME",
"HADOOP_CONF_DIR"
] | [] | ["HADOOP_HOME", "HADOOP_CONF_DIR"] | java | 2 | 0 | |
vendor/github.com/mitchellh/packer/builder/vmware/common/driver_player5_windows.go | // +build windows
package common
import (
"log"
"os"
"os/exec"
"path/filepath"
"syscall"
)
func playerFindVdiskManager() (string, error) {
path, err := exec.LookPath("vmware-vdiskmanager.exe")
if err == nil {
return path, nil
}
return findFile("vmware-vdiskmanager.exe", playerProgramFilePaths()), nil
}
func playerFindQemuImg() (string, error) {
path, err := exec.LookPath("qemu-img.exe")
if err == nil {
return path, nil
}
return findFile("qemu-img.exe", playerProgramFilePaths()), nil
}
func playerFindVMware() (string, error) {
path, err := exec.LookPath("vmplayer.exe")
if err == nil {
return path, nil
}
return findFile("vmplayer.exe", playerProgramFilePaths()), nil
}
func playerFindVmrun() (string, error) {
path, err := exec.LookPath("vmrun.exe")
if err == nil {
return path, nil
}
return findFile("vmrun.exe", playerProgramFilePaths()), nil
}
func playerToolsIsoPath(flavor string) string {
return findFile(flavor+".iso", playerProgramFilePaths())
}
func playerDhcpLeasesPath(device string) string {
path, err := playerDhcpLeasesPathRegistry()
if err != nil {
log.Printf("Error finding leases in registry: %s", err)
} else if _, err := os.Stat(path); err == nil {
return path
}
return findFile("vmnetdhcp.leases", playerDataFilePaths())
}
func playerVmDhcpConfPath(device string) string {
// the device isn't actually used on windows hosts
path, err := playerDhcpConfigPathRegistry()
if err != nil {
log.Printf("Error finding configuration in registry: %s", err)
} else if _, err := os.Stat(path); err == nil {
return path
}
return findFile("vmnetdhcp.conf", playerDataFilePaths())
}
func playerVmnetnatConfPath(device string) string {
// the device isn't actually used on windows hosts
return findFile("vmnetnat.conf", playerDataFilePaths())
}
func playerNetmapConfPath() string {
return findFile("netmap.conf", playerDataFilePaths())
}
// This reads the VMware installation path from the Windows registry.
func playerVMwareRoot() (s string, err error) {
key := `SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\vmplayer.exe`
subkey := "Path"
s, err = readRegString(syscall.HKEY_LOCAL_MACHINE, key, subkey)
if err != nil {
log.Printf(`Unable to read registry key %s\%s`, key, subkey)
return
}
return normalizePath(s), nil
}
// This reads the VMware DHCP leases path from the Windows registry.
func playerDhcpLeasesPathRegistry() (s string, err error) {
key := "SYSTEM\\CurrentControlSet\\services\\VMnetDHCP\\Parameters"
subkey := "LeaseFile"
s, err = readRegString(syscall.HKEY_LOCAL_MACHINE, key, subkey)
if err != nil {
log.Printf(`Unable to read registry key %s\%s`, key, subkey)
return
}
return normalizePath(s), nil
}
// This reads the VMware DHCP configuration path from the Windows registry.
func playerDhcpConfigPathRegistry() (s string, err error) {
key := "SYSTEM\\CurrentControlSet\\services\\VMnetDHCP\\Parameters"
subkey := "ConfFile"
s, err = readRegString(syscall.HKEY_LOCAL_MACHINE, key, subkey)
if err != nil {
log.Printf(`Unable to read registry key %s\%s`, key, subkey)
return
}
return normalizePath(s), nil
}
// playerProgramFilePaths returns a list of paths that are eligible
// to contain program files we may want, such as vmplayer.exe.
func playerProgramFilePaths() []string {
path, err := playerVMwareRoot()
if err != nil {
log.Printf("Error finding VMware root: %s", err)
}
paths := make([]string, 0, 5)
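	// Order matters: an explicit VMWARE_HOME wins, then the registry install
	// path, then the standard Program Files and QEMU locations.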
if os.Getenv("VMWARE_HOME") != "" {
paths = append(paths, os.Getenv("VMWARE_HOME"))
}
if path != "" {
paths = append(paths, path)
}
if os.Getenv("ProgramFiles(x86)") != "" {
paths = append(paths,
filepath.Join(os.Getenv("ProgramFiles(x86)"), "/VMware/VMware Player"))
}
if os.Getenv("ProgramFiles") != "" {
paths = append(paths,
filepath.Join(os.Getenv("ProgramFiles"), "/VMware/VMware Player"))
}
if os.Getenv("QEMU_HOME") != "" {
paths = append(paths, os.Getenv("QEMU_HOME"))
}
if os.Getenv("ProgramFiles(x86)") != "" {
paths = append(paths,
filepath.Join(os.Getenv("ProgramFiles(x86)"), "/QEMU"))
}
if os.Getenv("ProgramFiles") != "" {
paths = append(paths,
filepath.Join(os.Getenv("ProgramFiles"), "/QEMU"))
}
if os.Getenv("SystemDrive") != "" {
paths = append(paths,
filepath.Join(os.Getenv("SystemDrive"), "/QEMU"))
}
return paths
}
// playerDataFilePaths returns a list of paths that are eligible
// to contain data files we may want such as vmnet NAT configuration files.
func playerDataFilePaths() []string {
leasesPath, err := playerDhcpLeasesPathRegistry()
if err != nil {
log.Printf("Error getting DHCP leases path: %s", err)
}
if leasesPath != "" {
leasesPath = filepath.Dir(leasesPath)
}
paths := make([]string, 0, 5)
if os.Getenv("VMWARE_DATA") != "" {
paths = append(paths, os.Getenv("VMWARE_DATA"))
}
if leasesPath != "" {
paths = append(paths, leasesPath)
}
if os.Getenv("ProgramData") != "" {
paths = append(paths,
filepath.Join(os.Getenv("ProgramData"), "/VMware"))
}
if os.Getenv("ALLUSERSPROFILE") != "" {
paths = append(paths,
filepath.Join(os.Getenv("ALLUSERSPROFILE"), "/Application Data/VMware"))
}
return paths
}
| [
"\"VMWARE_HOME\"",
"\"VMWARE_HOME\"",
"\"ProgramFiles(x86",
"\"ProgramFiles(x86",
"\"ProgramFiles\"",
"\"ProgramFiles\"",
"\"QEMU_HOME\"",
"\"QEMU_HOME\"",
"\"ProgramFiles(x86",
"\"ProgramFiles(x86",
"\"ProgramFiles\"",
"\"ProgramFiles\"",
"\"SystemDrive\"",
"\"SystemDrive\"",
"\"VMWARE_DATA\"",
"\"VMWARE_DATA\"",
"\"ProgramData\"",
"\"ProgramData\"",
"\"ALLUSERSPROFILE\"",
"\"ALLUSERSPROFILE\""
] | [] | [
"VMWARE_DATA",
"QEMU_HOME",
"ProgramFiles(x8",
"VMWARE_HOME",
"ALLUSERSPROFILE",
"ProgramFiles",
"SystemDrive",
"ProgramData"
] | [] | ["VMWARE_DATA", "QEMU_HOME", "ProgramFiles(x8", "VMWARE_HOME", "ALLUSERSPROFILE", "ProgramFiles", "SystemDrive", "ProgramData"] | go | 8 | 0 | |
traffic_ops/traffic_ops_golang/crconfig/snapshot_test.go | package crconfig
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"context"
"database/sql"
"database/sql/driver"
"encoding/json"
"reflect"
"testing"
"time"
"github.com/apache/trafficcontrol/lib/go-tc"
"github.com/apache/trafficcontrol/traffic_ops/traffic_ops_golang/monitoring"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
)
func ExpectedGetSnapshot(crc *tc.CRConfig) ([]byte, error) {
return json.Marshal(crc)
}
func ExpectedGetMonitoringSnapshot(crc *tc.CRConfig, tx *sql.Tx) ([]byte, error) {
tm, _ := monitoring.GetMonitoringJSON(tx, *crc.Stats.CDNName)
return json.Marshal(tm)
}
func MockGetSnapshot(mock sqlmock.Sqlmock, expected []byte, cdn string) {
rows := sqlmock.NewRows([]string{"snapshot"})
rows = rows.AddRow(expected)
rows = rows.AddRow(expected)
mock.ExpectQuery("SELECT").WithArgs(cdn).WillReturnRows(rows)
}
func TestGetSnapshot(t *testing.T) {
db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
}
defer db.Close()
cdn := "mycdn"
crc := &tc.CRConfig{}
crc.Stats.CDNName = &cdn
mock.ExpectBegin()
expected, err := ExpectedGetSnapshot(crc)
if err != nil {
t.Fatalf("GetSnapshot creating expected err expected: nil, actual: %v", err)
}
MockGetSnapshot(mock, expected, cdn)
mock.ExpectCommit()
dbCtx, cancelTx := context.WithTimeout(context.TODO(), 10*time.Second)
defer cancelTx()
tx, err := db.BeginTx(dbCtx, nil)
if err != nil {
t.Fatalf("creating transaction: %v", err)
}
defer tx.Commit()
actual, exists, err := GetSnapshot(tx, cdn)
if err != nil {
t.Fatalf("GetSnapshot err expected: nil, actual: %v", err)
}
if !exists {
t.Fatalf("GetSnapshot exists expected: true, actual: false")
}
if !reflect.DeepEqual(string(expected), actual) {
t.Errorf("GetSnapshot expected: %+v, actual: %+v", string(expected), actual)
}
}
type AnyTime struct{}
// Match satisfies sqlmock.Argument interface
func (a AnyTime) Match(v driver.Value) bool {
_, ok := v.(time.Time)
return ok
}
type Any struct{}
// Match satisfies sqlmock.Argument interface
func (a Any) Match(v driver.Value) bool {
return true
}
func MockSnapshot(mock sqlmock.Sqlmock, expected []byte, expectedtm []byte, cdn string) {
mock.ExpectExec("insert").WithArgs(cdn, expected, AnyTime{}, expectedtm).WillReturnResult(sqlmock.NewResult(1, 1))
}
func TestSnapshot(t *testing.T) {
db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
}
defer db.Close()
cdn := "mycdn"
crc := &tc.CRConfig{}
crc.Stats.CDNName = &cdn
mock.ExpectBegin()
dbCtx, cancelTx := context.WithTimeout(context.TODO(), 10*time.Second)
defer cancelTx()
tx, err := db.BeginTx(dbCtx, nil)
if err != nil {
t.Fatalf("creating transaction: %v", err)
}
expected, err := ExpectedGetSnapshot(crc)
if err != nil {
t.Fatalf("GetSnapshot creating expected err expected: nil, actual: %v", err)
}
	expectedtm, err := ExpectedGetMonitoringSnapshot(crc, tx)
if err != nil {
t.Fatalf("GetSnapshotMonitor creating expected err expected: nil, actual: %v", err)
}
tm, _ := monitoring.GetMonitoringJSON(tx, *crc.Stats.CDNName)
MockSnapshot(mock, expected, expectedtm, cdn)
mock.ExpectCommit()
defer tx.Commit()
if err := Snapshot(tx, crc, tm); err != nil {
t.Fatalf("GetSnapshot err expected: nil, actual: %v", err)
}
}
| [] | [] | [] | [] | [] | go | null | null | null |
dcrutil/appdata_test.go | // Copyright (c) 2013-2014 The btcsuite developers
// Copyright (c) 2015 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package dcrutil_test
import (
"os"
"os/user"
"path/filepath"
"runtime"
"testing"
"unicode"
"github.com/mc-aeq/aeqd/dcrutil"
)
// TestAppDataDir tests the API for AppDataDir to ensure it gives expected
// results for various operating systems.
func TestAppDataDir(t *testing.T) {
// App name plus upper and lowercase variants.
appName := "myapp"
appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:]
appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:]
// When we're on Windows, set the expected local and roaming directories
// per the environment vars. When we aren't on Windows, the function
// should return the current directory when forced to provide the
// Windows path since the environment variables won't exist.
winLocal := "."
winRoaming := "."
if runtime.GOOS == "windows" {
localAppData := os.Getenv("LOCALAPPDATA")
roamingAppData := os.Getenv("APPDATA")
if localAppData == "" {
localAppData = roamingAppData
}
winLocal = filepath.Join(localAppData, appNameUpper)
winRoaming = filepath.Join(roamingAppData, appNameUpper)
}
// Get the home directory to use for testing expected results.
var homeDir string
usr, err := user.Current()
if err != nil {
t.Errorf("user.Current: %v", err)
return
}
homeDir = usr.HomeDir
// Mac app data directory.
macAppData := filepath.Join(homeDir, "Library", "Application Support")
tests := []struct {
goos string
appName string
roaming bool
want string
}{
// Various combinations of application name casing, leading
// period, operating system, and roaming flags.
{"windows", appNameLower, false, winLocal},
{"windows", appNameUpper, false, winLocal},
{"windows", "." + appNameLower, false, winLocal},
{"windows", "." + appNameUpper, false, winLocal},
{"windows", appNameLower, true, winRoaming},
{"windows", appNameUpper, true, winRoaming},
{"windows", "." + appNameLower, true, winRoaming},
{"windows", "." + appNameUpper, true, winRoaming},
{"linux", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"darwin", appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"openbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"plan9", appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"unrecognized", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
// No application name provided, so expect current directory.
{"windows", "", false, "."},
{"windows", "", true, "."},
{"linux", "", false, "."},
{"darwin", "", false, "."},
{"openbsd", "", false, "."},
{"freebsd", "", false, "."},
{"netbsd", "", false, "."},
{"plan9", "", false, "."},
{"unrecognized", "", false, "."},
// Single dot provided for application name, so expect current
// directory.
{"windows", ".", false, "."},
{"windows", ".", true, "."},
{"linux", ".", false, "."},
{"darwin", ".", false, "."},
{"openbsd", ".", false, "."},
{"freebsd", ".", false, "."},
{"netbsd", ".", false, "."},
{"plan9", ".", false, "."},
{"unrecognized", ".", false, "."},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
ret := dcrutil.TstAppDataDir(test.goos, test.appName, test.roaming)
if ret != test.want {
t.Errorf("appDataDir #%d (%s) does not match - "+
"expected got %s, want %s", i, test.goos, ret,
test.want)
continue
}
}
}
| [
"\"LOCALAPPDATA\"",
"\"APPDATA\""
] | [] | [
"APPDATA",
"LOCALAPPDATA"
] | [] | ["APPDATA", "LOCALAPPDATA"] | go | 2 | 0 | |
slackbot/cmd/bot.go | package main
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"log"
"math"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
_ "github.com/lib/pq"
"github.com/nlopes/slack"
)
const (
apiEndpoint = "https://api.coinmarketcap.com/v1/ticker/"
)
var (
client *http.Client
db *sql.DB
err error
dbURL = os.Getenv("DB_URL")
dbPort = os.Getenv("DB_PORT")
dbName = os.Getenv("DB_NAME")
dbTable = os.Getenv("DB_TABLE")
dbUser = os.Getenv("DB_USER")
dbPW = os.Getenv("DB_PW")
botToken = os.Getenv("BOT_TOKEN")
)
// Contains DB connection details and Slack token
var confPath = os.Getenv("HOME") + "/.aws_conf/yachtbot.config"
func main() {
lambda.Start(queryHandler)
}
func init() {
client = &http.Client{Timeout: time.Second * 10}
// Connect to configured AWS RDS
dbinfo := fmt.Sprintf("user=%s password=%s dbname=%s host=%s port=%s sslmode=disable",
dbUser, dbPW, dbName, dbURL, dbPort)
db, err = sql.Open("postgres", dbinfo)
if err != nil {
panic(err)
}
}
func queryHandler(ctx context.Context, request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
// If it's a challenge, unmarshal and reply with challenge.Challenge
var challenge *Challenge
if err := json.Unmarshal([]byte(request.Body), &challenge); err != nil {
log.Fatalf("challenge decode: %v", err)
}
if challenge.Challenge != "" {
return events.APIGatewayProxyResponse{Body: challenge.Challenge, StatusCode: 200}, nil
}
// If it's not a challenge, it should be a mention event
var mention *Mention
if err := json.Unmarshal([]byte(request.Body), &mention); err != nil {
log.Fatalf("mention decode: %v", err)
}
// debug
fmt.Println("Mention text:", mention.Event.Text)
// Get the ticker and pull data
tickerSplit := strings.Split(mention.Event.Text, " ")
ticker := strings.ToUpper(tickerSplit[len(tickerSplit)-1])
attachment, err := getSingle(ticker)
if err != nil {
log.Fatalf("queryHandler: %v", err)
}
// Send message as slack attachment
params := slack.PostMessageParameters{AsUser: true}
params.Attachments = []slack.Attachment{attachment}
api := slack.New(botToken)
_, _, err = api.PostMessage(mention.Event.Channel, "", params)
if err != nil {
log.Fatalf("queryHandler: %v", err)
return events.APIGatewayProxyResponse{}, err
}
return events.APIGatewayProxyResponse{StatusCode: 200}, nil
}
// getSingle returns Slack attachment with price information for a single coin/token
func getSingle(ticker string) (slack.Attachment, error) {
// CoinMarketCap uses IDs to query the API, not ticker symbols
id, err := getID(db, ticker)
if err != nil {
return slack.Attachment{}, fmt.Errorf("getSingle: %v", err)
}
if id == "" {
return slack.Attachment{}, fmt.Errorf("getSingle null ID: %v", err)
}
target := apiEndpoint + id
resp, err := makeRequest(target)
if err != nil {
return slack.Attachment{}, fmt.Errorf("getSingle: %v", err)
}
attachment, err := prepareAttachment(resp)
if err != nil {
return slack.Attachment{}, fmt.Errorf("getSingle: %s", resp.Status)
}
return attachment, nil
}
// Queries Postgres DB for ID that matches the incoming ticker symbol
func getID(db *sql.DB, ticker string) (string, error) {
cleanTicker := strings.Replace(ticker, "$", "", -1)
stmt, err := db.Prepare(fmt.Sprintf("SELECT id FROM %s WHERE ticker = $1;", dbTable))
if err != nil {
return "", fmt.Errorf("\n getID db.Prepare: %v", err)
}
var id string
rows, err := stmt.Query(cleanTicker)
if err != nil {
return "", fmt.Errorf("\n getID query: %v", err)
}
for rows.Next() {
err = rows.Scan(&id)
if err != nil {
return "", fmt.Errorf("\n getID scan: %v", err)
}
}
return id, nil
}
func makeRequest(target string) (*http.Response, error) {
// Prepare and make the request
req, err := http.NewRequest("GET", target, nil)
if err != nil {
return nil, fmt.Errorf("\n makeRequest NewRequest: %v", err)
}
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("\n makeRequest Do: %v", err)
}
if resp.StatusCode != 200 {
return nil, fmt.Errorf("\n makeRequest Bad response: %s", resp.Status)
}
return resp, nil
}
func prepareAttachment(resp *http.Response) (slack.Attachment, error) {
payload := make([]Response, 0)
err := json.NewDecoder(resp.Body).Decode(&payload)
if err != nil {
return slack.Attachment{}, fmt.Errorf("\n prepareAttachment Decode: %v", err)
}
resp.Body.Close()
// No financial decisions better be made out of this, using % change to calculate $ differences
priceUSD, err := strconv.ParseFloat(payload[0].PriceUSD, 64)
if err != nil {
return slack.Attachment{}, fmt.Errorf("\n prepareAttachment ParseFloat: %v", err)
}
pct24h, err := strconv.ParseFloat(payload[0].Change24h, 64)
if err != nil {
return slack.Attachment{}, fmt.Errorf("\n prepareAttachment ParseFloat: %v", err)
}
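	// Recover the absolute move from the percent change:
	// old = new / (1 + pct/100), so diff = new - old.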
diff24h := priceUSD - (priceUSD / ((pct24h / 100) + 1))
pct7d, err := strconv.ParseFloat(payload[0].Change7d, 64)
if err != nil {
return slack.Attachment{}, fmt.Errorf("\n prepareAttachment ParseFloat: %v", err)
}
diff7d := priceUSD - (priceUSD / ((pct7d / 100) + 1))
color, emoji := getReaction(pct24h)
// Formatted Slack attachment
// https://api.slack.com/docs/message-attachments
attachment := slack.Attachment{
Title: fmt.Sprintf("Price of %s - $%s %s", payload[0].Name, payload[0].Symbol, emoji),
TitleLink: fmt.Sprintf("https://coinmarketcap.com/currencies/%s/", payload[0].ID),
Fallback: "Cryptocurrency Price",
Color: color,
Fields: []slack.AttachmentField{
{
Title: "Price USD",
Value: fmt.Sprintf("$%.2f", priceUSD),
Short: true,
},
{
Title: "Price BTC",
Value: payload[0].PriceBTC,
Short: true,
},
{
Title: "24H Change",
Value: fmt.Sprintf("%s (%s%%)", currency(diff24h), payload[0].Change24h),
Short: true,
},
{
Title: "7D Change",
Value: fmt.Sprintf("%s (%s%%)", currency(diff7d), payload[0].Change7d),
Short: true,
},
},
Footer: "ESKETIT",
}
return attachment, nil
}
// Determines color and emoji for Slack attachment based on 24h performance
func getReaction(pct24h float64) (string, string) {
switch {
case pct24h < -50:
return "#d7191c", ":trash::fire:"
case pct24h < -25:
return "#d7191c", ":smoking:"
case pct24h < -10:
return "#fdae61", ":thinking_face:"
case pct24h < 0:
return "#FAD898", ":zzz:"
case pct24h < 25:
return "#FAD898", ":beers:"
case pct24h < 50:
return "#a6d96a", ":champagne:"
case pct24h < 100:
return "#1a9641", ":racing_car:"
case pct24h < 1000:
return "#1a9641", ":motor_boat:"
default:
return "#000000", ":full_moon_with_face:"
}
}
type currency float64
// Ensures that negative sign goes before dollar sign
func (c currency) String() string {
if c < 0 {
return fmt.Sprintf("-$%.2f", math.Abs(float64(c)))
}
return fmt.Sprintf("$%.2f", float32(c))
}
// Response from CoinMarketCap API
type Response struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Symbol string `json:"symbol,omitempty"`
Rank string `json:"rank,omitempty"`
PriceUSD string `json:"price_usd,omitempty"`
PriceBTC string `json:"price_btc,omitempty"`
Volume24h string `json:"24h_volume_usd,omitempty"`
MarketCap string `json:"market_cap_usd,omitempty"`
SupplyAvailable string `json:"available_supply,omitempty"`
SupplyTotal string `json:"total_supply,omitempty"`
SupplyMax string `json:"max_supply,omitempty"`
Change1h string `json:"percent_change_1h,omitempty"`
Change24h string `json:"percent_change_24h,omitempty"`
Change7d string `json:"percent_change_7d,omitempty"`
Updated string `json:"last_updated,omitempty"`
}
// Challenge from Slack to validate my API, need to reply with the challenge in plaintext
// https://api.slack.com/events/url_verification
type Challenge struct {
Token string `json:"token,omitempty"`
Challenge string `json:"challenge,omitempty"`
Type string `json:"type,omitempty"`
}
// Mention from Slack
// https://api.slack.com/events/app_mention#mention
type Mention struct {
Token string `json:"token,omitempty"`
TeamID string `json:"team_id,omitempty"`
APIAppID string `json:"api_app_id,omitempty"`
Event Event `json:"event,omitempty"`
Type string `json:"type,omitempty"`
EventID string `json:"event_id,omitempty"`
EventTime int `json:"event_time,omitempty"`
AuthedUsers []string `json:"authed_users,omitempty"`
}
// Event details corresponding to a mention
type Event struct {
Type string `json:"type,omitempty"`
User string `json:"user,omitempty"`
Text string `json:"text,omitempty"`
TS string `json:"ts,omitempty"`
Channel string `json:"channel,omitempty"`
EventTS string `json:"event_ts,omitempty"`
}
| [
"\"DB_URL\"",
"\"DB_PORT\"",
"\"DB_NAME\"",
"\"DB_TABLE\"",
"\"DB_USER\"",
"\"DB_PW\"",
"\"BOT_TOKEN\"",
"\"HOME\""
] | [] | [
"DB_PORT",
"DB_TABLE",
"DB_PW",
"DB_NAME",
"BOT_TOKEN",
"HOME",
"DB_USER",
"DB_URL"
] | [] | ["DB_PORT", "DB_TABLE", "DB_PW", "DB_NAME", "BOT_TOKEN", "HOME", "DB_USER", "DB_URL"] | go | 8 | 0 | |
s10day12bbs/wsgi.py | """
WSGI config for s10day12bbs project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "s10day12bbs.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
utils/database/database.go | package database
import (
"context"
"os"
"github.com/georgysavva/scany/pgxscan"
"github.com/jackc/pgx/v4/pgxpool"
)
type Blog struct {
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Content string `json:"content,omitempty"`
}
func Setup() (context.Context, *pgxpool.Pool) {
ctx := context.Background()
db, _ := pgxpool.Connect(context.Background(), os.Getenv("DATABASE_URL"))
return ctx, db
}
func GetBlogs() []*Blog {
ctx, db := Setup()
var blogs []*Blog
pgxscan.Select(ctx, db, &blogs, "SELECT name, content FROM blogs")
return blogs
}
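// NOTE: the queries below splice user input directly into SQL strings;
// parameterized queries (pgx positional arguments) would avoid injection risk.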
func GetBlog(name string) *Blog {
ctx, db := Setup()
var blogs []*Blog
pgxscan.Select(ctx, db, &blogs, "SELECT name, content FROM blogs WHERE name='"+name+"'")
return blogs[0]
}
func CreateBlog(name string, content string) *Blog {
ctx, db := Setup()
var blogs []*Blog
pgxscan.Select(ctx, db, &blogs, "INSERT INTO blogs (name, content) VALUES('"+name+"','"+content+"')")
pgxscan.Select(ctx, db, &blogs, "SELECT name, content FROM blogs WHERE name='"+name+"'")
return blogs[0]
}
func DeleteBlog(name string) {
ctx, db := Setup()
var blogs []*Blog
pgxscan.Select(ctx, db, &blogs, "DELETE FROM blogs WHERE name='"+name+"'")
}
func SetBlog(name string, content string) {
ctx, db := Setup()
var blogs []*Blog
pgxscan.Select(ctx, db, &blogs, "UPDATE blogs SET content='"+content+"'WHERE name='"+name+"'")
}
| [
"\"DATABASE_URL\""
] | [] | [
"DATABASE_URL"
] | [] | ["DATABASE_URL"] | go | 1 | 0 | |
dvc/remote/hdfs.py | import io
import logging
import os
import posixpath
import re
import subprocess
from collections import deque
from contextlib import closing, contextmanager
from urllib.parse import urlparse
from .base import RemoteBASE, RemoteCmdError
from .pool import get_connection
from dvc.scheme import Schemes
from dvc.utils import fix_env, tmp_fname
logger = logging.getLogger(__name__)
class RemoteHDFS(RemoteBASE):
scheme = Schemes.HDFS
REGEX = r"^hdfs://((?P<user>.*)@)?.*$"
PARAM_CHECKSUM = "checksum"
REQUIRES = {"pyarrow": "pyarrow"}
TRAVERSE_PREFIX_LEN = 2
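    # Remote cache paths are sharded by the first two checksum characters,
    # so traversal only needs a 2-character prefix (see list_cache_paths).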
def __init__(self, repo, config):
super().__init__(repo, config)
self.path_info = None
url = config.get("url")
if not url:
return
parsed = urlparse(url)
user = parsed.username or config.get("user")
self.path_info = self.path_cls.from_parts(
scheme=self.scheme,
host=parsed.hostname,
user=user,
port=parsed.port,
path=parsed.path,
)
def hdfs(self, path_info):
import pyarrow
return get_connection(
pyarrow.hdfs.connect,
path_info.host,
path_info.port,
user=path_info.user,
)
def hadoop_fs(self, cmd, user=None):
cmd = "hadoop fs -" + cmd
if user:
cmd = "HADOOP_USER_NAME={} ".format(user) + cmd
# NOTE: close_fds doesn't work with redirected stdin/stdout/stderr.
# See https://github.com/iterative/dvc/issues/1197.
close_fds = os.name != "nt"
executable = os.getenv("SHELL") if os.name != "nt" else None
p = subprocess.Popen(
cmd,
shell=True,
close_fds=close_fds,
executable=executable,
env=fix_env(os.environ),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
if p.returncode != 0:
raise RemoteCmdError(self.scheme, cmd, p.returncode, err)
return out.decode("utf-8")
@staticmethod
def _group(regex, s, gname):
match = re.match(regex, s)
assert match is not None
return match.group(gname)
def get_file_checksum(self, path_info):
# NOTE: pyarrow doesn't support checksum, so we need to use hadoop
regex = r".*\t.*\t(?P<checksum>.*)"
stdout = self.hadoop_fs(
"checksum {}".format(path_info.path), user=path_info.user
)
return self._group(regex, stdout, "checksum")
def copy(self, from_info, to_info, **_kwargs):
dname = posixpath.dirname(to_info.path)
with self.hdfs(to_info) as hdfs:
hdfs.mkdir(dname)
# NOTE: this is how `hadoop fs -cp` works too: it copies through
# your local machine.
with hdfs.open(from_info.path, "rb") as from_fobj:
tmp_info = to_info.parent / tmp_fname(to_info.name)
try:
with hdfs.open(tmp_info.path, "wb") as tmp_fobj:
tmp_fobj.upload(from_fobj)
hdfs.rename(tmp_info.path, to_info.path)
except Exception:
self.remove(tmp_info)
raise
def remove(self, path_info):
if path_info.scheme != "hdfs":
raise NotImplementedError
if self.exists(path_info):
logger.debug("Removing {}".format(path_info.path))
with self.hdfs(path_info) as hdfs:
hdfs.rm(path_info.path)
def exists(self, path_info):
assert not isinstance(path_info, list)
assert path_info.scheme == "hdfs"
with self.hdfs(path_info) as hdfs:
return hdfs.exists(path_info.path)
def _upload(self, from_file, to_info, **_kwargs):
with self.hdfs(to_info) as hdfs:
hdfs.mkdir(posixpath.dirname(to_info.path))
tmp_file = tmp_fname(to_info.path)
with open(from_file, "rb") as fobj:
hdfs.upload(tmp_file, fobj)
hdfs.rename(tmp_file, to_info.path)
def _download(self, from_info, to_file, **_kwargs):
with self.hdfs(from_info) as hdfs:
with open(to_file, "wb+") as fobj:
hdfs.download(from_info.path, fobj)
@contextmanager
def open(self, path_info, mode="r", encoding=None):
assert mode in {"r", "rt", "rb"}
try:
with self.hdfs(path_info) as hdfs, closing(
hdfs.open(path_info.path, mode="rb")
) as fd:
if mode == "rb":
yield fd
else:
yield io.TextIOWrapper(fd, encoding=encoding)
except IOError as e:
# Empty .errno and not specific enough error class in pyarrow,
# see https://issues.apache.org/jira/browse/ARROW-6248
if "file does not exist" in str(e):
raise FileNotFoundError(*e.args)
raise
def list_cache_paths(self, prefix=None):
if not self.exists(self.path_info):
return
if prefix:
root = posixpath.join(self.path_info.path, prefix[:2])
else:
root = self.path_info.path
dirs = deque([root])
with self.hdfs(self.path_info) as hdfs:
while dirs:
try:
for entry in hdfs.ls(dirs.pop(), detail=True):
if entry["kind"] == "directory":
dirs.append(urlparse(entry["name"]).path)
elif entry["kind"] == "file":
yield urlparse(entry["name"]).path
except IOError as e:
# When searching for a specific prefix pyarrow raises an
# exception if the specified cache dir does not exist
if not prefix:
raise e
| [] | [] | [
"SHELL"
] | [] | ["SHELL"] | python | 1 | 0 | |
dapper/cmd/dapper-db-archive/main.go | package main
import (
"bytes"
"database/sql"
"encoding/csv"
"fmt"
"github.com/GeoNet/fits/dapper/dapperlib"
"github.com/GeoNet/fits/dapper/internal/platform/s3"
"github.com/GeoNet/kit/cfg"
"github.com/GeoNet/kit/metrics"
_ "github.com/lib/pq"
"log"
"os"
"path"
"sync"
"time"
)
var (
s3Client s3.S3
db *sql.DB
startTime time.Time
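	// oldestmod starts at the maximum representable time.Time, so any real
	// modtime seen while scanning will be earlier and replace it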
oldestmod = time.Unix(1<<63-62135596801, 999999999)
domain = os.Getenv("DOMAIN")
s3Prefix = os.Getenv("S3_PREFIX")
s3Bucket = os.Getenv("S3_BUCKET")
)
func main() {
startTime = time.Now().UTC()
log.Println("Archiving all previously un-archived records before", startTime.Format(time.Stamp))
var err error
p, err := cfg.PostgresEnv()
if err != nil {
log.Fatalf("error reading DB config from the environment vars: %v", err)
}
db, err = sql.Open("postgres", p.Connection())
if err != nil {
log.Fatalf("error with DB config: %v", err)
}
if s3Bucket == "" {
log.Fatalf("please specify a value for S3_BUCKET")
}
s3Client, err = s3.New()
if err != nil {
log.Fatal(err)
}
stmt, err := db.Prepare("SELECT record_domain, record_key, field, time, value, modtime FROM dapper.records WHERE record_domain=$1 AND archived=FALSE ORDER BY record_key;")
if err != nil {
log.Fatalf("failed to prepare statement: %v", err)
}
rows, err := stmt.Query(domain)
if err != nil {
log.Fatalf("failed to execute query: %v", err)
}
records := make([]dapperlib.Record, 0)
var prev_key string
for rows.Next() {
rec := dapperlib.Record{}
var modtime time.Time
err := rows.Scan(&rec.Domain, &rec.Key, &rec.Field, &rec.Time, &rec.Value, &modtime)
if err != nil {
log.Fatalf("failed to scan record: %v", err)
}
/*
We get the oldest modtime to improve the speed of the `SET archived=true` query later
*/
if modtime.Before(oldestmod) {
oldestmod = modtime
}
records = append(records, rec)
if len(records) >= 100000 && prev_key != rec.Key { //To reduce memory usage do batches, but don't let a key span batches
err = archiveRecords(records)
if err != nil {
log.Printf("failed to archive records: %v", err)
}
records = make([]dapperlib.Record, 0)
oldestmod = time.Unix(1<<63-62135596801, 999999999)
}
prev_key = rec.Key
}
err = archiveRecords(records)
if err != nil {
log.Fatalf("failed to archive records: %v", err)
}
res, err := db.Exec(`DELETE FROM dapper.records WHERE record_domain=$1 AND archived=TRUE AND time < now() - interval '14 days'`, domain)
if err != nil {
log.Fatalf("failed to delete old records: %v", err)
}
n, err := res.RowsAffected()
if err != nil {
log.Fatalf("failed to get number of rows affected: %v", err)
}
log.Printf("archive operation complete. %d archived db records deleted.", n)
}
func archiveRecords(records []dapperlib.Record) error {
log.Printf("archiving %v records", len(records))
tables := dapperlib.ParseRecords(records, dapperlib.MONTH) //TODO: Configurable
log.Printf("across %v tables", len(tables))
stmt, err := db.Prepare("UPDATE dapper.records SET archived=TRUE WHERE record_domain=$1 AND record_key=$2 AND modtime>=$3 AND modtime<=$4;")
if err != nil {
metrics.MsgErr()
return fmt.Errorf("failed to prepare archived statment: %v", err)
}
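	// Bound archiving to 10 concurrent table uploads; wg waits for them all to finish.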
sem := make(chan int, 10)
var wg sync.WaitGroup
var goErr error
for name, t := range tables {
sem <- 1
wg.Add(1)
go func(name string, t dapperlib.Table) {
defer func() {
wg.Done()
<-sem
}()
s3path := path.Join(s3Prefix, fmt.Sprintf("%s.csv", name))
b := &bytes.Buffer{}
exists, err := s3Client.Exists(s3Bucket, s3path)
if err != nil {
goErr = fmt.Errorf("couldn't determine if CSV already exists: %v", err) //TODO: Better error handling
metrics.MsgErr()
return
}
if exists {
err := s3Client.Get(s3Bucket, s3path, "", b)
if err != nil {
goErr = fmt.Errorf("failed to get existing CSV file: %v", err)
metrics.MsgErr()
return
}
metrics.MsgRx()
r := csv.NewReader(b)
inCsv, err := r.ReadAll()
if err != nil {
goErr = fmt.Errorf("failed to parse existing CSV file: %v", err)
metrics.MsgErr()
return
}
err = t.AddCSV(inCsv, nil)
if err != nil {
goErr = fmt.Errorf("failed to add existing CSV records to table: %v", err)
metrics.MsgErr()
return
}
}
b.Reset()
outCsv := t.ToCSV()
w := csv.NewWriter(b)
err = w.WriteAll(outCsv)
if err != nil {
goErr = fmt.Errorf("failed to write csv: %v", err)
metrics.MsgErr()
return
}
err = s3Client.Put(s3Bucket, s3path, b.Bytes())
if err != nil {
goErr = fmt.Errorf("failed to write to S3: %v", err)
metrics.MsgErr()
return
}
metrics.MsgTx()
_, err = stmt.Exec(t.Domain, t.Key, oldestmod, startTime)
if err != nil {
goErr = fmt.Errorf("failed to execute archived update: %v", err)
metrics.MsgErr()
return
}
metrics.MsgProc()
}(name, t)
}
wg.Wait()
log.Println("batch done")
return goErr
}
| [
"\"DOMAIN\"",
"\"S3_PREFIX\"",
"\"S3_BUCKET\""
] | [] | [
"DOMAIN",
"S3_BUCKET",
"S3_PREFIX"
] | [] | ["DOMAIN", "S3_BUCKET", "S3_PREFIX"] | go | 3 | 0 | |
tpdatasrc/tpgamefiles/scr/tpModifiers/fatigue_exhaustion.py | from templeplus.pymod import PythonModifier
from toee import *
import tpdp
from utilities import *
print "Registering fatigue_exhaust"
# Note: Not currently supporting "no run or charge" for fatigue as per the SRD. This is the same
# as how barbarian fatigue previously worked.
def FatigueOnAdd(attachee, args, evt_obj):
partSys = game.particles("Barbarian Fatigue", attachee )
exhausted = args.get_arg(2)
if exhausted != 0:
attachee.float_text_line("Exhausted")
else:
attachee.float_text_line("Fatigued")
args.set_arg(4, partSys)
return 0
def FatigueTooltip(attachee, args, evt_obj):
# Set the tooltip
exhausted = args.get_arg(2)
if exhausted != 0:
if exhausted == -1:
evt_obj.append("Exhausted")
else:
evt_obj.append("Exhausted (" + str(exhausted) + " rounds)")
else:
fatigued = args.get_arg(1)
if fatigued == -1:
evt_obj.append("Fatigue")
else:
evt_obj.append("Fatigue (" + str(fatigued) + " rounds)")
return 0
def FatigueEffectTooltip(attachee, args, evt_obj):
# Set the tooltip
exhausted = args.get_arg(2)
if exhausted != 0:
if exhausted == -1:
evt_obj.append(tpdp.hash("FATIGUE_EXHAUST"), -2, "Exhausted")
else:
evt_obj.append(tpdp.hash("FATIGUE_EXHAUST"), -2, "Exhausted (" + str(exhausted) + " rounds)")
else:
fatigued = args.get_arg(1)
if fatigued == -1:
evt_obj.append(tpdp.hash("FATIGUE_EXHAUST"), -2, "Fatigue")
else:
evt_obj.append(tpdp.hash("FATIGUE_EXHAUST"), -2, "Fatigue (" + str(fatigued) + " rounds)")
return 0
def FatigueDexMod(attachee, args, evt_obj):
exhaustDuration = args.get_arg(2)
if exhaustDuration != 0:
evt_obj.bonus_list.add(-6, 0, "Exhausted")
else:
evt_obj.bonus_list.add(-2, 0, "Fatigue")
return 0
def FatigueStrMod(attachee, args, evt_obj):
exhaustDuration = args.get_arg(2)
if exhaustDuration != 0:
evt_obj.bonus_list.add(-6, 0, "Exhausted")
else:
evt_obj.bonus_list.add(-2, 0, "Fatigue")
return 0
def ExhaustedMovement(attachee, args, evt_obj):
#Half movement if exhausted
exhausted = args.get_arg(2)
if exhausted != 0:
evt_obj.factor = evt_obj.factor * .5
return 0
def FatigueBeginRound(attachee, args, evt_obj):
roundsToReduce = evt_obj.data1
rageFatigueDuration = args.get_arg(0)
fatigueDuration = args.get_arg(1)
exhaustDuration = args.get_arg(2)
if rageFatigueDuration > 0:
rageFatigueDuration = rageFatigueDuration - roundsToReduce
rageFatigueDuration = max(rageFatigueDuration, 0)
args.set_arg(0, rageFatigueDuration)
if exhaustDuration > 0:
exhaustDuration = exhaustDuration - roundsToReduce
exhaustDuration = max(exhaustDuration, 0)
args.set_arg(2, exhaustDuration)
if fatigueDuration > -1:
fatigueDuration = fatigueDuration - roundsToReduce
fatigueDuration = max(fatigueDuration, 0)
if fatigueDuration < 1:
#See if the exhaustion duration should be downgraded to the fatigue duration
if (exhaustDuration == -1) or (exhaustDuration > 0):
args.set_arg(1, exhaustDuration)
args.set_arg(2, 0)
else:
args.condition_remove()
else:
args.set_arg(1, fatigueDuration)
return 0
def FatigueRemove(attachee, args, evt_obj):
args.condition_remove()
return 0
def FatigueAddHeal(attachee, args, evt_obj):
val = evt_obj.is_modifier("sp-Heal")
if val:
attachee.float_text_line("Fatigue Removed")
args.condition_remove()
return 0
def BarbarianFatiguedQuery(attachee, args, evt_obj):
rageFatigueDuration = args.get_arg(0)
evt_obj.return_val = (rageFatigueDuration > 0)
return 0
def FatiguedQuery(attachee, args, evt_obj):
fatigueDuration = args.get_arg(1)
if (fatigueDuration > 0) or (fatigueDuration == -1):
evt_obj.return_val = 1
return 0
def ExhaustedQuery(attachee, args, evt_obj):
exhaustDuration = args.get_arg(2)
if exhaustDuration != 0:
evt_obj.return_val = 1
return 0
def AddBarbarianFatigueSignal(attachee, args, evt_obj):
#Always update the barbarian rage timer
duration = evt_obj.data1
args.set_arg(0, duration)
#Update the fatigue duration if this is longer
fatigueDuration = args.get_arg(1)
if duration > fatigueDuration and fatigueDuration != -1:
args.set_arg(1, duration)
return 0
def AddFatigueSignal(attachee, args, evt_obj):
    #Data 2 signals an indefinite duration (sending -1 in a signal does not work well)
    if evt_obj.data2 == 0:
        fatigueDurationNew = evt_obj.data1
    else:
        fatigueDurationNew = -1
upgrade = args.get_arg(3)
exhaustDuration = args.get_arg(2)
#See if an upgrade should be performed
if exhaustDuration == 0:
if upgrade:
args.set_arg(2, fatigueDurationNew)
game.particles("Fatigue Boom", attachee) #Play only the boom for an upgrade
attachee.float_text_line("Exhausted")
else:
attachee.float_text_line("Already Fatigued")
else:
        attachee.float_text_line("Already Fatigued")
return 0
def AddExhaustionSignal(attachee, args, evt_obj):
exhaustOld = args.get_arg(2)
    #Data 2 signals an indefinite duration (sending -1 in a signal does not work well)
if evt_obj.data2 == 0:
exhaustNew = evt_obj.data1
else:
exhaustNew = -1
if exhaustOld == 0:
args.set_arg(1, exhaustNew)
args.set_arg(2, exhaustNew)
game.particles("Fatigue Boom", attachee) #Play only the boom for an upgrade
attachee.float_text_line("Exhausted")
else:
attachee.float_text_line("Already Exhausted")
return 0
def FatiguePlayParticlesSaveId(attachee, args, evt_obj):
partSys = game.particles("Barbarian Fatigue", attachee)
args.set_arg(4, partSys)
return 0
def FatigueOnRemove(attachee, args, evt_obj):
partSys = args.get_arg(4)
game.particles_kill(partSys)
return 0
def FatigueOnRemove2(attachee, args, evt_obj):
game.particles("Barbarian Fatigue-END", attachee)
return 0
Fatigue = PythonModifier("FatigueExhaust", 6) #Barbarian Fatigue Duration, Fatigue Duration, Exhaustion Duration, Upgradable, Particle System, Spare
Fatigue.AddHook(ET_OnConditionAdd, EK_NONE, FatigueOnAdd, ())
Fatigue.AddHook(ET_OnGetTooltip, EK_NONE, FatigueTooltip, ())
Fatigue.AddHook(ET_OnGetEffectTooltip, EK_NONE, FatigueEffectTooltip, ())
Fatigue.AddHook(ET_OnAbilityScoreLevel, EK_STAT_DEXTERITY, FatigueDexMod, ())
Fatigue.AddHook(ET_OnAbilityScoreLevel, EK_STAT_STRENGTH, FatigueStrMod, ())
Fatigue.AddHook(ET_OnGetMoveSpeed, EK_NONE, ExhaustedMovement, ())
Fatigue.AddHook(ET_OnBeginRound, EK_NONE, FatigueBeginRound, ())
Fatigue.AddHook(ET_OnNewDay, EK_NEWDAY_REST, FatigueRemove, ())
Fatigue.AddHook(ET_OnConditionAddPre, EK_NONE, FatigueAddHeal, ())
Fatigue.AddHook(ET_OnConditionAddFromD20StatusInit, EK_NONE, FatiguePlayParticlesSaveId, ())
Fatigue.AddHook(ET_OnConditionRemove, EK_NONE, FatigueOnRemove, ())
Fatigue.AddHook(ET_OnConditionRemove2, EK_NONE, FatigueOnRemove2, ())
Fatigue.AddHook(ET_OnD20Query, EK_Q_Barbarian_Fatigued, BarbarianFatiguedQuery, ())
Fatigue.AddHook(ET_OnD20Signal, EK_S_Killed, FatigueRemove, ())
Fatigue.AddHook(ET_OnD20PythonQuery, "Fatigued", FatiguedQuery, ())
Fatigue.AddHook(ET_OnD20PythonQuery, "Exhausted", ExhaustedQuery, ())
Fatigue.AddHook(ET_OnD20PythonSignal, "Add Barbarian Fatigue", AddBarbarianFatigueSignal, ())
Fatigue.AddHook(ET_OnD20PythonSignal, "Add Fatigue", AddFatigueSignal, ())
Fatigue.AddHook(ET_OnD20PythonSignal, "Add Exhaustion", AddExhaustionSignal, ())
| [] | [] | [] | [] | [] | python | null | null | null |
soda_test.go | package soda
import (
"encoding/csv"
"encoding/json"
"io"
"os"
"sync/atomic"
"testing"
"time"
)
const (
apptoken = ""
endpoint = "https://data.ct.gov/resource/hma6-9xbg"
)
func TestGetRequestSerialize(t *testing.T) {
gr := NewGetRequest(endpoint, apptoken)
gr.Format = "json"
gr.Filters["farm_name"] = "Bell Nurseries"
gr.Filters["item"] = "Salad/micro greens"
gr.Query.Limit = 1
want := endpoint + ".json"
if gr.GetEndpoint() != want {
t.Errorf("Want %s, have %s", want, gr.GetEndpoint())
}
want = "%24limit=1&farm_name=Bell+Nurseries&item=Salad%2Fmicro+greens"
if gr.URLValues().Encode() != want {
t.Errorf("Want %s, have %s", want, gr.URLValues().Encode())
}
gr.Filters = make(SimpleFilters) //reset filters
gr.Query.Select = []string{"farm_name", "category", "item", "website"}
gr.Query.Where = "item like '%ADISH%'"
gr.Query.Limit = 10
gr.Query.Offset = 20
gr.Query.AddOrder("category", DirDesc)
gr.Query.AddOrder("farm_name", false)
want = "%24limit=10&%24offset=20&%24order=category+DESC%2Cfarm_name+ASC&%24select=farm_name%2Ccategory%2Citem%2Cwebsite&%24where=item+like+%27%25ADISH%25%27"
if gr.URLValues().Encode() != want {
t.Errorf("Want %s, have %s", want, gr.URLValues().Encode())
}
}
func TestCount(t *testing.T) {
gr := NewGetRequest(endpoint, apptoken)
//count all records
count, err := gr.Count()
if err != nil {
t.Fatal(err)
}
if count < 22000 {
t.Fatalf("Expected a count of atleast %d, have %d", 22000, count)
}
t.Logf("Count all: %d\n", count)
//filtered count
gr.Filters["farm_name"] = "Bell Nurseries"
gr.Filters["item"] = "Salad/micro greens"
count, err = gr.Count()
if err != nil {
t.Fatal(err)
}
if count != 1 {
t.Fatalf("Expected a count of %d, have %d", 1, count)
}
t.Logf("Count filtered: %d\n", count)
}
func TestFields(t *testing.T) {
gr := NewGetRequest(endpoint, apptoken)
fields, err := gr.Fields()
if err != nil {
t.Fatal(err)
}
if len(fields) < 2 {
t.Fatalf("Expected atleast %d fields, have %d", 2, len(fields))
}
t.Logf("Fields: %v\n", fields)
}
func TestModified(t *testing.T) {
gr := NewGetRequest(endpoint, apptoken)
modified, err := gr.Modified()
if err != nil {
t.Fatal(err)
}
t.Log(modified)
}
func TestGetJSON(t *testing.T) {
gr := NewGetRequest(endpoint, apptoken)
gr.Format = "json"
gr.Query.Where = "item = 'Radishes'"
gr.Query.Limit = 1000
gr.Query.AddOrder("category", DirDesc)
gr.Query.AddOrder("farm_name", DirAsc)
resp, err := gr.Get()
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
results := []Business{}
err = json.NewDecoder(resp.Body).Decode(&results)
if err != nil {
t.Fatal(err)
}
if len(results) == 0 {
t.Errorf("No results found")
}
for _, res := range results {
if res.Item != "Radishes" {
t.Errorf("Item %s is not 'Radishes'", res.Item)
}
}
t.Logf("%d JSON results\n", len(results))
}
func TestGetCSV(t *testing.T) {
gr := NewGetRequest(endpoint, apptoken)
gr.Format = "csv"
gr.Query.Select = []string{"category", "farm_name", "item"}
gr.Filters["farm_name"] = "Beaver Brook Farm"
gr.Filters["item"] = "Pumpkins"
resp, err := gr.Get()
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
csvreader := csv.NewReader(resp.Body)
rows := 0
for {
record, err := csvreader.Read()
if err != nil {
if err == io.EOF {
break
}
t.Fatal(err)
}
if rows == 0 {
rows++
continue
}
want := []string{"Seasonal Items", "Beaver Brook Farm", "Pumpkins"}
for i := range want {
if record[i] != want[i] {
t.Errorf("Want '%v', have '%v'", want[i], record[i])
}
}
rows++
}
t.Logf("%d CSV rows\n", rows)
}
func TestMetadataConstructor(t *testing.T) {
ms := []metadata{
newMetadata("https://data.ct.gov/resource/y6p2-px98"),
newMetadata("https://data.ct.gov/resource/y6p2-px98/"),
}
for _, m := range ms {
wb := "https://data.ct.gov"
wi := "y6p2-px98"
if m.baseurl != wb {
t.Errorf("Want baseurl %s, have %s", wb, m.baseurl)
}
if m.identifier != wi {
t.Errorf("Want identifier %s, have %s", wi, m.identifier)
}
}
}
func TestGetMetadata(t *testing.T) {
m := newMetadata(endpoint)
gr := NewGetRequest(endpoint, apptoken)
md, err := gr.Metadata.Get()
if err != nil {
t.Fatal(err)
}
if md.ID != m.identifier {
t.Errorf("Want ID %s, have %s", m.identifier, md.ID)
}
w := "2014-09-04 15:01:44 +0000 UTC"
if md.CreatedAt.Time().UTC().String() != w {
t.Errorf("Want CreatedAt %s, have %s", w, md.CreatedAt.Time().UTC().String())
}
}
func TestGetMetadataError(t *testing.T) {
gr := NewGetRequest(endpoint[:len(endpoint)-2], apptoken)
md, err := gr.Metadata.Get()
if err == nil || md != nil {
t.Fatal("Wanted error")
}
}
func TestGetMetadataColumns(t *testing.T) {
gr := NewGetRequest(endpoint, apptoken)
cols, err := gr.Metadata.GetColumns()
if err != nil {
t.Fatal(err)
}
if len(cols) != 15 {
t.Errorf("Want %d columns, have %d", 15, len(cols))
}
want := []string{"Farm Name", "Category", "Item", "Farmer ID", "Location 1"} //not complete, but good enough
for _, w := range want {
found := false
for _, col := range cols {
if col.Name == w {
found = true
break
}
}
if !found {
t.Errorf("Did not find column %s", w)
}
}
}
func TestOffsetGetRequest(t *testing.T) {
//only run using Travis Go Tip version or when not in Travis
travisGo := os.Getenv("TRAVIS_GO_VERSION")
if travisGo != "" && travisGo != "tip" {
t.Logf("Skipping on go version %s", travisGo)
return
}
gr := NewGetRequest(endpoint, apptoken)
gr.Format = "json"
gr.Query.AddOrder("farm_name", DirAsc)
ogr, err := NewOffsetGetRequest(gr)
if err != nil {
t.Fatal(err)
}
records := uint64(0)
start := time.Now()
numGoroutines := 4
batchSize := uint(2000)
for i := 0; i < numGoroutines; i++ {
ogr.Add(1)
go func() {
defer ogr.Done()
for {
resp, err := ogr.Next(batchSize)
if err == ErrDone {
break
}
				if err != nil {
					t.Error(err) // t.Fatal must not be called from a non-test goroutine
					return
				}
results := []Business{}
err = json.NewDecoder(resp.Body).Decode(&results)
resp.Body.Close()
				if err != nil {
					t.Error(err) // t.Fatal must not be called from a non-test goroutine
					return
				}
atomic.AddUint64(&records, uint64(len(results)))
}
}()
}
ogr.Wait()
if uint(records) != ogr.Count() {
t.Errorf("Wanted %d records, have %d", ogr.Count(), records)
}
t.Logf("Got %d records in %s using %d goroutines", records, time.Since(start), numGoroutines)
}
type Business struct {
Business string `json:"business"`
Category string `json:"category"`
FarmName string `json:"farm_name"`
FarmerID string `json:"farmer_id"`
Item string `json:"item"`
L string `json:"l"`
Location1 struct {
HumanAddress string `json:"human_address"`
Latitude string `json:"latitude"`
Longitude string `json:"longitude"`
NeedsRecoding bool `json:"needs_recoding"`
} `json:"location_1"`
Phone1 string `json:"phone1"`
Zipcode string `json:"zipcode"`
}
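// ExampleNewGetRequest is an illustrative sketch (compiled, but not executed
// without an Output comment): build a filtered JSON request and decode the
// response, mirroring the tests above. The filter value is hypothetical.
func ExampleNewGetRequest() {
	gr := NewGetRequest(endpoint, apptoken)
	gr.Format = "json"
	gr.Filters["item"] = "Radishes"
	gr.Query.Limit = 5
	resp, err := gr.Get()
	if err != nil {
		return
	}
	defer resp.Body.Close()
	var results []Business
	_ = json.NewDecoder(resp.Body).Decode(&results)
}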
| ["\"TRAVIS_GO_VERSION\""] | [] | ["TRAVIS_GO_VERSION"] | [] | ["TRAVIS_GO_VERSION"] | go | 1 | 0 | |
go/tools/releaser/file.go | // Copyright 2021 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
"sync"
)
var repoRootState = struct {
once sync.Once
dir string
err error
}{}
// repoRoot returns the workspace root directory. If this program was invoked
// with 'bazel run', repoRoot returns the BUILD_WORKSPACE_DIRECTORY environment
// variable. Otherwise, repoRoot walks up the directory tree and finds a
// WORKSPACE file.
func repoRoot() (string, error) {
repoRootState.once.Do(func() {
if wsDir := os.Getenv("BUILD_WORKSPACE_DIRECTORY"); wsDir != "" {
repoRootState.dir = wsDir
return
}
dir, err := os.Getwd()
if err != nil {
repoRootState.err = err
return
}
for {
_, err := os.Stat(filepath.Join(dir, "WORKSPACE"))
if err == nil {
repoRootState.dir = dir
return
}
			if !os.IsNotExist(err) {
repoRootState.err = err
return
}
parent := filepath.Dir(dir)
if parent == dir {
repoRootState.err = errors.New("could not find workspace directory")
return
}
dir = parent
}
})
return repoRootState.dir, repoRootState.err
}
// extractArchive extracts a zip or tar.gz archive opened in f, into the
// directory dir, stripping stripPrefix from each entry before extraction.
// name is the name of the archive, used for error reporting.
func extractArchive(f *os.File, name, dir, stripPrefix string) (err error) {
if strings.HasSuffix(name, ".zip") {
return extractZip(f, name, dir, stripPrefix)
}
if strings.HasSuffix(name, ".tar.gz") {
zr, err := gzip.NewReader(f)
if err != nil {
return fmt.Errorf("extracting %s: %w", name, err)
}
defer func() {
if cerr := zr.Close(); err == nil && cerr != nil {
err = cerr
}
}()
return extractTar(zr, name, dir, stripPrefix)
}
return fmt.Errorf("could not determine archive format from extension: %s", name)
}
func extractZip(zf *os.File, name, dir, stripPrefix string) (err error) {
stripPrefix += "/"
fi, err := zf.Stat()
if err != nil {
return err
}
defer func() {
if err != nil {
err = fmt.Errorf("extracting zip %s: %w", name, err)
}
}()
zr, err := zip.NewReader(zf, fi.Size())
if err != nil {
return err
}
extractFile := func(f *zip.File) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("extracting %s: %w", f.Name, err)
}
}()
outPath, err := extractedPath(dir, stripPrefix, f.Name)
if err != nil {
return err
}
if strings.HasSuffix(f.Name, "/") {
return os.MkdirAll(outPath, 0777)
}
r, err := f.Open()
if err != nil {
return err
}
defer r.Close()
parent := filepath.Dir(outPath)
if err := os.MkdirAll(parent, 0777); err != nil {
return err
}
w, err := os.Create(outPath)
if err != nil {
return err
}
defer func() {
if cerr := w.Close(); err == nil && cerr != nil {
err = cerr
}
}()
_, err = io.Copy(w, r)
return err
}
for _, f := range zr.File {
if err := extractFile(f); err != nil {
return err
}
}
return nil
}
func extractTar(r io.Reader, name, dir, stripPrefix string) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("extracting tar %s: %w", name, err)
}
}()
tr := tar.NewReader(r)
extractFile := func(hdr *tar.Header) (err error) {
outPath, err := extractedPath(dir, stripPrefix, hdr.Name)
if err != nil {
return err
}
switch hdr.Typeflag {
case tar.TypeDir:
return os.MkdirAll(outPath, 0777)
		case tar.TypeReg:
			// Some tarballs omit explicit directory entries, so ensure the parent exists.
			if err := os.MkdirAll(filepath.Dir(outPath), 0777); err != nil {
				return err
			}
			w, err := os.Create(outPath)
if err != nil {
return err
}
defer func() {
if cerr := w.Close(); err == nil && cerr != nil {
err = cerr
}
}()
_, err = io.Copy(w, tr)
return err
default:
return fmt.Errorf("unsupported file type %x: %q", hdr.Typeflag, hdr.Name)
}
}
stripPrefix += "/"
for {
hdr, err := tr.Next()
if err == io.EOF {
break
} else if err != nil {
return err
}
if err := extractFile(hdr); err != nil {
return err
}
}
return nil
}
// extractedPath returns the file path that a file in an archive should be
// extracted to. It verifies that entryName starts with stripPrefix and does not
// point outside dir.
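// For example (illustrative values): extractedPath("/tmp/out", "repo-1.0",
// "repo-1.0/src/a.go") returns "/tmp/out/src/a.go", while an entry such as
// "repo-1.0/../etc/passwd" is rejected.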
func extractedPath(dir, stripPrefix, entryName string) (string, error) {
if !strings.HasPrefix(entryName, stripPrefix) {
return "", fmt.Errorf("entry does not start with prefix %s: %q", stripPrefix, entryName)
}
entryName = entryName[len(stripPrefix):]
if entryName == "" {
return dir, nil
}
if path.IsAbs(entryName) {
return "", fmt.Errorf("entry has an absolute path: %q", entryName)
}
if strings.HasPrefix(entryName, "../") {
return "", fmt.Errorf("entry refers to something outside the archive: %q", entryName)
}
entryName = strings.TrimSuffix(entryName, "/")
if path.Clean(entryName) != entryName {
return "", fmt.Errorf("entry does not have a clean path: %q", entryName)
}
return filepath.Join(dir, entryName), nil
}
// copyDir recursively copies a directory tree.
func copyDir(toDir, fromDir string) error {
if err := os.MkdirAll(toDir, 0777); err != nil {
return err
}
return filepath.Walk(fromDir, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
rel, _ := filepath.Rel(fromDir, path)
if rel == "." {
return nil
}
outPath := filepath.Join(toDir, rel)
if fi.IsDir() {
return os.Mkdir(outPath, 0777)
} else {
return copyFile(outPath, path)
}
})
}
func copyFile(toFile, fromFile string) (err error) {
r, err := os.Open(fromFile)
if err != nil {
return err
}
defer r.Close()
w, err := os.Create(toFile)
if err != nil {
return err
}
defer func() {
if cerr := w.Close(); err == nil && cerr != nil {
err = cerr
}
}()
_, err = io.Copy(w, r)
return err
}
func sha256SumFile(name string) (string, error) {
r, err := os.Open(name)
if err != nil {
return "", err
}
defer r.Close()
h := sha256.New()
if _, err := io.Copy(h, r); err != nil {
return "", err
}
sum := h.Sum(nil)
return hex.EncodeToString(sum), nil
}
// copyFileToMirror uploads a file to the GCS bucket backing mirror.bazel.build.
// gsutil must be installed, and the user must be authenticated with
// 'gcloud auth login' and be allowed to write files to the bucket.
func copyFileToMirror(ctx context.Context, path, fileName string) (err error) {
dest := "gs://bazel-mirror/" + path
defer func() {
if err != nil {
err = fmt.Errorf("copying file %s to %s: %w", fileName, dest, err)
}
}()
// This function shells out to gsutil instead of using
// cloud.google.com/go/storage because that package has a million
// dependencies.
	return runForError(ctx, ".", "gsutil", "cp", "-n", fileName, dest)
}
| ["\"BUILD_WORKSPACE_DIRECTORY\""] | [] | ["BUILD_WORKSPACE_DIRECTORY"] | [] | ["BUILD_WORKSPACE_DIRECTORY"] | go | 1 | 0 | |
src/iiw_book_service/settings.py | """
Django settings for iiw_book_service project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "*uj-^cy8ln$@3ni7t)*(0+3c=+yosvj^tv85%*zy0j#g3-i%h*"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get("DEBUG") == "true"
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
"iiw_book.apps.IIWBookConfig",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
]
ROOT_URLCONF = "iiw_book_service.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "iiw_book_service.wsgi.application"
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"
},
"simple": {"format": "%(levelname)s %(message)s"},
},
"handlers": {
"console_handler": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "verbose",
}
},
"loggers": {
"api": {"handlers": ["console_handler"], "level": "DEBUG", "propagate": False},
"django": {
"handlers": ["console_handler"],
"level": "INFO",
"propagate": False,
},
"django.request": {
"handlers": ["console_handler"],
"level": "INFO",
"propagate": False,
},
},
"root": {
"handlers": ["console_handler"],
"level": str(os.getenv("DJANGO_LOG_LEVEL", "INFO")).upper(),
"propagate": False,
},
}
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# DATABASES = {
# "default": {
# "ENGINE": "django.db.backends.sqlite3",
# "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
# }
# }
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": os.environ.get("DB_NAME"),
"USER": os.environ.get("DB_USER"),
"PASSWORD": os.environ.get("DB_PASSWORD"),
"HOST": os.environ.get("DB_HOST"),
"PORT": os.environ.get("DB_PORT"),
}
}
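# Illustrative environment for local development (values are examples only):
#   DB_NAME=iiw_book DB_USER=postgres DB_PASSWORD=secret DB_HOST=localhost DB_PORT=5432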
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "d6bfd10a-1e7a-478c-87e7-775181be591a",
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/assets/"
STATIC_ROOT = os.path.join(BASE_DIR, "assets")
STATICFILES_DIRS = (os.path.join(PROJECT_ROOT, "assets"),)
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
EMAIL_HOST = os.environ.get("EMAIL_HOST")
EMAIL_PORT = os.environ.get("EMAIL_PORT")
EMAIL_USE_SSL = os.environ.get("EMAIL_USE_SSL") == "true"
LOGOUT_REDIRECT_URL = 'index'
# Gunicorn options
timeout = 300
| [] | [] | ["DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "DJANGO_LOG_LEVEL", "EMAIL_USE_SSL", "EMAIL_HOST", "DEBUG", "EMAIL_PORT", "DB_USER"] | [] | ["DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "DJANGO_LOG_LEVEL", "EMAIL_USE_SSL", "EMAIL_HOST", "DEBUG", "EMAIL_PORT", "DB_USER"] | python | 10 | 0 | |
cmd/jiralert/main.go | // Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/json"
"flag"
"fmt"
"net/http"
"os"
"runtime"
"strconv"
"github.com/andygrunwald/go-jira"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus-community/jiralert/pkg/alertmanager"
"github.com/prometheus-community/jiralert/pkg/config"
"github.com/prometheus-community/jiralert/pkg/notify"
"github.com/prometheus-community/jiralert/pkg/template"
_ "net/http/pprof"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const (
unknownReceiver = "<unknown>"
logFormatLogfmt = "logfmt"
logFormatJSON = "json"
)
var (
listenAddress = flag.String("listen-address", ":9097", "The address to listen on for HTTP requests.")
configFile = flag.String("config", "config/jiralert.yml", "The JIRAlert configuration file")
logLevel = flag.String("log.level", "info", "Log filtering level (debug, info, warn, error)")
logFormat = flag.String("log.format", logFormatLogfmt, "Log format to use ("+logFormatLogfmt+", "+logFormatJSON+")")
hashJiraLabel = flag.Bool("hash-jira-label", false, "if enabled: hash the key-value pairs inside of ALERT{...} in the created jira issue labels "+
"- this ensures that the label text does not overflow the allowed length in jira (255)")
// Version is the build version, set by make to latest git tag/hash via `-ldflags "-X main.Version=$(VERSION)"`.
Version = "<local build>"
)
func main() {
if os.Getenv("DEBUG") != "" {
runtime.SetBlockProfileRate(1)
runtime.SetMutexProfileFraction(1)
}
flag.Parse()
var logger = setupLogger(*logLevel, *logFormat)
level.Info(logger).Log("msg", "starting JIRAlert", "version", Version)
if !*hashJiraLabel {
level.Warn(logger).Log("msg", "Using deprecated jira label generation - "+
"please read https://github.com/prometheus-community/jiralert/pull/79 "+
"and try -hash-jira-label")
}
config, _, err := config.LoadFile(*configFile, logger)
if err != nil {
level.Error(logger).Log("msg", "error loading configuration", "path", *configFile, "err", err)
os.Exit(1)
}
tmpl, err := template.LoadTemplate(config.Template, logger)
if err != nil {
level.Error(logger).Log("msg", "error loading templates", "path", config.Template, "err", err)
os.Exit(1)
}
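	// Illustrative Alertmanager webhook payload posted to /alert (shape per
	// alertmanager.Data; receiver name and labels are hypothetical):
	//   {"receiver":"jira-ab","status":"firing","groupLabels":{"alertname":"X"},"alerts":[...]}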
http.HandleFunc("/alert", func(w http.ResponseWriter, req *http.Request) {
level.Debug(logger).Log("msg", "handling /alert webhook request")
defer func() { _ = req.Body.Close() }()
// https://godoc.org/github.com/prometheus/alertmanager/template#Data
data := alertmanager.Data{}
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
errorHandler(w, http.StatusBadRequest, err, unknownReceiver, &data, logger)
return
}
conf := config.ReceiverByName(data.Receiver)
if conf == nil {
errorHandler(w, http.StatusNotFound, fmt.Errorf("receiver missing: %s", data.Receiver), unknownReceiver, &data, logger)
return
}
level.Debug(logger).Log("msg", " matched receiver", "receiver", conf.Name)
// TODO: Consider reusing notifiers or just jira clients to reuse connections.
tp := jira.BasicAuthTransport{
Username: conf.User,
Password: string(conf.Password),
}
client, err := jira.NewClient(tp.Client(), conf.APIURL)
if err != nil {
errorHandler(w, http.StatusInternalServerError, err, conf.Name, &data, logger)
return
}
if retry, err := notify.NewReceiver(logger, conf, tmpl, client.Issue).Notify(&data, *hashJiraLabel); err != nil {
var status int
if retry {
// Instruct Alertmanager to retry.
status = http.StatusServiceUnavailable
} else {
status = http.StatusInternalServerError
}
errorHandler(w, status, err, conf.Name, &data, logger)
return
}
requestTotal.WithLabelValues(conf.Name, "200").Inc()
})
http.HandleFunc("/", HomeHandlerFunc())
http.HandleFunc("/config", ConfigHandlerFunc(config))
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { http.Error(w, "OK", http.StatusOK) })
http.Handle("/metrics", promhttp.Handler())
if os.Getenv("PORT") != "" {
*listenAddress = ":" + os.Getenv("PORT")
}
level.Info(logger).Log("msg", "listening", "address", *listenAddress)
err = http.ListenAndServe(*listenAddress, nil)
if err != nil {
level.Error(logger).Log("msg", "failed to start HTTP server", "address", *listenAddress)
os.Exit(1)
}
}
func errorHandler(w http.ResponseWriter, status int, err error, receiver string, data *alertmanager.Data, logger log.Logger) {
w.WriteHeader(status)
response := struct {
Error bool
Status int
Message string
}{
true,
status,
err.Error(),
}
// JSON response
	body, _ := json.Marshal(response)
	fmt.Fprint(w, string(body))
level.Error(logger).Log("msg", "error handling request", "statusCode", status, "statusText", http.StatusText(status), "err", err, "receiver", receiver, "groupLabels", data.GroupLabels)
requestTotal.WithLabelValues(receiver, strconv.FormatInt(int64(status), 10)).Inc()
}
func setupLogger(lvl string, fmt string) (logger log.Logger) {
var filter level.Option
switch lvl {
case "error":
filter = level.AllowError()
case "warn":
filter = level.AllowWarn()
case "debug":
filter = level.AllowDebug()
default:
filter = level.AllowInfo()
}
if fmt == logFormatJSON {
logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr))
} else {
logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
}
logger = level.NewFilter(logger, filter)
logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
return
}
| ["\"DEBUG\"", "\"PORT\"", "\"PORT\""] | [] | ["PORT", "DEBUG"] | [] | ["PORT", "DEBUG"] | go | 2 | 0 | |
build.go | // +build ignore
/*
Copyright (c) 2019 the Octant contributors. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package main
import (
"bytes"
"fmt"
"log"
"os"
"os/exec"
"os/signal"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/spf13/cobra"
)
var (
VERSION = "v0.18.0"
GOPATH = os.Getenv("GOPATH")
GIT_COMMIT = gitCommit()
BUILD_TIME = time.Now().UTC().Format(time.RFC3339)
LD_FLAGS = fmt.Sprintf("-X \"main.buildTime=%s\" -X main.gitCommit=%s", BUILD_TIME, GIT_COMMIT)
GO_FLAGS = fmt.Sprintf("-ldflags=%s", LD_FLAGS)
)
func main() {
rootCmd := &cobra.Command{
Use: "build.go",
Short: "Build tools for Octant",
}
rootCmd.Name()
rootCmd.AddCommand(
&cobra.Command{
Use: "ci",
Short: "full build, running tests",
Run: func(cmd *cobra.Command, args []string) {
test()
vet()
webDeps()
webTest()
webBuild()
build()
},
},
&cobra.Command{
Use: "ci-quick",
Short: "full build, skipping tests",
Run: func(cmd *cobra.Command, args []string) {
webDeps()
webBuild()
build()
},
},
&cobra.Command{
Use: "web-deps",
Short: "install client dependencies",
Run: func(cmd *cobra.Command, args []string) {
webDeps()
},
},
&cobra.Command{
Use: "web-test",
Short: "run client tests",
Run: func(cmd *cobra.Command, args []string) {
verifyRegistry()
webDeps()
webTest()
},
},
&cobra.Command{
Use: "web-build",
Short: "client build, skipping tests",
Run: func(cmd *cobra.Command, args []string) {
verifyRegistry()
webDeps()
webBuild()
},
},
&cobra.Command{
Use: "web",
Short: "client build, running tests",
Run: func(cmd *cobra.Command, args []string) {
webDeps()
webTest()
webBuild()
},
},
&cobra.Command{
Use: "generate",
Short: "update generated artifacts",
Run: func(cmd *cobra.Command, args []string) {
generate()
goFmt(true)
},
},
&cobra.Command{
Use: "vet",
Short: "lint server code",
Run: func(cmd *cobra.Command, args []string) {
vet()
},
},
&cobra.Command{
Use: "fmt",
Short: "format server code",
Run: func(cmd *cobra.Command, args []string) {
goFmt(true)
},
},
&cobra.Command{
Use: "test",
Short: "run server tests",
Run: func(cmd *cobra.Command, args []string) {
test()
},
},
&cobra.Command{
Use: "verify",
Short: "verify resolving correct registry",
Run: func(cmd *cobra.Command, args []string) {
verifyRegistry()
},
},
&cobra.Command{
Use: "build",
Short: "server build, skipping tests",
Run: func(cmd *cobra.Command, args []string) {
build()
},
},
&cobra.Command{
Use: "build-electron",
Short: "server build to extraResources, skipping tests",
Run: func(cmd *cobra.Command, args []string) {
webBuildElectron()
buildElectron()
},
},
&cobra.Command{
Use: "run-dev",
Short: "run ci produced build",
Run: func(cmd *cobra.Command, args []string) {
runDev()
},
},
&cobra.Command{
Use: "go-install",
Short: "install build tools",
Run: func(cmd *cobra.Command, args []string) {
goInstall()
},
},
&cobra.Command{
Use: "serve",
Short: "start client and server in development mode",
Run: func(cmd *cobra.Command, args []string) {
serve()
},
},
&cobra.Command{
Use: "install-test-plugin",
Short: "build the sample plugin",
Run: func(cmd *cobra.Command, args []string) {
installTestPlugin()
},
},
&cobra.Command{
Use: "version",
Short: "",
Run: func(cmd *cobra.Command, args []string) {
version()
},
},
&cobra.Command{
Use: "release",
Short: "tag and push a release",
Run: func(cmd *cobra.Command, args []string) {
release()
},
},
)
if err := rootCmd.Execute(); err != nil {
os.Exit(1)
}
}
func runCmd(command string, env map[string]string, args ...string) {
cmd := newCmd(command, env, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
log.Printf("Running: %s\n", cmd.String())
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
}
func runCmdIn(dir, command string, env map[string]string, args ...string) {
cmd := newCmd(command, env, args...)
cmd.Dir = dir
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
log.Printf("Running in %s: %s\n", dir, cmd.String())
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
}
func newCmd(command string, env map[string]string, args ...string) *exec.Cmd {
realCommand, err := exec.LookPath(command)
if err != nil {
log.Fatalf("unable to find command '%s'", command)
}
cmd := exec.Command(realCommand, args...)
cmd.Stderr = os.Stderr
cmd.Env = os.Environ()
for k, v := range env {
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v))
}
return cmd
}
func goInstall() {
pkgs := []string{
"github.com/golang/mock/gomock",
"github.com/golang/mock/mockgen",
"github.com/golang/protobuf/protoc-gen-go",
"golang.org/x/tools/cmd/goimports",
}
for _, pkg := range pkgs {
runCmd("go", map[string]string{"GO111MODULE": "on"}, "install", pkg)
}
}
func generate() {
removeFakes()
runCmd("go", nil, "generate", "-v", "./pkg/...", "./internal/...")
}
func build() {
newPath := filepath.Join(".", "build")
os.MkdirAll(newPath, 0755)
artifact := "octant"
if runtime.GOOS == "windows" {
artifact = "octant.exe"
}
runCmd("go", nil, "build", "-tags", "embedded", "-mod=vendor", "-o", "build/"+artifact, GO_FLAGS, "-v", "./cmd/octant")
}
func buildElectron() {
newPath := filepath.Join(".", "build")
os.MkdirAll(newPath, 0755)
artifact := "octant"
if runtime.GOOS == "windows" {
artifact = "octant.exe"
}
runCmd("go", nil, "build", "-mod=vendor", "-o", "web/extraResources/"+artifact, GO_FLAGS, "-v", "./cmd/octant")
}
func runDev() {
env := make(map[string]string)
for _, e := range os.Environ() {
parts := strings.SplitN(e, "=", 2)
env[parts[0]] = parts[1]
}
runCmd("build/octant", env)
}
func test() {
runCmd("go", nil, "test", "-v", "./internal/...", "./pkg/...")
}
func vet() {
runCmd("go", nil, "vet", "./internal/...", "./pkg/...")
goFmt(false)
}
func goFmt(update bool) {
if update {
runCmd("goimports", nil, "--local", "github.com/vmware-tanzu/octant", "-w", "cmd", "internal", "pkg")
} else {
out := bytes.NewBufferString("")
cmd := newCmd("goimports", nil, "--local", "github.com/vmware-tanzu/octant", "-l", "cmd", "internal", "pkg")
cmd.Stdout = out
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
log.Printf("Running: %s\n", cmd.String())
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
if out.Len() != 0 {
os.Stdout.Write(out.Bytes())
log.Fatal("above files are not formatted correctly. please run `go run build.go fmt`")
}
}
}
func webDeps() {
cmd := newCmd("npm", nil, "ci")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
cmd.Dir = "./web"
if err := cmd.Run(); err != nil {
log.Fatalf("web-deps: %s", err)
}
}
func webTest() {
cmd := newCmd("npm", nil, "run", "test:headless")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
cmd.Dir = "./web"
if err := cmd.Run(); err != nil {
log.Fatalf("web-test: %s", err)
}
}
func webBuild() {
cmd := newCmd("npm", nil, "run", "build")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
cmd.Dir = "./web"
if err := cmd.Run(); err != nil {
log.Fatalf("web-build: %s", err)
}
}
func webBuildElectron() {
cleanCmd := newCmd("npm", nil, "run", "clean")
cleanCmd.Stdout = os.Stdout
cleanCmd.Stderr = os.Stderr
cleanCmd.Stdin = os.Stdin
cleanCmd.Dir = "./web"
if err := cleanCmd.Run(); err != nil {
log.Fatalf("web-build-electron: create dist/octant/ : %s", err)
}
}
func serve() {
var wg sync.WaitGroup
uiVars := map[string]string{"API_BASE": "http://localhost:7777"}
uiCmd := newCmd("npm", uiVars, "run", "start")
uiCmd.Stdout = os.Stdout
uiCmd.Stderr = os.Stderr
uiCmd.Stdin = os.Stdin
uiCmd.Dir = "./web"
if err := uiCmd.Start(); err != nil {
log.Fatalf("uiCmd: start: %s", err)
}
wg.Add(1)
go func() {
defer wg.Done()
if err := uiCmd.Wait(); err != nil {
log.Fatalf("serve: npm run: %s", err)
}
}()
serverVars := map[string]string{
"OCTANT_DISABLE_OPEN_BROWSER": "true",
"OCTANT_LISTENER_ADDR": "localhost:7777",
"OCTANT_PROXY_FRONTEND": "http://localhost:4200",
}
serverCmd := newCmd("go", serverVars, "run", "./cmd/octant/main.go")
serverCmd.Stdout = os.Stdout
if err := serverCmd.Start(); err != nil {
log.Fatalf("serveCmd: start: %s", err)
}
wg.Add(1)
go func() {
defer wg.Done()
if err := serverCmd.Wait(); err != nil {
log.Fatalf("serve: go run: %s", err)
}
}()
sigc := make(chan os.Signal, 1)
signal.Notify(sigc,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT)
wg.Add(1)
go func() {
defer wg.Done()
<-sigc
uiCmd.Process.Signal(syscall.SIGQUIT)
serverCmd.Process.Signal(syscall.SIGQUIT)
}()
wg.Wait()
}
func installTestPlugin() {
dir := pluginDir()
log.Printf("Plugin path: %s", dir)
os.MkdirAll(dir, 0755)
filename := "octant-sample-plugin"
if runtime.GOOS == "windows" {
filename = "octant-sample-plugin.exe"
}
pluginFile := filepath.Join(dir, filename)
runCmd("go", nil, "build", "-o", pluginFile, "github.com/vmware-tanzu/octant/cmd/octant-sample-plugin")
}
func version() {
fmt.Println(VERSION)
}
func release() {
runCmd("git", nil, "tag", "-a", VERSION, "-m", fmt.Sprintf("\"Release %s\"", VERSION))
runCmd("git", nil, "push", "--follow-tags")
}
func removeFakes() {
checkDirs := []string{"pkg", "internal"}
fakePaths := []string{}
for _, dir := range checkDirs {
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if !info.IsDir() {
return nil
}
if info.Name() == "fake" {
fakePaths = append(fakePaths, path)
}
return nil
})
if err != nil {
log.Fatalf("generate (%s): %s", dir, err)
}
}
log.Print("Removing fakes from pkg/ and internal/")
for _, p := range fakePaths {
os.RemoveAll(p)
}
}
func gitCommit() string {
cmd := newCmd("git", nil, "rev-parse", "--short", "HEAD")
out, err := cmd.Output()
if err != nil {
log.Printf("gitCommit: %s", err)
return ""
}
	return strings.TrimSpace(string(out))
}
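// pluginDir returns the Octant plugin directory. For example (illustrative):
// on Linux with XDG_CONFIG_HOME unset, it resolves to
// $HOME/.config/octant/plugins.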
func pluginDir() string {
xdgConfigHome := os.Getenv("XDG_CONFIG_HOME")
if xdgConfigHome != "" {
return filepath.Join(xdgConfigHome, "octant", "plugins")
} else if runtime.GOOS == "windows" {
return filepath.Join(os.Getenv("LOCALAPPDATA"), "octant", "plugins")
} else {
return filepath.Join(os.Getenv("HOME"), ".config", "octant", "plugins")
}
}
func verifyRegistry() {
cmd := newCmd("grep", nil, "-R", "build-artifactory.eng.vmware.com", "web")
out, err := cmd.Output()
if exitError, ok := err.(*exec.ExitError); ok {
if exitError.ExitCode() > 1 {
log.Fatalf("grep: %s", err)
}
}
if len(out) > 0 {
log.Fatalf("found registry: %s", string(out))
}
}
| ["\"GOPATH\"", "\"XDG_CONFIG_HOME\"", "\"LOCALAPPDATA\"", "\"HOME\""] | [] | ["GOPATH", "HOME", "XDG_CONFIG_HOME", "LOCALAPPDATA"] | [] | ["GOPATH", "HOME", "XDG_CONFIG_HOME", "LOCALAPPDATA"] | go | 4 | 0 | |
discoreg/discoreg/wsgi.py | """
WSGI config for discoreg project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "discoreg.settings")
application = get_wsgi_application()
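# Illustrative deployment command (assumes gunicorn is installed):
#   gunicorn discoreg.wsgi:application --bind 0.0.0.0:8000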
| [] | [] | [] | [] | [] | python | 0 | 0 | |
main.go | package main
import (
"context"
"crypto/ecdsa"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"time"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"github.com/open-policy-agent/frameworks/constraint/pkg/externaldata"
"github.com/sigstore/cosign/pkg/cosign/kubernetes"
"github.com/sozercan/cosign-provider/pkg/cosign"
"go.uber.org/zap"
)
var log logr.Logger
const (
timeout = 3 * time.Second
apiVersion = "externaldata.gatekeeper.sh/v1alpha1"
)
func main() {
zapLog, err := zap.NewDevelopment()
if err != nil {
panic(fmt.Sprintf("unable to initialize logger: %v", err))
}
log = zapr.NewLogger(zapLog)
log.WithName("cosign-provider")
log.Info("starting server...")
http.HandleFunc("/validate", validate)
if err = http.ListenAndServe(":8090", nil); err != nil {
panic(err)
}
}
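// validate handles a Gatekeeper ExternalData request. Illustrative request
// body (shape follows externaldata.ProviderRequest; the image key is
// hypothetical):
//   {"apiVersion":"externaldata.gatekeeper.sh/v1alpha1","kind":"ProviderRequest",
//    "request":{"keys":["registry.example.com/app:v1"]}}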
func validate(w http.ResponseWriter, req *http.Request) {
secretKeyRef := os.Getenv("SECRET_NAME")
requestBody, err := ioutil.ReadAll(req.Body)
if err != nil {
log.Error(err, "unable to read request body")
return
}
var providerRequest externaldata.ProviderRequest
err = json.Unmarshal(requestBody, &providerRequest)
if err != nil {
log.Error(err, "unable to unmarshal request body")
return
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
cfg, err := kubernetes.GetKeyPairSecret(ctx, secretKeyRef)
if err != nil {
log.Error(err, "unable to get key pair secret")
return
}
publicKeys := cosign.Keys(cfg.Data)
results := make([]externaldata.Item, 0)
for _, key := range providerRequest.Request.Keys {
isValid := checkSignature(ctx, key.(string), publicKeys)
		results = append(results, externaldata.Item{
			Key:   key.(string),
			Value: isValid,
		})
}
response := externaldata.ProviderResponse{
APIVersion: apiVersion,
Kind: "ProviderResponse",
Response: externaldata.Response{
Items: results,
},
}
w.WriteHeader(http.StatusOK)
if err = json.NewEncoder(w).Encode(response); err != nil {
log.Error(err, "unable to encode output")
return
}
}
func checkSignature(ctx context.Context, img string, keys []*ecdsa.PublicKey) bool {
for _, k := range keys {
sps, err := cosign.Signatures(ctx, img, k)
if err != nil {
fmt.Printf("error while checking signature on image %s. error: %s\n", err, img)
return false
}
if len(sps) > 0 {
fmt.Printf("valid signatures on image %s with key %s\n", img, k)
return true
}
}
return false
}
| ["\"SECRET_NAME\""] | [] | ["SECRET_NAME"] | [] | ["SECRET_NAME"] | go | 1 | 0 | |
nni/tools/trial_tool/trial.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import ctypes
import os
import shlex
import tarfile
import time
from datetime import datetime
from subprocess import Popen
import psutil
from .log_utils import LogType, RemoteLogger, StdOutputType, nni_log
from .commands import CommandType
trial_output_path_name = ".nni"
class Trial:
def __init__(self, args, data):
self.process = None
self.data = data
self.args = args
self.command_channel = args.command_channel
self.trial_syslogger_stdout = None
global NNI_TRIAL_JOB_ID
self.id = data["trialId"]
if self.id is None:
raise Exception("trial_id is not found in %s" % data)
os.environ['NNI_TRIAL_JOB_ID'] = self.id
NNI_TRIAL_JOB_ID = self.id
# for multiple nodes. If it's None, it means single node.
self.node_id = args.node_id
if self.node_id is None:
self.name = self.id
else:
self.name = "%s_%s" % (self.id, self.node_id)
def run(self):
# redirect trial's stdout and stderr to syslog
self.trial_syslogger_stdout = RemoteLogger(self.args.nnimanager_ip, self.args.nnimanager_port, 'trial', StdOutputType.Stdout,
self.args.log_collection, self.id, self.args.command_channel)
nni_log(LogType.Info, "%s: start to run trial" % self.name)
trial_working_dir = os.path.realpath(os.path.join(os.curdir, "..", "..", "trials", self.id))
self.trial_output_dir = os.path.join(trial_working_dir, trial_output_path_name)
trial_code_dir = os.path.join(trial_working_dir, "code")
trial_nnioutput_dir = os.path.join(trial_working_dir, "nnioutput")
environ = os.environ.copy()
environ['NNI_TRIAL_SEQ_ID'] = str(self.data["sequenceId"])
environ['NNI_OUTPUT_DIR'] = os.path.join(trial_working_dir, "nnioutput")
environ['NNI_SYS_DIR'] = trial_working_dir
self.working_dir = trial_working_dir
# prepare code and parameters
prepared_flag_file_name = os.path.join(trial_working_dir, "trial_prepared")
if not os.path.exists(trial_working_dir):
os.makedirs(trial_working_dir, exist_ok=True)
os.makedirs(self.trial_output_dir, exist_ok=True)
os.makedirs(trial_nnioutput_dir, exist_ok=True)
# prepare code
os.makedirs(trial_code_dir, exist_ok=True)
with tarfile.open(os.path.join("..", "nni-code.tar.gz"), "r:gz") as tar:
tar.extractall(trial_code_dir)
# save parameters
nni_log(LogType.Info, '%s: saving parameter %s' % (self.name, self.data["parameter"]["value"]))
parameter_file_name = os.path.join(trial_working_dir, "parameter.cfg")
with open(parameter_file_name, "w") as parameter_file:
parameter_file.write(self.data["parameter"]["value"])
# ready flag
with open(prepared_flag_file_name, "w") as prepared_flag_file:
prepared_flag_file.write("%s" % (int(datetime.now().timestamp() * 1000)))
# make sure code prepared by other node.
if self.node_id is not None:
while True:
if os.path.exists(prepared_flag_file_name):
break
time.sleep(0.1)
trial_command = self.args.trial_command
gpuIndices = self.data.get('gpuIndices')
if (gpuIndices is not None):
            trial_command = 'CUDA_VISIBLE_DEVICES="%s" %s' % (gpuIndices, trial_command)
self.log_pipe_stdout = self.trial_syslogger_stdout.get_pipelog_reader()
self.process = Popen(trial_command, shell=True, stdout=self.log_pipe_stdout,
stderr=self.log_pipe_stdout, cwd=trial_code_dir, env=dict(environ))
nni_log(LogType.Info, '{0}: spawns a subprocess (pid {1}) to run command: {2}'.
format(self.name, self.process.pid, shlex.split(trial_command)))
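    # Note (inferred from the naming below): parameter index 0 maps to
    # "parameter.cfg", the same file written in run(); subsequent updates are
    # saved as parameter_<index>.cfg.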
def save_parameter_file(self, command_data):
parameters = command_data["parameters"]
file_index = int(parameters["index"])
if file_index == 0:
parameter_file_name = "parameter.cfg"
else:
parameter_file_name = "parameter_{}.cfg".format(file_index)
parameter_file_name = os.path.join(self.working_dir, parameter_file_name)
with open(parameter_file_name, "w") as parameter_file:
nni_log(LogType.Info, '%s: saving parameter %s' % (self.name, parameters["value"]))
parameter_file.write(parameters["value"])
def is_running(self):
if (self.process is None):
return False
retCode = self.process.poll()
# child worker process exits and all stdout data is read
        if retCode is not None and self.log_pipe_stdout.set_process_exit() and self.log_pipe_stdout.is_read_completed:
# In Windows, the retCode -1 is 4294967295. It's larger than c_long, and raise OverflowError.
# So covert it to int32.
retCode = ctypes.c_long(retCode).value
nni_log(LogType.Info, '{0}: subprocess terminated. Exit code is {1}.'.format(self.name, retCode))
end_time = int(datetime.now().timestamp() * 1000)
end_message = {
"code": retCode,
"time": end_time,
"trial": self.id,
}
self.command_channel.send(CommandType.TrialEnd, end_message)
self.cleanup()
return False
else:
return True
def kill(self, trial_id=None):
if trial_id == self.id or trial_id is None:
if self.process is not None:
try:
nni_log(LogType.Info, "%s: killing trial" % self.name)
for child in psutil.Process(self.process.pid).children(True):
child.kill()
self.process.kill()
except psutil.NoSuchProcess:
nni_log(LogType.Info, "kill trial %s failed: %s does not exist!" % (trial_id, self.process.pid))
except Exception as ex:
nni_log(LogType.Error, "kill trial %s failed: %s " % (trial_id, str(ex)))
self.cleanup()
def cleanup(self):
nni_log(LogType.Info, "%s: clean up trial" % self.name)
self.process = None
if self.log_pipe_stdout is not None:
self.log_pipe_stdout.set_process_exit()
self.log_pipe_stdout = None
if self.trial_syslogger_stdout is not None:
self.trial_syslogger_stdout.close()
self.trial_syslogger_stdout = None
| [] | [] | ["NNI_TRIAL_JOB_ID"] | [] | ["NNI_TRIAL_JOB_ID"] | python | 1 | 0 | |
providers/dns/dnsimple/dnsimple_test.go | package dnsimple
import (
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/xenolf/lego/acme"
"github.com/xenolf/lego/platform/tester"
)
const sandboxURL = "https://api.sandbox.fake.com"
var envTest = tester.NewEnvTest(
"DNSIMPLE_OAUTH_TOKEN",
"DNSIMPLE_BASE_URL").
WithDomain("DNSIMPLE_DOMAIN").
WithLiveTestRequirements("DNSIMPLE_OAUTH_TOKEN", "DNSIMPLE_DOMAIN")
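// Live tests require DNSIMPLE_OAUTH_TOKEN and DNSIMPLE_DOMAIN to be set;
// DNSIMPLE_BASE_URL falls back to sandboxURL above (see TestLivePresent).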
func TestNewDNSProvider(t *testing.T) {
testCases := []struct {
desc string
userAgent string
envVars map[string]string
expected string
}{
{
desc: "success",
userAgent: "lego",
envVars: map[string]string{
"DNSIMPLE_OAUTH_TOKEN": "my_token",
},
},
{
desc: "success: base url",
envVars: map[string]string{
"DNSIMPLE_OAUTH_TOKEN": "my_token",
"DNSIMPLE_BASE_URL": "https://api.dnsimple.test",
},
},
{
desc: "missing oauth token",
envVars: map[string]string{
"DNSIMPLE_OAUTH_TOKEN": "",
},
expected: "dnsimple: OAuth token is missing",
},
}
for _, test := range testCases {
t.Run(test.desc, func(t *testing.T) {
defer envTest.RestoreEnv()
envTest.ClearEnv()
envTest.Apply(test.envVars)
if test.userAgent != "" {
acme.UserAgent = test.userAgent
}
p, err := NewDNSProvider()
if len(test.expected) == 0 {
require.NoError(t, err)
require.NotNil(t, p)
require.NotNil(t, p.config)
require.NotNil(t, p.client)
baseURL := os.Getenv("DNSIMPLE_BASE_URL")
if baseURL != "" {
assert.Equal(t, baseURL, p.client.BaseURL)
}
if test.userAgent != "" {
assert.Equal(t, "lego", p.client.UserAgent)
}
} else {
require.EqualError(t, err, test.expected)
}
})
}
}
func TestNewDNSProviderConfig(t *testing.T) {
testCases := []struct {
desc string
accessToken string
baseURL string
expected string
}{
{
desc: "success",
accessToken: "my_token",
baseURL: "",
},
{
desc: "success: base url",
accessToken: "my_token",
baseURL: "https://api.dnsimple.test",
},
{
desc: "missing oauth token",
expected: "dnsimple: OAuth token is missing",
},
}
for _, test := range testCases {
t.Run(test.desc, func(t *testing.T) {
config := NewDefaultConfig()
config.AccessToken = test.accessToken
config.BaseURL = test.baseURL
p, err := NewDNSProviderConfig(config)
if len(test.expected) == 0 {
require.NoError(t, err)
require.NotNil(t, p)
require.NotNil(t, p.config)
require.NotNil(t, p.client)
if test.baseURL != "" {
assert.Equal(t, test.baseURL, p.client.BaseURL)
}
} else {
require.EqualError(t, err, test.expected)
}
})
}
}
func TestLivePresent(t *testing.T) {
if !envTest.IsLiveTest() {
t.Skip("skipping live test")
}
envTest.RestoreEnv()
if len(os.Getenv("DNSIMPLE_BASE_URL")) == 0 {
os.Setenv("DNSIMPLE_BASE_URL", sandboxURL)
}
provider, err := NewDNSProvider()
require.NoError(t, err)
err = provider.Present(envTest.GetDomain(), "", "123d==")
require.NoError(t, err)
}
func TestLiveCleanUp(t *testing.T) {
if !envTest.IsLiveTest() {
t.Skip("skipping live test")
}
envTest.RestoreEnv()
if len(os.Getenv("DNSIMPLE_BASE_URL")) == 0 {
os.Setenv("DNSIMPLE_BASE_URL", sandboxURL)
}
provider, err := NewDNSProvider()
require.NoError(t, err)
time.Sleep(1 * time.Second)
err = provider.CleanUp(envTest.GetDomain(), "", "123d==")
require.NoError(t, err)
}
| ["\"DNSIMPLE_BASE_URL\"", "\"DNSIMPLE_BASE_URL\"", "\"DNSIMPLE_BASE_URL\""] | [] | ["DNSIMPLE_BASE_URL"] | [] | ["DNSIMPLE_BASE_URL"] | go | 1 | 0 | |
vendor/github.com/mitchellh/packer/builder/oneandone/config.go | package oneandone
import (
"errors"
"os"
"strings"
"github.com/1and1/oneandone-cloudserver-sdk-go"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/helper/communicator"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/template/interpolate"
"github.com/mitchellh/mapstructure"
)
type Config struct {
common.PackerConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
Token string `mapstructure:"token"`
Url string `mapstructure:"url"`
SSHKey string
SnapshotName string `mapstructure:"image_name"`
DataCenterName string `mapstructure:"data_center_name"`
DataCenterId string
Image string `mapstructure:"source_image_name"`
DiskSize int `mapstructure:"disk_size"`
Retries int `mapstructure:"retries"`
CommConfig communicator.Config `mapstructure:",squash"`
ctx interpolate.Context
}
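// Illustrative minimal builder configuration (JSON keys follow the
// mapstructure tags above; the image name is hypothetical):
//   {"type": "oneandone", "source_image_name": "ubuntu1604-64std", "token": "..."}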
func NewConfig(raws ...interface{}) (*Config, []string, error) {
var c Config
var md mapstructure.Metadata
err := config.Decode(&c, &config.DecodeOpts{
Metadata: &md,
Interpolate: true,
InterpolateContext: &c.ctx,
InterpolateFilter: &interpolate.RenderFilter{
Exclude: []string{
"run_command",
},
},
}, raws...)
if err != nil {
return nil, nil, err
}
var errs *packer.MultiError
if c.SnapshotName == "" {
def, err := interpolate.Render("packer-{{timestamp}}", nil)
if err != nil {
panic(err)
}
// Default to packer-{{ unix timestamp (utc) }}
c.SnapshotName = def
}
if c.Image == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("1&1 'image' is required"))
}
if c.Token == "" {
c.Token = os.Getenv("ONEANDONE_TOKEN")
}
if c.Url == "" {
c.Url = oneandone.BaseUrl
}
if c.DiskSize == 0 {
c.DiskSize = 50
}
if c.Retries == 0 {
c.Retries = 600
}
if c.DataCenterName != "" {
token := oneandone.SetToken(c.Token)
//Create an API client
api := oneandone.New(token, c.Url)
dcs, err := api.ListDatacenters()
if err != nil {
errs = packer.MultiErrorAppend(
errs, err)
}
for _, dc := range dcs {
			if strings.EqualFold(dc.CountryCode, c.DataCenterName) {
c.DataCenterId = dc.Id
break
}
}
}
if es := c.Comm.Prepare(&c.ctx); len(es) > 0 {
errs = packer.MultiErrorAppend(errs, es...)
}
if errs != nil && len(errs.Errors) > 0 {
return nil, nil, errs
}
common.ScrubConfig(c, c.Token)
return &c, nil, nil
}
| ["\"ONEANDONE_TOKEN\""] | [] | ["ONEANDONE_TOKEN"] | [] | ["ONEANDONE_TOKEN"] | go | 1 | 0 | |
src/app.py | import json
import os
import cloudinary
import discord
from dotenv import load_dotenv
from printy import printy
from PapunikaMap import PapunikaMap
# ingame name : papunika name
alias_names_arthetine = {
"origins of stern": "stern"
}
alias_names_feiton = {
"red moonshade": "red moon wastes",
"kalaja": "kallazar",
"wailing swamp": "rot water",
"shady cliff": "siena monastery",
"nameless valley": "nameless plateau"
}
alias_names_rohendel = {
"breezesome brae": "wind hill",
"elzowin's shade": "ancient flower garden",
"rothun": "queen's garden",
"lake shiverwave": "soaring harbor",
"xeneela ruins": "the ruins of genail"
}
alias_names_yorn = {
"yorn's cradle": "helm harbor",
"unfinished garden": "lower dungeons",
"hall of promise": "primordial lands",
"iron hammer mine": "mines of innumerable riches",
"black anvil mine": "mines of fire",
"great castle": "isendelf"
}
alias_names_punika = {
"starsand beach": "star sand beach",
"tideshelf path": "shallow sea road",
"tikatika colony": "tikku gardens",
"secret forest": "mangrove forest"
}
# Load bot environment variables
load_dotenv()
cloudinary.config(
cloud_name=os.getenv('CLOUDINARY_CLOUD_NAME'),
api_key=os.getenv('CLOUDINARY_API_KEY'),
api_secret=os.getenv('CLOUDINARY_API_SECRET')
)
client = discord.Client()
papunika_map_info_file = open('res/papunikaMapInfo.json')
papunika_map_info_data = json.loads(papunika_map_info_file.read())
papunika_map = PapunikaMap(**papunika_map_info_data)
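# Assumed shape of res/papunikaMapInfo.json (inferred from the lookups below):
# a "zones" list of objects with at least "id" and "name" keys.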
@client.event
async def on_ready():
printy(f'Logged in as [r]{client.user}@')
@client.event
async def on_message(message):
if message.author == client.user:
return
msg = message.content
if message.content.startswith('!map'):
map: str = msg.split("!map ", 1)[1]
map = map.strip()
# TODO: get map by map layer, i.e: Kalaja 2F
# TODO: get map by partial name, i.e: brae (Breezesome Brae)
aliases = [
alias_names_yorn, alias_names_feiton,
alias_names_punika, alias_names_rohendel,
alias_names_arthetine
]
image_url = None
map_id = None
for map_zone in papunika_map.zones:
if map_zone['name'].lower() == map.lower():
image_url = cloudinary.utils.cloudinary_url(f"maps/{map_zone['id']}.png")[0]
map_id = map_zone['id']
if not image_url:
for alias in aliases:
for game_name, papunika_name in alias.items():
if game_name == map.lower():
                        matches = [zone for zone in papunika_map.zones if zone['name'].lower() == papunika_name]
                        if matches:
                            map_zone = matches[0]
                            image_url = cloudinary.utils.cloudinary_url(f"maps/{map_zone['id']}.png")[0]
                            map_id = map_zone['id']
if image_url:
embed = discord.Embed()
embed.set_author(name="Map from Papunika found!")
embed.add_field(name=f"https://papunika.com/map/?z={map_id}&l=us", value="Link to papunika map")
embed.set_image(url=image_url)
await message.channel.send(embed=embed)
else:
await message.channel.send("Map not found!")
client.run(os.getenv('TOKEN'))
| [] | [] | ["CLOUDINARY_API_KEY", "CLOUDINARY_API_SECRET", "CLOUDINARY_CLOUD_NAME", "TOKEN"] | [] | ["CLOUDINARY_API_KEY", "CLOUDINARY_API_SECRET", "CLOUDINARY_CLOUD_NAME", "TOKEN"] | python | 4 | 0 | |
store/store_test.go | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"context"
"fmt"
"os"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/testleak"
)
const (
startIndex = 0
testCount = 2
indexStep = 2
)
type brokenStore struct{}
func (s *brokenStore) Open(schema string) (kv.Storage, error) {
return nil, kv.ErrRetryable
}
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
TestingT(t)
}
var _ = Suite(&testKVSuite{})
type testKVSuite struct {
s kv.Storage
}
func (s *testKVSuite) SetUpSuite(c *C) {
testleak.BeforeTest()
store, err := mockstore.NewMockTikvStore()
c.Assert(err, IsNil)
s.s = store
}
func (s *testKVSuite) TearDownSuite(c *C) {
err := s.s.Close()
c.Assert(err, IsNil)
testleak.AfterTest(c)()
}
func insertData(c *C, txn kv.Transaction) {
for i := startIndex; i < testCount; i++ {
val := encodeInt(i * indexStep)
err := txn.Set(val, val)
c.Assert(err, IsNil)
}
}
func mustDel(c *C, txn kv.Transaction) {
for i := startIndex; i < testCount; i++ {
val := encodeInt(i * indexStep)
err := txn.Delete(val)
c.Assert(err, IsNil)
}
}
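// encodeInt left-pads n to 10 digits so that byte-wise key order matches
// numeric order, e.g. encodeInt(42) yields []byte("0000000042").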
func encodeInt(n int) []byte {
return []byte(fmt.Sprintf("%010d", n))
}
func decodeInt(s []byte) int {
var n int
fmt.Sscanf(string(s), "%010d", &n)
return n
}
func valToStr(c *C, iter kv.Iterator) string {
val := iter.Value()
return string(val)
}
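// checkSeek verifies exact-match seeks, iterator Next(), and seeks to non-existent keys.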
func checkSeek(c *C, txn kv.Transaction) {
for i := startIndex; i < testCount; i++ {
val := encodeInt(i * indexStep)
iter, err := txn.Iter(val, nil)
c.Assert(err, IsNil)
c.Assert([]byte(iter.Key()), BytesEquals, val)
c.Assert(decodeInt([]byte(valToStr(c, iter))), Equals, i*indexStep)
iter.Close()
}
// Test iterator Next()
for i := startIndex; i < testCount-1; i++ {
val := encodeInt(i * indexStep)
iter, err := txn.Iter(val, nil)
c.Assert(err, IsNil)
c.Assert([]byte(iter.Key()), BytesEquals, val)
c.Assert(valToStr(c, iter), Equals, string(val))
err = iter.Next()
c.Assert(err, IsNil)
c.Assert(iter.Valid(), IsTrue)
val = encodeInt((i + 1) * indexStep)
c.Assert([]byte(iter.Key()), BytesEquals, val)
c.Assert(valToStr(c, iter), Equals, string(val))
iter.Close()
}
	// Seek test for a non-existent key beyond the maximum key
iter, err := txn.Iter(encodeInt(testCount*indexStep), nil)
c.Assert(err, IsNil)
c.Assert(iter.Valid(), IsFalse)
	// Seek test for a non-existent key between existing keys;
	// it returns the smallest key that is larger than the one we are seeking
inBetween := encodeInt((testCount-1)*indexStep - 1)
last := encodeInt((testCount - 1) * indexStep)
iter, err = txn.Iter(inBetween, nil)
c.Assert(err, IsNil)
c.Assert(iter.Valid(), IsTrue)
c.Assert([]byte(iter.Key()), Not(BytesEquals), inBetween)
c.Assert([]byte(iter.Key()), BytesEquals, last)
iter.Close()
}
func mustNotGet(c *C, txn kv.Transaction) {
for i := startIndex; i < testCount; i++ {
s := encodeInt(i * indexStep)
_, err := txn.Get(s)
c.Assert(err, NotNil)
}
}
func mustGet(c *C, txn kv.Transaction) {
for i := startIndex; i < testCount; i++ {
s := encodeInt(i * indexStep)
val, err := txn.Get(s)
c.Assert(err, IsNil)
c.Assert(string(val), Equals, string(s))
}
}
func (s *testKVSuite) TestGetSet(c *C) {
txn, err := s.s.Begin()
c.Assert(err, IsNil)
insertData(c, txn)
mustGet(c, txn)
// Check transaction results
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
txn, err = s.s.Begin()
c.Assert(err, IsNil)
defer txn.Commit(context.Background())
mustGet(c, txn)
mustDel(c, txn)
}
func (s *testKVSuite) TestSeek(c *C) {
txn, err := s.s.Begin()
c.Assert(err, IsNil)
insertData(c, txn)
checkSeek(c, txn)
// Check transaction results
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
txn, err = s.s.Begin()
c.Assert(err, IsNil)
defer txn.Commit(context.Background())
checkSeek(c, txn)
mustDel(c, txn)
}
func (s *testKVSuite) TestInc(c *C) {
txn, err := s.s.Begin()
c.Assert(err, IsNil)
key := []byte("incKey")
n, err := kv.IncInt64(txn, key, 100)
c.Assert(err, IsNil)
c.Assert(n, Equals, int64(100))
// Check transaction results
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
txn, err = s.s.Begin()
c.Assert(err, IsNil)
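	// The key currently holds 100, so decrementing by 200 should yield -100.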
n, err = kv.IncInt64(txn, key, -200)
c.Assert(err, IsNil)
c.Assert(n, Equals, int64(-100))
err = txn.Delete(key)
c.Assert(err, IsNil)
n, err = kv.IncInt64(txn, key, 100)
c.Assert(err, IsNil)
c.Assert(n, Equals, int64(100))
err = txn.Delete(key)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
}
func (s *testKVSuite) TestDelete(c *C) {
txn, err := s.s.Begin()
c.Assert(err, IsNil)
insertData(c, txn)
mustDel(c, txn)
mustNotGet(c, txn)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
// Try get
txn, err = s.s.Begin()
c.Assert(err, IsNil)
mustNotGet(c, txn)
// Insert again
insertData(c, txn)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
// Delete all
txn, err = s.s.Begin()
c.Assert(err, IsNil)
mustDel(c, txn)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
txn, err = s.s.Begin()
c.Assert(err, IsNil)
mustNotGet(c, txn)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
}
func (s *testKVSuite) TestDelete2(c *C) {
txn, err := s.s.Begin()
c.Assert(err, IsNil)
val := []byte("test")
txn.Set([]byte("DATA_test_tbl_department_record__0000000001_0003"), val)
txn.Set([]byte("DATA_test_tbl_department_record__0000000001_0004"), val)
txn.Set([]byte("DATA_test_tbl_department_record__0000000002_0003"), val)
txn.Set([]byte("DATA_test_tbl_department_record__0000000002_0004"), val)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
// Delete all
txn, err = s.s.Begin()
c.Assert(err, IsNil)
it, err := txn.Iter([]byte("DATA_test_tbl_department_record__0000000001_0003"), nil)
c.Assert(err, IsNil)
for it.Valid() {
err = txn.Delete([]byte(it.Key()))
c.Assert(err, IsNil)
err = it.Next()
c.Assert(err, IsNil)
}
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
txn, err = s.s.Begin()
c.Assert(err, IsNil)
it, _ = txn.Iter([]byte("DATA_test_tbl_department_record__000000000"), nil)
c.Assert(it.Valid(), IsFalse)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
}
func (s *testKVSuite) TestSetNil(c *C) {
txn, err := s.s.Begin()
defer txn.Commit(context.Background())
c.Assert(err, IsNil)
err = txn.Set([]byte("1"), nil)
c.Assert(err, NotNil)
}
func (s *testKVSuite) TestBasicSeek(c *C) {
txn, err := s.s.Begin()
c.Assert(err, IsNil)
txn.Set([]byte("1"), []byte("1"))
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
txn, err = s.s.Begin()
c.Assert(err, IsNil)
defer txn.Commit(context.Background())
it, err := txn.Iter([]byte("2"), nil)
c.Assert(err, IsNil)
c.Assert(it.Valid(), Equals, false)
txn.Delete([]byte("1"))
}
func (s *testKVSuite) TestBasicTable(c *C) {
txn, err := s.s.Begin()
c.Assert(err, IsNil)
for i := 1; i < 5; i++ {
b := []byte(strconv.Itoa(i))
txn.Set(b, b)
}
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
txn, err = s.s.Begin()
c.Assert(err, IsNil)
defer txn.Commit(context.Background())
err = txn.Set([]byte("1"), []byte("1"))
c.Assert(err, IsNil)
it, err := txn.Iter([]byte("0"), nil)
c.Assert(err, IsNil)
c.Assert(string(it.Key()), Equals, "1")
err = txn.Set([]byte("0"), []byte("0"))
c.Assert(err, IsNil)
it, err = txn.Iter([]byte("0"), nil)
c.Assert(err, IsNil)
c.Assert(string(it.Key()), Equals, "0")
err = txn.Delete([]byte("0"))
c.Assert(err, IsNil)
txn.Delete([]byte("1"))
it, err = txn.Iter([]byte("0"), nil)
c.Assert(err, IsNil)
c.Assert(string(it.Key()), Equals, "2")
err = txn.Delete([]byte("3"))
c.Assert(err, IsNil)
it, err = txn.Iter([]byte("2"), nil)
c.Assert(err, IsNil)
c.Assert(string(it.Key()), Equals, "2")
it, err = txn.Iter([]byte("3"), nil)
c.Assert(err, IsNil)
c.Assert(string(it.Key()), Equals, "4")
err = txn.Delete([]byte("2"))
c.Assert(err, IsNil)
err = txn.Delete([]byte("4"))
c.Assert(err, IsNil)
}
func (s *testKVSuite) TestRollback(c *C) {
txn, err := s.s.Begin()
c.Assert(err, IsNil)
err = txn.Rollback()
c.Assert(err, IsNil)
txn, err = s.s.Begin()
c.Assert(err, IsNil)
insertData(c, txn)
mustGet(c, txn)
err = txn.Rollback()
c.Assert(err, IsNil)
txn, err = s.s.Begin()
c.Assert(err, IsNil)
defer txn.Commit(context.Background())
for i := startIndex; i < testCount; i++ {
_, err := txn.Get([]byte(strconv.Itoa(i)))
c.Assert(err, NotNil)
}
}
func (s *testKVSuite) TestSeekMin(c *C) {
rows := []struct {
key string
value string
}{
{"DATA_test_main_db_tbl_tbl_test_record__00000000000000000001", "lock-version"},
{"DATA_test_main_db_tbl_tbl_test_record__00000000000000000001_0002", "1"},
{"DATA_test_main_db_tbl_tbl_test_record__00000000000000000001_0003", "hello"},
{"DATA_test_main_db_tbl_tbl_test_record__00000000000000000002", "lock-version"},
{"DATA_test_main_db_tbl_tbl_test_record__00000000000000000002_0002", "2"},
{"DATA_test_main_db_tbl_tbl_test_record__00000000000000000002_0003", "hello"},
}
txn, err := s.s.Begin()
c.Assert(err, IsNil)
for _, row := range rows {
txn.Set([]byte(row.key), []byte(row.value))
}
it, err := txn.Iter(nil, nil)
c.Assert(err, IsNil)
for it.Valid() {
fmt.Printf("%s, %s\n", it.Key(), it.Value())
it.Next()
}
it, err = txn.Iter([]byte("DATA_test_main_db_tbl_tbl_test_record__00000000000000000000"), nil)
c.Assert(err, IsNil)
c.Assert(string(it.Key()), Equals, "DATA_test_main_db_tbl_tbl_test_record__00000000000000000001")
for _, row := range rows {
txn.Delete([]byte(row.key))
}
}
func (s *testKVSuite) TestConditionIfNotExist(c *C) {
var success int64
cnt := 100
b := []byte("1")
var wg sync.WaitGroup
wg.Add(cnt)
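	// Fire cnt concurrent transactions that all try to write the same key; conflicting commits may fail.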
for i := 0; i < cnt; i++ {
go func() {
defer wg.Done()
txn, err := s.s.Begin()
c.Assert(err, IsNil)
err = txn.Set(b, b)
if err != nil {
return
}
err = txn.Commit(context.Background())
if err == nil {
atomic.AddInt64(&success, 1)
}
}()
}
wg.Wait()
	// At least one txn can succeed.
c.Assert(success, Greater, int64(0))
// Clean up
txn, err := s.s.Begin()
c.Assert(err, IsNil)
err = txn.Delete(b)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
}
func (s *testKVSuite) TestConditionIfEqual(c *C) {
var success int64
cnt := 100
b := []byte("1")
var wg sync.WaitGroup
wg.Add(cnt)
txn, err := s.s.Begin()
c.Assert(err, IsNil)
txn.Set(b, b)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
for i := 0; i < cnt; i++ {
go func() {
defer wg.Done()
			// Use txn1/err1 instead of txn/err
			// to pass the `go tool vet -shadow` check.
txn1, err1 := s.s.Begin()
c.Assert(err1, IsNil)
txn1.Set(b, []byte("newValue"))
err1 = txn1.Commit(context.Background())
if err1 == nil {
atomic.AddInt64(&success, 1)
}
}()
}
wg.Wait()
c.Assert(success, Greater, int64(0))
// Clean up
txn, err = s.s.Begin()
c.Assert(err, IsNil)
err = txn.Delete(b)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
}
func (s *testKVSuite) TestConditionUpdate(c *C) {
txn, err := s.s.Begin()
c.Assert(err, IsNil)
txn.Delete([]byte("b"))
kv.IncInt64(txn, []byte("a"), 1)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
}
func (s *testKVSuite) TestDBClose(c *C) {
c.Skip("don't know why it fails.")
store, err := mockstore.NewMockTikvStore()
c.Assert(err, IsNil)
txn, err := store.Begin()
c.Assert(err, IsNil)
err = txn.Set([]byte("a"), []byte("b"))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
ver, err := store.CurrentVersion()
c.Assert(err, IsNil)
c.Assert(kv.MaxVersion.Cmp(ver), Equals, 1)
snap, err := store.GetSnapshot(kv.MaxVersion)
c.Assert(err, IsNil)
_, err = snap.Get([]byte("a"))
c.Assert(err, IsNil)
txn, err = store.Begin()
c.Assert(err, IsNil)
err = store.Close()
c.Assert(err, IsNil)
_, err = store.Begin()
c.Assert(err, NotNil)
_, err = store.GetSnapshot(kv.MaxVersion)
c.Assert(err, NotNil)
err = txn.Set([]byte("a"), []byte("b"))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, NotNil)
}
func (s *testKVSuite) TestIsolationInc(c *C) {
threadCnt := 4
ids := make(map[int64]struct{}, threadCnt*100)
var m sync.Mutex
var wg sync.WaitGroup
wg.Add(threadCnt)
for i := 0; i < threadCnt; i++ {
go func() {
defer wg.Done()
for j := 0; j < 100; j++ {
var id int64
err := kv.RunInNewTxn(s.s, true, func(txn kv.Transaction) error {
var err1 error
id, err1 = kv.IncInt64(txn, []byte("key"), 1)
return err1
})
c.Assert(err, IsNil)
m.Lock()
_, ok := ids[id]
ids[id] = struct{}{}
m.Unlock()
c.Assert(ok, IsFalse)
}
}()
}
wg.Wait()
// delete
txn, err := s.s.Begin()
c.Assert(err, IsNil)
defer txn.Commit(context.Background())
txn.Delete([]byte("key"))
}
func (s *testKVSuite) TestIsolationMultiInc(c *C) {
threadCnt := 4
incCnt := 100
keyCnt := 4
keys := make([][]byte, 0, keyCnt)
for i := 0; i < keyCnt; i++ {
keys = append(keys, []byte(fmt.Sprintf("test_key_%d", i)))
}
var wg sync.WaitGroup
wg.Add(threadCnt)
for i := 0; i < threadCnt; i++ {
go func() {
defer wg.Done()
for j := 0; j < incCnt; j++ {
err := kv.RunInNewTxn(s.s, true, func(txn kv.Transaction) error {
for _, key := range keys {
_, err1 := kv.IncInt64(txn, key, 1)
if err1 != nil {
return err1
}
}
return nil
})
c.Assert(err, IsNil)
}
}()
}
wg.Wait()
err := kv.RunInNewTxn(s.s, false, func(txn kv.Transaction) error {
for _, key := range keys {
id, err1 := kv.GetInt64(txn, key)
if err1 != nil {
return err1
}
c.Assert(id, Equals, int64(threadCnt*incCnt))
txn.Delete(key)
}
return nil
})
c.Assert(err, IsNil)
}
func (s *testKVSuite) TestRetryOpenStore(c *C) {
begin := time.Now()
Register("dummy", &brokenStore{})
store, err := newStoreWithRetry("dummy://dummy-store", 3)
if store != nil {
defer store.Close()
}
c.Assert(err, NotNil)
elapse := time.Since(begin)
c.Assert(uint64(elapse), GreaterEqual, uint64(3*time.Second), Commentf("elapse: %s", elapse))
}
| [
"\"log_level\""
] | [] | [
"log_level"
] | [] | ["log_level"] | go | 1 | 0 | |
experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go | package experiment
import (
"os"
"github.com/litmuschaos/chaos-operator/pkg/apis/litmuschaos/v1alpha1"
litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/node-cpu-hog/lib"
clients "github.com/litmuschaos/litmus-go/pkg/clients"
"github.com/litmuschaos/litmus-go/pkg/events"
experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/node-cpu-hog/environment"
experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-cpu-hog/types"
"github.com/litmuschaos/litmus-go/pkg/log"
"github.com/litmuschaos/litmus-go/pkg/probe"
"github.com/litmuschaos/litmus-go/pkg/result"
"github.com/litmuschaos/litmus-go/pkg/status"
"github.com/litmuschaos/litmus-go/pkg/types"
"github.com/litmuschaos/litmus-go/pkg/utils/common"
"github.com/sirupsen/logrus"
)
// NodeCPUHog injects the node-cpu-hog chaos
func NodeCPUHog(clients clients.ClientSets) {
experimentsDetails := experimentTypes.ExperimentDetails{}
resultDetails := types.ResultDetails{}
eventsDetails := types.EventDetails{}
chaosDetails := types.ChaosDetails{}
//Fetching all the ENV passed from the runner pod
log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
experimentEnv.GetENV(&experimentsDetails)
// Initialize the chaos attributes
types.InitialiseChaosVariables(&chaosDetails)
// Initialize Chaos Result Parameters
types.SetResultAttributes(&resultDetails, chaosDetails)
if experimentsDetails.EngineName != "" {
		// Initialize the probe details. Bail out upon error, as we haven't entered the experiment's business logic yet
if err := probe.InitializeProbesInChaosResultDetails(&chaosDetails, clients, &resultDetails); err != nil {
log.Errorf("Unable to initialize the probes, err: %v", err)
return
}
}
	//Updating the chaos result at the beginning of the experiment
log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
log.Errorf("Unable to Create the Chaos Result, err: %v", err)
failStep := "Updating the chaos result of pod-cpu-hog experiment (SOT)"
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
// Set the chaos result uid
result.SetResultUID(&resultDetails, clients, &chaosDetails)
	// generating the event in chaosresult to mark the verdict as awaited
msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
//DISPLAY THE APP INFORMATION
log.InfoWithValues("The application information is as follows", logrus.Fields{
"Node Label": experimentsDetails.NodeLabel,
"Chaos Duration": experimentsDetails.ChaosDuration,
"Target Nodes": experimentsDetails.TargetNodes,
"Node CPU Cores": experimentsDetails.NodeCPUcores,
})
	// Calling the AbortWatcher goroutine; it will continuously watch for the abort signal and generate the required events and result
go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
//PRE-CHAOS APPLICATION STATUS CHECK
log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
log.Errorf("Application status check failed, err: %v", err)
failStep := "Verify that the AUT (Application Under Test) is running (pre-chaos)"
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
if experimentsDetails.AuxiliaryAppInfo != "" {
log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
log.Errorf("Auxiliary Application status check failed, err: %v", err)
failStep := "Verify that the Auxiliary Applications are running (pre-chaos)"
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
}
// Checking the status of target nodes
log.Info("[Status]: Getting the status of target nodes")
if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
log.Errorf("Target nodes are not in the ready state, err: %v", err)
failStep := "Checking the status of nodes"
types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
if experimentsDetails.EngineName != "" {
		// marking NUT as ready, as we already checked the status of the nodes under test
msg := "NUT: Ready"
// run the probes in the pre-chaos check
if len(resultDetails.ProbeDetails) != 0 {
if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
log.Errorf("Probe Failed, err: %v", err)
failStep := "Failed while running probes"
msg := "NUT: Ready, Probes: Unsuccessful"
types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
msg = "NUT: Ready, Probes: Successful"
}
// generating the events for the pre-chaos check
types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
}
// Including the litmus lib for node-cpu-hog
switch experimentsDetails.ChaosLib {
case "litmus":
if err := litmusLIB.PrepareNodeCPUHog(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
log.Errorf("[Error]: CPU hog failed, err: %v", err)
failStep := "failed in chaos injection phase"
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
default:
log.Error("[Invalid]: Please Provide the correct LIB")
failStep := "no match found for specified lib"
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
resultDetails.Verdict = v1alpha1.ResultVerdictPassed
//POST-CHAOS APPLICATION STATUS CHECK
log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
log.Infof("Application status check failed, err: %v", err)
failStep := "Verify that the AUT (Application Under Test) is running (post-chaos)"
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
if experimentsDetails.AuxiliaryAppInfo != "" {
log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
log.Errorf("Auxiliary Application status check failed, err: %v", err)
failStep := "Verify that the Auxiliary Applications are running (post-chaos)"
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
}
// Checking the status of target nodes
log.Info("[Status]: Getting the status of target nodes")
if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
}
if experimentsDetails.EngineName != "" {
		// marking NUT as ready, as we already checked the status of the nodes under test
msg := "NUT: Ready"
// run the probes in the post-chaos check
if len(resultDetails.ProbeDetails) != 0 {
if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
log.Errorf("Probes Failed, err: %v", err)
failStep := "Failed while running probes"
msg := "NUT: Ready, Probes: Unsuccessful"
types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
msg = "NUT: Ready, Probes: Successful"
}
// generating post chaos event
types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
}
	//Updating the chaosResult at the end of the experiment
log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
log.Errorf("Unable to Update the Chaos Result, err: %v", err)
return
}
	// generating the event in chaosresult to mark the verdict as pass/fail
msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
reason := types.PassVerdict
eventType := "Normal"
if resultDetails.Verdict != "Pass" {
reason = types.FailVerdict
eventType = "Warning"
}
types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
if experimentsDetails.EngineName != "" {
msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
}
}
| [
"\"EXPERIMENT_NAME\""
] | [] | [
"EXPERIMENT_NAME"
] | [] | ["EXPERIMENT_NAME"] | go | 1 | 0 | |
linklab/wsgi.py | """
WSGI config for linklab project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'linklab.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
evaluate_parsing_JPPNet-s2.py | from __future__ import print_function
import argparse
from datetime import datetime
import os
import sys
import time
import scipy.misc
import cv2
from PIL import Image
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from utils import *
from LIP_model import *
N_CLASSES = 20
INPUT_SIZE = (384, 384)
DATA_DIRECTORY = '/home/vasile/PycharmProjects/PF-AFN/VITON/VITON-men/train_img/'
DATA_LIST_PATH = '/home/vasile/PycharmProjects/PF-AFN/VITON/VITON-men/train_img/val.txt'
NUM_STEPS = 516 # Number of images in the validation set.
RESTORE_FROM = './checkpoint/JPPNet-s2'
OUTPUT_DIR = '/home/vasile/PycharmProjects/PF-AFN/VITON/VITON-men/train_label/'
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
def main():
"""Create the model and start the evaluation process."""
# Create queue coordinator.
coord = tf.train.Coordinator()
h, w = INPUT_SIZE
# Load reader.
with tf.name_scope("create_inputs"):
reader = ImageReader(DATA_DIRECTORY, DATA_LIST_PATH, None, False, False, coord)
image = reader.image
image_rev = tf.reverse(image, tf.stack([1]))
image_list = reader.image_list
image_batch_origin = tf.stack([image, image_rev])
image_batch = tf.image.resize_images(image_batch_origin, [int(h), int(w)])
image_batch075 = tf.image.resize_images(image_batch_origin, [int(h * 0.75), int(w * 0.75)])
image_batch125 = tf.image.resize_images(image_batch_origin, [int(h * 1.25), int(w * 1.25)])
# Create network.
with tf.variable_scope('', reuse=False):
net_100 = JPPNetModel({'data': image_batch}, is_training=False, n_classes=N_CLASSES)
with tf.variable_scope('', reuse=True):
net_075 = JPPNetModel({'data': image_batch075}, is_training=False, n_classes=N_CLASSES)
with tf.variable_scope('', reuse=True):
net_125 = JPPNetModel({'data': image_batch125}, is_training=False, n_classes=N_CLASSES)
# parsing net
parsing_fea1_100 = net_100.layers['res5d_branch2b_parsing']
parsing_fea1_075 = net_075.layers['res5d_branch2b_parsing']
parsing_fea1_125 = net_125.layers['res5d_branch2b_parsing']
parsing_out1_100 = net_100.layers['fc1_human']
parsing_out1_075 = net_075.layers['fc1_human']
parsing_out1_125 = net_125.layers['fc1_human']
# pose net
resnet_fea_100 = net_100.layers['res4b22_relu']
resnet_fea_075 = net_075.layers['res4b22_relu']
resnet_fea_125 = net_125.layers['res4b22_relu']
with tf.variable_scope('', reuse=False):
pose_out1_100, pose_fea1_100 = pose_net(resnet_fea_100, 'fc1_pose')
pose_out2_100, pose_fea2_100 = pose_refine(pose_out1_100, parsing_out1_100, pose_fea1_100, name='fc2_pose')
parsing_out2_100, parsing_fea2_100 = parsing_refine(parsing_out1_100, pose_out1_100, parsing_fea1_100,
name='fc2_parsing')
parsing_out3_100, parsing_fea3_100 = parsing_refine(parsing_out2_100, pose_out2_100, parsing_fea2_100,
name='fc3_parsing')
with tf.variable_scope('', reuse=True):
pose_out1_075, pose_fea1_075 = pose_net(resnet_fea_075, 'fc1_pose')
pose_out2_075, pose_fea2_075 = pose_refine(pose_out1_075, parsing_out1_075, pose_fea1_075, name='fc2_pose')
parsing_out2_075, parsing_fea2_075 = parsing_refine(parsing_out1_075, pose_out1_075, parsing_fea1_075,
name='fc2_parsing')
parsing_out3_075, parsing_fea3_075 = parsing_refine(parsing_out2_075, pose_out2_075, parsing_fea2_075,
name='fc3_parsing')
with tf.variable_scope('', reuse=True):
pose_out1_125, pose_fea1_125 = pose_net(resnet_fea_125, 'fc1_pose')
pose_out2_125, pose_fea2_125 = pose_refine(pose_out1_125, parsing_out1_125, pose_fea1_125, name='fc2_pose')
parsing_out2_125, parsing_fea2_125 = parsing_refine(parsing_out1_125, pose_out1_125, parsing_fea1_125,
name='fc2_parsing')
parsing_out3_125, parsing_fea3_125 = parsing_refine(parsing_out2_125, pose_out2_125, parsing_fea2_125,
name='fc3_parsing')
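    # Average the three scale branches (0.75x, 1.0x, 1.25x) after resizing them back to the input resolution.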
parsing_out1 = tf.reduce_mean(
tf.stack([tf.image.resize_images(parsing_out1_100, tf.shape(image_batch_origin)[1:3, ]),
tf.image.resize_images(parsing_out1_075, tf.shape(image_batch_origin)[1:3, ]),
tf.image.resize_images(parsing_out1_125, tf.shape(image_batch_origin)[1:3, ])]), axis=0)
parsing_out2 = tf.reduce_mean(
tf.stack([tf.image.resize_images(parsing_out2_100, tf.shape(image_batch_origin)[1:3, ]),
tf.image.resize_images(parsing_out2_075, tf.shape(image_batch_origin)[1:3, ]),
tf.image.resize_images(parsing_out2_125, tf.shape(image_batch_origin)[1:3, ])]), axis=0)
parsing_out3 = tf.reduce_mean(
tf.stack([tf.image.resize_images(parsing_out3_100, tf.shape(image_batch_origin)[1:3, ]),
tf.image.resize_images(parsing_out3_075, tf.shape(image_batch_origin)[1:3, ]),
tf.image.resize_images(parsing_out3_125, tf.shape(image_batch_origin)[1:3, ])]), axis=0)
raw_output = tf.reduce_mean(tf.stack([parsing_out1, parsing_out2, parsing_out3]), axis=0)
head_output, tail_output = tf.unstack(raw_output, num=2, axis=0)
tail_list = tf.unstack(tail_output, num=20, axis=2)
tail_list_rev = [None] * 20
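    # Channels 14-19 encode left/right paired parts; swap them so the labels stay consistent after the horizontal flip.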
for xx in range(14):
tail_list_rev[xx] = tail_list[xx]
tail_list_rev[14] = tail_list[15]
tail_list_rev[15] = tail_list[14]
tail_list_rev[16] = tail_list[17]
tail_list_rev[17] = tail_list[16]
tail_list_rev[18] = tail_list[19]
tail_list_rev[19] = tail_list[18]
tail_output_rev = tf.stack(tail_list_rev, axis=2)
tail_output_rev = tf.reverse(tail_output_rev, tf.stack([1]))
raw_output_all = tf.reduce_mean(tf.stack([head_output, tail_output_rev]), axis=0)
    raw_output_all = tf.expand_dims(raw_output_all, axis=0)
    raw_output_all = tf.argmax(raw_output_all, axis=3)
    pred_all = tf.expand_dims(raw_output_all, axis=3)  # Create 4-d tensor.
# Which variables to load.
restore_var = tf.global_variables()
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
init = tf.global_variables_initializer()
sess.run(init)
sess.run(tf.local_variables_initializer())
# Load weights.
loader = tf.train.Saver(var_list=restore_var)
if RESTORE_FROM is not None:
if load(loader, sess, RESTORE_FROM):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
# Start queue threads.
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
# Iterate over training steps.
for step in range(NUM_STEPS):
parsing_ = sess.run(pred_all)
if step % 100 == 0:
print('step {:d}'.format(step))
print(image_list[step])
img_split = image_list[step].split('/')
img_id = img_split[-1][:-4]
msk = decode_labels(parsing_, num_classes=N_CLASSES)
parsing_im = Image.fromarray(msk[0])
# parsing_im.save('{}/{}_vis.png'.format(OUTPUT_DIR, img_id))
cv2.imwrite('{}/{}.png'.format(OUTPUT_DIR, img_id), parsing_[0, :, :, 0])
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
main()
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
misc/test/aws_test.go | // +build aws all
package test
import (
"encoding/base64"
"fmt"
"net/url"
"os"
"path"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pulumi/pulumi/pkg/v2/testing/integration"
"github.com/pulumi/pulumi/sdk/v2/go/common/resource"
"github.com/stretchr/testify/assert"
)
func TestAccAwsGoAssumeRole(t *testing.T) {
nanos := time.Now().UnixNano()
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-assume-role", "create-role"),
Config: map[string]string{
"create-role:unprivilegedUsername": fmt.Sprintf("unpriv-go-%d", nanos),
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoEks(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-eks"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["url"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello Kubernetes bootcamp!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoFargate(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-fargate"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["url"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Welcome to nginx!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoS3Folder(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-s3-folder"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["websiteUrl"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, world!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoS3FolderComponent(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-s3-folder-component"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["websiteUrl"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, world!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoWebserver(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-webserver"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["publicIp"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsCsAssumeRole(t *testing.T) {
nanos := time.Now().UnixNano()
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-cs-assume-role", "create-role"),
Config: map[string]string{
"create-role:unprivilegedUsername": fmt.Sprintf("unpriv-cs-%d", nanos),
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsCsS3Folder(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-cs-s3-folder"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["Endpoint"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, world!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsFsS3Folder(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-fs-s3-folder"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["endpoint"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, world!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsJsContainers(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-js-containers"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["frontendURL"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, Pulumi!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsJsS3Folder(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-js-s3-folder"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, "http://"+stack.Outputs["websiteUrl"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello, Pulumi!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsJsS3FolderComponent(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-js-s3-folder-component"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["websiteUrl"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello, Pulumi!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsJsSqsSlack(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-js-sqs-slack"),
Config: map[string]string{
"slackToken": "token",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsJsWebserver(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-js-webserver"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPHelloWorld(t, stack.Outputs["publicHostName"], nil)
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsJsWebserverComponent(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-js-webserver-component"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPHelloWorld(t, stack.Outputs["webUrl"], nil)
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsPyAppSync(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-py-appsync"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoAppSync(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-appsync"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 8 * time.Minute
endpoint := stack.Outputs["endpoint"].(string)
mutation := "mutation AddTenant { addTenant(id: \"123\", name: \"FirstCorp\") { id name } }"
finalURL := fmt.Sprintf("%s?query=%s", endpoint, url.QueryEscape(mutation))
key := stack.Outputs["key"].(string)
headersMap := map[string]string{
"Content-Type": "application/graphql",
"x-api-key": key,
}
assertHTTPResultShapeWithRetry(t, finalURL, headersMap, maxWait, func(body string) bool {
return !strings.Contains(body, "AccessDeniedException")
}, func(body string) bool {
return assert.Contains(t, body, "FirstCorp")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsPyAssumeRole(t *testing.T) {
nanos := time.Now().UnixNano()
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-py-assume-role", "create-role"),
Config: map[string]string{
"create-role:unprivilegedUsername": fmt.Sprintf("unpriv-py-%d", nanos),
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsPyResources(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-py-resources"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoResources(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-resources"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsPyS3Folder(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-py-s3-folder"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, "http://"+stack.Outputs["website_url"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello, Pulumi!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsPyStepFunctions(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-py-stepfunctions"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsPyWebserver(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-py-webserver"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, "http://"+stack.Outputs["public_dns"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello, World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsAirflow(t *testing.T) {
t.Skip("Skip due to failures initializing 20(!) instances")
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-airflow"),
Config: map[string]string{
"airflow:dbPassword": "secretP4ssword",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsApiGateway(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-apigateway"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["endpoint"].(string)
assertHTTPResultWithRetry(t, endpoint+"hello", nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "route")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsAppSync(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-appsync"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsAssumeRole(t *testing.T) {
nanos := time.Now().UnixNano()
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-assume-role", "create-role"),
Config: map[string]string{
"create-role:unprivilegedUsername": fmt.Sprintf("unpriv-%d", nanos),
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsContainers(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-containers"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 15 * time.Minute
endpoint := stack.Outputs["frontendURL"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, Pulumi!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsEc2Provisioners(t *testing.T) {
sess, err := session.NewSession(&aws.Config{
Region: aws.String(getAwsRegion())},
)
assert.NoError(t, err)
svc := ec2.New(sess)
keyName, err := resource.NewUniqueHex("test-keyname", 8, 20)
assert.NoError(t, err)
t.Logf("Creating keypair %s.\n", keyName)
key, err := svc.CreateKeyPair(&ec2.CreateKeyPairInput{
KeyName: aws.String(keyName),
})
assert.NoError(t, err)
defer func() {
t.Logf("Deleting keypair %s.\n", keyName)
_, err := svc.DeleteKeyPair(&ec2.DeleteKeyPairInput{
KeyName: aws.String(keyName),
})
assert.NoError(t, err)
}()
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-ec2-provisioners"),
Config: map[string]string{
"keyName": aws.StringValue(key.KeyName),
},
Secrets: map[string]string{
"privateKey": base64.StdEncoding.EncodeToString([]byte(aws.StringValue(key.KeyMaterial))),
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
catConfigStdout := stack.Outputs["catConfigStdout"].(string)
assert.Equal(t, "[test]\nx = 42\n", catConfigStdout)
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsEks(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-eks"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsEksHelloWorld(t *testing.T) {
t.Skip("Skip due to frequent failures: `timeout while waiting for state to become 'ACTIVE'`")
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-eks-hello-world"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["serviceHostname"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Welcome to nginx")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsHelloFargate(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-hello-fargate"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["url"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsPulumiWebhooks(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-pulumi-webhooks"),
Config: map[string]string{
"cloud:provider": "aws",
"aws-ts-pulumi-webhooks:slackChannel": "general",
"aws-ts-pulumi-webhooks:slackWebhook": "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX",
},
// TODO[pulumi/examples#859]: Currently this examples leads to a no-op preview diff of:
// ~ aws:apigateway:RestApi pulumi-webhook-handler update [diff: ~binaryMediaTypes]
// ++ aws:apigateway:Deployment pulumi-webhook-handler create replacement [diff: ~variables]
// +- aws:apigateway:Deployment pulumi-webhook-handler replace [diff: ~variables]
// ++ aws:lambda:Permission pulumi-webhook-handler-fa520765 create replacement [diff: ~sourceArn]
// +- aws:lambda:Permission pulumi-webhook-handler-fa520765 replace [diff: ~sourceArn]
// ++ aws:lambda:Permission pulumi-webhook-handler-c171fd88 create replacement [diff: ~sourceArn]
// +- aws:lambda:Permission pulumi-webhook-handler-c171fd88 replace [diff: ~sourceArn]
// ~ aws:apigateway:Stage pulumi-webhook-handler update [diff: ~deployment]
// -- aws:lambda:Permission pulumi-webhook-handler-fa520765 delete original [diff: ~sourceArn]
// -- aws:lambda:Permission pulumi-webhook-handler-c171fd88 delete original [diff: ~sourceArn]
// -- aws:apigateway:Deployment pulumi-webhook-handler delete original [diff: ~variables]
AllowEmptyPreviewChanges: true,
AllowEmptyUpdateChanges: true,
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsPulumiMiniflux(t *testing.T) {
t.Skip("Skip until ECS Service supports custom timeouts")
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-pulumi-miniflux"),
Config: map[string]string{
"aws-ts-pulumi-miniflux:db_name": "miniflux",
"aws-ts-pulumi-miniflux:db_username": "minifluxuser",
"aws-ts-pulumi-miniflux:db_password": "2Password2",
"aws-ts-pulumi-miniflux:admin_username": "adminuser",
"aws-ts-pulumi-miniflux:admin_password": "2Password2",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsResources(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-resources"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsS3LambdaCopyZip(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-s3-lambda-copyzip"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsSlackbot(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-slackbot"),
Config: map[string]string{
"mentionbot:slackToken": "XXX",
"mentionbot:verificationToken": "YYY",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsStepFunctions(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-stepfunctions"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsThumbnailer(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-thumbnailer"),
// TODO[pulumi/examples#859]: Currently this examples leads to a no-op preview diff of:
// ~ aws:lambda:Function onNewVideo update [diff: ~code]
AllowEmptyPreviewChanges: true,
AllowEmptyUpdateChanges: true,
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsTwitterAthena(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-twitter-athena"),
Config: map[string]string{
"aws-ts-twitter-athena:twitterConsumerKey": "12345",
"aws-ts-twitter-athena:twitterConsumerSecret": "xyz",
"aws-ts-twitter-athena:twitterAccessTokenKey": "12345",
"aws-ts-twitter-athena:twitterAccessTokenSecret": "xyz",
"aws-ts-twitter-athena:twitterQuery": "smurfs",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsLambdaEfs(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-lambda-efs"),
// TODO[pulumi/examples#859]: Currently this examples leads to a no-op preview diff of:
// ++ aws:ecs:TaskDefinition nginx create replacement [diff: ~volumes]
// +- aws:ecs:TaskDefinition nginx replace [diff: ~volumes]
// ~ aws:ecs:Service nginx update [diff: ~taskDefinition]
// -- aws:ecs:TaskDefinition nginx delete original [diff: ~volumes]
AllowEmptyPreviewChanges: true,
AllowEmptyUpdateChanges: true,
})
integration.ProgramTest(t, &test)
}
func getAWSBase(t *testing.T) integration.ProgramTestOptions {
awsRegion := getAwsRegion()
base := getBaseOptions(t)
awsBase := base.With(integration.ProgramTestOptions{
Config: map[string]string{
"aws:region": awsRegion,
},
})
return awsBase
}
func getAwsRegion() string {
awsRegion := os.Getenv("AWS_REGION")
if awsRegion == "" {
awsRegion = "us-west-1"
fmt.Println("Defaulting AWS_REGION to 'us-west-1'. You can override using the AWS_REGION environment variable")
}
return awsRegion
}
| [
"\"AWS_REGION\""
] | [] | [
"AWS_REGION"
] | [] | ["AWS_REGION"] | go | 1 | 0 | |
cmd/ipfswatch/main.go | package main
import (
"flag"
"log"
"os"
"os/signal"
"path/filepath"
context "context"
homedir "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/mitchellh/go-homedir"
commands "github.com/ipfs/go-ipfs/commands"
core "github.com/ipfs/go-ipfs/core"
corehttp "github.com/ipfs/go-ipfs/core/corehttp"
coreunix "github.com/ipfs/go-ipfs/core/coreunix"
config "github.com/ipfs/go-ipfs/repo/config"
fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess"
fsnotify "gx/ipfs/QmczzCMvJ3HV57WBKDy8b4ucp7quT325JjDbixYRS5Pwvv/fsnotify.v1"
)
var http = flag.Bool("http", false, "expose IPFS HTTP API")
var repoPath = flag.String("repo", os.Getenv("IPFS_PATH"), "IPFS_PATH to use")
var watchPath = flag.String("path", ".", "the path to watch")
func main() {
flag.Parse()
// precedence
// 1. --repo flag
// 2. IPFS_PATH environment variable
// 3. default repo path
var ipfsPath string
if *repoPath != "" {
ipfsPath = *repoPath
} else {
var err error
ipfsPath, err = fsrepo.BestKnownPath()
if err != nil {
log.Fatal(err)
}
}
if err := run(ipfsPath, *watchPath); err != nil {
log.Fatal(err)
}
}
func run(ipfsPath, watchPath string) error {
proc := process.WithParent(process.Background())
log.Printf("running IPFSWatch on '%s' using repo at '%s'...", watchPath, ipfsPath)
ipfsPath, err := homedir.Expand(ipfsPath)
if err != nil {
return err
}
watcher, err := fsnotify.NewWatcher()
if err != nil {
return err
}
defer watcher.Close()
if err := addTree(watcher, watchPath); err != nil {
return err
}
r, err := fsrepo.Open(ipfsPath)
if err != nil {
// TODO handle case: daemon running
// TODO handle case: repo doesn't exist or isn't initialized
return err
}
node, err := core.NewNode(context.Background(), &core.BuildCfg{
Online: true,
Repo: r,
})
if err != nil {
return err
}
defer node.Close()
if *http {
addr := "/ip4/127.0.0.1/tcp/5001"
var opts = []corehttp.ServeOption{
corehttp.GatewayOption(true, "/ipfs", "/ipns"),
corehttp.WebUIOption,
corehttp.CommandsOption(cmdCtx(node, ipfsPath)),
}
proc.Go(func(p process.Process) {
if err := corehttp.ListenAndServe(node, addr, opts...); err != nil {
return
}
})
}
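	// Catch interrupt signals so the event loop below can shut down cleanly.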
	interrupts := make(chan os.Signal, 1) // signal.Notify requires a buffered channel
signal.Notify(interrupts, os.Interrupt, os.Kill)
for {
select {
case <-interrupts:
return nil
case e := <-watcher.Events:
log.Printf("received event: %s", e)
isDir, err := IsDirectory(e.Name)
if err != nil {
continue
}
switch e.Op {
case fsnotify.Remove:
if isDir {
if err := watcher.Remove(e.Name); err != nil {
return err
}
}
default:
// all events except for Remove result in an IPFS.Add, but only
// directory creation triggers a new watch
switch e.Op {
case fsnotify.Create:
if isDir {
addTree(watcher, e.Name)
}
}
proc.Go(func(p process.Process) {
file, err := os.Open(e.Name)
if err != nil {
log.Println(err)
}
defer file.Close()
k, err := coreunix.Add(node, file)
if err != nil {
log.Println(err)
}
log.Printf("added %s... key: %s", e.Name, k)
})
}
case err := <-watcher.Errors:
log.Println(err)
}
}
return nil
}
func addTree(w *fsnotify.Watcher, root string) error {
err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
isDir, err := IsDirectory(path)
if err != nil {
log.Println(err)
return nil
}
switch {
case isDir && IsHidden(path):
log.Println(path)
return filepath.SkipDir
case isDir:
log.Println(path)
if err := w.Add(path); err != nil {
return err
}
default:
return nil
}
return nil
})
if err != nil {
return err
}
return nil
}
func IsDirectory(path string) (bool, error) {
fileInfo, err := os.Stat(path)
return fileInfo.IsDir(), err
}
func IsHidden(path string) bool {
path = filepath.Base(path)
if path == "." || path == "" {
return false
}
if rune(path[0]) == rune('.') {
return true
}
return false
}
func cmdCtx(node *core.IpfsNode, repoPath string) commands.Context {
return commands.Context{
Online: true,
ConfigRoot: repoPath,
LoadConfig: func(path string) (*config.Config, error) {
return node.Repo.Config()
},
ConstructNode: func() (*core.IpfsNode, error) {
return node, nil
},
}
}
| [
"\"IPFS_PATH\""
] | [] | [
"IPFS_PATH"
] | [] | ["IPFS_PATH"] | go | 1 | 0 | |
presto-cli/src/main/java/com/facebook/presto/cli/Console.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.cli;
import com.facebook.presto.cli.ClientOptions.OutputFormat;
import com.facebook.presto.client.ClientSession;
import com.facebook.presto.sql.parser.IdentifierSymbol;
import com.facebook.presto.sql.parser.ParsingException;
import com.facebook.presto.sql.parser.SqlParser;
import com.facebook.presto.sql.parser.SqlParserOptions;
import com.facebook.presto.sql.parser.StatementSplitter;
import com.facebook.presto.sql.tree.Use;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableSet;
import com.google.common.io.Files;
import io.airlift.airline.Command;
import io.airlift.airline.HelpOption;
import io.airlift.http.client.spnego.KerberosConfig;
import io.airlift.log.Logging;
import io.airlift.log.LoggingConfiguration;
import io.airlift.units.Duration;
import jline.console.history.FileHistory;
import jline.console.history.History;
import jline.console.history.MemoryHistory;
import org.fusesource.jansi.AnsiConsole;
import javax.inject.Inject;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Pattern;
import static com.facebook.presto.cli.Completion.commandCompleter;
import static com.facebook.presto.cli.Completion.lowerCaseCommandCompleter;
import static com.facebook.presto.cli.Help.getHelpText;
import static com.facebook.presto.cli.QueryPreprocessor.preprocessQuery;
import static com.facebook.presto.client.ClientSession.stripTransactionId;
import static com.facebook.presto.client.ClientSession.withCatalogAndSchema;
import static com.facebook.presto.client.ClientSession.withPreparedStatements;
import static com.facebook.presto.client.ClientSession.withProperties;
import static com.facebook.presto.client.ClientSession.withTransactionId;
import static com.facebook.presto.sql.parser.StatementSplitter.Statement;
import static com.facebook.presto.sql.parser.StatementSplitter.isEmptyStatement;
import static com.facebook.presto.sql.parser.StatementSplitter.squeezeStatement;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.io.ByteStreams.nullOutputStream;
import static java.lang.Integer.parseInt;
import static java.lang.String.format;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Locale.ENGLISH;
import static java.util.concurrent.TimeUnit.SECONDS;
import static jline.internal.Configuration.getUserHome;
@Command(name = "presto", description = "Presto interactive console")
public class Console
implements Runnable
{
private static final String PROMPT_NAME = "presto";
private static final Duration EXIT_DELAY = new Duration(3, SECONDS);
// create a parser with all identifier options enabled, since this is only used for USE statements
private static final SqlParser SQL_PARSER = new SqlParser(new SqlParserOptions().allowIdentifierSymbol(EnumSet.allOf(IdentifierSymbol.class)));
private static final Pattern HISTORY_INDEX_PATTERN = Pattern.compile("!\\d+");
@Inject
public HelpOption helpOption;
@Inject
public VersionOption versionOption = new VersionOption();
@Inject
public ClientOptions clientOptions = new ClientOptions();
@Override
public void run()
{
ClientSession session = clientOptions.toClientSession();
KerberosConfig kerberosConfig = clientOptions.toKerberosConfig();
boolean hasQuery = !Strings.isNullOrEmpty(clientOptions.execute);
boolean isFromFile = !Strings.isNullOrEmpty(clientOptions.file);
if (!hasQuery && !isFromFile) {
AnsiConsole.systemInstall();
}
initializeLogging(clientOptions.logLevelsFile);
String query = clientOptions.execute;
if (hasQuery) {
query += ";";
}
if (isFromFile) {
if (hasQuery) {
throw new RuntimeException("both --execute and --file specified");
}
try {
query = Files.toString(new File(clientOptions.file), UTF_8);
hasQuery = true;
}
catch (IOException e) {
throw new RuntimeException(format("Error reading from file %s: %s", clientOptions.file, e.getMessage()));
}
}
AtomicBoolean exiting = new AtomicBoolean();
interruptThreadOnExit(Thread.currentThread(), exiting);
try (QueryRunner queryRunner = QueryRunner.create(
session,
Optional.ofNullable(clientOptions.socksProxy),
Optional.ofNullable(clientOptions.keystorePath),
Optional.ofNullable(clientOptions.keystorePassword),
Optional.ofNullable(clientOptions.truststorePath),
Optional.ofNullable(clientOptions.truststorePassword),
Optional.ofNullable(clientOptions.user),
clientOptions.password ? Optional.of(getPassword()) : Optional.empty(),
Optional.ofNullable(clientOptions.krb5Principal),
Optional.ofNullable(clientOptions.krb5RemoteServiceName),
clientOptions.authenticationEnabled,
kerberosConfig)) {
if (hasQuery) {
executeCommand(queryRunner, query, clientOptions.outputFormat);
}
else {
runConsole(queryRunner, session, exiting);
}
}
}
private String getPassword()
{
checkState(clientOptions.user != null, "Username must be specified along with password");
String defaultPassword = System.getenv("PRESTO_PASSWORD");
if (defaultPassword != null) {
return defaultPassword;
}
java.io.Console console = System.console();
if (console == null) {
throw new RuntimeException("No console from which to read password");
}
char[] password = console.readPassword("Password: ");
if (password != null) {
return new String(password);
}
return "";
}
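    // Resolution order, as implemented above: the PRESTO_PASSWORD environment
    // variable wins; otherwise the password is prompted for on the attached
    // console. Illustrative shell invocation (exact flags depend on ClientOptions):
    //   PRESTO_PASSWORD=secret presto --user alice --password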
private static void runConsole(QueryRunner queryRunner, ClientSession session, AtomicBoolean exiting)
{
try (TableNameCompleter tableNameCompleter = new TableNameCompleter(queryRunner);
LineReader reader = new LineReader(getHistory(), commandCompleter(), lowerCaseCommandCompleter(), tableNameCompleter)) {
tableNameCompleter.populateCache();
StringBuilder buffer = new StringBuilder();
while (!exiting.get()) {
// read a line of input from user
String prompt = PROMPT_NAME;
if (session.getSchema() != null) {
prompt += ":" + session.getSchema();
}
if (buffer.length() > 0) {
prompt = Strings.repeat(" ", prompt.length() - 1) + "-";
}
String commandPrompt = prompt + "> ";
String line = reader.readLine(commandPrompt);
// add buffer to history and clear on user interrupt
if (reader.interrupted()) {
String partial = squeezeStatement(buffer.toString());
if (!partial.isEmpty()) {
reader.getHistory().add(partial);
}
buffer = new StringBuilder();
continue;
}
// exit on EOF
if (line == null) {
System.out.println();
return;
}
// check for special commands if this is the first line
if (buffer.length() == 0) {
String command = line.trim();
if (HISTORY_INDEX_PATTERN.matcher(command).matches()) {
int historyIndex = parseInt(command.substring(1));
History history = reader.getHistory();
if ((historyIndex <= 0) || (historyIndex > history.index())) {
System.err.println("Command does not exist");
continue;
}
line = history.get(historyIndex - 1).toString();
System.out.println(commandPrompt + line);
}
if (command.endsWith(";")) {
command = command.substring(0, command.length() - 1).trim();
}
switch (command.toLowerCase(ENGLISH)) {
case "exit":
case "quit":
return;
case "history":
for (History.Entry entry : reader.getHistory()) {
System.out.printf("%5d %s%n", entry.index() + 1, entry.value());
}
continue;
case "help":
System.out.println();
System.out.println(getHelpText());
continue;
}
}
// not a command, add line to buffer
buffer.append(line).append("\n");
// execute any complete statements
String sql = buffer.toString();
StatementSplitter splitter = new StatementSplitter(sql, ImmutableSet.of(";", "\\G"));
for (Statement split : splitter.getCompleteStatements()) {
Optional<Object> statement = getParsedStatement(split.statement());
if (statement.isPresent() && isSessionParameterChange(statement.get())) {
Map<String, String> properties = queryRunner.getSession().getProperties();
Map<String, String> preparedStatements = queryRunner.getSession().getPreparedStatements();
session = processSessionParameterChange(statement.get(), session, properties, preparedStatements);
queryRunner.setSession(session);
tableNameCompleter.populateCache();
}
else {
OutputFormat outputFormat = OutputFormat.ALIGNED;
if (split.terminator().equals("\\G")) {
outputFormat = OutputFormat.VERTICAL;
}
process(queryRunner, split.statement(), outputFormat, true);
}
reader.getHistory().add(squeezeStatement(split.statement()) + split.terminator());
}
// replace buffer with trailing partial statement
buffer = new StringBuilder();
String partial = splitter.getPartialStatement();
if (!partial.isEmpty()) {
buffer.append(partial).append('\n');
}
}
}
catch (IOException e) {
System.err.println("Readline error: " + e.getMessage());
}
}
private static Optional<Object> getParsedStatement(String statement)
{
try {
return Optional.of((Object) SQL_PARSER.createStatement(statement));
}
catch (ParsingException e) {
return Optional.empty();
}
}
static ClientSession processSessionParameterChange(Object parsedStatement, ClientSession session, Map<String, String> existingProperties, Map<String, String> existingPreparedStatements)
{
if (parsedStatement instanceof Use) {
Use use = (Use) parsedStatement;
session = withCatalogAndSchema(session, use.getCatalog().orElse(session.getCatalog()), use.getSchema());
session = withProperties(session, existingProperties);
session = withPreparedStatements(session, existingPreparedStatements);
}
return session;
}
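    // Example (illustrative): for a parsed "USE hive.default", the returned
    // session points at catalog "hive" and schema "default", with the existing
    // properties and prepared statements carried over unchanged.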
private static boolean isSessionParameterChange(Object statement)
{
return statement instanceof Use;
}
private static void executeCommand(QueryRunner queryRunner, String query, OutputFormat outputFormat)
{
StatementSplitter splitter = new StatementSplitter(query);
for (Statement split : splitter.getCompleteStatements()) {
if (!isEmptyStatement(split.statement())) {
process(queryRunner, split.statement(), outputFormat, false);
}
}
if (!isEmptyStatement(splitter.getPartialStatement())) {
System.err.println("Non-terminated statement: " + splitter.getPartialStatement());
}
}
private static void process(QueryRunner queryRunner, String sql, OutputFormat outputFormat, boolean interactive)
{
String finalSql;
try {
finalSql = preprocessQuery(
Optional.ofNullable(queryRunner.getSession().getCatalog()),
Optional.ofNullable(queryRunner.getSession().getSchema()),
sql);
}
catch (QueryPreprocessorException e) {
System.err.println(e.getMessage());
if (queryRunner.getSession().isDebug()) {
e.printStackTrace();
}
return;
}
try (Query query = queryRunner.startQuery(finalSql)) {
query.renderOutput(System.out, outputFormat, interactive);
ClientSession session = queryRunner.getSession();
// update session properties if present
if (!query.getSetSessionProperties().isEmpty() || !query.getResetSessionProperties().isEmpty()) {
Map<String, String> sessionProperties = new HashMap<>(session.getProperties());
sessionProperties.putAll(query.getSetSessionProperties());
sessionProperties.keySet().removeAll(query.getResetSessionProperties());
session = withProperties(session, sessionProperties);
}
// update prepared statements if present
if (!query.getAddedPreparedStatements().isEmpty() || !query.getDeallocatedPreparedStatements().isEmpty()) {
Map<String, String> preparedStatements = new HashMap<>(session.getPreparedStatements());
preparedStatements.putAll(query.getAddedPreparedStatements());
preparedStatements.keySet().removeAll(query.getDeallocatedPreparedStatements());
session = withPreparedStatements(session, preparedStatements);
}
// update transaction ID if necessary
if (query.isClearTransactionId()) {
session = stripTransactionId(session);
}
if (query.getStartedTransactionId() != null) {
session = withTransactionId(session, query.getStartedTransactionId());
}
queryRunner.setSession(session);
}
catch (RuntimeException e) {
System.err.println("Error running command: " + e.getMessage());
if (queryRunner.getSession().isDebug()) {
e.printStackTrace();
}
}
}
private static MemoryHistory getHistory()
{
MemoryHistory history;
File historyFile = new File(getUserHome(), ".presto_history");
try {
history = new FileHistory(historyFile);
history.setMaxSize(10000);
}
catch (IOException e) {
System.err.printf("WARNING: Failed to load history file (%s): %s. " +
"History will not be available during this session.%n",
historyFile, e.getMessage());
history = new MemoryHistory();
}
history.setAutoTrim(true);
return history;
}
private static void initializeLogging(String logLevelsFile)
{
// unhook out and err while initializing logging or logger will print to them
PrintStream out = System.out;
PrintStream err = System.err;
try {
LoggingConfiguration config = new LoggingConfiguration();
if (logLevelsFile == null) {
System.setOut(new PrintStream(nullOutputStream()));
System.setErr(new PrintStream(nullOutputStream()));
config.setConsoleEnabled(false);
}
else {
config.setLevelsFile(logLevelsFile);
}
Logging logging = Logging.initialize();
logging.configure(config);
}
catch (IOException e) {
throw Throwables.propagate(e);
}
finally {
System.setOut(out);
System.setErr(err);
}
}
private static void interruptThreadOnExit(Thread thread, AtomicBoolean exiting)
{
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
exiting.set(true);
thread.interrupt();
try {
thread.join(EXIT_DELAY.toMillis());
}
catch (InterruptedException ignored) {
}
}));
}
}
| [
"\"PRESTO_PASSWORD\""
] | [] | [
"PRESTO_PASSWORD"
] | [] | ["PRESTO_PASSWORD"] | java | 1 | 0 | |
internal/provider/resource_runscope_bucket_test.go | package provider
import (
"context"
"fmt"
"github.com/terraform-providers/terraform-provider-runscope/internal/runscope"
"os"
"strings"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
func init() {
resource.AddTestSweepers("runscope_bucket", &resource.Sweeper{
Name: "runscope_bucket",
F: testAccSweepBuckets,
})
}
func TestAccBucket_basic(t *testing.T) {
var bucket runscope.Bucket
teamId := os.Getenv("RUNSCOPE_TEAM_ID")
bucketName := testAccRandomBucketName()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckBucketDestroy,
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(testAccRunscopeBucketBasicConfig, bucketName, teamId),
Check: resource.ComposeTestCheckFunc(
testAccCheckBucketExists("runscope_bucket.bucket", &bucket),
resource.TestCheckResourceAttr("runscope_bucket.bucket", "name", bucketName),
resource.TestCheckResourceAttr("runscope_bucket.bucket", "team_uuid", teamId),
resource.TestCheckResourceAttrSet("runscope_bucket.bucket", "default"),
resource.TestCheckResourceAttrSet("runscope_bucket.bucket", "verify_ssl"),
resource.TestCheckResourceAttrSet("runscope_bucket.bucket", "trigger_url"),
),
},
{
ResourceName: "runscope_bucket.bucket",
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func testAccCheckBucketDestroy(s *terraform.State) error {
ctx := context.Background()
client := testAccProvider.Meta().(*providerConfig).client
for _, rs := range s.RootModule().Resources {
if rs.Type == "runscope_bucket" {
if _, err := client.Bucket.Get(ctx, &runscope.BucketGetOpts{Key: rs.Primary.ID}); err == nil {
return fmt.Errorf("Record %s still exists", rs.Primary.ID)
}
}
}
return nil
}
func testAccCheckBucketExists(n string, b *runscope.Bucket) resource.TestCheckFunc {
return func(s *terraform.State) error {
ctx := context.Background()
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No Record ID is set")
}
client := testAccProvider.Meta().(*providerConfig).client
bucket, err := client.Bucket.Get(ctx, &runscope.BucketGetOpts{Key: rs.Primary.ID})
if err != nil {
return err
}
if bucket.Key != rs.Primary.ID {
return fmt.Errorf("Record not found")
}
*b = *bucket
return nil
}
}
const testAccRunscopeBucketBasicConfig = `
resource "runscope_bucket" "bucket" {
name = "%s"
team_uuid = "%s"
}`
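// With illustrative inputs bucketName="test-bucket" and teamId="0000-1111",
// the template above renders to:
//
//	resource "runscope_bucket" "bucket" {
//	  name = "test-bucket"
//	  team_uuid = "0000-1111"
//	}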
func testAccSweepBuckets(_ string) error {
ctx := context.Background()
client := runscope.NewClient(runscope.WithToken(os.Getenv("RUNSCOPE_ACCESS_TOKEN")))
buckets, err := client.Bucket.List(ctx)
if err != nil {
		return fmt.Errorf("couldn't list buckets for sweeping: %w", err)
}
for _, bucket := range buckets {
if !(strings.HasPrefix(bucket.Name, testAccBucketNamePrefix) || bucket.Name == "terraform-provider-test") {
continue
}
opts := &runscope.BucketDeleteOpts{}
opts.Key = bucket.Key
if err := client.Bucket.Delete(ctx, opts); err != nil {
return err
}
}
return nil
}
| [
"\"RUNSCOPE_TEAM_ID\"",
"\"RUNSCOPE_ACCESS_TOKEN\""
] | [] | [
"RUNSCOPE_TEAM_ID",
"RUNSCOPE_ACCESS_TOKEN"
] | [] | ["RUNSCOPE_TEAM_ID", "RUNSCOPE_ACCESS_TOKEN"] | go | 2 | 0 | |
api/tissue_test.go | package api
import (
"context"
"os"
"testing"
"time"
)
func TestClient_CheckIn(t *testing.T) {
client, err := NewClient(&ClientOption{
BaseURL: os.Getenv("TISSUE_BASE_URL"),
WebhookID: os.Getenv("TISSUE_WEBHOOK_ID"),
})
if err != nil {
t.Fatal(err)
}
if client == nil {
t.Fatal("nil client")
}
checkIn, err := client.CheckIn(context.TODO(), &CheckInOption{
DateTime: time.Now(),
Tags: []string{"test", "hoge"},
Link: "https://github.com/mohemohe/go-tissue",
Note: "go-tissue test checkin",
Private: true,
TooSensitive: false,
})
if err != nil {
t.Fatal(err)
}
if checkIn == nil {
t.Error("something wrong")
}
if checkIn.ID == 0 {
t.Error("something wrong")
}
}
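// To exercise this against a live endpoint (values are placeholders):
//
//	TISSUE_BASE_URL=https://tissue.example TISSUE_WEBHOOK_ID=xxxx \
//		go test ./api -run TestClient_CheckIn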
| [
"\"TISSUE_BASE_URL\"",
"\"TISSUE_WEBHOOK_ID\""
] | [] | [
"TISSUE_WEBHOOK_ID",
"TISSUE_BASE_URL"
] | [] | ["TISSUE_WEBHOOK_ID", "TISSUE_BASE_URL"] | go | 2 | 0 | |
config/config.py | import os
STATIC_FILES = {
'css': 'public/static/css',
'script': 'public/static/js',
'images': 'public/static/images',
'template': 'public/templates'
}
# Get the session driver from the environment (defaults to "memory").
SESSION_DRIVER = os.getenv('SESSION_DRIVER', 'memory')
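# For example, to select a hypothetical Redis-backed session store, export the
# driver name before launching the app (entry-point name is illustrative):
#
#   SESSION_DRIVER=redis python app.py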
| [] | [] | [
"SESSION_DRIVER"
] | [] | ["SESSION_DRIVER"] | python | 1 | 0 | |
internal/gitflow/gitflow_internal_test.go | package gitflow
import (
"fmt"
"os"
"os/exec"
"strings"
"testing"
)
const (
errPathTest = "/home/error/path"
okPathTest = "/home/fake/path/to/git/repository"
commitTest = "9b7f1bbc8d82ef98bbb15e86f3ccb704ec35720a"
remoteTagTest = "v1.2.4"
tagTest = "v1.2.3"
)
var errPathTests = []struct {
path string // input
}{
{""}, // empty path
{" "}, // empty path (only space)
{errPathTest}, // path
{" " + errPathTest}, // path with leading space
{errPathTest + " "}, // path ending by space
}
var okPathTests = []struct {
path string // input
}{
{okPathTest}, // path
{" " + okPathTest}, // path with leading space
{okPathTest + " "}, // path ending by space
}
var commitTests = []struct {
id string // input
tag string // output
}{
{"", tagTest}, // no commit
{" ", tagTest}, // no commit (ony space)
{" " + commitTest, remoteTagTest}, // commit with leading space
{commitTest, remoteTagTest}, // commit
}
var tagTests = []struct {
tag string // input
}{
{""},
{" " + gitTagFolder + remoteTagTest},
{gitTagFolder + remoteTagTest + " "},
}
// fakeExecCommand returns a mock of the exec command.
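// It uses the standard helper-process pattern: the returned command re-invokes
// the current test binary restricted to TestHelperProcess (via -test.run), and
// sets GO_WANT_HELPER_PROCESS=1 so the helper only acts when invoked this way.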
func fakeExecCommand(command string, args ...string) *exec.Cmd {
cs := []string{"-test.run=TestHelperProcess", "--", command}
cs = append(cs, args...)
cmd := exec.Command(os.Args[0], cs...)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
return cmd
}
// TestNewRepo tests the method that returns a structure for the Git repository.
func TestNewRepo(t *testing.T) {
execCommand = fakeExecCommand
// Restore exec command behavior at the end of the test.
defer func() { execCommand = exec.Command }()
// Checks with various incorrect paths.
for _, tp := range errPathTests {
if _, err := NewRepo(tp.path); err == nil {
t.Errorf("Expected error with invalid path '%v'", tp.path)
}
}
// Checks with valid paths
for _, tp := range okPathTests {
if r, err := NewRepo(tp.path); err != nil {
t.Errorf("Expected no error with valid path '%v', got: %v", tp.path, err)
} else if r.path != okPathTest {
t.Errorf("Expected a valid repository with path '%v'", tp.path)
}
}
}
// TestRepo_CheckoutTag tests the method dedicated to checkout the given tag on the current repository.
func TestRepo_CheckoutTag(t *testing.T) {
execCommand = fakeExecCommand
// Restore exec command behavior at the end of the test.
defer func() { execCommand = exec.Command }()
// Checks with valid paths
for _, tp := range okPathTests {
if r, err := NewRepo(tp.path); err != nil {
t.Errorf("Expected no error with valid path '%v', got: %v", tp.path, err)
} else if err := r.CheckoutTag(""); err == nil {
t.Error("Expected error with empty commit")
} else if err := r.CheckoutTag(remoteTagTest); err != nil {
t.Errorf("Expected no error with valid tag '%v', got: %v", remoteTagTest, err)
} else if err := r.CheckoutTag(tagTest); err == nil {
t.Error("Expected error with unknown tag on local repository")
}
}
}
// TestRepo_LocalTag tests the method dedicated to get the local tag of current repository.
func TestRepo_LocalTag(t *testing.T) {
execCommand = fakeExecCommand
// Restore exec command behavior at the end of the test.
defer func() { execCommand = exec.Command }()
// Checks with valid paths
for _, tp := range okPathTests {
if r, err := NewRepo(tp.path); err != nil {
t.Errorf("Expected no error with valid path '%v', got: %v", tp.path, err)
} else if tag, err := r.LocalTag(); err != nil {
t.Errorf("Expected no error, got '%v'", err)
} else if tag != tagTest {
t.Errorf("Expected tag '%v', got '%v'", tagTest, tag)
}
}
}
// TestRepo_LastTag tests the method dedicated to get the latest remote tag of current repository.
func TestRepo_LastTag(t *testing.T) {
execCommand = fakeExecCommand
// Restore exec command behavior at the end of the test.
defer func() { execCommand = exec.Command }()
// Checks with valid paths
for _, tp := range okPathTests {
if r, err := NewRepo(tp.path); err != nil {
t.Errorf("Expected no error with valid path '%v', got: %v", tp.path, err)
} else if tag, err := r.LastTag(); err != nil {
t.Errorf("Expected no error, got '%v'", err)
} else if tag != remoteTagTest {
t.Errorf("Expected tag '%v', got '%v'", remoteTagTest, tag)
}
}
}
// TestGitCheck tests the internal method dedicated to verify if the given path is a Git repository.
func TestGitCheck(t *testing.T) {
execCommand = fakeExecCommand
// Restore exec command behavior at the end of the test.
defer func() { execCommand = exec.Command }()
// Checks with incorrect path.
r := new(Repo)
r.path = errPathTest
if err := r.gitCheck(); err == nil {
t.Errorf("Expected error with invalid path '%v'", errPathTest)
}
// Checks with valid path
r = new(Repo)
r.path = okPathTest
if err := r.gitCheck(); err != nil {
t.Errorf("Expected nil error, got %v", err)
}
}
// TestGitCheckout tests the internal method dedicated to git checkout.
func TestGitCheckout(t *testing.T) {
execCommand = fakeExecCommand
// Restore exec command behavior at the end of the test.
defer func() { execCommand = exec.Command }()
// Checks with incorrect path.
r := new(Repo)
r.path = errPathTest
for _, c := range tagTests {
if err := r.gitCheckout(c.tag); err == nil {
t.Errorf("Expected error with branch '%v' on invalid Git path '%v'", c.tag, errPathTest)
}
}
// Checks with valid path
r = new(Repo)
r.path = okPathTest
for _, c := range tagTests {
if err := r.gitCheckout(c.tag); err != nil {
t.Errorf("Expected no error with valid path '%v' and branch '%v', got: %v", okPathTest, c.tag, err)
}
}
}
// TestGitDescribe tests the internal method dedicated to git describe.
func TestGitDescribe(t *testing.T) {
execCommand = fakeExecCommand
// Restore exec command behavior at the end of the test.
defer func() { execCommand = exec.Command }()
// Checks with incorrect path.
r := new(Repo)
r.path = errPathTest
for _, c := range commitTests {
if _, err := r.gitDescribe(c.id); err == nil {
t.Errorf("Expected error with commit '%v' on invalid Git path '%v'", c.id, errPathTest)
}
}
// Checks with valid path
r = new(Repo)
r.path = okPathTest
for _, c := range commitTests {
if out, err := r.gitDescribe(c.id); err != nil {
			t.Errorf("Expected no error with valid path '%v' and commit '%v', got: %v", okPathTest, c.id, err)
} else if tag := string(out); tag != c.tag {
t.Errorf("Expected tag '%v' for the local Git repository '%v', got: %v", c.tag, okPathTest, tag)
}
}
}
// TestGitFetch tests the internal method dedicated to git fetch.
func TestGitFetch(t *testing.T) {
execCommand = fakeExecCommand
// Restore exec command behavior at the end of the test.
defer func() { execCommand = exec.Command }()
// Checks with incorrect path.
r := new(Repo)
r.path = errPathTest
if err := r.gitFetch(); err == nil {
t.Errorf("Expected error on invalid Git path '%v'", errPathTest)
}
// Checks with valid path
r = new(Repo)
r.path = okPathTest
if err := r.gitFetch(); err != nil {
t.Errorf("Expected no error with valid path '%v', got: %v", okPathTest, err)
}
}
// TestGitStatus tests the internal method dedicated to git status.
func TestGitStatus(t *testing.T) {
execCommand = fakeExecCommand
// Restore exec command behavior at the end of the test.
defer func() { execCommand = exec.Command }()
// Checks with incorrect path.
r := new(Repo)
r.path = errPathTest
if _, err := r.gitStatus(); err == nil {
t.Errorf("Expected error with invalid path '%v'", errPathTest)
}
// Checks with valid path
r = new(Repo)
r.path = okPathTest
if out, err := r.gitStatus(); err != nil {
t.Errorf("Expected nil error, got: %v", err)
} else if string(out) == "" {
t.Errorf("Expected status message about the valid local Git repository: %v", okPathTest)
}
}
// TestHelperProcess mocks exec commands and responds instead of the command git.
func TestHelperProcess(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
// Extract only exec arguments.
args := os.Args
for len(args) > 0 {
if args[0] == "--" {
args = args[1:]
break
}
args = args[1:]
}
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "No command\n")
os.Exit(2)
}
cmd, args := args[0], args[1:]
	// Only handle the git command invoked as "git -C <path> ...".
	if cmd != "git" || args[0] != "-C" {
		fmt.Fprintf(os.Stderr, "fatal: Not a Git command, received: %v\n", cmd)
		os.Exit(1)
	}
// Manage exit status on error on "invalid" Git path.
if strings.HasPrefix(args[1], errPathTest) {
fmt.Fprintf(os.Stderr, "fatal: Not a git repository %v (or any of the parent directories): .git\n", args[1])
os.Exit(1)
}
// Manage each git sub-commands.
switch args[2] {
case "checkout":
switch len(args) {
case 4:
if args[3] == gitTagFolder+remoteTagTest {
fmt.Fprintf(os.Stdout, "note: checking out '%v'.", remoteTagTest)
} else {
fmt.Fprintf(os.Stderr, "error: pathspec '%v' did not match any file(s) known to git.\n", args[3])
os.Exit(1)
}
case 3:
fmt.Fprintf(os.Stdout, "Your branch is up-to-date with '%v%v'.", gitTagFolder, tagTest)
default:
fmt.Fprintf(os.Stderr, "fatal: Not a git repository (or any of the parent directories): .git\n")
os.Exit(1)
}
case "describe":
if args[3] == "--abbrev=0" && args[4] == "--tags" {
if len(args) == 6 {
fmt.Fprint(os.Stdout, remoteTagTest+"\n")
} else {
fmt.Fprint(os.Stdout, tagTest+"\n")
}
}
case "fetch":
fmt.Fprint(os.Stdout, "\n")
case "status":
if len(args) == 3 {
fmt.Fprint(os.Stdout, "On branch stable\n")
}
case "rev-list":
if args[3] == "--tags" && args[4] == "--max-count=1" {
fmt.Fprint(os.Stdout, commitTest+"\n")
}
default:
fmt.Fprintf(os.Stderr, "fatal: Not a git sub-command (%v)\n", args[2])
os.Exit(1)
}
}
| [
"\"GO_WANT_HELPER_PROCESS\""
] | [] | [
"GO_WANT_HELPER_PROCESS"
] | [] | ["GO_WANT_HELPER_PROCESS"] | go | 1 | 0 | |
adapters/slack/slackAdapter.go | package slack
import (
"log"
"os"
"strings"
"github.com/alexandrebodin/gilibot"
slack "github.com/alexandrebodin/slack_rtm"
)
// Adapter defines a bot adpater to receive data from slack chat
type Adapter struct {
bot *gilibot.Bot
client *slack.SlackClient
}
// New returns a new slack adapter
func New(b *gilibot.Bot) *Adapter {
return &Adapter{bot: b}
}
type slackHandler struct {
bot *gilibot.Bot
}
func (h *slackHandler) OnMessage(c *slack.SlackContext, m *slack.MessageType) error {
msg := gilibot.Message{
Channel: m.Channel,
User: m.User,
		Text:    strings.Replace(m.Text, "&amp;", "&", -1),
}
h.bot.ReceiveMessage(msg)
return nil
}
// Start starts slack adapter
func (s *Adapter) Start() error {
token := os.Getenv("GILIBOT_SLACK_TOKEN")
if token == "" {
log.Fatal("slack token is missing")
}
slackClient, err := slack.New(token)
if err != nil {
return err
}
s.client = slackClient
h := &slackHandler{bot: s.bot}
slackClient.AddListener(slack.MessageEvent, h)
err = slackClient.Run()
if err != nil {
return err
}
return nil
}
// Reply send back and answer to slack
func (s *Adapter) Reply(msg gilibot.Message, message string) error {
resp := slack.ResponseMessage{
Id: "1",
Type: "message",
Text: ">>>" + message,
Channel: msg.Channel,
}
err := s.client.WriteMessage(resp)
if err != nil {
return err
}
return nil
}
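// Usage sketch (wiring is illustrative; assumes a configured *gilibot.Bot):
//
//	adapter := slack.New(bot)
//	// GILIBOT_SLACK_TOKEN must be exported before Start is called.
//	if err := adapter.Start(); err != nil {
//		log.Fatal(err)
//	}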
| [
"\"GILIBOT_SLACK_TOKEN\""
] | [] | [
"GILIBOT_SLACK_TOKEN"
] | [] | ["GILIBOT_SLACK_TOKEN"] | go | 1 | 0 | |
vendor/cloud.google.com/go/storage/storage.go | // Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"bytes"
"context"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"encoding/pem"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"cloud.google.com/go/internal/version"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
raw "google.golang.org/api/storage/v1"
htransport "google.golang.org/api/transport/http"
)
// Methods which can be used in signed URLs.
var signedURLMethods = map[string]bool{"DELETE": true, "GET": true, "HEAD": true, "POST": true, "PUT": true}
var (
// ErrBucketNotExist indicates that the bucket does not exist.
ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
// ErrObjectNotExist indicates that the object does not exist.
ErrObjectNotExist = errors.New("storage: object doesn't exist")
// errMethodNotValid indicates that given HTTP method is not valid.
errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys())
)
var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", version.Repo)
const (
// ScopeFullControl grants permissions to manage your
// data and permissions in Google Cloud Storage.
ScopeFullControl = raw.DevstorageFullControlScope
// ScopeReadOnly grants permissions to
// view your data in Google Cloud Storage.
ScopeReadOnly = raw.DevstorageReadOnlyScope
// ScopeReadWrite grants permissions to manage your
// data in Google Cloud Storage.
ScopeReadWrite = raw.DevstorageReadWriteScope
)
var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
func setClientHeader(headers http.Header) {
headers.Set("x-goog-api-client", xGoogHeader)
}
// Client is a client for interacting with Google Cloud Storage.
//
// Clients should be reused instead of created as needed.
// The methods of Client are safe for concurrent use by multiple goroutines.
type Client struct {
hc *http.Client
raw *raw.Service
// Scheme describes the scheme under the current host.
scheme string
// EnvHost is the host set on the STORAGE_EMULATOR_HOST variable.
envHost string
// ReadHost is the default host used on the reader.
readHost string
}
// NewClient creates a new Google Cloud Storage client.
// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
var host, readHost, scheme string
if host = os.Getenv("STORAGE_EMULATOR_HOST"); host == "" {
scheme = "https"
readHost = "storage.googleapis.com"
// Prepend default options to avoid overriding options passed by the user.
opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl), option.WithUserAgent(userAgent)}, opts...)
} else {
scheme = "http"
readHost = host
opts = append([]option.ClientOption{option.WithoutAuthentication()}, opts...)
}
hc, ep, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
rawService, err := raw.NewService(ctx, option.WithHTTPClient(hc))
if err != nil {
return nil, fmt.Errorf("storage client: %v", err)
}
if ep == "" {
// Override the default value for BasePath from the raw client.
// TODO: remove when the raw client uses this endpoint as its default (~end of 2020)
rawService.BasePath = "https://storage.googleapis.com/storage/v1/"
} else {
// If the endpoint has been set explicitly, use this for the BasePath
// as well as readHost
rawService.BasePath = ep
u, err := url.Parse(ep)
if err != nil {
return nil, fmt.Errorf("supplied endpoint %v is not valid: %v", ep, err)
}
readHost = u.Host
}
return &Client{
hc: hc,
raw: rawService,
scheme: scheme,
envHost: host,
readHost: readHost,
}, nil
}
// Close closes the Client.
//
// Close need not be called at program exit.
func (c *Client) Close() error {
// Set fields to nil so that subsequent uses will panic.
c.hc = nil
c.raw = nil
return nil
}
// SigningScheme determines the API version to use when signing URLs.
type SigningScheme int
const (
// SigningSchemeDefault is presently V2 and will change to V4 in the future.
SigningSchemeDefault SigningScheme = iota
// SigningSchemeV2 uses the V2 scheme to sign URLs.
SigningSchemeV2
// SigningSchemeV4 uses the V4 scheme to sign URLs.
SigningSchemeV4
)
// SignedURLOptions allows you to restrict the access to the signed URL.
type SignedURLOptions struct {
// GoogleAccessID represents the authorizer of the signed URL generation.
// It is typically the Google service account client email address from
// the Google Developers Console in the form of "[email protected]".
// Required.
GoogleAccessID string
// PrivateKey is the Google service account private key. It is obtainable
// from the Google Developers Console.
// At https://console.developers.google.com/project/<your-project-id>/apiui/credential,
// create a service account client ID or reuse one of your existing service account
// credentials. Click on the "Generate new P12 key" to generate and download
// a new private key. Once you download the P12 file, use the following command
// to convert it into a PEM file.
//
// $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
//
// Provide the contents of the PEM file as a byte slice.
// Exactly one of PrivateKey or SignBytes must be non-nil.
PrivateKey []byte
// SignBytes is a function for implementing custom signing. For example, if
// your application is running on Google App Engine, you can use
// appengine's internal signing function:
// ctx := appengine.NewContext(request)
// acc, _ := appengine.ServiceAccount(ctx)
// url, err := SignedURL("bucket", "object", &SignedURLOptions{
// GoogleAccessID: acc,
// SignBytes: func(b []byte) ([]byte, error) {
// _, signedBytes, err := appengine.SignBytes(ctx, b)
// return signedBytes, err
// },
// // etc.
// })
//
// Exactly one of PrivateKey or SignBytes must be non-nil.
SignBytes func([]byte) ([]byte, error)
// Method is the HTTP method to be used with the signed URL.
// Signed URLs can be used with GET, HEAD, PUT, and DELETE requests.
// Required.
Method string
// Expires is the expiration time on the signed URL. It must be
// a datetime in the future. For SigningSchemeV4, the expiration may be no
// more than seven days in the future.
// Required.
Expires time.Time
// ContentType is the content type header the client must provide
// to use the generated signed URL.
// Optional.
ContentType string
// Headers is a list of extension headers the client must provide
// in order to use the generated signed URL. Each must be a string of the
// form "key:values", with multiple values separated by a semicolon.
// Optional.
Headers []string
// QueryParameters is a map of additional query parameters. When
// SigningScheme is V4, this is used in computing the signature, and the
// client must use the same query parameters when using the generated signed
// URL.
// Optional.
QueryParameters url.Values
// MD5 is the base64 encoded MD5 checksum of the file.
// If provided, the client should provide the exact value on the request
// header in order to use the signed URL.
// Optional.
MD5 string
// Scheme determines the version of URL signing to use. Default is
// SigningSchemeV2.
Scheme SigningScheme
}
var (
tabRegex = regexp.MustCompile(`[\t]+`)
// I was tempted to call this spacex. :)
spaceRegex = regexp.MustCompile(` +`)
canonicalHeaderRegexp = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`)
excludedCanonicalHeaders = map[string]bool{
"x-goog-encryption-key": true,
"x-goog-encryption-key-sha256": true,
}
)
// v2SanitizeHeaders applies the specifications for canonical extension headers at
// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers.
func v2SanitizeHeaders(hdrs []string) []string {
headerMap := map[string][]string{}
for _, hdr := range hdrs {
// No leading or trailing whitespaces.
sanitizedHeader := strings.TrimSpace(hdr)
var header, value string
// Only keep canonical headers, discard any others.
headerMatches := canonicalHeaderRegexp.FindStringSubmatch(sanitizedHeader)
if len(headerMatches) == 0 {
continue
}
header = headerMatches[1]
value = headerMatches[2]
header = strings.ToLower(strings.TrimSpace(header))
value = strings.TrimSpace(value)
if excludedCanonicalHeaders[header] {
// Do not keep any deliberately excluded canonical headers when signing.
continue
}
if len(value) > 0 {
// Remove duplicate headers by appending the values of duplicates
// in their order of appearance.
headerMap[header] = append(headerMap[header], value)
}
}
var sanitizedHeaders []string
for header, values := range headerMap {
// There should be no spaces around the colon separating the header name
// from the header value or around the values themselves. The values
// should be separated by commas.
//
// NOTE: The semantics for headers without a value are not clear.
// However from specifications these should be edge-cases anyway and we
// should assume that there will be no canonical headers using empty
// values. Any such headers are discarded at the regexp stage above.
sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ",")))
}
sort.Strings(sanitizedHeaders)
return sanitizedHeaders
}
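// For example (illustrative input):
//
//	v2SanitizeHeaders([]string{
//		" X-Goog-Meta-A : one ",
//		"x-goog-meta-a: two",
//		"Content-Type: text/plain", // dropped: not an x-goog-* header
//	})
//
// returns []string{"x-goog-meta-a:one,two"}: names are lowercased, values
// trimmed and merged in order of appearance, and the result sorted.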
// v4SanitizeHeaders applies the specifications for canonical extension headers
// at https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers.
//
// V4 does a couple things differently from V2:
// - Headers get sorted by key, instead of by key:value. We do this in
// signedURLV4.
// - There's no canonical regexp: we simply split headers on :.
// - We don't exclude canonical headers.
// - We replace leading and trailing spaces in header values, like v2, but also
// all intermediate space duplicates get stripped. That is, there's only ever
// a single consecutive space.
func v4SanitizeHeaders(hdrs []string) []string {
headerMap := map[string][]string{}
for _, hdr := range hdrs {
// No leading or trailing whitespaces.
sanitizedHeader := strings.TrimSpace(hdr)
var key, value string
headerMatches := strings.Split(sanitizedHeader, ":")
if len(headerMatches) < 2 {
continue
}
key = headerMatches[0]
value = headerMatches[1]
key = strings.ToLower(strings.TrimSpace(key))
value = strings.TrimSpace(value)
value = string(spaceRegex.ReplaceAll([]byte(value), []byte(" ")))
value = string(tabRegex.ReplaceAll([]byte(value), []byte("\t")))
if len(value) > 0 {
// Remove duplicate headers by appending the values of duplicates
// in their order of appearance.
headerMap[key] = append(headerMap[key], value)
}
}
var sanitizedHeaders []string
for header, values := range headerMap {
// There should be no spaces around the colon separating the header name
// from the header value or around the values themselves. The values
// should be separated by commas.
//
// NOTE: The semantics for headers without a value are not clear.
// However from specifications these should be edge-cases anyway and we
// should assume that there will be no canonical headers using empty
// values. Any such headers are discarded at the regexp stage above.
sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ",")))
}
return sanitizedHeaders
}
// SignedURL returns a URL for the specified object. Signed URLs allow
// the users access to a restricted resource for a limited time without having a
// Google account or signing in. For more information about the signed
// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
now := utcNow()
if err := validateOptions(opts, now); err != nil {
return "", err
}
switch opts.Scheme {
case SigningSchemeV2:
opts.Headers = v2SanitizeHeaders(opts.Headers)
return signedURLV2(bucket, name, opts)
case SigningSchemeV4:
opts.Headers = v4SanitizeHeaders(opts.Headers)
return signedURLV4(bucket, name, opts, now)
default: // SigningSchemeDefault
opts.Headers = v2SanitizeHeaders(opts.Headers)
return signedURLV2(bucket, name, opts)
}
}
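// A minimal V4 signing sketch (bucket, object, account, and key bytes are
// placeholders, not working credentials):
//
//	url, err := storage.SignedURL("my-bucket", "notes.txt", &storage.SignedURLOptions{
//		GoogleAccessID: "svc@project.iam.gserviceaccount.com",
//		PrivateKey:     pemBytes, // service account private key (PEM)
//		Method:         "GET",
//		Expires:        time.Now().Add(15 * time.Minute),
//		Scheme:         storage.SigningSchemeV4,
//	})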
func validateOptions(opts *SignedURLOptions, now time.Time) error {
if opts == nil {
return errors.New("storage: missing required SignedURLOptions")
}
if opts.GoogleAccessID == "" {
return errors.New("storage: missing required GoogleAccessID")
}
if (opts.PrivateKey == nil) == (opts.SignBytes == nil) {
return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set")
}
opts.Method = strings.ToUpper(opts.Method)
if _, ok := signedURLMethods[opts.Method]; !ok {
return errMethodNotValid
}
if opts.Expires.IsZero() {
return errors.New("storage: missing required expires option")
}
if opts.MD5 != "" {
md5, err := base64.StdEncoding.DecodeString(opts.MD5)
if err != nil || len(md5) != 16 {
return errors.New("storage: invalid MD5 checksum")
}
}
if opts.Scheme == SigningSchemeV4 {
cutoff := now.Add(604801 * time.Second) // 7 days + 1 second
if !opts.Expires.Before(cutoff) {
return errors.New("storage: expires must be within seven days from now")
}
}
return nil
}
const (
iso8601 = "20060102T150405Z"
yearMonthDay = "20060102"
)
// utcNow returns the current time in UTC and is a variable to allow for
// reassignment in tests to provide deterministic signed URL values.
var utcNow = func() time.Time {
return time.Now().UTC()
}
// extractHeaderNames takes in a series of key:value headers and returns the
// header names only.
func extractHeaderNames(kvs []string) []string {
var res []string
for _, header := range kvs {
nameValue := strings.Split(header, ":")
res = append(res, nameValue[0])
}
return res
}
// pathEncodeV4 creates an encoded string that matches the v4 signature spec.
// Following the spec precisely is necessary in order to ensure that the URL
// and signing string are correctly formed, and Go's url.PathEncode and
// url.QueryEncode don't generate an exact match without some additional logic.
func pathEncodeV4(path string) string {
segments := strings.Split(path, "/")
var encodedSegments []string
for _, s := range segments {
encodedSegments = append(encodedSegments, url.QueryEscape(s))
}
encodedStr := strings.Join(encodedSegments, "/")
encodedStr = strings.Replace(encodedStr, "+", "%20", -1)
return encodedStr
}
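// For example, pathEncodeV4("my bucket/a+b") yields "my%20bucket/a%2Bb":
// each slash-separated segment is query-escaped, then the "+" that
// url.QueryEscape emits for spaces is rewritten to the "%20" form the
// V4 canonical request requires.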
// signedURLV4 creates a signed URL using the sigV4 algorithm.
func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (string, error) {
buf := &bytes.Buffer{}
fmt.Fprintf(buf, "%s\n", opts.Method)
u := &url.URL{Path: bucket}
if name != "" {
u.Path += "/" + name
}
u.RawPath = pathEncodeV4(u.Path)
// Note: we have to add a / here because GCS does so auto-magically, despite
// our encoding not doing so (and we have to exactly match their
// canonical query).
fmt.Fprintf(buf, "/%s\n", u.RawPath)
headerNames := append(extractHeaderNames(opts.Headers), "host")
if opts.ContentType != "" {
headerNames = append(headerNames, "content-type")
}
if opts.MD5 != "" {
headerNames = append(headerNames, "content-md5")
}
sort.Strings(headerNames)
signedHeaders := strings.Join(headerNames, ";")
timestamp := now.Format(iso8601)
credentialScope := fmt.Sprintf("%s/auto/storage/goog4_request", now.Format(yearMonthDay))
canonicalQueryString := url.Values{
"X-Goog-Algorithm": {"GOOG4-RSA-SHA256"},
"X-Goog-Credential": {fmt.Sprintf("%s/%s", opts.GoogleAccessID, credentialScope)},
"X-Goog-Date": {timestamp},
"X-Goog-Expires": {fmt.Sprintf("%d", int(opts.Expires.Sub(now).Seconds()))},
"X-Goog-SignedHeaders": {signedHeaders},
}
// Add user-supplied query parameters to the canonical query string. For V4,
// it's necessary to include these.
for k, v := range opts.QueryParameters {
canonicalQueryString[k] = append(canonicalQueryString[k], v...)
}
fmt.Fprintf(buf, "%s\n", canonicalQueryString.Encode())
u.Host = "storage.googleapis.com"
var headersWithValue []string
headersWithValue = append(headersWithValue, "host:"+u.Host)
headersWithValue = append(headersWithValue, opts.Headers...)
if opts.ContentType != "" {
headersWithValue = append(headersWithValue, "content-type:"+opts.ContentType)
}
if opts.MD5 != "" {
headersWithValue = append(headersWithValue, "content-md5:"+opts.MD5)
}
// Trim extra whitespace from headers and replace with a single space.
var trimmedHeaders []string
for _, h := range headersWithValue {
trimmedHeaders = append(trimmedHeaders, strings.Join(strings.Fields(h), " "))
}
canonicalHeaders := strings.Join(sortHeadersByKey(trimmedHeaders), "\n")
fmt.Fprintf(buf, "%s\n\n", canonicalHeaders)
fmt.Fprintf(buf, "%s\n", signedHeaders)
// If the user provides a value for X-Goog-Content-SHA256, we must use
// that value in the request string. If not, we use UNSIGNED-PAYLOAD.
sha256Header := false
for _, h := range trimmedHeaders {
if strings.HasPrefix(strings.ToLower(h), "x-goog-content-sha256") && strings.Contains(h, ":") {
sha256Header = true
fmt.Fprintf(buf, "%s", strings.SplitN(h, ":", 2)[1])
break
}
}
if !sha256Header {
fmt.Fprint(buf, "UNSIGNED-PAYLOAD")
}
sum := sha256.Sum256(buf.Bytes())
hexDigest := hex.EncodeToString(sum[:])
signBuf := &bytes.Buffer{}
fmt.Fprint(signBuf, "GOOG4-RSA-SHA256\n")
fmt.Fprintf(signBuf, "%s\n", timestamp)
fmt.Fprintf(signBuf, "%s\n", credentialScope)
fmt.Fprintf(signBuf, "%s", hexDigest)
signBytes := opts.SignBytes
if opts.PrivateKey != nil {
key, err := parseKey(opts.PrivateKey)
if err != nil {
return "", err
}
signBytes = func(b []byte) ([]byte, error) {
sum := sha256.Sum256(b)
return rsa.SignPKCS1v15(
rand.Reader,
key,
crypto.SHA256,
sum[:],
)
}
}
b, err := signBytes(signBuf.Bytes())
if err != nil {
return "", err
}
signature := hex.EncodeToString(b)
canonicalQueryString.Set("X-Goog-Signature", string(signature))
u.Scheme = "https"
u.RawQuery = canonicalQueryString.Encode()
return u.String(), nil
}
// takes a list of headerKey:headervalue1,headervalue2,etc and sorts by header
// key.
func sortHeadersByKey(hdrs []string) []string {
headersMap := map[string]string{}
var headersKeys []string
for _, h := range hdrs {
parts := strings.Split(h, ":")
k := parts[0]
v := parts[1]
headersMap[k] = v
headersKeys = append(headersKeys, k)
}
sort.Strings(headersKeys)
var sorted []string
for _, k := range headersKeys {
v := headersMap[k]
sorted = append(sorted, fmt.Sprintf("%s:%s", k, v))
}
return sorted
}
func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) {
signBytes := opts.SignBytes
if opts.PrivateKey != nil {
key, err := parseKey(opts.PrivateKey)
if err != nil {
return "", err
}
signBytes = func(b []byte) ([]byte, error) {
sum := sha256.Sum256(b)
return rsa.SignPKCS1v15(
rand.Reader,
key,
crypto.SHA256,
sum[:],
)
}
}
u := &url.URL{
Path: fmt.Sprintf("/%s/%s", bucket, name),
}
buf := &bytes.Buffer{}
fmt.Fprintf(buf, "%s\n", opts.Method)
fmt.Fprintf(buf, "%s\n", opts.MD5)
fmt.Fprintf(buf, "%s\n", opts.ContentType)
fmt.Fprintf(buf, "%d\n", opts.Expires.Unix())
if len(opts.Headers) > 0 {
fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n"))
}
fmt.Fprintf(buf, "%s", u.String())
b, err := signBytes(buf.Bytes())
if err != nil {
return "", err
}
encoded := base64.StdEncoding.EncodeToString(b)
u.Scheme = "https"
u.Host = "storage.googleapis.com"
q := u.Query()
q.Set("GoogleAccessId", opts.GoogleAccessID)
q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix()))
q.Set("Signature", string(encoded))
u.RawQuery = q.Encode()
return u.String(), nil
}
// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
// Use BucketHandle.Object to get a handle.
type ObjectHandle struct {
c *Client
bucket string
object string
acl ACLHandle
gen int64 // a negative value indicates latest
conds *Conditions
encryptionKey []byte // AES-256 key
userProject string // for requester-pays buckets
readCompressed bool // Accept-Encoding: gzip
}
// ACL provides access to the object's access control list.
// This controls who can read and write this object.
// This call does not perform any network operations.
func (o *ObjectHandle) ACL() *ACLHandle {
return &o.acl
}
// Generation returns a new ObjectHandle that operates on a specific generation
// of the object.
// By default, the handle operates on the latest generation. Not
// all operations work when given a specific generation; check the API
// endpoints at https://cloud.google.com/storage/docs/json_api/ for details.
func (o *ObjectHandle) Generation(gen int64) *ObjectHandle {
o2 := *o
o2.gen = gen
return &o2
}
// If returns a new ObjectHandle that applies a set of preconditions.
// Preconditions already set on the ObjectHandle are ignored.
// Operations on the new handle will return an error if the preconditions are not
// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions
// for more details.
func (o *ObjectHandle) If(conds Conditions) *ObjectHandle {
o2 := *o
o2.conds = &conds
return &o2
}
// Key returns a new ObjectHandle that uses the supplied encryption
// key to encrypt and decrypt the object's contents.
//
// Encryption key must be a 32-byte AES-256 key.
// See https://cloud.google.com/storage/docs/encryption for details.
func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle {
o2 := *o
o2.encryptionKey = encryptionKey
return &o2
}
// Attrs returns meta information about the object.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx)
if err := applyConds("Attrs", o.gen, o.conds, call); err != nil {
return nil, err
}
if o.userProject != "" {
call.UserProject(o.userProject)
}
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist
}
if err != nil {
return nil, err
}
return newObject(obj), nil
}
// Update updates an object with the provided attributes.
// All zero-value attributes are ignored.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
var attrs ObjectAttrs
// Lists of fields to send, and set to null, in the JSON.
var forceSendFields, nullFields []string
if uattrs.ContentType != nil {
attrs.ContentType = optional.ToString(uattrs.ContentType)
// For ContentType, sending the empty string is a no-op.
// Instead we send a null.
if attrs.ContentType == "" {
nullFields = append(nullFields, "ContentType")
} else {
forceSendFields = append(forceSendFields, "ContentType")
}
}
if uattrs.ContentLanguage != nil {
attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage)
// For ContentLanguage it's an error to send the empty string.
// Instead we send a null.
if attrs.ContentLanguage == "" {
nullFields = append(nullFields, "ContentLanguage")
} else {
forceSendFields = append(forceSendFields, "ContentLanguage")
}
}
if uattrs.ContentEncoding != nil {
attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding)
forceSendFields = append(forceSendFields, "ContentEncoding")
}
if uattrs.ContentDisposition != nil {
attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition)
forceSendFields = append(forceSendFields, "ContentDisposition")
}
if uattrs.CacheControl != nil {
attrs.CacheControl = optional.ToString(uattrs.CacheControl)
forceSendFields = append(forceSendFields, "CacheControl")
}
if uattrs.EventBasedHold != nil {
attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold)
forceSendFields = append(forceSendFields, "EventBasedHold")
}
if uattrs.TemporaryHold != nil {
attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold)
forceSendFields = append(forceSendFields, "TemporaryHold")
}
if uattrs.Metadata != nil {
attrs.Metadata = uattrs.Metadata
if len(attrs.Metadata) == 0 {
// Sending the empty map is a no-op. We send null instead.
nullFields = append(nullFields, "Metadata")
} else {
forceSendFields = append(forceSendFields, "Metadata")
}
}
if uattrs.ACL != nil {
attrs.ACL = uattrs.ACL
// It's an error to attempt to delete the ACL, so
// we don't append to nullFields here.
forceSendFields = append(forceSendFields, "Acl")
}
rawObj := attrs.toRawObject(o.bucket)
rawObj.ForceSendFields = forceSendFields
rawObj.NullFields = nullFields
call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx)
if err := applyConds("Update", o.gen, o.conds, call); err != nil {
return nil, err
}
if o.userProject != "" {
call.UserProject(o.userProject)
}
if uattrs.PredefinedACL != "" {
call.PredefinedAcl(uattrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist
}
if err != nil {
return nil, err
}
return newObject(obj), nil
}
// BucketName returns the name of the bucket.
func (o *ObjectHandle) BucketName() string {
return o.bucket
}
// ObjectName returns the name of the object.
func (o *ObjectHandle) ObjectName() string {
return o.object
}
// ObjectAttrsToUpdate is used to update the attributes of an object.
// Only fields set to non-nil values will be updated.
// Set a field to its zero value to delete it.
//
// For example, to change ContentType and delete ContentEncoding and
// Metadata, use
// ObjectAttrsToUpdate{
// ContentType: "text/html",
// ContentEncoding: "",
// Metadata: map[string]string{},
// }
type ObjectAttrsToUpdate struct {
EventBasedHold optional.Bool
TemporaryHold optional.Bool
ContentType optional.String
ContentLanguage optional.String
ContentEncoding optional.String
ContentDisposition optional.String
CacheControl optional.String
Metadata map[string]string // set to map[string]string{} to delete
ACL []ACLRule
// If not empty, applies a predefined set of access controls. ACL must be nil.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/patch.
PredefinedACL string
}
// Delete deletes the single specified object.
func (o *ObjectHandle) Delete(ctx context.Context) error {
if err := o.validate(); err != nil {
return err
}
call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx)
if err := applyConds("Delete", o.gen, o.conds, call); err != nil {
return err
}
if o.userProject != "" {
call.UserProject(o.userProject)
}
// Encryption doesn't apply to Delete.
setClientHeader(call.Header())
err := runWithRetry(ctx, func() error { return call.Do() })
switch e := err.(type) {
case nil:
return nil
case *googleapi.Error:
if e.Code == http.StatusNotFound {
return ErrObjectNotExist
}
}
return err
}
// ReadCompressed when true causes the read to happen without decompressing.
func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle {
o2 := *o
o2.readCompressed = compressed
return &o2
}
// NewWriter returns a storage Writer that writes to the GCS object
// associated with this ObjectHandle.
//
// A new object will be created unless an object with this name already exists.
// Otherwise any previous object with the same name will be replaced.
// The object will not be available (and any previous object will remain)
// until Close has been called.
//
// Attributes can be set on the object by modifying the returned Writer's
// ObjectAttrs field before the first call to Write. If no ContentType
// attribute is specified, the content type will be automatically sniffed
// using net/http.DetectContentType.
//
// It is the caller's responsibility to call Close when writing is done. To
// stop writing without saving the data, cancel the context.
func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
return &Writer{
ctx: ctx,
o: o,
donec: make(chan struct{}),
ObjectAttrs: ObjectAttrs{Name: o.object},
ChunkSize: googleapi.DefaultUploadChunkSize,
}
}
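// A minimal upload sketch (client, bucket, and object names are placeholders):
//
//	w := client.Bucket("my-bucket").Object("notes.txt").NewWriter(ctx)
//	w.ContentType = "text/plain"
//	if _, err := w.Write([]byte("hello")); err != nil {
//		// handle error; nothing is committed until Close succeeds
//	}
//	if err := w.Close(); err != nil {
//		// handle error
//	}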
func (o *ObjectHandle) validate() error {
if o.bucket == "" {
return errors.New("storage: bucket name is empty")
}
if o.object == "" {
return errors.New("storage: object name is empty")
}
if !utf8.ValidString(o.object) {
return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
}
return nil
}
// parseKey converts the binary contents of a private key file to an
// *rsa.PrivateKey. It detects whether the private key is in a PEM container or
// not. If so, it extracts the private key from PEM container before
// conversion. It only supports PEM containers with no passphrase.
func parseKey(key []byte) (*rsa.PrivateKey, error) {
if block, _ := pem.Decode(key); block != nil {
key = block.Bytes
}
parsedKey, err := x509.ParsePKCS8PrivateKey(key)
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
if err != nil {
return nil, err
}
}
parsed, ok := parsedKey.(*rsa.PrivateKey)
if !ok {
return nil, errors.New("oauth2: private key is invalid")
}
return parsed, nil
}
// toRawObject copies the editable attributes from o to the raw library's Object type.
func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
var ret string
if !o.RetentionExpirationTime.IsZero() {
ret = o.RetentionExpirationTime.Format(time.RFC3339)
}
return &raw.Object{
Bucket: bucket,
Name: o.Name,
EventBasedHold: o.EventBasedHold,
TemporaryHold: o.TemporaryHold,
RetentionExpirationTime: ret,
ContentType: o.ContentType,
ContentEncoding: o.ContentEncoding,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
ContentDisposition: o.ContentDisposition,
StorageClass: o.StorageClass,
Acl: toRawObjectACL(o.ACL),
Metadata: o.Metadata,
}
}
// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
type ObjectAttrs struct {
// Bucket is the name of the bucket containing this GCS object.
// This field is read-only.
Bucket string
// Name is the name of the object within the bucket.
// This field is read-only.
Name string
// ContentType is the MIME type of the object's content.
ContentType string
// ContentLanguage is the content language of the object's content.
ContentLanguage string
// CacheControl is the Cache-Control header to be sent in the response
// headers when serving the object data.
CacheControl string
// EventBasedHold specifies whether an object is under event-based hold. New
// objects created in a bucket whose DefaultEventBasedHold is set will
// default to that value.
EventBasedHold bool
// TemporaryHold specifies whether an object is under temporary hold. While
// this flag is set to true, the object is protected against deletion and
// overwrites.
TemporaryHold bool
// RetentionExpirationTime is a server-determined value that specifies the
// earliest time that the object's retention period expires.
// This is a read-only field.
RetentionExpirationTime time.Time
// ACL is the list of access control rules for the object.
ACL []ACLRule
// If not empty, applies a predefined set of access controls. It should be set
// only when writing, copying or composing an object. When copying or composing,
// it acts as the destinationPredefinedAcl parameter.
// PredefinedACL is always empty for ObjectAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/insert
// for valid values.
PredefinedACL string
// Owner is the owner of the object. This field is read-only.
//
// If non-zero, it is in the form of "user-<userId>".
Owner string
// Size is the length of the object's content. This field is read-only.
Size int64
// ContentEncoding is the encoding of the object's content.
ContentEncoding string
// ContentDisposition is the optional Content-Disposition header of the object
// sent in the response headers.
ContentDisposition string
// MD5 is the MD5 hash of the object's content. This field is read-only,
// except when used from a Writer. If set on a Writer, the uploaded
// data is rejected if its MD5 hash does not match this field.
MD5 []byte
// CRC32C is the CRC32 checksum of the object's content using
// the Castagnoli93 polynomial. This field is read-only, except when
// used from a Writer. If set on a Writer and Writer.SendCRC32C
// is true, the uploaded data is rejected if its CRC32c hash does not
// match this field.
CRC32C uint32
// MediaLink is a URL to the object's content. This field is read-only.
MediaLink string
// Metadata represents user-provided metadata, in key/value pairs.
// It can be nil if no metadata is provided.
Metadata map[string]string
// Generation is the generation number of the object's content.
// This field is read-only.
Generation int64
// Metageneration is the version of the metadata for this
// object at this generation. This field is used for preconditions
// and for detecting changes in metadata. A metageneration number
// is only meaningful in the context of a particular generation
// of a particular object. This field is read-only.
Metageneration int64
// StorageClass is the storage class of the object. This defines
// how objects are stored and determines the SLA and the cost of storage.
// Typical values are "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE".
// Defaults to "STANDARD".
// See https://cloud.google.com/storage/docs/storage-classes for all
// valid values.
StorageClass string
// Created is the time the object was created. This field is read-only.
Created time.Time
// Deleted is the time the object was deleted.
// If not deleted, it is the zero value. This field is read-only.
Deleted time.Time
// Updated is the creation or modification time of the object.
// For buckets with versioning enabled, changing an object's
// metadata does not change this property. This field is read-only.
Updated time.Time
// CustomerKeySHA256 is the base64-encoded SHA-256 hash of the
// customer-supplied encryption key for the object. It is empty if there is
// no customer-supplied encryption key.
// See https://cloud.google.com/storage/docs/encryption for more about
// encryption in Google Cloud Storage.
CustomerKeySHA256 string
// Cloud KMS key name, in the form
// projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object,
// if the object is encrypted by such a key.
//
// Providing both a KMSKeyName and a customer-supplied encryption key (via
// ObjectHandle.Key) will result in an error when writing an object.
KMSKeyName string
// Prefix is set only for ObjectAttrs which represent synthetic "directory
// entries" when iterating over buckets using Query.Delimiter. See
// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
// populated.
Prefix string
// Etag is the HTTP/1.1 Entity tag for the object.
// This field is read-only.
Etag string
}
// convertTime converts a time in RFC3339 format to time.Time.
// If any error occurs in parsing, the zero-value time.Time is silently returned.
func convertTime(t string) time.Time {
var r time.Time
if t != "" {
r, _ = time.Parse(time.RFC3339, t)
}
return r
}
func newObject(o *raw.Object) *ObjectAttrs {
if o == nil {
return nil
}
owner := ""
if o.Owner != nil {
owner = o.Owner.Entity
}
md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
crc32c, _ := decodeUint32(o.Crc32c)
var sha256 string
if o.CustomerEncryption != nil {
sha256 = o.CustomerEncryption.KeySha256
}
return &ObjectAttrs{
Bucket: o.Bucket,
Name: o.Name,
ContentType: o.ContentType,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
EventBasedHold: o.EventBasedHold,
TemporaryHold: o.TemporaryHold,
RetentionExpirationTime: convertTime(o.RetentionExpirationTime),
ACL: toObjectACLRules(o.Acl),
Owner: owner,
ContentEncoding: o.ContentEncoding,
ContentDisposition: o.ContentDisposition,
Size: int64(o.Size),
MD5: md5,
CRC32C: crc32c,
MediaLink: o.MediaLink,
Metadata: o.Metadata,
Generation: o.Generation,
Metageneration: o.Metageneration,
StorageClass: o.StorageClass,
CustomerKeySHA256: sha256,
KMSKeyName: o.KmsKeyName,
Created: convertTime(o.TimeCreated),
Deleted: convertTime(o.TimeDeleted),
Updated: convertTime(o.Updated),
Etag: o.Etag,
}
}
// decodeUint32 decodes a uint32 encoded in Base64 in big-endian byte order.
func decodeUint32(b64 string) (uint32, error) {
d, err := base64.StdEncoding.DecodeString(b64)
if err != nil {
return 0, err
}
if len(d) != 4 {
return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d)
}
return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil
}
// encodeUint32 encodes a uint32 as Base64 in big-endian byte order.
func encodeUint32(u uint32) string {
b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)}
return base64.StdEncoding.EncodeToString(b)
}
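// exampleChecksumRoundTrip is an illustrative sketch of the two helpers
// above: a CRC32C value survives an encode/decode round trip unchanged.
func exampleChecksumRoundTrip(crc uint32) (uint32, error) {
	return decodeUint32(encodeUint32(crc))
}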
// Query represents a query to filter objects from a bucket.
type Query struct {
// Delimiter returns results in a directory-like fashion.
// Results will contain only objects whose names, aside from the
// prefix, do not contain delimiter. Objects whose names,
// aside from the prefix, contain delimiter will have their name,
// truncated after the delimiter, returned in prefixes.
// Duplicate prefixes are omitted.
// Optional.
Delimiter string
// Prefix is the prefix filter to query objects
// whose names begin with this prefix.
// Optional.
Prefix string
// Versions indicates whether multiple versions of the same
// object will be included in the results.
Versions bool
// fieldSelection is used to select only specific fields to be returned by
// the query. It's used internally and is populated for the user by
// calling Query.SetAttrSelection
fieldSelection string
}
// attrToFieldMap maps the field names of ObjectAttrs to the underlying field
// names in the API call. Only the ObjectAttrs field names are visible to users
// because they are already part of the public API of the package.
var attrToFieldMap = map[string]string{
"Bucket": "bucket",
"Name": "name",
"ContentType": "contentType",
"ContentLanguage": "contentLanguage",
"CacheControl": "cacheControl",
"EventBasedHold": "eventBasedHold",
"TemporaryHold": "temporaryHold",
"RetentionExpirationTime": "retentionExpirationTime",
"ACL": "acl",
"Owner": "owner",
"ContentEncoding": "contentEncoding",
"ContentDisposition": "contentDisposition",
"Size": "size",
"MD5": "md5Hash",
"CRC32C": "crc32c",
"MediaLink": "mediaLink",
"Metadata": "metadata",
"Generation": "generation",
"Metageneration": "metageneration",
"StorageClass": "storageClass",
"CustomerKeySHA256": "customerEncryption",
"KMSKeyName": "kmsKeyName",
"Created": "timeCreated",
"Deleted": "timeDeleted",
"Updated": "updated",
"Etag": "etag",
}
// SetAttrSelection makes the query populate only specific attributes of
// objects. When iterating over objects, if you only need each object's name
// and size, pass []string{"Name", "Size"} to this method. Only these fields
// will be fetched for each object across the network; the other fields of
// ObjectAttr will remain at their default values. This is a performance
// optimization; for more information, see
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance
func (q *Query) SetAttrSelection(attrs []string) error {
fieldSet := make(map[string]bool)
for _, attr := range attrs {
field, ok := attrToFieldMap[attr]
if !ok {
return fmt.Errorf("storage: attr %v is not valid", attr)
}
fieldSet[field] = true
}
if len(fieldSet) > 0 {
var b strings.Builder
b.WriteString("items(")
first := true
for field := range fieldSet {
if !first {
b.WriteString(",")
}
first = false
b.WriteString(field)
}
b.WriteString(")")
q.fieldSelection = b.String()
}
return nil
}
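// exampleNameAndSizeQuery is an illustrative sketch of SetAttrSelection: it
// builds a query that fetches only object names and sizes. The resulting
// fieldSelection is "items(name,size)", though the field order may vary
// because the string is built from a map. The prefix is a hypothetical
// placeholder.
func exampleNameAndSizeQuery() (*Query, error) {
	q := &Query{Prefix: "logs/"}
	if err := q.SetAttrSelection([]string{"Name", "Size"}); err != nil {
		return nil, err
	}
	return q, nil
}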
// Conditions constrain methods to act on specific generations of
// objects.
//
// The zero value is an empty set of constraints. Not all conditions or
// combinations of conditions are applicable to all methods.
// See https://cloud.google.com/storage/docs/generations-preconditions
// for details on how these operate.
type Conditions struct {
// Generation constraints.
// At most one of the following can be set to a non-zero value.
// GenerationMatch specifies that the object must have the given generation
// for the operation to occur.
// If GenerationMatch is zero, it has no effect.
// Use DoesNotExist to specify that the object does not exist in the bucket.
GenerationMatch int64
// GenerationNotMatch specifies that the object must not have the given
// generation for the operation to occur.
// If GenerationNotMatch is zero, it has no effect.
GenerationNotMatch int64
// DoesNotExist specifies that the object must not exist in the bucket for
// the operation to occur.
// If DoesNotExist is false, it has no effect.
DoesNotExist bool
// Metadata generation constraints.
// At most one of the following can be set to a non-zero value.
// MetagenerationMatch specifies that the object must have the given
// metageneration for the operation to occur.
// If MetagenerationMatch is zero, it has no effect.
MetagenerationMatch int64
// MetagenerationNotMatch specifies that the object must not have the given
// metageneration for the operation to occur.
// If MetagenerationNotMatch is zero, it has no effect.
MetagenerationNotMatch int64
}
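// exampleConditions is an illustrative sketch of how validate (below)
// enforces the "at most one per group" rule documented above.
func exampleConditions() {
	ok := Conditions{GenerationMatch: 1234, MetagenerationMatch: 5}
	_ = ok.validate("example") // nil: one generation and one metageneration constraint
	bad := Conditions{GenerationMatch: 1234, DoesNotExist: true}
	_ = bad.validate("example") // error: two generation constraints at once
}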
func (c *Conditions) validate(method string) error {
if *c == (Conditions{}) {
return fmt.Errorf("storage: %s: empty conditions", method)
}
if !c.isGenerationValid() {
return fmt.Errorf("storage: %s: multiple conditions specified for generation", method)
}
if !c.isMetagenerationValid() {
return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
}
return nil
}
func (c *Conditions) isGenerationValid() bool {
n := 0
if c.GenerationMatch != 0 {
n++
}
if c.GenerationNotMatch != 0 {
n++
}
if c.DoesNotExist {
n++
}
return n <= 1
}
func (c *Conditions) isMetagenerationValid() bool {
return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0
}
// applyConds modifies the provided call using the conditions in conds.
// call is something that quacks like a *raw.WhateverCall.
func applyConds(method string, gen int64, conds *Conditions, call interface{}) error {
cval := reflect.ValueOf(call)
if gen >= 0 {
if !setConditionField(cval, "Generation", gen) {
return fmt.Errorf("storage: %s: generation not supported", method)
}
}
if conds == nil {
return nil
}
if err := conds.validate(method); err != nil {
return err
}
switch {
case conds.GenerationMatch != 0:
if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) {
return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method)
}
case conds.GenerationNotMatch != 0:
if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) {
return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method)
}
case conds.DoesNotExist:
if !setConditionField(cval, "IfGenerationMatch", int64(0)) {
return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
}
}
switch {
case conds.MetagenerationMatch != 0:
if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
}
case conds.MetagenerationNotMatch != 0:
if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
}
}
return nil
}
func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error {
if gen >= 0 {
call.SourceGeneration(gen)
}
if conds == nil {
return nil
}
if err := conds.validate("CopyTo source"); err != nil {
return err
}
switch {
case conds.GenerationMatch != 0:
call.IfSourceGenerationMatch(conds.GenerationMatch)
case conds.GenerationNotMatch != 0:
call.IfSourceGenerationNotMatch(conds.GenerationNotMatch)
case conds.DoesNotExist:
call.IfSourceGenerationMatch(0)
}
switch {
case conds.MetagenerationMatch != 0:
call.IfSourceMetagenerationMatch(conds.MetagenerationMatch)
case conds.MetagenerationNotMatch != 0:
call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch)
}
return nil
}
// setConditionField sets a field on a *raw.WhateverCall.
// We can't use anonymous interfaces because the return type is
// different, since the field setters are builders.
func setConditionField(call reflect.Value, name string, value interface{}) bool {
m := call.MethodByName(name)
if !m.IsValid() {
return false
}
m.Call([]reflect.Value{reflect.ValueOf(value)})
return true
}
// conditionsQuery returns the generation and conditions as a URL query
// string suitable for URL.RawQuery. It assumes that the conditions
// have been validated.
func conditionsQuery(gen int64, conds *Conditions) string {
// URL escapes are elided because integer strings are URL-safe.
var buf []byte
appendParam := func(s string, n int64) {
if len(buf) > 0 {
buf = append(buf, '&')
}
buf = append(buf, s...)
buf = strconv.AppendInt(buf, n, 10)
}
if gen >= 0 {
appendParam("generation=", gen)
}
if conds == nil {
return string(buf)
}
switch {
case conds.GenerationMatch != 0:
appendParam("ifGenerationMatch=", conds.GenerationMatch)
case conds.GenerationNotMatch != 0:
appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch)
case conds.DoesNotExist:
appendParam("ifGenerationMatch=", 0)
}
switch {
case conds.MetagenerationMatch != 0:
appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch)
case conds.MetagenerationNotMatch != 0:
appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch)
}
return string(buf)
}
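// For instance (an illustrative sketch): conditionsQuery(1234,
// &Conditions{MetagenerationMatch: 5}) yields
// "generation=1234&ifMetagenerationMatch=5".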
// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods
// that modifyCall searches for by name.
type composeSourceObj struct {
src *raw.ComposeRequestSourceObjects
}
func (c composeSourceObj) Generation(gen int64) {
c.src.Generation = gen
}
func (c composeSourceObj) IfGenerationMatch(gen int64) {
// It's safe to overwrite ObjectPreconditions, since its only field is
// IfGenerationMatch.
c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{
IfGenerationMatch: gen,
}
}
func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error {
if key == nil {
return nil
}
// TODO(jbd): Ask the API team to return a more user-friendly error
// and avoid doing this check at the client level.
if len(key) != 32 {
return errors.New("storage: not a 32-byte AES-256 key")
}
var cs string
if copySource {
cs = "copy-source-"
}
headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256")
headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key))
keyHash := sha256.Sum256(key)
headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:]))
return nil
}
// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
r := c.raw.Projects.ServiceAccount.Get(projectID)
res, err := r.Context(ctx).Do()
if err != nil {
return "", err
}
return res.EmailAddress, nil
}
| [
"\"STORAGE_EMULATOR_HOST\""
] | [] | [
"STORAGE_EMULATOR_HOST"
] | [] | ["STORAGE_EMULATOR_HOST"] | go | 1 | 0 | |
core/ServiceInstance.go | /* ServiceInstance.go: provides the interface for controlling service instance processes
*
* Author: J. Lowell Wofford <[email protected]>
*
* This software is open source software available under the BSD-3 license.
* Copyright (c) 2020, Triad National Security, LLC
* See LICENSE file for details.
*/
package core
import (
"fmt"
"os"
"os/exec"
"sync"
"github.com/golang/protobuf/ptypes"
"github.com/hpc/kraken/lib"
)
////////////////////////////
// ServiceInstance Object /
//////////////////////////
var _ lib.ServiceInstance = (*ServiceInstance)(nil)
// A ServiceInstance describes a service that is built into the binary and exec'ed by forking.
// note: state information is stored in the node proto object; this object manages a running context
type ServiceInstance struct {
id string // ID must be unique
module string // name doesn't need to be unique; we can run multiple configs of the same service
exe string // gets set automatically
entry func() // needs to run as a goroutine
sock string
cmd *exec.Cmd
ctl chan<- lib.ServiceControl
wchan chan<- lib.ServiceInstanceUpdate
state lib.ServiceState // note: these states mean something slightly different here: RUN means the process is running; INIT means nothing has happened yet
m lib.ModuleSelfService
mutex *sync.Mutex
}
// NewServiceInstance provides a new, initialized ServiceInstance object
func NewServiceInstance(id, module string, entry func()) *ServiceInstance {
si := &ServiceInstance{
id: id,
module: module,
entry: entry,
cmd: nil,
mutex: &sync.Mutex{},
}
si.setState(lib.Service_STOP) // we're obviously stopped right now
si.exe, _ = os.Executable()
return si
}
// ID gets the ID string for the service
func (si *ServiceInstance) ID() string { return si.id }
// Module returns the name of the module this is an instance of
func (si *ServiceInstance) Module() string { return si.module }
// GetState returns the current run state of the service
func (si *ServiceInstance) GetState() lib.ServiceState {
si.mutex.Lock()
defer si.mutex.Unlock()
return si.state
}
// UpdateConfig will send a signal to the running si to check for a config update
func (si *ServiceInstance) UpdateConfig() {
if si.ctl != nil {
si.ctl <- lib.ServiceControl{Command: lib.ServiceControl_UPDATE}
}
}
// Start will execute the process
func (si *ServiceInstance) Start() {
e := si.start()
if e != nil {
si.setState(lib.Service_ERROR)
return
}
si.setState(lib.Service_RUN)
go si.watcher()
}
// Stop sends a signal to the running si to stop
func (si *ServiceInstance) Stop() {
if si.ctl != nil {
si.ctl <- lib.ServiceControl{Command: lib.ServiceControl_STOP}
}
}
// Watch provides a channel where process state changes will be reported
func (si *ServiceInstance) Watch(wchan chan<- lib.ServiceInstanceUpdate) {
si.wchan = wchan
}
// SetCtl sets the channel to send control message to (to pass through the API)
func (si *ServiceInstance) SetCtl(ctl chan<- lib.ServiceControl) {
si.ctl = ctl
}
// SetSock sets the path to the API socket
func (si *ServiceInstance) SetSock(sock string) {
si.sock = sock
}
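// exampleLaunch is an illustrative sketch (not part of Kraken proper) of
// wiring a ServiceInstance together; the socket path is a hypothetical
// placeholder.
func exampleLaunch(entry func()) {
	si := NewServiceInstance("websvc1", "websvc", entry)
	si.SetSock("/tmp/kraken.sock") // hypothetical API socket path
	updates := make(chan lib.ServiceInstanceUpdate, 8)
	si.Watch(updates) // state transitions will be reported on this channel
	si.Start()        // forks the current executable with KRAKEN_* vars set
}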
// setState sets the state, but should only be done internally. This makes sure we notify any watcher
func (si *ServiceInstance) setState(state lib.ServiceState) {
si.mutex.Lock()
defer si.mutex.Unlock()
si.state = state
if si.wchan != nil {
si.wchan <- lib.ServiceInstanceUpdate{
ID: si.id,
State: si.state,
}
}
}
func (si *ServiceInstance) watcher() {
e := si.cmd.Wait()
if e != nil {
si.setState(lib.Service_ERROR)
return
}
si.setState(lib.Service_STOP)
}
func (si *ServiceInstance) start() (e error) {
si.mutex.Lock()
defer si.mutex.Unlock()
if si.state == lib.Service_RUN {
return fmt.Errorf("cannot start a service instance that is already running")
}
if _, e = os.Stat(si.exe); os.IsNotExist(e) {
return e
}
// TODO: we should probably do more sanity checks here...
si.cmd = exec.Command(si.exe)
si.cmd.Args = []string{"[kraken:" + si.ID() + "]"}
si.cmd.Stdin = os.Stdin
si.cmd.Stdout = os.Stdout
si.cmd.Stderr = os.Stderr
si.cmd.Env = append(os.Environ(),
"KRAKEN_SOCK="+si.sock,
"KRAKEN_MODULE="+si.module,
"KRAKEN_ID="+si.ID())
e = si.cmd.Start()
return
}
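// The environment variables set above form the contract with the child
// process; a minimal sketch of the child side, under that assumption:
//
//	ModuleExecute(os.Getenv("KRAKEN_ID"), os.Getenv("KRAKEN_MODULE"), os.Getenv("KRAKEN_SOCK"))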
// ModuleExecute does all of the necessary steps to start the service instance.
// This is the actual entry point for a new module process.
func ModuleExecute(id, module, sock string) {
m, ok := Registry.Modules[module]
if !ok {
fmt.Printf("trying to launch non-existent module: %s\n", module)
return
}
mss, ok := m.(lib.ModuleSelfService)
if !ok {
fmt.Printf("module is not executable: %s\n", module)
return
}
config := false
mc, ok := m.(lib.ModuleWithConfig)
if ok {
config = true
}
api := NewAPIClient(sock)
mss.Init(api)
// call in, and get a control chan
cc, e := api.ServiceInit(id, module)
if e != nil {
fmt.Printf("sock: %v\nid: %v\nmodule: %v\nerror: %v\n", os.Getenv("KRAKEN_SOCK"), os.Getenv("KRAKEN_ID"), os.Getenv("KRAKEN_MODULE"), e)
return
}
// Setup logger stream
if e = api.LoggerInit(id); e != nil {
fmt.Printf("failed to create logger stream: %v\n", e)
return
}
// Setup mutation stream if we need it
mm, ok := m.(lib.ModuleWithMutations)
if ok {
cc, e := api.MutationInit(id, module)
if e != nil {
api.Logf(ERROR, "failed to create mutation stream: %v\n", e)
return
}
mm.SetMutationChan(cc)
}
// Setup event stream if we need it
me, ok := m.(lib.ModuleWithAllEvents)
if ok {
cc, e := api.EventInit(id, module)
if e != nil {
api.Logf(ERROR, "failed to create event stream: %v\n", e)
return
}
me.SetEventsChan(cc)
}
// Setup discovery stream if we need it
md, ok := m.(lib.ModuleWithDiscovery)
if ok {
cc, e := api.DiscoveryInit(id)
if e != nil {
api.Logf(ERROR, "failed to create discovery stream: %v\n", e)
return
}
md.SetDiscoveryChan(cc)
}
updateConfig := func() {
if !config {
api.Logf(ERROR, "tried to update config on module with no config")
return
}
// Get a copy of the config
n, _ := api.QueryRead(api.Self().String())
srv := n.GetService(id)
p, e := Registry.Resolve(srv.GetConfig().GetTypeUrl())
if e != nil {
api.Logf(ERROR, "resolve config error (%s): %v\n", srv.GetConfig().GetTypeUrl(), e)
return
}
e = ptypes.UnmarshalAny(srv.GetConfig(), p)
if e != nil {
api.Logf(ERROR, "unmarshal config failure: %v\n", e)
return
}
mc.UpdateConfig(p)
}
if config {
updateConfig()
}
go mss.Entry()
for {
select {
case cmd := <-cc:
switch cmd.Command {
case lib.ServiceControl_STOP:
api.Logf(NOTICE, "stopping")
mss.Stop()
case lib.ServiceControl_UPDATE:
updateConfig()
default:
}
}
}
}
| [
"\"KRAKEN_SOCK\"",
"\"KRAKEN_ID\"",
"\"KRAKEN_MODULE\""
] | [] | [
"KRAKEN_MODULE",
"KRAKEN_ID",
"KRAKEN_SOCK"
] | [] | ["KRAKEN_MODULE", "KRAKEN_ID", "KRAKEN_SOCK"] | go | 3 | 0 | |
diffeqtorch/install.py | import os
import subprocess
from pathlib import Path
from warnings import warn
from .logging import get_logger
JULIA_PROJECT = str(Path(__file__).parent / "julia")
os.environ["JULIA_PROJECT"] = JULIA_PROJECT
log = get_logger("diffeqtorch_install")
def install_and_test(pyjulia=True, julia_deps=True, julia_sysimage=True):
if pyjulia:
log.debug("Install PyJulia")
install_pyjulia()
log.debug("Test PyJulia")
test_pyjulia()
if julia_deps:
log.debug("Install Julia dependencies")
install_julia_deps()
log.debug("Test Julia dependencies")
test_julia_deps()
if julia_sysimage:
log.debug("Install Julia system image")
install_julia_sysimage()
log.debug("Test Julia system image")
test_julia_sysimage()
def install_julia_deps():
output = subprocess.run(
f"export JULIA_PROJECT={JULIA_PROJECT}; julia -E 'using Pkg; Pkg.instantiate()'",
shell=True,
check=True,
capture_output=True,
)
log.debug(output)
def test_julia_deps():
output = subprocess.run(
f"export JULIA_PROJECT={JULIA_PROJECT}; julia -E 'using DifferentialEquations'",
shell=True,
check=True,
capture_output=True,
)
log.debug(output)
def install_pyjulia():
import julia
julia.install()
def test_pyjulia(sysimage=None, call="1+1"):
from julia.api import Julia
if sysimage is None:
julia = Julia(compiled_modules=False, debug=True)
else:
julia = Julia(compiled_modules=False, sysimage=sysimage, debug=True)
log.debug(julia._call(call))
def install_julia_sysimage():
if "JULIA_SYSIMAGE_DIFFEQTORCH" in os.environ:
if not Path(os.environ["JULIA_SYSIMAGE_DIFFEQTORCH"]).exists():
log.debug("Build Julia system image")
output = subprocess.run(
f"julia --project={JULIA_PROJECT} {JULIA_PROJECT}/sysimage.jl",
shell=True,
check=True,
capture_output=True,
)
log.debug(output)
else:
log.debug("System image exists, skipping")
else:
warn("JULIA_SYSIMAGE_DIFFEQTORCH not set, won't build system image")
def test_julia_sysimage():
if "JULIA_SYSIMAGE_DIFFEQTORCH" in os.environ:
assert Path(os.environ["JULIA_SYSIMAGE_DIFFEQTORCH"]).exists()
test_pyjulia(
sysimage=os.environ["JULIA_SYSIMAGE_DIFFEQTORCH"]
)
else:
log.debug("JULIA_SYSIMAGE_DIFFEQTORCH not set")
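if __name__ == "__main__":
    # Illustrative entry-point sketch: run the full install-and-test cycle.
    # Assumes julia is on PATH; set JULIA_SYSIMAGE_DIFFEQTORCH beforehand if a
    # custom system image should also be built.
    install_and_test()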
| [] | [] | [
"JULIA_PROJECT",
"JULIA_SYSIMAGE_DIFFEQTORCH"
] | [] | ["JULIA_PROJECT", "JULIA_SYSIMAGE_DIFFEQTORCH"] | python | 2 | 0 | |
cloud-control-manager/cloud-driver/drivers/aws/main/Test_Resources.go | // Proof of Concepts of CB-Spider.
// The CB-Spider is a sub-Framework of the Cloud-Barista Multi-Cloud Project.
// The CB-Spider Mission is to connect all the clouds with a single interface.
//
// * Cloud-Barista: https://github.com/cloud-barista
//
// This is a Cloud Driver Example for PoC Test.
//
// by [email protected], 2019.08.
package main
import (
"fmt"
"io/ioutil"
"os"
"github.com/aws/aws-sdk-go/aws/awserr"
awsdrv "github.com/cloud-barista/cb-spider/cloud-control-manager/cloud-driver/drivers/aws"
idrv "github.com/cloud-barista/cb-spider/cloud-control-manager/cloud-driver/interfaces"
irs "github.com/cloud-barista/cb-spider/cloud-control-manager/cloud-driver/interfaces/resources"
"github.com/davecgh/go-spew/spew"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
cblog "github.com/cloud-barista/cb-log"
)
var cblogger *logrus.Logger
func init() {
// cblog is a global variable.
cblogger = cblog.GetLogger("AWS Resource Test")
//cblog.SetLevel("info")
cblog.SetLevel("debug")
}
func handleSecurity() {
cblogger.Debug("Start Security Resource Test")
ResourceHandler, err := getResourceHandler("Security")
if err != nil {
panic(err)
}
handler := ResourceHandler.(irs.SecurityHandler)
//config := readConfigFile()
//VmID := config.Aws.VmID
securityName := "CB-SecurityAddTest1"
securityId := "sg-0d6a2bb960481ce68"
vpcId := "vpc-c0479cab"
for {
fmt.Println("Security Management")
fmt.Println("0. Quit")
fmt.Println("1. Security List")
fmt.Println("2. Security Create")
fmt.Println("3. Security Get")
fmt.Println("4. Security Delete")
fmt.Println("5. Security Add Rules")
fmt.Println("6. Security Delete Rules")
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
panic(err)
}
if inputCnt == 1 {
switch commandNum {
case 0:
return
case 1:
result, err := handler.ListSecurity()
if err != nil {
cblogger.Infof(" Failed to retrieve the Security list : ", err)
} else {
cblogger.Info("Security list lookup result")
//cblogger.Info(result)
spew.Dump(result)
if result != nil {
securityId = result[0].IId.SystemId // switch to the created ID for the later get/delete tests
}
}
case 2:
cblogger.Infof("[%s] Security creation test", securityName)
securityReqInfo := irs.SecurityReqInfo{
IId: irs.IID{NameId: securityName},
VpcIID: irs.IID{SystemId: vpcId},
SecurityRules: &[]irs.SecurityRuleInfo{ // security rule settings
//CIDR test
{
FromPort: "30",
ToPort: "30",
IPProtocol: "tcp",
Direction: "inbound",
CIDR: "10.13.1.10/32",
},
{
FromPort: "40",
ToPort: "40",
IPProtocol: "tcp",
Direction: "outbound",
CIDR: "10.13.1.10/32",
},
// {
// FromPort: "30",
// ToPort: "30",
// IPProtocol: "tcp",
// Direction: "outbound",
// CIDR: "1.2.3.4/0",
// },
// {
// FromPort: "20",
// ToPort: "22",
// IPProtocol: "tcp",
// Direction: "inbound",
// //CIDR: "1.2.3.4/0",
// },
/*
{
FromPort: "80",
ToPort: "80",
IPProtocol: "tcp",
Direction: "inbound",
CIDR: "1.2.3.4/0",
},
{
FromPort: "8080",
ToPort: "8080",
IPProtocol: "tcp",
Direction: "inbound",
},
{
FromPort: "-1",
ToPort: "-1",
IPProtocol: "icmp",
Direction: "inbound",
},
{
FromPort: "443",
ToPort: "443",
IPProtocol: "tcp",
Direction: "outbound",
},
{
FromPort: "8443",
ToPort: "9999",
IPProtocol: "tcp",
Direction: "outbound",
},
*/
/*
{
//FromPort: "8443",
//ToPort: "9999",
IPProtocol: "-1", // allow all (no port info)
Direction: "inbound",
},
*/
},
}
result, err := handler.CreateSecurity(securityReqInfo)
if err != nil {
cblogger.Infof(securityName, " Failed to create Security : ", err)
} else {
cblogger.Infof("[%s] Security creation result : [%v]", securityName, result)
securityId = result.IId.SystemId
spew.Dump(result)
}
case 3:
cblogger.Infof("[%s] Security lookup test", securityId)
result, err := handler.GetSecurity(irs.IID{SystemId: securityId})
if err != nil {
cblogger.Infof(securityId, " Failed to retrieve Security : ", err)
} else {
cblogger.Infof("[%s] Security lookup result : [%v]", securityId, result)
spew.Dump(result)
}
case 4:
cblogger.Infof("[%s] Security deletion test", securityId)
result, err := handler.DeleteSecurity(irs.IID{SystemId: securityId})
if err != nil {
cblogger.Infof(securityId, " Failed to delete Security : ", err)
} else {
cblogger.Infof("[%s] Security deletion result : [%s]", securityId, result)
}
case 5:
cblogger.Infof("[%s] Security group rule addition test", securityId)
result, err := handler.AddRules(irs.IID{SystemId: securityId}, &[]irs.SecurityRuleInfo{
{
FromPort: "80",
ToPort: "80",
IPProtocol: "tcp",
Direction: "inbound",
CIDR: "10.13.1.10/32",
},
{
FromPort: "8080",
ToPort: "8080",
IPProtocol: "tcp",
Direction: "inbound",
CIDR: "10.13.1.10/32",
},
{
FromPort: "81",
ToPort: "81",
IPProtocol: "tcp",
Direction: "outbound",
CIDR: "10.13.1.10/32",
},
{
FromPort: "82",
ToPort: "82",
IPProtocol: "tcp",
Direction: "outbound",
CIDR: "10.13.1.10/32",
},
})
if err != nil {
cblogger.Infof(securityId, " Failed to add Security group rules : ", err)
} else {
cblogger.Infof("[%s] Security group rule addition result : [%s]", securityId, result)
}
case 6:
cblogger.Infof("[%s] Security group rule removal test", securityId)
result, err := handler.RemoveRules(irs.IID{SystemId: securityId}, &[]irs.SecurityRuleInfo{
{
FromPort: "80",
ToPort: "80",
IPProtocol: "tcp",
Direction: "inbound",
CIDR: "10.13.1.10/32",
},
{
FromPort: "8080",
ToPort: "8080",
IPProtocol: "tcp",
Direction: "inbound",
CIDR: "10.13.1.10/32",
},
{
FromPort: "81",
ToPort: "81",
IPProtocol: "tcp",
Direction: "outbound",
CIDR: "10.13.1.10/32",
},
{
FromPort: "82",
ToPort: "82",
IPProtocol: "tcp",
Direction: "outbound",
CIDR: "10.13.1.10/32",
},
})
if err != nil {
cblogger.Infof(securityId, " Failed to remove Security group rules : ", err)
} else {
cblogger.Infof("[%s] Security group rule removal result : [%s]", securityId, result)
}
}
}
}
}
// Test SecurityHandler
func handleSecurityOld() {
cblogger.Debug("Start handler")
ResourceHandler, err := getResourceHandler("Security")
if err != nil {
panic(err)
}
handler := ResourceHandler.(irs.SecurityHandler)
config := readConfigFile()
securityId := config.Aws.SecurityGroupID
cblogger.Infof(securityId)
securityId = "sg-0101df0e8d4f27fec"
//securityId = "cb-sgtest-mcloud-barista"
//result, err := handler.GetSecurity(irs.IID{SystemId: securityId})
//result, err := handler.GetSecurity("sg-0fd2d90b269ebc082") // sgtest-mcloub-barista
//result, err := handler.DeleteSecurity(irs.IID{SystemId: securityId})
//result, err := handler.DeleteSecurity(irs.IID{SystemId: "sg-0101df0e8d4f27fec"})
result, err := handler.ListSecurity()
securityReqInfo := irs.SecurityReqInfo{
IId: irs.IID{NameId: "cb-sgtest2-mcloud-barista"},
VpcIID: irs.IID{NameId: "CB-VNet", SystemId: "vpc-0c23cb9c0e68c735a"},
SecurityRules: &[]irs.SecurityRuleInfo{ // security rule settings
{
FromPort: "20",
ToPort: "22",
IPProtocol: "tcp",
Direction: "inbound",
},
/*
{
FromPort: "80",
ToPort: "80",
IPProtocol: "tcp",
Direction: "inbound",
},
{
FromPort: "8080",
ToPort: "8080",
IPProtocol: "tcp",
Direction: "inbound",
},
{
FromPort: "443",
ToPort: "443",
IPProtocol: "tcp",
Direction: "outbound",
},
{
FromPort: "8443",
ToPort: "9999",
IPProtocol: "tcp",
Direction: "outbound",
},
{
//FromPort: "8443",
//ToPort: "9999",
IPProtocol: "-1", // allow all (no port info)
Direction: "inbound",
},
*/
},
}
cblogger.Info(securityReqInfo)
//result, err := handler.CreateSecurity(securityReqInfo)
if err != nil {
cblogger.Infof("Failed to retrieve the security group : ", err)
} else {
cblogger.Info("Security group lookup result")
//cblogger.Info(result)
spew.Dump(result)
}
}
/*
// Test PublicIp
func handlePublicIP() {
cblogger.Debug("Start Publicip Resource Test")
ResourceHandler, err := getResourceHandler("Publicip")
if err != nil {
panic(err)
}
handler := ResourceHandler.(irs.PublicIPHandler)
config := readConfigFile()
//reqGetPublicIP := "13.124.140.207"
reqPublicIP := config.Aws.PublicIP
reqPublicIP = "mcloud-barista-eip-test"
//reqPublicIP = "eipalloc-0231a3e16ec42e869"
cblogger.Info("reqPublicIP : ", reqPublicIP)
//handler.CreatePublicIP(publicIPReqInfo)
//handler.ListPublicIP()
//handler.GetPublicIP("13.124.140.207")
for {
fmt.Println("")
fmt.Println("Publicip Resource Test")
fmt.Println("1. ListPublicIP()")
fmt.Println("2. GetPublicIP()")
fmt.Println("3. CreatePublicIP()")
fmt.Println("4. DeletePublicIP()")
fmt.Println("5. Exit")
var commandNum int
var reqDelIP string
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
panic(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
fmt.Println("Start ListPublicIP() ...")
result, err := handler.ListPublicIP()
if err != nil {
cblogger.Error("Failed to retrieve the PublicIP list : ", err)
} else {
cblogger.Info("PublicIP list lookup result")
spew.Dump(result)
}
fmt.Println("Finish ListPublicIP()")
case 2:
fmt.Println("Start GetPublicIP() ...")
result, err := handler.GetPublicIP(reqPublicIP)
if err != nil {
cblogger.Error(reqPublicIP, " Failed to retrieve PublicIP info : ", err)
} else {
cblogger.Infof("PublicIP[%s] info lookup result", reqPublicIP)
spew.Dump(result)
}
fmt.Println("Finish GetPublicIP()")
case 3:
fmt.Println("Start CreatePublicIP() ...")
reqInfo := irs.PublicIPReqInfo{Name: "mcloud-barista-eip-test"}
result, err := handler.CreatePublicIP(reqInfo)
if err != nil {
cblogger.Error("Failed to create PublicIP : ", err)
} else {
cblogger.Info("PublicIP created successfully ", result)
spew.Dump(result)
}
fmt.Println("Finish CreatePublicIP()")
case 4:
fmt.Println("Start DeletePublicIP() ...")
result, err := handler.DeletePublicIP(reqPublicIP)
if err != nil {
cblogger.Error(reqDelIP, " Failed to delete PublicIP : ", err)
} else {
if result {
cblogger.Infof("PublicIP[%s] deleted", reqDelIP)
} else {
cblogger.Errorf("Failed to delete PublicIP[%s]", reqDelIP)
}
}
fmt.Println("Finish DeletePublicIP()")
case 5:
fmt.Println("Exit")
return
}
}
}
}
*/
// Test KeyPair
func handleKeyPair() {
cblogger.Debug("Start KeyPair Resource Test")
KeyPairHandler, err := setKeyPairHandler()
if err != nil {
panic(err)
}
//config := readConfigFile()
//VmID := config.Aws.VmID
keyPairName := "CB-KeyPairTest123123"
//keyPairName := config.Aws.KeyName
for {
fmt.Println("KeyPair Management")
fmt.Println("0. Quit")
fmt.Println("1. KeyPair List")
fmt.Println("2. KeyPair Create")
fmt.Println("3. KeyPair Get")
fmt.Println("4. KeyPair Delete")
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
panic(err)
}
if inputCnt == 1 {
switch commandNum {
case 0:
return
case 1:
result, err := KeyPairHandler.ListKey()
if err != nil {
cblogger.Infof(" Failed to retrieve the key pair list : ", err)
} else {
cblogger.Info("Key pair list lookup result")
//cblogger.Info(result)
spew.Dump(result)
}
case 2:
cblogger.Infof("[%s] Key pair creation test", keyPairName)
keyPairReqInfo := irs.KeyPairReqInfo{
IId: irs.IID{NameId: keyPairName},
//Name: keyPairName,
}
result, err := KeyPairHandler.CreateKey(keyPairReqInfo)
if err != nil {
cblogger.Infof(keyPairName, " Failed to create key pair : ", err)
} else {
cblogger.Infof("[%s] Key pair creation result : [%s]", keyPairName, result)
spew.Dump(result)
}
case 3:
cblogger.Infof("[%s] Key pair lookup test", keyPairName)
result, err := KeyPairHandler.GetKey(irs.IID{SystemId: keyPairName})
if err != nil {
cblogger.Infof(keyPairName, " Failed to retrieve key pair : ", err)
} else {
cblogger.Infof("[%s] Key pair lookup result : [%s]", keyPairName, result)
}
case 4:
cblogger.Infof("[%s] Key pair deletion test", keyPairName)
result, err := KeyPairHandler.DeleteKey(irs.IID{SystemId: keyPairName})
if err != nil {
cblogger.Infof(keyPairName, " Failed to delete key pair : ", err)
} else {
cblogger.Infof("[%s] Key pair deletion result : [%s]", keyPairName, result)
}
}
}
}
}
// Test handleVNetwork (VPC)
/*
func handleVNetwork() {
cblogger.Debug("Start VPC Resource Test")
VPCHandler, err := setVPCHandler()
if err != nil {
panic(err)
}
vNetworkReqInfo := irs.VNetworkReqInfo{
//Id: "subnet-044a2b57145e5afc5",
//Name: "CB-VNet-Subnet", // Not used: implemented automatically inside the driver instead of being passed in from external tools such as a web UI.
IId: irs.IID{NameId: "CB-VNet-Subnet"},
//CidrBlock: "10.0.0.0/16",
//CidrBlock: "192.168.0.0/16",
}
//reqSubnetId := "subnet-0b9ea37601d46d8fa"
reqSubnetId := irs.IID{NameId: "subnet-0b9ea37601d46d8fa"}
//reqSubnetId = ""
for {
fmt.Println("VPCHandler Management")
fmt.Println("0. Quit")
fmt.Println("1. VNetwork List")
fmt.Println("2. VNetwork Create")
fmt.Println("3. VNetwork Get")
fmt.Println("4. VNetwork Delete")
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
panic(err)
}
if inputCnt == 1 {
switch commandNum {
case 0:
return
case 1:
result, err := VPCHandler.ListVNetwork()
if err != nil {
cblogger.Infof(" Failed to retrieve the VNetwork list : ", err)
} else {
cblogger.Info("VNetwork list lookup result")
//cblogger.Info(result)
spew.Dump(result)
// Only one exists internally.
// The first subnet ID in the list is automatically reused as the request ID for the get/delete tests.
if result != nil {
reqSubnetId = result[0].IId // switch to the created ID for the later get/delete tests
}
}
case 2:
cblogger.Infof("[%s] VNetwork creation test", vNetworkReqInfo.IId.NameId)
//vNetworkReqInfo := irs.VNetworkReqInfo{}
result, err := VPCHandler.CreateVNetwork(vNetworkReqInfo)
if err != nil {
cblogger.Infof(reqSubnetId.NameId, " Failed to create VNetwork : ", err)
} else {
cblogger.Infof("VNetwork creation result : ", result)
reqSubnetId = result.IId // switch to the created ID for the later get/delete tests
spew.Dump(result)
}
case 3:
cblogger.Infof("[%s] VNetwork lookup test", reqSubnetId)
result, err := VPCHandler.GetVNetwork(reqSubnetId)
if err != nil {
cblogger.Infof("[%s] Failed to retrieve VNetwork : ", reqSubnetId, err)
} else {
cblogger.Infof("[%s] VNetwork lookup result : [%s]", reqSubnetId, result)
spew.Dump(result)
}
case 4:
cblogger.Infof("[%s] VNetwork deletion test", reqSubnetId)
result, err := VPCHandler.DeleteVNetwork(reqSubnetId)
if err != nil {
cblogger.Infof("[%s] Failed to delete VNetwork : ", reqSubnetId, err)
} else {
cblogger.Infof("[%s] VNetwork deletion result : [%s]", reqSubnetId, result)
}
}
}
}
}
*/
func handleVPC() {
cblogger.Debug("Start VPC Resource Test")
VPCHandler, err := setVPCHandler()
if err != nil {
panic(err)
}
subnetReqInfo := irs.SubnetInfo{
IId: irs.IID{NameId: "AddTest-Subnet"},
IPv4_CIDR: "10.0.2.0/24",
}
subnetReqVpcInfo := irs.IID{SystemId: "vpc-00e513fd64a7d9972"}
cblogger.Debug(subnetReqInfo)
cblogger.Debug(subnetReqVpcInfo)
vpcReqInfo := irs.VPCReqInfo{
IId: irs.IID{NameId: "New-CB-VPC"},
IPv4_CIDR: "10.0.0.0/16",
SubnetInfoList: []irs.SubnetInfo{
{
IId: irs.IID{NameId: "New-CB-Subnet"},
IPv4_CIDR: "10.0.1.0/24",
},
/*
{
IId: irs.IID{NameId: "New-CB-Subnet2"},
IPv4_CIDR: "10.0.2.0/24",
},
*/
},
//Id: "subnet-044a2b57145e5afc5",
//Name: "CB-VNet-Subnet", // Not used: implemented automatically inside the driver instead of being passed in from external tools such as a web UI.
//CidrBlock: "10.0.0.0/16",
//CidrBlock: "192.168.0.0/16",
}
reqSubnetId := irs.IID{SystemId: "vpc-04f6de5c2af880978"}
reqSubnetId = irs.IID{SystemId: "subnet-0ebd316ff47f07628"}
for {
fmt.Println("VPCHandler Management")
fmt.Println("0. Quit")
fmt.Println("1. VNetwork List")
fmt.Println("2. VNetwork Create")
fmt.Println("3. VNetwork Get")
fmt.Println("4. VNetwork Delete")
fmt.Println("5. Add Subnet")
fmt.Println("6. Delete Subnet")
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
panic(err)
}
if inputCnt == 1 {
switch commandNum {
case 0:
return
case 1:
result, err := VPCHandler.ListVPC()
if err != nil {
cblogger.Infof(" Failed to retrieve the VNetwork list : ", err)
} else {
cblogger.Info("VNetwork list lookup result")
//cblogger.Info(result)
spew.Dump(result)
// Only one exists internally.
// The first subnet ID in the list is automatically reused as the request ID for the get/delete tests.
if result != nil {
reqSubnetId = result[0].IId // switch to the created ID for the later get/delete tests
subnetReqVpcInfo = reqSubnetId // for the Subnet add/remove tests
}
}
case 2:
cblogger.Infof("[%s] VNetwork creation test", vpcReqInfo.IId.NameId)
//vpcReqInfo := irs.VPCReqInfo{}
result, err := VPCHandler.CreateVPC(vpcReqInfo)
if err != nil {
cblogger.Infof(reqSubnetId.NameId, " Failed to create VNetwork : ", err)
} else {
cblogger.Infof("VNetwork creation result : ", result)
reqSubnetId = result.IId // switch to the created ID for the later get/delete tests
spew.Dump(result)
}
case 3:
cblogger.Infof("[%s] VNetwork lookup test", reqSubnetId)
result, err := VPCHandler.GetVPC(reqSubnetId)
if err != nil {
cblogger.Infof("[%s] Failed to retrieve VNetwork : ", reqSubnetId, err)
} else {
cblogger.Infof("[%s] VNetwork lookup result : [%s]", reqSubnetId, result)
spew.Dump(result)
}
case 4:
cblogger.Infof("[%s] VNetwork deletion test", reqSubnetId)
result, err := VPCHandler.DeleteVPC(reqSubnetId)
if err != nil {
cblogger.Infof("[%s] Failed to delete VNetwork : ", reqSubnetId, err)
} else {
cblogger.Infof("[%s] VNetwork deletion result : [%s]", reqSubnetId, result)
}
case 5:
cblogger.Infof("[%s] Subnet addition test", vpcReqInfo.IId.NameId)
result, err := VPCHandler.AddSubnet(subnetReqVpcInfo, subnetReqInfo)
if err != nil {
cblogger.Infof(reqSubnetId.NameId, " Failed to add Subnet : ", err)
} else {
cblogger.Infof("Subnet addition result : ", result)
//reqSubnetId = result.IId // switch to the created ID for the later get/delete tests
spew.Dump(result)
}
case 6:
cblogger.Infof("[%s] Subnet removal test", reqSubnetId.SystemId)
result, err := VPCHandler.RemoveSubnet(subnetReqVpcInfo, reqSubnetId)
if err != nil {
cblogger.Infof("[%s] Failed to remove Subnet : ", reqSubnetId.SystemId, err)
} else {
cblogger.Infof("[%s] Subnet removal result : [%s]", reqSubnetId.SystemId, result)
}
}
}
}
}
// Test AMI
func handleImage() {
cblogger.Debug("Start ImageHandler Resource Test")
ResourceHandler, err := getResourceHandler("Image")
if err != nil {
panic(err)
}
handler := ResourceHandler.(irs.ImageHandler)
imageReqInfo := irs.ImageReqInfo{
//IId: irs.IID{NameId: "Test OS Image", SystemId: "ami-0c068f008ea2bdaa1"}, //Microsoft Windows Server 2019
IId: irs.IID{NameId: "Test OS Image", SystemId: "ami-088da9557aae42f39"}, //Ubuntu Server 20.04 LTS (HVM), SSD Volume Type, 64-bit x86
//Id: "ami-047f7b46bd6dd5d84",
//Name: "Test OS Image",
}
for {
fmt.Println("ImageHandler Management")
fmt.Println("0. Quit")
fmt.Println("1. Image List")
fmt.Println("2. Image Create")
fmt.Println("3. Image Get")
fmt.Println("4. Image Delete")
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
panic(err)
}
if inputCnt == 1 {
switch commandNum {
case 0:
return
case 1:
result, err := handler.ListImage()
if err != nil {
cblogger.Infof(" Failed to retrieve the Image list : ", err)
} else {
cblogger.Info("Image list lookup result")
cblogger.Debug(result)
cblogger.Infof("Log level : [%s]", cblog.GetLevel())
//spew.Dump(result)
cblogger.Info("Number of results : ", len(result))
// The first entry's ID in the list is automatically reused as the request ID for the get/delete tests.
if result != nil {
imageReqInfo.IId = result[0].IId // switch to the created ID for the later get/delete tests
}
}
case 2:
cblogger.Infof("[%s] Image creation test", imageReqInfo.IId.NameId)
result, err := handler.CreateImage(imageReqInfo)
if err != nil {
cblogger.Infof(imageReqInfo.IId.NameId, " Failed to create Image : ", err)
} else {
cblogger.Infof("Image creation result : ", result)
imageReqInfo.IId = result.IId // switch to the created ID for the later get/delete tests
spew.Dump(result)
}
case 3:
cblogger.Infof("[%s] Image lookup test", imageReqInfo.IId)
result, err := handler.GetImage(imageReqInfo.IId)
if err != nil {
cblogger.Infof("[%s] Failed to retrieve Image : ", imageReqInfo.IId.NameId, err)
} else {
cblogger.Infof("[%s] Image lookup result : [%s]", imageReqInfo.IId.NameId, result)
spew.Dump(result)
}
case 4:
cblogger.Infof("[%s] Image deletion test", imageReqInfo.IId.NameId)
result, err := handler.DeleteImage(imageReqInfo.IId)
if err != nil {
cblogger.Infof("[%s] Failed to delete Image : ", imageReqInfo.IId.NameId, err)
} else {
cblogger.Infof("[%s] Image deletion result : [%s]", imageReqInfo.IId.NameId, result)
}
}
}
}
}
/*
// Test VNic
func handleVNic() {
cblogger.Debug("Start VNicHandler Resource Test")
ResourceHandler, err := getResourceHandler("VNic")
if err != nil {
panic(err)
}
handler := ResourceHandler.(irs.VNicHandler)
reqVnicID := "eni-093deb03ca6eb70eb"
vNicReqInfo := irs.VNicReqInfo{
Name: "TestCB-VNic2",
SecurityGroupIds: []string{
//"sg-0d4d11c090c4814e8", "sg-0dc15d050f8272e24",
"sg-06c4523b969eaafc7",
},
}
for {
fmt.Println("VNicHandler Management")
fmt.Println("0. Quit")
fmt.Println("1. VNic List")
fmt.Println("2. VNic Create")
fmt.Println("3. VNic Get")
fmt.Println("4. VNic Delete")
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
panic(err)
}
if inputCnt == 1 {
switch commandNum {
case 0:
return
case 1:
result, err := handler.ListVNic()
if err != nil {
cblogger.Infof(" Failed to retrieve the VNic list : ", err)
} else {
cblogger.Info("VNic list lookup result")
spew.Dump(result)
if len(result) > 0 {
reqVnicID = result[0].Id // switch to the first ID in the list for easier get/delete tests
}
}
case 2:
cblogger.Infof("[%s] VNic creation test", vNicReqInfo.Name)
result, err := handler.CreateVNic(vNicReqInfo)
if err != nil {
cblogger.Infof(reqVnicID, " Failed to create VNic : ", err)
} else {
cblogger.Infof("VNic creation result : ", result)
reqVnicID = result.Id // switch to the created ID for the later get/delete tests
spew.Dump(result)
}
case 3:
cblogger.Infof("[%s] VNic lookup test", reqVnicID)
result, err := handler.GetVNic(reqVnicID)
if err != nil {
cblogger.Infof("[%s] Failed to retrieve VNic : ", reqVnicID, err)
} else {
cblogger.Infof("[%s] VNic lookup result : [%s]", reqVnicID, result)
spew.Dump(result)
}
case 4:
cblogger.Infof("[%s] VNic deletion test", reqVnicID)
result, err := handler.DeleteVNic(reqVnicID)
if err != nil {
cblogger.Infof("[%s] Failed to delete VNic : ", reqVnicID, err)
} else {
cblogger.Infof("[%s] VNic deletion result : [%s]", reqVnicID, result)
}
}
}
}
}
*/
func testErr() error {
//return awserr.Error("")
//return errors.New("")
return awserr.New("504", "not found", nil)
}
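// exampleAwsErrHandling is an illustrative sketch of consuming the error
// above: awserr.Error exposes Code() and Message() via a type assertion.
func exampleAwsErrHandling() {
	if aerr, ok := testErr().(awserr.Error); ok {
		cblogger.Infof("code=[%s] message=[%s]", aerr.Code(), aerr.Message())
	}
}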
// Test VM Lifecycle Management (Create/Suspend/Resume/Reboot/Terminate)
func handleVM() {
cblogger.Debug("Start VMHandler Resource Test")
ResourceHandler, err := getResourceHandler("VM")
if err != nil {
panic(err)
}
//handler := ResourceHandler.(irs2.ImageHandler)
vmHandler := ResourceHandler.(irs.VMHandler)
//config := readConfigFile()
//VmID := irs.IID{NameId: config.Aws.BaseName, SystemId: config.Aws.VmID}
VmID := irs.IID{SystemId: "i-08f13a125cc74bef6"}
for {
fmt.Println("VM Management")
fmt.Println("0. Quit")
fmt.Println("1. VM Start")
fmt.Println("2. VM Info")
fmt.Println("3. Suspend VM")
fmt.Println("4. Resume VM")
fmt.Println("5. Reboot VM")
fmt.Println("6. Terminate VM")
fmt.Println("7. GetVMStatus VM")
fmt.Println("8. ListVMStatus VM")
fmt.Println("9. ListVM")
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
panic(err)
}
if inputCnt == 1 {
switch commandNum {
case 0:
return
case 1:
vmReqInfo := irs.VMReqInfo{
IId: irs.IID{NameId: "mcloud-barista-cb-user-test-rootsize"},
//ImageIID: irs.IID{SystemId: "ami-001b6f8703b50e077"}, //centos-stable-7.2003.13-ebs-202005201235
//ImageIID: irs.IID{SystemId: "ami-059b6d3840b03d6dd"}, //Ubuntu Server 20.04 LTS (HVM)
//ImageIID: irs.IID{SystemId: "ami-09e67e426f25ce0d7"}, //Ubuntu Server 20.04 LTS (HVM) - N. Virginia region
//ImageIID: irs.IID{SystemId: "ami-059b6d3840b03d6dd"}, //Ubuntu Server 20.04 LTS (HVM)
ImageIID: irs.IID{SystemId: "ami-0fe22bffdec36361c"}, //Ubuntu Server 18.04 LTS (HVM) - Japan region
SubnetIID: irs.IID{SystemId: "subnet-0a6ca346752be1ca4"},
SecurityGroupIIDs: []irs.IID{{SystemId: "sg-08c76d376b6e4e4ae"}},
VMSpecName: "t2.micro",
KeyPairIID: irs.IID{SystemId: "japan-test"},
RootDiskType: "standard", //gp2/standard/io1/io2/sc1/st1/gp3
//RootDiskType: "gp2", //gp2/standard/io1/io2/sc1/st1/gp3
//RootDiskType: "gp3", //gp2/standard/io1/io2/sc1/st1/gp3
//RootDiskSize: "60", //must be at least 8GB.
//RootDiskSize: "1", //must be at least 8GB.
//RootDiskSize: "Default", //8GB
}
vmInfo, err := vmHandler.StartVM(vmReqInfo)
if err != nil {
//panic(err)
cblogger.Error(err)
} else {
cblogger.Info("VM created!!", vmInfo)
spew.Dump(vmInfo)
VmID = vmInfo.IId
}
//cblogger.Info(vm)
cblogger.Info("Finish Create VM")
case 2:
vmInfo, err := vmHandler.GetVM(VmID)
if err != nil {
cblogger.Errorf("[%s] Failed to retrieve VM info", VmID)
cblogger.Error(err)
} else {
cblogger.Infof("[%s] VM info lookup result", VmID)
cblogger.Info(vmInfo)
spew.Dump(vmInfo)
}
case 3:
cblogger.Info("Start Suspend VM ...")
result, err := vmHandler.SuspendVM(VmID)
if err != nil {
cblogger.Errorf("[%s] VM Suspend failed - [%s]", VmID, result)
cblogger.Error(err)
} else {
cblogger.Infof("[%s] VM Suspend succeeded - [%s]", VmID, result)
}
case 4:
cblogger.Info("Start Resume VM ...")
result, err := vmHandler.ResumeVM(VmID)
if err != nil {
cblogger.Errorf("[%s] VM Resume failed - [%s]", VmID, result)
cblogger.Error(err)
} else {
cblogger.Infof("[%s] VM Resume succeeded - [%s]", VmID, result)
}
case 5:
cblogger.Info("Start Reboot VM ...")
result, err := vmHandler.RebootVM(VmID)
if err != nil {
cblogger.Errorf("[%s] VM Reboot failed - [%s]", VmID, result)
cblogger.Error(err)
} else {
cblogger.Infof("[%s] VM Reboot succeeded - [%s]", VmID, result)
}
case 6:
cblogger.Info("Start Terminate VM ...")
result, err := vmHandler.TerminateVM(VmID)
if err != nil {
cblogger.Errorf("[%s] VM Terminate failed - [%s]", VmID, result)
cblogger.Error(err)
} else {
cblogger.Infof("[%s] VM Terminate succeeded - [%s]", VmID, result)
}
case 7:
cblogger.Info("Start Get VM Status...")
vmStatus, err := vmHandler.GetVMStatus(VmID)
if err != nil {
cblogger.Errorf("[%s] VM Get Status failed", VmID)
cblogger.Error(err)
} else {
cblogger.Infof("[%s] VM Get Status succeeded : [%s]", VmID, vmStatus)
}
case 8:
cblogger.Info("Start ListVMStatus ...")
vmStatusInfos, err := vmHandler.ListVMStatus()
if err != nil {
cblogger.Error("ListVMStatus failed")
cblogger.Error(err)
} else {
cblogger.Info("ListVMStatus succeeded")
cblogger.Info(vmStatusInfos)
spew.Dump(vmStatusInfos)
}
case 9:
cblogger.Info("Start ListVM ...")
vmList, err := vmHandler.ListVM()
if err != nil {
cblogger.Error("ListVM failed")
cblogger.Error(err)
} else {
cblogger.Info("ListVM succeeded")
cblogger.Info("=========== VM list ================")
cblogger.Info(vmList)
spew.Dump(vmList)
cblogger.Infof("=========== VM list count : [%d] ================", len(vmList))
if len(vmList) > 0 {
VmID = vmList[0].IId
}
}
}
}
}
}
// Test VMSpec
func handleVMSpec() {
cblogger.Debug("Start VMSpec Resource Test")
ResourceHandler, err := getResourceHandler("VMSpec")
if err != nil {
panic(err)
}
handler := ResourceHandler.(irs.VMSpecHandler)
//config := readConfigFile()
//reqVMSpec := config.Aws.VMSpec
//reqVMSpec := "t2.small" // no GPU
//reqVMSpec := "p3.2xlarge" // 1 GPU
reqVMSpec := "p3.8xlarge" // 4 GPUs
//reqRegion := config.Aws.Region
//reqRegion = "us-east-1"
cblogger.Info("reqVMSpec : ", reqVMSpec)
for {
fmt.Println("")
fmt.Println("VMSpec Resource Test")
fmt.Println("1. ListVMSpec()")
fmt.Println("2. GetVMSpec()")
fmt.Println("3. ListOrgVMSpec()")
fmt.Println("4. GetOrgVMSpec()")
fmt.Println("0. Exit")
var commandNum int
//var reqDelIP string
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
panic(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
fmt.Println("Start ListVMSpec() ...")
result, err := handler.ListVMSpec()
if err != nil {
cblogger.Error("Failed to retrieve the VMSpec list : ", err)
} else {
cblogger.Debug("VMSpec list lookup result")
//spew.Dump(result)
cblogger.Debug(result)
cblogger.Infof("Total number of entries : [%d]", len(result))
}
fmt.Println("Finish ListVMSpec()")
case 2:
fmt.Println("Start GetVMSpec() ...")
result, err := handler.GetVMSpec(reqVMSpec)
if err != nil {
cblogger.Error(reqVMSpec, " Failed to retrieve VMSpec info : ", err)
} else {
cblogger.Debugf("VMSpec[%s] info lookup result", reqVMSpec)
//spew.Dump(result)
cblogger.Debug(result)
}
fmt.Println("Finish GetVMSpec()")
case 3:
fmt.Println("Start ListOrgVMSpec() ...")
result, err := handler.ListOrgVMSpec()
if err != nil {
cblogger.Error("Failed to retrieve the VMSpec Org list : ", err)
} else {
cblogger.Debug("VMSpec Org list lookup result")
//spew.Dump(result)
cblogger.Debug(result)
//spew.Dump(result)
//fmt.Println(result)
//fmt.Println("=========================")
//fmt.Println(result)
cblogger.Infof("Total number of entries : [%d]", len(result))
}
fmt.Println("Finish ListOrgVMSpec()")
case 4:
fmt.Println("Start GetOrgVMSpec() ...")
result, err := handler.GetOrgVMSpec(reqVMSpec)
if err != nil {
cblogger.Error(reqVMSpec, " Failed to retrieve VMSpec Org info : ", err)
} else {
cblogger.Debugf("VMSpec[%s] Org info lookup result", reqVMSpec)
//spew.Dump(result)
cblogger.Debug(result)
//fmt.Println(result)
}
fmt.Println("Finish GetOrgVMSpec()")
case 0:
fmt.Println("Exit")
return
}
}
}
}
func main() {
cblogger.Info("AWS Resource Test")
//handleVPC()
//handleKeyPair()
//handlePublicIP() // conf after creating a PublicIP
handleSecurity()
//handleVM()
//handleImage() //AMI
//handleVNic() //Lancard
//handleVMSpec()
}
//handlerType : the string in xxxHandler.go (under the resources folder) that precedes "Handler"
//(ex) ImageHandler.go -> "Image"
func getResourceHandler(handlerType string) (interface{}, error) {
var cloudDriver idrv.CloudDriver
cloudDriver = new(awsdrv.AwsDriver)
config := readConfigFile()
connectionInfo := idrv.ConnectionInfo{
CredentialInfo: idrv.CredentialInfo{
ClientId: config.Aws.AawsAccessKeyID,
ClientSecret: config.Aws.AwsSecretAccessKey,
},
RegionInfo: idrv.RegionInfo{
Region: config.Aws.Region,
Zone: config.Aws.Zone,
},
}
cloudConnection, errCon := cloudDriver.ConnectCloud(connectionInfo)
if errCon != nil {
return nil, errCon
}
var resourceHandler interface{}
var err error
switch handlerType {
case "Image":
resourceHandler, err = cloudConnection.CreateImageHandler()
//case "Publicip":
// resourceHandler, err = cloudConnection.CreatePublicIPHandler()
case "Security":
resourceHandler, err = cloudConnection.CreateSecurityHandler()
case "VNetwork":
resourceHandler, err = cloudConnection.CreateVPCHandler()
//case "VNic":
// resourceHandler, err = cloudConnection.CreateVNicHandler()
case "VM":
resourceHandler, err = cloudConnection.CreateVMHandler()
case "VMSpec":
resourceHandler, err = cloudConnection.CreateVMSpecHandler()
}
if err != nil {
return nil, err
}
return resourceHandler, nil
}
func setKeyPairHandler() (irs.KeyPairHandler, error) {
var cloudDriver idrv.CloudDriver
cloudDriver = new(awsdrv.AwsDriver)
config := readConfigFile()
connectionInfo := idrv.ConnectionInfo{
CredentialInfo: idrv.CredentialInfo{
ClientId: config.Aws.AawsAccessKeyID,
ClientSecret: config.Aws.AwsSecretAccessKey,
},
RegionInfo: idrv.RegionInfo{
Region: config.Aws.Region,
Zone: config.Aws.Zone,
},
}
cloudConnection, err := cloudDriver.ConnectCloud(connectionInfo)
if err != nil {
return nil, err
}
keyPairHandler, err := cloudConnection.CreateKeyPairHandler()
if err != nil {
return nil, err
}
return keyPairHandler, nil
}
func setVPCHandler() (irs.VPCHandler, error) {
var cloudDriver idrv.CloudDriver
cloudDriver = new(awsdrv.AwsDriver)
config := readConfigFile()
connectionInfo := idrv.ConnectionInfo{
CredentialInfo: idrv.CredentialInfo{
ClientId: config.Aws.AawsAccessKeyID,
ClientSecret: config.Aws.AwsSecretAccessKey,
},
RegionInfo: idrv.RegionInfo{
Region: config.Aws.Region,
Zone: config.Aws.Zone,
},
}
cloudConnection, err := cloudDriver.ConnectCloud(connectionInfo)
if err != nil {
return nil, err
}
handler, err := cloudConnection.CreateVPCHandler()
if err != nil {
return nil, err
}
return handler, nil
}
// Region : name of the region to use (ex) ap-northeast-2
// ImageID : AMI ID used to create the VM (ex) ami-047f7b46bd6dd5d84
// BaseName : prefix used when creating multiple VMs (VMs are named "BaseName" + "_" + "number") (ex) mcloud-barista
// VmID : EC2 instance ID whose life cycle will be tested
// InstanceType : instance type used when creating the VM (ex) t2.micro
// KeyName : key pair name used when creating the VM (ex) mcloud-barista-keypair
// MinCount :
// MaxCount :
// SubnetId : SubnetId of the VPC where the VM will be created (ex) subnet-cf9ccf83
// SecurityGroupID : security group ID applied to the created VM (ex) sg-0df1c209ea1915e4b
type Config struct {
Aws struct {
AwsAccessKeyID string `yaml:"aws_access_key_id"`
AwsSecretAccessKey string `yaml:"aws_secret_access_key"`
Region string `yaml:"region"`
Zone string `yaml:"zone"`
ImageID string `yaml:"image_id"`
VmID string `yaml:"ec2_instance_id"`
BaseName string `yaml:"base_name"`
InstanceType string `yaml:"instance_type"`
KeyName string `yaml:"key_name"`
MinCount int64 `yaml:"min_count"`
MaxCount int64 `yaml:"max_count"`
SubnetID string `yaml:"subnet_id"`
SecurityGroupID string `yaml:"security_group_id"`
PublicIP string `yaml:"public_ip"`
} `yaml:"aws"`
}
//Read the configuration file.
//Set the CBSPIDER_PATH environment variable first and create /config/config.yaml under that folder.
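// Illustrative config.yaml (keys taken from the yaml tags of the Config
// struct above; all values are placeholders, not real credentials):
//
// aws:
//   aws_access_key_id: "YOUR_ACCESS_KEY"
//   aws_secret_access_key: "YOUR_SECRET_KEY"
//   region: "ap-northeast-2"
//   zone: "ap-northeast-2a"
//   image_id: "ami-047f7b46bd6dd5d84"
//   ec2_instance_id: "i-0123456789abcdef0"
//   base_name: "mcloud-barista"
//   instance_type: "t2.micro"
//   key_name: "mcloud-barista-keypair"
//   min_count: 1
//   max_count: 1
//   subnet_id: "subnet-cf9ccf83"
//   security_group_id: "sg-0df1c209ea1915e4b"
//   public_ip: ""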
func readConfigFile() Config {
// Set Environment Value of Project Root Path
rootPath := os.Getenv("CBSPIDER_PATH")
//rootpath := "D:/Workspace/mcloud-barista-config"
// /mnt/d/Workspace/mcloud-barista-config/config/config.yaml
cblogger.Infof("Test Data 설정파일 : [%]", rootPath+"/config/config.yaml")
data, err := ioutil.ReadFile(rootPath + "/config/config.yaml")
//data, err := ioutil.ReadFile("D:/Workspace/mcloud-bar-config/config/config.yaml")
if err != nil {
panic(err)
}
var config Config
err = yaml.Unmarshal(data, &config)
if err != nil {
panic(err)
}
cblogger.Info("Loaded ConfigFile...")
//spew.Dump(config)
//cblogger.Info(config)
cblogger.Debug(config.Aws.AwsAccessKeyID, " ", config.Aws.Region)
//cblogger.Debug(config.Aws.Region)
return config
}
| [
"\"CBSPIDER_PATH\""
] | [] | [
"CBSPIDER_PATH"
] | [] | ["CBSPIDER_PATH"] | go | 1 | 0 | |
setup.py | import os
import os.path as osp
import shutil
import sys
import warnings
from setuptools import find_packages, setup
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'mmocr/version.py'
is_windows = sys.platform == 'win32'
def add_mim_extension():
"""Add extra files that are required to support MIM into the package.
These files will be added by creating a symlink to the originals if the
package is installed in `editable` mode (e.g. pip install -e .), or by
copying from the originals otherwise.
"""
# parse the installation mode
if 'develop' in sys.argv:
# installed by `pip install -e .`
mode = 'symlink'
elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
# installed by `pip install .`
# or create source distribution by `python setup.py sdist`
mode = 'copy'
else:
return
filenames = ['tools', 'configs', 'model-index.yml']
repo_path = osp.dirname(__file__)
mim_path = osp.join(repo_path, 'mmocr', '.mim')
os.makedirs(mim_path, exist_ok=True)
for filename in filenames:
if osp.exists(filename):
src_path = osp.join(repo_path, filename)
tar_path = osp.join(mim_path, filename)
if osp.isfile(tar_path) or osp.islink(tar_path):
os.remove(tar_path)
elif osp.isdir(tar_path):
shutil.rmtree(tar_path)
if mode == 'symlink':
src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
try:
os.symlink(src_relpath, tar_path)
except OSError:
# Creating a symbolic link on windows may raise an
# `OSError: [WinError 1314]` due to privilege. If
# the error happens, the src file will be copied
mode = 'copy'
warnings.warn(
f'Failed to create a symbolic link for {src_relpath}, '
f'and it will be copied to {tar_path}')
else:
continue
if mode == 'copy':
if osp.isfile(src_path):
shutil.copyfile(src_path, tar_path)
elif osp.isdir(src_path):
shutil.copytree(src_path, tar_path)
else:
warnings.warn(f'Cannot copy file {src_path}.')
else:
raise ValueError(f'Invalid mode {mode}')
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
# return short version for sdist
if 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
return locals()['short_version']
else:
return locals()['__version__']
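# Illustrative contents of mmocr/version.py assumed by get_version() above
# (the values are placeholders):
#   __version__ = '0.1.0'
#   short_version = '0.1.0'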
def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strip
specific version information.
Args:
fname (str): Path to requirements file.
with_version (bool, default=False): If True, include version specs.
Returns:
info (list[str]): List of requirements items.
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
from os.path import exists
import re
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
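# Illustrative example (hypothetical requirements line): parse_line() turns
#   "torch>=1.5;platform_system=='Linux'"
# into {'line': ..., 'package': 'torch', 'version': ('>=', '1.5'),
#       'platform_deps': "platform_system=='Linux'"}
# and gen_packages_items() re-joins it into
#   "torch>=1.5;platform_system=='Linux'".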
if __name__ == '__main__':
add_mim_extension()
library_dirs = [
lp for lp in os.environ.get('LD_LIBRARY_PATH', '').split(':')
if len(lp) > 1
]
setup(
name='mmocr',
version=get_version(),
description='OpenMMLab Text Detection, OCR, and NLP Toolbox',
long_description=readme(),
long_description_content_type='text/markdown',
maintainer='MMOCR Authors',
maintainer_email='[email protected]',
keywords='Text Detection, OCR, KIE, NLP',
packages=find_packages(exclude=('configs', 'tools', 'demo')),
include_package_data=True,
url='https://github.com/open-mmlab/mmocr',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
license='Apache License 2.0',
setup_requires=parse_requirements('requirements/build.txt'),
tests_require=parse_requirements('requirements/tests.txt'),
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
'build': parse_requirements('requirements/build.txt'),
'optional': parse_requirements('requirements/optional.txt'),
},
zip_safe=False)
| [] | [] | [
"LD_LIBRARY_PATH"
] | [] | ["LD_LIBRARY_PATH"] | python | 1 | 0 | |
adapters/raw/raw.go | package raw
import (
"bytes"
"encoding/json"
"errors"
"log"
"net"
"os"
"reflect"
"text/template"
"github.com/clevertechru/logspout/router"
)
func init() {
router.AdapterFactories.Register(NewRawAdapter, "raw")
}
var funcs = template.FuncMap{
"toJSON": func(value interface{}) string {
bytes, err := json.Marshal(value)
if err != nil {
log.Println("error marshalling to JSON: ", err)
return "null"
}
return string(bytes)
},
}
// NewRawAdapter returns a configured raw.Adapter
func NewRawAdapter(route *router.Route) (router.LogAdapter, error) {
transport, found := router.AdapterTransports.Lookup(route.AdapterTransport("udp"))
if !found {
return nil, errors.New("bad transport: " + route.Adapter)
}
conn, err := transport.Dial(route.Address, route.Options)
if err != nil {
return nil, err
}
tmplStr := "{{.Data}}\n"
if os.Getenv("RAW_FORMAT") != "" {
tmplStr = os.Getenv("RAW_FORMAT")
}
tmpl, err := template.New("raw").Funcs(funcs).Parse(tmplStr)
if err != nil {
return nil, err
}
return &Adapter{
route: route,
conn: conn,
tmpl: tmpl,
}, nil
}
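// Example (illustrative): with the default template each message is written
// as "{{.Data}}\n", i.e. the raw log line. Setting the RAW_FORMAT environment
// variable overrides this; e.g. RAW_FORMAT='{{ toJSON . }}' would serialize
// the whole router.Message as JSON via the toJSON helper registered above.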
// Adapter is a simple adapter that streams log output to a connection without any templating
type Adapter struct {
conn net.Conn
route *router.Route
tmpl *template.Template
}
// Stream sends log data to a connection
func (a *Adapter) Stream(logstream chan *router.Message) {
for message := range logstream {
buf := new(bytes.Buffer)
err := a.tmpl.Execute(buf, message)
if err != nil {
log.Println("raw:", err)
return
}
//log.Println("debug:", buf.String())
_, err = a.conn.Write(buf.Bytes())
if err != nil {
log.Println("raw:", err)
if reflect.TypeOf(a.conn).String() != "*net.UDPConn" {
return
}
}
}
}
| [
"\"RAW_FORMAT\"",
"\"RAW_FORMAT\""
] | [] | [
"RAW_FORMAT"
] | [] | ["RAW_FORMAT"] | go | 1 | 0 | |
managed/devops/opscli/ybops/cloud/aws/method.py | #!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
from ybops.cloud.common.method import ListInstancesMethod, CreateInstancesMethod, \
ProvisionInstancesMethod, DestroyInstancesMethod, AbstractMethod, \
AbstractAccessMethod, AbstractNetworkMethod, AbstractInstancesMethod
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.aws.utils import get_yb_sg_name, create_dns_record_set, edit_dns_record_set, \
delete_dns_record_set, list_dns_record_set
import json
import os
import logging
import glob
import subprocess
class AwsListInstancesMethod(ListInstancesMethod):
"""Subclass for listing instances in AWS. Currently doesn't provide any extra functionality.
"""
def __init__(self, base_command):
super(AwsListInstancesMethod, self).__init__(base_command)
class AwsCreateInstancesMethod(CreateInstancesMethod):
"""Subclass for creating instances in AWS. This is responsible for taking in the AWS specific
flags, such as VPCs, AMIs and more.
"""
def __init__(self, base_command):
super(AwsCreateInstancesMethod, self).__init__(base_command)
def add_extra_args(self):
"""Setup the CLI options for creating instances.
"""
super(AwsCreateInstancesMethod, self).add_extra_args()
self.parser.add_argument("--key_pair_name", default=os.environ.get("YB_EC2_KEY_PAIR_NAME"),
help="AWS Key Pair name")
self.parser.add_argument("--security_group_id", default=None,
help="AWS comma delimited security group IDs.")
self.parser.add_argument("--volume_type", choices=["gp2", "io1"], default="gp2",
help="Volume type for volumes on EBS-backed instances.")
self.parser.add_argument("--spot_price", default=None,
help="Spot price for each instance (if desired)")
self.parser.add_argument("--cmk_res_name", help="CMK arn to enable encrypted EBS volumes.")
self.parser.add_argument("--iam_profile_arn", help="ARN string for IAM instance profile")
def preprocess_args(self, args):
super(AwsCreateInstancesMethod, self).preprocess_args(args)
if args.region is None:
raise YBOpsRuntimeError("Must specify a region!")
# TODO: better handling of this...
if args.machine_image is None:
# Update the machine_image with the ami_id for this version.
args.machine_image = self.cloud.get_image(region=args.region).get(args.region)
def callback(self, args):
# These are to be used in the provision part for now.
self.extra_vars.update({
"aws_key_pair_name": args.key_pair_name,
})
if args.security_group_id is not None:
self.extra_vars.update({
"aws_security_group_id": args.security_group_id
})
else:
self.extra_vars.update({
"aws_security_group": get_yb_sg_name(args.region)
})
if args.spot_price is not None:
self.extra_vars.update({
"aws_spot_price": args.spot_price
})
if args.instance_tags is not None:
self.extra_vars.update({
"instance_tags": args.instance_tags
})
super(AwsCreateInstancesMethod, self).callback(args)
def run_ansible_create(self, args):
# TODO: do we need this?
self.update_ansible_vars(args)
self.cloud.create_instance(args)
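# Illustrative invocation sketch (flag names come from add_extra_args above;
# the command prefix and values are hypothetical):
#   ... aws create --region us-west-2 --key_pair_name my-keypair \
#       --volume_type gp2 --spot_price 0.05 --security_group_id sg-0123abcd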
class AwsProvisionInstancesMethod(ProvisionInstancesMethod):
"""Subclass for provisioning instances in AWS. Setups the proper Create method to point to the
AWS specific one.
"""
def __init__(self, base_command):
super(AwsProvisionInstancesMethod, self).__init__(base_command)
def setup_create_method(self):
"""Override to get the wiring to the proper method.
"""
self.create_method = AwsCreateInstancesMethod(self.base_command)
def add_extra_args(self):
super(AwsProvisionInstancesMethod, self).add_extra_args()
self.parser.add_argument("--use_chrony", action="store_true",
help="Whether to use chrony instead of NTP.")
def update_ansible_vars_with_args(self, args):
super(AwsProvisionInstancesMethod, self).update_ansible_vars_with_args(args)
self.extra_vars["use_chrony"] = args.use_chrony
self.extra_vars["device_names"] = self.cloud.get_device_names(args)
self.extra_vars["mount_points"] = self.cloud.get_mount_points_csv(args)
self.extra_vars["cmk_res_name"] = args.cmk_res_name
class AwsDestroyInstancesMethod(DestroyInstancesMethod):
"""Subclass for destroying an instance in AWS, we fetch the host info and update the extra_vars
with necessary parameters
"""
def __init__(self, base_command):
super(AwsDestroyInstancesMethod, self).__init__(base_command)
def callback(self, args):
host_info = self.cloud.get_host_info(args, private_ip=args.node_ip)
if not host_info:
logging.error("Host {} does not exists.".format(args.search_pattern))
return
self.extra_vars.update({
"cloud_subnet": host_info["subnet"],
"cloud_region": host_info["region"],
"private_ip": host_info['private_ip']
})
super(AwsDestroyInstancesMethod, self).callback(args)
class AwsTagsMethod(AbstractInstancesMethod):
def __init__(self, base_command):
super(AwsTagsMethod, self).__init__(base_command, "tags")
def add_extra_args(self):
super(AwsTagsMethod, self).add_extra_args()
self.parser.add_argument("--remove_tags", required=False,
help="Tag keys to remove.")
def callback(self, args):
self.cloud.modify_tags(args)
class AwsAccessAddKeyMethod(AbstractAccessMethod):
def __init__(self, base_command):
super(AwsAccessAddKeyMethod, self).__init__(base_command, "add-key")
def callback(self, args):
(private_key_file, public_key_file) = self.validate_key_files(args)
self.cloud.add_key_pair(args)
print(json.dumps({"private_key": private_key_file, "public_key": public_key_file}))
class AwsAccessDeleteKeyMethod(AbstractAccessMethod):
def __init__(self, base_command):
super(AwsAccessDeleteKeyMethod, self).__init__(base_command, "delete-key")
def callback(self, args):
try:
self.cloud.delete_key_pair(args)
for key_file in glob.glob("{}/{}.*".format(args.key_file_path, args.key_pair_name)):
os.remove(key_file)
print(json.dumps({"success": "Keypair {} deleted.".format(args.key_pair_name)}))
except Exception as e:
logging.error(e)
print(json.dumps({"error": "Unable to delete Keypair: {}".format(args.key_pair_name)}))
class AwsAccessListKeysMethod(AbstractMethod):
def __init__(self, base_command):
super(AwsAccessListKeysMethod, self).__init__(base_command, "list-keys")
def add_extra_args(self):
"""Setup the CLI options for List Key Pair.
"""
super(AwsAccessListKeysMethod, self).add_extra_args()
self.parser.add_argument("--key_pair_name", required=False, default=None,
help="AWS Key Pair name")
def callback(self, args):
print(json.dumps(self.cloud.list_key_pair(args)))
class AwsQueryRegionsMethod(AbstractMethod):
def __init__(self, base_command):
super(AwsQueryRegionsMethod, self).__init__(base_command, "regions")
def callback(self, args):
print(json.dumps(self.cloud.get_regions()))
class AwsQueryZonesMethod(AbstractMethod):
def __init__(self, base_command):
super(AwsQueryZonesMethod, self).__init__(base_command, "zones")
def add_extra_args(self):
super(AwsQueryZonesMethod, self).add_extra_args()
self.parser.add_argument("--dest_vpc_id", required=False, help="Destination VPC Id. " +
"Do not specify if you want us to create a new one.")
def preprocess_args(self, args):
super(AwsQueryZonesMethod, self).preprocess_args(args)
if args.dest_vpc_id and not args.region:
raise YBOpsRuntimeError("Using --dest_vpc_id requires --region")
def callback(self, args):
print(json.dumps(self.cloud.get_zones(args)))
class AwsQueryVPCMethod(AbstractMethod):
def __init__(self, base_command):
super(AwsQueryVPCMethod, self).__init__(base_command, "vpc")
def callback(self, args):
print(json.dumps(self.cloud.query_vpc(args)))
class AwsQueryCurrentHostMethod(AbstractMethod):
VALID_METADATA_TYPES = ("vpc-id", "subnet-id", "instance-id", "mac",
"region", "privateIp", "security-groups", "role")
def __init__(self, base_command):
super(AwsQueryCurrentHostMethod, self).__init__(base_command, "current-host")
# We do not need cloud credentials to query metadata.
self.need_validation = False
def add_extra_args(self):
super(AwsQueryCurrentHostMethod, self).add_extra_args()
self.parser.add_argument("--metadata_types", nargs="+", type=str, required=True,
choices=self.VALID_METADATA_TYPES)
def callback(self, args):
try:
print(json.dumps(self.cloud.get_current_host_info(args)))
except YBOpsRuntimeError as ye:
print(json.dumps({"error": ye.message}))
class AwsQueryPricingMethod(AbstractMethod):
def __init__(self, base_command):
super(AwsQueryPricingMethod, self).__init__(base_command, "pricing")
def callback(self, args):
raise YBOpsRuntimeError("Not Implemented")
class AwsQuerySpotPricingMethod(AbstractMethod):
def __init__(self, base_command):
super(AwsQuerySpotPricingMethod, self).__init__(base_command, "spot-pricing")
def add_extra_args(self):
super(AwsQuerySpotPricingMethod, self).add_extra_args()
self.parser.add_argument("--instance_type", required=True,
help="The instance type to get pricing info for")
def callback(self, args):
try:
if args.region is None or args.zone is None:
raise YBOpsRuntimeError("Must specify a region & zone to query spot price")
print(json.dumps({'SpotPrice': self.cloud.get_spot_pricing(args)}))
except YBOpsRuntimeError as ye:
print(json.dumps({"error": ye.message}))
class AwsNetworkBootstrapMethod(AbstractNetworkMethod):
def __init__(self, base_command):
super(AwsNetworkBootstrapMethod, self).__init__(base_command, "bootstrap")
def add_extra_args(self):
"""Setup the CLI options network bootstrap."""
super(AwsNetworkBootstrapMethod, self).add_extra_args()
self.parser.add_argument("--custom_payload", required=False,
help="JSON payload of per-region data.")
def callback(self, args):
try:
print(json.dumps(self.cloud.network_bootstrap(args)))
except YBOpsRuntimeError as ye:
print(json.dumps({"error": ye.message}))
class AwsNetworkQueryMethod(AbstractNetworkMethod):
def __init__(self, base_command):
super(AwsNetworkQueryMethod, self).__init__(base_command, "query")
def callback(self, args):
try:
print(json.dumps(self.cloud.query_vpc(args)))
except YBOpsRuntimeError as ye:
print(json.dumps({"error": ye.message}))
class AwsNetworkCleanupMethod(AbstractNetworkMethod):
def __init__(self, base_command):
super(AwsNetworkCleanupMethod, self).__init__(base_command, "cleanup")
def add_extra_args(self):
"""Setup the CLI options network cleanup."""
super(AwsNetworkCleanupMethod, self).add_extra_args()
self.parser.add_argument("--custom_payload", required=False,
help="JSON payload of per-region data.")
def callback(self, args):
try:
print(json.dumps(self.cloud.network_cleanup(args)))
except YBOpsRuntimeError as ye:
print(json.dumps({"error": ye.message}))
class AbstractDnsMethod(AbstractMethod):
def __init__(self, base_command, method_name):
super(AbstractDnsMethod, self).__init__(base_command, method_name)
self.ip_list = []
self.naming_info_required = True
def add_extra_args(self):
super(AbstractDnsMethod, self).add_extra_args()
self.parser.add_argument("--hosted_zone_id", required=True,
help="The ID of the Route53 Hosted Zone.")
self.parser.add_argument("--domain_name_prefix", required=self.naming_info_required,
help="The prefix to create the RecordSet with, in your Zone.")
self.parser.add_argument("--node_ips", required=self.naming_info_required,
help="The CSV of the node IPs to associate to this DNS entry.")
def preprocess_args(self, args):
super(AbstractDnsMethod, self).preprocess_args(args)
if args.node_ips:
self.ip_list = args.node_ips.split(',')
class AwsCreateDnsEntryMethod(AbstractDnsMethod):
def __init__(self, base_command):
super(AwsCreateDnsEntryMethod, self).__init__(base_command, "create")
def callback(self, args):
create_dns_record_set(args.hosted_zone_id, args.domain_name_prefix, self.ip_list)
class AwsEditDnsEntryMethod(AbstractDnsMethod):
def __init__(self, base_command):
super(AwsEditDnsEntryMethod, self).__init__(base_command, "edit")
def callback(self, args):
edit_dns_record_set(args.hosted_zone_id, args.domain_name_prefix, self.ip_list)
class AwsDeleteDnsEntryMethod(AbstractDnsMethod):
def __init__(self, base_command):
super(AwsDeleteDnsEntryMethod, self).__init__(base_command, "delete")
def callback(self, args):
delete_dns_record_set(args.hosted_zone_id, args.domain_name_prefix, self.ip_list)
class AwsListDnsEntryMethod(AbstractDnsMethod):
def __init__(self, base_command):
super(AwsListDnsEntryMethod, self).__init__(base_command, "list")
self.naming_info_required = False
def callback(self, args):
try:
result = list_dns_record_set(args.hosted_zone_id)
print(json.dumps({
'name': result['HostedZone']['Name']
}))
except Exception as e:
print(json.dumps({'error': repr(e)}))
| [] | [] | [
"YB_EC2_KEY_PAIR_NAME"
] | [] | ["YB_EC2_KEY_PAIR_NAME"] | python | 1 | 0 | |
main.go | package main
import (
"flag"
"log"
"os"
"path/filepath"
networkingv1 "k8s.io/api/networking/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
func main() {
var kubeconfig *string
if home := homeDir(); home != "" {
kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
}
flag.Parse()
clientset, err := getClientset(*kubeconfig)
if err != nil {
log.Fatal(err)
}
globalCSV = InitializeCSV()
for _, n := range listNamespaces(clientset).Items {
ns := &Namespace{
Name: n.ObjectMeta.Name,
Labels: n.ObjectMeta.Labels,
}
for _, p := range listPods(clientset, ns.Name).Items {
ns.Pods = append(ns.Pods, &Pod{
Egress: []Rule{},
Labels: p.ObjectMeta.Labels,
Ingress: []Rule{},
Name: p.ObjectMeta.Name,
Namespace: *ns,
})
}
globalCSV.Namespaces = append(globalCSV.Namespaces, ns)
}
for _, ns := range globalCSV.Namespaces {
for _, nwp := range listNetworkPolicies(clientset, ns.Name).Items {
ns.attachPolicy(nwp.Spec)
}
}
globalCSV.Output()
}
func getClientset(kubeconfig string) (*kubernetes.Clientset, error) {
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, err
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return clientset, nil
}
func listNamespaces(clientset *kubernetes.Clientset) *corev1.NamespaceList {
ns, err := clientset.CoreV1().Namespaces().List(metav1.ListOptions{})
if err != nil {
log.Fatal(err)
}
return ns
}
func listPods(clientset *kubernetes.Clientset, ns string) *corev1.PodList {
pods, err := clientset.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
log.Fatal(err)
}
return pods
}
func listNetworkPolicies(clientset *kubernetes.Clientset, ns string) *networkingv1.NetworkPolicyList {
nwp, err := clientset.NetworkingV1().NetworkPolicies(ns).List(metav1.ListOptions{})
if err != nil {
log.Fatal(err)
}
return nwp
}
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
| [
"\"HOME\"",
"\"USERPROFILE\""
] | [] | [
"HOME",
"USERPROFILE"
] | [] | ["HOME", "USERPROFILE"] | go | 2 | 0 | |
pkg/client/cli/telepresence_test.go | package cli_test
import (
"bufio"
"context"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/suite"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
"github.com/datawire/ambassador/pkg/dtest"
"github.com/datawire/dlib/dcontext"
"github.com/datawire/dlib/dexec"
"github.com/datawire/dlib/dgroup"
"github.com/datawire/dlib/dhttp"
"github.com/datawire/dlib/dlog"
"github.com/telepresenceio/telepresence/v2/pkg/client"
"github.com/telepresenceio/telepresence/v2/pkg/client/cli"
"github.com/telepresenceio/telepresence/v2/pkg/filelocation"
"github.com/telepresenceio/telepresence/v2/pkg/version"
)
// serviceCount is the number of interceptable services that gets installed
// in the cluster and later intercepted
const serviceCount = 3
func TestTelepresence(t *testing.T) {
ctx := dlog.NewTestContext(t, false)
dtest.WithMachineLock(ctx, func(ctx context.Context) {
suite.Run(t, new(telepresenceSuite))
})
}
type telepresenceSuite struct {
suite.Suite
testVersion string
namespace string
managerTestNamespace string
}
func (ts *telepresenceSuite) SetupSuite() {
// Check that the "ko" program exists, and adjust PATH as necessary.
if info, err := os.Stat("../../../tools/bin/ko"); err != nil || !info.Mode().IsRegular() || (info.Mode().Perm()&0100) == 0 {
ts.FailNow("it looks like the ./tools/bin/ko executable wasn't built; be sure to build it with `make` before running `go test`!")
}
require := ts.Require()
toolbindir, err := filepath.Abs("../../../tools/bin")
require.NoError(err)
_ = os.Chdir("../../..")
os.Setenv("PATH", toolbindir+":"+os.Getenv("PATH"))
// Remove very verbose output from DTEST initialization
log.SetOutput(ioutil.Discard)
ts.testVersion = fmt.Sprintf("v2.0.0-gotest.%d", os.Getpid())
ts.namespace = fmt.Sprintf("telepresence-%d", os.Getpid())
ts.managerTestNamespace = fmt.Sprintf("ambassador-%d", os.Getpid())
version.Version = ts.testVersion
ctx := dlog.NewTestContext(ts.T(), false)
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
executable, err := ts.buildExecutable(ctx)
ts.NoError(err)
client.SetExe(executable)
}()
_ = os.Remove(client.ConnectorSocketName)
err = run(ctx, "sudo", "true")
require.NoError(err, "acquire privileges")
registry := dtest.DockerRegistry(ctx)
os.Setenv("KO_DOCKER_REPO", registry)
os.Setenv("TELEPRESENCE_REGISTRY", registry)
os.Setenv("TELEPRESENCE_MANAGER_NAMESPACE", ts.managerTestNamespace)
wg.Add(1)
go func() {
defer wg.Done()
err := ts.publishManager()
ts.NoError(err)
}()
wg.Add(1)
go func() {
defer wg.Done()
kubeconfig := dtest.Kubeconfig(ctx)
os.Setenv("DTEST_KUBECONFIG", kubeconfig)
os.Setenv("KUBECONFIG", kubeconfig)
err = run(ctx, "kubectl", "create", "namespace", ts.namespace)
ts.NoError(err)
err = run(ctx, "kubectl", "apply", "-f", "k8s/client_rbac.yaml")
ts.NoError(err)
// This is how we create a user that has their rbac restricted to what we have in
// k8s/client_rbac.yaml. We do this by creating a service account and then getting
// the token from said service account and storing it in our kubeconfig.
secret, err := output(ctx, "kubectl", "get", "sa", "telepresence-test-developer", "-o", "jsonpath={.secrets[0].name}")
ts.NoError(err)
encSecret, err := output(ctx, "kubectl", "get", "secret", secret, "-o", "jsonpath={.data.token}")
ts.NoError(err)
token, err := base64.StdEncoding.DecodeString(encSecret)
ts.NoError(err)
err = run(ctx, "kubectl", "config", "set-credentials", "telepresence-test-developer", "--token", string(token))
ts.NoError(err)
err = run(ctx, "kubectl", "config", "set-context", "telepresence-test-developer", "--user", "telepresence-test-developer", "--cluster", "default")
ts.NoError(err)
// We start with the default context, and will switch to the
// telepresence-test-developer user later in the tests
err = run(ctx, "kubectl", "config", "use-context", "default")
ts.NoError(err)
}()
wg.Wait()
wg.Add(serviceCount)
for i := 0; i < serviceCount; i++ {
i := i
go func() {
defer wg.Done()
err = ts.applyEchoService(ctx, fmt.Sprintf("hello-%d", i))
ts.NoError(err)
}()
}
wg.Add(1)
go func() {
defer wg.Done()
err = ts.applyApp(ctx, "with-probes", "with-probes", 80)
ts.NoError(err)
}()
wg.Add(1)
go func() {
defer wg.Done()
err = ts.applyApp(ctx, "rs-echo", "rs-echo", 80)
ts.NoError(err)
}()
wg.Add(1)
go func() {
defer wg.Done()
err = ts.applyApp(ctx, "ss-echo", "ss-echo", 80)
ts.NoError(err)
}()
wg.Wait()
// Ensure that telepresence is not logged in
_, _ = telepresence(ts.T(), "logout")
// Ensure that no telepresence is running when the tests start
_, _ = telepresence(ts.T(), "quit")
}
func (ts *telepresenceSuite) TearDownSuite() {
ctx := dlog.NewTestContext(ts.T(), false)
_ = run(ctx, "kubectl", "config", "use-context", "default")
_ = run(ctx, "kubectl", "delete", "namespace", ts.namespace)
_ = run(ctx, "kubectl", "delete", "namespace", ts.managerTestNamespace)
// Undo RBAC things
_ = run(ctx, "kubectl", "delete", "-f", "k8s/client_rbac.yaml")
_ = run(ctx, "kubectl", "config", "delete-context", "telepresence-test-developer")
_ = run(ctx, "kubectl", "config", "delete-user", "telepresence-test-developer")
}
func (ts *telepresenceSuite) TestA_WithNoDaemonRunning() {
ts.Run("Version", func() {
stdout, stderr := telepresence(ts.T(), "version")
ts.Empty(stderr)
ts.Contains(stdout, fmt.Sprintf("Client: %s", client.DisplayVersion()))
})
ts.Run("Status", func() {
out, _ := telepresence(ts.T(), "status")
ts.Contains(out, "Root Daemon: Not running")
ts.Contains(out, "User Daemon: Not running")
})
ts.Run("Connect using invalid KUBECONFIG", func() {
ts.Run("Reports config error and exits", func() {
kubeConfig := os.Getenv("KUBECONFIG")
defer os.Setenv("KUBECONFIG", kubeConfig)
os.Setenv("KUBECONFIG", "/dev/null")
stdout, stderr := telepresence(ts.T(), "connect")
ts.Contains(stderr, "kubeconfig has no context definition")
ts.Contains(stdout, "Launching Telepresence Daemon")
ts.Contains(stdout, "Daemon quitting")
})
})
ts.Run("Connect with non existing context", func() {
ts.Run("Reports connect error and exits", func() {
stdout, stderr := telepresence(ts.T(), "connect", "--context", "not-likely-to-exist")
ts.Contains(stderr, `"not-likely-to-exist" does not exist`)
ts.Contains(stdout, "Launching Telepresence Daemon")
ts.Contains(stdout, "Daemon quitting")
})
})
ts.Run("Connect with a command", func() {
ts.Run("Connects, executes the command, and then exits", func() {
stdout, stderr := telepresence(ts.T(), "connect", "--", client.GetExe(), "status")
require := ts.Require()
require.Empty(stderr)
require.Contains(stdout, "Launching Telepresence Daemon")
require.Contains(stdout, "Connected to context")
require.Contains(stdout, "Kubernetes context:")
require.Regexp(`Telepresence proxy:\s+ON`, stdout)
require.Contains(stdout, "Daemon quitting")
})
})
ts.Run("Root Daemon Log Level", func() {
t := ts.T()
require := ts.Require()
configDir := t.TempDir()
config, err := os.Create(filepath.Join(configDir, "config.yml"))
require.NoError(err)
_, err = config.WriteString("logLevels:\n rootDaemon: debug\n")
require.NoError(err)
config.Close()
logDir := t.TempDir()
ctx := dlog.NewTestContext(t, false)
ctx = filelocation.WithAppUserConfigDir(ctx, configDir)
ctx = filelocation.WithAppUserLogDir(ctx, logDir)
_, stderr := telepresenceContext(ctx, "connect")
require.Empty(stderr)
_, stderr = telepresenceContext(ctx, "quit")
require.Empty(stderr)
rootLog, err := os.Open(filepath.Join(logDir, "daemon.log"))
require.NoError(err)
defer rootLog.Close()
hasDebug := false
scn := bufio.NewScanner(rootLog)
match := regexp.MustCompile(` debug +daemon/server`)
for scn.Scan() && !hasDebug {
hasDebug = match.MatchString(scn.Text())
}
ts.True(hasDebug, "daemon.log does not contain expected debug statements")
})
ts.Run("DNS includes", func() {
t := ts.T()
require := ts.Require()
tmpDir := t.TempDir()
origKubeconfigFileName := os.Getenv("DTEST_KUBECONFIG")
kubeconfigFileName := filepath.Join(tmpDir, "kubeconfig")
configFileName := filepath.Join(tmpDir, "config.yml")
var cfg *api.Config
cfg, err := clientcmd.LoadFromFile(origKubeconfigFileName)
require.NoError(err, "Unable to read DTEST_KUBECONFIG")
require.NoError(api.MinifyConfig(cfg), "unable to minify config")
var cluster *api.Cluster
for _, c := range cfg.Clusters {
cluster = c
break
}
require.NotNilf(cluster, "unable to get cluster from config")
cluster.Extensions = map[string]runtime.Object{"telepresence.io": &runtime.Unknown{
Raw: []byte(`{"dns":{"include-suffixes": [".org"]}}`),
}}
require.NoError(clientcmd.WriteToFile(*cfg, kubeconfigFileName), "unable to write modified kubeconfig")
configFile, err := os.Create(configFileName)
require.NoError(err)
_, err = configFile.WriteString("logLevels:\n rootDaemon: debug\n")
require.NoError(err)
configFile.Close()
defer os.Setenv("KUBECONFIG", origKubeconfigFileName)
os.Setenv("KUBECONFIG", kubeconfigFileName)
ctx := dlog.NewTestContext(t, false)
ctx = filelocation.WithAppUserConfigDir(ctx, tmpDir)
ctx = filelocation.WithAppUserLogDir(ctx, tmpDir)
_, stderr := telepresenceContext(ctx, "connect")
require.Empty(stderr)
_ = run(ctx, "curl", "--silent", "example.org")
_, stderr = telepresenceContext(ctx, "quit")
require.Empty(stderr)
rootLog, err := os.Open(filepath.Join(tmpDir, "daemon.log"))
require.NoError(err)
defer rootLog.Close()
hasLookup := false
scn := bufio.NewScanner(rootLog)
for scn.Scan() && !hasLookup {
hasLookup = strings.Contains(scn.Text(), `LookupHost "example.org"`)
}
ts.True(hasLookup, "daemon.log does not contain expected LookupHost statement")
})
}
func (ts *telepresenceSuite) TestB_Connected() {
suite.Run(ts.T(), &connectedSuite{tpSuite: ts})
}
func (ts *telepresenceSuite) TestC_Uninstall() {
ts.Run("Uninstalls the traffic manager and quits", func() {
require := ts.Require()
ctx := dlog.NewTestContext(ts.T(), false)
names := func() (string, error) {
return ts.kubectlOut(ctx, "get",
"--namespace", ts.managerTestNamespace,
"svc,deploy", "traffic-manager",
"--ignore-not-found",
"-o", "jsonpath={.items[*].metadata.name}")
}
stdout, err := names()
require.NoError(err)
require.Equal(2, len(strings.Split(stdout, " "))) // The service and the deployment
// The telepresence-test-developer will not be able to uninstall everything
require.NoError(run(ctx, "kubectl", "config", "use-context", "default"))
stdout, stderr := telepresence(ts.T(), "uninstall", "--everything")
require.Empty(stderr)
require.Contains(stdout, "Daemon quitting")
require.Eventually(
func() bool {
stdout, _ := names()
return stdout == ""
},
5*time.Second, // waitFor
500*time.Millisecond, // polling interval
)
})
}
type connectedSuite struct {
suite.Suite
tpSuite *telepresenceSuite
}
func (cs *connectedSuite) ns() string {
return cs.tpSuite.namespace
}
func (cs *connectedSuite) SetupSuite() {
require := cs.Require()
c := dlog.NewTestContext(cs.T(), false)
cs.NoError(cs.tpSuite.kubectl(c, "config", "use-context", "telepresence-test-developer"))
stdout, stderr := telepresence(cs.T(), "connect")
require.Empty(stderr)
require.Contains(stdout, "Connected to context")
// Give outbound interceptor 15 seconds to kick in.
require.Eventually(
// condition
func() bool {
stdout, _ := telepresence(cs.T(), "status")
return regexp.MustCompile(`Telepresence proxy:\s+ON`).FindString(stdout) != ""
},
15*time.Second, // waitFor
time.Second, // polling interval
"Timeout waiting for network overrides to establish", // msg
)
}
func (cs *connectedSuite) TearDownSuite() {
stdout, stderr := telepresence(cs.T(), "quit")
cs.Empty(stderr)
cs.Contains(stdout, "quitting")
c := dlog.NewTestContext(cs.T(), false)
cs.NoError(cs.tpSuite.kubectl(c, "config", "use-context", "default"))
time.Sleep(time.Second) // Allow some time for processes to die and sockets to vanish
}
func (cs *connectedSuite) TestA_ReportsVersionFromDaemon() {
stdout, stderr := telepresence(cs.T(), "version")
cs.Empty(stderr)
vs := client.DisplayVersion()
cs.Contains(stdout, fmt.Sprintf("Client: %s", vs))
cs.Contains(stdout, fmt.Sprintf("Root Daemon: %s", vs))
cs.Contains(stdout, fmt.Sprintf("User Daemon: %s", vs))
}
func (cs *connectedSuite) TestB_ReportsStatusAsConnected() {
stdout, stderr := telepresence(cs.T(), "status")
cs.Empty(stderr)
cs.Contains(stdout, "Kubernetes context:")
}
func (cs *connectedSuite) TestC_ProxiesOutboundTraffic() {
ctx := dlog.NewTestContext(cs.T(), false)
for i := 0; i < serviceCount; i++ {
svc := fmt.Sprintf("hello-%d.%s", i, cs.ns())
expectedOutput := fmt.Sprintf("Request served by hello-%d", i)
cs.Require().Eventually(
// condition
func() bool {
dlog.Infof(ctx, "trying %q...", "http://"+svc)
hc := http.Client{Timeout: time.Second}
resp, err := hc.Get("http://" + svc)
if err != nil {
dlog.Error(ctx, err)
return false
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
dlog.Error(ctx, err)
return false
}
dlog.Infof(ctx, "body: %q", body)
return strings.Contains(string(body), expectedOutput)
},
15*time.Second, // waitfor
3*time.Second, // polling interval
`body of %q contains %q`, "http://"+svc, expectedOutput,
)
}
}
func (cs *connectedSuite) TestD_Intercepted() {
suite.Run(cs.T(), &interceptedSuite{tpSuite: cs.tpSuite})
}
func (cs *connectedSuite) TestE_PodWithSubdomain() {
require := cs.Require()
c := dlog.NewTestContext(cs.T(), false)
require.NoError(cs.tpSuite.applyApp(c, "echo-w-subdomain", "echo.subsonic", 8080))
defer func() {
cs.NoError(cs.tpSuite.kubectl(c, "delete", "svc", "subsonic", "--context", "default"))
cs.NoError(cs.tpSuite.kubectl(c, "delete", "deploy", "echo-subsonic", "--context", "default"))
}()
cc, cancel := context.WithTimeout(c, 3*time.Second)
defer cancel()
ip, err := net.DefaultResolver.LookupHost(cc, "echo.subsonic."+cs.ns())
cs.NoError(err)
cs.Equal(1, len(ip))
ip, err = net.DefaultResolver.LookupHost(cc, "echo.subsonic."+cs.ns()+".svc.cluster.local")
cs.NoError(err)
cs.Equal(1, len(ip))
}
func (cs *connectedSuite) TestF_SuccessfullyInterceptsDeploymentWithProbes() {
defer telepresence(cs.T(), "leave", "with-probes-"+cs.ns())
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "intercept", "--namespace", cs.ns(), "--mount", "false", "with-probes", "--port", "9090")
require.Empty(stderr)
require.Contains(stdout, "Using Deployment with-probes")
stdout, stderr = telepresence(cs.T(), "list", "--namespace", cs.ns(), "--intercepts")
require.Empty(stderr)
require.Contains(stdout, "with-probes: intercepted")
}
func (cs *connectedSuite) TestG_SuccessfullyInterceptsReplicaSet() {
defer telepresence(cs.T(), "leave", "rs-echo-"+cs.ns())
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "intercept", "--namespace", cs.ns(), "--mount", "false", "rs-echo", "--port", "9091")
require.Empty(stderr)
require.Contains(stdout, "Using ReplicaSet rs-echo")
stdout, stderr = telepresence(cs.T(), "list", "--namespace", cs.ns(), "--intercepts")
require.Empty(stderr)
require.Contains(stdout, "rs-echo: intercepted")
}
func (cs *connectedSuite) TestH_SuccessfullyInterceptsStatefulSet() {
defer telepresence(cs.T(), "leave", "ss-echo-"+cs.ns())
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "intercept", "--namespace", cs.ns(), "--mount", "false", "ss-echo", "--port", "9091")
require.Empty(stderr)
require.Contains(stdout, "Using StatefulSet ss-echo")
stdout, stderr = telepresence(cs.T(), "list", "--namespace", cs.ns(), "--intercepts")
require.Empty(stderr)
require.Contains(stdout, "ss-echo: intercepted")
}
func (cs *connectedSuite) TestI_LocalOnlyIntercept() {
cs.Run("intercept can be established", func() {
stdout, stderr := telepresence(cs.T(), "intercept", "--namespace", cs.ns(), "--local-only", "mylocal")
cs.Empty(stdout)
cs.Empty(stderr)
})
cs.Run("is included in list output", func() {
// list includes local intercept
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--intercepts")
cs.Empty(stderr)
cs.Contains(stdout, "mylocal: local-only intercept")
})
cs.Run("makes services reachable using unqualified name", func() {
ctx := dlog.NewTestContext(cs.T(), false)
// service can be resolve with unqualified name
cs.Eventually(func() bool {
return run(ctx, "curl", "--silent", "ss-echo") == nil
}, 3*time.Second, 1*time.Second)
})
cs.Run("leaving renders services unavailable using unqualified name", func() {
stdout, stderr := telepresence(cs.T(), "leave", "mylocal")
cs.Empty(stdout)
cs.Empty(stderr)
ctx := dlog.NewTestContext(cs.T(), false)
cs.Eventually(func() bool {
ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
return run(ctx, "curl", "--silent", "ss-echo") != nil
}, 3*time.Second, time.Second)
})
}
func (cs *connectedSuite) TestJ_ListOnlyMapped() {
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "connect", "--mapped-namespaces", "default")
require.Empty(stderr)
require.Empty(stdout)
stdout, stderr = telepresence(cs.T(), "list", "--namespace", cs.ns())
require.Empty(stderr)
require.Contains(stdout, "No Workloads (Deployments, StatefulSets, or ReplicaSets)")
stdout, stderr = telepresence(cs.T(), "connect", "--mapped-namespaces", "all")
require.Empty(stderr)
require.Empty(stdout)
stdout, stderr = telepresence(cs.T(), "list", "--namespace", cs.ns())
require.Empty(stderr)
require.NotContains(stdout, "No Workloads (Deployments, StatefulSets, or ReplicaSets)")
}
func (cs *connectedSuite) TestK_DockerRun() {
require := cs.Require()
ctx := dlog.NewTestContext(cs.T(), false)
svc := "hello-0"
tag := "telepresence/hello-test"
testDir := "pkg/client/cli/testdata/hello"
_, err := output(ctx, "docker", "build", "-t", tag, testDir)
require.NoError(err)
abs, err := filepath.Abs(testDir)
require.NoError(err)
// Kill container on exit
defer func() {
_ = dexec.CommandContext(ctx, "docker", "kill", fmt.Sprintf("intercept-%s-%s-8000", svc, cs.ns())).Run()
}()
ccx, cancel := context.WithCancel(ctx)
stdoutCh := make(chan string)
go func() {
stdout, _ := telepresenceContext(ccx, "intercept", "--namespace", cs.ns(), svc,
"--docker-run", "--port", "8000", "--", "--rm", "-v", abs+":/usr/src/app", tag)
stdoutCh <- stdout
}()
expectedOutput := "Hello from intercepted echo-server!"
cs.Eventually(
// condition
func() bool {
ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
out, err := output(ctx, "curl", "--silent", svc)
if err != nil {
dlog.Error(ctx, err)
return false
}
dlog.Info(ctx, out)
return strings.Contains(out, expectedOutput)
},
30*time.Second, // waitFor
1*time.Second, // polling interval
`body of %q equals %q`, "http://"+svc, expectedOutput,
)
cancel()
cs.Contains(<-stdoutCh, "Using Deployment "+svc)
}
func (cs *connectedSuite) TestL_LegacySwapDeploymentDoesIntercept() {
require := cs.Require()
// We don't need to defer leaving the intercept because the
// intercept is automatically left once the command is finished
_, stderr := telepresence(cs.T(), "--swap-deployment", "with-probes", "--expose", "9090", "--namespace", cs.ns(), "--mount", "false", "--run", "sleep", "1")
require.Contains(stderr, "Legacy Telepresence command used")
require.Contains(stderr, "Using Deployment with-probes")
// Since legacy Telepresence commands are detected and translated in the
// RunSubcommands function, so we ensure that the help text is *not* being
// printed out in this case.
require.NotContains(stderr, "Telepresence can connect to a cluster and route all outbound traffic")
// Verify that the intercept no longer exists
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--intercepts")
require.Empty(stderr)
require.Contains(stdout, "No Workloads (Deployments, StatefulSets, or ReplicaSets)")
}
func (cs *connectedSuite) TestM_AutoInjectedAgent() {
ctx := dlog.NewTestContext(cs.T(), false)
cs.NoError(cs.tpSuite.applyApp(ctx, "echo-auto-inject", "echo-auto-inject", 80))
defer func() {
cs.NoError(cs.tpSuite.kubectl(ctx, "delete", "svc,deploy", "echo-auto-inject", "--context", "default"))
}()
cs.Run("shows up with agent installed in list output", func() {
cs.Eventually(func() bool {
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
cs.Empty(stderr)
return strings.Contains(stdout, "echo-auto-inject: ready to intercept (traffic-agent already installed)")
},
10*time.Second, // waitFor
2*time.Second, // polling interval
)
})
cs.Run("can be intercepted", func() {
defer telepresence(cs.T(), "leave", "echo-auto-inject-"+cs.ns())
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "intercept", "--namespace", cs.ns(), "--mount", "false", "echo-auto-inject", "--port", "9091")
require.Empty(stderr)
require.Contains(stdout, "Using Deployment echo-auto-inject")
stdout, stderr = telepresence(cs.T(), "list", "--namespace", cs.ns(), "--intercepts")
require.Empty(stderr)
require.Contains(stdout, "echo-auto-inject: intercepted")
})
}
func (cs *connectedSuite) TestZ_Uninstall() {
cs.Run("Uninstalls agent on given deployment", func() {
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
require.Empty(stderr)
require.Contains(stdout, "with-probes")
_, stderr = telepresence(cs.T(), "uninstall", "--namespace", cs.ns(), "--agent", "with-probes")
require.Empty(stderr)
require.Eventually(
// condition
func() bool {
stdout, _ := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
return !strings.Contains(stdout, "with-probes")
},
30*time.Second, // waitFor
2*time.Second, // polling interval
)
})
cs.Run("Uninstalls agent on given replicaset", func() {
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
require.Empty(stderr)
require.Contains(stdout, "rs-echo")
_, stderr = telepresence(cs.T(), "uninstall", "--namespace", cs.ns(), "--agent", "rs-echo")
require.Empty(stderr)
require.Eventually(
// condition
func() bool {
stdout, _ := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
return !strings.Contains(stdout, "rs-echo")
},
30*time.Second, // waitFor
2*time.Second, // polling interval
)
})
cs.Run("Uninstalls agent on given statefulset", func() {
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
require.Empty(stderr)
require.Contains(stdout, "ss-echo")
_, stderr = telepresence(cs.T(), "uninstall", "--namespace", cs.ns(), "--agent", "ss-echo")
require.Empty(stderr)
require.Eventually(
// condition
func() bool {
stdout, _ := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
return !strings.Contains(stdout, "ss-echo")
},
30*time.Second, // waitFor
2*time.Second, // polling interval
)
})
cs.Run("Uninstalls all agents", func() {
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
require.Empty(stderr)
require.GreaterOrEqual(len(strings.Split(stdout, "\n")), serviceCount)
_, stderr = telepresence(cs.T(), "uninstall", "--namespace", cs.ns(), "--all-agents")
require.Empty(stderr)
require.Eventually(
func() bool {
stdout, _ := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
return stdout == "No Workloads (Deployments, StatefulSets, or ReplicaSets)"
},
30*time.Second, // waitFor
2*time.Second, // polling interval
)
})
}
type interceptedSuite struct {
suite.Suite
tpSuite *telepresenceSuite
intercepts []string
services *dgroup.Group
cancelServices context.CancelFunc
}
func (is *interceptedSuite) ns() string {
return is.tpSuite.namespace
}
func (is *interceptedSuite) SetupSuite() {
is.intercepts = make([]string, 0, serviceCount)
ctx, cancel := context.WithCancel(dcontext.WithSoftness(dlog.NewTestContext(is.T(), true)))
is.services = dgroup.NewGroup(ctx, dgroup.GroupConfig{})
is.cancelServices = cancel
is.Run("all intercepts ready", func() {
rxs := make([]*regexp.Regexp, serviceCount)
for i := 0; i < serviceCount; i++ {
rxs[i] = regexp.MustCompile(fmt.Sprintf("hello-%d\\s*:\\s+ready to intercept", i))
}
is.Require().Eventually(
// condition
func() bool {
stdout, _ := telepresence(is.T(), "list", "--namespace", is.ns())
is.T().Log(stdout)
for i := 0; i < serviceCount; i++ {
if !rxs[i].MatchString(stdout) {
return false
}
}
return true
},
15*time.Second, // waitFor
3*time.Second, // polling interval
`telepresence list reports all agents`,
)
})
is.Run("adding intercepts", func() {
for i := 0; i < serviceCount; i++ {
svc := fmt.Sprintf("hello-%d", i)
port := strconv.Itoa(9000 + i)
stdout, stderr := telepresence(is.T(), "intercept", "--namespace", is.ns(), "--mount", "false", svc, "--port", port)
is.Require().Empty(stderr)
is.intercepts = append(is.intercepts, svc)
is.Contains(stdout, "Using Deployment "+svc)
}
})
is.Run("starting http servers", func() {
for i := 0; i < serviceCount; i++ {
svc := fmt.Sprintf("hello-%d", i)
port := strconv.Itoa(9000 + i)
is.services.Go(svc, func(ctx context.Context) error {
sc := &dhttp.ServerConfig{
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "%s from intercept at %s", svc, r.URL.Path)
}),
}
return sc.ListenAndServe(ctx, ":"+port)
})
}
})
}
func (is *interceptedSuite) TearDownSuite() {
for _, svc := range is.intercepts {
stdout, stderr := telepresence(is.T(), "leave", svc+"-"+is.ns())
is.Empty(stderr)
is.Empty(stdout)
}
is.cancelServices()
is.NoError(is.services.Wait())
time.Sleep(time.Second) // Allow some time for processes to die and intercepts to vanish
}
func (is *interceptedSuite) TestA_VerifyingResponsesFromInterceptor() {
for i := 0; i < serviceCount; i++ {
svc := fmt.Sprintf("hello-%d", i)
expectedOutput := fmt.Sprintf("%s from intercept at /", svc)
is.Require().Eventually(
// condition
func() bool {
is.T().Logf("trying %q...", "http://"+svc)
hc := http.Client{Timeout: time.Second}
resp, err := hc.Get("http://" + svc)
if err != nil {
is.T().Log(err)
return false
}
defer resp.Body.Close()
is.T().Logf("status code: %v", resp.StatusCode)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
is.T().Log(err)
return false
}
is.T().Logf("body: %q", body)
return string(body) == expectedOutput
},
15*time.Second, // waitFor
3*time.Second, // polling interval
`body of %q equals %q`, "http://"+svc, expectedOutput,
)
}
}
func (is *interceptedSuite) TestB_ListingActiveIntercepts() {
require := is.Require()
stdout, stderr := telepresence(is.T(), "--namespace", is.ns(), "list", "--intercepts")
require.Empty(stderr)
for i := 0; i < serviceCount; i++ {
require.Contains(stdout, fmt.Sprintf("hello-%d: intercepted", i))
}
}
func (ts *telepresenceSuite) applyApp(c context.Context, name, svcName string, port int) error {
err := ts.kubectl(c, "apply", "-f", fmt.Sprintf("k8s/%s.yaml", name), "--context", "default")
if err != nil {
return fmt.Errorf("failed to deploy %s: %w", name, err)
}
return ts.waitForService(c, svcName, port)
}
func (ts *telepresenceSuite) applyEchoService(c context.Context, name string) error {
err := ts.kubectl(c, "create", "deploy", name, "--image", "jmalloc/echo-server:0.1.0")
if err != nil {
return fmt.Errorf("failed to create deployment %s: %w", name, err)
}
err = ts.kubectl(c, "expose", "deploy", name, "--port", "80", "--target-port", "8080")
if err != nil {
return fmt.Errorf("failed to expose deployment %s: %w", name, err)
}
return ts.waitForService(c, name, 80)
}
func (ts *telepresenceSuite) waitForService(c context.Context, name string, port int) error {
c, cancel := context.WithTimeout(c, 90*time.Second)
defer cancel()
// Since this function can be called multiple times in parallel
// we add the name of the service to the name of the pod so they
// can run at the same time. We strip out any characters that we
// can't use in a name in k8s.
reg := regexp.MustCompile("[^a-zA-Z0-9-]+")
k8sSafeName := reg.ReplaceAllString(name, "")
containerName := fmt.Sprintf("curl-%s-from-cluster", k8sSafeName)
for i := 0; i < 60; i++ {
time.Sleep(time.Second)
err := ts.kubectl(c, "run", containerName, "--context", "default", "--rm", "-it",
"--image=docker.io/pstauffer/curl", "--restart=Never", "--",
"curl", "--silent", "--output", "/dev/null",
fmt.Sprintf("http://%s.%s:%d", name, ts.namespace, port),
)
if err == nil {
return nil
}
}
return fmt.Errorf("timed out waiting for %s service", name)
}
func (ts *telepresenceSuite) kubectl(c context.Context, args ...string) error {
return run(c, append([]string{"kubectl", "--namespace", ts.namespace}, args...)...)
}
func (ts *telepresenceSuite) kubectlOut(ctx context.Context, args ...string) (string, error) {
return output(ctx, append([]string{"kubectl", "--namespace", ts.namespace}, args...)...)
}
func (ts *telepresenceSuite) publishManager() error {
ctx := dlog.NewTestContext(ts.T(), true)
cmd := dexec.CommandContext(ctx, "make", "push-image")
// Go sets a lot of variables that we don't want to pass on to the ko executable. If we do,
// then it builds for the platform indicated by those variables.
cmd.Env = []string{
"TELEPRESENCE_VERSION=" + ts.testVersion,
"TELEPRESENCE_REGISTRY=" + dtest.DockerRegistry(ctx),
}
includeEnv := []string{"KO_DOCKER_REPO=", "HOME=", "PATH=", "LOGNAME=", "TMPDIR=", "MAKELEVEL="}
for _, env := range os.Environ() {
for _, incl := range includeEnv {
if strings.HasPrefix(env, incl) {
cmd.Env = append(cmd.Env, env)
break
}
}
}
if err := cmd.Run(); err != nil {
return client.RunError(err)
}
return nil
}
func (ts *telepresenceSuite) buildExecutable(c context.Context) (string, error) {
executable := filepath.Join("build-output", "bin", "telepresence")
return executable, run(c, "go", "build", "-ldflags",
fmt.Sprintf("-X=github.com/telepresenceio/telepresence/v2/pkg/version.Version=%s", ts.testVersion),
"-o", executable, "./cmd/telepresence")
}
func run(c context.Context, args ...string) error {
return client.RunError(dexec.CommandContext(c, args[0], args[1:]...).Run())
}
func output(ctx context.Context, args ...string) (string, error) {
cmd := dexec.CommandContext(ctx, args[0], args[1:]...)
cmd.DisableLogging = true
out, err := cmd.Output()
return string(out), client.RunError(err)
}
// telepresence executes the CLI command in-process
func telepresence(t testing.TB, args ...string) (string, string) {
return telepresenceContext(dlog.NewTestContext(t, false), args...)
}
// telepresenceContext executes the CLI command in-process with the given context
func telepresenceContext(ctx context.Context, args ...string) (string, string) {
dlog.Infof(ctx, "running command: %q", append([]string{"telepresence"}, args...))
cmd := cli.Command(ctx)
stdout := new(strings.Builder)
cmd.SetOut(io.MultiWriter(
stdout,
dlog.StdLogger(dlog.WithField(ctx, "stream", "stdout"), dlog.LogLevelInfo).Writer(),
))
stderr := new(strings.Builder)
cmd.SetErr(io.MultiWriter(
stderr,
dlog.StdLogger(dlog.WithField(ctx, "stream", "stderr"), dlog.LogLevelInfo).Writer(),
))
cmd.SetArgs(args)
if err := cmd.ExecuteContext(ctx); err != nil {
fmt.Fprintln(cmd.ErrOrStderr(), err)
}
dlog.Infof(ctx, "command terminated %q", append([]string{"telepresence"}, args...))
return strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String())
}
| [
"\"PATH\"",
"\"KUBECONFIG\"",
"\"DTEST_KUBECONFIG\""
] | [] | [
"DTEST_KUBECONFIG",
"PATH",
"KUBECONFIG"
] | [] | ["DTEST_KUBECONFIG", "PATH", "KUBECONFIG"] | go | 3 | 0 | |
cmd/clients.go | /*
* Copyright © 2015-2018 Aeneas Rekkas <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author Aeneas Rekkas <[email protected]>
* @copyright 2015-2018 Aeneas Rekkas <[email protected]>
* @license Apache-2.0
*/
package cmd
import (
"os"
"github.com/spf13/cobra"
)
// clientsCmd represents the clients command
var clientsCmd = &cobra.Command{
Use: "clients <command>",
Short: "Manage OAuth 2.0 Clients",
}
func init() {
RootCmd.AddCommand(clientsCmd)
//clientsCmd.PersistentFlags().Bool("dry", false, "do not execute the command but show the corresponding curl command instead")
clientsCmd.PersistentFlags().Bool("fake-tls-termination", false, `Fake tls termination by adding "X-Forwarded-Proto: https" to http headers`)
clientsCmd.PersistentFlags().String("access-token", os.Getenv("OAUTH2_ACCESS_TOKEN"), "Set an access token to be used in the Authorization header, defaults to environment variable ACCESS_TOKEN")
clientsCmd.PersistentFlags().String("endpoint", os.Getenv("HYDRA_ADMIN_URL"), "Set the URL where ORY Hydra is hosted, defaults to environment variable HYDRA_ADMIN_URL")
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// clientsCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// clientsCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
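
// Illustrative invocation (assumed subcommand; subcommands are registered elsewhere):
//
//	HYDRA_ADMIN_URL=https://hydra.example.org hydra clients list --fake-tls-termination
//
// The --access-token and --endpoint flags fall back to the OAUTH2_ACCESS_TOKEN
// and HYDRA_ADMIN_URL environment variables read above.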
| [
"\"OAUTH2_ACCESS_TOKEN\"",
"\"HYDRA_ADMIN_URL\""
] | [] | [
"HYDRA_ADMIN_URL",
"OAUTH2_ACCESS_TOKEN"
] | [] | ["HYDRA_ADMIN_URL", "OAUTH2_ACCESS_TOKEN"] | go | 2 | 0 | |
docker/flags.go | package main
import (
"fmt"
"os"
"path/filepath"
"runtime"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/homedir"
flag "github.com/docker/docker/pkg/mflag"
)
var (
dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
dockerTlsVerify = os.Getenv("DOCKER_TLS_VERIFY") != ""
)
func init() {
if dockerCertPath == "" {
dockerCertPath = filepath.Join(homedir.Get(), ".docker")
}
}
func getDaemonConfDir() string {
// TODO: update for Windows daemon
if runtime.GOOS == "windows" {
return filepath.Join(homedir.Get(), ".docker")
}
return "/etc/docker"
}
var (
flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit")
flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode")
flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode")
flLogLevel = flag.String([]string{"l", "-log-level"}, "info", "Set the logging level")
flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by --tlsverify")
flHelp = flag.Bool([]string{"h", "-help"}, false, "Print usage")
flTlsVerify = flag.Bool([]string{"-tlsverify"}, dockerTlsVerify, "Use TLS and verify the remote")
// these are initialized in init() below since their default values depend on dockerCertPath which isn't fully initialized until init() runs
flTrustKey *string
flCa *string
flCert *string
flKey *string
flHosts []string
)
func setDefaultConfFlag(flag *string, def string) {
if *flag == "" {
if *flDaemon {
*flag = filepath.Join(getDaemonConfDir(), def)
} else {
*flag = filepath.Join(homedir.Get(), ".docker", def)
}
}
}
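
// Illustrative effect (assumed inputs): with an empty *flag and def "key.json",
// daemon mode resolves to /etc/docker/key.json (or the Windows equivalent),
// while client mode resolves to $HOME/.docker/key.json.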
func init() {
fmt.Println("[HACK] flTlsVerify:", *flTlsVerify)
var placeholderTrustKey string
// TODO use flag flag.String([]string{"i", "-identity"}, "", "Path to libtrust key file")
flTrustKey = &placeholderTrustKey
flCa = flag.String([]string{"-tlscacert"}, filepath.Join(dockerCertPath, defaultCaFile), "Trust certs signed only by this CA")
flCert = flag.String([]string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file")
flKey = flag.String([]string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file")
opts.HostListVar(&flHosts, []string{"H", "-host"}, "Daemon socket(s) to connect to")
flag.Usage = func() {
fmt.Fprint(os.Stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n\nA self-sufficient runtime for linux containers.\n\nOptions:\n")
flag.CommandLine.SetOutput(os.Stdout)
flag.PrintDefaults()
help := "\nCommands:\n"
for _, command := range [][]string{
{"attach", "Attach to a running container"},
{"build", "Build an image from a Dockerfile"},
{"commit", "Create a new image from a container's changes"},
{"cp", "Copy files/folders from a container's filesystem to the host path"},
{"create", "Create a new container"},
{"diff", "Inspect changes on a container's filesystem"},
{"events", "Get real time events from the server"},
{"exec", "Run a command in a running container"},
{"export", "Stream the contents of a container as a tar archive"},
{"history", "Show the history of an image"},
{"images", "List images"},
{"import", "Create a new filesystem image from the contents of a tarball"},
{"info", "Display system-wide information"},
{"inspect", "Return low-level information on a container or image"},
{"kill", "Kill a running container"},
{"load", "Load an image from a tar archive"},
{"login", "Register or log in to a Docker registry server"},
{"logout", "Log out from a Docker registry server"},
{"logs", "Fetch the logs of a container"},
{"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"},
{"pause", "Pause all processes within a container"},
{"ps", "List containers"},
{"pull", "Pull an image or a repository from a Docker registry server"},
{"push", "Push an image or a repository to a Docker registry server"},
{"rename", "Rename an existing container"},
{"restart", "Restart a running container"},
{"rm", "Remove one or more containers"},
{"rmi", "Remove one or more images"},
{"run", "Run a command in a new container"},
{"save", "Save an image to a tar archive"},
{"search", "Search for an image on the Docker Hub"},
{"start", "Start a stopped container"},
{"stats", "Display a stream of a containers' resource usage statistics"},
{"stop", "Stop a running container"},
{"tag", "Tag an image into a repository"},
{"top", "Lookup the running processes of a container"},
{"unpause", "Unpause a paused container"},
{"version", "Show the Docker version information"},
{"wait", "Block until a container stops, then print its exit code"},
} {
help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1])
}
help += "\nRun 'docker COMMAND --help' for more information on a command."
fmt.Fprintf(os.Stdout, "%s\n", help)
}
}
| [
"\"DOCKER_CERT_PATH\"",
"\"DOCKER_TLS_VERIFY\""
] | [] | [
"DOCKER_CERT_PATH",
"DOCKER_TLS_VERIFY"
] | [] | ["DOCKER_CERT_PATH", "DOCKER_TLS_VERIFY"] | go | 2 | 0 | |
lib_pypy/pyrepl/unix_console.py | # Copyright 2000-2010 Michael Hudson-Doyle <[email protected]>
# Antonio Cuni
# Armin Rigo
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import termios, select, os, struct, errno
import signal, re, time, sys
from fcntl import ioctl
from . import curses
from .fancy_termios import tcgetattr, tcsetattr
from .console import Console, Event
from .unix_eventqueue import EventQueue
from .trace import trace
try:
from __pypy__ import pyos_inputhook
except ImportError:
def pyos_inputhook():
pass
class InvalidTerminal(RuntimeError):
pass
try:
unicode
except NameError:
unicode = str
_error = (termios.error, curses.error, InvalidTerminal)
# there are arguments for changing this to "refresh"
SIGWINCH_EVENT = 'repaint'
FIONREAD = getattr(termios, "FIONREAD", None)
TIOCGWINSZ = getattr(termios, "TIOCGWINSZ", None)
def _my_getstr(cap, optional=0):
r = curses.tigetstr(cap)
if not optional and r is None:
raise InvalidTerminal(
"terminal doesn't have the required '%s' capability"%cap)
return r
# at this point, can we say: AAAAAAAAAAAAAAAAAAAAAARGH!
def maybe_add_baudrate(dict, rate):
name = 'B%d'%rate
if hasattr(termios, name):
dict[getattr(termios, name)] = rate
ratedict = {}
for r in [0, 110, 115200, 1200, 134, 150, 1800, 19200, 200, 230400,
2400, 300, 38400, 460800, 4800, 50, 57600, 600, 75, 9600]:
maybe_add_baudrate(ratedict, r)
del r, maybe_add_baudrate
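# Illustrative result (depends on the termios build): on a platform that
# defines termios.B9600, ratedict[termios.B9600] == 9600.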
delayprog = re.compile(b"\\$<([0-9]+)((?:/|\\*){0,2})>")
try:
poll = select.poll
except AttributeError:
    # this is exactly the minimum necessary to support what we
# do with poll objects
class poll:
def __init__(self):
pass
def register(self, fd, flag):
self.fd = fd
def poll(self): # note: a 'timeout' argument would be *milliseconds*
r,w,e = select.select([self.fd],[],[])
return r
POLLIN = getattr(select, "POLLIN", None)
class UnixConsole(Console):
def __init__(self, f_in=0, f_out=1, term=None, encoding=None):
if encoding is None:
encoding = sys.getdefaultencoding()
self.encoding = encoding
if isinstance(f_in, int):
self.input_fd = f_in
else:
self.input_fd = f_in.fileno()
if isinstance(f_out, int):
self.output_fd = f_out
else:
self.output_fd = f_out.fileno()
self.pollob = poll()
self.pollob.register(self.input_fd, POLLIN)
curses.setupterm(term, self.output_fd)
self.term = term
self._bel = _my_getstr("bel")
self._civis = _my_getstr("civis", optional=1)
self._clear = _my_getstr("clear")
self._cnorm = _my_getstr("cnorm", optional=1)
self._cub = _my_getstr("cub", optional=1)
self._cub1 = _my_getstr("cub1", 1)
self._cud = _my_getstr("cud", 1)
self._cud1 = _my_getstr("cud1", 1)
self._cuf = _my_getstr("cuf", 1)
self._cuf1 = _my_getstr("cuf1", 1)
self._cup = _my_getstr("cup")
self._cuu = _my_getstr("cuu", 1)
self._cuu1 = _my_getstr("cuu1", 1)
self._dch1 = _my_getstr("dch1", 1)
self._dch = _my_getstr("dch", 1)
self._el = _my_getstr("el")
self._hpa = _my_getstr("hpa", 1)
self._ich = _my_getstr("ich", 1)
self._ich1 = _my_getstr("ich1", 1)
self._ind = _my_getstr("ind", 1)
self._pad = _my_getstr("pad", 1)
self._ri = _my_getstr("ri", 1)
self._rmkx = _my_getstr("rmkx", 1)
self._smkx = _my_getstr("smkx", 1)
## work out how we're going to sling the cursor around
if 0 and self._hpa: # hpa don't work in windows telnet :-(
self.__move_x = self.__move_x_hpa
elif self._cub and self._cuf:
self.__move_x = self.__move_x_cub_cuf
elif self._cub1 and self._cuf1:
self.__move_x = self.__move_x_cub1_cuf1
else:
raise RuntimeError("insufficient terminal (horizontal)")
if self._cuu and self._cud:
self.__move_y = self.__move_y_cuu_cud
elif self._cuu1 and self._cud1:
self.__move_y = self.__move_y_cuu1_cud1
else:
raise RuntimeError("insufficient terminal (vertical)")
if self._dch1:
self.dch1 = self._dch1
elif self._dch:
self.dch1 = curses.tparm(self._dch, 1)
else:
self.dch1 = None
if self._ich1:
self.ich1 = self._ich1
elif self._ich:
self.ich1 = curses.tparm(self._ich, 1)
else:
self.ich1 = None
self.__move = self.__move_short
self.event_queue = EventQueue(self.input_fd, self.encoding)
self.cursor_visible = 1
def change_encoding(self, encoding):
self.encoding = encoding
def refresh(self, screen, c_xy):
# this function is still too long (over 90 lines)
cx, cy = c_xy
if not self.__gone_tall:
while len(self.screen) < min(len(screen), self.height):
self.__hide_cursor()
self.__move(0, len(self.screen) - 1)
self.__write("\n")
self.__posxy = 0, len(self.screen)
self.screen.append("")
else:
while len(self.screen) < len(screen):
self.screen.append("")
if len(screen) > self.height:
self.__gone_tall = 1
self.__move = self.__move_tall
px, py = self.__posxy
old_offset = offset = self.__offset
height = self.height
# we make sure the cursor is on the screen, and that we're
# using all of the screen if we can
if cy < offset:
offset = cy
elif cy >= offset + height:
offset = cy - height + 1
elif offset > 0 and len(screen) < offset + height:
offset = max(len(screen) - height, 0)
screen.append("")
oldscr = self.screen[old_offset:old_offset + height]
newscr = screen[offset:offset + height]
# use hardware scrolling if we have it.
if old_offset > offset and self._ri:
self.__hide_cursor()
self.__write_code(self._cup, 0, 0)
self.__posxy = 0, old_offset
for i in range(old_offset - offset):
self.__write_code(self._ri)
oldscr.pop(-1)
oldscr.insert(0, "")
elif old_offset < offset and self._ind:
self.__hide_cursor()
self.__write_code(self._cup, self.height - 1, 0)
self.__posxy = 0, old_offset + self.height - 1
for i in range(offset - old_offset):
self.__write_code(self._ind)
oldscr.pop(0)
oldscr.append("")
self.__offset = offset
for y, oldline, newline, in zip(range(offset, offset + height),
oldscr,
newscr):
if oldline != newline:
self.__write_changed_line(y, oldline, newline, px)
y = len(newscr)
while y < len(oldscr):
self.__hide_cursor()
self.__move(0, y)
self.__posxy = 0, y
self.__write_code(self._el)
y += 1
self.__show_cursor()
self.screen = screen
self.move_cursor(cx, cy)
self.flushoutput()
def __write_changed_line(self, y, oldline, newline, px):
# this is frustrating; there's no reason to test (say)
# self.dch1 inside the loop -- but alternative ways of
# structuring this function are equally painful (I'm trying to
# avoid writing code generators these days...)
x = 0
minlen = min(len(oldline), len(newline))
#
# reuse the oldline as much as possible, but stop as soon as we
# encounter an ESCAPE, because it might be the start of an escape
        # sequence
while x < minlen and oldline[x] == newline[x] and newline[x] != '\x1b':
x += 1
if oldline[x:] == newline[x+1:] and self.ich1:
if ( y == self.__posxy[1] and x > self.__posxy[0]
and oldline[px:x] == newline[px+1:x+1] ):
x = px
self.__move(x, y)
self.__write_code(self.ich1)
self.__write(newline[x])
self.__posxy = x + 1, y
elif x < minlen and oldline[x + 1:] == newline[x + 1:]:
self.__move(x, y)
self.__write(newline[x])
self.__posxy = x + 1, y
elif (self.dch1 and self.ich1 and len(newline) == self.width
and x < len(newline) - 2
and newline[x+1:-1] == oldline[x:-2]):
self.__hide_cursor()
self.__move(self.width - 2, y)
self.__posxy = self.width - 2, y
self.__write_code(self.dch1)
self.__move(x, y)
self.__write_code(self.ich1)
self.__write(newline[x])
self.__posxy = x + 1, y
else:
self.__hide_cursor()
self.__move(x, y)
if len(oldline) > len(newline):
self.__write_code(self._el)
self.__write(newline[x:])
self.__posxy = len(newline), y
if '\x1b' in newline:
# ANSI escape characters are present, so we can't assume
# anything about the position of the cursor. Moving the cursor
# to the left margin should work to get to a known position.
self.move_cursor(0, y)
def __write(self, text):
self.__buffer.append((text, 0))
def __write_code(self, fmt, *args):
self.__buffer.append((curses.tparm(fmt, *args), 1))
def __maybe_write_code(self, fmt, *args):
if fmt:
self.__write_code(fmt, *args)
def __move_y_cuu1_cud1(self, y):
dy = y - self.__posxy[1]
if dy > 0:
self.__write_code(dy*self._cud1)
elif dy < 0:
self.__write_code((-dy)*self._cuu1)
def __move_y_cuu_cud(self, y):
dy = y - self.__posxy[1]
if dy > 0:
self.__write_code(self._cud, dy)
elif dy < 0:
self.__write_code(self._cuu, -dy)
def __move_x_hpa(self, x):
if x != self.__posxy[0]:
self.__write_code(self._hpa, x)
def __move_x_cub1_cuf1(self, x):
dx = x - self.__posxy[0]
if dx > 0:
self.__write_code(self._cuf1*dx)
elif dx < 0:
self.__write_code(self._cub1*(-dx))
def __move_x_cub_cuf(self, x):
dx = x - self.__posxy[0]
if dx > 0:
self.__write_code(self._cuf, dx)
elif dx < 0:
self.__write_code(self._cub, -dx)
def __move_short(self, x, y):
self.__move_x(x)
self.__move_y(y)
def __move_tall(self, x, y):
assert 0 <= y - self.__offset < self.height, y - self.__offset
self.__write_code(self._cup, y - self.__offset, x)
def move_cursor(self, x, y):
if y < self.__offset or y >= self.__offset + self.height:
self.event_queue.insert(Event('scroll', None))
else:
self.__move(x, y)
self.__posxy = x, y
self.flushoutput()
def prepare(self):
# per-readline preparations:
self.__svtermstate = tcgetattr(self.input_fd)
raw = self.__svtermstate.copy()
raw.iflag &=~ (termios.BRKINT | termios.INPCK |
termios.ISTRIP | termios.IXON)
raw.oflag &=~ (termios.OPOST)
raw.cflag &=~ (termios.CSIZE|termios.PARENB)
raw.cflag |= (termios.CS8)
raw.lflag &=~ (termios.ICANON|termios.ECHO|
termios.IEXTEN|(termios.ISIG*1))
raw.cc[termios.VMIN] = 1
raw.cc[termios.VTIME] = 0
tcsetattr(self.input_fd, termios.TCSADRAIN, raw)
self.screen = []
self.height, self.width = self.getheightwidth()
self.__buffer = []
self.__posxy = 0, 0
self.__gone_tall = 0
self.__move = self.__move_short
self.__offset = 0
self.__maybe_write_code(self._smkx)
try:
self.old_sigwinch = signal.signal(
signal.SIGWINCH, self.__sigwinch)
except ValueError:
pass
def restore(self):
self.__maybe_write_code(self._rmkx)
self.flushoutput()
tcsetattr(self.input_fd, termios.TCSADRAIN, self.__svtermstate)
if hasattr(self, 'old_sigwinch'):
signal.signal(signal.SIGWINCH, self.old_sigwinch)
del self.old_sigwinch
def __sigwinch(self, signum, frame):
self.height, self.width = self.getheightwidth()
self.event_queue.insert(Event('resize', None))
def push_char(self, char):
trace('push char {char!r}', char=char)
self.event_queue.push(char)
def get_event(self, block=1):
while self.event_queue.empty():
while 1: # All hail Unix!
pyos_inputhook()
try:
self.push_char(os.read(self.input_fd, 1))
except (IOError, OSError) as err:
if err.errno == errno.EINTR:
if not self.event_queue.empty():
return self.event_queue.get()
else:
continue
else:
raise
else:
break
if not block:
break
return self.event_queue.get()
def wait(self):
self.pollob.poll()
def set_cursor_vis(self, vis):
if vis:
self.__show_cursor()
else:
self.__hide_cursor()
def __hide_cursor(self):
if self.cursor_visible:
self.__maybe_write_code(self._civis)
self.cursor_visible = 0
def __show_cursor(self):
if not self.cursor_visible:
self.__maybe_write_code(self._cnorm)
self.cursor_visible = 1
def repaint_prep(self):
if not self.__gone_tall:
self.__posxy = 0, self.__posxy[1]
self.__write("\r")
ns = len(self.screen)*['\000'*self.width]
self.screen = ns
else:
self.__posxy = 0, self.__offset
self.__move(0, self.__offset)
ns = self.height*['\000'*self.width]
self.screen = ns
if TIOCGWINSZ:
def getheightwidth(self):
try:
return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
except KeyError:
height, width = struct.unpack(
"hhhh", ioctl(self.input_fd, TIOCGWINSZ, b"\000"*8))[0:2]
if not height: return 25, 80
return height, width
else:
def getheightwidth(self):
try:
return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
except KeyError:
return 25, 80
def forgetinput(self):
termios.tcflush(self.input_fd, termios.TCIFLUSH)
def flushoutput(self):
for text, iscode in self.__buffer:
if iscode:
self.__tputs(text)
else:
os.write(self.output_fd, text.encode(self.encoding, 'replace'))
del self.__buffer[:]
def __tputs(self, fmt, prog=delayprog):
"""A Python implementation of the curses tputs function; the
curses one can't really be wrapped in a sane manner.
I have the strong suspicion that this is complexity that
will never do anyone any good."""
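        # Illustrative (assumed capability string): b"\x1b[H\x1b[2J$<5>" asks
        # for a 5 millisecond delay after the clear; when a pad character is
        # available we emit enough pad bytes at the current baud rate instead
        # of sleeping.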
# using .get() means that things will blow up
# only if the bps is actually needed (which I'm
        # betting is pretty unlikely)
bps = ratedict.get(self.__svtermstate.ospeed)
while 1:
m = prog.search(fmt)
if not m:
os.write(self.output_fd, fmt)
break
x, y = m.span()
os.write(self.output_fd, fmt[:x])
fmt = fmt[y:]
delay = int(m.group(1))
if b'*' in m.group(2):
delay *= self.height
if self._pad:
                nchars = (bps*delay)//1000  # floor division: a float count would break self._pad*nchars on Python 3
os.write(self.output_fd, self._pad*nchars)
else:
time.sleep(float(delay)/1000.0)
def finish(self):
y = len(self.screen) - 1
while y >= 0 and not self.screen[y]:
y -= 1
self.__move(0, min(y, self.height + self.__offset - 1))
self.__write("\n\r")
self.flushoutput()
def beep(self):
self.__maybe_write_code(self._bel)
self.flushoutput()
if FIONREAD:
def getpending(self):
e = Event('key', '', b'')
while not self.event_queue.empty():
e2 = self.event_queue.get()
e.data += e2.data
                e.raw += e2.raw
amount = struct.unpack(
"i", ioctl(self.input_fd, FIONREAD, b"\0\0\0\0"))[0]
raw = os.read(self.input_fd, amount)
data = unicode(raw, self.encoding, 'replace')
e.data += data
e.raw += raw
return e
else:
def getpending(self):
e = Event('key', '', b'')
while not self.event_queue.empty():
e2 = self.event_queue.get()
e.data += e2.data
                e.raw += e2.raw
amount = 10000
raw = os.read(self.input_fd, amount)
data = unicode(raw, self.encoding, 'replace')
e.data += data
e.raw += raw
return e
def clear(self):
self.__write_code(self._clear)
self.__gone_tall = 1
self.__move = self.__move_tall
self.__posxy = 0, 0
self.screen = []
| [] | [] | [
"LINES",
"COLUMNS"
] | [] | ["LINES", "COLUMNS"] | python | 2 | 0 | |
pkg/controllers/v1alpha1/dataset/suite_test.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dataset
import (
"context"
"os"
"path/filepath"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var testCtx = context.Background()
var useExistingCluster = false
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
if env := os.Getenv("USE_EXISTING_CLUSTER"); env != "" {
useExistingCluster = true
}
By("bootstrapping test environment")
testEnv = &envtest.Environment{
UseExistingCluster: &useExistingCluster,
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "config", "crd", "bases")},
}
var err error
cfg, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = datav1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
var _ = Describe("dataset", func() {
var dataset datav1alpha1.Dataset
BeforeEach(func() {
dataset = datav1alpha1.Dataset{
ObjectMeta: metav1.ObjectMeta{
Name: "test-name",
Namespace: "default",
},
Spec: datav1alpha1.DatasetSpec{
Mounts: []datav1alpha1.Mount{{
MountPoint: "test-MountPoint",
Name: "test-MountName",
},
},
},
}
})
It("Should create dataset successfully", func() {
By("create dataset")
err := k8sClient.Create(testCtx, &dataset)
Expect(err).NotTo(HaveOccurred())
By("check dataset status")
var createdDataset datav1alpha1.Dataset
var name = types.NamespacedName{
Namespace: dataset.Namespace,
Name: dataset.Name,
}
err = k8sClient.Get(testCtx, name, &createdDataset)
Expect(err).NotTo(HaveOccurred())
Expect(createdDataset.Status.Phase).Should(
Or(Equal(datav1alpha1.NoneDatasetPhase),
Equal(datav1alpha1.NotBoundDatasetPhase)))
By("delete dataset")
err = k8sClient.Delete(testCtx, &dataset)
Expect(err).NotTo(HaveOccurred())
})
})
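
// Illustrative: to run this suite against a real cluster instead of the
// envtest control plane (assumed invocation):
//
//	USE_EXISTING_CLUSTER=true go test ./pkg/controllers/v1alpha1/dataset/...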
| [
"\"USE_EXISTING_CLUSTER\""
] | [] | [
"USE_EXISTING_CLUSTER"
] | [] | ["USE_EXISTING_CLUSTER"] | go | 1 | 0 | |
bot.py | import logging
import psycopg2
from telegram.ext import Updater, CommandHandler
import os
import strings
PORT = int(os.environ.get('PORT', 5000))
TOKEN = os.getenv("API_KEY", "optional-default")
PROJECT_URL = os.getenv("PROJECT_URL", "optional-default")
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
DATABASE_URL = os.environ.get('DATABASE_URL')
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
conn.autocommit = True
cur = conn.cursor()
logger = logging.getLogger(__name__)
def start(update, context):
"""Send a message when the command /start is issued."""
update.message.reply_text(strings.signup)
def help(update, context):
"""Send a message when the command /help is issued."""
update.message.reply_text(strings.help)
def signup(update, context):
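    """Register the sender as a new user with a starting balance of 50.0 buxx."""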
cur = conn.cursor()
userID = update.message.from_user.id
try:
print(update.message.text.split())
username = update.message.text.split()[1]
cur.execute(
"INSERT INTO users (userID, money, username) VALUES (%s, %s, %s)",
(userID,
50.0,
username))
update.message.reply_text(strings.user_created)
except Exception as error:
update.message.reply_text(strings.user_exists)
print(error)
conn.commit()
cur.close()
def atm(update, context):
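    """Reply with the sender's current buxx balance."""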
cur = conn.cursor()
try:
cur.execute("SELECT money FROM users WHERE userID = %s",
(str(update.message.from_user.id),))
update.message.reply_text(
"You have " + str(cur.fetchone()[0]) + " buxx 🤑")
except Exception as error:
print(error)
conn.commit()
cur.close()
def send(update, context):
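    """Handle '/send <username> <amount>': transfer buxx from the sender to the named user."""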
cur = conn.cursor()
sender = update.message.from_user.id
receiver_username = update.message.text.split()[1]
amount = update.message.text.split()[2]
try:
cur.execute("SELECT userID FROM users WHERE username = %s",
(receiver_username,))
receiver = cur.fetchone()[0]
exchange(update, amount, receiver_username, str(receiver), str(sender))
except Exception as error:
print(error)
conn.commit()
cur.close()
def whoami(update, context):
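    """Reply with the username registered for the sender."""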
cur = conn.cursor()
sender = update.message.from_user.id
try:
cur.execute("SELECT username FROM users WHERE userID = '%s'",
(sender,))
receiver = cur.fetchone()[0]
update.message.reply_text("Your username is " + receiver)
except Exception as error:
print(error)
conn.commit()
cur.close()
def exchange(update, amount, receiver_username, receiver, sender):
cur = conn.cursor()
try:
sql = '''SELECT * from users'''
cur.execute(sql)
result_set = cur.fetchall()
shouldTransfer = False
for row in result_set:
if row[0] == sender and row[1] >= int(amount) and int(amount) > 0:
shouldTransfer = True
if shouldTransfer:
sql = "UPDATE users SET money = money + %s WHERE userID = %s"
cur.execute(sql, (amount, receiver))
sql = "UPDATE users SET money = money - %s WHERE userID = %s"
cur.execute(sql, (amount, sender))
sql = "SELECT username FROM users WHERE userID = %s"
cur.execute(sql, (sender,))
sender_username = cur.fetchone()[0]
update.message.reply_text(
str(amount) +
" buxx sent to " +
str(receiver_username) +
" by " +
str(sender_username) +
" 😫")
else:
update.message.reply_text(strings.not_enough_buxx)
conn.commit()
cur.close()
except Exception as error:
print(error)
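
# Illustrative flow (assumed data): '/send alice 10' resolves alice's userID,
# then exchange() verifies the sender holds at least 10 buxx before crediting
# alice and debiting the sender in two UPDATE statements.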
def main():
global conn
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
conn.autocommit = True
updater = Updater(TOKEN, use_context=True)
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
dp.add_handler(CommandHandler("signup", signup))
dp.add_handler(CommandHandler("atm", atm))
dp.add_handler(CommandHandler("send", send))
dp.add_handler(CommandHandler("whoami", whoami))
# Start the Bot
updater.start_webhook(listen="0.0.0.0",
port=int(PORT),
url_path=TOKEN)
updater.bot.setWebhook(PROJECT_URL + TOKEN)
updater.idle()
if __name__ == '__main__':
main()
| [] | [] | [
"PORT",
"DATABASE_URL",
"API_KEY",
"PROJECT_URL"
] | [] | ["PORT", "DATABASE_URL", "API_KEY", "PROJECT_URL"] | python | 4 | 0 | |
config.py | import os
class Config:
FLASK_APP = os.getenv("FLASK_APP")
FLASK_ENV = os.getenv("DEVELOPMENT")
DEBUG = False
DEVELOPMENT = False
SECRET_KEY = os.getenv("SECRET_KEY", "this-should-be-changed-to-a-real-key")
class ProductionConfig(Config):
pass
class StagingConfig(Config):
DEBUG = True
class DevelopmentConfig(Config):
DEBUG = True
DEVELOPMENT = True | [] | [] | [
"SECRET_KEY",
"DEVELOPMENT",
"FLASK_APP"
] | [] | ["SECRET_KEY", "DEVELOPMENT", "FLASK_APP"] | python | 3 | 0 | |
src/testcases/CWE191_Integer_Underflow/s01/CWE191_Integer_Underflow__int_Environment_sub_02.java | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE191_Integer_Underflow__int_Environment_sub_02.java
Label Definition File: CWE191_Integer_Underflow__int.label.xml
Template File: sources-sinks-02.tmpl.java
*/
/*
* @description
* CWE: 191 Integer Underflow
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* Sinks: sub
* GoodSink: Ensure there will not be an underflow before subtracting 1 from data
* BadSink : Subtract 1 from data, which can cause an Underflow
* Flow Variant: 02 Control flow: if(true) and if(false)
*
* */
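/* Illustrative of the flaw itself: in two's-complement int arithmetic,
 * Integer.MIN_VALUE - 1 silently wraps around to Integer.MAX_VALUE
 * (-2147483648 - 1 == 2147483647). */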
package testcases.CWE191_Integer_Underflow.s01;
import testcasesupport.*;
import javax.servlet.http.*;
import java.util.logging.Level;
public class CWE191_Integer_Underflow__int_Environment_sub_02 extends AbstractTestCase
{
public void bad() throws Throwable
{
int data;
if (true)
{
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
             * but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
if (true)
{
            /* POTENTIAL FLAW: if data == Integer.MIN_VALUE, this will underflow */
int result = (int)(data - 1);
IO.writeLine("result: " + result);
}
}
/* goodG2B1() - use goodsource and badsink by changing first true to false */
private void goodG2B1() throws Throwable
{
int data;
if (false)
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
             * but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
else
{
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
}
if (true)
{
            /* POTENTIAL FLAW: if data == Integer.MIN_VALUE, this will underflow */
int result = (int)(data - 1);
IO.writeLine("result: " + result);
}
}
/* goodG2B2() - use goodsource and badsink by reversing statements in first if */
private void goodG2B2() throws Throwable
{
int data;
if (true)
{
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
             * but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
if (true)
{
            /* POTENTIAL FLAW: if data == Integer.MIN_VALUE, this will underflow */
int result = (int)(data - 1);
IO.writeLine("result: " + result);
}
}
/* goodB2G1() - use badsource and goodsink by changing second true to false */
private void goodB2G1() throws Throwable
{
int data;
if (true)
{
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
             * but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
if (false)
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
IO.writeLine("Benign, fixed string");
}
else
{
            /* FIX: Add a check to prevent an underflow from occurring */
if (data > Integer.MIN_VALUE)
{
int result = (int)(data - 1);
IO.writeLine("result: " + result);
}
else
{
IO.writeLine("data value is too small to perform subtraction.");
}
}
}
/* goodB2G2() - use badsource and goodsink by reversing statements in second if */
private void goodB2G2() throws Throwable
{
int data;
if (true)
{
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
             * but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
if (true)
{
            /* FIX: Add a check to prevent an underflow from occurring */
if (data > Integer.MIN_VALUE)
{
int result = (int)(data - 1);
IO.writeLine("result: " + result);
}
else
{
IO.writeLine("data value is too small to perform subtraction.");
}
}
}
public void good() throws Throwable
{
goodG2B1();
goodG2B2();
goodB2G1();
goodB2G2();
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
| [
"\"ADD\"",
"\"ADD\"",
"\"ADD\""
] | [] | [
"ADD"
] | [] | ["ADD"] | java | 1 | 0 | |
pkg/chunk/page.go | /*
* JuiceFS, Copyright 2020 Juicedata, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package chunk
import (
"errors"
"io"
"os"
"runtime"
"runtime/debug"
"sync/atomic"
"github.com/juicedata/juicefs/pkg/utils"
)
var pageStack = os.Getenv("JFS_PAGE_STACK") != ""
// Page is a page with refcount
type Page struct {
refs int32
offheap bool
dep *Page
Data []byte
stack []byte
}
// NewPage create a new page.
func NewPage(data []byte) *Page {
return &Page{refs: 1, Data: data}
}
func NewOffPage(size int) *Page {
if size <= 0 {
panic("size of page should > 0")
}
p := utils.Alloc(size)
page := &Page{refs: 1, offheap: true, Data: p}
if pageStack {
page.stack = debug.Stack()
}
runtime.SetFinalizer(page, func(p *Page) {
refcnt := atomic.LoadInt32(&p.refs)
if refcnt != 0 {
logger.Errorf("refcount of page %p (%d bytes) is not zero: %d, created by: %s", p, cap(p.Data), refcnt, string(p.stack))
if refcnt > 0 {
p.Release()
}
}
})
return page
}
func (p *Page) Slice(off, len int) *Page {
p.Acquire()
np := NewPage(p.Data[off : off+len])
np.dep = p
return np
}
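
// Illustrative usage (assumed sizes):
//
//	p := NewOffPage(4096)
//	s := p.Slice(0, 512) // s holds a reference on p
//	s.Release()          // drops s and the reference it held on p
//	p.Release()          // refcount reaches zero; off-heap memory is freed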
// Acquire increase the refcount
func (p *Page) Acquire() {
if pageStack {
p.stack = append(p.stack, debug.Stack()...)
}
atomic.AddInt32(&p.refs, 1)
}
// Release decrease the refcount
func (p *Page) Release() {
if pageStack {
p.stack = append(p.stack, debug.Stack()...)
}
if atomic.AddInt32(&p.refs, -1) == 0 {
if p.offheap {
utils.Free(p.Data)
}
if p.dep != nil {
p.dep.Release()
p.dep = nil
}
p.Data = nil
}
}
type pageReader struct {
p *Page
off int
}
func NewPageReader(p *Page) *pageReader {
p.Acquire()
return &pageReader{p, 0}
}
func (r *pageReader) Read(buf []byte) (int, error) {
n, err := r.ReadAt(buf, int64(r.off))
r.off += n
return n, err
}
func (r *pageReader) ReadAt(buf []byte, off int64) (int, error) {
if len(buf) == 0 {
return 0, nil
}
if r.p == nil {
return 0, errors.New("page is already released")
}
if int(off) == len(r.p.Data) {
return 0, io.EOF
}
n := copy(buf, r.p.Data[off:])
if n < len(buf) {
return n, io.EOF
}
return n, nil
}
func (r *pageReader) Close() error {
if r.p != nil {
r.p.Release()
r.p = nil
}
return nil
}
| [
"\"JFS_PAGE_STACK\""
] | [] | [
"JFS_PAGE_STACK"
] | [] | ["JFS_PAGE_STACK"] | go | 1 | 0 | |
utils.py | # -*- coding: utf-8 -*-
## @package proxen.utils
# @brief Globals and utility functions used across the app.
import os, logging
from config import *
# --------------------------------------------------------------- #
## `bool` debug mode switcher (`True` = print debug messages to console)
DEBUG = CONFIG['app'].getboolean('debug', fallback=False) if 'app' in CONFIG else False
## `str` newline symbol
NL = '\n'
## `str` default coding (for file IO)
CODING = 'utf-8'
## `str` log message mask
LOGMSGFORMAT = '[{asctime}] {message}'
## `str` log file name (relative to project dir); empty = no log output
LOGFILE = CONFIG['app'].get('logfile', None) if 'app' in CONFIG else None
## `logging.Logger` the global logger object
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
## `logging.Formatter` logging formatter object
formatter = logging.Formatter(fmt=LOGMSGFORMAT, datefmt='%Y-%m-%d %H:%M:%S', style='{')
if DEBUG:
ch_debug = logging.StreamHandler()
ch_debug.setLevel(logging.DEBUG)
ch_debug.setFormatter(formatter)
logger.addHandler(ch_debug)
if LOGFILE:
ch_logfile = logging.FileHandler(os.path.abspath(LOGFILE), mode='w', encoding=CODING, delay=True)
ch_logfile.setLevel(logging.DEBUG)
ch_logfile.setFormatter(formatter)
logger.addHandler(ch_logfile)
# --------------------------------------------------------------- #
## Creates an absolute path given the root directory.
# @param root `str` the root directory to form the abs path (empty = project directory)
# @returns `str` the absolute file / folder path
def make_abspath(filename, root=''):
if not root: root = os.path.dirname(__file__)
return os.path.abspath(os.path.join(root, filename) if filename else root)
# --------------------------------------------------------------- #
## Makes a log message using the global logger instance.
# @param what `str` the message text
# @param how `str` determines the log message type:
# - `info`: information message (default)
# - `warn`: warning message
# - `error`: error message
# - `debug`: debug message
# - `critical`: critical message
# - `exception`: exception message
# @param args `positional args` passed to the logger
# @param kwargs `keyword args` passed to the logger
def log(what, how='info', *args, **kwargs):
logger = logging.getLogger()
if how == 'info':
logger.info(what, *args, **kwargs)
elif how == 'warn':
logger.warning(what, *args, **kwargs)
elif how == 'error':
logger.error(what, *args, **kwargs)
elif how == 'debug':
logger.debug(what, *args, **kwargs)
elif how == 'critical':
logger.critical(what, *args, **kwargs)
elif how == 'exception':
logger.exception(what, *args, **kwargs)
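
# Illustrative usage (assumed message): log('loaded 12 proxies', 'debug')
# writes a timestamped debug record to the console and/or LOGFILE handlers
# configured above.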
# --------------------------------------------------------------- #
## Checks if the current user has admin / root / SU privileges.
# @returns `tuple` a 2-tuple of the following elements:
# -# `str` current user name
# -# `bool` whether the user has admin / root / SU privileges (`True`) or not (`False`)
def has_admin():
if os.name == 'nt':
try:
# only windows users with admin privileges can read the C:\windows\temp
temp = os.listdir(os.sep.join([os.environ.get('SystemRoot','C:\\windows'),'temp']))
except:
return (os.environ['USERNAME'], False)
else:
return (os.environ['USERNAME'], True)
else:
if os.geteuid() == 0:
return (os.environ['USER'], True)
else:
return (os.environ['USER'], False) | [] | [] | [
"SystemRoot",
"USERNAME",
"USER"
] | [] | ["SystemRoot", "USERNAME", "USER"] | python | 3 | 0 | |
renpy/arguments.py | # Copyright 2004-2021 Tom Rothamel <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file handles argument parsing. Argument parsing takes place in
# two phases. In the first phase, we only parse the arguments that are
# necessary to load the game, and run the init phase. The init phase
# can register commands and arguments. These arguments are parsed at
# the end of the init phase, before the game begins running, and can
# decide if the game runs or some other action occurs.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
from renpy.compat import *
import argparse
import os
import renpy
try:
import site
site._renpy_argv_emulation() # @UndefinedVariable
except:
pass
# A map from command name to a (function, flag) tuple. The flag is true if the
# function will parse command line arguments, and false otherwise.
commands = { }
# True if the command requires the display, false if it doesn't.
display = { }
# Commands that force compile to be set.
compile_commands = { "compile", "add_from", "merge_strings" }
class ArgumentParser(argparse.ArgumentParser):
"""
Creates an argument parser that is capable of parsing the standard Ren'Py
arguments, as well as arguments that are specific to a sub-command.
"""
def __init__(self, second_pass=True, description=None, require_command=True):
"""
Creates an argument parser.
`second_pass`
True if this is the second pass through argument parsing. (The pass
that parses sub-commands.)
`description`
If supplied, this will be used as a description of the subcommand
to run.
"""
self.group = self
argparse.ArgumentParser.__init__(self, description="The Ren'Py visual novel engine.", add_help=False)
command_names = ", ".join(sorted(commands))
if require_command:
self.add_argument(
"basedir",
help="The base directory containing of the project to run. This defaults to the directory containing the Ren'Py executable.")
self.add_argument(
"command",
help="The command to execute. Available commands are: " + command_names + ". Defaults to 'run'.")
else:
self.add_argument(
"basedir",
default='',
nargs='?',
help="The base directory containing of the project to run. This defaults to the directory containing the Ren'Py executable.")
self.add_argument(
"command",
help="The command to execute. Available commands are: " + command_names + ". Defaults to 'run'.",
nargs='?',
default="run")
self.add_argument(
"--savedir", dest='savedir', default=None, metavar="DIRECTORY",
help="The directory where saves and persistent data are placed.")
self.add_argument(
'--trace', dest='trace', action='store', default=0, type=int, metavar="LEVEL",
help="The level of trace Ren'Py will log to trace.txt. (1=per-call, 2=per-line)")
self.add_argument(
"--version", action='version', version=renpy.version,
help="Displays the version of Ren'Py in use.")
self.add_argument(
"--compile", action='store_true', dest='compile',
help='Forces all .rpy scripts to be recompiled before proceeding.')
self.add_argument(
"--keep-orphan-rpyc", action="store_true",
help="Prevents the compile command from deleting orphan rpyc files.")
self.add_argument(
"--lint", action="store_true", dest="lint",
help=argparse.SUPPRESS)
self.add_argument(
"--errors-in-editor", action="store_true",
help="Causes errors to open in a text editor.")
self.add_argument(
'--safe-mode', dest='safe_mode', action='store_true', default=False,
help="Forces Ren'Py to start in safe mode, allowing the player to configure graphics.")
dump = self.add_argument_group("JSON dump arguments", description="Ren'Py can dump information about the game to a JSON file. These options let you select the file, and choose what is dumped.")
dump.add_argument("--json-dump", action="store", metavar="FILE", help="The name of the JSON file.")
dump.add_argument("--json-dump-private", action="store_true", default=False, help="Include private names. (Names beginning with _.)")
dump.add_argument("--json-dump-common", action="store_true", default=False, help="Include names defined in the common directory.")
if second_pass:
self.add_argument("-h", "--help", action="help", help="Displays this help message, then exits.")
command = renpy.game.args.command # @UndefinedVariable
self.group = self.add_argument_group("{0} command arguments".format(command), description)
def add_argument(self, *args, **kwargs):
if self.group is self:
argparse.ArgumentParser.add_argument(self, *args, **kwargs)
else:
self.group.add_argument(*args, **kwargs)
def parse_args(self, *args, **kwargs):
rv = argparse.ArgumentParser.parse_args(self, *args, **kwargs)
if rv.command in compile_commands:
rv.compile = True
if renpy.session.get("compile", False):
rv.compile = True
return rv
def parse_known_args(self, *args, **kwargs):
args, rest = argparse.ArgumentParser.parse_known_args(self, *args, **kwargs)
if args.command in compile_commands:
args.compile = True
if renpy.session.get("compile", False):
args.compile = True
return args, rest
def run():
"""
The default command, that (when called) leads to normal game startup.
"""
ap = ArgumentParser(description="Runs the current project normally.", require_command=False)
ap.add_argument(
'--profile-display', dest='profile_display', action='store_true', default=False,
help="If present, Ren'Py will report the amount of time it takes to draw the screen.")
ap.add_argument(
'--debug-image-cache', dest='debug_image_cache', action='store_true', default=False,
help="If present, Ren'Py will log information regarding the contents of the image cache.")
ap.add_argument(
'--warp', dest='warp', default=None,
help='This takes as an argument a filename:linenumber pair, and tries to warp to the statement before that line number.')
args = renpy.game.args = ap.parse_args()
if args.warp:
renpy.warp.warp_spec = args.warp
if args.profile_display: # @UndefinedVariable
renpy.config.profile = True
if args.debug_image_cache:
renpy.config.debug_image_cache = True
return True
def compile(): # @ReservedAssignment
"""
This command forces the game script to be recompiled.
"""
takes_no_arguments("Recompiles the game script.")
return False
def quit(): # @ReservedAssignment
"""
This command is used to quit without doing anything.
"""
    takes_no_arguments("Quits without doing anything.")
return False
def rmpersistent():
"""
This command is used to delete the persistent data.
"""
takes_no_arguments("Deletes the persistent data.")
renpy.loadsave.location.unlink_persistent()
renpy.persistent.should_save_persistent = False
return False
def register_command(name, function, uses_display=False):
"""
Registers a command that can be invoked when Ren'Py is run on the command
line. When the command is run, `function` is called with no arguments.
If `function` needs to take additional command-line arguments, it should
instantiate a renpy.arguments.ArgumentParser(), and then call parse_args
on it. Otherwise, it should call renpy.arguments.takes_no_arguments().
If `function` returns true, Ren'Py startup proceeds normally. Otherwise,
Ren'Py will terminate when function() returns.
`uses_display`
If true, Ren'Py will initialize the display. If False, Ren'Py will
use dummy video and audio drivers.
"""
commands[name] = function
display[name] = uses_display
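
# Illustrative registration (hypothetical command, not part of Ren'Py):
#
#     def hello():
#         takes_no_arguments("Prints a greeting.")
#         print("hello")
#         return False
#
#     register_command("hello", hello)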
def bootstrap():
"""
Called during bootstrap to perform an initial parse of the arguments, ignoring
unknown arguments. Returns the parsed arguments, and a list of unknown arguments.
"""
global rest
ap = ArgumentParser(False, require_command=False)
args, _rest = ap.parse_known_args()
return args
def pre_init():
"""
Called before init, to set up argument parsing.
"""
global subparsers
register_command("run", run, True)
register_command("lint", renpy.lint.lint)
register_command("compile", compile)
register_command("rmpersistent", rmpersistent)
register_command("quit", quit)
def post_init():
"""
Called after init, but before the game starts. This parses a command
and its arguments. It then runs the command function, and returns True
if execution should continue and False otherwise.
"""
command = renpy.game.args.command # @UndefinedVariable
if command == "run" and renpy.game.args.lint: # @UndefinedVariable
command = "lint"
if command not in commands:
ArgumentParser().error("Command {0} is unknown.".format(command))
if not display[command]:
os.environ.setdefault("SDL_AUDIODRIVER", "dummy")
os.environ.setdefault("SDL_VIDEODRIVER", "dummy")
return commands[command]()
def takes_no_arguments(description=None):
"""
Used to report that a command takes no arguments.
"""
ArgumentParser(description=description).parse_args()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pkg/cloudprovider/gcp/block_store.go | /*
Copyright 2017 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gcp
import (
"encoding/json"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/pkg/errors"
"github.com/satori/uuid"
"github.com/sirupsen/logrus"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
"k8s.io/apimachinery/pkg/runtime"
"github.com/heptio/velero/pkg/cloudprovider"
"github.com/heptio/velero/pkg/util/collections"
)
const (
projectKey = "project"
zoneSeparator = "__"
)
type blockStore struct {
gce *compute.Service
project string
log logrus.FieldLogger
}
func NewBlockStore(logger logrus.FieldLogger) cloudprovider.BlockStore {
return &blockStore{log: logger}
}
func (b *blockStore) Init(config map[string]string) error {
project, err := extractProjectFromCreds()
if err != nil {
return err
}
client, err := google.DefaultClient(oauth2.NoContext, compute.ComputeScope)
if err != nil {
return errors.WithStack(err)
}
gce, err := compute.New(client)
if err != nil {
return errors.WithStack(err)
}
b.gce = gce
b.project = project
return nil
}
func extractProjectFromCreds() (string, error) {
credsBytes, err := ioutil.ReadFile(os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"))
if err != nil {
return "", errors.WithStack(err)
}
type credentials struct {
ProjectID string `json:"project_id"`
}
var creds credentials
if err := json.Unmarshal(credsBytes, &creds); err != nil {
return "", errors.WithStack(err)
}
if creds.ProjectID == "" {
return "", errors.New("cannot fetch project_id from GCP credentials file")
}
return creds.ProjectID, nil
}
// isMultiZone returns true if the failure-domain tag contains
// double underscore, which is the separator used
// by GKE when a storage class spans multiple availability
// zones.
func isMultiZone(volumeAZ string) bool {
return strings.Contains(volumeAZ, zoneSeparator)
}
// parseRegion parses a failure-domain tag with multiple zones
// and returns a single region. Zones are separated by double underscores (__).
// For example
// input: us-central1-a__us-central1-b
// return: us-central1
// When a custom storage class spans multiple geographical zones,
// such as us-central1 and us-west1 only the zone matching the cluster is used
// in the failure-domain tag.
// For example
// Cluster nodes in us-central1-c, us-central1-f
// Storage class zones us-central1-a, us-central1-f, us-east1-a, us-east1-d
// The failure-domain tag would be: us-central1-a__us-central1-f
func parseRegion(volumeAZ string) (string, error) {
zones := strings.Split(volumeAZ, zoneSeparator)
zone := zones[0]
parts := strings.SplitAfterN(zone, "-", 3)
if len(parts) < 2 {
return "", errors.Errorf("failed to parse region from zone: %q", volumeAZ)
}
return parts[0] + strings.TrimSuffix(parts[1], "-"), nil
}
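
// Illustrative results (derived from the cases documented above):
//
//	parseRegion("us-central1-a")                -> "us-central1"
//	parseRegion("us-central1-a__us-central1-f") -> "us-central1"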
// Retrieve the URLs for zones via the GCP API.
func (b *blockStore) getZoneURLs(volumeAZ string) ([]string, error) {
zones := strings.Split(volumeAZ, zoneSeparator)
var zoneURLs []string
for _, z := range zones {
zone, err := b.gce.Zones.Get(b.project, z).Do()
if err != nil {
return nil, errors.WithStack(err)
}
zoneURLs = append(zoneURLs, zone.SelfLink)
}
return zoneURLs, nil
}
func (b *blockStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (volumeID string, err error) {
// get the snapshot so we can apply its tags to the volume
res, err := b.gce.Snapshots.Get(b.project, snapshotID).Do()
if err != nil {
return "", errors.WithStack(err)
}
// Kubernetes uses the description field of GCP disks to store a JSON doc containing
// tags.
//
// use the snapshot's description (which contains tags from the snapshotted disk
// plus Velero-specific tags) to set the new disk's description.
disk := &compute.Disk{
Name: "restore-" + uuid.NewV4().String(),
SourceSnapshot: res.SelfLink,
Type: volumeType,
Description: res.Description,
}
if isMultiZone(volumeAZ) {
volumeRegion, err := parseRegion(volumeAZ)
if err != nil {
return "", err
}
// URLs for zones that the volume is replicated to within GCP
zoneURLs, err := b.getZoneURLs(volumeAZ)
if err != nil {
return "", err
}
disk.ReplicaZones = zoneURLs
if _, err = b.gce.RegionDisks.Insert(b.project, volumeRegion, disk).Do(); err != nil {
return "", errors.WithStack(err)
}
} else {
if _, err = b.gce.Disks.Insert(b.project, volumeAZ, disk).Do(); err != nil {
return "", errors.WithStack(err)
}
}
return disk.Name, nil
}
func (b *blockStore) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) {
var (
res *compute.Disk
err error
)
if isMultiZone(volumeAZ) {
volumeRegion, err := parseRegion(volumeAZ)
if err != nil {
return "", nil, errors.WithStack(err)
}
res, err = b.gce.RegionDisks.Get(b.project, volumeRegion, volumeID).Do()
if err != nil {
return "", nil, errors.WithStack(err)
}
} else {
res, err = b.gce.Disks.Get(b.project, volumeAZ, volumeID).Do()
if err != nil {
return "", nil, errors.WithStack(err)
}
}
return res.Type, nil, nil
}
func (b *blockStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) {
// snapshot names must adhere to RFC1035 and be 1-63 characters
// long
var snapshotName string
suffix := "-" + uuid.NewV4().String()
if len(volumeID) <= (63 - len(suffix)) {
snapshotName = volumeID + suffix
} else {
snapshotName = volumeID[0:63-len(suffix)] + suffix
}
if isMultiZone(volumeAZ) {
volumeRegion, err := parseRegion(volumeAZ)
if err != nil {
return "", errors.WithStack(err)
}
return b.createRegionSnapshot(snapshotName, volumeID, volumeRegion, tags)
} else {
return b.createSnapshot(snapshotName, volumeID, volumeAZ, tags)
}
}
func (b *blockStore) createSnapshot(snapshotName, volumeID, volumeAZ string, tags map[string]string) (string, error) {
disk, err := b.gce.Disks.Get(b.project, volumeAZ, volumeID).Do()
if err != nil {
return "", errors.WithStack(err)
}
gceSnap := compute.Snapshot{
Name: snapshotName,
Description: getSnapshotTags(tags, disk.Description, b.log),
}
_, err = b.gce.Disks.CreateSnapshot(b.project, volumeAZ, volumeID, &gceSnap).Do()
if err != nil {
return "", errors.WithStack(err)
}
return gceSnap.Name, nil
}
func (b *blockStore) createRegionSnapshot(snapshotName, volumeID, volumeRegion string, tags map[string]string) (string, error) {
disk, err := b.gce.RegionDisks.Get(b.project, volumeRegion, volumeID).Do()
if err != nil {
return "", errors.WithStack(err)
}
gceSnap := compute.Snapshot{
Name: snapshotName,
Description: getSnapshotTags(tags, disk.Description, b.log),
}
_, err = b.gce.RegionDisks.CreateSnapshot(b.project, volumeRegion, volumeID, &gceSnap).Do()
if err != nil {
return "", errors.WithStack(err)
}
return gceSnap.Name, nil
}
func getSnapshotTags(veleroTags map[string]string, diskDescription string, log logrus.FieldLogger) string {
// Kubernetes uses the description field of GCP disks to store a JSON doc containing
// tags.
//
// use the tags in the disk's description (if a valid JSON doc) plus the tags arg
// to set the snapshot's description.
var snapshotTags map[string]string
if err := json.Unmarshal([]byte(diskDescription), &snapshotTags); err != nil {
// error decoding the disk's description, so just use the Velero-assigned tags
log.WithError(err).
Error("unable to decode disk's description as JSON, so only applying Velero-assigned tags to snapshot")
snapshotTags = veleroTags
} else {
// merge Velero-assigned tags with the disk's tags (note that we want current
// Velero-assigned tags to overwrite any older versions of them that may exist
// due to prior snapshots/restores)
for k, v := range veleroTags {
snapshotTags[k] = v
}
}
if len(snapshotTags) == 0 {
return ""
}
tagsJSON, err := json.Marshal(snapshotTags)
if err != nil {
log.WithError(err).Error("unable to encode snapshot's tags to JSON, so not tagging snapshot")
return ""
}
return string(tagsJSON)
}
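// Illustrative example (not part of the original source): a disk whose
// description is {"kubernetes.io/created-for/pv/name":"pvc-123"}, snapshotted
// with the Velero tags {"velero.io/backup":"backup-1"}, yields the merged
// snapshot description
// {"kubernetes.io/created-for/pv/name":"pvc-123","velero.io/backup":"backup-1"}.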
func (b *blockStore) DeleteSnapshot(snapshotID string) error {
_, err := b.gce.Snapshots.Delete(b.project, snapshotID).Do()
// if it's a 404 (not found) error, we don't need to return an error
// since the snapshot is not there.
if gcpErr, ok := err.(*googleapi.Error); ok && gcpErr.Code == http.StatusNotFound {
return nil
}
if err != nil {
return errors.WithStack(err)
}
return nil
}
func (b *blockStore) GetVolumeID(pv runtime.Unstructured) (string, error) {
if !collections.Exists(pv.UnstructuredContent(), "spec.gcePersistentDisk") {
return "", nil
}
volumeID, err := collections.GetString(pv.UnstructuredContent(), "spec.gcePersistentDisk.pdName")
if err != nil {
return "", err
}
return volumeID, nil
}
func (b *blockStore) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) {
gce, err := collections.GetMap(pv.UnstructuredContent(), "spec.gcePersistentDisk")
if err != nil {
return nil, err
}
gce["pdName"] = volumeID
return pv, nil
}
| [
"\"GOOGLE_APPLICATION_CREDENTIALS\""
] | [] | [
"GOOGLE_APPLICATION_CREDENTIALS"
] | [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | go | 1 | 0 | |
udwSys/udwSysEnv/BinPath.go | package udwSysEnv
import (
"github.com/tachyon-protocol/udw/udwMath"
"github.com/tachyon-protocol/udw/udwPlatform"
"github.com/tachyon-protocol/udw/udwStrings"
"os"
"path/filepath"
"strings"
)
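// RecoverPath normalizes the PATH environment variable. On Darwin/Linux it
// ensures /usr/local/bin, /bin and /usr/bin are all present, in that order,
// rewriting PATH only when needed (e.g. "/bin:/usr/bin:/usr/local/bin:/opt/x"
// becomes "/usr/local/bin:/bin:/usr/bin:/opt/x"). On Windows it appends the
// system32 directories to PATH if they are missing.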
func RecoverPath() {
if udwPlatform.IsDarwin() || udwPlatform.IsLinux() {
const (
a = "/usr/local/bin"
b = "/bin"
c = "/usr/bin"
)
targetPathOrderList := [3]string{a, b, c}
targetPathIndexMap := make(map[string]int, 3)
for _, p := range targetPathOrderList {
targetPathIndexMap[p] = -1
}
pathToIndex := map[string]int{}
originList := GetBinPathList()
for i, p := range originList {
for _, tp := range targetPathOrderList {
if p == tp {
targetPathIndexMap[tp] = i
break
}
}
pathToIndex[p] = i
}
needRewrite := false
allTargetExist := true
for _, i := range targetPathIndexMap {
if i == -1 {
allTargetExist = false
}
}
if allTargetExist {
for i, p := range targetPathOrderList {
if i == len(targetPathOrderList)-1 {
break
}
if targetPathIndexMap[p] > targetPathIndexMap[targetPathOrderList[i+1]] {
needRewrite = true
break
}
}
} else {
needRewrite = true
}
if !needRewrite {
return
}
size := udwMath.IntMax([]int{len(pathToIndex) - 3, 3})
newPathList := make([]string, 0, size)
for _, p := range targetPathOrderList {
delete(pathToIndex, p)
newPathList = append(newPathList, p)
}
for _, p := range originList {
if pathToIndex[p] == -1 {
continue
}
if p == a || p == b || p == c {
continue
}
newPathList = append(newPathList, p)
pathToIndex[p] = -1
}
os.Setenv("PATH", strings.Join(newPathList, ":"))
}
if udwPlatform.IsWindows() {
pathEnv := os.Getenv("PATH")
pathList := GetBinPathList()
change := false
for _, needPath := range []string{
`c:\windows\system32`,
`c:\windows\system32\wbem`,
} {
if !udwStrings.IsInSlice(pathList, needPath) {
change = true
pathEnv += ";" + needPath
}
}
if change {
os.Setenv("PATH", pathEnv)
}
return
}
}
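// GetBinPathList returns the entries of the PATH environment variable as a slice.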
func GetBinPathList() []string {
return filepath.SplitList(os.Getenv("PATH"))
}
| [
"\"PATH\"",
"\"PATH\""
] | [] | [
"PATH"
] | [] | ["PATH"] | go | 1 | 0 | |
references/recognition/train_tensorflow.py | # Copyright (C) 2021, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
import os
os.environ['USE_TF'] = '1'
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import datetime
import hashlib
import multiprocessing as mp
import time
from pathlib import Path
import numpy as np
import tensorflow as tf
import wandb
from fastprogress.fastprogress import master_bar, progress_bar
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
from doctr import transforms as T
from doctr.datasets import VOCABS, DataLoader, RecognitionDataset
from doctr.models import recognition
from doctr.utils.metrics import TextMatch
from utils import plot_samples
def fit_one_epoch(model, train_loader, batch_transforms, optimizer, mb):
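    """Run one training epoch: apply batch transforms, compute the loss, and update the weights batch by batch."""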
train_iter = iter(train_loader)
# Iterate over the batches of the dataset
for batch_step in progress_bar(range(train_loader.num_batches), parent=mb):
images, targets = next(train_iter)
images = batch_transforms(images)
with tf.GradientTape() as tape:
train_loss = model(images, targets, training=True)['loss']
grads = tape.gradient(train_loss, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
mb.child.comment = f'Training loss: {train_loss.numpy().mean():.6}'
def evaluate(model, val_loader, batch_transforms, val_metric):
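    """Evaluate on the validation set and return (val_loss, exact_match, partial_match)."""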
# Reset val metric
val_metric.reset()
# Validation loop
val_loss, batch_cnt = 0, 0
val_iter = iter(val_loader)
for images, targets in val_iter:
images = batch_transforms(images)
out = model(images, targets, return_preds=True, training=False)
# Compute metric
if len(out['preds']):
words, _ = zip(*out['preds'])
else:
words = []
val_metric.update(targets, words)
val_loss += out['loss'].numpy().mean()
batch_cnt += 1
val_loss /= batch_cnt
result = val_metric.summary()
return val_loss, result['raw'], result['unicase']
def main(args):
print(args)
if not isinstance(args.workers, int):
args.workers = min(16, mp.cpu_count())
# Load val data generator
st = time.time()
val_set = RecognitionDataset(
img_folder=os.path.join(args.val_path, 'images'),
labels_path=os.path.join(args.val_path, 'labels.json'),
sample_transforms=T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, drop_last=False, workers=args.workers)
print(f"Validation set loaded in {time.time() - st:.4}s ({len(val_set)} samples in "
f"{val_loader.num_batches} batches)")
with open(os.path.join(args.val_path, 'labels.json'), 'rb') as f:
val_hash = hashlib.sha256(f.read()).hexdigest()
# Load doctr model
model = recognition.__dict__[args.arch](
pretrained=args.pretrained,
input_shape=(args.input_size, 4 * args.input_size, 3),
vocab=VOCABS[args.vocab]
)
# Resume weights
if isinstance(args.resume, str):
model.load_weights(args.resume)
# Metrics
val_metric = TextMatch()
batch_transforms = T.Compose([
T.Normalize(mean=(0.694, 0.695, 0.693), std=(0.299, 0.296, 0.301)),
])
if args.test_only:
print("Running evaluation")
val_loss, exact_match, partial_match = evaluate(model, val_loader, batch_transforms, val_metric)
print(f"Validation loss: {val_loss:.6} (Exact: {exact_match:.2%} | Partial: {partial_match:.2%})")
return
st = time.time()
# Load train data generator
base_path = Path(args.train_path)
parts = [base_path] if base_path.joinpath('labels.json').is_file() else [
base_path.joinpath(sub) for sub in os.listdir(base_path)
]
train_set = RecognitionDataset(
parts[0].joinpath('images'),
parts[0].joinpath('labels.json'),
sample_transforms=T.Compose([
T.RandomApply(T.ColorInversion(), .1),
T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
# Augmentations
T.RandomJpegQuality(60),
T.RandomSaturation(.3),
T.RandomContrast(.3),
T.RandomBrightness(.3),
]),
)
if len(parts) > 1:
for subfolder in parts[1:]:
train_set.merge_dataset(RecognitionDataset(subfolder.joinpath('images'), subfolder.joinpath('labels.json')))
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, workers=args.workers)
print(f"Train set loaded in {time.time() - st:.4}s ({len(train_set)} samples in "
f"{train_loader.num_batches} batches)")
with open(parts[0].joinpath('labels.json'), 'rb') as f:
train_hash = hashlib.sha256(f.read()).hexdigest()
if args.show_samples:
x, target = next(iter(train_loader))
plot_samples(x, target)
return
# Optimizer
scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
args.lr,
decay_steps=args.epochs * len(train_loader),
decay_rate=1 / (25e4), # final lr as a fraction of initial lr
staircase=False
)
optimizer = tf.keras.optimizers.Adam(
learning_rate=scheduler,
beta_1=0.95,
beta_2=0.99,
epsilon=1e-6,
clipnorm=5
)
# Tensorboard to monitor training
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
exp_name = f"{args.arch}_{current_time}" if args.name is None else args.name
# W&B
if args.wb:
run = wandb.init(
name=exp_name,
project="text-recognition",
config={
"learning_rate": args.lr,
"epochs": args.epochs,
"weight_decay": 0.,
"batch_size": args.batch_size,
"architecture": args.arch,
"input_size": args.input_size,
"optimizer": "adam",
"framework": "tensorflow",
"scheduler": "exp_decay",
"vocab": args.vocab,
"train_hash": train_hash,
"val_hash": val_hash,
"pretrained": args.pretrained,
}
)
min_loss = np.inf
# Training loop
mb = master_bar(range(args.epochs))
for epoch in mb:
fit_one_epoch(model, train_loader, batch_transforms, optimizer, mb)
# Validation loop at the end of each epoch
val_loss, exact_match, partial_match = evaluate(model, val_loader, batch_transforms, val_metric)
if val_loss < min_loss:
print(f"Validation loss decreased {min_loss:.6} --> {val_loss:.6}: saving state...")
model.save_weights(f'./{exp_name}/weights')
min_loss = val_loss
mb.write(f"Epoch {epoch + 1}/{args.epochs} - Validation loss: {val_loss:.6} "
f"(Exact: {exact_match:.2%} | Partial: {partial_match:.2%})")
# W&B
if args.wb:
wandb.log({
'val_loss': val_loss,
'exact_match': exact_match,
'partial_match': partial_match,
})
if args.wb:
run.finish()
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='DocTR training script for text recognition (TensorFlow)',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('train_path', type=str, help='path to train data folder(s)')
parser.add_argument('val_path', type=str, help='path to val data folder')
parser.add_argument('arch', type=str, help='text-recognition model to train')
parser.add_argument('--name', type=str, default=None, help='Name of your training experiment')
parser.add_argument('--epochs', type=int, default=10, help='number of epochs to train the model on')
parser.add_argument('-b', '--batch_size', type=int, default=64, help='batch size for training')
parser.add_argument('--input_size', type=int, default=32, help='input size H for the model, W = 4*H')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate for the optimizer (Adam)')
parser.add_argument('-j', '--workers', type=int, default=None, help='number of workers used for dataloading')
parser.add_argument('--resume', type=str, default=None, help='Path to your checkpoint')
parser.add_argument('--vocab', type=str, default="french", help='Vocab to be used for training')
parser.add_argument("--test-only", dest='test_only', action='store_true', help="Run the validation loop")
parser.add_argument('--show-samples', dest='show_samples', action='store_true',
                        help='Display unnormalized training samples')
parser.add_argument('--wb', dest='wb', action='store_true',
help='Log to Weights & Biases')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='Load pretrained parameters before starting the training')
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
| [] | [] | [
"USE_TF",
"TF_CPP_MIN_LOG_LEVEL"
] | [] | ["USE_TF", "TF_CPP_MIN_LOG_LEVEL"] | python | 2 | 0 | |
diagonalDifference.py | #!/bin/python3
import math
import os
import random
import re
import sys
# The function is expected to return an INTEGER.
# The function accepts 2D_INTEGER_ARRAY arr as parameter.
def diagonalDifference(arr):
diag0, diag1 = 0, 0
for i in range(len(arr)):
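        # Only accumulate entries that satisfy the problem's stated bounds (-100..100).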
if -100 <= arr[i][i] <= 100 and -100 <= arr[i][len(arr)-1-i] <= 100:
diag0 += arr[i][i]
diag1 += arr[i][len(arr)-1-i]
return abs(diag1-diag0)
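# Worked example: for [[11, 2, 4], [4, 5, 6], [10, 8, -12]] the primary
# diagonal sums to 11 + 5 - 12 = 4 and the secondary diagonal to
# 4 + 5 + 10 = 19, so the result is |19 - 4| = 15.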
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
arr = []
for _ in range(n):
arr.append(list(map(int, input().rstrip().split())))
result = diagonalDifference(arr)
fptr.write(str(result) + '\n')
fptr.close()
| [] | [] | [
"OUTPUT_PATH"
] | [] | ["OUTPUT_PATH"] | python | 1 | 0 | |
libpod/runtime.go | package libpod
import (
"bufio"
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"syscall"
"time"
"golang.org/x/sys/unix"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/common/libimage"
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/defaultnet"
"github.com/containers/common/pkg/secrets"
"github.com/containers/image/v5/pkg/sysregistriesv2"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/events"
"github.com/containers/podman/v3/libpod/lock"
"github.com/containers/podman/v3/libpod/plugin"
"github.com/containers/podman/v3/libpod/shutdown"
"github.com/containers/podman/v3/pkg/cgroups"
"github.com/containers/podman/v3/pkg/rootless"
"github.com/containers/podman/v3/pkg/systemd"
"github.com/containers/podman/v3/pkg/util"
"github.com/containers/storage"
"github.com/containers/storage/pkg/unshare"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/docker/pkg/namesgenerator"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
// conmonMinMajorVersion is the major version required for conmon.
conmonMinMajorVersion = 2
// conmonMinMinorVersion is the minor version required for conmon.
conmonMinMinorVersion = 0
	// conmonMinPatchVersion is the patch version required for conmon.
conmonMinPatchVersion = 24
)
// A RuntimeOption is a functional option which alters the Runtime created by
// NewRuntime
type RuntimeOption func(*Runtime) error
type storageSet struct {
RunRootSet bool
GraphRootSet bool
StaticDirSet bool
VolumePathSet bool
GraphDriverNameSet bool
TmpDirSet bool
}
// Runtime is the core libpod runtime
type Runtime struct {
config *config.Config
storageConfig storage.StoreOptions
storageSet storageSet
state State
store storage.Store
storageService *storageService
imageContext *types.SystemContext
defaultOCIRuntime OCIRuntime
ociRuntimes map[string]OCIRuntime
runtimeFlags []string
netPlugin ocicni.CNIPlugin
conmonPath string
libimageRuntime *libimage.Runtime
libimageEventsShutdown chan bool
lockManager lock.Manager
// doRenumber indicates that the runtime should perform a lock renumber
// during initialization.
// Once the runtime has been initialized and returned, this variable is
// unused.
doRenumber bool
doMigrate bool
// System migrate can move containers to a new runtime.
// We make no promises that these migrated containers work on the new
// runtime, though.
migrateRuntime string
// valid indicates whether the runtime is ready to use.
// valid is set to true when a runtime is returned from GetRuntime(),
// and remains true until the runtime is shut down (rendering its
// storage unusable). When valid is false, the runtime cannot be used.
valid bool
lock sync.RWMutex
	// mechanism to read and write event logs
eventer events.Eventer
// noStore indicates whether we need to interact with a store or not
noStore bool
// secretsManager manages secrets
secretsManager *secrets.SecretsManager
}
// SetXdgDirs ensures the XDG_RUNTIME_DIR and XDG_CONFIG_HOME env variables are set.
// containers/image uses XDG_RUNTIME_DIR to locate the auth file; XDG_CONFIG_HOME is
// used for the containers.conf configuration file.
func SetXdgDirs() error {
if !rootless.IsRootless() {
return nil
}
// Setup XDG_RUNTIME_DIR
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir == "" {
var err error
runtimeDir, err = util.GetRuntimeDir()
if err != nil {
return err
}
}
if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
}
if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" {
sessionAddr := filepath.Join(runtimeDir, "bus")
if _, err := os.Stat(sessionAddr); err == nil {
os.Setenv("DBUS_SESSION_BUS_ADDRESS", fmt.Sprintf("unix:path=%s", sessionAddr))
}
}
// Setup XDG_CONFIG_HOME
if cfgHomeDir := os.Getenv("XDG_CONFIG_HOME"); cfgHomeDir == "" {
cfgHomeDir, err := util.GetRootlessConfigHomeDir()
if err != nil {
return err
}
if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil {
return errors.Wrapf(err, "cannot set XDG_CONFIG_HOME")
}
}
return nil
}
// NewRuntime creates a new container runtime
// Options can be passed to override the default configuration for the runtime
func NewRuntime(ctx context.Context, options ...RuntimeOption) (*Runtime, error) {
conf, err := config.NewConfig("")
if err != nil {
return nil, err
}
conf.CheckCgroupsAndAdjustConfig()
return newRuntimeFromConfig(ctx, conf, options...)
}
// NewRuntimeFromConfig creates a new container runtime using the given
// configuration file for its default configuration. Passed RuntimeOption
// functions can be used to mutate this configuration further.
// An error will be returned if the configuration file at the given path does
// not exist or cannot be loaded
func NewRuntimeFromConfig(ctx context.Context, userConfig *config.Config, options ...RuntimeOption) (*Runtime, error) {
return newRuntimeFromConfig(ctx, userConfig, options...)
}
func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...RuntimeOption) (*Runtime, error) {
runtime := new(Runtime)
if conf.Engine.OCIRuntime == "" {
conf.Engine.OCIRuntime = "runc"
// If we're running on cgroups v2, default to using crun.
if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 {
conf.Engine.OCIRuntime = "crun"
}
}
runtime.config = conf
if err := SetXdgDirs(); err != nil {
return nil, err
}
storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
if err != nil {
return nil, err
}
runtime.storageConfig = storeOpts
// Overwrite config with user-given configuration options
for _, opt := range options {
if err := opt(runtime); err != nil {
return nil, errors.Wrapf(err, "error configuring runtime")
}
}
if err := shutdown.Register("libpod", func(sig os.Signal) error {
os.Exit(1)
return nil
}); err != nil && errors.Cause(err) != shutdown.ErrHandlerExists {
logrus.Errorf("Error registering shutdown handler for libpod: %v", err)
}
if err := shutdown.Start(); err != nil {
return nil, errors.Wrapf(err, "error starting shutdown signal handler")
}
if err := makeRuntime(ctx, runtime); err != nil {
return nil, err
}
return runtime, nil
}
func getLockManager(runtime *Runtime) (lock.Manager, error) {
var err error
var manager lock.Manager
switch runtime.config.Engine.LockType {
case "file":
lockPath := filepath.Join(runtime.config.Engine.TmpDir, "locks")
manager, err = lock.OpenFileLockManager(lockPath)
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
manager, err = lock.NewFileLockManager(lockPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to get new file lock manager")
}
} else {
return nil, err
}
}
case "", "shm":
lockPath := define.DefaultSHMLockPath
if rootless.IsRootless() {
lockPath = fmt.Sprintf("%s_%d", define.DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
}
// Set up the lock manager
manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
switch {
case os.IsNotExist(errors.Cause(err)):
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
return nil, errors.Wrapf(err, "failed to get new shm lock manager")
}
case errors.Cause(err) == syscall.ERANGE && runtime.doRenumber:
logrus.Debugf("Number of locks does not match - removing old locks")
// ERANGE indicates a lock numbering mismatch.
// Since we're renumbering, this is not fatal.
// Remove the earlier set of locks and recreate.
if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
}
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
return nil, err
}
default:
return nil, err
}
}
default:
return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.Engine.LockType)
}
return manager, nil
}
// Make a new runtime based on the given configuration
// Sets up containers/storage, state store, OCI runtime
func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
// Find a working conmon binary
cPath, err := findConmon(runtime.config.Engine.ConmonPath)
if err != nil {
return err
}
runtime.conmonPath = cPath
// Make the static files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.StaticDir, 0700); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrap(err, "error creating runtime static files directory")
}
}
// Set up the state.
//
// TODO - if we further break out the state implementation into
// libpod/state, the config could take care of the code below. It
// would further allow to move the types and consts into a coherent
// package.
switch runtime.config.Engine.StateType {
case config.InMemoryStateStore:
return errors.Wrapf(define.ErrInvalidArg, "in-memory state is currently disabled")
case config.SQLiteStateStore:
return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled")
case config.BoltDBStateStore:
dbPath := filepath.Join(runtime.config.Engine.StaticDir, "bolt_state.db")
state, err := NewBoltState(dbPath, runtime)
if err != nil {
return err
}
runtime.state = state
default:
return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.Engine.StateType)
}
// Grab config from the database so we can reset some defaults
dbConfig, err := runtime.state.GetDBConfig()
if err != nil {
return errors.Wrapf(err, "error retrieving runtime configuration from database")
}
runtime.mergeDBConfig(dbConfig)
unified, _ := cgroups.IsCgroup2UnifiedMode()
if unified && rootless.IsRootless() && !systemd.IsSystemdSessionValid(rootless.GetRootlessUID()) {
		// If the user is rootless and XDG_RUNTIME_DIR is set, podman will not
		// fall back to a /tmp directory; it will try to use the existing
		// XDG_RUNTIME_DIR. If the current user has no write access to
		// XDG_RUNTIME_DIR, we will fail later.
if unix.Access(runtime.storageConfig.RunRoot, unix.W_OK) != nil {
logrus.Warnf("XDG_RUNTIME_DIR is pointing to a path which is not writable. Most likely podman will fail.")
}
}
logrus.Debugf("Using graph driver %s", runtime.storageConfig.GraphDriverName)
logrus.Debugf("Using graph root %s", runtime.storageConfig.GraphRoot)
logrus.Debugf("Using run root %s", runtime.storageConfig.RunRoot)
logrus.Debugf("Using static dir %s", runtime.config.Engine.StaticDir)
logrus.Debugf("Using tmp dir %s", runtime.config.Engine.TmpDir)
logrus.Debugf("Using volume path %s", runtime.config.Engine.VolumePath)
// Validate our config against the database, now that we've set our
// final storage configuration
if err := runtime.state.ValidateDBConfig(runtime); err != nil {
return err
}
if err := runtime.state.SetNamespace(runtime.config.Engine.Namespace); err != nil {
return errors.Wrapf(err, "error setting libpod namespace in state")
}
logrus.Debugf("Set libpod namespace to %q", runtime.config.Engine.Namespace)
hasCapSysAdmin, err := unshare.HasCapSysAdmin()
if err != nil {
return err
}
needsUserns := !hasCapSysAdmin
// Set up containers/storage
var store storage.Store
if needsUserns {
logrus.Debug("Not configuring container store")
} else if runtime.noStore {
logrus.Debug("No store required. Not opening container store.")
} else if err := runtime.configureStore(); err != nil {
return err
}
defer func() {
if retErr != nil && store != nil {
// Don't forcibly shut down
// We could be opening a store in use by another libpod
if _, err := store.Shutdown(false); err != nil {
logrus.Errorf("Error removing store for partially-created runtime: %s", err)
}
}
}()
// Setup the eventer
eventer, err := runtime.newEventer()
if err != nil {
return err
}
runtime.eventer = eventer
// TODO: events for libimage
// Set up containers/image
if runtime.imageContext == nil {
runtime.imageContext = &types.SystemContext{
BigFilesTemporaryDir: parse.GetTempDir(),
}
}
runtime.imageContext.SignaturePolicyPath = runtime.config.Engine.SignaturePolicyPath
// Create the tmpDir
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrap(err, "error creating tmpdir")
}
}
// Create events log dir
if err := os.MkdirAll(filepath.Dir(runtime.config.Engine.EventsLogFilePath), 0700); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrap(err, "error creating events dirs")
}
}
// Get us at least one working OCI runtime.
runtime.ociRuntimes = make(map[string]OCIRuntime)
// Initialize remaining OCI runtimes
for name, paths := range runtime.config.Engine.OCIRuntimes {
ociRuntime, err := newConmonOCIRuntime(name, paths, runtime.conmonPath, runtime.runtimeFlags, runtime.config)
if err != nil {
			// Don't fatally error.
			// This will allow us to ship configs including optional
			// runtimes that might not be installed (crun, kata).
			// Only log at debug level so default configs don't spew errors.
logrus.Debugf("configured OCI runtime %s initialization failed: %v", name, err)
continue
}
runtime.ociRuntimes[name] = ociRuntime
}
// Do we have a default OCI runtime?
if runtime.config.Engine.OCIRuntime != "" {
// If the string starts with / it's a path to a runtime
// executable.
if strings.HasPrefix(runtime.config.Engine.OCIRuntime, "/") {
ociRuntime, err := newConmonOCIRuntime(runtime.config.Engine.OCIRuntime, []string{runtime.config.Engine.OCIRuntime}, runtime.conmonPath, runtime.runtimeFlags, runtime.config)
if err != nil {
return err
}
runtime.ociRuntimes[runtime.config.Engine.OCIRuntime] = ociRuntime
runtime.defaultOCIRuntime = ociRuntime
} else {
ociRuntime, ok := runtime.ociRuntimes[runtime.config.Engine.OCIRuntime]
if !ok {
return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.Engine.OCIRuntime)
}
runtime.defaultOCIRuntime = ociRuntime
}
}
logrus.Debugf("Using OCI runtime %q", runtime.defaultOCIRuntime.Path())
// Do we have at least one valid OCI runtime?
if len(runtime.ociRuntimes) == 0 {
return errors.Wrapf(define.ErrInvalidArg, "no OCI runtime has been configured")
}
// Do we have a default runtime?
if runtime.defaultOCIRuntime == nil {
return errors.Wrapf(define.ErrInvalidArg, "no default OCI runtime was configured")
}
// Make the per-boot files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0755); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrapf(err, "error creating runtime temporary files directory")
}
}
// If we need to make a default network - do so now.
if err := defaultnet.Create(runtime.config.Network.DefaultNetwork, runtime.config.Network.DefaultSubnet, runtime.config.Network.NetworkConfigDir, runtime.config.Engine.StaticDir, runtime.config.Engine.MachineEnabled); err != nil {
logrus.Errorf("Failed to created default CNI network: %v", err)
}
// Set up the CNI net plugin
netPlugin, err := ocicni.InitCNINoInotify(runtime.config.Network.DefaultNetwork, runtime.config.Network.NetworkConfigDir, "", runtime.config.Network.CNIPluginDirs...)
if err != nil {
return errors.Wrapf(err, "error configuring CNI network plugin")
}
runtime.netPlugin = netPlugin
// We now need to see if the system has restarted
// We check for the presence of a file in our tmp directory to verify this
// This check must be locked to prevent races
runtimeAliveLock := filepath.Join(runtime.config.Engine.TmpDir, "alive.lck")
runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
aliveLock, err := storage.GetLockfile(runtimeAliveLock)
if err != nil {
return errors.Wrapf(err, "error acquiring runtime init lock")
}
// Acquire the lock and hold it until we return
// This ensures that no two processes will be in runtime.refresh at once
// TODO: we can't close the FD in this lock, so we should keep it around
// and use it to lock important operations
aliveLock.Lock()
doRefresh := false
defer func() {
if aliveLock.Locked() {
aliveLock.Unlock()
}
}()
_, err = os.Stat(runtimeAliveFile)
if err != nil {
		// If we need to refresh, then it is safe to assume there are
		// no containers running. Immediately create a namespace, as
		// we will need to access the storage.
if needsUserns {
			// Warn users if running rootless with the systemd cgroup
			// manager and no valid systemd session is present.
			// Warn only when a new namespace is created.
if runtime.config.Engine.CgroupManager == config.SystemdCgroupsManager {
unified, _ := cgroups.IsCgroup2UnifiedMode()
if unified && rootless.IsRootless() && !systemd.IsSystemdSessionValid(rootless.GetRootlessUID()) {
logrus.Debug("Invalid systemd user session for current user")
}
}
aliveLock.Unlock() // Unlock to avoid deadlock as BecomeRootInUserNS will reexec.
pausePid, err := util.GetRootlessPauseProcessPidPathGivenDir(runtime.config.Engine.TmpDir)
if err != nil {
return errors.Wrapf(err, "could not get pause process pid file path")
}
became, ret, err := rootless.BecomeRootInUserNS(pausePid)
if err != nil {
return err
}
if became {
os.Exit(ret)
}
}
// If the file doesn't exist, we need to refresh the state
// This will trigger on first use as well, but refreshing an
// empty state only creates a single file
// As such, it's not really a performance concern
if os.IsNotExist(err) {
doRefresh = true
} else {
return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile)
}
}
runtime.lockManager, err = getLockManager(runtime)
if err != nil {
return err
}
// If we're renumbering locks, do it now.
// It breaks out of normal runtime init, and will not return a valid
// runtime.
if runtime.doRenumber {
if err := runtime.renumberLocks(); err != nil {
return err
}
}
// If we need to refresh the state, do it now - things are guaranteed to
// be set up by now.
if doRefresh {
// Ensure we have a store before refresh occurs
if runtime.store == nil {
if err := runtime.configureStore(); err != nil {
return err
}
}
if err2 := runtime.refresh(runtimeAliveFile); err2 != nil {
return err2
}
}
// Mark the runtime as valid - ready to be used, cannot be modified
// further
runtime.valid = true
if runtime.doMigrate {
if err := runtime.migrate(ctx); err != nil {
return err
}
}
return nil
}
// findConmon iterates over conmonPaths and returns the path
// to the first conmon binary with a new enough version. If none is found,
// we try to do a path lookup of "conmon".
func findConmon(conmonPaths []string) (string, error) {
foundOutdatedConmon := false
for _, path := range conmonPaths {
stat, err := os.Stat(path)
if err != nil {
continue
}
if stat.IsDir() {
continue
}
if err := probeConmon(path); err != nil {
logrus.Warnf("Conmon at %s invalid: %v", path, err)
foundOutdatedConmon = true
continue
}
logrus.Debugf("Using conmon: %q", path)
return path, nil
}
// Search the $PATH as last fallback
if path, err := exec.LookPath("conmon"); err == nil {
if err := probeConmon(path); err != nil {
logrus.Warnf("Conmon at %s is invalid: %v", path, err)
foundOutdatedConmon = true
} else {
logrus.Debugf("Using conmon from $PATH: %q", path)
return path, nil
}
}
if foundOutdatedConmon {
return "", errors.Wrapf(define.ErrConmonOutdated,
"please update to v%d.%d.%d or later",
conmonMinMajorVersion, conmonMinMinorVersion, conmonMinPatchVersion)
}
return "", errors.Wrapf(define.ErrInvalidArg,
"could not find a working conmon binary (configured options: %v)",
conmonPaths)
}
// probeConmon calls conmon --version and verifies it meets the minimum
// version the container engine currently requires.
func probeConmon(conmonBinary string) error {
cmd := exec.Command(conmonBinary, "--version")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return err
}
r := regexp.MustCompile(`^conmon version (?P<Major>\d+).(?P<Minor>\d+).(?P<Patch>\d+)`)
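	// e.g. conmon 2.0.24 prints "conmon version 2.0.24", which yields
	// matches ["conmon version 2.0.24", "2", "0", "24"].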
matches := r.FindStringSubmatch(out.String())
	if len(matches) != 4 {
		// err is nil at this point, so build a fresh error instead of
		// wrapping it (errors.Wrap(nil, ...) would silently return nil).
		return errors.New(define.ErrConmonVersionFormat)
	}
major, err := strconv.Atoi(matches[1])
if err != nil {
return errors.Wrap(err, define.ErrConmonVersionFormat)
}
if major < conmonMinMajorVersion {
return define.ErrConmonOutdated
}
if major > conmonMinMajorVersion {
return nil
}
minor, err := strconv.Atoi(matches[2])
if err != nil {
return errors.Wrap(err, define.ErrConmonVersionFormat)
}
if minor < conmonMinMinorVersion {
return define.ErrConmonOutdated
}
if minor > conmonMinMinorVersion {
return nil
}
patch, err := strconv.Atoi(matches[3])
if err != nil {
return errors.Wrap(err, define.ErrConmonVersionFormat)
}
if patch < conmonMinPatchVersion {
return define.ErrConmonOutdated
}
if patch > conmonMinPatchVersion {
return nil
}
return nil
}
// TmpDir gets the current Libpod temporary files directory.
func (r *Runtime) TmpDir() (string, error) {
if !r.valid {
return "", define.ErrRuntimeStopped
}
return r.config.Engine.TmpDir, nil
}
// GetConfig returns a copy of the configuration used by the runtime
func (r *Runtime) GetConfig() (*config.Config, error) {
r.lock.RLock()
defer r.lock.RUnlock()
if !r.valid {
return nil, define.ErrRuntimeStopped
}
config := new(config.Config)
// Copy so the caller won't be able to modify the actual config
if err := JSONDeepCopy(r.config, config); err != nil {
return nil, errors.Wrapf(err, "error copying config")
}
return config, nil
}
// libimageEventsMap translates a libimage event type to a libpod event status.
var libimageEventsMap = map[libimage.EventType]events.Status{
libimage.EventTypeImagePull: events.Pull,
libimage.EventTypeImagePush: events.Push,
libimage.EventTypeImageRemove: events.Remove,
libimage.EventTypeImageLoad: events.LoadFromArchive,
libimage.EventTypeImageSave: events.Save,
libimage.EventTypeImageTag: events.Tag,
libimage.EventTypeImageUntag: events.Untag,
libimage.EventTypeImageMount: events.Mount,
libimage.EventTypeImageUnmount: events.Unmount,
}
// libimageEvents spawns a goroutine in the background which is listening for
// events on the libimage.Runtime. The goroutine will be cleaned up implicitly
// when main() exits.
func (r *Runtime) libimageEvents() {
r.libimageEventsShutdown = make(chan bool)
toLibpodEventStatus := func(e *libimage.Event) events.Status {
status, found := libimageEventsMap[e.Type]
if !found {
return "Unknown"
}
return status
}
eventChannel := r.libimageRuntime.EventChannel()
go func() {
for {
			// Make sure to read and write all events before
			// checking whether we're about to shut down.
for len(eventChannel) > 0 {
libimageEvent := <-eventChannel
e := events.Event{
ID: libimageEvent.ID,
Name: libimageEvent.Name,
Status: toLibpodEventStatus(libimageEvent),
Time: libimageEvent.Time,
Type: events.Image,
}
if err := r.eventer.Write(e); err != nil {
logrus.Errorf("unable to write image event: %q", err)
}
}
select {
case <-r.libimageEventsShutdown:
return
default:
time.Sleep(100 * time.Millisecond)
}
}
}()
}
// DeferredShutdown shuts down the runtime without exposing any
// errors. This is only meant to be used when the runtime is being
// shut down within a defer statement; otherwise use Shutdown.
func (r *Runtime) DeferredShutdown(force bool) {
_ = r.Shutdown(force)
}
// Shutdown shuts down the runtime and associated containers and storage
// If force is true, containers and mounted storage will be shut down before
// cleaning up; if force is false, an error will be returned if there are
// still containers running or mounted
func (r *Runtime) Shutdown(force bool) error {
r.lock.Lock()
defer r.lock.Unlock()
if !r.valid {
return define.ErrRuntimeStopped
}
r.valid = false
// Shutdown all containers if --force is given
if force {
ctrs, err := r.state.AllContainers()
if err != nil {
logrus.Errorf("Error retrieving containers from database: %v", err)
} else {
for _, ctr := range ctrs {
if err := ctr.StopWithTimeout(r.config.Engine.StopTimeout); err != nil {
logrus.Errorf("Error stopping container %s: %v", ctr.ID(), err)
}
}
}
}
var lastError error
// If no store was requested, it can be nil and there is no need to
// attempt to shut it down
if r.store != nil {
// Wait for the events to be written.
if r.libimageEventsShutdown != nil {
r.libimageEventsShutdown <- true
}
// Note that the libimage runtime shuts down the store.
if err := r.libimageRuntime.Shutdown(force); err != nil {
lastError = errors.Wrapf(err, "error shutting down container storage")
}
}
if err := r.state.Close(); err != nil {
if lastError != nil {
logrus.Errorf("%v", lastError)
}
lastError = err
}
return lastError
}
// Reconfigures the runtime after a reboot
// Refreshes the state, recreating temporary files
// Does not check validity as the runtime is not valid until after this has run
func (r *Runtime) refresh(alivePath string) error {
logrus.Debugf("Podman detected system restart - performing state refresh")
// Clear state of database if not running in container
if !graphRootMounted() {
// First clear the state in the database
if err := r.state.Refresh(); err != nil {
return err
}
}
// Next refresh the state of all containers to recreate dirs and
// namespaces, and all the pods to recreate cgroups.
// Containers, pods, and volumes must also reacquire their locks.
ctrs, err := r.state.AllContainers()
if err != nil {
return errors.Wrapf(err, "error retrieving all containers from state")
}
pods, err := r.state.AllPods()
if err != nil {
return errors.Wrapf(err, "error retrieving all pods from state")
}
vols, err := r.state.AllVolumes()
if err != nil {
return errors.Wrapf(err, "error retrieving all volumes from state")
}
// No locks are taken during pod, volume, and container refresh.
// Furthermore, the pod/volume/container refresh() functions are not
// allowed to take locks themselves.
// We cannot assume that any pod/volume/container has a valid lock until
// after this function has returned.
// The runtime alive lock should suffice to provide mutual exclusion
// until this has run.
for _, ctr := range ctrs {
if err := ctr.refresh(); err != nil {
logrus.Errorf("Error refreshing container %s: %v", ctr.ID(), err)
}
}
for _, pod := range pods {
if err := pod.refresh(); err != nil {
logrus.Errorf("Error refreshing pod %s: %v", pod.ID(), err)
}
}
for _, vol := range vols {
if err := vol.refresh(); err != nil {
logrus.Errorf("Error refreshing volume %s: %v", vol.Name(), err)
}
}
// Create a file indicating the runtime is alive and ready
file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644)
if err != nil {
return errors.Wrap(err, "error creating runtime status file")
}
defer file.Close()
r.newSystemEvent(events.Refresh)
return nil
}
// Info returns the store and host information
func (r *Runtime) Info() (*define.Info, error) {
return r.info()
}
// generateName generates a unique name for a container or pod.
func (r *Runtime) generateName() (string, error) {
for {
name := namesgenerator.GetRandomName(0)
// Make sure container with this name does not exist
if _, err := r.state.LookupContainer(name); err == nil {
continue
} else if errors.Cause(err) != define.ErrNoSuchCtr {
return "", err
}
// Make sure pod with this name does not exist
if _, err := r.state.LookupPod(name); err == nil {
continue
} else if errors.Cause(err) != define.ErrNoSuchPod {
return "", err
}
return name, nil
}
// The code should never reach here.
}
// Configure store and image runtime
func (r *Runtime) configureStore() error {
store, err := storage.GetStore(r.storageConfig)
if err != nil {
return err
}
r.store = store
is.Transport.SetStore(store)
// Set up a storage service for creating container root filesystems from
// images
r.storageService = getStorageService(r.store)
runtimeOptions := &libimage.RuntimeOptions{
SystemContext: r.imageContext,
}
libimageRuntime, err := libimage.RuntimeFromStore(store, runtimeOptions)
if err != nil {
return err
}
r.libimageRuntime = libimageRuntime
// Run the libimage events routine.
r.libimageEvents()
return nil
}
// LibimageRuntime returns the runtime's libimage.Runtime, exposed to allow
// for a step-by-step migration to libimage.
func (r *Runtime) LibimageRuntime() *libimage.Runtime {
return r.libimageRuntime
}
// SystemContext returns the image context.
func (r *Runtime) SystemContext() *types.SystemContext {
// Return the context from the libimage runtime. libimage is sensitive
// to a number of env vars.
return r.libimageRuntime.SystemContext()
}
// GetOCIRuntimePath retrieves the path of the default OCI runtime.
func (r *Runtime) GetOCIRuntimePath() string {
return r.defaultOCIRuntime.Path()
}
// DefaultOCIRuntime returns the default OCI runtime.
func (r *Runtime) DefaultOCIRuntime() OCIRuntime {
return r.defaultOCIRuntime
}
// StorageConfig retrieves the storage options for the container runtime
func (r *Runtime) StorageConfig() storage.StoreOptions {
return r.storageConfig
}
// RunRoot retrieves the current c/storage temporary directory in use by Libpod.
func (r *Runtime) RunRoot() string {
if r.store == nil {
return ""
}
return r.store.RunRoot()
}
// GetName retrieves the name associated with a given full ID.
// This works for both containers and pods, and does not distinguish between the
// two.
// If the given ID does not correspond to any existing Pod or Container,
// ErrNoSuchCtr is returned.
func (r *Runtime) GetName(id string) (string, error) {
r.lock.RLock()
defer r.lock.RUnlock()
if !r.valid {
return "", define.ErrRuntimeStopped
}
return r.state.GetName(id)
}
// DBConfig is a set of Libpod runtime configuration settings that are saved in
// a State when it is first created, and can subsequently be retrieved.
type DBConfig struct {
LibpodRoot string
LibpodTmp string
StorageRoot string
StorageTmp string
GraphDriver string
VolumePath string
}
// mergeDBConfig merges the configuration from the database.
func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) {
c := &r.config.Engine
if !r.storageSet.RunRootSet && dbConfig.StorageTmp != "" {
if r.storageConfig.RunRoot != dbConfig.StorageTmp &&
r.storageConfig.RunRoot != "" {
logrus.Debugf("Overriding run root %q with %q from database",
r.storageConfig.RunRoot, dbConfig.StorageTmp)
}
r.storageConfig.RunRoot = dbConfig.StorageTmp
}
if !r.storageSet.GraphRootSet && dbConfig.StorageRoot != "" {
if r.storageConfig.GraphRoot != dbConfig.StorageRoot &&
r.storageConfig.GraphRoot != "" {
logrus.Debugf("Overriding graph root %q with %q from database",
r.storageConfig.GraphRoot, dbConfig.StorageRoot)
}
r.storageConfig.GraphRoot = dbConfig.StorageRoot
}
if !r.storageSet.GraphDriverNameSet && dbConfig.GraphDriver != "" {
if r.storageConfig.GraphDriverName != dbConfig.GraphDriver &&
r.storageConfig.GraphDriverName != "" {
logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve",
r.storageConfig.GraphDriverName, dbConfig.GraphDriver)
}
r.storageConfig.GraphDriverName = dbConfig.GraphDriver
}
if !r.storageSet.StaticDirSet && dbConfig.LibpodRoot != "" {
if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" {
logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot)
}
c.StaticDir = dbConfig.LibpodRoot
}
if !r.storageSet.TmpDirSet && dbConfig.LibpodTmp != "" {
if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" {
logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp)
}
c.TmpDir = dbConfig.LibpodTmp
c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log")
}
if !r.storageSet.VolumePathSet && dbConfig.VolumePath != "" {
if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" {
logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath)
}
c.VolumePath = dbConfig.VolumePath
}
}
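// EnableLabeling reports whether container labeling is enabled in the
// containers configuration.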
func (r *Runtime) EnableLabeling() bool {
return r.config.Containers.EnableLabeling
}
// Reload reloads the configuration files.
func (r *Runtime) Reload() error {
if err := r.reloadContainersConf(); err != nil {
return err
}
if err := r.reloadStorageConf(); err != nil {
return err
}
// Invalidate the registries.conf cache. The next invocation will
// reload all data.
sysregistriesv2.InvalidateCache()
return nil
}
// reloadContainersConf reloads the containers.conf
func (r *Runtime) reloadContainersConf() error {
config, err := config.Reload()
if err != nil {
return err
}
r.config = config
logrus.Infof("applied new containers configuration: %v", config)
return nil
}
// reloadStorageConf reloads the storage.conf
func (r *Runtime) reloadStorageConf() error {
configFile, err := storage.DefaultConfigFile(rootless.IsRootless())
if err != nil {
return err
}
storage.ReloadConfigurationFile(configFile, &r.storageConfig)
logrus.Infof("applied new storage configuration: %v", r.storageConfig)
return nil
}
// getVolumePlugin gets a specific volume plugin given its name.
func (r *Runtime) getVolumePlugin(name string) (*plugin.VolumePlugin, error) {
// There is no plugin for local.
if name == define.VolumeDriverLocal || name == "" {
return nil, nil
}
pluginPath, ok := r.config.Engine.VolumePlugins[name]
if !ok {
return nil, errors.Wrapf(define.ErrMissingPlugin, "no volume plugin with name %s available", name)
}
return plugin.GetVolumePlugin(name, pluginPath)
}
// GetSecretsStorageDir returns the directory in which the secrets manager stores its data.
func (r *Runtime) GetSecretsStorageDir() string {
return filepath.Join(r.store.GraphRoot(), "secrets")
}
// SecretsManager returns the runtime's secrets manager, initializing it on first use.
func (r *Runtime) SecretsManager() (*secrets.SecretsManager, error) {
if r.secretsManager == nil {
manager, err := secrets.NewManager(r.GetSecretsStorageDir())
if err != nil {
return nil, err
}
r.secretsManager = manager
}
return r.secretsManager, nil
}
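// graphRootMounted reports whether /run/.containerenv contains the
// "graphRootMounted=1" marker, i.e. whether the storage graph root was
// mounted into this container.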
func graphRootMounted() bool {
f, err := os.OpenFile("/run/.containerenv", os.O_RDONLY, os.ModePerm)
if err != nil {
return false
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
if scanner.Text() == "graphRootMounted=1" {
return true
}
}
return false
}
func (r *Runtime) graphRootMountedFlag(mounts []spec.Mount) string {
root := r.store.GraphRoot()
for _, val := range mounts {
if strings.HasPrefix(root, val.Source) {
return "graphRootMounted=1"
}
}
return ""
}
| [
"\"XDG_RUNTIME_DIR\"",
"\"DBUS_SESSION_BUS_ADDRESS\"",
"\"XDG_CONFIG_HOME\""
] | [] | [
"XDG_RUNTIME_DIR",
"DBUS_SESSION_BUS_ADDRESS",
"XDG_CONFIG_HOME"
] | [] | ["XDG_RUNTIME_DIR", "DBUS_SESSION_BUS_ADDRESS", "XDG_CONFIG_HOME"] | go | 3 | 0 | |
pkg/runtimes/docker/docker.go | /*
Copyright © 2020-2021 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package docker
import (
"net/url"
"os"
log "github.com/sirupsen/logrus"
)
type Docker struct{}
// ID returns the identity of the runtime
func (d Docker) ID() string {
return "docker"
}
// GetHost returns the docker daemon host
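// For example, DOCKER_HOST=tcp://10.0.0.5:2375 yields "10.0.0.5:2375"; an
// unset or unparsable DOCKER_HOST yields "".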
func (d Docker) GetHost() string {
dockerHost := os.Getenv("DOCKER_HOST")
url, err := url.Parse(dockerHost)
if err != nil {
return ""
}
log.Debugf("DockerHost: %s", url.Host)
return url.Host
}
// GetRuntimePath returns the path of the docker socket
func (d Docker) GetRuntimePath() string {
return "/var/run/docker.sock"
}
| [
"\"DOCKER_HOST\""
] | [] | [
"DOCKER_HOST"
] | [] | ["DOCKER_HOST"] | go | 1 | 0 | |
api/v2alpha1/python/setup.py | # Copyright 2020 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import setuptools
try:
from distutils.spawn import find_executable
except ImportError:
from shutil import which as find_executable
NAME = "kfp-pipeline-spec"
VERSION = "0.1.8"
PROTO_DIR = os.path.realpath(
os.path.join(os.path.dirname(__file__), os.pardir))
PKG_DIR = os.path.realpath(
os.path.join(os.path.dirname(__file__), "kfp", "pipeline_spec"))
# Find the Protocol Compiler. (Taken from protobuf/python/setup.py)
if "PROTOC" in os.environ and os.path.exists(os.environ["PROTOC"]):
PROTOC = os.environ["PROTOC"]
else:
PROTOC = find_executable("protoc")
def GenerateProto(source):
"""Generate a _pb2.py from a .proto file.
Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input.
Args:
source: The source proto file that needs to be compiled.
"""
output = source.replace(".proto", "_pb2.py")
if not os.path.exists(output) or (
os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output)):
print("Generating %s..." % output)
if not os.path.exists(source):
sys.stderr.write("Can't find required file: %s\n" % source)
sys.exit(-1)
if PROTOC is None:
sys.stderr.write("protoc is not found. Please compile it "
"or install the binary package.\n")
sys.exit(-1)
protoc_command = [
PROTOC, "-I%s" % PROTO_DIR,
"--python_out=%s" % PKG_DIR, source
]
if subprocess.call(protoc_command) != 0:
sys.exit(-1)
# Generate the protobuf files that we depend on.
GenerateProto(os.path.join(PROTO_DIR, "pipeline_spec.proto"))
setuptools.setup(
name=NAME,
version=VERSION,
description="Kubeflow Pipelines pipeline spec",
author="google",
author_email="[email protected]",
url="https://github.com/kubeflow/pipelines",
packages=setuptools.find_namespace_packages(include=['kfp.*']),
python_requires=">=3.5.3",
install_requires=["protobuf>=3.13.0,<4"],
include_package_data=True,
license="Apache 2.0",
)
| [] | [] | [
"PROTOC"
] | [] | ["PROTOC"] | python | 1 | 0 | |
cmd/web.go | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package cmd
import (
"context"
"fmt"
"net"
"net/http"
_ "net/http/pprof" // Used for debugging if enabled and a web server is running
"os"
"strings"
"go.wandrs.dev/framework/modules/graceful"
"go.wandrs.dev/framework/modules/log"
"go.wandrs.dev/framework/modules/setting"
"go.wandrs.dev/framework/routers"
"go.wandrs.dev/framework/routers/routes"
context2 "github.com/gorilla/context"
"github.com/urfave/cli/v2"
ini "gopkg.in/ini.v1"
)
// CmdWeb represents the available web sub-command.
var CmdWeb = &cli.Command{
Name: "web",
Usage: "Start Gitea web server",
Description: `Gitea web server is the only thing you need to run,
and it takes care of all the other things for you`,
Action: runWeb,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "port",
Aliases: []string{"p"},
Value: "3000",
Usage: "Temporary port number to prevent conflict",
},
&cli.StringFlag{
Name: "install-port",
Value: "3000",
Usage: "Temporary port number to run the install page on to prevent conflict",
},
&cli.StringFlag{
Name: "pid",
Aliases: []string{"P"},
Value: setting.PIDFile,
Usage: "Custom pid file path",
},
},
}
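// runHTTPRedirector starts a plain-HTTP listener that issues a 307 redirect
// for every request to the configured AppURL.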
func runHTTPRedirector() {
source := fmt.Sprintf("%s:%s", setting.HTTPAddr, setting.PortToRedirect)
dest := strings.TrimSuffix(setting.AppURL, "/")
log.Info("Redirecting: %s to %s", source, dest)
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
target := dest + r.URL.Path
if len(r.URL.RawQuery) > 0 {
target += "?" + r.URL.RawQuery
}
http.Redirect(w, r, target, http.StatusTemporaryRedirect)
})
var err = runHTTP("tcp", source, "HTTP Redirector", context2.ClearHandler(handler))
if err != nil {
log.Fatal("Failed to start port redirection: %v", err)
}
}
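// runWeb is the entry point of the `web` sub-command: it performs
// pre-initialization, serves the install routes when setup is still needed,
// and then starts the normal web server.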
func runWeb(ctx *cli.Context) error {
managerCtx, cancel := context.WithCancel(context.Background())
graceful.InitManager(managerCtx)
defer cancel()
if os.Getppid() > 1 && len(os.Getenv("LISTEN_FDS")) > 0 {
log.Info("Restarting Gitea on PID: %d from parent PID: %d", os.Getpid(), os.Getppid())
} else {
log.Info("Starting Gitea on PID: %d", os.Getpid())
}
// Set pid file setting
if ctx.IsSet("pid") {
setting.PIDFile = ctx.String("pid")
setting.WritePIDFile = true
}
// Perform pre-initialization
needsInstall := routers.PreInstallInit(graceful.GetManager().HammerContext())
if needsInstall {
		// Flag for the port number, in case a first-time run conflicts with an existing service
if ctx.IsSet("port") {
if err := setPort(ctx.String("port")); err != nil {
return err
}
}
if ctx.IsSet("install-port") {
if err := setPort(ctx.String("install-port")); err != nil {
return err
}
}
c := routes.InstallRoutes()
err := listen(c, false)
select {
case <-graceful.GetManager().IsShutdown():
<-graceful.GetManager().Done()
log.Info("PID: %d Gitea Web Finished", os.Getpid())
log.Close()
return err
default:
}
} else {
NoInstallListener()
}
if setting.EnablePprof {
go func() {
log.Info("Starting pprof server on localhost:6060")
log.Info("%v", http.ListenAndServe("localhost:6060", nil))
}()
}
log.Info("Global init")
// Perform global initialization
routers.GlobalInit(graceful.GetManager().HammerContext())
// Override the provided port number within the configuration
if ctx.IsSet("port") {
if err := setPort(ctx.String("port")); err != nil {
return err
}
}
// Set up Chi routes
c := routes.NormalRoutes()
err := listen(c, true)
<-graceful.GetManager().Done()
log.Info("PID: %d Gitea Web Finished", os.Getpid())
log.Close()
return err
}
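// setPort overrides the configured HTTP port, rewrites AppURL to match, and
// persists the derived LOCAL_ROOT_URL for non-socket protocols.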
func setPort(port string) error {
setting.AppURL = strings.Replace(setting.AppURL, setting.HTTPPort, port, 1)
setting.HTTPPort = port
switch setting.Protocol {
case setting.UnixSocket:
case setting.FCGI:
case setting.FCGIUnix:
default:
defaultLocalURL := string(setting.Protocol) + "://"
if setting.HTTPAddr == "0.0.0.0" {
defaultLocalURL += "localhost"
} else {
defaultLocalURL += setting.HTTPAddr
}
defaultLocalURL += ":" + setting.HTTPPort + "/"
// Save LOCAL_ROOT_URL if port changed
setting.CreateOrAppendToCustomConf(func(cfg *ini.File) {
cfg.Section("server").Key("LOCAL_ROOT_URL").SetValue(defaultLocalURL)
})
}
return nil
}
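// listen starts the web server on the configured protocol and address; when
// handleRedirector is true it also starts or disables the HTTP redirector as
// appropriate for the chosen protocol.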
func listen(m http.Handler, handleRedirector bool) error {
listenAddr := setting.HTTPAddr
if setting.Protocol != setting.UnixSocket && setting.Protocol != setting.FCGIUnix {
listenAddr = net.JoinHostPort(listenAddr, setting.HTTPPort)
}
log.Info("Listen: %v://%s%s", setting.Protocol, listenAddr, setting.AppSubURL)
var err error
switch setting.Protocol {
case setting.HTTP:
if handleRedirector {
NoHTTPRedirector()
}
err = runHTTP("tcp", listenAddr, "Web", context2.ClearHandler(m))
case setting.HTTPS:
if setting.EnableLetsEncrypt {
err = runLetsEncrypt(listenAddr, setting.Domain, setting.LetsEncryptDirectory, setting.LetsEncryptEmail, context2.ClearHandler(m))
break
}
if handleRedirector {
if setting.RedirectOtherPort {
go runHTTPRedirector()
} else {
NoHTTPRedirector()
}
}
err = runHTTPS("tcp", listenAddr, "Web", setting.CertFile, setting.KeyFile, context2.ClearHandler(m))
case setting.FCGI:
if handleRedirector {
NoHTTPRedirector()
}
err = runFCGI("tcp", listenAddr, "FCGI Web", context2.ClearHandler(m))
case setting.UnixSocket:
if handleRedirector {
NoHTTPRedirector()
}
err = runHTTP("unix", listenAddr, "Web", context2.ClearHandler(m))
case setting.FCGIUnix:
if handleRedirector {
NoHTTPRedirector()
}
err = runFCGI("unix", listenAddr, "Web", context2.ClearHandler(m))
default:
log.Fatal("Invalid protocol: %s", setting.Protocol)
}
if err != nil {
log.Critical("Failed to start server: %v", err)
}
log.Info("HTTP Listener: %s Closed", listenAddr)
return err
}
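// Illustrative note (not part of the original source): listen dispatches on
// setting.Protocol. For example, with Protocol set to HTTPS and
// setting.EnableLetsEncrypt true, the configured cert/key files are ignored
// and serving is delegated to runLetsEncrypt instead of runHTTPS.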
| ["\"LISTEN_FDS\""] | [] | ["LISTEN_FDS"] | [] | ["LISTEN_FDS"] | go | 1 | 0 | |
commands.go | package docker
import (
"archive/tar"
"bufio"
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/auth"
"github.com/dotcloud/docker/engine"
flag "github.com/dotcloud/docker/pkg/mflag"
"github.com/dotcloud/docker/pkg/sysinfo"
"github.com/dotcloud/docker/pkg/term"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"os/signal"
"path"
"reflect"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"syscall"
"text/tabwriter"
"text/template"
"time"
)
var (
GITCOMMIT string
VERSION string
)
var (
ErrConnectionRefused = errors.New("Can't connect to docker daemon. Is 'docker -d' running on this host?")
)
func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
method := reflect.ValueOf(cli).MethodByName(methodName)
if !method.IsValid() {
return nil, false
}
return method.Interface().(func(...string) error), true
}
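// Illustrative sketch (not part of the original source): getMethod maps a
// CLI verb to its Cmd* method via reflection, e.g.
//
// method, ok := cli.getMethod("ps") // resolves to (*DockerCli).CmdPs
// if ok {
// _ = method("-a") // equivalent to `docker ps -a`
// }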
func ParseCommands(proto, addr string, args ...string) error {
cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, proto, addr)
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
if !exists {
fmt.Println("Error: Command not found:", args[0])
return cli.CmdHelp(args[1:]...)
}
return method(args[1:]...)
}
return cli.CmdHelp(args...)
}
func (cli *DockerCli) CmdHelp(args ...string) error {
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
if !exists {
fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0])
} else {
method("--help")
return nil
}
}
help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET)
for _, command := range [][]string{
{"attach", "Attach to a running container"},
{"build", "Build a container from a Dockerfile"},
{"commit", "Create a new image from a container's changes"},
{"cp", "Copy files/folders from the containers filesystem to the host path"},
{"diff", "Inspect changes on a container's filesystem"},
{"events", "Get real time events from the server"},
{"export", "Stream the contents of a container as a tar archive"},
{"history", "Show the history of an image"},
{"images", "List images"},
{"import", "Create a new filesystem image from the contents of a tarball"},
{"info", "Display system-wide information"},
{"insert", "Insert a file in an image"},
{"inspect", "Return low-level information on a container"},
{"kill", "Kill a running container"},
{"load", "Load an image from a tar archive"},
{"login", "Register or Login to the docker registry server"},
{"logs", "Fetch the logs of a container"},
{"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
{"ps", "List containers"},
{"pull", "Pull an image or a repository from the docker registry server"},
{"push", "Push an image or a repository to the docker registry server"},
{"restart", "Restart a running container"},
{"rm", "Remove one or more containers"},
{"rmi", "Remove one or more images"},
{"run", "Run a command in a new container"},
{"save", "Save an image to a tar archive"},
{"search", "Search for an image in the docker index"},
{"start", "Start a stopped container"},
{"stop", "Stop a running container"},
{"tag", "Tag an image into a repository"},
{"top", "Lookup the running processes of a container"},
{"version", "Show the docker version information"},
{"wait", "Block until a container stops, then print its exit code"},
} {
help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1])
}
fmt.Fprintf(cli.err, "%s\n", help)
return nil
}
func (cli *DockerCli) CmdInsert(args ...string) error {
cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 3 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("url", cmd.Arg(1))
v.Set("path", cmd.Arg(2))
return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil)
}
// MkBuildContext returns an archive of an empty context with the contents
// of `dockerfile` at the path ./Dockerfile
func MkBuildContext(dockerfile string, files [][2]string) (archive.Archive, error) {
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
files = append(files, [2]string{"Dockerfile", dockerfile})
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return buf, nil
}
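// Illustrative usage (not part of the original source): building an
// in-memory context from a Dockerfile string, as `docker build -` does with
// stdin; error handling is elided for brevity:
//
// ctx, _ := MkBuildContext("FROM busybox\nCMD [\"echo\", \"hi\"]\n", nil)
// sf := utils.NewStreamFormatter(false)
// body := utils.ProgressReader(ioutil.NopCloser(ctx), 0, os.Stderr, sf, true, "", "Uploading context")
// // `body` is then POSTed to /build, as CmdBuild does below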
func (cli *DockerCli) CmdBuild(args ...string) error {
cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH")
tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success")
suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress verbose build output")
noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
rm := cmd.Bool([]string{"#rm", "-rm"}, false, "Remove intermediate containers after a successful build")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
var (
context archive.Archive
isRemote bool
err error
)
if cmd.Arg(0) == "-" {
// As a special case, 'docker build -' will build from an empty context with the
// contents of stdin as a Dockerfile
dockerfile, err := ioutil.ReadAll(cli.in)
if err != nil {
return err
}
context, err = MkBuildContext(string(dockerfile), nil)
} else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) {
isRemote = true
} else {
if _, err := os.Stat(cmd.Arg(0)); err != nil {
return err
}
filename := path.Join(cmd.Arg(0), "Dockerfile")
if _, err = os.Stat(filename); os.IsNotExist(err) {
return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0))
}
context, err = archive.Tar(cmd.Arg(0), archive.Uncompressed)
}
var body io.Reader
// Setup an upload progress bar
// FIXME: ProgressReader shouldn't be this annoying to use
if context != nil {
sf := utils.NewStreamFormatter(false)
body = utils.ProgressReader(ioutil.NopCloser(context), 0, cli.err, sf, true, "", "Uploading context")
}
// Upload the build context
v := &url.Values{}
v.Set("t", *tag)
if *suppressOutput {
v.Set("q", "1")
}
if isRemote {
v.Set("remote", cmd.Arg(0))
}
if *noCache {
v.Set("nocache", "1")
}
if *rm {
v.Set("rm", "1")
}
cli.LoadConfigFile()
headers := http.Header(make(map[string][]string))
buf, err := json.Marshal(cli.configFile)
if err != nil {
return err
}
headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
if context != nil {
headers.Set("Content-Type", "application/tar")
}
err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers)
if jerr, ok := err.(*utils.JSONError); ok {
// If no error code is set, default to 1
if jerr.Code == 0 {
jerr.Code = 1
}
return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
}
return err
}
// 'docker login': log in or register a user with the registry service.
func (cli *DockerCli) CmdLogin(args ...string) error {
cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+auth.IndexServerAddress()+"\" is the default.")
var username, password, email string
cmd.StringVar(&username, []string{"u", "-username"}, "", "username")
cmd.StringVar(&password, []string{"p", "-password"}, "", "password")
cmd.StringVar(&email, []string{"e", "-email"}, "", "email")
err := cmd.Parse(args)
if err != nil {
return nil
}
serverAddress := auth.IndexServerAddress()
if len(cmd.Args()) > 0 {
serverAddress, err = registry.ExpandAndVerifyRegistryUrl(cmd.Arg(0))
if err != nil {
return err
}
fmt.Fprintf(cli.out, "Login against server at %s\n", serverAddress)
}
promptDefault := func(prompt string, configDefault string) {
if configDefault == "" {
fmt.Fprintf(cli.out, "%s: ", prompt)
} else {
fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault)
}
}
readInput := func(in io.Reader, out io.Writer) string {
reader := bufio.NewReader(in)
line, _, err := reader.ReadLine()
if err != nil {
fmt.Fprintln(out, err.Error())
os.Exit(1)
}
return string(line)
}
cli.LoadConfigFile()
authconfig, ok := cli.configFile.Configs[serverAddress]
if !ok {
authconfig = auth.AuthConfig{}
}
if username == "" {
promptDefault("Username", authconfig.Username)
username = readInput(cli.in, cli.out)
if username == "" {
username = authconfig.Username
}
}
if username != authconfig.Username {
if password == "" {
oldState, _ := term.SaveState(cli.terminalFd)
fmt.Fprintf(cli.out, "Password: ")
term.DisableEcho(cli.terminalFd, oldState)
password = readInput(cli.in, cli.out)
fmt.Fprint(cli.out, "\n")
term.RestoreTerminal(cli.terminalFd, oldState)
if password == "" {
return fmt.Errorf("Error : Password Required")
}
}
if email == "" {
promptDefault("Email", authconfig.Email)
email = readInput(cli.in, cli.out)
if email == "" {
email = authconfig.Email
}
}
} else {
password = authconfig.Password
email = authconfig.Email
}
authconfig.Username = username
authconfig.Password = password
authconfig.Email = email
authconfig.ServerAddress = serverAddress
cli.configFile.Configs[serverAddress] = authconfig
body, statusCode, err := readBody(cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false))
if statusCode == 401 {
delete(cli.configFile.Configs, serverAddress)
auth.SaveConfig(cli.configFile)
return err
}
if err != nil {
return err
}
var out2 engine.Env
err = json.Unmarshal(body, &out2)
if err != nil {
cli.configFile, _ = auth.LoadConfig(os.Getenv("HOME"))
return err
}
auth.SaveConfig(cli.configFile)
if out2.Get("Status") != "" {
fmt.Fprintf(cli.out, "%s\n", out2.Get("Status"))
}
return nil
}
// 'docker wait': block until a container stops
func (cli *DockerCli) CmdWait(args ...string) error {
cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
status, err := waitForExit(cli, name)
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to wait one or more containers")
} else {
fmt.Fprintf(cli.out, "%d\n", status)
}
}
return encounteredError
}
// 'docker version': show version information
func (cli *DockerCli) CmdVersion(args ...string) error {
cmd := cli.Subcmd("version", "", "Show the docker version information.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 0 {
cmd.Usage()
return nil
}
if VERSION != "" {
fmt.Fprintf(cli.out, "Client version: %s\n", VERSION)
}
fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version())
if GITCOMMIT != "" {
fmt.Fprintf(cli.out, "Git commit (client): %s\n", GITCOMMIT)
}
body, _, err := readBody(cli.call("GET", "/version", nil, false))
if err != nil {
return err
}
out := engine.NewOutput()
remoteVersion, err := out.AddEnv()
if err != nil {
utils.Errorf("Error reading remote version: %s\n", err)
return err
}
if _, err := out.Write(body); err != nil {
utils.Errorf("Error reading remote version: %s\n", err)
return err
}
out.Close()
fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version"))
fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
release := utils.GetReleaseVersion()
if release != "" {
fmt.Fprintf(cli.out, "Last stable version: %s", release)
if (VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) {
fmt.Fprintf(cli.out, ", please update docker")
}
fmt.Fprintf(cli.out, "\n")
}
return nil
}
// 'docker info': display system-wide information.
func (cli *DockerCli) CmdInfo(args ...string) error {
cmd := cli.Subcmd("info", "", "Display system-wide information")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 0 {
cmd.Usage()
return nil
}
body, _, err := readBody(cli.call("GET", "/info", nil, false))
if err != nil {
return err
}
out := engine.NewOutput()
remoteInfo, err := out.AddEnv()
if err != nil {
return err
}
if _, err := out.Write(body); err != nil {
utils.Errorf("Error reading remote info: %s\n", err)
return err
}
out.Close()
fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers"))
fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images"))
fmt.Fprintf(cli.out, "Driver: %s\n", remoteInfo.Get("Driver"))
var driverStatus [][2]string
if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
return err
}
for _, pair := range driverStatus {
fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
}
if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd"))
fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" {
fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1)
}
if initPath := remoteInfo.Get("InitPath"); initPath != "" {
fmt.Fprintf(cli.out, "Init Path: %s\n", initPath)
}
}
if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
cli.LoadConfigFile()
u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username
if len(u) > 0 {
fmt.Fprintf(cli.out, "Username: %v\n", u)
fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress"))
}
}
if !remoteInfo.GetBool("MemoryLimit") {
fmt.Fprintf(cli.err, "WARNING: No memory limit support\n")
}
if !remoteInfo.GetBool("SwapLimit") {
fmt.Fprintf(cli.err, "WARNING: No swap limit support\n")
}
if !remoteInfo.GetBool("IPv4Forwarding") {
fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n")
}
return nil
}
func (cli *DockerCli) CmdStop(args ...string) error {
cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)")
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("t", strconv.Itoa(*nSeconds))
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to stop one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdRestart(args ...string) error {
cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container")
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("t", strconv.Itoa(*nSeconds))
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to restart one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
sigc := make(chan os.Signal, 1)
utils.CatchAll(sigc)
go func() {
for s := range sigc {
if s == syscall.SIGCHLD {
continue
}
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%d", cid, s), nil, false)); err != nil {
utils.Debugf("Error sending signal: %s", err)
}
}
}()
return sigc
}
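// Illustrative usage (not part of the original source): callers attach
// signal forwarding before streaming I/O and stop it on return, e.g.
//
// sigc := cli.forwardAllSignals(containerID)
// defer utils.StopCatch(sigc)
//
// SIGCHLD is skipped in the loop above, presumably because it concerns the
// CLI's own child processes rather than the remote container.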
func (cli *DockerCli) CmdStart(args ...string) error {
cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process")
openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var cErr chan error
var tty bool
if *attach || *openStdin {
if cmd.NArg() > 1 {
return fmt.Errorf("Impossible to start and attach multiple containers at once.")
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false))
if err != nil {
return err
}
container := &Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
}
tty = container.Config.Tty
if !container.Config.Tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
defer utils.StopCatch(sigc)
}
var in io.ReadCloser
v := url.Values{}
v.Set("stream", "1")
if *openStdin && container.Config.OpenStdin {
v.Set("stdin", "1")
in = cli.in
}
v.Set("stdout", "1")
v.Set("stderr", "1")
cErr = utils.Go(func() error {
return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil)
})
}
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false))
if err != nil {
if !*attach || !*openStdin {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to start one or more containers")
}
} else {
if !*attach || !*openStdin {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
}
if encounteredError != nil {
if *openStdin || *attach {
cli.in.Close()
<-cErr
}
return encounteredError
}
if *openStdin || *attach {
if tty && cli.isTerminal {
if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
utils.Errorf("Error monitoring TTY size: %s\n", err)
}
}
return <-cErr
}
return nil
}
func (cli *DockerCli) CmdInspect(args ...string) error {
cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image")
tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var tmpl *template.Template
if *tmplStr != "" {
var err error
if tmpl, err = template.New("").Parse(*tmplStr); err != nil {
fmt.Fprintf(cli.err, "Template parsing error: %v\n", err)
return &utils.StatusError{StatusCode: 64,
Status: "Template parsing error: " + err.Error()}
}
}
indented := new(bytes.Buffer)
indented.WriteByte('[')
status := 0
for _, name := range cmd.Args() {
obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
if err != nil {
obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false))
if err != nil {
if strings.Contains(err.Error(), "No such") {
fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name)
} else {
fmt.Fprintf(cli.err, "%s", err)
}
status = 1
continue
}
}
if tmpl == nil {
if err = json.Indent(indented, obj, "", " "); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
status = 1
continue
}
} else {
// Has template, will render
var value interface{}
if err := json.Unmarshal(obj, &value); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
status = 1
continue
}
if err := tmpl.Execute(cli.out, value); err != nil {
return err
}
cli.out.Write([]byte{'\n'})
}
indented.WriteString(",")
}
if indented.Len() > 1 {
// Remove trailing ','
indented.Truncate(indented.Len() - 1)
}
indented.WriteByte(']')
if tmpl == nil {
if _, err := io.Copy(cli.out, indented); err != nil {
return err
}
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdTop(args ...string) error {
cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Lookup the running processes of a container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() == 0 {
cmd.Usage()
return nil
}
val := url.Values{}
if cmd.NArg() > 1 {
val.Set("ps_args", strings.Join(cmd.Args()[1:], " "))
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false))
if err != nil {
return err
}
procs := APITop{}
err = json.Unmarshal(body, &procs)
if err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
fmt.Fprintln(w, strings.Join(procs.Titles, "\t"))
for _, proc := range procs.Processes {
fmt.Fprintln(w, strings.Join(proc, "\t"))
}
w.Flush()
return nil
}
func (cli *DockerCli) CmdPort(args ...string) error {
cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 {
cmd.Usage()
return nil
}
port := cmd.Arg(1)
proto := "tcp"
parts := strings.SplitN(port, "/", 2)
if len(parts) == 2 && len(parts[1]) != 0 {
port = parts[0]
proto = parts[1]
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false))
if err != nil {
return err
}
var out Container
err = json.Unmarshal(body, &out)
if err != nil {
return err
}
if frontends, exists := out.NetworkSettings.Ports[Port(port+"/"+proto)]; exists && frontends != nil {
for _, frontend := range frontends {
fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
}
} else {
return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0))
}
return nil
}
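// Illustrative example (not part of the original source): PRIVATE_PORT may
// carry an optional protocol suffix, defaulting to tcp:
//
// docker port web 8080 -> looks up the mapping for "8080/tcp"
// docker port web 8080/udp -> looks up the mapping for "8080/udp"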
// 'docker rmi IMAGE' removes all images with the name IMAGE
func (cli *DockerCli) CmdRmi(args ...string) error {
cmd := cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
body, _, err := readBody(cli.call("DELETE", "/images/"+name, nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more images")
} else {
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more images")
continue
}
for _, out := range outs.Data {
if out.Get("Deleted") != "" {
fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted"))
} else {
fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged"))
}
}
}
}
return encounteredError
}
func (cli *DockerCli) CmdHistory(args ...string) error {
cmd := cli.Subcmd("history", "[OPTIONS] IMAGE", "Show the history of an image")
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "only show numeric IDs")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE")
}
for _, out := range outs.Data {
outID := out.Get("ID")
if !*quiet {
if *noTrunc {
fmt.Fprintf(w, "%s\t", outID)
} else {
fmt.Fprintf(w, "%s\t", utils.TruncateID(outID))
}
fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))))
if *noTrunc {
fmt.Fprintf(w, "%s\t", out.Get("CreatedBy"))
} else {
fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45))
}
fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("Size")))
} else {
if *noTrunc {
fmt.Fprintln(w, outID)
} else {
fmt.Fprintln(w, utils.TruncateID(outID))
}
}
}
w.Flush()
return nil
}
func (cli *DockerCli) CmdRm(args ...string) error {
cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers")
v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container")
link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
val := url.Values{}
if *v {
val.Set("v", "1")
}
if *link {
val.Set("link", "1")
}
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
// 'docker kill NAME' kills a running container
func (cli *DockerCli) CmdKill(args ...string) error {
cmd := cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL, or specified signal)")
signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to kill one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdImport(args ...string) error {
cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var src, repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
} else {
src = cmd.Arg(0)
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
v := url.Values{}
v.Set("repo", repository)
v.Set("tag", tag)
v.Set("fromSrc", src)
var in io.Reader
if src == "-" {
in = cli.in
}
return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil)
}
func (cli *DockerCli) CmdPush(args ...string) error {
cmd := cli.Subcmd("push", "NAME", "Push an image or a repository to the registry")
if err := cmd.Parse(args); err != nil {
return nil
}
name := cmd.Arg(0)
if name == "" {
cmd.Usage()
return nil
}
cli.LoadConfigFile()
// Resolve the Repository name from fqn to endpoint + name
endpoint, _, err := registry.ResolveRepositoryName(name)
if err != nil {
return err
}
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
// If we're not using a custom registry, we know the restrictions
// applied to repository names and can warn the user in advance.
// Custom repositories can have different rules, and we must also
// allow pushing by image ID.
if len(strings.SplitN(name, "/", 2)) == 1 {
username := cli.configFile.Configs[auth.IndexServerAddress()].Username
if username == "" {
username = "<user>"
}
return fmt.Errorf("Impossible to push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", username, name)
}
v := url.Values{}
push := func(authConfig auth.AuthConfig) error {
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{
"X-Registry-Auth": registryAuthHeader,
})
}
if err := push(authConfig); err != nil {
if err.Error() == registry.ErrLoginRequired.Error() {
fmt.Fprintln(cli.out, "\nPlease login prior to push:")
if err := cli.CmdLogin(endpoint); err != nil {
return err
}
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
return push(authConfig)
}
return err
}
return nil
}
func (cli *DockerCli) CmdPull(args ...string) error {
cmd := cli.Subcmd("pull", "NAME", "Pull an image or a repository from the registry")
tag := cmd.String([]string{"t", "-tag"}, "", "Download tagged image in repository")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0))
if *tag == "" {
*tag = parsedTag
}
// Resolve the Repository name from fqn to endpoint + name
endpoint, _, err := registry.ResolveRepositoryName(remote)
if err != nil {
return err
}
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
v := url.Values{}
v.Set("fromImage", remote)
v.Set("tag", *tag)
pull := func(authConfig auth.AuthConfig) error {
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{
"X-Registry-Auth": registryAuthHeader,
})
}
if err := pull(authConfig); err != nil {
if err.Error() == registry.ErrLoginRequired.Error() {
fmt.Fprintln(cli.out, "\nPlease login prior to pull:")
if err := cli.CmdLogin(endpoint); err != nil {
return err
}
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
return pull(authConfig)
}
return err
}
return nil
}
func (cli *DockerCli) CmdImages(args ...string) error {
cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images")
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "only show numeric IDs")
all := cmd.Bool([]string{"a", "-all"}, false, "show all images (by default filter out the intermediate images used to build)")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
flViz := cmd.Bool([]string{"v", "#viz", "-viz"}, false, "output graph in graphviz format")
flTree := cmd.Bool([]string{"t", "#tree", "-tree"}, false, "output graph in tree format")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 1 {
cmd.Usage()
return nil
}
filter := cmd.Arg(0)
if *flViz || *flTree {
body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
var (
printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)
startImage *engine.Env
roots = engine.NewTable("Created", outs.Len())
byParent = make(map[string]*engine.Table)
)
for _, image := range outs.Data {
if image.Get("ParentId") == "" {
roots.Add(image)
} else {
if children, exists := byParent[image.Get("ParentId")]; exists {
children.Add(image)
} else {
byParent[image.Get("ParentId")] = engine.NewTable("Created", 1)
byParent[image.Get("ParentId")].Add(image)
}
}
if filter != "" {
if filter == image.Get("ID") || filter == utils.TruncateID(image.Get("ID")) {
startImage = image
}
for _, repotag := range image.GetList("RepoTags") {
if repotag == filter {
startImage = image
}
}
}
}
if *flViz {
fmt.Fprintf(cli.out, "digraph docker {\n")
printNode = (*DockerCli).printVizNode
} else {
printNode = (*DockerCli).printTreeNode
}
if startImage != nil {
root := engine.NewTable("Created", 1)
root.Add(startImage)
cli.WalkTree(*noTrunc, root, byParent, "", printNode)
} else if filter == "" {
cli.WalkTree(*noTrunc, roots, byParent, "", printNode)
}
if *flViz {
fmt.Fprintf(cli.out, " base [style=invisible]\n}\n")
}
} else {
v := url.Values{}
if cmd.NArg() == 1 {
v.Set("filter", filter)
}
if *all {
v.Set("all", "1")
}
body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
}
for _, out := range outs.Data {
for _, repotag := range out.GetList("RepoTags") {
repo, tag := utils.ParseRepositoryTag(repotag)
outID := out.Get("ID")
if !*noTrunc {
outID = utils.TruncateID(outID)
}
if !*quiet {
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), utils.HumanSize(out.GetInt64("VirtualSize")))
} else {
fmt.Fprintln(w, outID)
}
}
}
if !*quiet {
w.Flush()
}
}
return nil
}
func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) {
length := images.Len()
if length > 1 {
for index, image := range images.Data {
if index+1 == length {
printNode(cli, noTrunc, image, prefix+"└─")
if subimages, exists := byParent[image.Get("ID")]; exists {
cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode)
}
} else {
printNode(cli, noTrunc, image, prefix+"\u251C─")
if subimages, exists := byParent[image.Get("ID")]; exists {
cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode)
}
}
}
} else {
for _, image := range images.Data {
printNode(cli, noTrunc, image, prefix+"└─")
if subimages, exists := byParent[image.Get("ID")]; exists {
cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode)
}
}
}
}
func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) {
var (
imageID string
parentID string
)
if noTrunc {
imageID = image.Get("ID")
parentID = image.Get("ParentId")
} else {
imageID = utils.TruncateID(image.Get("ID"))
parentID = utils.TruncateID(image.Get("ParentId"))
}
if parentID == "" {
fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID)
} else {
fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID)
}
if image.GetList("RepoTags")[0] != "<none>:<none>" {
fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n",
imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n"))
}
}
func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) {
var imageID string
if noTrunc {
imageID = image.Get("ID")
} else {
imageID = utils.TruncateID(image.Get("ID"))
}
fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.GetInt64("VirtualSize")))
if image.GetList("RepoTags")[0] != "<none>:<none>" {
fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", "))
} else {
fmt.Fprint(cli.out, "\n")
}
}
func displayablePorts(ports *engine.Table) string {
result := []string{}
for _, port := range ports.Data {
if port.Get("IP") == "" {
result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type")))
} else {
result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type")))
}
}
sort.Strings(result)
return strings.Join(result, ", ")
}
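// Illustrative example (not part of the original source): displayablePorts
// emits one entry per port, sorted, in one of two shapes:
//
// <public>/<proto> (no host IP recorded)
// <ip>:<public>-><private>/<proto> (published host mapping)
//
// e.g. "0.0.0.0:49153->22/tcp" maps host port 49153 to container port 22.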
func (cli *DockerCli) CmdPs(args ...string) error {
cmd := cli.Subcmd("ps", "[OPTIONS]", "List containers")
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
size := cmd.Bool([]string{"s", "-size"}, false, "Display sizes")
all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.")
since := cmd.String([]string{"#sinceId", "-since-id"}, "", "Show only containers created since Id, include non-running ones.")
before := cmd.String([]string{"#beforeId", "-before-id"}, "", "Show only container created before Id, include non-running ones.")
last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.")
if err := cmd.Parse(args); err != nil {
return nil
}
v := url.Values{}
if *last == -1 && *nLatest {
*last = 1
}
if *all {
v.Set("all", "1")
}
if *last != -1 {
v.Set("limit", strconv.Itoa(*last))
}
if *since != "" {
v.Set("since", *since)
}
if *before != "" {
v.Set("before", *before)
}
if *size {
v.Set("size", "1")
}
body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES")
if *size {
fmt.Fprintln(w, "\tSIZE")
} else {
fmt.Fprint(w, "\n")
}
}
for _, out := range outs.Data {
var (
outID = out.Get("ID")
outNames = out.GetList("Names")
)
if !*noTrunc {
outID = utils.TruncateID(outID)
}
// Remove the leading / from the names
for i := 0; i < len(outNames); i++ {
outNames[i] = outNames[i][1:]
}
if !*quiet {
var (
outCommand = out.Get("Command")
ports = engine.NewTable("", 0)
)
if !*noTrunc {
outCommand = utils.Trunc(outCommand, 20)
}
ports.ReadListFrom([]byte(out.Get("Ports")))
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), displayablePorts(ports), strings.Join(outNames, ","))
if *size {
if out.GetInt("SizeRootFs") > 0 {
fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs")))
} else {
fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("SizeRw")))
}
} else {
fmt.Fprint(w, "\n")
}
} else {
fmt.Fprintln(w, outID)
}
}
if !*quiet {
w.Flush()
}
return nil
}
func (cli *DockerCli) CmdCommit(args ...string) error {
cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes")
flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g. \"John Hannibal Smith <[email protected]>\")")
flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`)
if err := cmd.Parse(args); err != nil {
return nil
}
var name, repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n")
name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
} else {
name = cmd.Arg(0)
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
if name == "" {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("container", name)
v.Set("repo", repository)
v.Set("tag", tag)
v.Set("comment", *flComment)
v.Set("author", *flAuthor)
var config *Config
if *flConfig != "" {
config = &Config{}
if err := json.Unmarshal([]byte(*flConfig), config); err != nil {
return err
}
}
body, _, err := readBody(cli.call("POST", "/commit?"+v.Encode(), config, false))
if err != nil {
return err
}
apiID := &APIID{}
err = json.Unmarshal(body, apiID)
if err != nil {
return err
}
fmt.Fprintf(cli.out, "%s\n", apiID.ID)
return nil
}
func (cli *DockerCli) CmdEvents(args ...string) error {
cmd := cli.Subcmd("events", "[OPTIONS]", "Get real time events from the server")
since := cmd.String([]string{"#since", "-since"}, "", "Show previously created events and then stream.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
v := url.Values{}
if *since != "" {
loc := time.FixedZone(time.Now().Zone())
format := "2006-01-02 15:04:05 -0700 MST"
if len(*since) < len(format) {
format = format[:len(*since)]
}
if t, err := time.ParseInLocation(format, *since, loc); err == nil {
v.Set("since", strconv.FormatInt(t.Unix(), 10))
} else {
v.Set("since", *since)
}
}
if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdExport(args ...string) error {
cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdDiff(args ...string) error {
cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false))
if err != nil {
return err
}
outs := engine.NewTable("", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
for _, change := range outs.Data {
var kind string
switch change.GetInt("Kind") {
case archive.ChangeModify:
kind = "C"
case archive.ChangeAdd:
kind = "A"
case archive.ChangeDelete:
kind = "D"
}
fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path"))
}
return nil
}
func (cli *DockerCli) CmdLogs(args ...string) error {
cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
name := cmd.Arg(0)
body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
if err != nil {
return err
}
container := &Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
}
v := url.Values{}
v.Set("logs", "1")
v.Set("stdout", "1")
v.Set("stderr", "1")
if *follow && container.State.Running {
v.Set("stream", "1")
}
if err := cli.hijack("POST", "/containers/"+name+"/attach?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdAttach(args ...string) error {
cmd := cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container")
noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin")
proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode)")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
name := cmd.Arg(0)
body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
if err != nil {
return err
}
container := &Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
}
if !container.State.IsRunning() {
return fmt.Errorf("Impossible to attach to a stopped container, start it first")
}
if container.Config.Tty && cli.isTerminal {
if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
utils.Debugf("Error monitoring TTY size: %s", err)
}
}
var in io.ReadCloser
v := url.Values{}
v.Set("stream", "1")
if !*noStdin && container.Config.OpenStdin {
v.Set("stdin", "1")
in = cli.in
}
v.Set("stdout", "1")
v.Set("stderr", "1")
if *proxy && !container.Config.Tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
defer utils.StopCatch(sigc)
}
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil {
return err
}
_, status, err := getExitCode(cli, cmd.Arg(0))
if err != nil {
return err
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdSearch(args ...string) error {
cmd := cli.Subcmd("search", "TERM", "Search the docker index for images")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
trusted := cmd.Bool([]string{"t", "#trusted", "-trusted"}, false, "Only show trusted builds")
stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only display images with at least x stars")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("term", cmd.Arg(0))
body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, false))
if err != nil {
return err
}
outs := engine.NewTable("star_count", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0)
fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n")
for _, out := range outs.Data {
if (*trusted && !out.GetBool("is_trusted")) || (*stars > out.GetInt("star_count")) {
continue
}
desc := strings.Replace(out.Get("description"), "\n", " ", -1)
desc = strings.Replace(desc, "\r", " ", -1)
if !*noTrunc && len(desc) > 45 {
desc = utils.Trunc(desc, 42) + "..."
}
fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count"))
if out.GetBool("is_official") {
fmt.Fprint(w, "[OK]")
}
fmt.Fprint(w, "\t")
if out.GetBool("is_trusted") {
fmt.Fprint(w, "[OK]")
}
fmt.Fprint(w, "\n")
}
w.Flush()
return nil
}
// Ports type - Used to parse multiple -p flags
type ports []int
func (cli *DockerCli) CmdTag(args ...string) error {
cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE REPOSITORY[:TAG]", "Tag an image into a repository")
force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 && cmd.NArg() != 3 {
cmd.Usage()
return nil
}
var repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. Please use IMAGE [REPOSITORY[:TAG]]\n")
repository, tag = cmd.Arg(1), cmd.Arg(2)
} else {
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
v := url.Values{}
v.Set("repo", repository)
v.Set("tag", tag)
if *force {
v.Set("force", "1")
}
if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil {
return err
}
return nil
}
// FIXME: Only used in tests
func ParseRun(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) {
cmd := flag.NewFlagSet("run", flag.ContinueOnError)
cmd.SetOutput(ioutil.Discard)
cmd.Usage = nil
return parseRun(cmd, args, sysInfo)
}
func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) {
var (
// FIXME: use utils.ListOpts for attach and volumes?
flAttach = NewListOpts(ValidateAttach)
flVolumes = NewListOpts(ValidatePath)
flLinks = NewListOpts(ValidateLink)
flEnv = NewListOpts(ValidateEnv)
flPublish ListOpts
flExpose ListOpts
flDns ListOpts
flVolumesFrom ListOpts
flLxcOpts ListOpts
flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)")
flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id")
flNetwork = cmd.Bool([]string{"n", "-networking"}, true, "Enable networking for this container")
flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container")
flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to the host interfaces")
flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep stdin open even if not attached")
flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-tty")
flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file")
flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default entrypoint of the image")
flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name")
flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit (format: <number><optional unit>, where unit = b, k, m or g)")
flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID")
flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container")
flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
// For documentation purpose
_ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode)")
_ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
)
cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to stdin, stdout or stderr.")
cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)")
cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables")
cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", PortSpecTemplateFormat))
cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host")
cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers")
cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)")
cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
if err := cmd.Parse(args); err != nil {
return nil, nil, cmd, err
}
// Check if the kernel supports memory limit cgroup.
if sysInfo != nil && *flMemoryString != "" && !sysInfo.MemoryLimit {
*flMemoryString = ""
}
// Validate input params
if *flDetach && flAttach.Len() > 0 {
return nil, nil, cmd, ErrConflictAttachDetach
}
if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
return nil, nil, cmd, ErrInvalidWorikingDirectory
}
if *flDetach && *flAutoRemove {
return nil, nil, cmd, ErrConflictDetachAutoRemove
}
// If neither -d nor -a is set, attach to everything by default
if flAttach.Len() == 0 && !*flDetach {
flAttach.Set("stdout")
flAttach.Set("stderr")
if *flStdin {
flAttach.Set("stdin")
}
}
var flMemory int64
if *flMemoryString != "" {
parsedMemory, err := utils.RAMInBytes(*flMemoryString)
if err != nil {
return nil, nil, cmd, err
}
flMemory = parsedMemory
}
var binds []string
// add any bind targets to the list of container volumes
for bind := range flVolumes.GetMap() {
if arr := strings.Split(bind, ":"); len(arr) > 1 {
if arr[0] == "/" {
return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'")
}
dstDir := arr[1]
flVolumes.Set(dstDir)
binds = append(binds, bind)
flVolumes.Delete(bind)
} else if bind == "/" {
return nil, nil, cmd, fmt.Errorf("Invalid volume: path can't be '/'")
}
}
var (
parsedArgs = cmd.Args()
runCmd []string
entrypoint []string
image string
)
if len(parsedArgs) >= 1 {
image = cmd.Arg(0)
}
if len(parsedArgs) > 1 {
runCmd = parsedArgs[1:]
}
if *flEntrypoint != "" {
entrypoint = []string{*flEntrypoint}
}
lxcConf, err := parseLxcConfOpts(flLxcOpts)
if err != nil {
return nil, nil, cmd, err
}
var (
domainname string
hostname = *flHostname
parts = strings.SplitN(hostname, ".", 2)
)
if len(parts) > 1 {
hostname = parts[0]
domainname = parts[1]
}
ports, portBindings, err := parsePortSpecs(flPublish.GetAll())
if err != nil {
return nil, nil, cmd, err
}
// Merge in exposed ports to the map of published ports
for _, e := range flExpose.GetAll() {
if strings.Contains(e, ":") {
return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e)
}
p := NewPort(splitProtoPort(e))
if _, exists := ports[p]; !exists {
ports[p] = struct{}{}
}
}
config := &Config{
Hostname: hostname,
Domainname: domainname,
PortSpecs: nil, // Deprecated
ExposedPorts: ports,
User: *flUser,
Tty: *flTty,
NetworkDisabled: !*flNetwork,
OpenStdin: *flStdin,
Memory: flMemory,
CpuShares: *flCpuShares,
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
Env: flEnv.GetAll(),
Cmd: runCmd,
Dns: flDns.GetAll(),
Image: image,
Volumes: flVolumes.GetMap(),
VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","),
Entrypoint: entrypoint,
WorkingDir: *flWorkingDir,
}
hostConfig := &HostConfig{
Binds: binds,
ContainerIDFile: *flContainerIDFile,
LxcConf: lxcConf,
Privileged: *flPrivileged,
PortBindings: portBindings,
Links: flLinks.GetAll(),
PublishAllPorts: *flPublishAll,
}
if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit {
//fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
}
return config, hostConfig, cmd, nil
}
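// Illustrative usage (not part of the original source): parsing the
// client-side flags for `docker run -i -t ubuntu bash` yields a Config with
// Image "ubuntu", Cmd ["bash"], and Tty/OpenStdin enabled:
//
// cmd := flag.NewFlagSet("run", flag.ContinueOnError)
// config, hostConfig, _, err := parseRun(cmd, []string{"-i", "-t", "ubuntu", "bash"}, nil)
// if err == nil && config.Tty && config.OpenStdin {
// _ = hostConfig // would be passed to /containers/<id>/start
// }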
func (cli *DockerCli) CmdRun(args ...string) error {
config, hostConfig, cmd, err := parseRun(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil)
if err != nil {
return err
}
if config.Image == "" {
cmd.Usage()
return nil
}
// Retrieve relevant client-side config
var (
flName = cmd.Lookup("name")
flRm = cmd.Lookup("rm")
flSigProxy = cmd.Lookup("sig-proxy")
autoRemove, _ = strconv.ParseBool(flRm.Value.String())
sigProxy, _ = strconv.ParseBool(flSigProxy.Value.String())
)
// Disable sigProxy when the container runs with a TTY
if config.Tty {
sigProxy = false
}
var containerIDFile io.WriteCloser
if len(hostConfig.ContainerIDFile) > 0 {
if _, err := os.Stat(hostConfig.ContainerIDFile); err == nil {
return fmt.Errorf("cid file found, make sure the other container isn't running or delete %s", hostConfig.ContainerIDFile)
}
if containerIDFile, err = os.Create(hostConfig.ContainerIDFile); err != nil {
return fmt.Errorf("failed to create the container ID file: %s", err)
}
defer containerIDFile.Close()
}
containerValues := url.Values{}
if name := flName.Value.String(); name != "" {
containerValues.Set("name", name)
}
// Create the container
body, statusCode, err := readBody(cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false))
// If the image was not found, try to pull it
if statusCode == 404 {
_, tag := utils.ParseRepositoryTag(config.Image)
if tag == "" {
tag = DEFAULTTAG
}
fmt.Fprintf(cli.err, "Unable to find image '%s' (tag: %s) locally\n", config.Image, tag)
v := url.Values{}
repos, tag := utils.ParseRepositoryTag(config.Image)
v.Set("fromImage", repos)
v.Set("tag", tag)
// Resolve the Repository name from fqn to endpoint + name
endpoint, _, err := registry.ResolveRepositoryName(repos)
if err != nil {
return err
}
// Load the auth config file, to be able to pull the image
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil {
return err
}
if body, _, err = readBody(cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false)); err != nil {
return err
}
} else if err != nil {
return err
}
var runResult APIRun
if err := json.Unmarshal(body, &runResult); err != nil {
return err
}
for _, warning := range runResult.Warnings {
fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
}
if len(hostConfig.ContainerIDFile) > 0 {
if _, err = containerIDFile.Write([]byte(runResult.ID)); err != nil {
return fmt.Errorf("failed to write the container ID to the file: %s", err)
}
}
if sigProxy {
sigc := cli.forwardAllSignals(runResult.ID)
defer utils.StopCatch(sigc)
}
var (
waitDisplayId chan struct{}
errCh chan error
)
if !config.AttachStdout && !config.AttachStderr {
// Make this asynchronous so the client can write to stdin before having to read the container ID
waitDisplayId = make(chan struct{})
go func() {
defer close(waitDisplayId)
fmt.Fprintf(cli.out, "%s\n", runResult.ID)
}()
}
// We need to instantiate the chan because the select needs it. It can
// be closed but can't be uninitialized.
hijacked := make(chan io.Closer)
// Block the return until the chan gets closed
defer func() {
utils.Debugf("End of CmdRun(), Waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
utils.Errorf("Hijack did not finish (chan still open)")
}
}()
if config.AttachStdin || config.AttachStdout || config.AttachStderr {
var (
out, stderr io.Writer
in io.ReadCloser
v = url.Values{}
)
v.Set("stream", "1")
if config.AttachStdin {
v.Set("stdin", "1")
in = cli.in
}
if config.AttachStdout {
v.Set("stdout", "1")
out = cli.out
}
if config.AttachStderr {
v.Set("stderr", "1")
if config.Tty {
stderr = cli.out
} else {
stderr = cli.err
}
}
errCh = utils.Go(func() error {
return cli.hijack("POST", "/containers/"+runResult.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked)
})
} else {
close(hijacked)
}
// Acknowledge the hijack before starting
select {
case closer := <-hijacked:
// Make sure that the hijack gets closed when returning (results in
// closing the hijack chan and freeing the server's goroutines).
if closer != nil {
defer closer.Close()
}
case err := <-errCh:
if err != nil {
utils.Debugf("Error hijack: %s", err)
return err
}
}
// Start the container
if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.ID+"/start", hostConfig, false)); err != nil {
return err
}
if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal {
if err := cli.monitorTtySize(runResult.ID); err != nil {
utils.Errorf("Error monitoring TTY size: %s\n", err)
}
}
if errCh != nil {
if err := <-errCh; err != nil {
utils.Debugf("Error hijack: %s", err)
return err
}
}
// Detached mode: wait for the id to be displayed and return.
if !config.AttachStdout && !config.AttachStderr {
// Detached mode
<-waitDisplayId
return nil
}
var status int
// Attached mode
if autoRemove {
// Autoremove: wait for the container to finish, retrieve
// the exit code and remove the container
if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.ID+"/wait", nil, false)); err != nil {
return err
}
if _, status, err = getExitCode(cli, runResult.ID); err != nil {
return err
}
if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.ID+"?v=1", nil, false)); err != nil {
return err
}
} else {
if !config.Tty {
// In non-tty mode, we can't detach, so we know we need to wait.
if status, err = waitForExit(cli, runResult.ID); err != nil {
return err
}
} else {
// In TTY mode there is a race: if the process dies too slowly, the state can be updated after the getExitCode call
// and result in a wrong exit code.
// No Autoremove: Simply retrieve the exit code
if _, status, err = getExitCode(cli, runResult.ID); err != nil {
return err
}
}
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
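// The request sequence implemented above, summarized (a sketch of the flow,
// not part of the original source):
//
//	POST /containers/create            -> container ID (pull and retry on 404)
//	POST /containers/<id>/attach?...   -> hijacked stream for stdin/stdout/stderr
//	POST /containers/<id>/start        -> start the process
//	POST /containers/<id>/wait         -> block until exit, read the status code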
func (cli *DockerCli) CmdCp(args ...string) error {
cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 {
cmd.Usage()
return nil
}
var copyData APICopy
info := strings.Split(cmd.Arg(0), ":")
if len(info) != 2 {
return fmt.Errorf("Error: Path not specified")
}
copyData.Resource = info[1]
copyData.HostPath = cmd.Arg(1)
stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false)
if stream != nil {
defer stream.Close()
}
if err != nil {
return err
}
if statusCode == 200 {
if err := archive.Untar(stream, copyData.HostPath, nil); err != nil {
return err
}
}
return nil
}
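// Hedged usage sketch for the command above (container name and paths are
// illustrative only):
//
//	docker cp mycontainer:/etc/hosts /tmp
//
// The daemon returns the resource as a tar stream, which archive.Untar
// unpacks into the given host path.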
func (cli *DockerCli) CmdSave(args ...string) error {
cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout)")
if err := cmd.Parse(args); err != nil {
return err
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
image := cmd.Arg(0)
if err := cli.stream("GET", "/images/"+image+"/get", nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdLoad(args ...string) error {
cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN")
if err := cmd.Parse(args); err != nil {
return err
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
if err := cli.stream("POST", "/images/load", cli.in, cli.out, nil); err != nil {
return err
}
return nil
}
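// Hedged shell-level sketch of the save/load round trip implemented above
// (image name is illustrative):
//
//	docker save busybox > busybox.tar
//	docker load < busybox.tar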
func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
var params io.Reader
if data != nil {
buf, err := json.Marshal(data)
if err != nil {
return nil, -1, err
}
params = bytes.NewBuffer(buf)
}
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), params)
if err != nil {
return nil, -1, err
}
if passAuthInfo {
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(auth.IndexServerAddress())
getHeaders := func(authConfig auth.AuthConfig) (map[string][]string, error) {
buf, err := json.Marshal(authConfig)
if err != nil {
return nil, err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil
}
if headers, err := getHeaders(authConfig); err == nil && headers != nil {
for k, v := range headers {
req.Header[k] = v
}
}
}
req.Header.Set("User-Agent", "Docker-Client/"+VERSION)
req.Host = cli.addr
if data != nil {
req.Header.Set("Content-Type", "application/json")
} else if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return nil, -1, ErrConnectionRefused
}
return nil, -1, err
}
clientconn := httputil.NewClientConn(dial, nil)
resp, err := clientconn.Do(req)
if err != nil {
clientconn.Close()
if strings.Contains(err.Error(), "connection refused") {
return nil, -1, ErrConnectionRefused
}
return nil, -1, err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, -1, err
}
if len(body) == 0 {
return nil, resp.StatusCode, fmt.Errorf("Error: %s", http.StatusText(resp.StatusCode))
}
return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body))
}
wrapper := utils.NewReadCloserWrapper(resp.Body, func() error {
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
return clientconn.Close()
})
return wrapper, resp.StatusCode, nil
}
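// call is almost always composed with readBody (defined further below), as in
// this sketch against the /info endpoint (endpoint chosen for illustration):
//
//	body, statusCode, err := readBody(cli.call("GET", "/info", nil, false))
//
// readBody drains and closes the stream so callers only deal with the bytes.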
func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
if (method == "POST" || method == "PUT") && in == nil {
in = bytes.NewReader([]byte{})
}
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), in)
if err != nil {
return err
}
req.Header.Set("User-Agent", "Docker-Client/"+VERSION)
req.Host = cli.addr
if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
if headers != nil {
for k, v := range headers {
req.Header[k] = v
}
}
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
}
return err
}
clientconn := httputil.NewClientConn(dial, nil)
resp, err := clientconn.Do(req)
defer clientconn.Close()
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
}
return err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if len(body) == 0 {
return fmt.Errorf("Error: %s", http.StatusText(resp.StatusCode))
}
return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
}
if matchesContentType(resp.Header.Get("Content-Type"), "application/json") {
return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal)
}
if _, err := io.Copy(out, resp.Body); err != nil {
return err
}
return nil
}
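// Unlike call, which buffers the response for the caller, stream copies the
// body to out as it arrives and renders application/json responses through
// DisplayJSONMessagesStream; it backs long-running endpoints such as the
// /images/create pull in CmdRun above. A hedged sketch:
//
//	err := cli.stream("GET", "/events", nil, cli.out, nil)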
func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
defer func() {
if started != nil {
close(started)
}
}()
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), nil)
if err != nil {
return err
}
req.Header.Set("User-Agent", "Docker-Client/"+VERSION)
req.Header.Set("Content-Type", "plain/text")
req.Host = cli.addr
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
}
return err
}
clientconn := httputil.NewClientConn(dial, nil)
defer clientconn.Close()
// Server hijacks the connection, error 'connection closed' expected
clientconn.Do(req)
rwc, br := clientconn.Hijack()
defer rwc.Close()
if started != nil {
started <- rwc
}
var receiveStdout chan error
var oldState *term.State
if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
oldState, err = term.SetRawTerminal(cli.terminalFd)
if err != nil {
return err
}
defer term.RestoreTerminal(cli.terminalFd, oldState)
}
if stdout != nil || stderr != nil {
receiveStdout = utils.Go(func() (err error) {
defer func() {
if in != nil {
if setRawTerminal && cli.isTerminal {
term.RestoreTerminal(cli.terminalFd, oldState)
}
in.Close()
}
}()
// When TTY is ON, use regular copy
if setRawTerminal {
_, err = io.Copy(stdout, br)
} else {
_, err = utils.StdCopy(stdout, stderr, br)
}
utils.Debugf("[hijack] End of stdout")
return err
})
}
sendStdin := utils.Go(func() error {
if in != nil {
io.Copy(rwc, in)
utils.Debugf("[hijack] End of stdin")
}
if tcpc, ok := rwc.(*net.TCPConn); ok {
if err := tcpc.CloseWrite(); err != nil {
utils.Errorf("Couldn't send EOF: %s\n", err)
}
} else if unixc, ok := rwc.(*net.UnixConn); ok {
if err := unixc.CloseWrite(); err != nil {
utils.Errorf("Couldn't send EOF: %s\n", err)
}
}
// Discard errors due to pipe interruption
return nil
})
if stdout != nil || stderr != nil {
if err := <-receiveStdout; err != nil {
utils.Errorf("Error receiveStdout: %s", err)
return err
}
}
if !cli.isTerminal {
if err := <-sendStdin; err != nil {
utils.Errorf("Error sendStdin: %s", err)
return err
}
}
return nil
}
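// The copy choreography above follows a common Go pattern (generic sketch,
// names illustrative, error handling elided):
//
//	errc := make(chan error, 1)
//	go func() { _, err := io.Copy(conn, stdin); errc <- err }() // stdin -> server
//	io.Copy(stdout, conn)                                       // server -> stdout
//	<-errc
//
// With no TTY the server multiplexes stdout/stderr onto one stream, which is
// why the non-raw branch demuxes it via utils.StdCopy.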
func (cli *DockerCli) getTtySize() (int, int) {
if !cli.isTerminal {
return 0, 0
}
ws, err := term.GetWinsize(cli.terminalFd)
if err != nil {
utils.Errorf("Error getting size: %s", err)
if ws == nil {
return 0, 0
}
}
return int(ws.Height), int(ws.Width)
}
func (cli *DockerCli) resizeTty(id string) {
height, width := cli.getTtySize()
if height == 0 && width == 0 {
return
}
v := url.Values{}
v.Set("h", strconv.Itoa(height))
v.Set("w", strconv.Itoa(width))
if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
utils.Errorf("Error resize: %s", err)
}
}
func (cli *DockerCli) monitorTtySize(id string) error {
cli.resizeTty(id)
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGWINCH)
go func() {
for range sigchan {
cli.resizeTty(id)
}
}()
return nil
}
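// Each SIGWINCH re-posts the current dimensions, i.e. requests of the form
// (sketch, values illustrative):
//
//	POST /containers/<id>/resize?h=50&w=120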
func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
flags := flag.NewFlagSet(name, flag.ContinueOnError)
flags.Usage = func() {
fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description)
flags.PrintDefaults()
os.Exit(2)
}
return flags
}
func (cli *DockerCli) LoadConfigFile() (err error) {
cli.configFile, err = auth.LoadConfig(os.Getenv("HOME"))
if err != nil {
fmt.Fprintf(cli.err, "WARNING: %s\n", err)
}
return err
}
func waitForExit(cli *DockerCli, containerId string) (int, error) {
body, _, err := readBody(cli.call("POST", "/containers/"+containerId+"/wait", nil, false))
if err != nil {
return -1, err
}
var out APIWait
if err := json.Unmarshal(body, &out); err != nil {
return -1, err
}
return out.StatusCode, nil
}
// getExitCode performs an inspect on the container. It returns
// the running state and the exit code.
func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false))
if err != nil {
// If we can't connect, then the daemon probably died.
if err != ErrConnectionRefused {
return false, -1, err
}
return false, -1, nil
}
c := &Container{}
if err := json.Unmarshal(body, c); err != nil {
return false, -1, err
}
return c.State.IsRunning(), c.State.GetExitCode(), nil
}
func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
if stream != nil {
defer stream.Close()
}
if err != nil {
return nil, statusCode, err
}
body, err := ioutil.ReadAll(stream)
if err != nil {
return nil, -1, err
}
return body, statusCode, nil
}
func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
var (
isTerminal = false
terminalFd uintptr
)
if in != nil {
if file, ok := in.(*os.File); ok {
terminalFd = file.Fd()
isTerminal = term.IsTerminal(terminalFd)
}
}
if err == nil {
err = out
}
return &DockerCli{
proto: proto,
addr: addr,
in: in,
out: out,
err: err,
isTerminal: isTerminal,
terminalFd: terminalFd,
}
}
type DockerCli struct {
proto string
addr string
configFile *auth.ConfigFile
in io.ReadCloser
out io.Writer
err io.Writer
isTerminal bool
terminalFd uintptr
}
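// exampleNewCli is a hedged usage sketch, not part of the original source: it
// wires a DockerCli to the standard streams and to what is assumed here to be
// the default unix socket, then runs an interactive container.
func exampleNewCli() error {
cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, "unix", "/var/run/docker.sock")
return cli.CmdRun("-i", "-t", "ubuntu", "/bin/bash")
}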
| ["\"HOME\"", "\"DEBUG\"", "\"DEBUG\"", "\"NORAW\"", "\"HOME\""] | [] | ["HOME", "NORAW", "DEBUG"] | [] | ["HOME", "NORAW", "DEBUG"] | go | 3 | 0 | |
docker/daemon_windows.go | // +build daemon
package main
import (
"os"
apiserver "github.com/sara-nl/docker-1.9.1/api/server"
"github.com/sara-nl/docker-1.9.1/daemon"
)
func setPlatformServerConfig(serverConfig *apiserver.Config, daemonCfg *daemon.Config) *apiserver.Config {
return serverConfig
}
// currentUserIsOwner checks whether the current user is the owner of the given
// file.
func currentUserIsOwner(f string) bool {
return false
}
// setDefaultUmask doesn't do anything on Windows
func setDefaultUmask() error {
return nil
}
func getDaemonConfDir() string {
return os.Getenv("PROGRAMDATA") + `\docker\config`
}
// notifySystem sends a message to the host when the server is ready to be used
func notifySystem() {
}
| ["\"PROGRAMDATA\""] | [] | ["PROGRAMDATA"] | [] | ["PROGRAMDATA"] | go | 1 | 0 | |
cmd/go-selfupdate/main.go | package main
import (
"bytes"
"compress/gzip"
"crypto/sha256"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"github.com/kr/binarydist"
)
var version, genDir string
type current struct {
Version string
Sha256 []byte
}
func generateSha256(path string) []byte {
h := sha256.New()
b, err := ioutil.ReadFile(path)
if err != nil {
fmt.Println(err)
}
h.Write(b)
sum := h.Sum(nil)
return sum
//return base64.URLEncoding.EncodeToString(sum)
}
type gzReader struct {
z, r io.ReadCloser
}
func (g *gzReader) Read(p []byte) (int, error) {
return g.z.Read(p)
}
func (g *gzReader) Close() error {
g.z.Close()
return g.r.Close()
}
func newGzReader(r io.ReadCloser) io.ReadCloser {
var err error
g := new(gzReader)
g.r = r
g.z, err = gzip.NewReader(r)
if err != nil {
panic(err)
}
return g
}
func createUpdate(path string, platform string) {
c := current{Version: version, Sha256: generateSha256(path)}
b, err := json.MarshalIndent(c, "", " ")
if err != nil {
fmt.Println("error:", err)
}
err = ioutil.WriteFile(filepath.Join(genDir, platform+".json"), b, 0755)
if err != nil {
panic(err)
}
os.MkdirAll(filepath.Join(genDir, version), 0755)
var buf bytes.Buffer
w := gzip.NewWriter(&buf)
f, err := ioutil.ReadFile(path)
if err != nil {
panic(err)
}
w.Write(f)
w.Close() // You must close this first to flush the bytes to the buffer.
err = ioutil.WriteFile(filepath.Join(genDir, version, platform+".gz"), buf.Bytes(), 0755)
if err != nil {
panic(err)
}
files, err := ioutil.ReadDir(genDir)
if err != nil {
fmt.Println(err)
}
for _, file := range files {
if !file.IsDir() {
continue
}
if file.Name() == version {
continue
}
os.Mkdir(filepath.Join(genDir, file.Name(), version), 0755)
fName := filepath.Join(genDir, file.Name(), platform+".gz")
old, err := os.Open(fName)
if err != nil {
// Don't have an old release for this os/arch, continue on
continue
}
fName = filepath.Join(genDir, version, platform+".gz")
newF, err := os.Open(fName)
if err != nil {
fmt.Fprintf(os.Stderr, "Can't open %s: error: %s\n", fName, err)
os.Exit(1)
}
ar := newGzReader(old)
defer ar.Close()
br := newGzReader(newF)
defer br.Close()
patch := new(bytes.Buffer)
if err := binarydist.Diff(ar, br, patch); err != nil {
panic(err)
}
ioutil.WriteFile(filepath.Join(genDir, file.Name(), version, platform), patch.Bytes(), 0755)
}
}
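// Directory layout produced by createUpdate (sketch; version 1.2 and platform
// linux-amd64 are illustrative):
//
//	public/
//	  linux-amd64.json          // {"Version": "1.2", "Sha256": "..."}
//	  1.2/linux-amd64.gz        // full gzipped binary for the new version
//	  1.1/1.2/linux-amd64       // bsdiff patch from version 1.1 to 1.2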
func printUsage() {
fmt.Println("")
fmt.Println("Positional arguments:")
fmt.Println("\tSingle platform: go-selfupdate myapp 1.2")
fmt.Println("\tCross platform: go-selfupdate /tmp/mybinares/ 1.2")
}
func createBuildDir() {
os.MkdirAll(genDir, 0755)
}
func main() {
outputDirFlag := flag.String("o", "public", "Output directory for writing updates")
var defaultPlatform string
goos := os.Getenv("GOOS")
goarch := os.Getenv("GOARCH")
if goos != "" && goarch != "" {
defaultPlatform = goos + "-" + goarch
} else {
defaultPlatform = runtime.GOOS + "-" + runtime.GOARCH
}
platformFlag := flag.String("platform", defaultPlatform,
"Target platform in the form OS-ARCH. Defaults to running os/arch or the combination of the environment variables GOOS and GOARCH if both are set.")
flag.Parse()
if flag.NArg() < 2 {
flag.Usage()
printUsage()
os.Exit(0)
}
platform := *platformFlag
appPath := flag.Arg(0)
version = flag.Arg(1)
genDir = *outputDirFlag
createBuildDir()
// If dir is given create update for each file
fi, err := os.Stat(appPath)
if err != nil {
panic(err)
}
if fi.IsDir() {
files, err := ioutil.ReadDir(appPath)
if err == nil {
for _, file := range files {
createUpdate(filepath.Join(appPath, file.Name()), file.Name())
}
os.Exit(0)
}
}
createUpdate(appPath, platform)
}
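// Hedged CLI usage sketch (binary name and version are illustrative):
//
//	go-selfupdate -o public mybin 1.2     // single platform
//	go-selfupdate /tmp/mybinaries/ 1.2    // one update per file in the directory
//
// When both GOOS and GOARCH are set, they override the running platform in
// the generated file names.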
| ["\"GOOS\"", "\"GOARCH\""] | [] | ["GOARCH", "GOOS"] | [] | ["GOARCH", "GOOS"] | go | 2 | 0 |