filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---
tests/optim/test_oss.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
import os
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import fairscale.optim as optim
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO # type: ignore
DEVICE = "cuda" if torch.cuda.is_available() else torch.device("cpu")
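# setup_module() runs once for this test module: it points MASTER_ADDR/MASTER_PORT at localhost and boots a single-rank process group so the non-spawned tests can use OSS directly.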
def setup_module(module):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "29500"
dist.init_process_group(backend=BACKEND, rank=0, world_size=1)
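# dist_init() is the per-worker counterpart used by the mp.spawn tests: each spawned rank joins its own group on port 29501.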
def dist_init(rank, world_size):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "29501"
dist.init_process_group(backend=BACKEND, rank=rank, world_size=world_size)
def test_create():
params = [torch.rand(1)]
o = optim.OSS(params, lr=0.01)
def test_state_dict():
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.1, momentum=0.9)
x.backward()
o.step()
assert x == torch.tensor([0.9], device=DEVICE)
assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)
o.zero_grad()
o.consolidate_state_dict() # Sync state dict in between replicas - even if there are none
state_dict = o.state_dict()
# Check that the state dict is pytorch-compliant key wise
assert "param_groups" in state_dict.keys()
assert "state" in state_dict.keys()
# Check that the pulled state is what we expect, and that we have all the expected keys
assert state_dict["param_groups"][0]["lr"] == 0.1
assert state_dict["param_groups"][0]["momentum"] == 0.9
assert not state_dict["param_groups"][0]["nesterov"]
assert state_dict["param_groups"][0]["weight_decay"] == 0.0
assert state_dict["param_groups"][0]["dampening"] == 0.0
# Check that the pulled state and the .param_groups attribute are in sync
for k in state_dict["param_groups"][0].keys():
if k != "params":
assert state_dict["param_groups"][0][k] == o.param_groups[0][k]
# Check that it's correctly loaded
o = optim.OSS([x], lr=0.01)
o.load_state_dict(state_dict)
# Check that state is correct and on proper device
assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)
# We should now be using a lr of 0.1, both within the optimizer
# and as exposed by the .param_groups attribute
assert o.param_groups[0]["lr"] == 0.1
x.backward()
o.step()
assert x == torch.tensor([0.71], device=DEVICE)
assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.9], device=DEVICE)
# Check that the exposed param_groups are on the proper device
assert o.param_groups[0]["params"][0].device == x.device
def test_lr_scheduler():
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
x2 = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.01)
o2 = torch.optim.SGD([x2], lr=0.01)
s = torch.optim.lr_scheduler.StepLR(o, 1)
s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
for _ in range(5):
x.backward()
o.zero_grad()
o.step()
s.step()
x2.backward()
o2.zero_grad()
o2.step()
s2.step()
assert x == x2
def test_step_with_kwargs():
class SGDWithStepKWArg(torch.optim.SGD):
def step(self, closure=None, kwarg=[]):
super().step()
kwarg.append(5)
kwarg = []
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], SGDWithStepKWArg, lr=0.1)
x.backward()
o.step(0, kwarg=kwarg)
assert kwarg == [5]
assert x == torch.tensor([0.9], device=DEVICE)
def test_step_without_closure():
class SGDWithoutClosure(torch.optim.SGD):
def step(self):
return super().step()
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], SGDWithoutClosure, lr=0.1)
x.backward()
o.step()
assert x == torch.tensor([0.9], device=DEVICE)
def test_local_state_dict():
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.1)
local_state_dict = o.local_state_dict()
o = optim.OSS([x], lr=0.01)
o.load_local_state_dict(local_state_dict)
# We should now be using a lr of 0.1.
assert o.optim.param_groups[0]["lr"] == 0.1
assert o.param_groups[0]["lr"] == 0.1
x.backward()
o.step()
assert x == torch.tensor([0.9], device=DEVICE)
def test_implicit_local_state_dict():
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.1)
local_state_dict = o.state_dict()
o = optim.OSS([x], lr=0.01)
o.load_state_dict(local_state_dict)
# We should now be using a lr of 0.1.
assert o.optim.param_groups[0]["lr"] == 0.1
assert o.param_groups[0]["lr"] == 0.1
x.backward()
o.step()
assert x == torch.tensor([0.9], device=DEVICE)
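# The run_test_* helpers below execute inside mp.spawn workers; each calls dist_init() before exercising OSS across ranks.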
def run_test_add_param_group(rank, world_size):
dist_init(rank, world_size)
params = []
for size in [4, 5, 2, 6, 4]:
params.append(torch.rand(size, 1))
o = optim.OSS(params, lr=0.1)
assert len(o.param_groups) == 1
o.add_param_group({"params": [torch.rand(3, 1)]})
assert len(o.param_groups) == 2
# Verify that added group is added to the correct partition making all have 8 elements.
assert sum([x.numel() for g in o.optim.param_groups for x in g["params"]]) == 8
assert len(o.optim.param_groups) == 2
def test_add_param_group():
world_size = 3
mp.spawn(run_test_add_param_group, args=(world_size,), nprocs=world_size, join=True)
def run_test_zero_grad(rank, world_size):
dist_init(rank, world_size)
x = torch.rand(1)
m = torch.nn.Linear(1, 1)
o = optim.OSS(m.parameters(), lr=0.1)
y = m(x)
y.backward(x)
assert m.weight.grad
assert m.bias.grad
o.zero_grad()
assert not m.weight.grad
assert not m.bias.grad
def test_zero_grad():
world_size = 2
mp.spawn(run_test_zero_grad, args=(world_size,), nprocs=world_size, join=True)
def run_test_step(rank, world_size):
dist_init(rank, world_size)
x = torch.tensor([float(rank + 1)], device=rank)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[1.0]])
m.bias.data = torch.tensor([2.0])
m.to(rank)
o = optim.OSS(m.parameters(), lr=0.1)
y = m(x)
y.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= world_size
o.step()
assert m.weight == torch.tensor([[0.75]], device=rank)
assert m.bias == torch.tensor([1.85], device=rank)
@skip_if_no_cuda
def test_step():
world_size = min(2, torch.cuda.device_count())
mp.spawn(run_test_step, args=(world_size,), nprocs=world_size, join=True)
def run_test_step_with_closure(rank, world_size, optimizer=None):
dist_init(rank, world_size)
x_val = rank + 1
weight = 1.0
bias = 2.0
error = 1.0
target = torch.tensor([x_val * weight + bias + error], device=rank)
loss_fn = torch.nn.L1Loss()
x = torch.tensor([float(x_val)], device=rank)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[weight]])
m.bias.data = torch.tensor([bias])
m.to(rank)
o = optim.OSS(m.parameters(), lr=0.1)
y = m(x)
y.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= world_size
def closure():
o.zero_grad()
output = m(x)
loss = loss_fn(output, target)
loss.backward()
return loss
loss = o.step(closure=closure)
assert loss == torch.tensor(error, device=rank)
assert m.weight == torch.tensor([[1.1]], device=rank)
assert m.bias == torch.tensor([2.1], device=rank)
@skip_if_no_cuda
def test_step_with_closure():
world_size = min(2, torch.cuda.device_count())
mp.spawn(run_test_step_with_closure, args=(world_size,), nprocs=world_size, join=True)
def run_test_sharding(rank, world_size):
dist_init(rank, world_size)
params = []
for size in [5, 4, 2, 6, 4, 3]:
params.append(torch.rand(size, 1))
o = optim.OSS(params, lr=0.1)
assert sum([x.numel() for x in o.optim.param_groups[0]["params"]]) == 8
def test_sharding():
world_size = 3
mp.spawn(run_test_sharding, args=(world_size,), nprocs=world_size, join=True)
def run_test_collect_shards(rank, world_size, reference_rank):
dist_init(rank, world_size)
device = torch.device(rank) if torch.cuda.device_count() > 1 else DEVICE
# Run a dummy step so that the optimizer state dict exists
batch, input_width, hidden, target_width = 3, 20, 10, 5
target = torch.rand((batch, target_width), device=device)
inputs = torch.rand((batch, input_width), device=device)
model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))
model.to(device)
loss_fn = torch.nn.L1Loss()
loss_fn.to(device)
# With SGD, Momentum is required to get a state to shard
optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss.backward()
return loss
_ = optimizer.step(closure=closure)
# Update the optimizer state on the reference rank
optimizer.consolidate_state_dict(recipient_rank=reference_rank)
# Fetch the state on the reference rank
# - check that it has the correct size
# - load it again
if rank == reference_rank:
optimizer_state_dict = optimizer.state_dict()
assert len(optimizer_state_dict["state"]) == world_size
else:
optimizer_state_dict = {}
optimizer_state_dict = optim.utils.broadcast_object(
optimizer_state_dict, src_rank=reference_rank, group=dist.group.WORLD, dist_device=device
)
# Load the optimizer state dict
optimizer.load_state_dict(optimizer_state_dict)
def test_collect_shards():
world_size = 3
if torch.cuda.is_available():
world_size = min(world_size, torch.cuda.device_count())
reference_rank = 0
mp.spawn(
run_test_collect_shards, args=(world_size, reference_rank), nprocs=world_size, join=True,
)
| [] | [] | ["MASTER_ADDR", "MASTER_PORT"] | [] | ["MASTER_ADDR", "MASTER_PORT"] | python | 2 | 0 | |
python/pi_only/games_pi_only.py
|
# WS2812 LED Matrix Gamecontrol (Tetris, Snake, Pong)
# by M Oehler
# https://hackaday.io/project/11064-raspberry-pi-retro-gaming-led-display
# ported from
# Tetromino (a Tetris clone)
# By Al Sweigart [email protected]
# http://inventwithpython.com/pygame
# Released under a "Simplified BSD" license
import random, time, sys, os, pickle
from PIL import Image
# If PI is False the script runs in simulation mode using the pygame lib
PI = False
import pygame
from pygame.locals import *
if PI:
os.environ["SDL_VIDEODRIVER"] = "dummy" #dummy display for pygame joystick usage
import board
import neopixel
import subprocess
from luma.led_matrix.device import max7219
from luma.core.interface.serial import spi, noop
from luma.core.render import canvas
from luma.core.virtual import viewport
from luma.core.legacy import text, show_message
from luma.core.legacy.font import proportional, CP437_FONT, TINY_FONT, SINCLAIR_FONT, LCD_FONT
# only modify these two values for size adaptation!
PIXEL_X=10
PIXEL_Y=20
SIZE= 20
FPS = 15
BOXSIZE = 20
WINDOWWIDTH = BOXSIZE * PIXEL_X
WINDOWHEIGHT = BOXSIZE * PIXEL_Y
BOARDWIDTH = PIXEL_X
BOARDHEIGHT = PIXEL_Y
BLANK = '.'
MOVESIDEWAYSFREQ = 0.15
MOVEDOWNFREQ = 0.15
FALLING_SPEED = 0.8
LED_BRIGHTNESS = 0.6
# R G B
WHITE = (255, 255, 255)
GRAY = (185, 185, 185)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
LIGHTRED = (175, 20, 20)
GREEN = ( 0, 255, 0)
LIGHTGREEN = ( 20, 175, 20)
BLUE = ( 0, 0, 255)
LIGHTBLUE = ( 20, 20, 175)
YELLOW = (255, 255, 0)
LIGHTYELLOW = (175, 175, 20)
CYAN = ( 0, 255, 255)
MAGENTA = (255, 0, 255)
ORANGE = (255, 100, 0)
SCORES =(0,40,100,300,1200)
BORDERCOLOR = BLUE
BGCOLOR = BLACK
TEXTCOLOR = WHITE
TEXTSHADOWCOLOR = GRAY
COLORS = (BLUE,GREEN,RED,YELLOW,CYAN,MAGENTA,ORANGE)
LIGHTCOLORS = (LIGHTBLUE, LIGHTGREEN, LIGHTRED, LIGHTYELLOW)
#assert len(COLORS) == len(LIGHTCOLORS) # each color must have light color
TEMPLATEWIDTH = 5
TEMPLATEHEIGHT = 5
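# Each piece template is a list of 5x5 rotation frames; 'O' marks a filled box, '.' an empty one.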
S_SHAPE_TEMPLATE = [['.....',
'.....',
'..OO.',
'.OO..',
'.....'],
['.....',
'..O..',
'..OO.',
'...O.',
'.....']]
Z_SHAPE_TEMPLATE = [['.....',
'.....',
'.OO..',
'..OO.',
'.....'],
['.....',
'..O..',
'.OO..',
'.O...',
'.....']]
I_SHAPE_TEMPLATE = [['..O..',
'..O..',
'..O..',
'..O..',
'.....'],
['.....',
'.....',
'OOOO.',
'.....',
'.....']]
O_SHAPE_TEMPLATE = [['.....',
'.....',
'.OO..',
'.OO..',
'.....']]
J_SHAPE_TEMPLATE = [['.....',
'.O...',
'.OOO.',
'.....',
'.....'],
['.....',
'..OO.',
'..O..',
'..O..',
'.....'],
['.....',
'.....',
'.OOO.',
'...O.',
'.....'],
['.....',
'..O..',
'..O..',
'.OO..',
'.....']]
L_SHAPE_TEMPLATE = [['.....',
'...O.',
'.OOO.',
'.....',
'.....'],
['.....',
'..O..',
'..O..',
'..OO.',
'.....'],
['.....',
'.....',
'.OOO.',
'.O...',
'.....'],
['.....',
'.OO..',
'..O..',
'..O..',
'.....']]
T_SHAPE_TEMPLATE = [['.....',
'..O..',
'.OOO.',
'.....',
'.....'],
['.....',
'..O..',
'..OO.',
'..O..',
'.....'],
['.....',
'.....',
'.OOO.',
'..O..',
'.....'],
['.....',
'..O..',
'.OO..',
'..O..',
'.....']]
PIECES = {'S': S_SHAPE_TEMPLATE,
'Z': Z_SHAPE_TEMPLATE,
'I': I_SHAPE_TEMPLATE,
'J': J_SHAPE_TEMPLATE,
'L': L_SHAPE_TEMPLATE,
'O': O_SHAPE_TEMPLATE,
'T': T_SHAPE_TEMPLATE}
PIECES_ORDER = {'S': 0,'Z': 1,'I': 2,'J': 3,'L': 4,'O': 5,'T': 6}
# snake constants #
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
HEAD = 0 # syntactic sugar: index of the worm's head
# font clock #
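# 3x5 pixel digits 0-9: three column bytes per digit, 5 bits used per column.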
clock_font = [
0x1F, 0x11, 0x1F,
0x00, 0x00, 0x1F,
0x1D, 0x15, 0x17,
0x15, 0x15, 0x1F,
0x07, 0x04, 0x1F,
0x17, 0x15, 0x1D,
0x1F, 0x15, 0x1D,
0x01, 0x01, 0x1F,
0x1F, 0x15, 0x1F,
0x17, 0x15, 0x1F]
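# 4x8 preview glyphs for the next-piece display on the MAX7219 score matrix, one per Tetris shape.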
theTetrisFont = [
0x78,0x78,0x1E,0x1E, #S
0x1E,0x1E,0x78,0x78, #Z
0x00,0xFF,0xFF,0x00, #I
0x06,0x06,0x7E,0x7E, #J
0x7E,0x7E,0x06,0x06, #L
0x3C,0x3C,0x3C,0x3C, #O
0x7E,0x7E,0x18,0x18, #T
]
if PI:
serial = spi(port=0, device=0, gpio=noop())
device = max7219(serial, cascaded=4, blocks_arranged_in_reverse_order=True)
pixel_pin = board.D18
# The number of NeoPixels
num_pixels = PIXEL_X*PIXEL_Y
ORDER = neopixel.GRB
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=LED_BRIGHTNESS, auto_write=False,pixel_order=ORDER)
# key server for controller #
QKEYDOWN=0
QKEYUP=1
JKEY_X=3
JKEY_Y=4
JKEY_A=0
JKEY_B=1
JKEY_R=7
JKEY_L=6
JKEY_SEL=10
JKEY_START=11
mykeys = {
K_1: JKEY_A,
K_2: JKEY_B,
K_3: JKEY_Y,
K_4: JKEY_X,
K_x: JKEY_SEL,
K_s: JKEY_START
}
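# single-bit masks used to test individual rows of the bitmap fonts above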
mask = bytearray([1,2,4,8,16,32,64,128])
# main #
def main():
global FPSCLOCK, DISPLAYSURF, BASICFONT, BIGFONT
global a1_counter ,RUNNING
a1_counter=0
RUNNING=True
joystick_detected=False
joystick_cnt=0
if not PI:
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((PIXEL_X*SIZE, PIXEL_Y*SIZE))
BASICFONT = pygame.font.Font('freesansbold.ttf', 18)
BIGFONT = pygame.font.Font('freesansbold.ttf', 100)
pygame.display.set_caption('Pi Games')
DISPLAYSURF.fill(BGCOLOR)
pygame.display.update()
drawImage('pi.bmp')
time.sleep(2)
else:
device.contrast(200)
pygame.init()
drawImage('/home/pi/pi.bmp')
pygame.joystick.init()
while joystick_detected==False:
show_message(device,"Waiting for controller...",fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
pygame.joystick.quit()
pygame.joystick.init()
try:
joystick = pygame.joystick.Joystick(0) # create a joystick instance
joystick.init() # init instance
# print("Initialized joystick: {}".format(joystick.get_name()))
joystick_detected = True
except pygame.error:
print("no joystick found.")
joystick_detected = False
clearScreen()
drawClock(1)
if PI:
show_message(device,"Let's play",fill="white", font=proportional(CP437_FONT))
while True:
clearScreen()
#drawSymbols()
if PI:
drawImage('/home/pi/select.bmp')
else:
drawImage('select.bmp')
updateScreen()
if not PI:
checkForQuit()
#check if joystick is still connected
if PI:
if joystick_cnt==50:
joystick_cnt=0
pygame.joystick.quit()
pygame.joystick.init()
try:
joystick = pygame.joystick.Joystick(0) # create a joystick instance
joystick.init() # init instance
# print("Initialized joystick: {}".format(joystick.get_name()))
joystick_detected = True
except pygame.error:
print("no joystick found.")
joystick_detected = False
else:
joystick_cnt+=1
pygame.event.pump()
for event in pygame.event.get():
# print("event detected {}".format(event))
if event.type == pygame.JOYBUTTONDOWN or event.type == KEYDOWN:
if event.type == pygame.JOYBUTTONDOWN:
myevent = event.button
else:
if event.key in mykeys:
myevent = mykeys[event.key]
else:
myevent = -1
if (myevent == JKEY_B):
drawClock(1)
if (myevent == JKEY_A):
runPongGame()
if (myevent == JKEY_X):
runTetrisGame()
if (myevent == JKEY_Y):
runSnakeGame()
if (myevent == JKEY_START):
shutdownScreen()
if event.type == pygame.QUIT: # get all the QUIT events
terminate() # terminate if any QUIT events are present
time.sleep(.1)
terminate()
# gaming main routines #
def runPongGame():
down = 0
up = 1
left = 0
right = 1
lowerbarx = PIXEL_X//2
upperbarx = PIXEL_X//2
score1 = 0
score2 = 0
ballx = PIXEL_X//2
bally = PIXEL_Y//2
directiony = down
directionx = left
movingRightUpper = False
movingLeftUpper = False
movingRightLower = False
movingLeftLower = False
restart=False
lastLowerMoveSidewaysTime = time.time()
lastUpperMoveSidewaysTime = time.time()
while True: # main game loop
pygame.event.pump()
for event in pygame.event.get():
if event.type == pygame.JOYAXISMOTION:
axis = event.axis
val = round(event.value)
if (axis == 0 and val == -1):
movingLeftLower = True
movingRightLower = False
if (axis == 0 and val == 1):
movingLeftLower = False
movingRightLower = True
if (val == 0):
movingLeftLower = False
movingRightLower = False
if event.type == pygame.JOYBUTTONDOWN:
# print("Joystick button pressed: {}".format(event.button))
if (event.button == JKEY_A):
movingLeftUpper = True
movingRightUpper = False
if (event.button == JKEY_B):
movingLeftUpper = False
movingRightUpper = True
if (event.button == JKEY_SEL):
# quit game
return
if event.type == pygame.JOYBUTTONUP:
movingLeftUpper = False
movingRightUpper = False
if event.type == pygame.KEYDOWN:
if(event.key==K_LEFT):
movingLeftLower = True
movingRightLower = False
if(event.key==K_RIGHT):
movingLeftLower = False
movingRightLower = True
if(event.key==K_1):
movingLeftUpper = True
movingRightUpper = False
if(event.key==K_2):
movingLeftUpper = False
movingRightUpper = True
if(event.key==K_s):
return
if event.type == pygame.KEYUP:
movingLeftLower = False
movingRightLower = False
movingLeftUpper = False
movingRightUpper = False
if (movingLeftLower) and time.time() - lastLowerMoveSidewaysTime > MOVESIDEWAYSFREQ:
if lowerbarx >1:
lowerbarx-=1;
lastLowerMoveSidewaysTime = time.time()
if (movingRightLower) and time.time() - lastLowerMoveSidewaysTime > MOVESIDEWAYSFREQ:
if lowerbarx <PIXEL_X-2:
lowerbarx+=1;
lastLowerMoveSidewaysTime = time.time()
if (movingLeftUpper) and time.time() - lastUpperMoveSidewaysTime > MOVESIDEWAYSFREQ:
if upperbarx >1:
upperbarx-=1;
lastUpperMoveSidewaysTime = time.time()
if (movingRightUpper) and time.time() - lastUpperMoveSidewaysTime > MOVESIDEWAYSFREQ:
if upperbarx <PIXEL_X-2:
upperbarx+=1;
lastUpperMoveSidewaysTime = time.time()
if not PI:
checkForQuit()
if (directiony == up):
if (bally>1):
bally-=1
else:
if (abs(ballx-upperbarx)<2):
directiony = down
if (ballx==upperbarx+1):
if (directionx==left):
directionx=right
if (ballx==upperbarx-1):
if (directionx==right):
directionx=left
elif ((ballx-upperbarx==2) and (directionx==left)):
directionx=right
directiony = down
elif ((ballx-upperbarx==-2) and (directionx==right)):
directionx=left
directiony = down
else:
bally-=1
score1+=1
restart = True
else:
if (bally<PIXEL_Y-2):
bally+=1
else:
if (abs(ballx-lowerbarx)<2):
directiony = up
if (ballx==lowerbarx+1):
if (directionx==left):
directionx=right
if (ballx==lowerbarx-1):
if (directionx==right):
directionx=left
elif ((ballx-lowerbarx==2) and (directionx==left)):
directionx=right
directiony = up
elif ((ballx-lowerbarx==-2) and (directionx==right)):
directionx=left
directiony = up
else:
bally+=1
score2+=1
restart = True
if (directionx == left):
if (ballx>0):
if (ballx==1):
ballx-=1
else:
ballx-=random.randint(1,2)
else:
directionx = right
ballx+=1
if(directiony == up):
if(bally>2):
bally-=1
if(directiony == down):
if(bally<PIXEL_Y-2):
bally+=1
else:
if (ballx<PIXEL_X-1):
if (ballx==8):
ballx+=1
else:
ballx+=random.randint(1,2)
else:
directionx = left
ballx-=random.randint(1,2)
if(directiony == up):
if(bally>3):
bally-=random.randint(0,2)
if(directiony == down):
if(bally<PIXEL_Y-3):
bally+=random.randint(0,2)
clearScreen()
drawBall(ballx,bally)
drawBar(upperbarx,0)
drawBar(lowerbarx,PIXEL_Y-1)
twoscoreText(score1,score2)
updateScreen()
if (score1 == 9) or (score2 == 9):
time.sleep(3)
return
if restart:
time.sleep(1)
ballx=PIXEL_X//2
bally=PIXEL_Y//2
if directiony==down:
directiony = up
else:
directiony = down
restart=False
else:
time.sleep(.1)
def runSnakeGame():
# Set a random start point.
startx = random.randint(2, BOARDWIDTH-2 )
starty = random.randint(2, BOARDHEIGHT -2 )
wormCoords = [{'x': startx, 'y': starty},
{'x': startx - 1, 'y': starty},
{'x': startx - 2, 'y': starty}]
direction = RIGHT
score = 0
if os.path.isfile('/home/pi/hs_snake.p')==True:
try:
highscore = pickle.load(open("/home/pi/hs_snake.p","rb"))
except EOFError:
highscore = 0
else:
highscore=0
if PI:
show_message(device,"Snake Highscore: " + str(highscore),fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
# Start the apple in a random place.
apple = getRandomLocation(wormCoords)
while True: # main game loop
olddirection = direction
pygame.event.pump()
for event in pygame.event.get():
if event.type == pygame.JOYAXISMOTION:
if (olddirection== direction): #only one direction change per step
axis = event.axis
val = round(event.value)
if (axis == 0 and val == -1):
if direction != RIGHT:
direction = LEFT
if (axis == 0 and val == 1):
if direction != LEFT:
direction = RIGHT
if (axis == 1 and val == 1):
if direction != UP:
direction = DOWN
if (axis == 1 and val == -1):
if direction != DOWN:
direction = UP
if event.type == pygame.KEYDOWN:
if (event.key==K_LEFT):
if direction != RIGHT:
direction = LEFT
if (event.key==K_RIGHT):
if direction != LEFT:
direction = RIGHT
if (event.key==K_DOWN):
if direction != UP:
direction = DOWN
if (event.key==K_UP):
if direction != DOWN:
direction = UP
if (event.key == JKEY_SEL):
#quit game
return
if event.type == pygame.JOYBUTTONDOWN:
if (event.button==JKEY_SEL):
# quit game
return
# check if the worm has hit itself or the edge
if wormCoords[HEAD]['x'] == -1 or wormCoords[HEAD]['x'] == BOARDWIDTH or wormCoords[HEAD]['y'] == -1 or wormCoords[HEAD]['y'] == BOARDHEIGHT:
time.sleep(1.5)
if score > highscore:
highscore = score
if PI:
pickle.dump(highscore, open("/home/pi/hs_snake.p", "wb"))
show_message(device,"New Highscore !!!",fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
return # game over
for wormBody in wormCoords[1:]:
if wormBody['x'] == wormCoords[HEAD]['x'] and wormBody['y'] == wormCoords[HEAD]['y']:
time.sleep(1.5)
if score > highscore:
highscore = score
if PI:
pickle.dump(highscore, open("/home/pi/hs_snake.p", "wb"))
show_message(device,"New Highscore !!!",fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
return # game over
# check if worm has eaten an apple
if wormCoords[HEAD]['x'] == apple['x'] and wormCoords[HEAD]['y'] == apple['y']:
# don't remove worm's tail segment
score += 1
apple = getRandomLocation(wormCoords) # set a new apple somewhere
else:
del wormCoords[-1] # remove worm's tail segment
# move the worm by adding a segment in the direction it is moving
if direction == UP:
if wormCoords[HEAD]['y'] == 0 :
newHead = {'x': wormCoords[HEAD]['x'], 'y': BOARDHEIGHT-1}
else:
newHead = {'x': wormCoords[HEAD]['x'], 'y': wormCoords[HEAD]['y'] - 1}
elif direction == DOWN:
if wormCoords[HEAD]['y'] == BOARDHEIGHT-1 :
newHead = {'x': wormCoords[HEAD]['x'], 'y': 0}
else:
newHead = {'x': wormCoords[HEAD]['x'], 'y': wormCoords[HEAD]['y'] + 1}
elif direction == LEFT:
if wormCoords[HEAD]['x'] == 0 :
newHead = {'x': BOARDWIDTH -1, 'y': wormCoords[HEAD]['y'] }
else:
newHead = {'x': wormCoords[HEAD]['x'] - 1, 'y': wormCoords[HEAD]['y']}
elif direction == RIGHT:
if wormCoords[HEAD]['x'] == BOARDWIDTH-1:
newHead = {'x': 0, 'y': wormCoords[HEAD]['y']}
else:
newHead = {'x': wormCoords[HEAD]['x'] + 1, 'y': wormCoords[HEAD]['y']}
if not PI:
checkForQuit()
wormCoords.insert(0, newHead)
clearScreen()
drawWorm(wormCoords)
drawApple(apple)
scoreText(score)
updateScreen()
time.sleep(.15)
def runTetrisGame():
# setup variables for the start of the game
#if PI:
#device.contrast(255)
#device.show()
board = getBlankBoard()
lastMoveDownTime = time.time()
lastMoveSidewaysTime = time.time()
lastFallTime = time.time()
movingDown = False # note: there is no movingUp variable
movingLeft = False
movingRight = False
score = 0
oldscore = -1
oldpiece = 10
lines = 0
level, fallFreq = calculateLevelAndFallFreq(lines)
if os.path.isfile('/home/pi/hs_tetris.p')==True:
try:
highscore = pickle.load(open("/home/pi/hs_tetris.p","rb"))
except EOFError:
highscore = 0
else:
highscore=0
if PI:
show_message(device,"Tetris Highscore: " + str(highscore),fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
fallingPiece = getNewPiece()
nextPiece = getNewPiece()
while True: # game loop
if fallingPiece == None:
# No falling piece in play, so start a new piece at the top
fallingPiece = nextPiece
nextPiece = getNewPiece()
lastFallTime = time.time() # reset lastFallTime
if not isValidPosition(board, fallingPiece):
time.sleep(2)
if score > highscore:
highscore = score
if PI:
pickle.dump(highscore, open("/home/pi/hs_tetris.p", "wb"))
show_message(device,"New Highscore !!!",fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
return # can't fit a new piece on the board, so game over
if not PI:
checkForQuit()
pygame.event.pump()
for event in pygame.event.get():
# print("event detected {}".format(event))
if event.type == pygame.JOYAXISMOTION:
axis = event.axis
val = round(event.value)
if (axis == 0 and val == 0):
# no motion or down motion
movingLeft = movingRight = False
if (axis == 1 and val == 0) :
movingDown = False
if (axis==0 and val== -1) and isValidPosition(board, fallingPiece, adjX=-1):
fallingPiece['x'] -= 1
movingLeft = True
movingRight = False
lastMoveSidewaysTime = time.time()
if (axis == 0 and val== 1) and isValidPosition(board, fallingPiece, adjX=1):
fallingPiece['x'] += 1
movingLeft = False
movingRight = True
lastMoveSidewaysTime = time.time()
if (axis==1 and val == 1):
movingDown = True
if isValidPosition(board, fallingPiece, adjY=1):
fallingPiece['y'] += 1
lastMoveDownTime = time.time()
if (axis==1 and val == -1):
fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])
if not isValidPosition(board, fallingPiece):
fallingPiece['rotation'] = (fallingPiece['rotation'] - 1) % len(PIECES[fallingPiece['shape']])
if event.type == pygame.KEYDOWN:
if (event.key==K_LEFT) and isValidPosition(board, fallingPiece, adjX=-1):
fallingPiece['x'] -= 1
movingLeft = True
movingRight = False
lastMoveSidewaysTime = time.time()
if (event.key==K_RIGHT) and isValidPosition(board, fallingPiece, adjX=1):
fallingPiece['x'] += 1
movingLeft = False
movingRight = True
lastMoveSidewaysTime = time.time()
if (event.key==K_DOWN):
movingDown = True
if isValidPosition(board, fallingPiece, adjY=1):
fallingPiece['y'] += 1
lastMoveDownTime = time.time()
if (event.key==K_UP):
fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])
if not isValidPosition(board, fallingPiece):
fallingPiece['rotation'] = (fallingPiece['rotation'] - 1) % len(PIECES[fallingPiece['shape']])
if (event.key == K_3):
fallingPiece['rotation'] = (fallingPiece['rotation'] -1) % len(PIECES[fallingPiece['shape']])
if not isValidPosition(board, fallingPiece):
fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])
if (event.key == K_4):
movingDown = False
movingLeft = False
movingRight = False
for i in range(1, BOARDHEIGHT):
if not isValidPosition(board, fallingPiece, adjY=i):
break
score+=i #TODO: more digits on numbercounter, more scores
fallingPiece['y'] += i - 1
if event.type == pygame.KEYUP:
movingDown = False
movingLeft = False
movingRight = False
if event.type == pygame.JOYBUTTONDOWN:
# print("Joystick button pressed: {}".format(event.button))
if (event.button == JKEY_A):
fallingPiece['rotation'] = (fallingPiece['rotation'] -1) % len(PIECES[fallingPiece['shape']])
if not isValidPosition(board, fallingPiece):
fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])
if (event.button == JKEY_Y):
movingDown = False
movingLeft = False
movingRight = False
for i in range(1, BOARDHEIGHT):
if not isValidPosition(board, fallingPiece, adjY=i):
break
score+=i #TODO: more digits on numbercounter, more scores
fallingPiece['y'] += i - 1
# return
# handle moving the piece because of user input
if (movingLeft or movingRight) and time.time() - lastMoveSidewaysTime > MOVESIDEWAYSFREQ:
if movingLeft and isValidPosition(board, fallingPiece, adjX=-1):
fallingPiece['x'] -= 1
elif movingRight and isValidPosition(board, fallingPiece, adjX=1):
fallingPiece['x'] += 1
lastMoveSidewaysTime = time.time()
if movingDown and time.time() - lastMoveDownTime > MOVEDOWNFREQ and isValidPosition(board, fallingPiece, adjY=1):
fallingPiece['y'] += 1
lastMoveDownTime = time.time()
# let the piece fall if it is time to fall
if time.time() - lastFallTime > fallFreq:
# see if the piece has landed
if not isValidPosition(board, fallingPiece, adjY=1):
# falling piece has landed, set it on the board
addToBoard(board, fallingPiece)
remLine = removeCompleteLines(board)
# count lines for level calculation
lines += remLine
# more lines, more points per line
score += SCORES[remLine]*level
level, fallFreq = calculateLevelAndFallFreq(lines)
fallingPiece = None
else:
# piece did not land, just move the piece down
fallingPiece['y'] += 1
lastFallTime = time.time()
# drawing everything on the screen
clearScreen()
drawBoard(board)
#scoreText(score)
if score>oldscore:
scoreTetris(score,level,PIECES_ORDER.get(nextPiece['shape']))
oldscore = score
if oldpiece!=PIECES_ORDER.get(nextPiece['shape']):
scoreTetris(score,level,PIECES_ORDER.get(nextPiece['shape']))
oldpiece=PIECES_ORDER.get(nextPiece['shape'])
#drawStatus(score, level)
#drawNextPiece(nextPiece)
if fallingPiece != None:
drawPiece(fallingPiece)
updateScreen()
#FPSCLOCK.tick(FPS)
time.sleep(.05)
def drawClock(color):
joystick_cnt=0
if PI:
device.clear();
device.show();
hour = time.localtime().tm_hour
minute= time.localtime().tm_min
second= time.localtime().tm_sec
while True:
pygame.event.pump()
for event in pygame.event.get(): # User did something
# print("event detected {}".format(event))
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
if event.type == pygame.JOYBUTTONDOWN or event.type == KEYDOWN:
if event.type == pygame.JOYBUTTONDOWN:
myevent = event.button
else:
if event.key in mykeys:
myevent = mykeys[event.key]
else:
myevent = -1
# print("Joystick button pressed: {}".format(event.button))
if (myevent==JKEY_X):
# print("exiting clock")
clearScreen()
updateScreen()
return
if (myevent == JKEY_A):
color = color + 1
if (color > (len(COLORS) - 1)):
color = 0
if event.type == pygame.QUIT: # get all the QUIT events
terminate() # terminate if any QUIT events are present
#check if joystick is still connected
if PI:
if joystick_cnt==25:
joystick_cnt=0
pygame.joystick.quit()
pygame.joystick.init()
try:
joystick = pygame.joystick.Joystick(0) # create a joystick instance
joystick.init() # init instance
# print("Initialized joystick: {}".format(joystick.get_name()))
#joystick_detected = True
except pygame.error:
print("no joystick found.")
#joystick_detected = False
else:
joystick_cnt+=1
ltime = time.localtime()
hour = ltime.tm_hour
minute= ltime.tm_min
second= ltime.tm_sec
clearScreen()
drawnumber(int(hour/10),2,1,color)
drawnumber(int(hour%10),6,1,color)
drawnumber(int(minute/10),2,8,color)
drawnumber(int(minute%10),6,8,color)
drawnumber(int(second/10),2,15,color)
drawnumber(int(second%10),6,15,color)
updateScreen()
time.sleep(.2)
def shutdownScreen():
if PI:
device.clear();
device.show();
drawImage('/home/pi/shutdown.bmp')
show_message(device,"Press Select to shutdown!",fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
else:
drawImage('shutdown.bmp')
while True:
pygame.event.pump()
for event in pygame.event.get(): # User did something
# print("event detected {}".format(event))
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
if event.type == pygame.JOYBUTTONDOWN or event.type == KEYDOWN:
if event.type == pygame.JOYBUTTONDOWN:
myevent = event.button
else:
if event.key in mykeys:
myevent = mykeys[event.key]
else:
myevent = -1
# print("Joystick button pressed: {}".format(event.button))
if (myevent!=JKEY_SEL):
# print("exiting clock")
clearScreen()
updateScreen()
return
else:
if not PI:
terminate()
else:
clearScreen()
updateScreen()
show_message(device,"Shutdown...",fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
subprocess.Popen(['shutdown','-h','now'])
#call("sudo nohup shutdown -h now", shell=True)
terminate()
if event.type == pygame.QUIT: # get all the QUIT events
terminate() # terminate if any QUIT events are present
updateScreen()
time.sleep(.2)
def drawImage(filename):
im = Image.open(filename)
for row in range(0,BOARDHEIGHT):
for col in range(0,BOARDWIDTH):
r,g,b = im.getpixel((col,row))
drawPixelRgb(col,row,r,g,b)
updateScreen()
def drawHalfImage(filename,offset):
im = Image.open(filename)
if offset>10:
offset = 10
for row in range(0,10):
for col in range(0,10):
r,g,b = im.getpixel((col,row))
drawPixelRgb(col,row+offset,r,g,b)
# drawing #
def clearScreen():
if PI:
pixels.fill((0,0,0))
else:
DISPLAYSURF.fill(BGCOLOR)
def updateScreen():
if PI:
pixels.show()
else:
pygame.display.update()
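# drawPixel() maps board coordinates onto the serpentine LED strip: odd columns run top to bottom, even columns are reversed.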
def drawPixel(x,y,color):
if color == BLANK:
return
if PI:
try:
if (x>=0 and y>=0 and color >=0):
if x%2==1:
pixels[x*PIXEL_Y+y] = COLORS[color]
else:
pixels[x*PIXEL_Y+(PIXEL_Y-1-y)] = COLORS[color]
except:
print(str(x) + ' --- ' + str(y))
else:
pygame.draw.rect(DISPLAYSURF, COLORS[color], (x*SIZE+1, y*SIZE+1, SIZE-2, SIZE-2))
def drawPixelRgb(x,y,r,g,b):
if PI:
if (x>=0 and y>=0):
if x%2==1:
pixels[x*PIXEL_Y+y] = (r,g,b)
else:
pixels[x*PIXEL_Y+(PIXEL_Y-1-y)] = (r,g,b)
else:
pygame.draw.rect(DISPLAYSURF, (r,g,b), (x*SIZE+1, y*SIZE+1, SIZE-2, SIZE-2))
def drawnumber(number,offsetx,offsety,color):
for x in range(0,3):
for y in range(0,5):
if clock_font[3*number + x]&mask[y]:
drawPixel(offsetx+x, offsety+y, color)
def drawnumberMAX7219(number, offsetx, offsety, draw1):
for x in range(0,3):
for y in range(0,5):
if clock_font[3*number+2- x]&mask[y]:
drawScorePixel(offsetx+x,offsety+y,1,draw1)
elif clock_font[3*number+2- x]&mask[y]:
drawScorePixel(offsetx+x,offsety+y,0,draw1)
def drawTetrisMAX7219(piece,offsetx,offsety,draw1):
for x in range(0,4):
for y in range(0,8):
if theTetrisFont[4*piece + x]&mask[y]:
drawScorePixel(offsetx+x,offsety+y,1,draw1)
elif theTetrisFont[4*piece + x]&mask[y]:
drawScorePixel(offsetx+x,offsety+y,0,draw1)
def drawScorePixel(x,y,on,draw):
if PI:
draw.point((31-x,y), fill= "white")
time.sleep(.01)
else:
pygame.draw.rect(DISPLAYSURF, COLORS[2], (64-2*x, 410+2*y,2,2))
def makeTextObjs(text, font, color):
surf = font.render(text, True, color)
return surf, surf.get_rect()
def scrollText(text):
if PI:
show_message(device,text,fill="white", font=proportional(CP437_FONT))
else:
titleSurf, titleRect = makeTextObjs(str(text), BASICFONT, TEXTCOLOR)
titleRect.center = (int(WINDOWWIDTH / 2) - 3, int(WINDOWHEIGHT / 2) - 3)
DISPLAYSURF.blit(titleSurf, titleRect)
def scoreText(score):
_score=score
if _score>999:
_score = 999
if PI:
with canvas(device) as draw:
for i in range(0,3):
text(draw, ((3-i)*8, 0), str(_score%10), fill="white")
_score //=10
else:
titleSurf, titleRect = makeTextObjs(str(_score), BASICFONT, TEXTCOLOR)
titleRect.center = (int(WINDOWWIDTH / 2) - 3, int(WINDOWHEIGHT / 2) - 3)
DISPLAYSURF.blit(titleSurf, titleRect)
def scoreTetris(score,level,nextpiece):
#if PI:
#device.clear()
_score=score
if _score>999999:
_score = 999999
if PI:
# one point per level
with canvas(device) as draw1:
for i in range(0,level):
drawScorePixel(i*2,7,1,draw1)
# score as 6 digit value
for i in range(0,6):
drawnumberMAX7219(_score%10,i*4,0,draw1)
_score //=10
# draw next piece
drawTetrisMAX7219(nextpiece,27,0,draw1)
if PI:
device.show()
def twoscoreText(score1,score2):
_score1=score1
_score2=score2
if _score1>9:
_score1 = 9
if _score2>9:
_score2 = 9
if PI:
with canvas(device) as draw:
text(draw, (0, 0), str(_score1), fill="white")
text(draw, (8, 0), ":", fill="white")
text(draw, (16, 0), str(_score2), fill="white")
text(draw, (24, 0), " ", fill="white")
else:
titleSurf, titleRect = makeTextObjs(str(_score1)+':'+str(_score2), BASICFONT, TEXTCOLOR)
titleRect.center = (int(WINDOWWIDTH / 2) - 3, int(WINDOWHEIGHT / 2) - 3)
DISPLAYSURF.blit(titleSurf, titleRect)
# program flow #
def terminate():
RUNNING = False
pygame.quit()
exit()
def checkForQuit():
for event in pygame.event.get(QUIT): # get all the QUIT events
terminate() # terminate if any QUIT events are present
for event in pygame.event.get(KEYUP): # get all the KEYUP events
if event.key == K_ESCAPE:
terminate() # terminate if the KEYUP event was for the Esc key
pygame.event.post(event) # put the other KEYUP event objects back
# tetris subroutines #
def calculateLevelAndFallFreq(lines):
# Based on the number of lines cleared, return the level the player is on and
# how many seconds pass until a falling piece falls one space.
level = int(lines / 10) + 1
# limit level to 10
if level >10:
level = 10
fallFreq = FALLING_SPEED - (level * 0.05)
if fallFreq <= 0.05:
fallFreq = 0.05
return level, fallFreq
def getNewPiece():
# return a random new piece in a random rotation and color
shape = random.choice(list(PIECES.keys()))
newPiece = {'shape': shape,
'rotation': random.randint(0, len(PIECES[shape]) - 1),
'x': int(BOARDWIDTH / 2) - int(TEMPLATEWIDTH / 2),
'y': -2, # start it above the board (i.e. less than 0)
'color': PIECES_ORDER.get(shape)}
return newPiece
def addToBoard(board, piece):
# fill in the board based on piece's location, shape, and rotation
for x in range(TEMPLATEWIDTH):
for y in range(TEMPLATEHEIGHT):
if PIECES[piece['shape']][piece['rotation']][y][x] != BLANK:
board[x + piece['x']][y + piece['y']] = piece['color']
def isOnBoard(x, y):
return x >= 0 and x < BOARDWIDTH and y < BOARDHEIGHT
def isValidPosition(board, piece, adjX=0, adjY=0):
# Return True if the piece is within the board and not colliding
for x in range(TEMPLATEWIDTH):
for y in range(TEMPLATEHEIGHT):
isAboveBoard = y + piece['y'] + adjY < 0
if isAboveBoard or PIECES[piece['shape']][piece['rotation']][y][x] == BLANK:
continue
if not isOnBoard(x + piece['x'] + adjX, y + piece['y'] + adjY):
return False
if board[x + piece['x'] + adjX][y + piece['y'] + adjY] != BLANK:
return False
return True
def isCompleteLine(board, y):
# Return True if the line is filled with boxes with no gaps.
for x in range(BOARDWIDTH):
if board[x][y] == BLANK:
return False
return True
def removeCompleteLines(board):
# Remove any completed lines on the board, move everything above them down, and return the number of complete lines.
numLinesRemoved = 0
y = BOARDHEIGHT - 1 # start y at the bottom of the board
while y >= 0:
if isCompleteLine(board, y):
# Remove the line and pull boxes down by one line.
for pullDownY in range(y, 0, -1):
for x in range(BOARDWIDTH):
board[x][pullDownY] = board[x][pullDownY-1]
# Set very top line to blank.
for x in range(BOARDWIDTH):
board[x][0] = BLANK
numLinesRemoved += 1
# Note on the next iteration of the loop, y is the same.
# This is so that if the line that was pulled down is also
# complete, it will be removed.
else:
y -= 1 # move on to check next row up
return numLinesRemoved
def drawBoard(matrix):
for i in range(0,BOARDWIDTH):
for j in range(0,BOARDHEIGHT):
drawPixel(i,j,matrix[i][j])
def getBlankBoard():
# create and return a new blank board data structure
board = []
for i in range(BOARDWIDTH):
board.append([BLANK] * BOARDHEIGHT)
return board
def drawPiece(piece, pixelx=None, pixely=None):
shapeToDraw = PIECES[piece['shape']][piece['rotation']]
if pixelx == None and pixely == None:
# if pixelx & pixely hasn't been specified, use the location stored in the piece data structure
pixelx=piece['x']
pixely=piece['y']
# draw each of the boxes that make up the piece
for x in range(TEMPLATEWIDTH):
for y in range(TEMPLATEHEIGHT):
if shapeToDraw[y][x] != BLANK:
drawPixel( pixelx+ x , pixely+y,piece['color'])
# snake subroutines #
def getRandomLocation(wormCoords):
while True:
x = random.randint(0, BOARDWIDTH - 1)
y = random.randint(0, BOARDHEIGHT - 1)
if {'x': x, 'y': y} in wormCoords:
print('no apples on worm')
else:
break
return {'x': x, 'y': y}
def drawWorm(wormCoords):
for coord in wormCoords:
x = coord['x']
y = coord['y']
drawPixel(x,y,1)
def drawApple(coord):
x = coord['x']
y = coord['y']
drawPixel(x,y,2)
# pong subroutines #
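# a paddle is drawn as three pixels centred on x; the ball is a single pixel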
def drawBar(x,y):
drawPixel(x-1,y,1)
drawPixel(x,y,1)
drawPixel(x+1,y,1)
def drawBall(x,y):
drawPixel(x,y,0)
if __name__ == '__main__':
main()
| [] | [] | ["SDL_VIDEODRIVER"] | [] | ["SDL_VIDEODRIVER"] | python | 1 | 0 | |
dpsctl/dpsctl.py
|
#!/usr/bin/env python
"""
The MIT License (MIT)
Copyright (c) 2017 Johan Kanflo (github.com/kanflo)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
This script is used to communicate with an OpenDPS device and can be used to
change all the settings possible with the buttons and dial on the device
directly. The device can be talked to via a serial interface or (if you added
an ESP8266) via wifi
dpsctl.py --help will provide enlightenment.
Oh, and if you get tired of specifying the comms interface (TTY or IP) all the
time, add it to the environment variable DPSIF.
"""
import argparse
import sys
import os
import socket
try:
import serial
except:
print("Missing dependency pyserial:")
print(" sudo pip%s install pyserial" % ("3" if sys.version_info.major == 3 else ""))
raise SystemExit()
import threading
import time
from uhej import uhej
from protocol import *
import uframe
import binascii
try:
from PyCRC.CRCCCITT import CRCCCITT
except:
print("Missing dependency pycrc:")
print(" sudo pip%s install pycrc" % ("3" if sys.version_info.major == 3 else ""))
raise SystemExit()
import json
parameters = []
"""
An abstract class that describes a communication interface
"""
class comm_interface(object):
_if_name = None
def __init__(self, if_name):
self._if_name = if_name
def open(self):
return False
def close(self):
return False
def write(self, bytes):
return False
def read(self):
return bytearray()
def name(self):
return self._if_name
"""
A class that describes a serial interface
"""
class tty_interface(comm_interface):
_port_handle = None
def __init__(self, if_name):
self._if_name = if_name
def open(self):
self._port_handle = serial.Serial(baudrate = 115200, timeout = 1.0)
self._port_handle.port = self._if_name
self._port_handle.open()
return True
def close(self):
self._port_handle.close()
self._port_handle = None
return True
def write(self, bytes):
self._port_handle.write(bytes)
return True
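# read() collects one framed reply: the buffer restarts on a _SOF byte and the call returns on _EOF or a serial timeout.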
def read(self):
bytes = bytearray()
sof = False
while True:
b = self._port_handle.read(1)
if not b: # timeout
break
b = ord(b)
if b == uframe._SOF:
bytes = bytearray()
sof = True
if sof:
bytes.append(b)
if b == uframe._EOF:
break
return bytes
"""
A class that describes a UDP interface
"""
class udp_interface(comm_interface):
_socket = None
def __init__(self, if_name):
self._if_name = if_name
def open(self):
try:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.settimeout(1.0)
except socket.error:
return False
return True
def close(self):
self._socket.close()
self._socket = None
return True
def write(self, bytes):
try:
self._socket.sendto(bytes, (self._if_name, 5005))
except socket.error as msg:
fail("%s (%d)" % (str(msg[0]), msg[1]))
return True
def read(self):
reply = bytearray()
try:
d = self._socket.recvfrom(1000)
reply = bytearray(d[0])
addr = d[1]
except socket.timeout:
pass
except socket.error:
pass
return reply
"""
Print error message and exit with error
"""
def fail(message):
print("Error: %s." % (message))
sys.exit(1)
"""
Return name of unit (must of course match unit_t in opendps/uui.h)
"""
def unit_name(unit):
if unit == 0: return "A"
if unit == 1: return "V"
if unit == 2: return "W"
if unit == 3: return "s"
if unit == 4: return "Hz"
return "unknown"
"""
Return SI prefix
"""
def prefix_name(prefix):
if prefix == -6: return "u"
if prefix == -3: return "m"
if prefix == -2: return "c"
if prefix == -1: return "d" # TODO: is this correct (deci)?
if prefix == 0: return ""
if prefix == 1: return "D" # TODO: is this correct (deca)?
if prefix == 2: return "hg"
if prefix == 3: return "k"
if prefix == 4: return "M"
return "e%d" % prefix
"""
Handle a response frame from the device.
Return a dictionary of interesting information.
"""
def handle_response(command, frame, args):
ret_dict = {}
resp_command = frame.get_frame()[0]
if resp_command & cmd_response:
resp_command ^= cmd_response
success = frame.get_frame()[1]
if resp_command != command:
print("Warning: sent command %02x, response was %02x." % (command, resp_command))
if resp_command != cmd_upgrade_start and resp_command != cmd_upgrade_data and not success:
fail("command failed according to device")
if args.json:
_json = {}
_json["cmd"] = resp_command;
_json["status"] = 1; # we're here aren't we?
if resp_command == cmd_ping:
print("Got pong from device")
elif resp_command == cmd_cal_report:
ret_dict = unpack_cal_report(frame)
elif resp_command == cmd_query:
data = unpack_query_response(frame)
enable_str = "on" if data['output_enabled'] else "temperature shutdown" if data['temp_shutdown'] == 1 else "off"
v_in_str = "%d.%02d" % (data['v_in']/1000, (data['v_in']%1000)/10)
v_out_str = "%d.%02d" % (data['v_out']/1000, (data['v_out']%1000)/10)
i_out_str = "%d.%03d" % (data['i_out']/1000, data['i_out']%1000)
if args.json:
_json = data
else:
print("%-10s : %s (%s)" % ('Func', data['cur_func'], enable_str))
for key, value in data['params'].iteritems():
print(" %-8s : %s" % (key, value))
print("%-10s : %s V" % ('V_in', v_in_str))
print("%-10s : %s V" % ('V_out', v_out_str))
print("%-10s : %s A" % ('I_out', i_out_str))
if 'temp1' in data:
print("%-10s : %.1f" % ('temp1', data['temp1']))
if 'temp2' in data:
print("%-10s : %.1f" % ('temp2', data['temp2']))
elif resp_command == cmd_upgrade_start:
# * DPS BL: [cmd_response | cmd_upgrade_start] [<upgrade_status_t>] [<chunk_size:16>]
cmd = frame.unpack8()
status = frame.unpack8()
chunk_size = frame.unpack16()
ret_dict["status"] = status
ret_dict["chunk_size"] = chunk_size
elif resp_command == cmd_upgrade_data:
cmd = frame.unpack8()
status = frame.unpack8()
ret_dict["status"] = status
elif resp_command == cmd_set_function:
cmd = frame.unpack8()
status = frame.unpack8()
if not status:
print("Function does not exist.") # Never reached due to status == 0
else:
print("Changed function.")
elif resp_command == cmd_list_functions:
cmd = frame.unpack8()
status = frame.unpack8()
if status == 0:
print("Error, failed to list available functions")
else:
functions = []
name = frame.unpack_cstr()
while name != "":
functions.append(name)
name = frame.unpack_cstr()
if args.json:
_json["functions"] = functions;
else:
if len(functions) == 0:
print("Selected OpenDPS supports no functions at all, which is quite weird when you think about it...")
elif len(functions) == 1:
print("Selected OpenDPS supports the %s function." % functions[0])
else:
temp = ", ".join(functions[:-1])
temp = "%s and %s" % (temp, functions[-1])
print("Selected OpenDPS supports the %s functions." % temp)
elif resp_command == cmd_set_parameters:
cmd = frame.unpack8()
status = frame.unpack8()
for p in args.parameter:
status = frame.unpack8()
parts = p.split("=")
# TODO: handle json output
print("%s: %s" % (parts[0], "ok" if status == 0 else "unknown parameter" if status == 1 else "out of range" if status == 2 else "unsupported parameter" if status == 3 else "unknown error %d" % (status)))
elif resp_command == cmd_list_parameters:
cmd = frame.unpack8()
status = frame.unpack8()
if status == 0:
print("Error, failed to list available parameters")
else:
cur_func = frame.unpack_cstr()
parameters = []
while not frame.eof():
parameter = {}
parameter['name'] = frame.unpack_cstr()
parameter['unit'] = unit_name(frame.unpack8())
parameter['prefix'] = prefix_name(frame.unpacks8())
parameters.append(parameter)
if args.json:
_json["current_function"] = cur_func;
_json["parameters"] = parameters
else:
if len(parameters) == 0:
print("Selected OpenDPS supports no parameters at all for the %s function" % (cur_func))
elif len(parameters) == 1:
print("Selected OpenDPS supports the %s parameter (%s%s) for the %s function." % (parameters[0]['name'], parameters[0]['prefix'], parameters[0]['unit'], cur_func))
else:
temp = ""
for p in parameters:
temp += p['name'] + ' (%s%s)' % (p['prefix'], p['unit']) + " "
print("Selected OpenDPS supports the %sparameters for the %s function." % (temp, cur_func))
elif resp_command == cmd_enable_output:
cmd = frame.unpack8()
status = frame.unpack8()
if status == 0:
print("Error, failed to enable/disable output.")
elif resp_command == cmd_temperature_report:
pass
elif resp_command == cmd_lock:
pass
else:
print("Unknown response %d from device." % (resp_command))
if args.json:
print(json.dumps(_json, indent=4, sort_keys=True))
return ret_dict
"""
Communicate with the DPS device according to the user's wishes
"""
def communicate(comms, frame, args):
bytes = frame.get_frame()
if not comms:
fail("no communication interface specified")
if not comms.open():
fail("could not open %s" % (comms.name()))
if args.verbose:
print("Communicating with %s" % (comms.name()))
print("TX %2d bytes [%s]" % (len(bytes), " ".join("%02x" % b for b in bytes)))
if not comms.write(bytes):
fail("write failed on %s" % (comms.name()))
resp = comms.read()
if len(resp) == 0:
fail("timeout talking to device %s" % (comms._if_name))
elif args.verbose:
print("RX %2d bytes [%s]\n" % (len(resp), " ".join("%02x" % b for b in resp)))
if not comms.close():
print("Warning: could not close %s" % (comms.name()))
f = uFrame()
res = f.set_frame(resp)
if res < 0:
fail("protocol error (%d)" % (res))
else:
return handle_response(frame.get_frame()[1], f, args)
"""
Act on the parsed command line arguments, communicating with the DPS device as needed
"""
def handle_commands(args):
if args.scan:
uhej_scan()
return
comms = create_comms(args)
if args.init:
communicate(comms,create_cmd(cmd_init),args)
if args.ping:
communicate(comms, create_cmd(cmd_ping), args)
if args.firmware:
run_upgrade(comms, args.firmware, args)
if args.lock:
communicate(comms, create_lock(1), args)
if args.unlock:
communicate(comms, create_lock(0), args)
if args.list_functions:
communicate(comms, create_cmd(cmd_list_functions), args)
if args.list_parameters:
communicate(comms, create_cmd(cmd_list_parameters), args)
if args.function:
communicate(comms, create_set_function(args.function), args)
if args.enable:
if args.enable == 'on' or args.enable == 'off':
communicate(comms, create_enable_output(args.enable), args)
else:
fail("enable is 'on' or 'off'")
if args.calibration_args:
payload = create_set_calibration(args.calibration_args)
if payload:
communicate(comms,payload,args)
else:
fail("malformatted parameters")
if args.calibration_report:
data = communicate(comms, create_cmd(cmd_cal_report), args)
print "Calibration Report:\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}".format(
"A_ADC_K",data['cal']['A_ADC_K'][0],
"A_ADC_C",data['cal']['A_ADC_C'][0],
"A_DAC_K",data['cal']['A_DAC_K'][0],
"A_DAC_C",data['cal']['A_DAC_C'][0],
"V_ADC_K",data['cal']['V_DAC_K'][0],
"V_ADC_C",data['cal']['V_DAC_C'][0],
"V_DAC_K",data['cal']['V_DAC_K'][0],
"V_DAC_C",data['cal']['V_DAC_C'][0],
"VIN_ADC_K",data['cal']['VIN_ADC_K'][0],
"VIN_ADC_C",data['cal']['VIN_ADC_C'][0],
"VIN_ADC",data['vin_adc'],
"VOUT_ADC",data['vout_adc'],
"IOUT_ADC",data['iout_adc'],
"IOUT_DAC",data['iout_dac'],
"VOUT_DAC",data['vout_dac'])
if args.parameter:
payload = create_set_parameter(args.parameter)
if payload:
communicate(comms, payload, args)
else:
fail("malformatted parameters")
if args.query:
communicate(comms, create_cmd(cmd_query), args)
if hasattr(args, 'temperature') and args.temperature:
communicate(comms, create_temperature(float(args.temperature)), args)
if args.calibrate:
do_calibration(comms,args)
"""
Return True if the parameter if_name is an IP address.
"""
def is_ip_address(if_name):
try:
socket.inet_aton(if_name)
return True
except socket.error:
return False
# Darn beautiful, from SO: https://stackoverflow.com/a/1035456
def chunk_from_file(filename, chunk_size):
with open(filename, "rb") as f:
while True:
chunk = f.read(chunk_size)
if chunk:
yield bytearray(chunk)
else:
break
"""
Run OpenDPS firmware upgrade
"""
def run_upgrade(comms, fw_file_name, args):
with open(fw_file_name, mode='rb') as file:
#crc = binascii.crc32(file.read()) % (1<<32)
content = file.read()
if content.encode('hex')[6:8] != "20" and not args.force:
fail("The firmware file does not seem valid, use --force to force upgrade")
crc = CRCCCITT().calculate(content)
chunk_size = 1024
ret_dict = communicate(comms, create_upgrade_start(chunk_size, crc), args)
if ret_dict["status"] == upgrade_continue:
if chunk_size != ret_dict["chunk_size"]:
print("Device selected chunk size %d" % (ret_dict["chunk_size"]))
counter = 0
for chunk in chunk_from_file(fw_file_name, chunk_size):
counter += len(chunk)
sys.stdout.write("\rDownload progress: %d%% " % (counter*1.0/len(content)*100.0) )
sys.stdout.flush()
# print(" %d bytes" % (counter))
ret_dict = communicate(comms, create_upgrade_data(chunk), args)
status = ret_dict["status"]
if status == upgrade_continue:
pass
elif status == upgrade_crc_error:
print("")
fail("device reported CRC error")
elif status == upgrade_erase_error:
print("")
fail("device reported erasing error")
elif status == upgrade_flash_error:
print("")
fail("device reported flashing error")
elif status == upgrade_overflow_error:
print("")
fail("device reported firmware overflow error")
elif status == upgrade_success:
print("")
else:
print("")
fail("device reported an unknown error (%d)" % status)
else:
fail("Device rejected firmware upgrade")
sys.exit(os.EX_OK)
"""
Run DPS calibration prompts
"""
def do_calibration(comms,args):
data = communicate(comms, create_cmd(cmd_cal_report), args)
print "Previous Calibration Constants:\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n \
{} = {}\r\n".format(
"A_ADC_K",data['cal']['A_ADC_K'][0],
"A_ADC_C",data['cal']['A_ADC_C'][0],
"A_DAC_K",data['cal']['A_DAC_K'][0],
"A_DAC_C",data['cal']['A_DAC_C'][0],
"V_ADC_K",data['cal']['V_DAC_K'][0],
"V_ADC_C",data['cal']['V_DAC_C'][0],
"V_DAC_K",data['cal']['V_DAC_K'][0],
"V_DAC_C",data['cal']['V_DAC_C'][0],
"VIN_ADC_K",data['cal']['VIN_ADC_K'][0],
"VIN_ADC_C",data['cal']['VIN_ADC_C'][0])
print "Please ensure nothing is hooked up to the DPS before starting calibration\r\n"
t = raw_input("Perform Input Voltage Calibration? (Y/n): ")
if t.lower() != 'n' or t.lower() == 'y':
print "You will need an accurate method of measuring voltage, Such as a multimeter."
print "You will need an accurate method of generating 2 stable input voltages."
print "please type results in in mV, EG 1V = 1000 mV"
print "###########################################################"
#Do First voltage hookup, We need the adc values, hopefully
#peoples computers assign consistent serial ports/IP's
print "Please hook up the first lower supply voltage to the DPS now \r\n \
ensuring that the serial connection is connected after boot"
v1 = float(raw_input("Type input voltage in mV: "))
data1 = communicate(comms, create_cmd(cmd_cal_report), args)
#Do second Voltage Hookup
print "Please hook up the Second higher supply voltage to the DPS now \r\n \
ensuring that the serial connection is connected after boot"
v2 = float(raw_input("Type input voltage in mV: "))
data2 = communicate(comms, create_cmd(cmd_cal_report), args)
#Math out the calibration constants
k_adc = (v1-v2)/(data1['vin_adc']-data2['vin_adc'])
c_adc = v1-k_adc*data1['vin_adc']
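# The two readings define a straight line from raw ADC counts to mV:
# k = (v1 - v2) / (adc1 - adc2), c = v1 - k * adc1. As a purely illustrative
# example, readings of 1000 counts at 6000 mV and 2000 counts at 12000 mV
# give k = 6.0 mV/count and c = 0 mV.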
args.calibration_args = ['VIN_ADC_K={}'.format(k_adc),
'VIN_ADC_C={}'.format(c_adc)]
payload = create_set_calibration(args.calibration_args)
if payload:
communicate(comms,payload,args)
print "Input Voltage Calibration Complete"
t = raw_input("Perform Output Voltage Calibration? (Y/n): ")
if t.lower() != 'n':
print "You will need an accurate method of measuring voltage, Such as a multimeter."
print "please type results in in mV, EG 1V = 1000 mV"
max_v = float(raw_input("DPS input voltage: "))
print "Cal Point 1, 10% of Max"
args.parameter = ["voltage={}".format(max_v*.1),"current=1000"]
payload = create_set_parameter(args.parameter)
#start with 10% of Max
if payload:
communicate(comms, payload, args)
communicate(comms, create_enable_output("on"), args)
c1 = float(raw_input("Measured Voltage: "))
c1_data = communicate(comms, create_cmd(cmd_cal_report), args)
print "Cal Point 2, 90% of Max"
args.parameter = ["voltage={}".format(max_v*.9)]
payload = create_set_parameter(args.parameter)
if payload:
communicate(comms, payload, args)
c2 = float(raw_input("Measured Voltage: "))
c2_data = communicate(comms, create_cmd(cmd_cal_report), args)
communicate(comms, create_enable_output("off"), args)
k_dac = (c1_data['vout_dac']-c2_data['vout_dac'])/(c1-c2)
c_dac = c1_data['vout_dac']-k_dac*c1
k_adc = (c1-c2)/(c1_data['vout_adc']-c2_data['vout_adc'])
c_adc = c1-k_adc*c1_data['vout_adc']
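# Same two-point fit as above, applied twice: the DAC constants map a requested
# output voltage (mV) to DAC counts, while the ADC constants map measured ADC
# counts back to mV; note the DAC fit is therefore inverted (counts per mV)
# relative to the ADC fit (mV per count).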
args.calibration_args = ['V_DAC_K={}'.format(k_dac),
'V_DAC_C={}'.format(c_dac),
'V_ADC_K={}'.format(k_adc),
'V_ADC_C={}'.format(c_adc)]
print args.calibration_args
payload = create_set_calibration(args.calibration_args)
if payload:
communicate(comms,payload,args)
print "Output Voltage Calibration Complete"
t = raw_input("Perform Output Current Calibration? (Y/n): ")
if t.lower() != 'n':
print "You will need an accurate method of measuring resistors, Such as a multimeter."
print "You will need 2 known loads, capable of handling the required power."
print "please type results in ohms"
max_v = float(raw_input("DPS input voltage in mV: "))
max_a = float(raw_input("DPS max Amperage in mA: "))
print "Cal Point, {}mV".format(max_v*.5)
communicate(comms, create_enable_output("off"), args)
c1 = float(raw_input("1st load Measured Resitance: "))
args.parameter = ["voltage={}".format(max_v*.5),"current={}".format(max_a)]
payload = create_set_parameter(args.parameter)
if payload:
communicate(comms, payload, args)
raw_input("Please hook up load to DPS, Then press enter")
communicate(comms, create_enable_output("on"), args)
time.sleep(0.5)  # wait for the DPS to settle
c1_data = communicate(comms, create_cmd(cmd_cal_report), args)
communicate(comms, create_enable_output("off"), args)
print "Cal Point 2, {}mV".format(max_v*.5)
c2 = float(raw_input("2nd load Measured Resitance: "))
args.parameter = ["voltage={}".format(max_v*.5),"current={}".format(max_a)]
payload = create_set_parameter(args.parameter)
if payload:
communicate(comms, payload, args)
raw_input("Please hook up load to DPS, Then press enter")
communicate(comms, create_enable_output("on"), args)
time.sleep(0.5)  # wait for the DPS to settle
c2_data = communicate(comms, create_cmd(cmd_cal_report), args)
communicate(comms, create_enable_output("off"), args)
k_adc = (c1-c2)/(c1_data['iout_adc']-c2_data['iout_adc'])
c_adc = c1-k_adc*c1_data['iout_adc']
args.calibration_args = ['I_ADC_K={}'.format(k_adc),
'I_ADC_C={}'.format(c_adc)]
payload = create_set_calibration(args.calibration_args)
if payload:
communicate(comms,payload,args)
print (k_adc, c_adc)
t = raw_input("Perform Constant Current Calibration? (Y/n): ")
if t.lower() != 'n':
pass
"""
Create and return a communications interface object, or None if no comms
interface was specified.
"""
def create_comms(args):
if_name = None
comms = None
if args.device:
if_name = args.device
elif 'DPSIF' in os.environ and len(os.environ['DPSIF']) > 0:
if_name = os.environ['DPSIF']
if if_name is not None:
if is_ip_address(if_name):
comms = udp_interface(if_name)
else:
comms = tty_interface(if_name)
else:
fail("no comms interface specified")
return comms
"""
The worker thread used by uHej for service discovery
"""
def uhej_worker_thread():
global discovery_list
global sock
while 1:
try:
data, addr = sock.recvfrom(1024)
port = addr[1]
addr = addr[0]
frame = bytearray(data)
try:
f = uhej.decode_frame(frame)
f["source"] = addr
f["port"] = port
types = ["UDP", "TCP", "mcast"]
if uhej.ANNOUNCE == f["frame_type"]:
for s in f["services"]:
key = "%s:%s:%s" % (f["source"], s["port"], s["type"])
if not key in discovery_list:
if s["service_name"] == "opendps":
discovery_list[key] = True # Keep track of which hosts we have seen
print("%s" % (f["source"]))
# print("%16s:%-5d %-8s %s" % (f["source"], s["port"], types[s["type"]], s["service_name"]))
except uhej.IllegalFrameException as e:
pass
except socket.error as e:
print 'Exception:', e
"""
Scan for OpenDPS devices on the local network
"""
def uhej_scan():
global discovery_list
global sock
discovery_list = {}
ANY = "0.0.0.0"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except AttributeError:
pass
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
sock.bind((ANY, uhej.MCAST_PORT))
thread = threading.Thread(target = uhej_worker_thread)
thread.daemon = True
thread.start()
sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(uhej.MCAST_GRP) + socket.inet_aton(ANY))
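# The membership request passed to IP_ADD_MEMBERSHIP is the packed ip_mreq
# structure: 4 bytes of multicast group address followed by 4 bytes of local
# interface address, hence the two concatenated inet_aton() results above.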
run_time_s = 6 # Run query for this many seconds
query_interval_s = 2 # Send query this often
last_query = 0
start_time = time.time()
while time.time() - start_time < run_time_s:
if time.time() - last_query > query_interval_s:
f = uhej.query(uhej.UDP, "*")
sock.sendto(f, (uhej.MCAST_GRP, uhej.MCAST_PORT))
last_query = time.time()
time.sleep(1)
num_found = len(discovery_list)
if num_found == 0:
print("No OpenDPS devices found")
elif num_found == 1:
print("1 OpenDPS device found")
else:
print("%d OpenDPS devices found" % (num_found))
"""
Ye olde main
"""
def main():
global args
testing = '--testing' in sys.argv
parser = argparse.ArgumentParser(description='Instrument an OpenDPS device')
parser.add_argument('-d', '--device', help="OpenDPS device to connect to. Can be a /dev/tty device or an IP number. If omitted, dpsctl.py will try the environment variable DPSIF", default='')
parser.add_argument('-S', '--scan', action="store_true", help="Scan for OpenDPS wifi devices")
parser.add_argument('-f', '--function', nargs='?', help="Set active function")
parser.add_argument('-F', '--list-functions', action='store_true', help="List available functions")
parser.add_argument('-p', '--parameter', nargs='+', help="Set function parameter <name>=<value>")
parser.add_argument('-P', '--list-parameters', action='store_true', help="List function parameters of active function")
parser.add_argument('-c', '--calibrate', action="store_true", help="Starts System Calibration")
parser.add_argument('-cr', '--calibration_report', action="store_true", help="Prints Calibration report")
parser.add_argument('-C', '--calibration_args', nargs='+', help="Set calibration constants <name>=<value>")
parser.add_argument('-o', '--enable', help="Enable output ('on' or 'off')")
parser.add_argument( '--ping', action='store_true', help="Ping device (causes screen to flash)")
parser.add_argument('-L', '--lock', action='store_true', help="Lock device keys")
parser.add_argument('-l', '--unlock', action='store_true', help="Unlock device keys")
parser.add_argument('-q', '--query', action='store_true', help="Query device settings and measurements")
parser.add_argument('-j', '--json', action='store_true', help="Output parameters as JSON")
parser.add_argument('-v', '--verbose', action='store_true', help="Verbose communications")
parser.add_argument('-U', '--upgrade', type=str, dest="firmware", help="Perform upgrade of OpenDPS firmware")
parser.add_argument('--init', action='store_true', help="Re-inits internal storage")
parser.add_argument( '--force', action='store_true', help="Force upgrade even if dpsctl complains about the firmware")
if testing:
parser.add_argument('-t', '--temperature', type=str, dest="temperature", help="Send temperature report (for testing)")
args, unknown = parser.parse_known_args()
try:
handle_commands(args)
except KeyboardInterrupt:
print("")
if __name__ == "__main__":
main()
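# Illustrative invocations (device paths, addresses and file names are
# examples only, not taken from the documentation):
# ./dpsctl.py -d /dev/ttyUSB0 --query
# ./dpsctl.py -d 192.168.0.42 -p voltage=3300 current=500
# ./dpsctl.py -d /dev/ttyUSB0 -U opendps.bin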
|
[] |
[] |
[
"DPSIF"
] |
[]
|
["DPSIF"]
|
python
| 1 | 0 | |
test/src/examples_complete_test.go
|
package test
import (
"math/rand"
"strconv"
"testing"
"os"
"github.com/gruntwork-io/terratest/modules/terraform"
"github.com/stretchr/testify/assert"
)
// Test the Terraform module in examples/complete using Terratest.
func TestExamplesComplete(t *testing.T) {
t.Parallel()
randId := strconv.Itoa(rand.Intn(100000))
attributes := []string{randId}
terraformOptions := &terraform.Options{
// The path to where our Terraform code is located
TerraformDir: "../../examples/complete",
Upgrade: true,
// Variables to pass to our Terraform code using -var-file options
VarFiles: []string{"fixtures.tfvars"},
// We always include a random attribute so that parallel tests
// and AWS resources do not interfere with each other
Vars: map[string]interface{}{
"attributes": attributes,
"tfc_agent_token": os.Getenv("TFC_AGENT_TOKEN"),
},
}
// At the end of the test, run `terraform destroy` to clean up any resources that were created
defer terraform.Destroy(t, terraformOptions)
// This will run `terraform init` and `terraform apply` and fail the test if there are any errors
terraform.InitAndApply(t, terraformOptions)
// Run `terraform output` to get the value of an output variable
service_account_name := terraform.Output(t, terraformOptions, "service_account_name")
namespace := terraform.Output(t, terraformOptions, "namespace")
// Verify we're getting back the outputs we expect
assert.Equal(t, "eg-ue2-test-example-"+randId, service_account_name)
assert.Equal(t, "tfc-agent", namespace)
}
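// A minimal sketch of how this test is typically run (flag values and the
// timeout are illustrative assumptions, not taken from this repository):
//
//	TFC_AGENT_TOKEN=<token> go test -v -timeout 30m -run TestExamplesComplete ./...
//
// The token is read via os.Getenv above, so it must be exported beforehand.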
|
[
"\"TFC_AGENT_TOKEN\""
] |
[] |
[
"TFC_AGENT_TOKEN"
] |
[]
|
["TFC_AGENT_TOKEN"]
|
go
| 1 | 0 | |
pkg/cmd/roachtest/cluster.go
|
// Copyright 2018 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.
package main
import (
"bufio"
"bytes"
"context"
gosql "database/sql"
"encoding/json"
"fmt"
"io"
"math/rand"
"net"
"net/url"
"os"
"os/exec"
"os/user"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
// "postgres" gosql driver
_ "github.com/lib/pq"
)
var (
local bool
artifacts string
cockroach string
encrypt bool
workload string
roachprod string
buildTag string
clusterName string
clusterID string
clusterWipe bool
username = os.Getenv("ROACHPROD_USER")
zonesF string
teamCity bool
testingSkipValidation bool
)
func ifLocal(trueVal, falseVal string) string {
if local {
return trueVal
}
return falseVal
}
func filepathAbs(path string) (string, error) {
path, err := filepath.Abs(path)
if err != nil {
return "", errors.Wrap(err, "")
}
return path, nil
}
func findBinary(binary, defValue string) (string, error) {
if binary == "" {
binary = defValue
}
// Check to see if binary exists and is a regular file and executable.
if fi, err := os.Stat(binary); err == nil && fi.Mode().IsRegular() && (fi.Mode()&0111) != 0 {
return filepathAbs(binary)
}
// Find the binary to run and translate it to an absolute path. First, look
// for the binary in PATH.
path, err := exec.LookPath(binary)
if err != nil {
if strings.HasPrefix(binary, "/") {
return "", errors.Wrap(err, "")
}
// We're unable to find the binary in PATH and "binary" is a relative path:
// look in the cockroach repo.
gopath := os.Getenv("GOPATH")
if gopath == "" {
return "", errors.Wrap(err, "")
}
var binSuffix string
if !local {
binSuffix = ".docker_amd64"
}
dirs := []string{
"/src/github.com/cockroachdb/cockroach/",
"/src/github.com/cockroachdb/cockroach/bin" + binSuffix,
filepath.Join(os.ExpandEnv("PWD"), "bin"+binSuffix),
}
for _, dir := range dirs {
path = filepath.Join(gopath, dir, binary)
var err2 error
path, err2 = exec.LookPath(path)
if err2 == nil {
return filepathAbs(path)
}
}
return "", errors.Wrap(err, "")
}
return filepathAbs(path)
}
func initBinaries() {
// If we're running against an existing "local" cluster, force the local flag
// to true in order to get the "local" test configurations.
if clusterName == "local" {
local = true
}
cockroachDefault := "cockroach"
if !local {
cockroachDefault = "cockroach-linux-2.6.32-gnu-amd64"
}
var err error
cockroach, err = findBinary(cockroach, cockroachDefault)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
os.Exit(1)
}
workload, err = findBinary(workload, "workload")
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
os.Exit(1)
}
roachprod, err = findBinary(roachprod, "roachprod")
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
os.Exit(1)
}
}
var clusters = map[*cluster]struct{}{}
var clustersMu syncutil.Mutex
var interrupted int32
func destroyAllClusters() {
atomic.StoreInt32(&interrupted, 1)
// Fire off a goroutine to destroy all of the clusters.
done := make(chan struct{})
go func() {
defer close(done)
var wg sync.WaitGroup
clustersMu.Lock()
wg.Add(len(clusters))
for c := range clusters {
go func(c *cluster) {
defer wg.Done()
c.destroy(context.Background())
}(c)
}
clusters = map[*cluster]struct{}{}
clustersMu.Unlock()
wg.Wait()
}()
// Wait up to 5 min for clusters to be destroyed. This can take a while and
// we don't want to rush it.
select {
case <-done:
case <-time.After(5 * time.Minute):
}
}
func registerCluster(c *cluster) {
clustersMu.Lock()
clusters[c] = struct{}{}
clustersMu.Unlock()
}
func unregisterCluster(c *cluster) bool {
clustersMu.Lock()
_, exists := clusters[c]
if exists {
delete(clusters, c)
}
clustersMu.Unlock()
return exists
}
func execCmd(ctx context.Context, l *logger, args ...string) error {
l.printf("> %s\n", strings.Join(args, " "))
cmd := exec.CommandContext(ctx, args[0], args[1:]...)
cmd.Stdout = l.stdout
cmd.Stderr = l.stderr
if err := cmd.Run(); err != nil {
return errors.Wrapf(err, `%s`, strings.Join(args, ` `))
}
return nil
}
func execCmdWithBuffer(ctx context.Context, l *logger, args ...string) ([]byte, error) {
l.printf("> %s\n", strings.Join(args, " "))
cmd := exec.CommandContext(ctx, args[0], args[1:]...)
out, err := cmd.CombinedOutput()
if err != nil {
return out, errors.Wrapf(err, `%s`, strings.Join(args, ` `))
}
return out, nil
}
func makeGCEClusterName(testName, id, username string) string {
name := fmt.Sprintf("%s-%s-%s", username, id, testName)
name = strings.ToLower(name)
name = regexp.MustCompile(`[^-a-z0-9]+`).ReplaceAllString(name, "-")
name = regexp.MustCompile(`-+`).ReplaceAllString(name, "-")
return name
}
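// As an illustrative (made-up) example, makeGCEClusterName("version/mixed", "1526", "Peter")
// yields "peter-1526-version-mixed": the name is lowercased, runs of characters outside
// [-a-z0-9] are replaced by a dash, and repeated dashes are collapsed.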
func makeClusterName(t testI) string {
if clusterName != "" {
return clusterName
}
if local {
return "local"
}
if username == "" {
usr, err := user.Current()
if err != nil {
panic(fmt.Sprintf("user.Current: %s", err))
}
username = usr.Username
}
id := clusterID
if id == "" {
id = fmt.Sprintf("%d", timeutil.Now().Unix())
}
return makeGCEClusterName(t.Name(), id, username)
}
type testI interface {
Name() string
Fatal(args ...interface{})
Fatalf(format string, args ...interface{})
Failed() bool
}
// TODO(tschottdorf): Consider using a more idiomatic approach in which options
// act upon a config struct:
// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
type option interface {
option()
}
type nodeSelector interface {
option
merge(nodeListOption) nodeListOption
}
type nodeListOption []int
func (n nodeListOption) option() {}
func (n nodeListOption) merge(o nodeListOption) nodeListOption {
t := make(nodeListOption, 0, len(n)+len(o))
t = append(t, n...)
t = append(t, o...)
sort.Ints([]int(t))
r := t[:1]
for i := 1; i < len(t); i++ {
if r[len(r)-1] != t[i] {
r = append(r, t[i])
}
}
return r
}
func (n nodeListOption) randNode() nodeListOption {
return nodeListOption{n[rand.Intn(len(n))]}
}
func (n nodeListOption) String() string {
if len(n) == 0 {
return ""
}
var buf bytes.Buffer
buf.WriteByte(':')
appendRange := func(start, end int) {
if buf.Len() > 1 {
buf.WriteByte(',')
}
if start == end {
fmt.Fprintf(&buf, "%d", start)
} else {
fmt.Fprintf(&buf, "%d-%d", start, end)
}
}
start, end := -1, -1
for _, i := range n {
if start != -1 && end == i-1 {
end = i
continue
}
if start != -1 {
appendRange(start, end)
}
start, end = i, i
}
if start != -1 {
appendRange(start, end)
}
return buf.String()
}
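// As an illustration of the range compression above, a hypothetical
// nodeListOption{1, 2, 3, 5} renders as ":1-3,5", which makeNodes below
// appends to the cluster name when addressing a subset of nodes.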
type nodeSpec struct {
Count int
CPUs int
MachineType string
Zones string
Geo bool
Lifetime time.Duration
}
func (s *nodeSpec) args() []string {
var args []string
if s.MachineType != "" {
args = append(args, "--gce-machine-type="+s.MachineType)
}
if s.Zones != "" {
args = append(args, "--gce-zones="+s.Zones)
}
if s.Geo {
args = append(args, "--geo")
}
if s.Lifetime != 0 {
args = append(args, "--lifetime="+s.Lifetime.String())
}
return args
}
func (s *nodeSpec) expiration() time.Time {
l := s.Lifetime
if l == 0 {
l = 12 * time.Hour
}
return timeutil.Now().Add(l)
}
type createOption interface {
apply(spec *nodeSpec)
}
type nodeCPUOption int
func (o nodeCPUOption) apply(spec *nodeSpec) {
spec.CPUs = int(o)
if !local {
// TODO(peter): This is awkward: below 16 cpus, use n1-standard so that the
// machines have a decent amount of RAM. We could use custom machine
// configurations, but the rules for the amount of RAM per CPU need to be
// determined (you can't request any arbitrary amount of RAM).
if spec.CPUs < 16 {
spec.MachineType = fmt.Sprintf("n1-standard-%d", spec.CPUs)
} else {
spec.MachineType = fmt.Sprintf("n1-highcpu-%d", spec.CPUs)
}
}
}
// cpu is a node option which requests nodes with the specified number of CPUs.
func cpu(n int) nodeCPUOption {
return nodeCPUOption(n)
}
type nodeGeoOption struct{}
func (o nodeGeoOption) apply(spec *nodeSpec) {
spec.Geo = true
}
// geo is a node option which requests geo-distributed nodes.
func geo() nodeGeoOption {
return nodeGeoOption{}
}
type nodeZonesOption string
func (o nodeZonesOption) apply(spec *nodeSpec) {
spec.Zones = string(o)
}
// zones is a node option which requests nodes in specific zones. Note that
// this overrides the --zones flag and is useful for tests that require
// running in specific zones.
func zones(s string) nodeZonesOption {
return nodeZonesOption(s)
}
type nodeLifetimeOption time.Duration
func (o nodeLifetimeOption) apply(spec *nodeSpec) {
spec.Lifetime = time.Duration(o)
}
// nodes is a helper method for creating a []nodeSpec given a node count and
// options.
func nodes(count int, opts ...createOption) []nodeSpec {
spec := nodeSpec{
Count: count,
}
cpu(4).apply(&spec)
for _, o := range opts {
o.apply(&spec)
}
return []nodeSpec{spec}
}
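// Illustrative calls (not taken from any test in this file): nodes(4) yields a
// single nodeSpec with Count=4 and the cpu(4) default applied, while
// nodes(6, cpu(16), geo()) requests six geo-distributed 16-CPU nodes.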
// cluster provides an interface for interacting with a set of machines,
// starting and stopping a cockroach cluster on a subset of those machines, and
// running load generators and other operations on the machines.
//
// A cluster is intended to be used only by a single test. Sharing of a cluster
// between a test and a subtest is currently disallowed (see cluster.assertT). A
// cluster is safe for concurrent use by multiple goroutines.
type cluster struct {
name string
nodes int
status func(...interface{})
t testI
l *logger
destroyed chan struct{}
expiration time.Time
}
// TODO(peter): Should set the lifetime of clusters to 2x the expected test
// duration. The default lifetime of 12h is too long for some tests and will be
// too short for others.
//
// TODO(peter): The nodes spec should really contain a nodeSpec per node. Need
// to figure out how to make that work with `roachprod create`. Perhaps one
// invocation of `roachprod create` per unique node-spec. Are there guarantees
// we're making here about the mapping of nodeSpecs to node IDs?
func newCluster(ctx context.Context, t testI, nodes []nodeSpec) *cluster {
if atomic.LoadInt32(&interrupted) == 1 {
t.Fatal("interrupted")
}
switch {
case len(nodes) == 0:
return nil
case len(nodes) > 1:
// TODO(peter): Need a motivating test that has different specs per node.
t.Fatalf("TODO(peter): unsupported nodes spec: %v", nodes)
}
l, err := rootLogger(t.Name())
if err != nil {
t.Fatal(err)
}
c := &cluster{
name: makeClusterName(t),
nodes: nodes[0].Count,
status: func(...interface{}) {},
t: t,
l: l,
destroyed: make(chan struct{}),
expiration: nodes[0].expiration(),
}
if impl, ok := t.(*test); ok {
c.status = impl.Status
}
registerCluster(c)
if c.name != clusterName {
sargs := []string{roachprod, "create", c.name, "-n", fmt.Sprint(c.nodes)}
sargs = append(sargs, nodes[0].args()...)
if !local && zonesF != "" && nodes[0].Zones == "" {
sargs = append(sargs, "--gce-zones="+zonesF)
}
c.status("creating cluster")
if err := execCmd(ctx, l, sargs...); err != nil {
t.Fatal(err)
return nil
}
} else if !testingSkipValidation {
// Perform validation on the existing cluster.
c.status("checking that existing cluster matches spec")
sargs := []string{roachprod, "list", c.name, "--json"}
out, err := execCmdWithBuffer(ctx, l, sargs...)
if err != nil {
t.Fatal(err)
return nil
}
// jsonOutput matches the structure of the output from `roachprod list`
// when in json mode.
type jsonOutput struct {
Clusters map[string]struct {
VMs []struct {
MachineType string `json:"machine_type"`
} `json:"vms"`
} `json:"clusters"`
}
var details jsonOutput
if err := json.Unmarshal(out, &details); err != nil {
t.Fatal(err)
return nil
}
cDetails, ok := details.Clusters[c.name]
if !ok {
t.Fatalf("cluster %q not found", c.name)
return nil
}
if len(cDetails.VMs) < c.nodes {
t.Fatalf("cluster has %d nodes, test requires at least %d", len(cDetails.VMs), c.nodes)
return nil
}
if typ := nodes[0].MachineType; typ != "" {
for i, vm := range cDetails.VMs {
if vm.MachineType != typ {
t.Fatalf("node %d has machine type %s, test requires %s", i, vm.MachineType, typ)
return nil
}
}
}
c.status("stopping cluster")
c.Stop(ctx, c.All())
if clusterWipe {
c.Wipe(ctx, c.All())
} else {
l.printf("skipping cluster wipe\n")
}
}
c.status("running test")
return c
}
// clone creates a new cluster object that refers to the same cluster as the
// receiver, but is associated with the specified test.
func (c *cluster) clone(t *test) *cluster {
l, err := rootLogger(t.Name())
if err != nil {
t.Fatal(err)
}
return &cluster{
name: c.name,
nodes: c.nodes,
status: t.Status,
t: t,
l: l,
expiration: c.expiration,
}
}
// All returns a node list containing all of the nodes in the cluster.
func (c *cluster) All() nodeListOption {
return c.Range(1, c.nodes)
}
// Range returns a node list containing the nodes [begin,end].
func (c *cluster) Range(begin, end int) nodeListOption {
if begin < 1 || end > c.nodes {
c.t.Fatalf("invalid node range: %d-%d (1-%d)", begin, end, c.nodes)
}
r := make(nodeListOption, 0, 1+end-begin)
for i := begin; i <= end; i++ {
r = append(r, i)
}
return r
}
// Node returns a node list containing only node i.
func (c *cluster) Node(i int) nodeListOption {
return c.Range(i, i)
}
func (c *cluster) FetchLogs(ctx context.Context) {
// Don't hang forever if we can't fetch the logs.
execCtx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
c.status("retrieving logs")
_ = execCmd(execCtx, c.l, roachprod, "get", c.name, "logs",
filepath.Join(artifacts, teamCityNameEscape(c.t.Name()), "logs"))
}
func (c *cluster) Destroy(ctx context.Context) {
if c == nil {
return
}
// Only destroy the cluster if it exists in the cluster registry. The cluster
// may not exist if the test was interrupted and the teardown machinery is
// destroying all clusters. (See destroyAllClusters).
if exists := unregisterCluster(c); exists {
c.destroy(ctx)
}
// If the test was interrupted, another goroutine is destroying the cluster
// and we need to wait for that to finish before closing the
// logger. Otherwise, the destruction can get interrupted due to closing the
// stdout/stderr of the roachprod command.
<-c.destroyed
c.l.close()
}
func (c *cluster) destroy(ctx context.Context) {
defer close(c.destroyed)
if clusterWipe {
if c.name != clusterName {
c.status("destroying cluster")
if err := execCmd(ctx, c.l, roachprod, "destroy", c.name); err != nil {
c.l.errorf("%s", err)
}
} else {
c.status("wiping cluster")
if err := execCmd(ctx, c.l, roachprod, "wipe", c.name); err != nil {
c.l.errorf("%s", err)
}
}
} else {
c.l.printf("skipping cluster wipe\n")
}
}
// Run a command with output redirected to the logs instead of to os.Stdout
// (which doesn't go anywhere I've been able to find). Don't use this if you're
// going to call cmd.CombinedOutput or cmd.Output.
func (c *cluster) LoggedCommand(ctx context.Context, arg0 string, args ...string) *exec.Cmd {
cmd := exec.CommandContext(ctx, arg0, args...)
cmd.Stdout = c.l.stdout
cmd.Stderr = c.l.stderr
return cmd
}
// Put a local file to all of the machines in a cluster.
func (c *cluster) Put(ctx context.Context, src, dest string, opts ...option) {
if c.t.Failed() {
// If the test has failed, don't try to limp along.
return
}
if atomic.LoadInt32(&interrupted) == 1 {
c.t.Fatal("interrupted")
}
c.status("uploading binary")
err := execCmd(ctx, c.l, roachprod, "put", c.makeNodes(opts...), src, dest)
if err != nil {
c.t.Fatal(err)
}
}
// GitClone clones a git repo from src into dest and checks out
// origin's version of the given branch. The src, dest, and branch
// arguments must not contain shell special characters.
func (c *cluster) GitClone(ctx context.Context, src, dest, branch string, node nodeListOption) {
c.Run(ctx, node, "bash", "-e", "-c", fmt.Sprintf(`'
if ! test -d %s; then
git clone -b %s --depth 1 %s %s
else
cd %s
git fetch origin
git checkout origin/%s
fi
'`, dest,
branch, src, dest,
dest,
branch))
}
// startArgs specifies extra arguments that are passed to `roachprod` during `c.Start`.
func startArgs(extraArgs ...string) option {
return roachprodArgOption(extraArgs)
}
// startArgsDontEncrypt will pass '--encrypt=false' to roachprod regardless of the
// --encrypt flag on roachtest. This is useful for tests that cannot pass with
// encryption enabled.
var startArgsDontEncrypt = startArgs("--encrypt=false")
// racks is an option which specifies the number of racks to partition the nodes
// into.
func racks(n int) option {
return startArgs(fmt.Sprintf("--racks=%d", n))
}
// stopArgs specifies extra arguments that are passed to `roachprod` during `c.Stop`.
func stopArgs(extraArgs ...string) option {
return roachprodArgOption(extraArgs)
}
type roachprodArgOption []string
func (o roachprodArgOption) option() {}
func roachprodArgs(opts []option) []string {
var args []string
for _, opt := range opts {
a, ok := opt.(roachprodArgOption)
if !ok {
continue
}
args = append(args, ([]string)(a)...)
}
return args
}
// Start cockroach nodes on a subset of the cluster. The nodes parameter can
// either be a specific node, empty (to indicate all nodes), or a pair of nodes
// indicating a range.
func (c *cluster) Start(ctx context.Context, opts ...option) {
if c.t.Failed() {
// If the test has failed, don't try to limp along.
return
}
if atomic.LoadInt32(&interrupted) == 1 {
c.t.Fatal("interrupted")
}
c.status("starting cluster")
defer c.status()
args := []string{
roachprod,
"start",
}
args = append(args, roachprodArgs(opts)...)
args = append(args, c.makeNodes(opts...))
if encrypt && !argExists(args, "--encrypt") {
args = append(args, "--encrypt")
}
if local {
// This avoids annoying firewall prompts on macOS.
args = append(args, "--args", "--listen-addr=127.0.0.1")
}
if err := execCmd(ctx, c.l, args...); err != nil {
c.t.Fatal(err)
}
}
func argExists(args []string, target string) bool {
for _, arg := range args {
if arg == target || strings.HasPrefix(arg, target+"=") {
return true
}
}
return false
}
// Stop cockroach nodes running on a subset of the cluster. See cluster.Start()
// for a description of the nodes parameter.
func (c *cluster) Stop(ctx context.Context, opts ...option) {
if c.t.Failed() {
// If the test has failed, don't try to limp along.
return
}
args := []string{
roachprod,
"stop",
}
args = append(args, roachprodArgs(opts)...)
args = append(args, c.makeNodes(opts...))
if atomic.LoadInt32(&interrupted) == 1 {
c.t.Fatal("interrupted")
}
c.status("stopping cluster")
defer c.status()
err := execCmd(ctx, c.l, args...)
if err != nil {
c.t.Fatal(err)
}
}
// Wipe a subset of the nodes in a cluster. See cluster.Start() for a
// description of the nodes parameter.
func (c *cluster) Wipe(ctx context.Context, opts ...option) {
if c.t.Failed() {
// If the test has failed, don't try to limp along.
return
}
if atomic.LoadInt32(&interrupted) == 1 {
c.t.Fatal("interrupted")
}
c.status("wiping cluster")
defer c.status()
err := execCmd(ctx, c.l, roachprod, "wipe", c.makeNodes(opts...))
if err != nil {
c.t.Fatal(err)
}
}
// Run a command on the specified node.
func (c *cluster) Run(ctx context.Context, node nodeListOption, args ...string) {
err := c.RunL(ctx, c.l, node, args...)
if err != nil {
c.t.Fatal(err)
}
}
// Reformat the disk on the specified node.
func (c *cluster) Reformat(ctx context.Context, node nodeListOption, args ...string) {
err := execCmd(ctx, c.l,
append([]string{roachprod, "reformat", c.makeNodes(node), "--"}, args...)...)
if err != nil {
c.t.Fatal(err)
}
}
// Install a package in a node
func (c *cluster) Install(ctx context.Context, node nodeListOption, args ...string) {
err := execCmd(ctx, c.l,
append([]string{roachprod, "install", c.makeNodes(node), "--"}, args...)...)
if err != nil {
c.t.Fatal(err)
}
}
// RunE runs a command on the specified node, returning an error.
func (c *cluster) RunE(ctx context.Context, node nodeListOption, args ...string) error {
return c.RunL(ctx, c.l, node, args...)
}
// RunL runs a command on the specified node, returning an error.
func (c *cluster) RunL(ctx context.Context, l *logger, node nodeListOption, args ...string) error {
if err := c.preRunChecks(); err != nil {
return err
}
return execCmd(ctx, l,
append([]string{roachprod, "run", c.makeNodes(node), "--"}, args...)...)
}
// preRunChecks runs checks to see if it makes sense to run a command.
func (c *cluster) preRunChecks() error {
if c.t.Failed() {
// If the test has failed, don't try to limp along.
return errors.New("test already failed")
}
if atomic.LoadInt32(&interrupted) == 1 {
return errors.New("interrupted")
}
return nil
}
// RunWithBuffer runs a command on the specified node, returning the resulting combined stderr
// and stdout or an error.
func (c *cluster) RunWithBuffer(
ctx context.Context, l *logger, node nodeListOption, args ...string,
) ([]byte, error) {
if err := c.preRunChecks(); err != nil {
return nil, err
}
return execCmdWithBuffer(ctx, l,
append([]string{roachprod, "run", c.makeNodes(node), "--"}, args...)...)
}
// RemountNoBarrier remounts the cluster's local SSDs with the nobarrier option.
func (c *cluster) RemountNoBarrier(ctx context.Context) {
c.Run(ctx, c.All(),
"sudo", "umount", "/mnt/data1", ";",
"sudo", "mount", "-o", "discard,defaults,nobarrier",
"/dev/disk/by-id/google-local-ssd-0", "/mnt/data1")
}
// pgURL returns the Postgres endpoint for the specified node. It accepts a flag
// specifying whether the URL should include the node's internal or external IP
// address. In general, inter-cluster communication should use internal IPs,
// and communication from a test driver to nodes in a cluster should use
// external IPs.
func (c *cluster) pgURL(ctx context.Context, node nodeListOption, external bool) []string {
args := []string{`pgurl`}
if external {
args = append(args, `--external`)
}
args = append(args, c.makeNodes(node))
cmd := exec.CommandContext(ctx, roachprod, args...)
output, err := cmd.Output()
if err != nil {
fmt.Println(strings.Join(cmd.Args, ` `))
c.t.Fatal(err)
}
urls := strings.Split(strings.TrimSpace(string(output)), " ")
for i := range urls {
urls[i] = strings.Trim(urls[i], "'")
}
return urls
}
// InternalPGUrl returns the internal Postgres endpoint for the specified nodes.
func (c *cluster) InternalPGUrl(ctx context.Context, node nodeListOption) []string {
return c.pgURL(ctx, node, false /* external */)
}
// Silence unused warning.
var _ = (&cluster{}).InternalPGUrl
// ExternalPGUrl returns the external Postgres endpoint for the specified nodes.
func (c *cluster) ExternalPGUrl(ctx context.Context, node nodeListOption) []string {
return c.pgURL(ctx, node, true /* external */)
}
func addrToAdminUIAddr(c *cluster, addr string) string {
host, port, err := net.SplitHostPort(addr)
if err != nil {
c.t.Fatal(err)
}
webPort, err := strconv.Atoi(port)
if err != nil {
c.t.Fatal(err)
}
// Roachprod sets the Admin UI's port to the node's port + 1.
return fmt.Sprintf("%s:%d", host, webPort+1)
}
func urlToAddr(c *cluster, pgURL string) string {
u, err := url.Parse(pgURL)
if err != nil {
c.t.Fatal(err)
}
return u.Host
}
func addrToIP(c *cluster, addr string) string {
host, _, err := net.SplitHostPort(addr)
if err != nil {
c.t.Fatal(err)
}
return host
}
// InternalAdminUIAddr returns the internal Admin UI address in the form host:port
// for the specified node.
func (c *cluster) InternalAdminUIAddr(ctx context.Context, node nodeListOption) []string {
var addrs []string
for _, u := range c.InternalAddr(ctx, node) {
addrs = append(addrs, addrToAdminUIAddr(c, u))
}
return addrs
}
// InternalAddr returns the internal address in the form host:port for the
// specified nodes.
func (c *cluster) InternalAddr(ctx context.Context, node nodeListOption) []string {
var addrs []string
for _, u := range c.pgURL(ctx, node, false /* external */) {
addrs = append(addrs, urlToAddr(c, u))
}
return addrs
}
// InternalIP returns the internal IP addresses for the specified nodes.
func (c *cluster) InternalIP(ctx context.Context, node nodeListOption) []string {
var ips []string
for _, addr := range c.InternalAddr(ctx, node) {
ips = append(ips, addrToIP(c, addr))
}
return ips
}
// ExternalAddr returns the external address in the form host:port for the
// specified node.
func (c *cluster) ExternalAddr(ctx context.Context, node nodeListOption) []string {
var addrs []string
for _, u := range c.pgURL(ctx, node, true /* external */) {
addrs = append(addrs, urlToAddr(c, u))
}
return addrs
}
// ExternalIP returns the external IP addresses for the specified node.
func (c *cluster) ExternalIP(ctx context.Context, node nodeListOption) []string {
var ips []string
for _, addr := range c.ExternalAddr(ctx, node) {
ips = append(ips, addrToIP(c, addr))
}
return ips
}
// Silence unused warning.
var _ = (&cluster{}).ExternalIP
// Conn returns a SQL connection to the specified node.
func (c *cluster) Conn(ctx context.Context, node int) *gosql.DB {
url := c.ExternalPGUrl(ctx, c.Node(node))[0]
db, err := gosql.Open("postgres", url)
if err != nil {
c.t.Fatal(err)
}
return db
}
func (c *cluster) makeNodes(opts ...option) string {
var r nodeListOption
for _, o := range opts {
if s, ok := o.(nodeSelector); ok {
r = s.merge(r)
}
}
return c.name + r.String()
}
func (c *cluster) isLocal() bool {
return c.name == "local"
}
func getDiskUsageInByte(ctx context.Context, c *cluster, nodeIdx int) (int, error) {
out, err := c.RunWithBuffer(ctx, c.l, c.Node(nodeIdx), fmt.Sprint("du -sk {store-dir} | grep -oE '^[0-9]+'"))
if err != nil {
return 0, err
}
str := string(out)
// We need this check because sometimes the first line of the roachprod output is a warning
// about adding an ip to a list of known hosts.
if strings.Contains(str, "Warning") {
str = strings.Split(str, "\n")[1]
}
size, err := strconv.Atoi(strings.TrimSpace(str))
if err != nil {
return 0, err
}
return size * 1024, nil
}
type monitor struct {
t testI
l *logger
nodes string
ctx context.Context
cancel func()
g *errgroup.Group
expDeaths int32 // atomically
}
func newMonitor(ctx context.Context, c *cluster, opts ...option) *monitor {
m := &monitor{
t: c.t,
l: c.l,
nodes: c.makeNodes(opts...),
}
m.ctx, m.cancel = context.WithCancel(ctx)
m.g, m.ctx = errgroup.WithContext(m.ctx)
return m
}
// ExpectDeath lets the monitor know that a node is about to be killed, and that
// this should be ignored.
func (m *monitor) ExpectDeath() {
m.ExpectDeaths(1)
}
// ExpectDeaths lets the monitor know that a specific number of nodes are about
// to be killed, and that they should be ignored.
func (m *monitor) ExpectDeaths(count int32) {
atomic.AddInt32(&m.expDeaths, count)
}
func (m *monitor) Go(fn func(context.Context) error) {
m.g.Go(func() error {
if impl, ok := m.t.(*test); ok {
// Automatically clear the worker status message when the goroutine exits.
defer impl.Status()
}
return fn(m.ctx)
})
}
func (m *monitor) Wait() {
if m.t.Failed() {
// If the test has failed, don't try to limp along.
return
}
err := m.wait(roachprod, "monitor", m.nodes)
if err != nil {
m.t.Fatal(err)
}
}
func (m *monitor) wait(args ...string) error {
// It is surprisingly difficult to get the cancelation semantics exactly
// right. We need to watch for the "workers" group (m.g) to finish, or for
// the monitor command to emit an unexpected node failure, or for the monitor
// command itself to exit. We want to capture whichever error happens first
// and then cancel the other goroutines. This ordering prevents the usage of
// an errgroup.Group for the goroutines below. Consider:
//
// g, _ := errgroup.WithContext(m.ctx)
// g.Go(func(context.Context) error {
// defer m.cancel()
// return m.g.Wait()
// })
//
// Now consider what happens when an error is returned. Before the error
// reaches the errgroup, we invoke the cancelation closure which can cause
// the other goroutines to wake up and perhaps race and set the errgroup
// error first.
//
// The solution is to implement our own errgroup mechanism here which allows
// us to set the error before performing the cancelation.
var errOnce sync.Once
var err error
setErr := func(e error) {
if e != nil {
errOnce.Do(func() {
err = e
})
}
}
// 1. The first goroutine waits for the worker errgroup to exit.
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer func() {
m.cancel()
wg.Done()
}()
setErr(m.g.Wait())
}()
// 2. The second goroutine forks/execs the monitoring command.
pipeR, pipeW := io.Pipe()
wg.Add(1)
go func() {
defer func() {
_ = pipeW.Close()
wg.Done()
// NB: we explicitly do not want to call m.cancel() here as we want the
// goroutine that is reading the monitoring events to be able to decide
// on the error if the monitoring command exits peacefully.
}()
monL, err := m.l.childLogger(`MONITOR`)
if err != nil {
setErr(err)
return
}
defer monL.close()
cmd := exec.CommandContext(m.ctx, args[0], args[1:]...)
cmd.Stdout = io.MultiWriter(pipeW, monL.stdout)
cmd.Stderr = monL.stderr
if err := cmd.Run(); err != nil {
if err != context.Canceled && !strings.Contains(err.Error(), "killed") {
// The expected reason for an error is that the monitor was killed due
// to the context being canceled. Any other error is an actual error.
setErr(err)
return
}
}
// Returning will cause the pipe to be closed which will cause the reader
// goroutine to exit and close the monitoring channel.
}()
// 3. The third goroutine reads from the monitoring pipe, watching for any
// unexpected death events.
wg.Add(1)
go func() {
defer func() {
_ = pipeR.Close()
m.cancel()
wg.Done()
}()
scanner := bufio.NewScanner(pipeR)
for scanner.Scan() {
msg := scanner.Text()
var id int
var s string
if n, _ := fmt.Sscanf(msg, "%d: %s", &id, &s); n == 2 {
if strings.Contains(s, "dead") && atomic.AddInt32(&m.expDeaths, -1) < 0 {
setErr(fmt.Errorf("unexpected node event: %s", msg))
return
}
}
}
}()
wg.Wait()
return err
}
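// A sketched example of how the monitor is meant to be used; runWorkload is a
// hypothetical worker and this snippet is not copied from any test here:
//
//	m := newMonitor(ctx, c, c.All())
//	m.Go(func(ctx context.Context) error { return runWorkload(ctx) })
//	m.ExpectDeath()        // we intend to stop one node ourselves
//	c.Stop(ctx, c.Node(1))
//	m.Wait()               // fails the test on any unexpected node death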
func waitForFullReplication(t *test, db *gosql.DB) {
for ok := false; !ok; time.Sleep(time.Second) {
if err := db.QueryRow(
"SELECT min(array_length(replicas, 1)) >= 3 FROM crdb_internal.ranges",
).Scan(&ok); err != nil {
t.Fatal(err)
}
}
}
|
[
"\"ROACHPROD_USER\"",
"\"GOPATH\""
] |
[] |
[
"ROACHPROD_USER",
"GOPATH"
] |
[]
|
["ROACHPROD_USER", "GOPATH"]
|
go
| 2 | 0 | |
simulation/vendor/google.golang.org/genproto/googleapis/cloud/redis/v1/cloud_redis.pb.go
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.22.0
// protoc v3.12.3
// source: google/cloud/redis/v1/cloud_redis.proto
package redis
import (
context "context"
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
_ "google.golang.org/genproto/googleapis/api/annotations"
longrunning "google.golang.org/genproto/googleapis/longrunning"
field_mask "google.golang.org/genproto/protobuf/field_mask"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// Represents the different states of a Redis instance.
type Instance_State int32
const (
// Not set.
Instance_STATE_UNSPECIFIED Instance_State = 0
// Redis instance is being created.
Instance_CREATING Instance_State = 1
// Redis instance has been created and is fully usable.
Instance_READY Instance_State = 2
// Redis instance configuration is being updated. Certain kinds of updates
// may cause the instance to become unusable while the update is in
// progress.
Instance_UPDATING Instance_State = 3
// Redis instance is being deleted.
Instance_DELETING Instance_State = 4
// Redis instance is being repaired and may be unusable.
Instance_REPAIRING Instance_State = 5
// Maintenance is being performed on this Redis instance.
Instance_MAINTENANCE Instance_State = 6
// Redis instance is importing data (availability may be affected).
Instance_IMPORTING Instance_State = 8
// Redis instance is failing over (availability may be affected).
Instance_FAILING_OVER Instance_State = 9
)
// Enum value maps for Instance_State.
var (
Instance_State_name = map[int32]string{
0: "STATE_UNSPECIFIED",
1: "CREATING",
2: "READY",
3: "UPDATING",
4: "DELETING",
5: "REPAIRING",
6: "MAINTENANCE",
8: "IMPORTING",
9: "FAILING_OVER",
}
Instance_State_value = map[string]int32{
"STATE_UNSPECIFIED": 0,
"CREATING": 1,
"READY": 2,
"UPDATING": 3,
"DELETING": 4,
"REPAIRING": 5,
"MAINTENANCE": 6,
"IMPORTING": 8,
"FAILING_OVER": 9,
}
)
func (x Instance_State) Enum() *Instance_State {
p := new(Instance_State)
*p = x
return p
}
func (x Instance_State) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Instance_State) Descriptor() protoreflect.EnumDescriptor {
return file_google_cloud_redis_v1_cloud_redis_proto_enumTypes[0].Descriptor()
}
func (Instance_State) Type() protoreflect.EnumType {
return &file_google_cloud_redis_v1_cloud_redis_proto_enumTypes[0]
}
func (x Instance_State) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Instance_State.Descriptor instead.
func (Instance_State) EnumDescriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{0, 0}
}
// Available service tiers to choose from
type Instance_Tier int32
const (
// Not set.
Instance_TIER_UNSPECIFIED Instance_Tier = 0
// BASIC tier: standalone instance
Instance_BASIC Instance_Tier = 1
// STANDARD_HA tier: highly available primary/replica instances
Instance_STANDARD_HA Instance_Tier = 3
)
// Enum value maps for Instance_Tier.
var (
Instance_Tier_name = map[int32]string{
0: "TIER_UNSPECIFIED",
1: "BASIC",
3: "STANDARD_HA",
}
Instance_Tier_value = map[string]int32{
"TIER_UNSPECIFIED": 0,
"BASIC": 1,
"STANDARD_HA": 3,
}
)
func (x Instance_Tier) Enum() *Instance_Tier {
p := new(Instance_Tier)
*p = x
return p
}
func (x Instance_Tier) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Instance_Tier) Descriptor() protoreflect.EnumDescriptor {
return file_google_cloud_redis_v1_cloud_redis_proto_enumTypes[1].Descriptor()
}
func (Instance_Tier) Type() protoreflect.EnumType {
return &file_google_cloud_redis_v1_cloud_redis_proto_enumTypes[1]
}
func (x Instance_Tier) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Instance_Tier.Descriptor instead.
func (Instance_Tier) EnumDescriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{0, 1}
}
// Available connection modes.
type Instance_ConnectMode int32
const (
// Not set.
Instance_CONNECT_MODE_UNSPECIFIED Instance_ConnectMode = 0
// Connect via direct peering to the Memorystore for Redis hosted service.
Instance_DIRECT_PEERING Instance_ConnectMode = 1
// Connect your Memorystore for Redis instance using Private Service
// Access. Private services access provides an IP address range for multiple
// Google Cloud services, including Memorystore.
Instance_PRIVATE_SERVICE_ACCESS Instance_ConnectMode = 2
)
// Enum value maps for Instance_ConnectMode.
var (
Instance_ConnectMode_name = map[int32]string{
0: "CONNECT_MODE_UNSPECIFIED",
1: "DIRECT_PEERING",
2: "PRIVATE_SERVICE_ACCESS",
}
Instance_ConnectMode_value = map[string]int32{
"CONNECT_MODE_UNSPECIFIED": 0,
"DIRECT_PEERING": 1,
"PRIVATE_SERVICE_ACCESS": 2,
}
)
func (x Instance_ConnectMode) Enum() *Instance_ConnectMode {
p := new(Instance_ConnectMode)
*p = x
return p
}
func (x Instance_ConnectMode) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Instance_ConnectMode) Descriptor() protoreflect.EnumDescriptor {
return file_google_cloud_redis_v1_cloud_redis_proto_enumTypes[2].Descriptor()
}
func (Instance_ConnectMode) Type() protoreflect.EnumType {
return &file_google_cloud_redis_v1_cloud_redis_proto_enumTypes[2]
}
func (x Instance_ConnectMode) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Instance_ConnectMode.Descriptor instead.
func (Instance_ConnectMode) EnumDescriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{0, 2}
}
// Specifies different modes of operation in relation to the data retention.
type FailoverInstanceRequest_DataProtectionMode int32
const (
// Defaults to LIMITED_DATA_LOSS if a data protection mode is not
// specified.
FailoverInstanceRequest_DATA_PROTECTION_MODE_UNSPECIFIED FailoverInstanceRequest_DataProtectionMode = 0
// Instance failover will be protected with data loss control. More
// specifically, the failover will only be performed if the current
// replication offset diff between master and replica is under a certain
// threshold.
FailoverInstanceRequest_LIMITED_DATA_LOSS FailoverInstanceRequest_DataProtectionMode = 1
// Instance failover will be performed without data loss control.
FailoverInstanceRequest_FORCE_DATA_LOSS FailoverInstanceRequest_DataProtectionMode = 2
)
// Enum value maps for FailoverInstanceRequest_DataProtectionMode.
var (
FailoverInstanceRequest_DataProtectionMode_name = map[int32]string{
0: "DATA_PROTECTION_MODE_UNSPECIFIED",
1: "LIMITED_DATA_LOSS",
2: "FORCE_DATA_LOSS",
}
FailoverInstanceRequest_DataProtectionMode_value = map[string]int32{
"DATA_PROTECTION_MODE_UNSPECIFIED": 0,
"LIMITED_DATA_LOSS": 1,
"FORCE_DATA_LOSS": 2,
}
)
func (x FailoverInstanceRequest_DataProtectionMode) Enum() *FailoverInstanceRequest_DataProtectionMode {
p := new(FailoverInstanceRequest_DataProtectionMode)
*p = x
return p
}
func (x FailoverInstanceRequest_DataProtectionMode) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (FailoverInstanceRequest_DataProtectionMode) Descriptor() protoreflect.EnumDescriptor {
return file_google_cloud_redis_v1_cloud_redis_proto_enumTypes[3].Descriptor()
}
func (FailoverInstanceRequest_DataProtectionMode) Type() protoreflect.EnumType {
return &file_google_cloud_redis_v1_cloud_redis_proto_enumTypes[3]
}
func (x FailoverInstanceRequest_DataProtectionMode) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use FailoverInstanceRequest_DataProtectionMode.Descriptor instead.
func (FailoverInstanceRequest_DataProtectionMode) EnumDescriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{14, 0}
}
// A Google Cloud Redis instance.
type Instance struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Unique name of the resource in this scope including project and
// location using the form:
// `projects/{project_id}/locations/{location_id}/instances/{instance_id}`
//
// Note: Redis instances are managed and addressed at regional level so
// location_id here refers to a GCP region; however, users may choose which
// specific zone (or collection of zones for cross-zone instances) an instance
// should be provisioned in. Refer to [location_id][google.cloud.redis.v1.Instance.location_id] and
// [alternative_location_id][google.cloud.redis.v1.Instance.alternative_location_id] fields for more details.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// An arbitrary and optional user-provided name for the instance.
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
// Resource labels to represent user provided metadata
Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. The zone where the instance will be provisioned. If not provided,
// the service will choose a zone for the instance. For STANDARD_HA tier,
// instances will be created across two zones for protection against zonal
// failures. If [alternative_location_id][google.cloud.redis.v1.Instance.alternative_location_id] is also provided, it must be
// different from [location_id][google.cloud.redis.v1.Instance.location_id].
LocationId string `protobuf:"bytes,4,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
// Optional. Only applicable to STANDARD_HA tier which protects the instance
// against zonal failures by provisioning it across two zones. If provided, it
// must be a different zone from the one provided in [location_id][google.cloud.redis.v1.Instance.location_id].
AlternativeLocationId string `protobuf:"bytes,5,opt,name=alternative_location_id,json=alternativeLocationId,proto3" json:"alternative_location_id,omitempty"`
// Optional. The version of Redis software.
// If not provided, latest supported version will be used. Currently, the
// supported values are:
//
// * `REDIS_3_2` for Redis 3.2 compatibility
// * `REDIS_4_0` for Redis 4.0 compatibility (default)
// * `REDIS_5_0` for Redis 5.0 compatibility
RedisVersion string `protobuf:"bytes,7,opt,name=redis_version,json=redisVersion,proto3" json:"redis_version,omitempty"`
// Optional. The CIDR range of internal addresses that are reserved for this
// instance. If not provided, the service will choose an unused /29 block,
// for example, 10.0.0.0/29 or 192.168.0.0/29. Ranges must be unique
// and non-overlapping with existing subnets in an authorized network.
ReservedIpRange string `protobuf:"bytes,9,opt,name=reserved_ip_range,json=reservedIpRange,proto3" json:"reserved_ip_range,omitempty"`
// Output only. Hostname or IP address of the exposed Redis endpoint used by
// clients to connect to the service.
Host string `protobuf:"bytes,10,opt,name=host,proto3" json:"host,omitempty"`
// Output only. The port number of the exposed Redis endpoint.
Port int32 `protobuf:"varint,11,opt,name=port,proto3" json:"port,omitempty"`
// Output only. The current zone where the Redis endpoint is placed. For Basic
// Tier instances, this will always be the same as the [location_id][google.cloud.redis.v1.Instance.location_id]
// provided by the user at creation time. For Standard Tier instances,
// this can be either [location_id][google.cloud.redis.v1.Instance.location_id] or [alternative_location_id][google.cloud.redis.v1.Instance.alternative_location_id] and can
// change after a failover event.
CurrentLocationId string `protobuf:"bytes,12,opt,name=current_location_id,json=currentLocationId,proto3" json:"current_location_id,omitempty"`
// Output only. The time the instance was created.
CreateTime *timestamp.Timestamp `protobuf:"bytes,13,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// Output only. The current state of this instance.
State Instance_State `protobuf:"varint,14,opt,name=state,proto3,enum=google.cloud.redis.v1.Instance_State" json:"state,omitempty"`
// Output only. Additional information about the current status of this
// instance, if available.
StatusMessage string `protobuf:"bytes,15,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
// Optional. Redis configuration parameters, according to
// http://redis.io/topics/config. Currently, the only supported parameters
// are:
//
// Redis version 3.2 and newer:
//
// * maxmemory-policy
// * notify-keyspace-events
//
// Redis version 4.0 and newer:
//
// * activedefrag
// * lfu-decay-time
// * lfu-log-factor
// * maxmemory-gb
//
// Redis version 5.0 and newer:
//
// * stream-node-max-bytes
// * stream-node-max-entries
RedisConfigs map[string]string `protobuf:"bytes,16,rep,name=redis_configs,json=redisConfigs,proto3" json:"redis_configs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Required. The service tier of the instance.
Tier Instance_Tier `protobuf:"varint,17,opt,name=tier,proto3,enum=google.cloud.redis.v1.Instance_Tier" json:"tier,omitempty"`
// Required. Redis memory size in GiB.
MemorySizeGb int32 `protobuf:"varint,18,opt,name=memory_size_gb,json=memorySizeGb,proto3" json:"memory_size_gb,omitempty"`
// Optional. The full name of the Google Compute Engine
// [network](https://cloud.google.com/vpc/docs/vpc) to which the
// instance is connected. If left unspecified, the `default` network
// will be used.
AuthorizedNetwork string `protobuf:"bytes,20,opt,name=authorized_network,json=authorizedNetwork,proto3" json:"authorized_network,omitempty"`
// Output only. Cloud IAM identity used by import / export operations to
// transfer data to/from Cloud Storage. Format is
// "serviceAccount:<service_account_email>". The value may change over time
// for a given instance so should be checked before each import/export
// operation.
PersistenceIamIdentity string `protobuf:"bytes,21,opt,name=persistence_iam_identity,json=persistenceIamIdentity,proto3" json:"persistence_iam_identity,omitempty"`
// Optional. The network connect mode of the Redis instance.
// If not provided, the connect mode defaults to DIRECT_PEERING.
ConnectMode Instance_ConnectMode `protobuf:"varint,22,opt,name=connect_mode,json=connectMode,proto3,enum=google.cloud.redis.v1.Instance_ConnectMode" json:"connect_mode,omitempty"`
}
func (x *Instance) Reset() {
*x = Instance{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Instance) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Instance) ProtoMessage() {}
func (x *Instance) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Instance.ProtoReflect.Descriptor instead.
func (*Instance) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{0}
}
func (x *Instance) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *Instance) GetDisplayName() string {
if x != nil {
return x.DisplayName
}
return ""
}
func (x *Instance) GetLabels() map[string]string {
if x != nil {
return x.Labels
}
return nil
}
func (x *Instance) GetLocationId() string {
if x != nil {
return x.LocationId
}
return ""
}
func (x *Instance) GetAlternativeLocationId() string {
if x != nil {
return x.AlternativeLocationId
}
return ""
}
func (x *Instance) GetRedisVersion() string {
if x != nil {
return x.RedisVersion
}
return ""
}
func (x *Instance) GetReservedIpRange() string {
if x != nil {
return x.ReservedIpRange
}
return ""
}
func (x *Instance) GetHost() string {
if x != nil {
return x.Host
}
return ""
}
func (x *Instance) GetPort() int32 {
if x != nil {
return x.Port
}
return 0
}
func (x *Instance) GetCurrentLocationId() string {
if x != nil {
return x.CurrentLocationId
}
return ""
}
func (x *Instance) GetCreateTime() *timestamp.Timestamp {
if x != nil {
return x.CreateTime
}
return nil
}
func (x *Instance) GetState() Instance_State {
if x != nil {
return x.State
}
return Instance_STATE_UNSPECIFIED
}
func (x *Instance) GetStatusMessage() string {
if x != nil {
return x.StatusMessage
}
return ""
}
func (x *Instance) GetRedisConfigs() map[string]string {
if x != nil {
return x.RedisConfigs
}
return nil
}
func (x *Instance) GetTier() Instance_Tier {
if x != nil {
return x.Tier
}
return Instance_TIER_UNSPECIFIED
}
func (x *Instance) GetMemorySizeGb() int32 {
if x != nil {
return x.MemorySizeGb
}
return 0
}
func (x *Instance) GetAuthorizedNetwork() string {
if x != nil {
return x.AuthorizedNetwork
}
return ""
}
func (x *Instance) GetPersistenceIamIdentity() string {
if x != nil {
return x.PersistenceIamIdentity
}
return ""
}
func (x *Instance) GetConnectMode() Instance_ConnectMode {
if x != nil {
return x.ConnectMode
}
return Instance_CONNECT_MODE_UNSPECIFIED
}
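// exampleBasicInstance is an illustrative sketch, not generated code: it shows how a
// caller might populate the writable fields of an Instance before creating it. The
// display name, memory size, and config value are placeholder assumptions;
// "maxmemory-policy" is one of the supported parameters listed on RedisConfigs above.
func exampleBasicInstance() *Instance {
	return &Instance{
		DisplayName:  "example cache",
		Tier:         Instance_BASIC, // Instance_STANDARD_HA selects the replicated tier
		MemorySizeGb: 1,
		RedisConfigs: map[string]string{
			"maxmemory-policy": "allkeys-lru",
		},
	}
}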
// Request for [ListInstances][google.cloud.redis.v1.CloudRedis.ListInstances].
type ListInstancesRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. The resource name of the instance location using the form:
// `projects/{project_id}/locations/{location_id}`
// where `location_id` refers to a GCP region.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// The maximum number of items to return.
//
// If not specified, a default value of 1000 will be used by the service.
// Regardless of the page_size value, the response may include a partial list,
// and a caller should only rely on the response's
// [`next_page_token`][google.cloud.redis.v1.ListInstancesResponse.next_page_token]
// to determine whether there are more instances left to be queried.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// The `next_page_token` value returned from a previous
// [ListInstances][google.cloud.redis.v1.CloudRedis.ListInstances] request, if any.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
}
func (x *ListInstancesRequest) Reset() {
*x = ListInstancesRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ListInstancesRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListInstancesRequest) ProtoMessage() {}
func (x *ListInstancesRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListInstancesRequest.ProtoReflect.Descriptor instead.
func (*ListInstancesRequest) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{1}
}
func (x *ListInstancesRequest) GetParent() string {
if x != nil {
return x.Parent
}
return ""
}
func (x *ListInstancesRequest) GetPageSize() int32 {
if x != nil {
return x.PageSize
}
return 0
}
func (x *ListInstancesRequest) GetPageToken() string {
if x != nil {
return x.PageToken
}
return ""
}
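// exampleListInstancesRequest is an illustrative sketch, not generated code: it builds
// a paged ListInstances request. The parent value is a placeholder; per the comments
// above, PageToken carries the next_page_token from a previous response, and leaving
// PageSize unset lets the service apply its default of 1000.
func exampleListInstancesRequest(pageToken string) *ListInstancesRequest {
	return &ListInstancesRequest{
		Parent:    "projects/my-project/locations/us-central1", // hypothetical parent
		PageSize:  100,
		PageToken: pageToken,
	}
}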
// Response for [ListInstances][google.cloud.redis.v1.CloudRedis.ListInstances].
type ListInstancesResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// A list of Redis instances in the project in the specified location,
// or across all locations.
//
// If the `location_id` in the parent field of the request is "-", all regions
// available to the project are queried, and the results are aggregated.
// If a location is unavailable in such an aggregated query, a dummy Redis
// entry is included in the response with its `name` field set to a value of
// the form `projects/{project_id}/locations/{location_id}/instances/`-, its
// `status` field set to ERROR, and its `status_message` field set to "location
// not available for ListInstances".
Instances []*Instance `protobuf:"bytes,1,rep,name=instances,proto3" json:"instances,omitempty"`
// Token to retrieve the next page of results, or empty if there are no more
// results in the list.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
// Locations that could not be reached.
Unreachable []string `protobuf:"bytes,3,rep,name=unreachable,proto3" json:"unreachable,omitempty"`
}
func (x *ListInstancesResponse) Reset() {
*x = ListInstancesResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ListInstancesResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListInstancesResponse) ProtoMessage() {}
func (x *ListInstancesResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListInstancesResponse.ProtoReflect.Descriptor instead.
func (*ListInstancesResponse) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{2}
}
func (x *ListInstancesResponse) GetInstances() []*Instance {
if x != nil {
return x.Instances
}
return nil
}
func (x *ListInstancesResponse) GetNextPageToken() string {
if x != nil {
return x.NextPageToken
}
return ""
}
func (x *ListInstancesResponse) GetUnreachable() []string {
if x != nil {
return x.Unreachable
}
return nil
}
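// exampleCollectAllInstances is an illustrative sketch, not generated code: it drains
// every page of ListInstances results. The fetch callback stands in for the actual RPC
// (for example, a call through the CloudRedis client in this package); only the
// next_page_token handling shown here comes from the comments above.
func exampleCollectAllInstances(fetch func(pageToken string) (*ListInstancesResponse, error)) ([]*Instance, error) {
	var all []*Instance
	pageToken := ""
	for {
		resp, err := fetch(pageToken)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.GetInstances()...)
		pageToken = resp.GetNextPageToken()
		if pageToken == "" { // an empty token means there are no more results
			return all, nil
		}
	}
}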
// Request for [GetInstance][google.cloud.redis.v1.CloudRedis.GetInstance].
type GetInstanceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Redis instance resource name using the form:
// `projects/{project_id}/locations/{location_id}/instances/{instance_id}`
// where `location_id` refers to a GCP region.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}
func (x *GetInstanceRequest) Reset() {
*x = GetInstanceRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *GetInstanceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetInstanceRequest) ProtoMessage() {}
func (x *GetInstanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetInstanceRequest.ProtoReflect.Descriptor instead.
func (*GetInstanceRequest) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{3}
}
func (x *GetInstanceRequest) GetName() string {
if x != nil {
return x.Name
}
return ""
}
// Request for [CreateInstance][google.cloud.redis.v1.CloudRedis.CreateInstance].
type CreateInstanceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. The resource name of the instance location using the form:
// `projects/{project_id}/locations/{location_id}`
// where `location_id` refers to a GCP region.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. The logical name of the Redis instance in the customer project
// with the following restrictions:
//
// * Must contain only lowercase letters, numbers, and hyphens.
// * Must start with a letter.
// * Must be 1 to 40 characters long.
// * Must end with a number or a letter.
// * Must be unique within the customer project / location.
InstanceId string `protobuf:"bytes,2,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
// Required. A Redis [Instance] resource.
Instance *Instance `protobuf:"bytes,3,opt,name=instance,proto3" json:"instance,omitempty"`
}
func (x *CreateInstanceRequest) Reset() {
*x = CreateInstanceRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CreateInstanceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CreateInstanceRequest) ProtoMessage() {}
func (x *CreateInstanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CreateInstanceRequest.ProtoReflect.Descriptor instead.
func (*CreateInstanceRequest) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{4}
}
func (x *CreateInstanceRequest) GetParent() string {
if x != nil {
return x.Parent
}
return ""
}
func (x *CreateInstanceRequest) GetInstanceId() string {
if x != nil {
return x.InstanceId
}
return ""
}
func (x *CreateInstanceRequest) GetInstance() *Instance {
if x != nil {
return x.Instance
}
return nil
}
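// exampleCreateInstanceRequest is an illustrative sketch, not generated code: it
// assembles a CreateInstanceRequest. The parent and instance ID are placeholders; the
// instance ID follows the restrictions documented on instance_id above, and the
// Instance payload carries the required Tier and MemorySizeGb fields.
func exampleCreateInstanceRequest() *CreateInstanceRequest {
	return &CreateInstanceRequest{
		Parent:     "projects/my-project/locations/us-central1", // hypothetical parent
		InstanceId: "my-cache-1",
		Instance: &Instance{
			Tier:         Instance_BASIC,
			MemorySizeGb: 1,
		},
	}
}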
// Request for [UpdateInstance][google.cloud.redis.v1.CloudRedis.UpdateInstance].
type UpdateInstanceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Mask of fields to update. At least one path must be supplied in
// this field. The elements of the repeated paths field may only include these
// fields from [Instance][google.cloud.redis.v1.Instance]:
//
// * `displayName`
// * `labels`
// * `memorySizeGb`
// * `redisConfig`
UpdateMask *field_mask.FieldMask `protobuf:"bytes,1,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
// Required. Update description.
// Only fields specified in update_mask are updated.
Instance *Instance `protobuf:"bytes,2,opt,name=instance,proto3" json:"instance,omitempty"`
}
func (x *UpdateInstanceRequest) Reset() {
*x = UpdateInstanceRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *UpdateInstanceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UpdateInstanceRequest) ProtoMessage() {}
func (x *UpdateInstanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UpdateInstanceRequest.ProtoReflect.Descriptor instead.
func (*UpdateInstanceRequest) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{5}
}
func (x *UpdateInstanceRequest) GetUpdateMask() *field_mask.FieldMask {
if x != nil {
return x.UpdateMask
}
return nil
}
func (x *UpdateInstanceRequest) GetInstance() *Instance {
if x != nil {
return x.Instance
}
return nil
}
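// exampleUpdateInstanceRequest is an illustrative sketch, not generated code: it grows
// an instance's memory via UpdateInstance. The mask path "memorySizeGb" is one of the
// updatable paths listed in the update_mask comment above; only masked fields of the
// Instance payload are applied. The instance name is supplied by the caller.
func exampleUpdateInstanceRequest(name string) *UpdateInstanceRequest {
	return &UpdateInstanceRequest{
		UpdateMask: &field_mask.FieldMask{Paths: []string{"memorySizeGb"}},
		Instance: &Instance{
			Name:         name,
			MemorySizeGb: 4,
		},
	}
}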
// Request for [UpgradeInstance][google.cloud.redis.v1.CloudRedis.UpgradeInstance].
type UpgradeInstanceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Redis instance resource name using the form:
// `projects/{project_id}/locations/{location_id}/instances/{instance_id}`
// where `location_id` refers to a GCP region.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Required. Specifies the target version of Redis software to upgrade to.
RedisVersion string `protobuf:"bytes,2,opt,name=redis_version,json=redisVersion,proto3" json:"redis_version,omitempty"`
}
func (x *UpgradeInstanceRequest) Reset() {
*x = UpgradeInstanceRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *UpgradeInstanceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UpgradeInstanceRequest) ProtoMessage() {}
func (x *UpgradeInstanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UpgradeInstanceRequest.ProtoReflect.Descriptor instead.
func (*UpgradeInstanceRequest) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{6}
}
func (x *UpgradeInstanceRequest) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *UpgradeInstanceRequest) GetRedisVersion() string {
if x != nil {
return x.RedisVersion
}
return ""
}
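// exampleUpgradeInstanceRequest is an illustrative sketch, not generated code: it
// requests an upgrade to a newer Redis version. Both the instance name and the target
// version string are placeholder assumptions.
func exampleUpgradeInstanceRequest() *UpgradeInstanceRequest {
	return &UpgradeInstanceRequest{
		Name:         "projects/my-project/locations/us-central1/instances/my-cache-1",
		RedisVersion: "REDIS_5_0", // hypothetical target version
	}
}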
// Request for [DeleteInstance][google.cloud.redis.v1.CloudRedis.DeleteInstance].
type DeleteInstanceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Redis instance resource name using the form:
// `projects/{project_id}/locations/{location_id}/instances/{instance_id}`
// where `location_id` refers to a GCP region.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}
func (x *DeleteInstanceRequest) Reset() {
*x = DeleteInstanceRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DeleteInstanceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteInstanceRequest) ProtoMessage() {}
func (x *DeleteInstanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteInstanceRequest.ProtoReflect.Descriptor instead.
func (*DeleteInstanceRequest) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{7}
}
func (x *DeleteInstanceRequest) GetName() string {
if x != nil {
return x.Name
}
return ""
}
// The Cloud Storage location for the input content
type GcsSource struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Source data URI (e.g. 'gs://my_bucket/my_object').
Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"`
}
func (x *GcsSource) Reset() {
*x = GcsSource{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *GcsSource) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GcsSource) ProtoMessage() {}
func (x *GcsSource) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GcsSource.ProtoReflect.Descriptor instead.
func (*GcsSource) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{8}
}
func (x *GcsSource) GetUri() string {
if x != nil {
return x.Uri
}
return ""
}
// The input content
type InputConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Specify source location of input data
//
// Types that are assignable to Source:
// *InputConfig_GcsSource
Source isInputConfig_Source `protobuf_oneof:"source"`
}
func (x *InputConfig) Reset() {
*x = InputConfig{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *InputConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*InputConfig) ProtoMessage() {}
func (x *InputConfig) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use InputConfig.ProtoReflect.Descriptor instead.
func (*InputConfig) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{9}
}
func (m *InputConfig) GetSource() isInputConfig_Source {
if m != nil {
return m.Source
}
return nil
}
func (x *InputConfig) GetGcsSource() *GcsSource {
if x, ok := x.GetSource().(*InputConfig_GcsSource); ok {
return x.GcsSource
}
return nil
}
type isInputConfig_Source interface {
isInputConfig_Source()
}
type InputConfig_GcsSource struct {
// Google Cloud Storage location where input content is located.
GcsSource *GcsSource `protobuf:"bytes,1,opt,name=gcs_source,json=gcsSource,proto3,oneof"`
}
func (*InputConfig_GcsSource) isInputConfig_Source() {}
// Request for [Import][google.cloud.redis.v1.CloudRedis.ImportInstance].
type ImportInstanceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Redis instance resource name using the form:
// `projects/{project_id}/locations/{location_id}/instances/{instance_id}`
// where `location_id` refers to a GCP region.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Required. Specify data to be imported.
InputConfig *InputConfig `protobuf:"bytes,3,opt,name=input_config,json=inputConfig,proto3" json:"input_config,omitempty"`
}
func (x *ImportInstanceRequest) Reset() {
*x = ImportInstanceRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ImportInstanceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ImportInstanceRequest) ProtoMessage() {}
func (x *ImportInstanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ImportInstanceRequest.ProtoReflect.Descriptor instead.
func (*ImportInstanceRequest) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{10}
}
func (x *ImportInstanceRequest) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *ImportInstanceRequest) GetInputConfig() *InputConfig {
if x != nil {
return x.InputConfig
}
return nil
}
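// exampleImportInstanceRequest is an illustrative sketch, not generated code: it shows
// an ImportInstanceRequest whose InputConfig oneof is populated with a GcsSource. The
// instance name and bucket URI are placeholder assumptions.
func exampleImportInstanceRequest() *ImportInstanceRequest {
	return &ImportInstanceRequest{
		Name: "projects/my-project/locations/us-central1/instances/my-cache-1",
		InputConfig: &InputConfig{
			// The Source oneof is set by choosing one of its wrapper types.
			Source: &InputConfig_GcsSource{
				GcsSource: &GcsSource{Uri: "gs://my_bucket/my_object"},
			},
		},
	}
}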
// The Cloud Storage location for the output content
type GcsDestination struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Data destination URI (e.g.
// 'gs://my_bucket/my_object'). Existing files will be overwritten.
Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"`
}
func (x *GcsDestination) Reset() {
*x = GcsDestination{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *GcsDestination) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GcsDestination) ProtoMessage() {}
func (x *GcsDestination) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GcsDestination.ProtoReflect.Descriptor instead.
func (*GcsDestination) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{11}
}
func (x *GcsDestination) GetUri() string {
if x != nil {
return x.Uri
}
return ""
}
// The output content
type OutputConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Specify destination location of output data
//
// Types that are assignable to Destination:
// *OutputConfig_GcsDestination
Destination isOutputConfig_Destination `protobuf_oneof:"destination"`
}
func (x *OutputConfig) Reset() {
*x = OutputConfig{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *OutputConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OutputConfig) ProtoMessage() {}
func (x *OutputConfig) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OutputConfig.ProtoReflect.Descriptor instead.
func (*OutputConfig) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{12}
}
func (m *OutputConfig) GetDestination() isOutputConfig_Destination {
if m != nil {
return m.Destination
}
return nil
}
func (x *OutputConfig) GetGcsDestination() *GcsDestination {
if x, ok := x.GetDestination().(*OutputConfig_GcsDestination); ok {
return x.GcsDestination
}
return nil
}
type isOutputConfig_Destination interface {
isOutputConfig_Destination()
}
type OutputConfig_GcsDestination struct {
// Google Cloud Storage destination for output content.
GcsDestination *GcsDestination `protobuf:"bytes,1,opt,name=gcs_destination,json=gcsDestination,proto3,oneof"`
}
func (*OutputConfig_GcsDestination) isOutputConfig_Destination() {}
// Request for [Export][google.cloud.redis.v1.CloudRedis.ExportInstance].
type ExportInstanceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Redis instance resource name using the form:
// `projects/{project_id}/locations/{location_id}/instances/{instance_id}`
// where `location_id` refers to a GCP region.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Required. Specify data to be exported.
OutputConfig *OutputConfig `protobuf:"bytes,3,opt,name=output_config,json=outputConfig,proto3" json:"output_config,omitempty"`
}
func (x *ExportInstanceRequest) Reset() {
*x = ExportInstanceRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ExportInstanceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExportInstanceRequest) ProtoMessage() {}
func (x *ExportInstanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExportInstanceRequest.ProtoReflect.Descriptor instead.
func (*ExportInstanceRequest) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{13}
}
func (x *ExportInstanceRequest) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *ExportInstanceRequest) GetOutputConfig() *OutputConfig {
if x != nil {
return x.OutputConfig
}
return nil
}
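// exampleExportInstanceRequest is an illustrative sketch, not generated code: it shows
// an ExportInstanceRequest whose OutputConfig oneof is populated with a GcsDestination.
// Existing files at the destination URI are overwritten, per the GcsDestination comment
// above; the instance name and URI here are placeholders.
func exampleExportInstanceRequest() *ExportInstanceRequest {
	return &ExportInstanceRequest{
		Name: "projects/my-project/locations/us-central1/instances/my-cache-1",
		OutputConfig: &OutputConfig{
			Destination: &OutputConfig_GcsDestination{
				GcsDestination: &GcsDestination{Uri: "gs://my_bucket/my_object"},
			},
		},
	}
}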
// Request for [Failover][google.cloud.redis.v1.CloudRedis.FailoverInstance].
type FailoverInstanceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Redis instance resource name using the form:
// `projects/{project_id}/locations/{location_id}/instances/{instance_id}`
// where `location_id` refers to a GCP region.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Optional. The data protection mode that the user can choose for the failover.
// If it is unspecified, the data protection mode defaults to LIMITED_DATA_LOSS.
DataProtectionMode FailoverInstanceRequest_DataProtectionMode `protobuf:"varint,2,opt,name=data_protection_mode,json=dataProtectionMode,proto3,enum=google.cloud.redis.v1.FailoverInstanceRequest_DataProtectionMode" json:"data_protection_mode,omitempty"`
}
func (x *FailoverInstanceRequest) Reset() {
*x = FailoverInstanceRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *FailoverInstanceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FailoverInstanceRequest) ProtoMessage() {}
func (x *FailoverInstanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FailoverInstanceRequest.ProtoReflect.Descriptor instead.
func (*FailoverInstanceRequest) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{14}
}
func (x *FailoverInstanceRequest) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *FailoverInstanceRequest) GetDataProtectionMode() FailoverInstanceRequest_DataProtectionMode {
if x != nil {
return x.DataProtectionMode
}
return FailoverInstanceRequest_DATA_PROTECTION_MODE_UNSPECIFIED
}
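// exampleFailoverInstanceRequest is an illustrative sketch, not generated code: it
// builds a manual failover request that opts into FORCE_DATA_LOSS instead of the
// LIMITED_DATA_LOSS default described above. The instance name is a placeholder.
func exampleFailoverInstanceRequest() *FailoverInstanceRequest {
	return &FailoverInstanceRequest{
		Name:               "projects/my-project/locations/us-central1/instances/my-cache-1",
		DataProtectionMode: FailoverInstanceRequest_FORCE_DATA_LOSS,
	}
}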
// Represents the v1 metadata of the long-running operation.
type OperationMetadata struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Creation timestamp.
CreateTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// End timestamp.
EndTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
// Operation target.
Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
// Operation verb.
Verb string `protobuf:"bytes,4,opt,name=verb,proto3" json:"verb,omitempty"`
// Operation status details.
StatusDetail string `protobuf:"bytes,5,opt,name=status_detail,json=statusDetail,proto3" json:"status_detail,omitempty"`
// Specifies if cancellation was requested for the operation.
CancelRequested bool `protobuf:"varint,6,opt,name=cancel_requested,json=cancelRequested,proto3" json:"cancel_requested,omitempty"`
// API version.
ApiVersion string `protobuf:"bytes,7,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"`
}
func (x *OperationMetadata) Reset() {
*x = OperationMetadata{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *OperationMetadata) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OperationMetadata) ProtoMessage() {}
func (x *OperationMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[15]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OperationMetadata.ProtoReflect.Descriptor instead.
func (*OperationMetadata) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{15}
}
func (x *OperationMetadata) GetCreateTime() *timestamp.Timestamp {
if x != nil {
return x.CreateTime
}
return nil
}
func (x *OperationMetadata) GetEndTime() *timestamp.Timestamp {
if x != nil {
return x.EndTime
}
return nil
}
func (x *OperationMetadata) GetTarget() string {
if x != nil {
return x.Target
}
return ""
}
func (x *OperationMetadata) GetVerb() string {
if x != nil {
return x.Verb
}
return ""
}
func (x *OperationMetadata) GetStatusDetail() string {
if x != nil {
return x.StatusDetail
}
return ""
}
func (x *OperationMetadata) GetCancelRequested() bool {
if x != nil {
return x.CancelRequested
}
return false
}
func (x *OperationMetadata) GetApiVersion() string {
if x != nil {
return x.ApiVersion
}
return ""
}
// This location metadata represents additional configuration options for a
// given location where a Redis instance may be created. All fields are output
// only. It is returned as content of the
// `google.cloud.location.Location.metadata` field.
type LocationMetadata struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Output only. The set of available zones in the location. The map is keyed
// by the lowercase ID of each zone, as defined by GCE. These keys can be
// specified in `location_id` or `alternative_location_id` fields when
// creating a Redis instance.
AvailableZones map[string]*ZoneMetadata `protobuf:"bytes,1,rep,name=available_zones,json=availableZones,proto3" json:"available_zones,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *LocationMetadata) Reset() {
*x = LocationMetadata{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *LocationMetadata) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*LocationMetadata) ProtoMessage() {}
func (x *LocationMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[16]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LocationMetadata.ProtoReflect.Descriptor instead.
func (*LocationMetadata) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{16}
}
func (x *LocationMetadata) GetAvailableZones() map[string]*ZoneMetadata {
if x != nil {
return x.AvailableZones
}
return nil
}
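// exampleZoneIDs is an illustrative sketch, not generated code: it collects the zone
// IDs advertised in a LocationMetadata message. Per the comment above, these keys can
// then be used as location_id or alternative_location_id when creating an instance.
func exampleZoneIDs(meta *LocationMetadata) []string {
	var zones []string
	for zoneID := range meta.GetAvailableZones() {
		zones = append(zones, zoneID)
	}
	return zones
}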
// Defines specific information for a particular zone. Currently empty and
// reserved for future use only.
type ZoneMetadata struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *ZoneMetadata) Reset() {
*x = ZoneMetadata{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ZoneMetadata) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ZoneMetadata) ProtoMessage() {}
func (x *ZoneMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[17]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ZoneMetadata.ProtoReflect.Descriptor instead.
func (*ZoneMetadata) Descriptor() ([]byte, []int) {
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP(), []int{17}
}
var File_google_cloud_redis_v1_cloud_redis_proto protoreflect.FileDescriptor
var file_google_cloud_redis_v1_cloud_redis_proto_rawDesc = []byte{
0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x72,
0x65, 0x64, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x72, 0x65,
0x64, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31,
0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69,
0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67,
0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfb, 0x0b, 0x0a, 0x08,
0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d,
0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79,
0x4e, 0x61, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73,
0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x24, 0x0a, 0x0b, 0x6c, 0x6f, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12,
0x3b, 0x0a, 0x17, 0x61, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6c,
0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x61, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69,
0x76, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x28, 0x0a, 0x0d,
0x72, 0x65, 0x64, 0x69, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20,
0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x73, 0x56,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
0x65, 0x64, 0x5f, 0x69, 0x70, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
0x49, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18,
0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74,
0x12, 0x17, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03,
0xe0, 0x41, 0x03, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x63, 0x75, 0x72,
0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64,
0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x11, 0x63, 0x75, 0x72,
0x72, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x40,
0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42,
0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
0x12, 0x40, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32,
0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72,
0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61,
0x74, 0x65, 0x12, 0x2a, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x5b,
0x0a, 0x0d, 0x72, 0x65, 0x64, 0x69, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18,
0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x73, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x72,
0x65, 0x64, 0x69, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x04, 0x74,
0x69, 0x65, 0x72, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76,
0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x54, 0x69, 0x65, 0x72, 0x42,
0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x74, 0x69, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x0e, 0x6d, 0x65,
0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x67, 0x62, 0x18, 0x12, 0x20, 0x01,
0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53,
0x69, 0x7a, 0x65, 0x47, 0x62, 0x12, 0x32, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
0x7a, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x14, 0x20, 0x01, 0x28,
0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
0x65, 0x64, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x3d, 0x0a, 0x18, 0x70, 0x65, 0x72,
0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x65,
0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03,
0x52, 0x16, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x49, 0x61, 0x6d,
0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x53, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x6e,
0x65, 0x63, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65,
0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e,
0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01,
0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x1a, 0x39, 0x0a,
0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x11, 0x52, 0x65, 0x64, 0x69,
0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x94, 0x01, 0x0a, 0x05, 0x53, 0x74,
0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53,
0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52,
0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44,
0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x55, 0x50, 0x44, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10,
0x03, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12,
0x0d, 0x0a, 0x09, 0x52, 0x45, 0x50, 0x41, 0x49, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0f,
0x0a, 0x0b, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x06, 0x12,
0x0d, 0x0a, 0x09, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x08, 0x12, 0x10,
0x0a, 0x0c, 0x46, 0x41, 0x49, 0x4c, 0x49, 0x4e, 0x47, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x10, 0x09,
0x22, 0x38, 0x0a, 0x04, 0x54, 0x69, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x49, 0x45, 0x52,
0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09,
0x0a, 0x05, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x41,
0x4e, 0x44, 0x41, 0x52, 0x44, 0x5f, 0x48, 0x41, 0x10, 0x03, 0x22, 0x5b, 0x0a, 0x0b, 0x43, 0x6f,
0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e,
0x4e, 0x45, 0x43, 0x54, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x49, 0x52, 0x45, 0x43,
0x54, 0x5f, 0x50, 0x45, 0x45, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x50,
0x52, 0x49, 0x56, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x41,
0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x02, 0x3a, 0x60, 0xea, 0x41, 0x5d, 0x0a, 0x1d, 0x72, 0x65,
0x64, 0x69, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x3c, 0x70, 0x72, 0x6f,
0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f,
0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b,
0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x22, 0x95, 0x01, 0x0a, 0x14, 0x4c, 0x69,
0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70,
0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69,
0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69,
0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
0x6e, 0x22, 0xa0, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x09, 0x69,
0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65,
0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52,
0x09, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65,
0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
0x65, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x75, 0x6e, 0x72, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x6c,
0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x6e, 0x72, 0x65, 0x61, 0x63, 0x68,
0x61, 0x62, 0x6c, 0x65, 0x22, 0x4f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61,
0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61,
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f,
0x0a, 0x1d, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc2, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
0x6d, 0x2f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65,
0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x69,
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x69, 0x6e,
0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x49, 0x64, 0x12, 0x40, 0x0a, 0x08, 0x69, 0x6e, 0x73, 0x74,
0x61, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e,
0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02,
0x52, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x15, 0x55,
0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d,
0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61,
0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x40, 0x0a, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31,
0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08,
0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x7d, 0x0a, 0x16, 0x55, 0x70, 0x67, 0x72,
0x61, 0x64, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49,
0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a,
0x0d, 0x72, 0x65, 0x64, 0x69, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x73,
0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x52, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74,
0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25,
0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73,
0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x22, 0x0a, 0x09, 0x47,
0x63, 0x73, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x15, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x03, 0x75, 0x72, 0x69, 0x22,
0x5a, 0x0a, 0x0b, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41,
0x0a, 0x0a, 0x67, 0x63, 0x73, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x63, 0x73, 0x53, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x67, 0x63, 0x73, 0x53, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x42, 0x08, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x7c, 0x0a, 0x15, 0x49,
0x6d, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4a, 0x0a,
0x0c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x70, 0x75,
0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x69, 0x6e,
0x70, 0x75, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x27, 0x0a, 0x0e, 0x47, 0x63, 0x73,
0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x03, 0x75,
0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x03, 0x75,
0x72, 0x69, 0x22, 0x6f, 0x0a, 0x0c, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x12, 0x50, 0x0a, 0x0f, 0x67, 0x63, 0x73, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73,
0x2e, 0x76, 0x31, 0x2e, 0x47, 0x63, 0x73, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x63, 0x73, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0d, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x22, 0x7f, 0x0a, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x73,
0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f,
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69,
0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x22, 0xb6, 0x02, 0x0a, 0x17, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65,
0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25,
0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73,
0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x78, 0x0a, 0x14, 0x64,
0x61, 0x74, 0x61, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76,
0x31, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x50, 0x72,
0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x42, 0x03, 0xe0, 0x41,
0x01, 0x52, 0x12, 0x64, 0x61, 0x74, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f,
0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x66, 0x0a, 0x12, 0x44, 0x61, 0x74, 0x61, 0x50, 0x72, 0x6f,
0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x0a, 0x20, 0x44,
0x41, 0x54, 0x41, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d,
0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
0x00, 0x12, 0x15, 0x0a, 0x11, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x5f, 0x44, 0x41, 0x54,
0x41, 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x46, 0x4f, 0x52, 0x43,
0x45, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x02, 0x22, 0xa4, 0x02,
0x0a, 0x11, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64,
0x61, 0x74, 0x61, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69,
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07,
0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12,
0x12, 0x0a, 0x04, 0x76, 0x65, 0x72, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x76,
0x65, 0x72, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x64, 0x65,
0x74, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x74,
0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x61, 0x6e, 0x63,
0x65, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01,
0x28, 0x08, 0x52, 0x0f, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69,
0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72,
0x73, 0x69, 0x6f, 0x6e, 0x22, 0xe5, 0x01, 0x0a, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x69, 0x0a, 0x0f, 0x61, 0x76, 0x61,
0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x76, 0x61, 0x69,
0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42,
0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5a,
0x6f, 0x6e, 0x65, 0x73, 0x1a, 0x66, 0x0a, 0x13, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c,
0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69,
0x73, 0x2e, 0x76, 0x31, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x0e, 0x0a, 0x0c,
0x5a, 0x6f, 0x6e, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x32, 0xb3, 0x11, 0x0a,
0x0a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x65, 0x64, 0x69, 0x73, 0x12, 0xaa, 0x01, 0x0a, 0x0d,
0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x2b, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64,
0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e,
0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f,
0x12, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72,
0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0xda,
0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x97, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74,
0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31,
0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74,
0x61, 0x6e, 0x63, 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76,
0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x69,
0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61,
0x6d, 0x65, 0x12, 0x89, 0x02, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73,
0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72,
0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e,
0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x22, 0xa9, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x39, 0x22, 0x2d, 0x2f, 0x76, 0x31,
0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d,
0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x3a, 0x08, 0x69, 0x6e, 0x73, 0x74,
0x61, 0x6e, 0x63, 0x65, 0xda, 0x41, 0x1b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x69, 0x6e,
0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
0x63, 0x65, 0xca, 0x41, 0x49, 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73,
0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x8b,
0x02, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e,
0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xab,
0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x32, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x69, 0x6e,
0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a,
0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0xda, 0x41, 0x14, 0x75, 0x70, 0x64, 0x61,
0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
0xca, 0x41, 0x49, 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61,
0x6e, 0x63, 0x65, 0x12, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x83, 0x02, 0x0a,
0x0f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65,
0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e,
0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa1,
0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x22, 0x35, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61,
0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x3a, 0x01,
0x2a, 0xda, 0x41, 0x12, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x65, 0x64, 0x69, 0x73, 0x5f, 0x76,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0xca, 0x41, 0x49, 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31,
0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31,
0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
0x74, 0x61, 0x12, 0xff, 0x01, 0x0a, 0x0e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x73,
0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d,
0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e,
0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x22, 0x9f, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x39, 0x22, 0x34, 0x2f, 0x76, 0x31,
0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e,
0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x69, 0x6d, 0x70, 0x6f, 0x72,
0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x11, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x69, 0x6e, 0x70, 0x75,
0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0xca, 0x41, 0x49, 0x0a, 0x1e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e,
0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e,
0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0x12, 0x80, 0x02, 0x0a, 0x0e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x49,
0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e,
0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c,
0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa0, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x39, 0x22, 0x34, 0x2f,
0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f,
0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x65, 0x78, 0x70,
0x6f, 0x72, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x12, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6f, 0x75,
0x74, 0x70, 0x75, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0xca, 0x41, 0x49, 0x0a, 0x1e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64,
0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64,
0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x8d, 0x02, 0x0a, 0x10, 0x46, 0x61, 0x69, 0x6c,
0x6f, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69,
0x73, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x73,
0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e,
0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa9, 0x01, 0x82, 0xd3,
0xe4, 0x93, 0x02, 0x3b, 0x22, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
0x2f, 0x2a, 0x7d, 0x3a, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x3a, 0x01, 0x2a, 0xda,
0x41, 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x72, 0x6f, 0x74,
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0xca, 0x41, 0x49, 0x0a, 0x1e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64,
0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64,
0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xde, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65,
0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e,
0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70,
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x2a,
0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0xca, 0x41, 0x40, 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12,
0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65,
0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x48, 0xca, 0x41, 0x14, 0x72, 0x65, 0x64,
0x69, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
0x6d, 0xd2, 0x41, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61,
0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f,
0x72, 0x6d, 0x42, 0x73, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2e, 0x76, 0x31, 0x42,
0x18, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x65, 0x64, 0x69, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x56, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67,
0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
0x69, 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x72, 0x65, 0x64, 0x69, 0x73, 0x2f, 0x76,
0x31, 0x3b, 0x72, 0x65, 0x64, 0x69, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_google_cloud_redis_v1_cloud_redis_proto_rawDescOnce sync.Once
file_google_cloud_redis_v1_cloud_redis_proto_rawDescData = file_google_cloud_redis_v1_cloud_redis_proto_rawDesc
)
func file_google_cloud_redis_v1_cloud_redis_proto_rawDescGZIP() []byte {
file_google_cloud_redis_v1_cloud_redis_proto_rawDescOnce.Do(func() {
file_google_cloud_redis_v1_cloud_redis_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_redis_v1_cloud_redis_proto_rawDescData)
})
return file_google_cloud_redis_v1_cloud_redis_proto_rawDescData
}
var file_google_cloud_redis_v1_cloud_redis_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
var file_google_cloud_redis_v1_cloud_redis_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
var file_google_cloud_redis_v1_cloud_redis_proto_goTypes = []interface{}{
(Instance_State)(0), // 0: google.cloud.redis.v1.Instance.State
(Instance_Tier)(0), // 1: google.cloud.redis.v1.Instance.Tier
(Instance_ConnectMode)(0), // 2: google.cloud.redis.v1.Instance.ConnectMode
(FailoverInstanceRequest_DataProtectionMode)(0), // 3: google.cloud.redis.v1.FailoverInstanceRequest.DataProtectionMode
(*Instance)(nil), // 4: google.cloud.redis.v1.Instance
(*ListInstancesRequest)(nil), // 5: google.cloud.redis.v1.ListInstancesRequest
(*ListInstancesResponse)(nil), // 6: google.cloud.redis.v1.ListInstancesResponse
(*GetInstanceRequest)(nil), // 7: google.cloud.redis.v1.GetInstanceRequest
(*CreateInstanceRequest)(nil), // 8: google.cloud.redis.v1.CreateInstanceRequest
(*UpdateInstanceRequest)(nil), // 9: google.cloud.redis.v1.UpdateInstanceRequest
(*UpgradeInstanceRequest)(nil), // 10: google.cloud.redis.v1.UpgradeInstanceRequest
(*DeleteInstanceRequest)(nil), // 11: google.cloud.redis.v1.DeleteInstanceRequest
(*GcsSource)(nil), // 12: google.cloud.redis.v1.GcsSource
(*InputConfig)(nil), // 13: google.cloud.redis.v1.InputConfig
(*ImportInstanceRequest)(nil), // 14: google.cloud.redis.v1.ImportInstanceRequest
(*GcsDestination)(nil), // 15: google.cloud.redis.v1.GcsDestination
(*OutputConfig)(nil), // 16: google.cloud.redis.v1.OutputConfig
(*ExportInstanceRequest)(nil), // 17: google.cloud.redis.v1.ExportInstanceRequest
(*FailoverInstanceRequest)(nil), // 18: google.cloud.redis.v1.FailoverInstanceRequest
(*OperationMetadata)(nil), // 19: google.cloud.redis.v1.OperationMetadata
(*LocationMetadata)(nil), // 20: google.cloud.redis.v1.LocationMetadata
(*ZoneMetadata)(nil), // 21: google.cloud.redis.v1.ZoneMetadata
nil, // 22: google.cloud.redis.v1.Instance.LabelsEntry
nil, // 23: google.cloud.redis.v1.Instance.RedisConfigsEntry
nil, // 24: google.cloud.redis.v1.LocationMetadata.AvailableZonesEntry
(*timestamp.Timestamp)(nil), // 25: google.protobuf.Timestamp
(*field_mask.FieldMask)(nil), // 26: google.protobuf.FieldMask
(*longrunning.Operation)(nil), // 27: google.longrunning.Operation
}
var file_google_cloud_redis_v1_cloud_redis_proto_depIdxs = []int32{
22, // 0: google.cloud.redis.v1.Instance.labels:type_name -> google.cloud.redis.v1.Instance.LabelsEntry
25, // 1: google.cloud.redis.v1.Instance.create_time:type_name -> google.protobuf.Timestamp
0, // 2: google.cloud.redis.v1.Instance.state:type_name -> google.cloud.redis.v1.Instance.State
23, // 3: google.cloud.redis.v1.Instance.redis_configs:type_name -> google.cloud.redis.v1.Instance.RedisConfigsEntry
1, // 4: google.cloud.redis.v1.Instance.tier:type_name -> google.cloud.redis.v1.Instance.Tier
2, // 5: google.cloud.redis.v1.Instance.connect_mode:type_name -> google.cloud.redis.v1.Instance.ConnectMode
4, // 6: google.cloud.redis.v1.ListInstancesResponse.instances:type_name -> google.cloud.redis.v1.Instance
4, // 7: google.cloud.redis.v1.CreateInstanceRequest.instance:type_name -> google.cloud.redis.v1.Instance
26, // 8: google.cloud.redis.v1.UpdateInstanceRequest.update_mask:type_name -> google.protobuf.FieldMask
4, // 9: google.cloud.redis.v1.UpdateInstanceRequest.instance:type_name -> google.cloud.redis.v1.Instance
12, // 10: google.cloud.redis.v1.InputConfig.gcs_source:type_name -> google.cloud.redis.v1.GcsSource
13, // 11: google.cloud.redis.v1.ImportInstanceRequest.input_config:type_name -> google.cloud.redis.v1.InputConfig
15, // 12: google.cloud.redis.v1.OutputConfig.gcs_destination:type_name -> google.cloud.redis.v1.GcsDestination
16, // 13: google.cloud.redis.v1.ExportInstanceRequest.output_config:type_name -> google.cloud.redis.v1.OutputConfig
3, // 14: google.cloud.redis.v1.FailoverInstanceRequest.data_protection_mode:type_name -> google.cloud.redis.v1.FailoverInstanceRequest.DataProtectionMode
25, // 15: google.cloud.redis.v1.OperationMetadata.create_time:type_name -> google.protobuf.Timestamp
25, // 16: google.cloud.redis.v1.OperationMetadata.end_time:type_name -> google.protobuf.Timestamp
24, // 17: google.cloud.redis.v1.LocationMetadata.available_zones:type_name -> google.cloud.redis.v1.LocationMetadata.AvailableZonesEntry
21, // 18: google.cloud.redis.v1.LocationMetadata.AvailableZonesEntry.value:type_name -> google.cloud.redis.v1.ZoneMetadata
5, // 19: google.cloud.redis.v1.CloudRedis.ListInstances:input_type -> google.cloud.redis.v1.ListInstancesRequest
7, // 20: google.cloud.redis.v1.CloudRedis.GetInstance:input_type -> google.cloud.redis.v1.GetInstanceRequest
8, // 21: google.cloud.redis.v1.CloudRedis.CreateInstance:input_type -> google.cloud.redis.v1.CreateInstanceRequest
9, // 22: google.cloud.redis.v1.CloudRedis.UpdateInstance:input_type -> google.cloud.redis.v1.UpdateInstanceRequest
10, // 23: google.cloud.redis.v1.CloudRedis.UpgradeInstance:input_type -> google.cloud.redis.v1.UpgradeInstanceRequest
14, // 24: google.cloud.redis.v1.CloudRedis.ImportInstance:input_type -> google.cloud.redis.v1.ImportInstanceRequest
17, // 25: google.cloud.redis.v1.CloudRedis.ExportInstance:input_type -> google.cloud.redis.v1.ExportInstanceRequest
18, // 26: google.cloud.redis.v1.CloudRedis.FailoverInstance:input_type -> google.cloud.redis.v1.FailoverInstanceRequest
11, // 27: google.cloud.redis.v1.CloudRedis.DeleteInstance:input_type -> google.cloud.redis.v1.DeleteInstanceRequest
6, // 28: google.cloud.redis.v1.CloudRedis.ListInstances:output_type -> google.cloud.redis.v1.ListInstancesResponse
4, // 29: google.cloud.redis.v1.CloudRedis.GetInstance:output_type -> google.cloud.redis.v1.Instance
27, // 30: google.cloud.redis.v1.CloudRedis.CreateInstance:output_type -> google.longrunning.Operation
27, // 31: google.cloud.redis.v1.CloudRedis.UpdateInstance:output_type -> google.longrunning.Operation
27, // 32: google.cloud.redis.v1.CloudRedis.UpgradeInstance:output_type -> google.longrunning.Operation
27, // 33: google.cloud.redis.v1.CloudRedis.ImportInstance:output_type -> google.longrunning.Operation
27, // 34: google.cloud.redis.v1.CloudRedis.ExportInstance:output_type -> google.longrunning.Operation
27, // 35: google.cloud.redis.v1.CloudRedis.FailoverInstance:output_type -> google.longrunning.Operation
27, // 36: google.cloud.redis.v1.CloudRedis.DeleteInstance:output_type -> google.longrunning.Operation
28, // [28:37] is the sub-list for method output_type
19, // [19:28] is the sub-list for method input_type
19, // [19:19] is the sub-list for extension type_name
19, // [19:19] is the sub-list for extension extendee
0, // [0:19] is the sub-list for field type_name
}
func init() { file_google_cloud_redis_v1_cloud_redis_proto_init() }
func file_google_cloud_redis_v1_cloud_redis_proto_init() {
if File_google_cloud_redis_v1_cloud_redis_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Instance); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListInstancesRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListInstancesResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetInstanceRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CreateInstanceRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UpdateInstanceRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UpgradeInstanceRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DeleteInstanceRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GcsSource); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*InputConfig); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ImportInstanceRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GcsDestination); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*OutputConfig); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExportInstanceRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FailoverInstanceRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*OperationMetadata); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*LocationMetadata); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ZoneMetadata); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[9].OneofWrappers = []interface{}{
(*InputConfig_GcsSource)(nil),
}
file_google_cloud_redis_v1_cloud_redis_proto_msgTypes[12].OneofWrappers = []interface{}{
(*OutputConfig_GcsDestination)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_cloud_redis_v1_cloud_redis_proto_rawDesc,
NumEnums: 4,
NumMessages: 21,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_google_cloud_redis_v1_cloud_redis_proto_goTypes,
DependencyIndexes: file_google_cloud_redis_v1_cloud_redis_proto_depIdxs,
EnumInfos: file_google_cloud_redis_v1_cloud_redis_proto_enumTypes,
MessageInfos: file_google_cloud_redis_v1_cloud_redis_proto_msgTypes,
}.Build()
File_google_cloud_redis_v1_cloud_redis_proto = out.File
file_google_cloud_redis_v1_cloud_redis_proto_rawDesc = nil
file_google_cloud_redis_v1_cloud_redis_proto_goTypes = nil
file_google_cloud_redis_v1_cloud_redis_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// CloudRedisClient is the client API for CloudRedis service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type CloudRedisClient interface {
// Lists all Redis instances owned by a project in either the specified
// location (region) or all locations.
//
// The location should have the following format:
//
// * `projects/{project_id}/locations/{location_id}`
//
// If `location_id` is specified as `-` (wildcard), then all regions
// available to the project are queried, and the results are aggregated.
ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error)
// Gets the details of a specific Redis instance.
GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error)
// Creates a Redis instance based on the specified tier and memory size.
//
// By default, the instance is accessible from the project's
// [default network](https://cloud.google.com/vpc/docs/vpc).
//
// The creation is executed asynchronously and callers may check the returned
// operation to track its progress. Once the operation is completed the Redis
// instance will be fully functional. Completed longrunning.Operation will
// contain the new instance object in the response field.
//
// The returned operation is automatically deleted after a few hours, so there
// is no need to call DeleteOperation.
CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Updates the metadata and configuration of a specific Redis instance.
//
// Completed longrunning.Operation will contain the new instance object
// in the response field. The returned operation is automatically deleted
// after a few hours, so there is no need to call DeleteOperation.
UpdateInstance(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Upgrades Redis instance to the newer Redis version specified in the
// request.
UpgradeInstance(ctx context.Context, in *UpgradeInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Import a Redis RDB snapshot file from Cloud Storage into a Redis instance.
//
// Redis may stop serving during this operation. Instance state will be
	// IMPORTING for the entire operation. When complete, the instance will contain
// only data from the imported file.
//
// The returned operation is automatically deleted after a few hours, so
// there is no need to call DeleteOperation.
ImportInstance(ctx context.Context, in *ImportInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Export Redis instance data into a Redis RDB format file in Cloud Storage.
//
// Redis will continue serving during this operation.
//
// The returned operation is automatically deleted after a few hours, so
// there is no need to call DeleteOperation.
ExportInstance(ctx context.Context, in *ExportInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Initiates a failover of the master node to current replica node for a
// specific STANDARD tier Cloud Memorystore for Redis instance.
FailoverInstance(ctx context.Context, in *FailoverInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Deletes a specific Redis instance. Instance stops serving and data is
// deleted.
DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
}
type cloudRedisClient struct {
cc grpc.ClientConnInterface
}
func NewCloudRedisClient(cc grpc.ClientConnInterface) CloudRedisClient {
return &cloudRedisClient{cc}
}
func (c *cloudRedisClient) ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) {
out := new(ListInstancesResponse)
err := c.cc.Invoke(ctx, "/google.cloud.redis.v1.CloudRedis/ListInstances", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cloudRedisClient) GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) {
out := new(Instance)
err := c.cc.Invoke(ctx, "/google.cloud.redis.v1.CloudRedis/GetInstance", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cloudRedisClient) CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.redis.v1.CloudRedis/CreateInstance", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cloudRedisClient) UpdateInstance(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.redis.v1.CloudRedis/UpdateInstance", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cloudRedisClient) UpgradeInstance(ctx context.Context, in *UpgradeInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.redis.v1.CloudRedis/UpgradeInstance", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cloudRedisClient) ImportInstance(ctx context.Context, in *ImportInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.redis.v1.CloudRedis/ImportInstance", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cloudRedisClient) ExportInstance(ctx context.Context, in *ExportInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.redis.v1.CloudRedis/ExportInstance", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cloudRedisClient) FailoverInstance(ctx context.Context, in *FailoverInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.redis.v1.CloudRedis/FailoverInstance", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cloudRedisClient) DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.redis.v1.CloudRedis/DeleteInstance", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// CloudRedisServer is the server API for CloudRedis service.
type CloudRedisServer interface {
// Lists all Redis instances owned by a project in either the specified
// location (region) or all locations.
//
// The location should have the following format:
//
// * `projects/{project_id}/locations/{location_id}`
//
// If `location_id` is specified as `-` (wildcard), then all regions
// available to the project are queried, and the results are aggregated.
ListInstances(context.Context, *ListInstancesRequest) (*ListInstancesResponse, error)
// Gets the details of a specific Redis instance.
GetInstance(context.Context, *GetInstanceRequest) (*Instance, error)
// Creates a Redis instance based on the specified tier and memory size.
//
// By default, the instance is accessible from the project's
// [default network](https://cloud.google.com/vpc/docs/vpc).
//
// The creation is executed asynchronously and callers may check the returned
// operation to track its progress. Once the operation is completed the Redis
// instance will be fully functional. Completed longrunning.Operation will
// contain the new instance object in the response field.
//
// The returned operation is automatically deleted after a few hours, so there
// is no need to call DeleteOperation.
CreateInstance(context.Context, *CreateInstanceRequest) (*longrunning.Operation, error)
// Updates the metadata and configuration of a specific Redis instance.
//
// Completed longrunning.Operation will contain the new instance object
// in the response field. The returned operation is automatically deleted
// after a few hours, so there is no need to call DeleteOperation.
UpdateInstance(context.Context, *UpdateInstanceRequest) (*longrunning.Operation, error)
// Upgrades Redis instance to the newer Redis version specified in the
// request.
UpgradeInstance(context.Context, *UpgradeInstanceRequest) (*longrunning.Operation, error)
// Import a Redis RDB snapshot file from Cloud Storage into a Redis instance.
//
// Redis may stop serving during this operation. Instance state will be
	// IMPORTING for the entire operation. When complete, the instance will contain
// only data from the imported file.
//
// The returned operation is automatically deleted after a few hours, so
// there is no need to call DeleteOperation.
ImportInstance(context.Context, *ImportInstanceRequest) (*longrunning.Operation, error)
// Export Redis instance data into a Redis RDB format file in Cloud Storage.
//
// Redis will continue serving during this operation.
//
// The returned operation is automatically deleted after a few hours, so
// there is no need to call DeleteOperation.
ExportInstance(context.Context, *ExportInstanceRequest) (*longrunning.Operation, error)
// Initiates a failover of the master node to current replica node for a
// specific STANDARD tier Cloud Memorystore for Redis instance.
FailoverInstance(context.Context, *FailoverInstanceRequest) (*longrunning.Operation, error)
// Deletes a specific Redis instance. Instance stops serving and data is
// deleted.
DeleteInstance(context.Context, *DeleteInstanceRequest) (*longrunning.Operation, error)
}
// UnimplementedCloudRedisServer can be embedded to have forward compatible implementations.
type UnimplementedCloudRedisServer struct {
}
func (*UnimplementedCloudRedisServer) ListInstances(context.Context, *ListInstancesRequest) (*ListInstancesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListInstances not implemented")
}
func (*UnimplementedCloudRedisServer) GetInstance(context.Context, *GetInstanceRequest) (*Instance, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetInstance not implemented")
}
func (*UnimplementedCloudRedisServer) CreateInstance(context.Context, *CreateInstanceRequest) (*longrunning.Operation, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreateInstance not implemented")
}
func (*UnimplementedCloudRedisServer) UpdateInstance(context.Context, *UpdateInstanceRequest) (*longrunning.Operation, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateInstance not implemented")
}
func (*UnimplementedCloudRedisServer) UpgradeInstance(context.Context, *UpgradeInstanceRequest) (*longrunning.Operation, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpgradeInstance not implemented")
}
func (*UnimplementedCloudRedisServer) ImportInstance(context.Context, *ImportInstanceRequest) (*longrunning.Operation, error) {
return nil, status.Errorf(codes.Unimplemented, "method ImportInstance not implemented")
}
func (*UnimplementedCloudRedisServer) ExportInstance(context.Context, *ExportInstanceRequest) (*longrunning.Operation, error) {
return nil, status.Errorf(codes.Unimplemented, "method ExportInstance not implemented")
}
func (*UnimplementedCloudRedisServer) FailoverInstance(context.Context, *FailoverInstanceRequest) (*longrunning.Operation, error) {
return nil, status.Errorf(codes.Unimplemented, "method FailoverInstance not implemented")
}
func (*UnimplementedCloudRedisServer) DeleteInstance(context.Context, *DeleteInstanceRequest) (*longrunning.Operation, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteInstance not implemented")
}
func RegisterCloudRedisServer(s *grpc.Server, srv CloudRedisServer) {
s.RegisterService(&_CloudRedis_serviceDesc, srv)
}
func _CloudRedis_ListInstances_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListInstancesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CloudRedisServer).ListInstances(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.redis.v1.CloudRedis/ListInstances",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CloudRedisServer).ListInstances(ctx, req.(*ListInstancesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CloudRedis_GetInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetInstanceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CloudRedisServer).GetInstance(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.redis.v1.CloudRedis/GetInstance",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CloudRedisServer).GetInstance(ctx, req.(*GetInstanceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CloudRedis_CreateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateInstanceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CloudRedisServer).CreateInstance(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.redis.v1.CloudRedis/CreateInstance",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CloudRedisServer).CreateInstance(ctx, req.(*CreateInstanceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CloudRedis_UpdateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateInstanceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CloudRedisServer).UpdateInstance(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.redis.v1.CloudRedis/UpdateInstance",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CloudRedisServer).UpdateInstance(ctx, req.(*UpdateInstanceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CloudRedis_UpgradeInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpgradeInstanceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CloudRedisServer).UpgradeInstance(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.redis.v1.CloudRedis/UpgradeInstance",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CloudRedisServer).UpgradeInstance(ctx, req.(*UpgradeInstanceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CloudRedis_ImportInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ImportInstanceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CloudRedisServer).ImportInstance(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.redis.v1.CloudRedis/ImportInstance",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CloudRedisServer).ImportInstance(ctx, req.(*ImportInstanceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CloudRedis_ExportInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExportInstanceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CloudRedisServer).ExportInstance(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.redis.v1.CloudRedis/ExportInstance",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CloudRedisServer).ExportInstance(ctx, req.(*ExportInstanceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CloudRedis_FailoverInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(FailoverInstanceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CloudRedisServer).FailoverInstance(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.redis.v1.CloudRedis/FailoverInstance",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CloudRedisServer).FailoverInstance(ctx, req.(*FailoverInstanceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CloudRedis_DeleteInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteInstanceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CloudRedisServer).DeleteInstance(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.redis.v1.CloudRedis/DeleteInstance",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CloudRedisServer).DeleteInstance(ctx, req.(*DeleteInstanceRequest))
}
return interceptor(ctx, in, info, handler)
}
var _CloudRedis_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.cloud.redis.v1.CloudRedis",
HandlerType: (*CloudRedisServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ListInstances",
Handler: _CloudRedis_ListInstances_Handler,
},
{
MethodName: "GetInstance",
Handler: _CloudRedis_GetInstance_Handler,
},
{
MethodName: "CreateInstance",
Handler: _CloudRedis_CreateInstance_Handler,
},
{
MethodName: "UpdateInstance",
Handler: _CloudRedis_UpdateInstance_Handler,
},
{
MethodName: "UpgradeInstance",
Handler: _CloudRedis_UpgradeInstance_Handler,
},
{
MethodName: "ImportInstance",
Handler: _CloudRedis_ImportInstance_Handler,
},
{
MethodName: "ExportInstance",
Handler: _CloudRedis_ExportInstance_Handler,
},
{
MethodName: "FailoverInstance",
Handler: _CloudRedis_FailoverInstance_Handler,
},
{
MethodName: "DeleteInstance",
Handler: _CloudRedis_DeleteInstance_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/cloud/redis/v1/cloud_redis.proto",
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
qa/rpc-tests/util.py
|
# Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2018 The Blocknode developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "blocknode.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
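# Illustration (not part of the original file): for node 0,
# initialize_datadir("cache", 0) writes cache/node0/blocknode.conf containing
# roughly the following, with both ports derived from the pid so that parallel
# test runs do not collide:
#
#   regtest=1
#   rpcuser=rt
#   rpcpassword=rt
#   port=<11000 + n + pid % 999>
#   rpcport=<12000 + n + pid % 999>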
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
blocknoded and blocknode-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run blocknoded:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "blocknoded"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "blocknode-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in blocknode.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
    match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
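# Hedged example (not in the original file): a few concrete conversions showing
# what _rpchost_to_args is expected to produce. Wrapped in a function so that
# importing this module stays side-effect free; the helper name is hypothetical.
def _example_rpchost_to_args():
    assert _rpchost_to_args(None) == []
    assert _rpchost_to_args("127.0.0.1") == ["-rpcconnect=127.0.0.1"]
    assert _rpchost_to_args("127.0.0.1:12345") == ["-rpcconnect=127.0.0.1", "-rpcport=12345"]
    assert _rpchost_to_args("[::1]:12345") == ["-rpcconnect=::1", "-rpcport=12345"]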
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a blocknoded and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "blocknoded"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "blocknode-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple blocknodeds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
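# Hedged sketch (not in the original file): the usual way the start/connect
# helpers are combined at the top of a test. It assumes blocknoded and
# blocknode-cli are on PATH and that dirname was prepared by initialize_chain
# or initialize_chain_clean; nothing here runs at import time, and the
# function name is hypothetical.
def _example_two_node_setup(dirname):
    nodes = start_nodes(2, dirname)
    connect_nodes_bi(nodes, 0, 1)
    sync_blocks(nodes)
    return nodes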
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
    assert confirmations_required >= 0
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using it's output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
[] |
[] |
[
"BITCOINCLI",
"BITCOIND"
] |
[]
|
["BITCOINCLI", "BITCOIND"]
|
python
| 2 | 0 | |
bdd_mtl/tools/test.py
|
import argparse
import os
import os.path as osp
import shutil
import tempfile
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import load_checkpoint, get_dist_info
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmdet.apis import init_dist
from mmdet.core import results2json, coco_eval, wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
def single_gpu_test(model, data_loader, show=False):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=not show, **data)
results.append(result)
if show:
model.module.show_result(data, result, dataset.img_norm_cfg)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None):
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if rank == 0:
batch_size = data['img'][0].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
results = collect_results(results, len(dataset), tmpdir)
return results
def collect_results(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
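# Note: collect_results assumes every rank can write part files that rank 0 can later
# read, either via the broadcast temp dir created above or an explicitly shared path.
# A minimal illustrative call (path is an example only):
#   ordered = collect_results(results, len(dataset), tmpdir='/shared/tmp/eval')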
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file')
parser.add_argument(
'--eval',
type=str,
nargs='+',
choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
help='eval types')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
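# Illustrative invocation (config/checkpoint paths are examples only):
#   python tools/test.py configs/some_config.py work_dirs/ckpt.pth --out results.pkl --eval bbox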
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save or show the results) '
'with the argument "--out" or "--show"')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
imgs_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints; this workaround is
    # for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show)
else:
model = MMDistributedDataParallel(model.cuda())
outputs = multi_gpu_test(model, data_loader, args.tmpdir)
rank, _ = get_dist_info()
if args.out and rank == 0:
print('\nwriting results to {}'.format(args.out))
mmcv.dump(outputs, args.out)
eval_types = args.eval
if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
if eval_types == ['proposal_fast']:
result_file = args.out
coco_eval(result_file, eval_types, dataset.coco)
else:
if not isinstance(outputs[0], dict):
result_files = results2json(dataset, outputs, args.out)
coco_eval(result_files, eval_types, dataset.coco)
else:
for name in outputs[0]:
print('\nEvaluating {}'.format(name))
outputs_ = [out[name] for out in outputs]
result_file = args.out + '.{}'.format(name)
result_files = results2json(dataset, outputs_,
result_file)
coco_eval(result_files, eval_types, dataset.coco)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"LOCAL_RANK"
] |
[]
|
["LOCAL_RANK"]
|
python
| 1 | 0 | |
conda_build/jinja_context.py
|
'''
Created on Jan 16, 2014
@author: sean
'''
from __future__ import absolute_import, division, print_function
import json
import os
from conda.compat import PY3
from .environ import get_dict as get_environ
_setuptools_data = None
def load_setuptools(setup_file='setup.py'):
global _setuptools_data
if _setuptools_data is None:
_setuptools_data = {}
def setup(**kw):
_setuptools_data.update(kw)
import setuptools
#Add current directory to path
import sys
sys.path.append('.')
#Patch setuptools
setuptools_setup = setuptools.setup
setuptools.setup = setup
exec(open(setup_file).read())
setuptools.setup = setuptools_setup
del sys.path[-1]
return _setuptools_data
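# Typical use is from a recipe's jinja template (illustrative sketch; the exact
# fields depend on the package's setup.py):
#   {% set data = load_setuptools() %}
#   version: {{ data.get('version') }}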
def load_npm():
# json module expects bytes in Python 2 and str in Python 3.
mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
with open('package.json', **mode_dict) as pkg:
return json.load(pkg)
def context_processor():
ctx = get_environ()
environ = dict(os.environ)
environ.update(get_environ())
ctx.update(load_setuptools=load_setuptools,
load_npm=load_npm,
environ=environ)
return ctx
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
api/server.py
|
# Copyright 2020 - 2021 IBM Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import flask
import json
import os
import requests
import sys
import uuid
from gevent.wsgi import WSGIServer
from werkzeug.exceptions import HTTPException
import kubernetes
from kubernetes.client import V1Namespace
from kubernetes.client import V1ObjectMeta
from kubernetes.client.rest import ApiException
from kafka import KafkaProducer
from kafka.admin import KafkaAdminClient, NewTopic
from kafka.errors import KafkaError, TopicAlreadyExistsError
import iso8601
KAFKA_API_VERSION = (0, 10, 1)
KAFKA_TIMEOUT = 10 # seconds
KAFKA_IP = os.getenv('ISSM_KAFKA_HOST')
KAFKA_PORT = os.getenv('ISSM_KAFKA_PORT', 9092)
if not KAFKA_IP:
print ('ISSM_KAFKA_HOST not set')
    sys.exit(1)
ARGO_SERVER = os.getenv('ARGO_SERVER')
if not ARGO_SERVER:
print ('ARGO_SERVER not set')
    sys.exit(1)
LB_ARGO_SERVER = os.getenv('LB_ARGO_SERVER')
if not LB_ARGO_SERVER:
print ('LB_ARGO_SERVER not set')
    sys.exit(1)
TRANSACTION_TYPES = ['instantiate', 'scaleout']
def parse_isotime(timestr):
"""Parse time from ISO 8601 format."""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise Exception(str(e))
except TypeError as e:
raise Exception(str(e))
def publish_intent(kafka_ip, kafka_port, topic, payload):
"""
Send the intent to the ISSM kafka bus
:param kafka_ip: kafka broker ipaddress
:type kafka_ip: ``str``
:param kafka_port: kafka broker port
:type kafka_port: ``int``
:param payload: the payload (intent) to send
:type payload: ``dict``
"""
producer = KafkaProducer(bootstrap_servers="%s:%s" % (kafka_ip, kafka_port),
api_version=KAFKA_API_VERSION,
value_serializer=lambda v: json.dumps(v).encode('utf-8'))
print('[INTENT] %s' % payload)
t = producer.send(topic, payload)
# Block for 'synchronous' send; set timeout on X seconds
try:
t.get(timeout=KAFKA_TIMEOUT)
except KafkaError as ke:
print('[ERROR] KafkaError: %s' % str(ke))
raise ke
finally:
producer.close()
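# Minimal illustrative call (broker address, topic and payload are example values only):
#   publish_intent('10.0.0.1', 9092, topic='issm-in-operator-a',
#                  payload={'event_uuid': 'abc123', 'operation': 'instantiate'})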
class Proxy:
def __init__(self):
"""
Initialize the proxy with the in-cluster configuration and the required
APIs
"""
kubernetes.config.load_incluster_config()
self.api = kubernetes.client.CustomObjectsApi()
sys.stdout.write('ISSM-API initialized\n')
def instantiate(self, service_owner, transaction_type, intent):
"""
        Instantiate an ISSM flow with the given intent on behalf of the service_owner.
:param service_owner: the name of the service owner.
:type service_owner: ``str``
:param transaction_type: the operation for this flow. Currently 'instantiate',
'scaleout' are supported.
:type transaction_type: ``str``
:param intent: intent payload e.g. slice creation intent
:type intent: ``dict``
"""
event_uuid = str(uuid.uuid4()).replace('-','')
print ('** event_uuid: %s' % event_uuid)
payload = dict(event_uuid=event_uuid, transaction_uuid=event_uuid,
service_owner=service_owner,
operation=transaction_type, sub_operation='new_intent')
payload['callback'] = dict(type='kafka', kafka_topic=service_owner)
payload.update(intent)
publish_intent(KAFKA_IP, KAFKA_PORT,
topic='issm-in-%s' % service_owner, payload=payload)
return {'transaction_uuid': event_uuid}
def get_transactions(self, service_owner, transaction_type=None):
"""
Return a list of transactions belonging to the given service owner.
        If transaction_type is supplied, filter by operation.
:param service_owner: the service owner
:type service_owner: ``str``
:param transaction_type: the transaction type
:type transaction_type: ``str``
"""
sys.stdout.write (
'Enter get_transactions [service_owner=%s, transaction_type=%s]\n' %
(service_owner, transaction_type))
query_str = "fields=items.metadata.name,items.metadata.creationTimestamp,"\
"items.metadata.labels.transaction_uuid,"\
"items.metadata.labels.operation,items.status.phase&"\
"listOptions.labelSelector=issm=true"
if transaction_type:
query_str = query_str + \
",operation=%s" % transaction_type
headers = {'Content-Type': 'application/json'}
r = requests.get("http://%(argo_server)s/api/v1/workflows/%(namespace)s?%(query)s" %
{
"argo_server": ARGO_SERVER,
"namespace": "domain-"+service_owner,
"query": query_str
}, headers=headers)
sys.stdout.write ('Parsing result [r.json=%s]..\n' % r.json())
items = r.json()['items'] if r.json().get('items') is not None else []
transactions = dict()
for i in items:
# transaction key points to list of its subflows
transactions.setdefault(i['metadata']['labels']['transaction_uuid'], []).append(i)
# prepare the output
res = []
for k in transactions:
subflows = transactions[k]
status_set = set()
t_type = subflows[0]['metadata']['labels']['operation']
for sf in subflows:
status_set.add(sf['status']['phase'])
dates = [parse_isotime(sf['metadata']['creationTimestamp']) for sf in subflows]
dates.sort()
sorteddates = [datetime.datetime.isoformat(ts) for ts in dates]
sys.stdout.write ('sorteddates of transaction [%s]: [%s]\n' % (k, sorteddates))
# at least one Failed
if 'Failed' in status_set:
status = 'Failed'
# at least one Running
elif 'Running' in status_set:
status = 'Running'
# all Succeeded
elif len(status_set) == 1 and 'Succeeded' in status_set:
status = 'Succeeded'
res.append(
dict(transaction_uuid=k, transaction_type=t_type, status=status,
created=sorteddates[0],
ref='http://%s/workflows/domain-%s?label=transaction_uuid=%s'
% (LB_ARGO_SERVER, service_owner, k)))
return res
def delete_transaction(self, service_owner, transaction_uuid):
"""
Delete a transaction of a given service owner.
Delete all subflows belonging to the given transaction uuid for this
service owner.
:param service_owner: the service owner owning the transaction
:type service_owner: ``str``
:param transaction_uuid: the transaction uuid
:type transaction_uuid: ``str`` in uuid format
"""
sys.stdout.write (
'Enter delete_transaction [service_owner=%s, transaction_uuid=%s]\n' %
(service_owner, transaction_uuid))
headers = {'Content-Type': 'application/json'}
query_str = "fields=items.metadata.name,"\
"items.metadata.labels.transaction_uuid&"\
"listOptions.labelSelector=transaction_uuid=%s" % transaction_uuid
        sys.stdout.write ('Retrieve workflows [namespace=%s] [query=%s]\n' %
            ('domain-'+service_owner, query_str))
r = requests.get("http://%(argo_server)s/api/v1/workflows/%(namespace)s?%(query)s" %
{
"argo_server": ARGO_SERVER,
"namespace": "domain-"+service_owner,
"query": query_str
}, headers=headers)
sys.stdout.write ('Parsing result [r.json=%s]..\n' % r.json())
items = r.json()['items'] if r.json().get('items') is not None else []
if not items:
raise Exception(
"[transaction_uuid=%s] does not exist for [service_owner=%s]" %
(transaction_uuid, service_owner))
for i in items:
name = i['metadata']['name']
sys.stdout.write ('Deleting workflow [name=%s]..\n' % name)
requests.delete("http://%(argo_server)s/api/v1/workflows/%(namespace)s/%(name)s" %
{
"argo_server": ARGO_SERVER,
"namespace": "domain-"+service_owner,
"name": name
}, headers=headers)
def get_workflow(self, transaction_uuid):
"""
Retrieve the *first* workflow object of a given transaction_uuid.
This operation is cross namespace and hence goes through k8s API.
"""
sys.stdout.write('Requesting workflows\n')
res = {}
# Filter also by metadata.name so that we get the *first* transaction flow
workflows = self.api.list_cluster_custom_object(
group="argoproj.io",
version="v1alpha1",
plural="workflows",
label_selector="transaction_uuid=%s" % transaction_uuid,
field_selector="metadata.name=%s" % transaction_uuid)
if workflows and workflows.get('items'):
#sys.stdout.write(str(workflows['items'][0]) + '\n')
sys.stdout.write(str(len(workflows['items'])) + '\n')
wf = (workflows['items'][0])
wf_params = wf.get('spec', {}).get('arguments', {}).get('parameters', [])
res = {
'name': wf['metadata']['name'],
'phase': wf['status']['phase'],
'progress': wf['status']['progress'],
'workflow_parameters': wf_params
}
return res
proxy = flask.Flask(__name__)
proxy.debug = True
server = None
proxy_server = None
def setServer(s):
global server
server = s
def setProxy(p):
global proxy_server
proxy_server = p
def getMessagePayload():
message = flask.request.get_json(force=True, silent=True)
if message and not isinstance(message, dict):
flask.abort(400, 'message payload is not a dictionary')
else:
value = message if (message or message == {}) else {}
if not isinstance(value, dict):
flask.abort(400, 'message payload did not provide binding for "value"')
return value
@proxy.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,POST,DELETE')
return response
@proxy.route("/hello")
def hello():
sys.stdout.write ('Enter /hello\n')
return ("Greetings from the ISSM-API server! ")
@proxy.route("/transactions/<service_owner>/<transaction_type>", methods=['POST'])
def transactions_submit(service_owner, transaction_type):
sys.stdout.write('Received submit request for '
'[service_owner=%s, transaction_type=%s] \n' %
(service_owner, transaction_type))
try:
value = getMessagePayload()
if transaction_type not in TRANSACTION_TYPES:
raise Exception(
'transaction_type value does not match: %s' %
TRANSACTION_TYPES)
intent = value
response = flask.jsonify(
proxy_server.instantiate(
service_owner=service_owner, transaction_type=transaction_type,
intent=intent))
response.status_code = 200
#response.headers["Access-Control-Allow-Origin"] = "*"
return response
except Exception as e:
response = flask.jsonify({'error': 'Internal error. {}'.format(e)})
#response.headers["Access-Control-Allow-Origin"] = "*"
response.status_code = 500
sys.stdout.write('Exit /instantiate %s\n' % str(response))
return response
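# Illustrative client call for the POST route above (host, owner and intent body are
# example values only; 8080 is the default LISTEN_PORT):
#   curl -X POST http://<api-host>:8080/transactions/operator-a/instantiate \
#        -H 'Content-Type: application/json' -d '{"slice_intent": "..."}'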
@proxy.route("/transactions/<service_owner>", methods=['GET'])
def transactions_get_all(service_owner):
sys.stdout.write('Received get request for '
'[service_owner=%s] \n' % service_owner)
try:
flow_json = proxy_server.get_transactions(service_owner)
response = flask.jsonify(flow_json)
response.status_code = 200
#response.headers["Access-Control-Allow-Origin"] = "*"
return response
except HTTPException as e:
return e
except Exception as e:
response = flask.jsonify({'error': 'Internal error. {}'.format(e)})
response.status_code = 500
#response.headers["Access-Control-Allow-Origin"] = "*"
return response
@proxy.route("/transactions/<service_owner>/<transaction_type>", methods=['GET'])
def transactions_get(service_owner, transaction_type):
sys.stdout.write('Received get request for '
'[service_owner=%s, transaction_type=%s] \n' %
(service_owner, transaction_type))
try:
flow_json = proxy_server.get_transactions(service_owner, transaction_type)
response = flask.jsonify(flow_json)
response.status_code = 200
#response.headers["Access-Control-Allow-Origin"] = "*"
return response
except HTTPException as e:
return e
except Exception as e:
response = flask.jsonify({'error': 'Internal error. {}'.format(e)})
response.status_code = 500
#response.headers["Access-Control-Allow-Origin"] = "*"
return response
@proxy.route("/transactions/<service_owner>/<transaction_uuid>", methods=['DELETE'])
def transactions_delete(service_owner, transaction_uuid):
sys.stdout.write('Received delete request for '
'[service_owner=%s, transaction_uuid=%s] \n' %
(service_owner, transaction_uuid))
try:
proxy_server.delete_transaction(service_owner, transaction_uuid)
response = flask.jsonify({'OK': 200})
response.status_code = 200
#response.headers["Access-Control-Allow-Origin"] = "*"
return response
except HTTPException as e:
return e
except Exception as e:
response = flask.jsonify({'error': 'Internal error. {}'.format(e)})
response.status_code = 500
#response.headers["Access-Control-Allow-Origin"] = "*"
return response
@proxy.route("/transactions_types", methods=['GET'])
def transactions_types():
sys.stdout.write('Received get types\n')
response = flask.jsonify(TRANSACTION_TYPES)
response.status_code = 200
#response.headers["Access-Control-Allow-Origin"] = "*"
return response
@proxy.route("/workflows/<transaction_uuid>", methods=['GET'])
def workflows_get(transaction_uuid):
sys.stdout.write('Received get workflows for [transaction_uuid=%s]\n' %
transaction_uuid)
try:
flow_json = proxy_server.get_workflow(transaction_uuid)
if not flow_json:
response = flask.jsonify({'error': 'Workflow not found'})
response.status_code = 404
else:
response = flask.jsonify(flow_json)
response.status_code = 200
return response
except HTTPException as e:
return e
except Exception as e:
response = flask.jsonify({'error': 'Internal error. {}'.format(e)})
response.status_code = 500
return response
def main():
port = int(os.getenv('LISTEN_PORT', 8080))
server = WSGIServer(('0.0.0.0', port), proxy, log=None)
setServer(server)
print ('\n\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')
print ("Starting ISSM-API .. ready to serve requests on PORT: %s..\n\n"
"KAFKA_SERVER '%s:%s' "
"KAFKA_API_VERSION '%s' " %
(int(port), KAFKA_IP, str(KAFKA_PORT), KAFKA_API_VERSION))
print ('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\n')
server.serve_forever()
if __name__ == '__main__':
setProxy(Proxy())
main()
|
[] |
[] |
[
"ISSM_KAFKA_PORT",
"ARGO_SERVER",
"LISTEN_PORT",
"LB_ARGO_SERVER",
"ISSM_KAFKA_HOST"
] |
[]
|
["ISSM_KAFKA_PORT", "ARGO_SERVER", "LISTEN_PORT", "LB_ARGO_SERVER", "ISSM_KAFKA_HOST"]
|
python
| 5 | 0 | |
mestres_vidas/wsgi.py
|
"""
WSGI config for mestres_vidas project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mestres_vidas.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
protocol/goose-mysql/init_integration_test.go
|
// +build integration
package goosemysql
import (
"context"
"log"
"net/url"
"os"
"strings"
"time"
proxypkg "github.com/docker/go-connections/proxy"
"github.com/go-sql-driver/mysql"
"github.com/powerman/check"
"github.com/powerman/gotest/testinit"
"github.com/powerman/mysqlx"
"github.com/powerman/narada4d/internal"
)
const (
testDBSuffix = "github.com/powerman/narada4d/protocol/goose_mysql"
sqlDropTable = "DROP TABLE Narada4D, goose_db_version"
)
var (
loc *url.URL
proxy *proxypkg.TCPProxy
)
func init() { testinit.Setup(2, setupIntegration) }
func setupIntegration() {
logger := log.New(os.Stderr, "", log.LstdFlags)
var err error
loc, err = url.Parse(os.Getenv("NARADA4D_TEST_MYSQL"))
if err != nil {
testinit.Fatal("failed to parse $NARADA4D_TEST_MYSQL as URL: ", err)
}
loc.Scheme = "goose-mysql"
ctx, cancel := context.WithTimeout(ctx, 7*testSecond)
defer cancel()
proxy, err = internal.NewTCPProxy(ctx, "127.0.0.1:0", loc.Host)
if err != nil {
testinit.Fatal("failed to NewTCPProxy: ", err)
}
testinit.Teardown(func() { proxy.Close() })
loc.Host = proxy.FrontendAddr().String()
dbCfg, err := mysql.ParseDSN(dsn(loc))
if err != nil {
testinit.Fatal("failed to parse $NARADA4D_TEST_MYSQL as DSN: ", err)
}
dbCfg.Timeout = 3 * testSecond
dbCfg, cleanup, err := mysqlx.EnsureTempDB(logger, testDBSuffix, dbCfg)
if err != nil {
testinit.Fatal(err)
}
testinit.Teardown(cleanup)
loc.Path = "/" + dbCfg.DBName
}
func dropTable(t *check.C) {
t.Helper()
s, err := newStorage(loc)
t.Nil(err)
_, err = s.db.Exec(sqlDropTable)
t.Nil(err)
t.Nil(s.Close())
}
func testLock(name string, loc *url.URL, unlockc chan struct{}, statusc chan string) {
v, err := newStorage(loc)
if err != nil {
panic(err)
}
cancel := make(chan struct{}, 1)
go func() {
select {
case <-cancel:
case <-time.After(testSecond / 10):
statusc <- "block " + name
}
}()
switch {
case strings.HasPrefix(name, "EX"):
v.ExclusiveLock()
case strings.HasPrefix(name, "SH"):
v.SharedLock()
default:
panic("name must begins with EX or SH")
}
cancel <- struct{}{}
statusc <- "acquired " + name
<-unlockc
v.Unlock()
_ = v.Close()
}
|
[
"\"NARADA4D_TEST_MYSQL\""
] |
[] |
[
"NARADA4D_TEST_MYSQL"
] |
[]
|
["NARADA4D_TEST_MYSQL"]
|
go
| 1 | 0 | |
libtor/linux_openssl_ssl_s3_msg.go
|
// go-libtor - Self-contained Tor from Go
// Copyright (c) 2018 Péter Szilágyi. All rights reserved.
// +build linux android
// +build staticOpenssl
package libtor
/*
#define DSO_NONE
#define OPENSSLDIR "/usr/local/ssl"
#define ENGINESDIR "/usr/local/lib/engines"
#include <../ssl/s3_msg.c>
*/
import "C"
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
util/fix/fix.go
|
package fix
import (
"os"
"strings"
)
// Path resolves "~" into the real home path
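// For example (illustrative, assuming HOME=/home/alice):
//   Path("~/projects") -> "/home/alice/projects"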
func Path(path string) string {
return strings.Replace(path, "~", os.Getenv("HOME"), 1)
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
dnnlib/submission/submit.py
|
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Submit a function to be run either locally or in a computing cluster."""
import copy
import inspect
import os
import pathlib
import pickle
import platform
import pprint
import re
import shutil
import sys
import time
import traceback
import threading
import dnnlib.tflib as tflib
import tflex
import tensorflow as tf
from enum import Enum
from .. import util
from ..util import EasyDict
from . import internal
class SubmitTarget(Enum):
"""The target where the function should be run.
LOCAL: Run it locally.
"""
LOCAL = 1
class PathType(Enum):
"""Determines in which format should a path be formatted.
WINDOWS: Format with Windows style.
LINUX: Format with Linux/Posix style.
AUTO: Use current OS type to select either WINDOWS or LINUX.
"""
WINDOWS = 1
LINUX = 2
AUTO = 3
class PlatformExtras:
"""A mixed bag of values used by dnnlib heuristics.
Attributes:
data_reader_buffer_size: Used by DataReader to size internal shared memory buffers.
data_reader_process_count: Number of worker processes to spawn (zero for single thread operation)
"""
def __init__(self):
self.data_reader_buffer_size = 1<<30 # 1 GB
self.data_reader_process_count = 0 # single threaded default
_user_name_override = None
class SubmitConfig(util.EasyDict):
"""Strongly typed config dict needed to submit runs.
Attributes:
run_dir_root: Path to the run dir root. Can be optionally templated with tags. Needs to always be run through get_path_from_template.
run_desc: Description of the run. Will be used in the run dir and task name.
run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir.
run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will be the src directory inside the run dir.
submit_target: Submit target enum value. Used to select where the run is actually launched.
num_gpus: Number of GPUs used/requested for the run.
print_info: Whether to print debug information when submitting.
local.do_not_copy_source_files: Do not copy source files from the working directory to the run dir.
run_id: Automatically populated value during submit.
run_name: Automatically populated value during submit.
run_dir: Automatically populated value during submit.
run_func_name: Automatically populated value during submit.
run_func_kwargs: Automatically populated value during submit.
user_name: Automatically populated value during submit. Can be set by the user which will then override the automatic value.
task_name: Automatically populated value during submit.
host_name: Automatically populated value during submit.
platform_extras: Automatically populated values during submit. Used by various dnnlib libraries such as the DataReader class.
"""
def __init__(self):
super().__init__()
# run (set these)
self.run_dir_root = "" # should always be passed through get_path_from_template
self.run_desc = ""
self.run_dir_ignore = ["__pycache__", "*.pyproj", "*.sln", "*.suo", ".cache", ".idea", ".vs", ".vscode", "_cudacache"]
self.run_dir_extra_files = []
# submit (set these)
self.submit_target = SubmitTarget.LOCAL
self.num_gpus = 1
self.print_info = False
self.nvprof = False
self.local = internal.local.TargetOptions()
self.datasets = []
# (automatically populated)
self.run_id = None
self.run_name = None
self.run_dir = None
self.run_func_name = None
self.run_func_kwargs = None
self.user_name = None
self.task_name = None
self.host_name = "localhost"
self.platform_extras = PlatformExtras()
def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str:
"""Replace tags in the given path template and return either Windows or Linux formatted path."""
if path_template.startswith('gs://'):
return path_template
# automatically select path type depending on running OS
if path_type == PathType.AUTO:
if platform.system() == "Windows":
path_type = PathType.WINDOWS
else:
path_type = PathType.LINUX
path_template = path_template.replace("<USERNAME>", get_user_name())
# return correctly formatted path
if path_type == PathType.WINDOWS:
return str(pathlib.PureWindowsPath(path_template))
elif path_type == PathType.LINUX:
return str(pathlib.PurePosixPath(path_template))
else:
raise RuntimeError("Unknown platform")
def get_template_from_path(path: str) -> str:
"""Convert a normal path back to its template representation."""
path = path.replace("\\", "/")
return path
def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str:
"""Convert a normal path to template and the convert it back to a normal path with given path type."""
path_template = get_template_from_path(path)
path = get_path_from_template(path_template, path_type)
return path
def set_user_name_override(name: str) -> None:
"""Set the global username override value."""
global _user_name_override
_user_name_override = name
def get_user_name():
"""Get the current user name."""
if _user_name_override is not None:
return _user_name_override
elif platform.system() == "Windows":
return os.getlogin()
else:
try:
import pwd # pylint: disable=import-error
return pwd.getpwuid(os.geteuid()).pw_name # pylint: disable=no-member
except:
return "unknown"
def make_run_dir_path(*paths):
"""Make a path/filename that resides under the current submit run_dir.
Args:
*paths: Path components to be passed to os.path.join
Returns:
A file/dirname rooted at submit_config.run_dir. If there's no
submit_config or run_dir, the base directory is the current
working directory.
E.g., `os.path.join(dnnlib.submit_config.run_dir, "output.txt"))`
"""
import dnnlib
if (dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir is None):
return os.path.join(os.getcwd(), *paths)
return os.path.join(dnnlib.submit_config.run_dir, *paths)
def _create_run_dir_local(submit_config: SubmitConfig) -> str:
"""Create a new run dir with increasing ID number at the start."""
run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO)
if not os.path.exists(run_dir_root):
os.makedirs(run_dir_root)
submit_config.run_id = _get_next_run_id_local(run_dir_root)
submit_config.run_name = "{0:05d}-{1}".format(submit_config.run_id, submit_config.run_desc)
run_dir = os.path.join(run_dir_root, submit_config.run_name)
if os.path.exists(run_dir):
raise RuntimeError("The run dir already exists! ({0})".format(run_dir))
os.makedirs(run_dir)
return run_dir
def _get_next_run_id_local(run_dir_root: str) -> int:
"""Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id. Assumes IDs are numbers at the start of the directory names."""
dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))]
r = re.compile("^\\d+") # match one or more digits at the start of the string
run_id = 0
for dir_name in dir_names:
m = r.match(dir_name)
if m is not None:
i = int(m.group())
run_id = max(run_id, i + 1)
return run_id
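# Illustrative: with existing run dirs "00000-first" and "00003-retry" (example names),
# the next run id returned would be 4.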
def _populate_run_dir(submit_config: SubmitConfig, run_dir: str) -> None:
"""Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable."""
pickle.dump(submit_config, open(os.path.join(run_dir, "submit_config.pkl"), "wb"))
with open(os.path.join(run_dir, "submit_config.txt"), "w") as f:
pprint.pprint(submit_config, stream=f, indent=4, width=200, compact=False)
if (submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files:
return
files = []
run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name)
assert '.' in submit_config.run_func_name
for _idx in range(submit_config.run_func_name.count('.') - 1):
run_func_module_dir_path = os.path.dirname(run_func_module_dir_path)
files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=False)
dnnlib_module_dir_path = util.get_module_dir_by_obj_name("dnnlib")
files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=True)
files += submit_config.run_dir_extra_files
files = [(f[0], os.path.join(run_dir, "src", f[1])) for f in files]
files += [(os.path.join(dnnlib_module_dir_path, "submission", "internal", "run.py"), os.path.join(run_dir, "run.py"))]
util.copy_files_and_create_dirs(files)
def run_wrapper(submit_config: SubmitConfig) -> None:
"""Wrap the actual run function call for handling logging, exceptions, typing, etc."""
is_local = submit_config.submit_target == SubmitTarget.LOCAL
# when running locally, redirect stderr to stdout, log stdout to a file, and force flushing
if is_local:
logger = util.Logger(file_name=os.path.join(submit_config.run_dir, "log.txt"), file_mode="w", should_flush=True)
else: # when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh)
logger = util.Logger(file_name=None, should_flush=True)
import dnnlib
dnnlib.submit_config = submit_config
exit_with_errcode = False
try:
print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name, submit_config.host_name))
start_time = time.time()
run_func_obj = util.get_obj_by_name(submit_config.run_func_name)
assert callable(run_func_obj)
def thunk():
sig = inspect.signature(run_func_obj)
if 'submit_config' in sig.parameters:
run_func_obj(submit_config=submit_config, **submit_config.run_func_kwargs)
else:
run_func_obj(**submit_config.run_func_kwargs)
kws = submit_config.run_func_kwargs
tf_config = kws['tf_config'] if 'tf_config' in kws else {}
if 'TPU_NAME' not in os.environ or 'NO_SWARM' in os.environ:
tflib.init_tf(tf_config)
thunk()
else:
threads = []
tflex.trainers = []
tpu_core_count = 1 if 'TPU_CORE_COUNT' not in os.environ else int(os.environ['TPU_CORE_COUNT'])
tpu_core_offset = 0 if 'TPU_CORE_OFFSET' not in os.environ else int(os.environ['TPU_CORE_OFFSET'])
for i in range(tpu_core_count):
def worker(i):
_id = i + tpu_core_offset
spec = '#%d' % _id
print(spec, 'Initializing...')
tflib.init_tf(tf_config)
sess = tf.get_default_session()
cores = tflex.get_cores()[tpu_core_offset:tpu_core_offset+tpu_core_count]
sess.id = _id
tflex.trainers.append(sess)
if False:
tflex.set_override_device(cores[i])
with tf.device(cores[i]):
print(spec, 'Running thunk...')
thunk()
else:
tflex.set_override_cores(cores)
print(spec, 'Running thunk...')
thunk()
if tpu_core_count <= 1:
worker(i)
else:
thread = threading.Thread(target=worker, args=(i,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name, util.format_time(time.time() - start_time)))
except:
if is_local:
raise
else:
traceback.print_exc()
log_src = os.path.join(submit_config.run_dir, "log.txt")
log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), "{0}-error.txt".format(submit_config.run_name))
shutil.copyfile(log_src, log_dst)
# Defer sys.exit(1) to happen after we close the logs and create a _finished.txt
exit_with_errcode = True
finally:
open(os.path.join(submit_config.run_dir, "_finished.txt"), "w").close()
dnnlib.RunContext.get().close()
dnnlib.submit_config = None
logger.close()
# If we hit an error, get out of the script now and signal the error
# to whatever process that started this script.
if exit_with_errcode:
sys.exit(1)
return submit_config
def submit_run(submit_config: SubmitConfig, run_func_name: str, **run_func_kwargs) -> None:
"""Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place."""
submit_config = copy.deepcopy(submit_config)
submit_target = submit_config.submit_target
farm = None
if submit_target == SubmitTarget.LOCAL:
farm = internal.local.Target()
assert farm is not None # unknown target
# Disallow submitting jobs with zero num_gpus.
if (submit_config.num_gpus is None) or (submit_config.num_gpus == 0):
raise RuntimeError("submit_config.num_gpus must be set to a non-zero value")
if submit_config.user_name is None:
submit_config.user_name = get_user_name()
submit_config.run_func_name = run_func_name
submit_config.run_func_kwargs = run_func_kwargs
#--------------------------------------------------------------------
# Prepare submission by populating the run dir
#--------------------------------------------------------------------
host_run_dir = _create_run_dir_local(submit_config)
submit_config.task_name = "{0}-{1:05d}-{2}".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc)
docker_valid_name_regex = "^[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
if not re.match(docker_valid_name_regex, submit_config.task_name):
raise RuntimeError("Invalid task name. Probable reason: unacceptable characters in your submit_config.run_desc. Task name must be accepted by the following regex: " + docker_valid_name_regex + ", got " + submit_config.task_name)
# Farm specific preparations for a submit
farm.finalize_submit_config(submit_config, host_run_dir)
_populate_run_dir(submit_config, host_run_dir)
return farm.submit(submit_config, host_run_dir)
|
[] |
[] |
[
"TPU_CORE_OFFSET",
"TPU_CORE_COUNT"
] |
[]
|
["TPU_CORE_OFFSET", "TPU_CORE_COUNT"]
|
python
| 2 | 0 | |
samples/sample_cli.py
|
#----------------------------------------------------------------------------
# Do NOT modify or remove this copyright
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#****************************************************************************
# \file sample_cli.py
# \brief Sample script showing how to use the TCGstorageAPI
# Note: this script is an example only and uses hardcoded passwords; please change them.
#--------------------------------------------------------------------------------------------------
import os
import sys
import logging
import logging.handlers
import argparse
import struct
import uuid
from TCGstorageAPI.tcgapi import PskCipherSuites
from TCGstorageAPI.tcgapi import Sed
from TCGstorageAPI import keymanager as keymanager
import TCGstorageAPI.tcgSupport as tcgSupport
import helper as verifyidentity
import datetime
class Sedcfg(object):
'''
This is a class for performing operations on the SED drive
Attributes:
dev: Device handle of the drive.
'''
#
# WARNING! WARNING! WARNING!
# This sample script uses hardcoded values for the drive credentials.
# This is not a good security practice.
# Change these credential values to something more secure (up to 32-bytes in length)!
#
cred_table = {
'SID': 'ADMIN',
'C_PIN_Admin1': 'ADMIN1',
'Admin1': 'ADMIN1',
'C_PIN_User1': 'USER1',
'User1' : 'USER1',
'User2' : 'USER2',
'C_PIN_User2': 'USER2',
'EraseMaster': 'ERASEMASTER',
'BandMaster0': 'BANDMASTER0',
'BandMaster1': 'BANDMASTER1',
'BandMaster2': 'BANDMASTER2'
}
#NOT_FIPS --> Drive is not a FIPS drive
#FIPS_MODE --> Drive is a Fips drive and operating in FIPS mode
#NOT_FIPS_MODE -->Drive is a Fips drive and is not operating in FIPS mode/non-deterministic
Fips_status = ('NOT_FIPS','FIPS_MODE','NOT_FIPS_MODE')
def __init__(self, dev):
'''
The constructor for the class.
Parameters:
dev:Device handle of the drive.
'''
os_type = {'linux2':self.linux_platform,'linux':self.linux_platform, 'win32':self.windows_platform, 'freebsd12':self.freebsd_platform}
os_type[sys.platform](dev)
logging.basicConfig(
filename=self.log_filename,
format="%(asctime)s %(name)s (%(threadName)s) - %(message)s",
level=logging.DEBUG
)
self.logger = logging.getLogger(self.log_filename)
self.logger.debug('Start sedcfg Logger')
self.psk = None
self.keymanager = keymanager.KeyManager()
# Build the SED object for the drive
self.sed = Sed(self.devname, callbacks=self)
for key, val in list(self.cred_table.items()):
self.keymanager.setKey(key, val)
self.BandLayout = sedbandlayout()
self.BandLayout.bandauth(self)
self.initial_cred = self.sed.mSID
def linux_platform(self, devname):
'''
The function to initialize parameters for the linux platform.
Parameters:
devname:Device handle of the drive.
'''
self.log_filename = os.path.join(os.path.dirname(__file__), 'sedcfg.log')
self.devname = devname
def windows_platform(self, devname):
'''
The function to initialize parameters for the windows platform.
Parameters:
devname:Device handle of the drive.
'''
if getattr(sys, 'frozen', False):
# frozen
self.log_filename = os.path.join(os.path.dirname(sys.executable), 'sedcfg.log')
else:
# unfrozen
self.log_filename = os.path.join(os.path.dirname(__file__), 'sedcfg.log')
# For Windows we need to modify the input value from PD to the physical volume
# Extract PD from string and take the number value to be used and extrapolate into \\.\PhysicalDrive#
if ("PD" not in devname):
print("Please pass drive in as PD<drive number>")
print("Example: Disk 1 is PD1")
exit (1)
drive_number = devname[-1:]
self.devname = "\\\\.\\PhysicalDrive" + drive_number
def freebsd_platform(self, devname):
'''
        The function to initialize parameters for the BSD platform.
        Parameters:
        devname:Device handle of the drive.
'''
self.log_filename = os.path.join(os.path.dirname(__file__), 'sedcfg.log')
self.devname = devname
def TlsOperation(self, args=None):
'''
The function to enable and disable TLS on the drive.
Parameters:
args - Commandline arguments,i.e enable/disable
'''
if sys.platform=="win32":
print("Tls support not provided for Windows")
return False
if self.BandLayout.authority[1] == 'Admin1'and self.sed.checkPIN(self.BandLayout.authority[0], self.sed.mSID) == True:
print("Please perform operation changecreds before Tls enable")
return False
authAs = [(self.BandLayout.authority[0], None), (self.BandLayout.authority[1], None)]
key = tcgSupport.getPsk(self.sed)
if key == None:
print("Pre-Shared Key not generated")
return False
toUse = self.sed.getPskEntry(0)
for entryId in range(4):
psk = self.sed.getPskEntry(entryId)
if psk is None:
print("Drive doesn't support TLS")
return False
if psk.Enabled == True and psk.CipherSuite == (self.sed.cipherSuite):
if args.enabledisable == 'enable':
print("Tls already enabled")
return True
if args.enabledisable == 'disable':
return self.sed.setPskEntry(toUse, authAs, Enabled=False, CipherSuite=self.sed.cipherSuite, PSK=key)
if args.enabledisable == 'enable':
return self.sed.setPskEntry(toUse, authAs, Enabled=True, CipherSuite=self.sed.cipherSuite, PSK=key)
elif args.enabledisable == 'disable':
print(" TLS already disabled on the drive")
return True
else:
print("Please enter your input to either enable or disable Tls on the drive")
return False
def Fw_attestation(self,args=None):
'''
THIS IS A SEAGATE PROPRIETARY METHOD AND IT WORKS ONLY WITH SEAGATE DEVICES
The function to enable Firmware Attestation on the drive.
Assessor Nonce: A random 128 bit value generated by the Assessor (Host)
Root of Trust Reporting ID: The Common Name derived from the subject name in Tper Attestation Certificate encoded in DER format (GUDID)
The firmware_attestation method will fail if the RTR ID does not match
Assessor ID: A random value generated by the host, which will be included in the Signed Attestation Message
        Hashes of the measurement data, which comprise the following, are generated and printed:
Boot firmware 1 measurement value
Boot firmware 2 measurement value
Servo firmware measurement value
Controller firmware measurement value
Security firmware measurement value
'''
self.logger.debug('Receive Fw attestation cert')
print()
print("*** THIS IS THE FW ATTEST METHOD. IT IS A SEAGATE PROPRIETARY METHOD AND WORKS ONLY WITH SEAGATE DEVICES ***")
print()
# Retrieve the Tper attestation certificate
att_cert = self.sed.get_tperAttestation_Cert()
if (len(att_cert)) == 0:
print("The drive does not contain a certificate")
return
# Validate the drive attestation certificate against the root certificate
identity = verifyidentity.VerifyIdentity(att_cert)
identity.validate_drive_cert()
# Simulated values for the assessor_nonce, assessor_ID, sub_name
        assessor_nonce, sub_name = '23helloseagate', identity.CN
assessor_ID='34254525432Seagate'
# Receive Firmware attestation message from the drive
        self.logger.debug('Get Fw attestation message')
ret = self.sed.firmware_attestation(assessor_nonce,sub_name,assessor_ID)
# Verify the signature with the original string
if (ret):
return_val = ret[0]
Assessor_Nonce,Measurement,data,signature =tcgSupport.convert(return_val[512:528].replace(b'\x00',b'')),return_val[528:1376].hex(),return_val[0:1376],return_val[1376:1760]
if (Assessor_Nonce!=assessor_nonce):
return False
if (sub_name and assessor_ID):
Assessor_ID,RTR_ID = tcgSupport.convert(return_val[0:256].replace(b'\x00',b'')),tcgSupport.convert(return_val[256:512].replace(b'\x00',b''))
if (Assessor_ID!=assessor_ID and RTR_ID!=sub_name):
return False
# Display the measurement data to customers for verification
if identity.validate_signature(data,signature) == True:
print('The measurement data fields are displayed below:\n')
print('Secure Boot Process Device state={}\nSigning Authority Database={}\nSigning Authority Key Certificate Hash={}\nSee Signing Authority Key Certificate Hash={}\nBFW ITCM Hash={}\nBFW IDBA Hash={}\nServo FW Hash={}\nCFW Hash={}\nSEE FW Hash={}\n'.format(Measurement[3:131],Measurement[131:351],Measurement[351:383],Measurement[383:415],Measurement[415:447],Measurement[447:479],Measurement[479:511],Measurement[511:543],Measurement[543:575]))
return True
return False
def device_identification(self):
'''
The function to perform device identity attestation by validating the device certificate and digital signature
Uses Tpersign method to sign an input string to return the signature.
        Succeeds if the drive is Seagate-specific, fails otherwise
'''
self.sed.fipsCompliance = self.sed.fipsCompliance()
if self.sed.fipsCompliance != None:
print("Drive being tested is a FIPS drive, device identification not supported")
return
# Pull the drive certificate
self.logger.debug('Obtaining Drive certificate')
device_cert = self.sed.get_tperSign_cert()
# Validate the drive_certificate against the root certificate
identity = verifyidentity.VerifyIdentity(device_cert)
identity.validate_drive_cert()
# Send a string to obtain the device signature
string = str(datetime.datetime.today())
self.logger.debug('Performing digital signing operation')
signature = self.sed.tperSign(bytes(string,encoding='utf8'))
# Validate drive signature
verify = identity.validate_signature(string, signature)
if verify == True:
print("Device identification successfull, drive being tested is a Seagate drive")
else:
print("Drive being tested is not a Seagate drive")
return
def take_ownership(self, args=None):
'''
        The function to take ownership of the drive by changing the default Admin credentials, creating band authorities, and changing
        the credentials of the created band authorities.
Parameters:
args - Commandline arguments
Returns:
True: Successful completion of taking drive ownership.
False: Failure of taking drive ownership.
'''
self.logger.debug('Taking ownership of the drive')
if self.sed.checkPIN(self.BandLayout.authority[0], bytes(self.sed.mSID,encoding='utf8')) == False:
print("Revert the drive to factory state,Drive ownership already taken")
return False
# Change PIN of Admin to a new PIN from default value
good = self.sed.changePIN(self.BandLayout.authority[0], self.keymanager.getKey(self.BandLayout.authority[0]), (None, self.initial_cred))
if good is True:
if self.BandLayout.authority[1] == 'Admin1':
# Activate the Locking SP of the drive only for OPAL case
if self.sed.activate(self.BandLayout.authority[0]) == False:
return False
self.initial_cred = tcgSupport.getCred(self.keymanager,'SID')
# Change PIN of Admin of Locking SP
if self.sed.changePIN(self.BandLayout.authority[1], self.keymanager.getKey(self.BandLayout.authority[1]), (None, self.initial_cred), self.BandLayout.auth_objs[0]) == False:
return False
if self.enable_authority() is True:
print('Credentials of the drive are changed successfully')
return True
return False
def enable_authority(self):
'''
The function to enable authorities and change their credentials.
Returns:
        True: Enable Authority successful.
False: Failure to Enable Authority.
'''
self.logger.debug('Enable Authority on the drive')
        # Enable two users User1 and User2 and change their passwords to USER1 and USER2; BandMaster1 is enabled by default in case of Enterprise.
for obj in self.BandLayout.auth_objs[3:]:
if self.sed.enableAuthority(self.BandLayout.authority[1], True, obj) is True:
continue
else:
return False
        # By default the global range is enabled in Enterprise drives
if self.BandLayout.enabled_bands:
if self.sed.changePIN(self.BandLayout.enabled_bands[0], self.keymanager.getKey(self.BandLayout.enabled_bands[0]), (None, self.initial_cred), self.BandLayout.enabled_bands[0])!= True:
return False
# Change pin of band authorities to a new value
for (obj, auth) in zip(self.BandLayout.auth_objs[1:], self.BandLayout.authority[2:]):
if self.BandLayout.authority[1] == 'Admin1':
auth = 'Admin1'
self.initial_cred = self.keymanager.getKey(auth)
if self.sed.changePIN(auth, self.keymanager.getKey(obj), (None, self.initial_cred), obj) == False:
return False
else:
continue
return True
def configure_bands(self, args):
'''
The function to configure bands on the drive and assign bands to authorities.
Parameters:
args - Commandline arguments:
Bandno: Bandnumber to be configured
RangeStart: RangeStart value
Rangelength:Rangelength value
LockOnReset: True or False
Returns:
            True: Successful completion of configuring bands.
False: Failure to configure bands.
'''
self.logger.debug('Configuring bands on the drive')
if self.sed.checkPIN(self.BandLayout.authority[0], self.sed.mSID) == True:
print("Take ownership of the drive before configuring the drive")
return False
# Enable band and set ranges for band
if self.BandLayout.authority[1] == 'Admin1':
auth = 'Admin1'
else:
auth = 'BandMaster' + args.Bandno
if auth == 'Admin1' and args.Bandno == '0':
print("Global range not present in Opal drives")
return False
elif args.Bandno == '0' and args.RangeStart != None:
print("Can't change range for global locking range")
return False
elif args.Bandno != '0'and args.RangeStart == None:
print("Please provide RangeStart and RangeLength values")
return False
configure = self.sed.setRange(auth, int(args.Bandno), authAs=(auth, self.keymanager.getKey(auth)), RangeStart=int(args.RangeStart) if args.RangeStart is not None else None, RangeLength=int(args.RangeLength) if args.RangeLength is not None else None,
ReadLockEnabled=1, WriteLockEnabled=1, LockOnReset=args.LockOnReset,
ReadLocked=0, WriteLocked=0)
if auth == 'Admin1' and configure is True:
# Give access to users to read and write unlock range only in OPAL case, Bands are assigned to authorities by default in case of Enterprise.
range_objs = ['ACE_Locking_Range1_Set_RdLocked', 'ACE_Locking_Range1_Set_WrLocked',
'ACE_Locking_Range2_Set_RdLocked', 'ACE_Locking_Range2_Set_WrLocked']
if args.Bandno == '1':
range_obj = range_objs[:2]
else:
range_obj = range_objs[2:]
for objts in range_obj:
ret = self.sed.enable_range_access(objts, 'User' + args.Bandno, auth)
if ret == False:
return False
if configure == True:
print('Band{} is configured'.format(args.Bandno))
return True
return False
def enable_fipsmode(self, args=None):
'''
The function to enable FIPS mode on the drive.
Returns:
        True: Successful completion of FIPS enable.
        False: Failure to enable FIPS.
'''
self.logger.debug('Enabling FIPS mode')
# Retrieve FIPS status
status = self.fips_status(self.sed)
if status == "NOT_FIPS":
return False
elif status == "FIPS_MODE":
return True
# Check the credentials of authorities to confirm ownership
for auth in self.BandLayout.authority:
if self.sed.checkPIN(auth, self.sed.mSID) is True:
print("Please take the ownership of the drive before FIPS enable operation")
return False
# Check whether Locking is enabled for any of the bands
if self.BandLayout.authority[1] == 'Admin1':
auth, start = 'Admin1', 1
else:
auth, start = 'Anybody', 0
lock_enabled = False
for bandnumber in range (start, 3):
locking_info, status = self.sed.getRange(bandnumber, auth)
if status is True and locking_info is not None:
if getattr(locking_info, 'ReadLockEnabled') == True or getattr(locking_info, 'WriteLockEnabled') == True:
lock_enabled = True
break
if lock_enabled == False:
print("Please set ReadLockEnabled and WriteLockEnabled to True for any of the enabled bands by performing configure operation")
return False
# Disable Makers Authority
if self.sed.enableAuthority('SID', False, 'Makers') == False:
print("Failed to disable Makers Authority")
return False
# Change MinPINlength
if self.sed.SSC == "Opalv2":
self.authorities = {self.BandLayout.authority[1]: self.BandLayout.auth_objs[0], self.BandLayout.authority[2]: self.BandLayout.auth_objs[1], self.BandLayout.authority[3]: self.BandLayout.auth_objs[2], self.BandLayout.authority[0]: self.BandLayout.authority[0]}
for auth, auth_obj in self.authorities.items():
if self.sed.setMinPINLength(auth, 4, authAs=(auth, self.keymanager.getKey(auth)), obj = auth_obj) is not True:
print("Failed to set MinPINlength for the authorities")
return False
# Disable Firmware Download
for uid in self.sed.ports.keys():
p = self.sed.getPort(uid)
if p is not None and hasattr(p, 'Name') and p.Name == 'FWDownload':
if p.PortLocked != True:
if self.sed.setPort(uid, PortLocked=True, LockOnReset=True) == False:
print("Failed to disable firmware download port")
return False
if self.sed.fipsApprovedMode == True:
print("FIPS mode of the drive enabled successfully")
return True
else:
print("Failed to enable FIPS mode")
return False
def lock_unlock_bands(self, args):
'''
The function to lock and unlock the bands present on the drive
Parameters:
args - Command line arguments:
lock/unlock: Lock/Unlock the band
bandno: Bandnumber
Returns:
True : Successful completion of the operation.
False: Failure of the operation
'''
if self.sed.checkPIN(self.BandLayout.authority[0], self.sed.mSID) == True:
print("Take ownership of the drive and configure band before lock/unlock")
return False
if args.bandno == '0' and self.BandLayout.authority[1] == 'Admin1':
print("Global range not present in Opal drives")
return False
Range_info = self.sed.getRange(int(args.bandno), self.BandLayout.authority[1])
if Range_info == False:
return False
print("Band state before lock/unlock =\n{}".format(Range_info[0]))
self.logger.debug('Locking/Unlocking bands on the drive')
if(args.lockunlock == "lock"):
lock_unlock = 1
if (Range_info[0].ReadLocked == 1):
print("Band{} already in locked state".format(args.bandno))
return True
elif(args.lockunlock == "unlock"):
lock_unlock = 0
if (Range_info[0].ReadLocked == 0):
print("Band{} already in unlocked state".format(args.bandno))
return True
# Perform a lock-unlock on the range
auth = 'User' + args.bandno if self.BandLayout.authority[1] == 'Admin1' else 'BandMaster' + args.bandno
lock_unlock = self.sed.setRange(auth, int(args.bandno), authAs=(auth, self.keymanager.getKey(auth)), ReadLocked=lock_unlock, WriteLocked=lock_unlock)
if lock_unlock == True:
print("Band{} {}ed successfully by {}".format(args.bandno, args.lockunlock, auth))
return True
print("Range not configured properly")
return False
def datastore(self, args):
'''
The function to read/write small amount of data to the datastore on the drive.
Returns:
True: Successful completion of read/write data.
False: Failure to read/write data.
'''
auth = self.BandLayout.authority[1]
self.table_number = 0
if auth == 'Admin1' and self.sed.checkPIN('SID', self.sed.mSID):
print("Please perform operation changecreds before using the datastore")
return False
for entryId in range(4):
psk = self.sed.getPskEntry(entryId)
if psk is None:
break
if psk.Enabled == True and psk.CipherSuite == self.sed.cipherSuite:
print("Please disable Tls")
return False
self.data = nvdata = {
'fips': self.sed.fipsCompliance , # Store the FIPS status of the drive.
'iv': uuid.uuid4().bytes, # initialization vector used for hashes/wrappings
'Ids': [None, None, None, None], # keyID for each credential
}
self.sed.data_length = (len(tcgSupport.serialize(self.data)))
self.logger.debug('Reading/Writing data to the datastore on the drive')
if args.readwrite == "write":
if auth == 'Admin1':
if self.sed.writeaccess('User1', self.table_number) == False:
return False
if self.sed.writeData(self.BandLayout.authority[2], self.data) == True:
return True
return False
if args.readwrite == "read":
if auth == 'Admin1':
if self.sed.readaccess('User1', self.table_number) == False:
return False
readData = self.sed.readData(self.BandLayout.authority[2])
if readData == None:
print("DataStore is empty, no data to read")
return True
elif readData == False:
return False
print(readData)
return True
def erase_drive(self, args):
'''
The function to revert the drive back to factory state.
Parameters:
args - Command line arguments:
psid: PSID number of the drive
Returns:
True : Successful completion of the operation.
False: Failure of the operation
'''
self.logger.debug('Erasing the drive')
result = self.sed.revert(args.psid)
if (result == True):
return True
else:
print("Wrong PSID")
return False
@staticmethod
def fips_status(sed):
'''
The function to retrieve the FIPS compliance and FIPS operating mode from the drive
Parameters:
sed - SED object
Returns:
NOT_FIPS: Drive is not a FIPS drive
FIPS_MODE: Drive is a Fips drive and operating in FIPS mode
NOT_FIPS_MODE: Drive is a Fips drive and is not operating in FIPS mode/non-deterministic
'''
# Checking Fips Compliance Descriptor
if sed.fipsCompliance is None or (sed.fipsCompliance["standard"] != "FIPS 140-2" and sed.fipsCompliance["standard"] != "FIPS 140-3"):
print("Drive doesn't support FIPS 140-2 or FIPS 140-3 Standard")
return Sedcfg.Fips_status[0]
#This uses Seagate Vendor Unique functionality, and may not be supported by other vendors
#May not work on older Seagate models
if sed.fipsApprovedMode is True:
print ("Drive operating in FIPS mode")
return Sedcfg.Fips_status[1]
else:
return Sedcfg.Fips_status[2]
class sedbandlayout(object):
'''
This is a class defining the band layout of the drive.
'''
# Class can be modified to add multiple users in a dynamic fashion
def __init__(self):
'''
The function defines parameters for the BandLayout of the drive.
'''
self.Ent_auth = ['SID', 'EraseMaster', 'BandMaster1', 'BandMaster2']
self.Opal_auth = ['SID', 'Admin1', 'User1', 'User2']
self.Ent_objs = ['EraseMaster', 'BandMaster1', 'BandMaster2', 'C_PIN_BandMaster1', 'C_PIN_BandMaster2']
self.Opal_objs = ['C_PIN_Admin1', 'C_PIN_User1', 'C_PIN_User2', 'User1', 'User2']
def bandauth(self, sedbandcfg):
'''
The function to choose between Enterprise and Opal band layout.
'''
if sedbandcfg.sed.SSC == 'Enterprise':
self.authority = self.Ent_auth
self.auth_objs = self.Ent_objs
self.enabled_bands = ['BandMaster0']
else:
self.authority = self.Opal_auth
self.auth_objs = self.Opal_objs
self.enabled_bands = None
class argParser(object):
'''
This is a class to parse the command line arguments.
'''
prog = 'sample_cli'
description = 'Sample CLI that implements TCG protocol for SED operations'
def getParser(self):
'''
The Function to parse command line arguments and initialize operations.
'''
main = self.main = argparse.ArgumentParser(
prog=self.prog,
description=self.description,
)
main.add_argument('device', help='Specific wwn or device names of drives to operate on')
subparser = main.add_subparsers(title='subcommand')
enableTls = subparser.add_parser('Tls', help='EnableTls on the Drive')
enableTls.add_argument('enabledisable', help='enable or disable Tls communication')
enableTls.set_defaults(operation=Sedcfg.TlsOperation)
fwattestation = subparser.add_parser('fwattest', help='Seagate proprietary method to enable Firmware attestation on the Drive')
fwattestation.add_argument('enable', help='enable FW attestation communication')
fwattestation.set_defaults(operation=Sedcfg.Fw_attestation)
datastore = subparser.add_parser('store', help='Use the DataStore on the Drive')
datastore.add_argument('readwrite', help='Read/Write the data from the DataStore')
datastore.set_defaults(operation=Sedcfg.datastore)
revert = subparser.add_parser('revert', help='Revert the drive back to factory state')
revert.add_argument('psid', help='PSID of the drive used to revert the drive back to factory state')
revert.set_defaults(operation=Sedcfg.erase_drive)
changecreds = subparser.add_parser('changecreds', help='Change the drive default credentials')
changecreds.set_defaults(operation=Sedcfg.take_ownership)
configure = subparser.add_parser('configure', help='Configure the bands by setting new band ranges')
configure.add_argument('Bandno', help='Band number to configure')
configure.add_argument('--RangeStart', help='Rangestart value, Default(4097)')
configure.add_argument('--RangeLength', help='RangeLength value, Default(219749770)')
configure.add_argument('LockOnReset', help='True or False value for LockOnReset')
configure.set_defaults(operation=Sedcfg.configure_bands)
enablefips = subparser.add_parser('enablefips', help='Enable FIPS mode on the fips drive')
enablefips.set_defaults(operation=Sedcfg.enable_fipsmode)
bandops = subparser.add_parser('bandops', help='Perform a lock or an unlock on the band')
bandops.add_argument('lockunlock', help='Lock or unlock the band')
bandops.add_argument('bandno', help='Band number to be locked/unlocked')
bandops.set_defaults(operation=Sedcfg.lock_unlock_bands)
return main
def doParse(self, args):
'''
The function to obtain arguments.
'''
if args is not None:
args = shlex.split(args)
else:
args = sys.argv[1:]
namespace = self.getParser().parse_args(args)
return namespace
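# Illustrative invocations of this CLI (script name, device path, band number and
# range values below are placeholders, not values taken from this repository):
#   python sample_cli.py /dev/sdb changecreds
#   python sample_cli.py /dev/sdb configure 1 True --RangeStart 4097 --RangeLength 219749770
#   python sample_cli.py /dev/sdb bandops lock 1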
def main(args=None):
drive_namespace = argParser().doParse(args)
sedcfg = Sedcfg(drive_namespace.device)
if sedcfg.sed.SSC != 'Enterprise' and sedcfg.sed.SSC != 'Opalv2':
print("Unable to retrieve SED functionality of the device. Enable OS to allow secure commands ")
return 1
sedcfg.device_identification()
rv = drive_namespace.operation(sedcfg, drive_namespace)
if rv is not True:
print("Operation failed")
return 1
else:
print("Operation completed successfully")
if __name__ == "__main__":
sys.exit(main())
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
graphsaint/setup.py
|
# cython: language_level=3
from distutils.core import setup, Extension
from Cython.Build import cythonize
import numpy
# import cython_utils
import os
os.environ["CC"] = "g++"
os.environ["CXX"] = "g++"
setup(ext_modules = cythonize(["graphsaint/cython_sampler.pyx","graphsaint/cython_utils.pyx","graphsaint/norm_aggr.pyx"]), include_dirs = [numpy.get_include()])
# to compile: python graphsaint/setup.py build_ext --inplace
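# After a successful in-place build the compiled extensions should be importable,
# e.g. (module names inferred from the .pyx sources above; illustrative only):
#   from graphsaint import cython_sampler, cython_utils, norm_aggr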
|
[] |
[] |
[
"CXX",
"CC"
] |
[]
|
["CXX", "CC"]
|
python
| 2 | 0 | |
boltcluster_test.go
|
package boltcluster_test
import (
"os"
"strings"
"sync"
"testing"
"time"
"github.com/adamluzsi/boltcluster"
"github.com/adamluzsi/convert"
"github.com/boltdb/bolt"
)
var once sync.Once
var subject *boltcluster.Cluster
var distributionKey int = 1
var verboseCluster bool
func init() {
if strings.ToLower(os.Getenv("VERBOSE")) == "true" {
verboseCluster = true
}
}
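// To exercise the verbose logging path when running these tests (illustrative):
//   VERBOSE=true go test ./...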
func setUp(t *testing.T) {
once.Do(func() {
subject = boltcluster.New()
if verboseCluster {
subject.Logger.Verbosity = true
}
if err := subject.Open(); err != nil {
t.Log(err)
t.Fail()
}
})
}
func TestOptions(t *testing.T) {
newDirectoryPath := "./dbstest"
if _, err := os.Stat(newDirectoryPath); !os.IsNotExist(err) {
os.RemoveAll(newDirectoryPath)
}
boltcluster.New(boltcluster.SetDirectoryPathTo(newDirectoryPath))
if _, err := os.Stat(newDirectoryPath); os.IsNotExist(err) {
t.Log("passing directory path as options does not created the db folder on initialization")
t.Fail()
}
}
func TestUpdate(t *testing.T) {
setUp(t)
expectedValue := "World"
err := subject.Update(distributionKey, func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(convert.Stob(`testing`))
if err != nil {
t.Fail()
}
bucket.Put(convert.Stob("hello"), convert.Stob(expectedValue))
return nil
})
if err != nil {
t.Log(err)
t.Fail()
}
var result []byte
err = subject.Update(distributionKey, func(tx *bolt.Tx) error {
bucket := tx.Bucket(convert.Stob(`testing`))
result = convert.Copy(bucket.Get(convert.Stob("hello")))
return nil
})
if err != nil {
t.Log(err)
t.Fail()
}
resultstr := string(result)
if resultstr != expectedValue {
t.Logf("expected %v got %v", expectedValue, resultstr)
t.Fail()
}
}
func TestBatch(t *testing.T) {
setUp(t)
expectedValue := "World"
err := subject.Batch(distributionKey, func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(convert.Stob(`testing`))
if err != nil {
t.Fail()
}
bucket.Put(convert.Stob("hello"), convert.Stob(expectedValue))
return nil
})
if err != nil {
t.Log(err)
t.Fail()
}
var result []byte
err = subject.Batch(distributionKey, func(tx *bolt.Tx) error {
bucket := tx.Bucket(convert.Stob(`testing`))
result = convert.Copy(bucket.Get(convert.Stob("hello")))
return nil
})
if err != nil {
t.Log(err)
t.Fail()
}
resultstr := string(result)
if resultstr != expectedValue {
t.Logf("expected %v got %v", expectedValue, resultstr)
t.Fail()
}
}
func TestResizeCluster(t *testing.T) {
newDirectoryPath := "./dbstest"
if _, err := os.Stat(newDirectoryPath); !os.IsNotExist(err) {
os.RemoveAll(newDirectoryPath)
}
c := boltcluster.New(boltcluster.SetDirectoryPathTo(newDirectoryPath))
err := c.Open()
if err != nil {
t.Log(err)
t.Fail()
}
if verboseCluster {
c.Logger.Verbosity = true
}
c.Update(distributionKey, func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(convert.Stob(`testing`))
if err != nil {
t.Fail()
}
bucket.Put(convert.Stob("hello"), convert.Stob("world"))
return nil
})
c.Close()
c.RedistributeTo(10, func(tx *bolt.Tx) error {
tx.ForEach(func(k []byte, b *bolt.Bucket) error {
b.ForEach(func(key, value []byte) error {
if value != nil {
somethingThatBeingUsedAsDistributionKey := distributionKey
bName := convert.Copy(k)
kName := convert.Copy(key)
vName := convert.Copy(value)
c.Batch(somethingThatBeingUsedAsDistributionKey, func(t *bolt.Tx) error {
bucket, err := t.CreateBucketIfNotExists(bName)
if err != nil {
return err
}
err = bucket.Put(kName, vName)
if err != nil {
return err
}
return nil
})
}
return nil
})
return nil
})
return nil
})
var value string
c.Update(distributionKey, func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(convert.Stob(`testing`))
if err != nil {
t.Fail()
}
value = string(bucket.Get(convert.Stob("hello")))
return nil
})
if value != "world" {
t.Log("Distribution failed")
t.Fail()
}
c.Close()
}
func TestParallelUpdate(t *testing.T) {
newDirectoryPath := "./pupdate"
if _, err := os.Stat(newDirectoryPath); !os.IsNotExist(err) {
os.RemoveAll(newDirectoryPath)
}
c := boltcluster.New(boltcluster.SetDirectoryPathTo(newDirectoryPath))
c.RedistributeTo(2, func(_ *bolt.Tx) error { return nil })
defer c.Close()
c.Update(1, func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(convert.Stob(`testing`))
if err != nil {
t.Fail()
}
bucket.Put(convert.Stob("hello"), convert.Itob8(1))
return nil
})
c.Update(2, func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(convert.Stob(`testing`))
if err != nil {
t.Fail()
}
bucket.Put(convert.Stob("hello"), convert.Itob8(2))
return nil
})
ch := make(chan int)
var m sync.Mutex
set := make(map[int]struct{})
go func() {
for i := range ch {
m.Lock()
set[i] = struct{}{}
m.Unlock()
}
}()
var wg sync.WaitGroup
for index := 0; index < 1000; index++ {
wg.Add(1)
go func() {
defer wg.Done()
errs := c.ParallelUpdate(func(tx *bolt.Tx) error {
bucket := tx.Bucket(convert.Stob(`testing`))
if bucket != nil {
by := bucket.Get(convert.Stob("hello"))
ch <- convert.Btoi(by)
}
return nil
})
if len(errs) != 0 {
panic(errs[0])
}
}()
wg.Wait()
m.Lock()
setLength := len(set)
m.Unlock()
if setLength == 2 {
break
} else {
time.Sleep(500 * time.Millisecond)
}
}
close(ch)
if len(set) != 2 {
t.Log("Failed to assert the expected result set is equal")
t.Log(set)
t.Fail()
}
}
func TestParallelBatch(t *testing.T) {
newDirectoryPath := "./pbatch"
if _, err := os.Stat(newDirectoryPath); !os.IsNotExist(err) {
os.RemoveAll(newDirectoryPath)
}
c := boltcluster.New(boltcluster.SetDirectoryPathTo(newDirectoryPath))
c.RedistributeTo(2, func(_ *bolt.Tx) error { return nil })
defer c.Close()
c.Update(1, func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(convert.Stob(`testing`))
if err != nil {
t.Fail()
}
bucket.Put(convert.Stob("hello"), convert.Itob8(1))
return nil
})
c.Update(2, func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(convert.Stob(`testing`))
if err != nil {
t.Fail()
}
bucket.Put(convert.Stob("hello"), convert.Itob8(2))
return nil
})
ch := make(chan int)
var m sync.Mutex
set := make(map[int]struct{})
go func() {
for i := range ch {
m.Lock()
set[i] = struct{}{}
m.Unlock()
}
}()
var wg sync.WaitGroup
for index := 0; index < 1000; index++ {
wg.Add(1)
go func() {
defer wg.Done()
errs := c.ParallelBatch(func(tx *bolt.Tx) error {
bucket := tx.Bucket(convert.Stob(`testing`))
if bucket != nil {
by := bucket.Get(convert.Stob("hello"))
ch <- convert.Btoi(by)
}
return nil
})
if len(errs) != 0 {
panic(errs[0])
}
}()
wg.Wait()
m.Lock()
setLength := len(set)
m.Unlock()
if setLength == 2 {
break
} else {
time.Sleep(500 * time.Millisecond)
}
}
close(ch)
if len(set) != 2 {
t.Log("Failed to assert the expected result set is equal")
t.Log(set)
t.Fail()
}
}
func TestView(t *testing.T) {
newDirectoryPath := "./pview"
if _, err := os.Stat(newDirectoryPath); !os.IsNotExist(err) {
os.RemoveAll(newDirectoryPath)
}
c := boltcluster.New(boltcluster.SetDirectoryPathTo(newDirectoryPath))
c.RedistributeTo(2, func(_ *bolt.Tx) error { return nil })
defer c.Close()
c.Update(1, func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(convert.Stob(`testing`))
if err != nil {
t.Fail()
}
bucket.Put(convert.Stob("hello"), convert.Itob8(42))
return nil
})
c.Update(2, func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(convert.Stob(`testing`))
if err != nil {
t.Fail()
}
bucket.Put(convert.Stob("hello"), convert.Itob8(32))
return nil
})
ch := make(chan int)
var m sync.Mutex
set := make(map[int]struct{})
go func() {
for i := range ch {
m.Lock()
set[i] = struct{}{}
m.Unlock()
}
}()
var wg sync.WaitGroup
for index := 0; index < 1000; index++ {
wg.Add(1)
go func() {
defer wg.Done()
err := c.View(1, func(tx *bolt.Tx) error {
bucket := tx.Bucket(convert.Stob(`testing`))
if bucket != nil {
by := bucket.Get(convert.Stob("hello"))
ch <- convert.Btoi(by)
}
return nil
})
if err != nil {
t.Log(err)
t.Fail()
}
}()
wg.Wait()
m.Lock()
setLength := len(set)
m.Unlock()
if setLength == 1 {
break
} else {
time.Sleep(500 * time.Millisecond)
}
}
close(ch)
if len(set) != 1 {
t.Log("Failed to assert the expected result set is equal")
t.Log(set)
t.Fail()
}
if _, ok := set[42]; !ok {
t.Log("Value not matching")
t.Log(set)
t.Fail()
}
}
|
[
"\"VERBOSE\""
] |
[] |
[
"VERBOSE"
] |
[]
|
["VERBOSE"]
|
go
| 1 | 0 | |
sdk/resourcemanager/chaos/armchaos/zz_generated_constants.go
|
//go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
package armchaos
const (
moduleName = "armchaos"
moduleVersion = "v0.2.1"
)
// ActionType - Enum. Indicates the action type. "Internal" refers to actions that are for internal only APIs.
type ActionType string
const (
ActionTypeInternal ActionType = "Internal"
)
// PossibleActionTypeValues returns the possible values for the ActionType const type.
func PossibleActionTypeValues() []ActionType {
return []ActionType{
ActionTypeInternal,
}
}
// ToPtr returns a *ActionType pointing to the current value.
func (c ActionType) ToPtr() *ActionType {
return &c
}
// CreatedByType - The type of identity that created the resource.
type CreatedByType string
const (
CreatedByTypeApplication CreatedByType = "Application"
CreatedByTypeKey CreatedByType = "Key"
CreatedByTypeManagedIdentity CreatedByType = "ManagedIdentity"
CreatedByTypeUser CreatedByType = "User"
)
// PossibleCreatedByTypeValues returns the possible values for the CreatedByType const type.
func PossibleCreatedByTypeValues() []CreatedByType {
return []CreatedByType{
CreatedByTypeApplication,
CreatedByTypeKey,
CreatedByTypeManagedIdentity,
CreatedByTypeUser,
}
}
// ToPtr returns a *CreatedByType pointing to the current value.
func (c CreatedByType) ToPtr() *CreatedByType {
return &c
}
// Origin - The intended executor of the operation; as in Resource Based Access Control (RBAC) and audit logs UX. Default
// value is "user,system"
type Origin string
const (
OriginSystem Origin = "system"
OriginUser Origin = "user"
OriginUserSystem Origin = "user,system"
)
// PossibleOriginValues returns the possible values for the Origin const type.
func PossibleOriginValues() []Origin {
return []Origin{
OriginSystem,
OriginUser,
OriginUserSystem,
}
}
// ToPtr returns a *Origin pointing to the current value.
func (c Origin) ToPtr() *Origin {
return &c
}
// ResourceIdentityType - String of the resource identity type.
type ResourceIdentityType string
const (
ResourceIdentityTypeNone ResourceIdentityType = "None"
ResourceIdentityTypeSystemAssigned ResourceIdentityType = "SystemAssigned"
)
// PossibleResourceIdentityTypeValues returns the possible values for the ResourceIdentityType const type.
func PossibleResourceIdentityTypeValues() []ResourceIdentityType {
return []ResourceIdentityType{
ResourceIdentityTypeNone,
ResourceIdentityTypeSystemAssigned,
}
}
// ToPtr returns a *ResourceIdentityType pointing to the current value.
func (c ResourceIdentityType) ToPtr() *ResourceIdentityType {
return &c
}
// SelectorType - Enum of the selector type.
type SelectorType string
const (
SelectorTypePercent SelectorType = "Percent"
SelectorTypeRandom SelectorType = "Random"
SelectorTypeTag SelectorType = "Tag"
SelectorTypeList SelectorType = "List"
)
// PossibleSelectorTypeValues returns the possible values for the SelectorType const type.
func PossibleSelectorTypeValues() []SelectorType {
return []SelectorType{
SelectorTypePercent,
SelectorTypeRandom,
SelectorTypeTag,
SelectorTypeList,
}
}
// ToPtr returns a *SelectorType pointing to the current value.
func (c SelectorType) ToPtr() *SelectorType {
return &c
}
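// Illustrative use of the generated helpers above (not part of the generated API):
//   sel := SelectorTypeList.ToPtr()  // *SelectorType pointing at "List"
//   _ = PossibleSelectorTypeValues() // enumerates Percent, Random, Tag, List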
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
timeweb/timewebapp/migrations/0089_alter_settingsmodel_def_min_work_time.py
|
# Generated by Django 3.2.7 on 2021-11-19 09:08
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timewebapp', '0088_alter_timewebmodel_due_time'),
]
operations = [
migrations.AlterField(
model_name='settingsmodel',
name='def_min_work_time',
field=models.DecimalField(blank=True, decimal_places=2, default=15, max_digits=15, null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.01'), 'The default minimum work time must be positive')], verbose_name='Default Minimum Daily Work Time in Minutes'),
),
]
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
cmd/root.go
|
/*
Copyright © 2021 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"bing-wallpaper/pkg/wallpaper"
"fmt"
"io/ioutil"
"os"
"github.com/kbinani/screenshot"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/spf13/viper"
)
var cfgFile string
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "bing-wallpaper",
Short: "bing-wallpaper is a tool written in go (golang) to get the daily wallpaper from bing and set it as wallpaper",
TraverseChildren: true,
Run: runRoot,
// Uncomment the following line if your bare application
// has an action associated with it:
// Run: func(cmd *cobra.Command, args []string) { },
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
cobra.CheckErr(rootCmd.Execute())
}
func init() {
cobra.OnInitialize(initConfig)
// Here you will define your flags and configuration settings.
// Cobra supports persistent flags, which, if defined here,
// will be global for your application.
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.bing-wallpaper.yaml)")
rootCmd.PersistentFlags().IntP("daysback", "d", 0, "Number of days in the past to get the wallpaper from")
rootCmd.PersistentFlags().Bool("daemon", false, "Run in daemon mode")
// Cobra also supports local flags, which will only run
// when this action is called directly.
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
// Find home directory.
home, err := os.UserHomeDir()
cobra.CheckErr(err)
// Search config in home directory with name ".bing-wallpaper" (without extension).
viper.AddConfigPath(home)
viper.SetConfigType("yaml")
viper.SetConfigName(".bing-wallpaper")
configName := os.Getenv("HOME") + "/.bing-wallpaper.yaml"
if _, err := os.Stat(configName); os.IsNotExist(err) {
config := wallpaper.Config{
Auto_Update: true,
}
b, err := yaml.Marshal(config)
if err != nil {
logrus.Errorln(err)
}
err = ioutil.WriteFile(configName, b, 0755)
if err != nil {
logrus.Errorln(err)
}
}
}
viper.AutomaticEnv() // read in environment variables that match
// If a config file is found, read it in; log any error encountered.
if err := viper.ReadInConfig(); err != nil {
logrus.Errorln(err)
}
}
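// A minimal ~/.bing-wallpaper.yaml written by initConfig above would look like
// this (field name derived from wallpaper.Config by yaml.Marshal; illustrative only):
//   auto_update: true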
func runRoot(c *cobra.Command, args []string) {
bounds := screenshot.GetDisplayBounds(0)
daysBack, err := c.Flags().GetInt("daysback")
if err != nil {
logrus.Errorln(err)
}
runDaemon, err := c.Flags().GetBool("daemon")
if err != nil {
logrus.Errorln(err)
}
wallpaper.AutoUpdate = viper.Get("auto_update").(bool)
wallpaperPath, _, err := wallpaper.GetWallpaper(fmt.Sprint(bounds.Dx()), fmt.Sprint(bounds.Dy()), daysBack, "", true)
if err != nil {
logrus.Errorln(err)
}
if wallpaper.AutoUpdate || daysBack > 0 {
wallpaper.SetWallpaper(wallpaperPath)
}
if runDaemon {
wallpaper.Systray()
}
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
test/function_tests/multiclusterhub_install_test/multiclusterhub_test.go
|
// Copyright (c) 2020 Red Hat, Inc.
// Copyright Contributors to the Open Cluster Management project
package multiclusterhub_install_test
import (
"context"
"fmt"
"os"
"time"
"github.com/Masterminds/semver"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
utils "github.com/open-cluster-management/multiclusterhub-operator/test/function_tests/utils"
)
var _ = Describe("Multiclusterhub", func() {
BeforeEach(func() {
By("Attempting to delete MultiClusterHub if it exists")
utils.DeleteIfExists(utils.DynamicKubeClient, utils.GVRMultiClusterHub, utils.MCHName, utils.MCHNamespace, true)
Eventually(func() error {
err := utils.ValidateDelete(utils.DynamicKubeClient)
if err != nil {
return err
}
return nil
}, utils.GetWaitInMinutes()*60, 1).Should(BeNil())
By("Attempting to delete Image Overrides ConfigMap with bad image reference if it exists")
err := utils.DeleteConfigMapIfExists(utils.ImageOverridesCMBadImageName, utils.MCHNamespace)
Expect(err).Should(BeNil())
})
if os.Getenv("full_test_suite") == "true" {
By("Beginning Full Install Test Suite ...")
FullInstallTestSuite()
} else {
By("Beginning Basic Install Test Suite ...")
It("Install Default MCH CR", func() {
By("Creating MultiClusterHub")
start := time.Now()
utils.CreateMCHNotManaged()
if err := utils.ValidateStatusesExist(); err != nil {
fmt.Println(fmt.Sprintf("Error: %s\n", err.Error()))
return
}
err := utils.ValidateMCH()
if err != nil {
fmt.Println(fmt.Sprintf("Error: %s\n", err.Error()))
return
}
fmt.Printf("Installation Time: %s\n", time.Since(start))
return
})
}
})
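// Setting full_test_suite=true in the environment selects FullInstallTestSuite
// below; anything else runs the basic install test. Illustrative invocation:
//   full_test_suite=true go test ./test/function_tests/multiclusterhub_install_test/...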
func FullInstallTestSuite() {
It("If certain resources exist, block creation", func() {
By("Creating MultiClusterEngine")
utils.CreateMultiClusterEngineCRD()
utils.CreateMultiClusterEngineCR()
mch := utils.NewMultiClusterHub("test-mch", "open-cluster-management", "", true)
_, err := utils.DynamicKubeClient.Resource(utils.GVRMultiClusterHub).Namespace("open-cluster-management").Create(context.TODO(), mch, metav1.CreateOptions{})
Expect(err.Error()).To(BeEquivalentTo("admission webhook \"multiclusterhub.validating-webhook.open-cluster-management.io\" denied the request: cannot create test-mch resource. Existing MultiClusterEngine resources must first be deleted"))
utils.DeleteMultiClusterEngineCR()
utils.DeleteMultiClusterEngineCRD()
})
It("Test Hiveconfig", func() {
By("- If HiveConfig is edited directly, ensure changes are persisted")
utils.CreateMCHNotManaged()
err := utils.ValidateMCH()
Expect(err).To(BeNil())
By("- Editing HiveConfig")
hiveConfig, err := utils.DynamicKubeClient.Resource(utils.GVRHiveConfig).Get(context.TODO(), utils.HiveConfigName, metav1.GetOptions{})
Expect(err).To(BeNil()) // If HiveConfig does not exist, err
spec, ok := hiveConfig.Object["spec"].(map[string]interface{})
Expect(ok).To(BeTrue())
spec["targetNamespace"] = "test-hive"
spec["logLevel"] = "info"
hiveConfig, err = utils.DynamicKubeClient.Resource(utils.GVRHiveConfig).Update(context.TODO(), hiveConfig, metav1.UpdateOptions{})
Expect(err).To(BeNil()) // If HiveConfig does not exist, err
By("- Confirming edit was successful")
hiveConfig, err = utils.DynamicKubeClient.Resource(utils.GVRHiveConfig).Get(context.TODO(), utils.HiveConfigName, metav1.GetOptions{})
Expect(err).To(BeNil()) // If HiveConfig does not exist, err
spec, ok = hiveConfig.Object["spec"].(map[string]interface{})
Expect(ok).To(BeTrue())
Expect(spec["targetNamespace"]).To(BeEquivalentTo("test-hive"))
Expect(spec["logLevel"]).To(BeEquivalentTo("info"))
By("- Restart MCH Operator to ensure HiveConfig is not updated on reconcile")
// Delete MCH Operator pod to force reconcile
labelSelector := fmt.Sprintf("name=%s", "multiclusterhub-operator")
listOptions := metav1.ListOptions{
LabelSelector: labelSelector,
Limit: 1,
}
err = utils.KubeClient.CoreV1().Pods(utils.MCHNamespace).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, listOptions)
Expect(err).To(BeNil()) // Deletion should always be successful
time.Sleep(60 * time.Second)
hiveConfig, err = utils.DynamicKubeClient.Resource(utils.GVRHiveConfig).Get(context.TODO(), utils.HiveConfigName, metav1.GetOptions{})
Expect(err).To(BeNil()) // If HiveConfig does not exist, err
spec, ok = hiveConfig.Object["spec"].(map[string]interface{})
Expect(ok).To(BeTrue())
Expect(spec["targetNamespace"]).To(BeEquivalentTo("test-hive"))
Expect(spec["logLevel"]).To(BeEquivalentTo("info"))
By("- If HiveConfig is Deleted, ensure it is recreated")
err = utils.DynamicKubeClient.Resource(utils.GVRHiveConfig).Delete(context.TODO(), utils.HiveConfigName, metav1.DeleteOptions{})
Expect(err).To(BeNil()) // If HiveConfig does not exist, err
Eventually(func() error {
hiveConfig, err = utils.DynamicKubeClient.Resource(utils.GVRHiveConfig).Get(context.TODO(), utils.HiveConfigName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("HiveConfig has not been recreated")
}
return nil
}, utils.GetWaitInMinutes()*2, 1).Should(BeNil())
By("- If MCH.spec.hive is edited, ensure edit is blocked")
mch, err := utils.DynamicKubeClient.Resource(utils.GVRMultiClusterHub).Namespace(utils.MCHNamespace).Get(context.TODO(), utils.MCHName, metav1.GetOptions{})
Expect(err).To(BeNil())
spec, ok = mch.Object["spec"].(map[string]interface{})
Expect(ok).To(BeTrue())
spec["hive"] = map[string]interface{}{
"maintenanceMode": true,
"failedProvisionConfig": map[string]interface{}{
"skipGatherLogs": true,
},
}
_, err = utils.DynamicKubeClient.Resource(utils.GVRMultiClusterHub).Namespace(utils.MCHNamespace).Update(context.TODO(), mch, metav1.UpdateOptions{})
Expect(err.Error()).To(BeEquivalentTo("admission webhook \"multiclusterhub.validating-webhook.open-cluster-management.io\" denied the request: Hive updates are forbidden"))
return
})
It("Testing Image Overrides Configmap", func() {
By("- If configmap is manually overwitten, ensure MCH Operator will overwrite")
utils.CreateMCHNotManaged()
err := utils.ValidateMCH()
Expect(err).To(BeNil())
By("- Overwrite Image Overrides Configmap")
currentVersion, err := utils.GetCurrentVersionFromMCH()
Expect(err).To(BeNil())
v, err := semver.NewVersion(currentVersion)
Expect(err).Should(BeNil())
c, err := semver.NewConstraint(">= 2.4.0")
Expect(err).Should(BeNil())
if c.Check(v) {
configmap, err := utils.KubeClient.CoreV1().ConfigMaps(utils.MCHNamespace).Get(context.TODO(), fmt.Sprintf("mch-image-manifest-%s", currentVersion), metav1.GetOptions{})
Expect(err).To(BeNil())
// Clear all data in configmap
configmap.Data = make(map[string]string)
configmap, err = utils.KubeClient.CoreV1().ConfigMaps(utils.MCHNamespace).Update(context.TODO(), configmap, metav1.UpdateOptions{})
Expect(err).To(BeNil())
Expect(len(configmap.Data)).Should(Equal(0))
Eventually(func() error {
configmap, err = utils.KubeClient.CoreV1().ConfigMaps(utils.MCHNamespace).Get(context.TODO(), fmt.Sprintf("mch-image-manifest-%s", currentVersion), metav1.GetOptions{})
if len(configmap.Data) == 0 {
return fmt.Errorf("Configmap has not been updated")
}
return nil
}, utils.GetWaitInMinutes()*60, 1).Should(BeNil())
}
return
})
It("- If `mch-imageOverridesCM` annotation is given, ensure Image Overrides Configmap is updated ", func() {
By("- Creating Developer Image Overrides Configmap")
utils.CreateMCHNotManaged()
err := utils.ValidateMCH()
Expect(err).To(BeNil())
configmap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "my-config",
Namespace: utils.MCHNamespace,
},
Data: map[string]string{
"overrides.json": `[
{
"image-name": "application-ui",
"image-tag": "not-a-real-tag",
"image-remote": "quay.io/open-cluster-management",
"image-key": "application_ui"
}
]`,
},
}
// Create configmap overrides
currentVersion, err := utils.GetCurrentVersionFromMCH()
Expect(err).To(BeNil())
v, err := semver.NewVersion(currentVersion)
Expect(err).Should(BeNil())
c, err := semver.NewConstraint(">= 2.4.0")
Expect(err).Should(BeNil())
if c.Check(v) {
_, err = utils.KubeClient.CoreV1().ConfigMaps(utils.MCHNamespace).Create(context.TODO(), configmap, metav1.CreateOptions{})
Expect(err).To(BeNil())
// Annotate MCH
annotations := make(map[string]string)
annotations["mch-imageOverridesCM"] = "my-config"
utils.UpdateAnnotations(annotations)
Eventually(func() error {
configmap, err = utils.KubeClient.CoreV1().ConfigMaps(utils.MCHNamespace).Get(context.TODO(), fmt.Sprintf("mch-image-manifest-%s", currentVersion), metav1.GetOptions{})
if len(configmap.Data) == 0 {
return fmt.Errorf("Configmap has not been updated")
}
if configmap.Data["application_ui"] != "quay.io/open-cluster-management/application-ui:not-a-real-tag" {
return fmt.Errorf("Configmap has not been updated from overrides CM.")
}
return nil
}, utils.GetWaitInMinutes()*60, 1).Should(BeNil())
annotations = make(map[string]string)
utils.UpdateAnnotations(annotations)
err = utils.KubeClient.CoreV1().ConfigMaps(utils.MCHNamespace).Delete(context.TODO(), "my-config", metav1.DeleteOptions{})
Expect(err).To(BeNil())
}
return
})
It("- If `spec.disableUpdateClusterImageSets` controls the automatic updates of clusterImageSets", func() {
By("- Verfiying default ")
utils.CreateMCHNotManaged()
err := utils.ValidateMCH()
Expect(err).To(BeNil())
// Test initial case with no setting, is equivalent to disableUpdateClusterImageSets: false
err = utils.ValidateClusterImageSetsSubscriptionPause("false")
Expect(err).To(BeNil())
// Set the disableUpdateClusterImageSets: true
By("- Setting `spec.disableUpdateClusterImageSets` to true to disable automatic updates of clusterImageSets")
utils.ToggleDisableUpdateClusterImageSets(true)
Eventually(func() error {
if err := utils.ValidateClusterImageSetsSubscriptionPause("true"); err != nil {
return fmt.Errorf("Console AppSub not updated")
}
return nil
}, utils.GetWaitInMinutes()*60, 1).Should(BeNil())
// Set the disableUpdateClusterImageSets: false
By("- Setting `spec.disableUpdateClusterImageSets` to false to enable automatic updates of clusterImageSets")
utils.ToggleDisableUpdateClusterImageSets(false)
Eventually(func() error {
if err := utils.ValidateClusterImageSetsSubscriptionPause("false"); err != nil {
return fmt.Errorf("Console AppSub not updated")
}
return nil
}, utils.GetWaitInMinutes()*60, 1).Should(BeNil())
})
It(fmt.Sprintf("Installing MCH with bad image reference - should have Installing status"), func() {
By("Creating Bad Image Overrides Configmap")
imageOverridesCM := utils.NewImageOverridesConfigmapBadImageRef(utils.ImageOverridesCMBadImageName, utils.MCHNamespace)
err := utils.CreateNewConfigMap(imageOverridesCM, utils.MCHNamespace)
Expect(err).To(BeNil())
By("Creating MultiClusterHub with image overrides annotation")
utils.CreateMCHImageOverridesAnnotation(utils.ImageOverridesCMBadImageName)
err = utils.ValidateMCHUnsuccessful()
Expect(err).To(BeNil())
})
It(fmt.Sprintf("Installing MCH with old components on cluster"), func() {
By("Installing old component")
subName := "topology-sub"
sub := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "apps.open-cluster-management.io/v1",
"kind": "Subscription",
"metadata": map[string]interface{}{
"name": subName,
"namespace": utils.MCHNamespace,
},
"spec": map[string]interface{}{
"channel": fmt.Sprintf("%s/charts-v1", utils.MCHNamespace),
"name": "test",
"placement": map[string]interface{}{
"local": true,
},
},
},
}
k8sClient := utils.DynamicKubeClient.Resource(utils.GVRAppSub).Namespace(utils.MCHNamespace)
_, err := k8sClient.Create(context.TODO(), sub, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
// Clean up resource manually in case of failure
defer k8sClient.Delete(context.TODO(), subName, metav1.DeleteOptions{})
By("Installing MCH")
utils.CreateMCHNotManaged()
Expect(utils.ValidateMCH()).To(Succeed())
By("Verifying old component has been removed")
_, err = k8sClient.Get(context.TODO(), subName, metav1.GetOptions{})
Expect(errors.IsNotFound(err)).To(BeTrue(), "should have been deleted by the reconciler and return a NotFound error")
By("Verifying status is not complete while a resource has not been successfully pruned")
// Create appsub again, this time with a finalizer
finalizer := []string{"test-finalizer"}
sub.SetFinalizers(finalizer)
_, err = k8sClient.Create(context.TODO(), sub, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
// Remove finalizer manually in case of failure
defer func() {
dsub, err := k8sClient.Get(context.TODO(), subName, metav1.GetOptions{})
if err != nil {
return
}
dsub.SetFinalizers([]string{})
k8sClient.Update(context.TODO(), dsub, metav1.UpdateOptions{})
}()
// Force reconcile
Expect(utils.DeleteMCHRepo()).To(Succeed())
timeout := 2 * time.Minute
interval := time.Second * 2
Eventually(func() error {
status, err := utils.GetMCHStatus()
if err != nil {
return err
}
return utils.FindCondition(status, "Progressing", "False")
}, timeout, interval).Should(Succeed(), "the blocked resource deletion should prevent progress")
By("Verifying status recovers once the blocked resource is cleaned up")
Eventually(func() error {
unblockedSub, _ := k8sClient.Get(context.TODO(), subName, metav1.GetOptions{})
unblockedSub.SetFinalizers([]string{})
_, err = k8sClient.Update(context.TODO(), unblockedSub, metav1.UpdateOptions{})
return err
}, time.Minute, time.Second).Should(Succeed(), "should be able to remove the finalizer so the blocked resource can be cleaned up")
Expect(utils.ValidateMCH()).To(Succeed())
return
})
It(fmt.Sprintf("Installing MCH with old rcm component on cluster"), func() {
By("Installing old component")
subName := "rcm-sub"
sub := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "apps.open-cluster-management.io/v1",
"kind": "Subscription",
"metadata": map[string]interface{}{
"name": subName,
"namespace": utils.MCHNamespace,
},
"spec": map[string]interface{}{
"channel": fmt.Sprintf("%s/charts-v1", utils.MCHNamespace),
"name": "test",
"placement": map[string]interface{}{
"local": true,
},
},
},
}
k8sClient := utils.DynamicKubeClient.Resource(utils.GVRAppSub).Namespace(utils.MCHNamespace)
_, err := k8sClient.Create(context.TODO(), sub, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
// Clean up resource manually in case of failure
defer k8sClient.Delete(context.TODO(), subName, metav1.DeleteOptions{})
By("Installing MCH")
utils.CreateMCHNotManaged()
Expect(utils.ValidateMCH()).To(Succeed())
By("Verifying old component has been removed")
_, err = k8sClient.Get(context.TODO(), subName, metav1.GetOptions{})
Expect(errors.IsNotFound(err)).To(BeTrue(), "should have been deleted by the reconciler and return a NotFound error")
By("Verifying status is not complete while a resource has not been successfully pruned")
// Create appsub again, this time with a finalizer
finalizer := []string{"test-finalizer"}
sub.SetFinalizers(finalizer)
_, err = k8sClient.Create(context.TODO(), sub, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
// Remove finalizer manually in case of failure
defer func() {
dsub, err := k8sClient.Get(context.TODO(), subName, metav1.GetOptions{})
if err != nil {
return
}
dsub.SetFinalizers([]string{})
k8sClient.Update(context.TODO(), dsub, metav1.UpdateOptions{})
}()
// Force reconcile
Expect(utils.DeleteMCHRepo()).To(Succeed())
timeout := 2 * time.Minute
interval := time.Second * 2
Eventually(func() error {
status, err := utils.GetMCHStatus()
if err != nil {
return err
}
return utils.FindCondition(status, "Progressing", "False")
}, timeout, interval).Should(Succeed(), "the blocked resource deletion should prevent progress")
By("Verifying status recovers once the blocked resource is cleaned up")
Eventually(func() error {
unblockedSub, _ := k8sClient.Get(context.TODO(), subName, metav1.GetOptions{})
unblockedSub.SetFinalizers([]string{})
_, err = k8sClient.Update(context.TODO(), unblockedSub, metav1.UpdateOptions{})
return err
}, time.Minute, time.Second).Should(Succeed(), "should be able to remove the finalizer so the blocked resource can be cleaned up")
Expect(utils.ValidateMCH()).To(Succeed())
return
})
totalAttempts := 2
for i := 1; i <= totalAttempts; i++ {
ok := It(fmt.Sprintf("Installing MCH - Attempt %d of %d", i, totalAttempts), func() {
By("Creating MultiClusterHub")
utils.CreateMCHNotManaged()
if err := utils.ValidateStatusesExist(); err != nil {
fmt.Println(fmt.Sprintf("Error: %s\n", err.Error()))
Expect(err).To(BeNil())
return
}
err := utils.ValidateMCH()
if err != nil {
fmt.Println(fmt.Sprintf("Error: %s\n", err.Error()))
Expect(err).To(BeNil())
return
}
By("Degrading the installation")
oldImage, err := utils.BrickCLC()
if err != nil {
fmt.Println(fmt.Sprintf("Error: %s\n", err.Error()))
return
}
if err := utils.ValidateMCHDegraded(); err != nil {
fmt.Println(fmt.Sprintf("Error: %s\n", err.Error()))
return
}
if err := utils.FixCLC(oldImage); err != nil {
fmt.Println(fmt.Sprintf("Error: %s\n", err.Error()))
return
}
if err := utils.ValidateMCH(); err != nil {
fmt.Println(fmt.Sprintf("Error: %s\n", err.Error()))
return
}
if err := utils.BrickMCHRepo(); err != nil {
fmt.Println(fmt.Sprintf("Error: %s\n", err.Error()))
Expect(err).To(BeNil())
return
}
if err := utils.ValidateMCHDegraded(); err != nil {
fmt.Println(fmt.Sprintf("Error: %s\n", err.Error()))
Expect(err).To(BeNil())
return
}
if err := utils.FixMCHRepo(); err != nil {
fmt.Println(fmt.Sprintf("Error: %s\n", err.Error()))
Expect(err).To(BeNil())
return
}
return
})
if !ok {
break
}
}
It("- If `spec.disableHubSelfManagement` controls the existence of the related resources", func() {
Skip("Skipping all tests related to local cluster and self management")
By("- Verifying default install has local-cluster resources")
utils.CreateDefaultMCH()
err := utils.ValidateMCH()
Expect(err).To(BeNil())
By("- Setting `spec.disableHubSelfManagement` to true to remove local-cluster resources")
utils.ToggleDisableHubSelfManagement(true)
By("- Sleeping some compulsory 15 minutes because of some foundation bug")
utils.CoffeeBreak(15)
By("- Returning from compulsory coffee break")
Eventually(func() error {
if err := utils.ValidateImportHubResourcesExist(false); err != nil {
return fmt.Errorf("resources still exist")
}
return nil
}, utils.GetWaitInMinutes()*60, 1).Should(BeNil())
By("- Setting `spec.disableHubSelfManagement` to false to create local-cluster resources")
utils.ToggleDisableHubSelfManagement(false)
By("- Sleeping some compulsory 15 minutes because of some foundation bug")
utils.CoffeeBreak(15)
By("- Returning from compulsory coffee break")
Eventually(func() error {
if err := utils.ValidateImportHubResourcesExist(true); err != nil {
return fmt.Errorf("resources don't exist")
}
return nil
}, utils.GetWaitInMinutes()*60, 1).Should(BeNil())
})
It("- Delete ManagedCluster before it is joined/available", func() {
Skip("Skipping all tests related to local cluster and self management")
By("- Verifying install has local-cluster resources")
utils.CreateDefaultMCH()
Eventually(func() error {
if err := utils.ValidateImportHubResourcesExist(true); err != nil {
return fmt.Errorf("resources still exist")
}
return nil
}, utils.GetWaitInMinutes()*60, 1).Should(BeNil())
By("- Setting `spec.disableHubSelfManagement` to true to remove local-cluster resources")
utils.ToggleDisableHubSelfManagement(true)
Eventually(func() error {
if err := utils.ValidateImportHubResourcesExist(false); err != nil {
return fmt.Errorf("resources still exist")
}
return nil
}, utils.GetWaitInMinutes()*60, 1).Should(BeNil())
By("- Setting `spec.disableHubSelfManagement` to false to create local-cluster resources")
utils.ToggleDisableHubSelfManagement(false)
Eventually(func() error {
if err := utils.ValidateImportHubResourcesExist(true); err != nil {
return fmt.Errorf("resources don't exist")
}
return nil
}, utils.GetWaitInMinutes()*60, 1).Should(BeNil())
})
}
|
[
"\"full_test_suite\""
] |
[] |
[
"full_test_suite"
] |
[]
|
["full_test_suite"]
|
go
| 1 | 0 | |
internal/service/git/provider/default.go
|
package provider
import "github.com/go-git/go-git/v5"
// Default opens a Git repository from the working directory.
// It walks parent directories until it finds a .git directory.
// It returns git.ErrRepositoryNotExists if the working directory
// doesn't contain a valid repository.
func Default() (*git.Repository, error) {
opt := new(git.PlainOpenOptions)
opt.DetectDotGit = true
return git.PlainOpenWithOptions("", opt)
}
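// Illustrative usage (error handling abbreviated; errors.Is would also work here):
//   repo, err := provider.Default()
//   if err == git.ErrRepositoryNotExists {
//       // the working directory is not inside a Git repository
//   }
//   _ = repo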
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
e2e/iam/servicelinkedrole_test.go
|
/*
Copyright © 2019 AWS Controller authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_test
import (
"context"
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/types"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudformation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cloudformationv1alpha1 "go.awsctrl.io/manager/apis/cloudformation/v1alpha1"
iamv1alpha1 "go.awsctrl.io/manager/apis/iam/v1alpha1"
metav1alpha1 "go.awsctrl.io/manager/apis/meta/v1alpha1"
)
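// These specs only hit the real AWS CloudFormation API when USE_AWS_CLIENT=true
// is set in the environment; otherwise the AWS-side assertions are skipped (see
// the os.Getenv checks below). Illustrative invocation:
//   USE_AWS_CLIENT=true go test ./e2e/iam/...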
// RunAccountSpecs allows all servicelinkedrole E2E tests to run
var _ = Describe("Run IAM ServiceLinkedRole Controller", func() {
Context("Without ServiceLinkedRole{} existing", func() {
It("Should create iam.ServiceLinkedRole{}", func() {
var stackID string
var stackName string
var stack *cloudformationv1alpha1.Stack
k8sclient := k8smanager.GetClient()
Expect(k8sclient).ToNot(BeNil())
instance := &iamv1alpha1.ServiceLinkedRole{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sample-servicelinkedrole-",
Namespace: podnamespace,
},
Spec: iamv1alpha1.ServiceLinkedRoleSpec{
AWSServiceName: "autoscaling.amazonaws.com",
Description: "Test SLR description",
CustomSuffix: "TestSuffix",
},
}
By("Creating new IAM ServiceLinkedRole")
Expect(k8sclient.Create(context.Background(), instance)).Should(Succeed())
key := types.NamespacedName{
Name: instance.GetName(),
Namespace: podnamespace,
}
By("Expecting CreateComplete")
Eventually(func() bool {
By("Getting latest IAM ServiceLinkedRole")
instance = &iamv1alpha1.ServiceLinkedRole{}
err := k8sclient.Get(context.Background(), key, instance)
if err != nil {
return false
}
stackID = instance.GetStackID()
stackName = instance.GetStackName()
return instance.Status.Status == metav1alpha1.CreateCompleteStatus ||
(os.Getenv("USE_AWS_CLIENT") != "true" && instance.Status.Status != "")
}, timeout, interval).Should(BeTrue())
By("Checking object OwnerShip")
Eventually(func() bool {
stackkey := types.NamespacedName{
Name: stackName,
Namespace: key.Namespace,
}
stack = &cloudformationv1alpha1.Stack{}
Expect(k8sclient.Get(context.Background(), stackkey, stack)).Should(Succeed())
expectedOwnerReference := v1.OwnerReference{
Kind: instance.Kind,
APIVersion: instance.APIVersion,
UID: instance.UID,
Name: instance.Name,
}
ownerrefs := stack.GetOwnerReferences()
Expect(len(ownerrefs)).To(Equal(1))
return ownerrefs[0].Name == expectedOwnerReference.Name
}, timeout, interval).Should(BeTrue())
By("Deleting IAM ServiceLinkedRole")
Expect(k8sclient.Delete(context.Background(), instance)).Should(Succeed())
By("Deleting IAM ServiceLinkedRole Stack")
Expect(k8sclient.Delete(context.Background(), stack)).Should(Succeed())
By("Expecting metav1alpha1.DeleteCompleteStatus")
Eventually(func() bool {
if os.Getenv("USE_AWS_CLIENT") != "true" {
return true
}
output, err := awsclient.GetClient("us-west-2").DescribeStacks(&cloudformation.DescribeStacksInput{StackName: aws.String(stackID)})
Expect(err).To(BeNil())
stackoutput := output.Stacks[0].StackStatus
return *stackoutput == "DELETE_COMPLETE"
}, timeout, interval).Should(BeTrue())
})
})
})
|
[
"\"USE_AWS_CLIENT\"",
"\"USE_AWS_CLIENT\""
] |
[] |
[
"USE_AWS_CLIENT"
] |
[]
|
["USE_AWS_CLIENT"]
|
go
| 1 | 0 | |
store/storetest/mocks/ReactionStore.go
|
// Code generated by mockery v1.0.0
// Regenerate this file using `make store-mocks`.
package mocks
import mock "github.com/stretchr/testify/mock"
import model "github.com/demisto/mattermost-server/model"
import store "github.com/demisto/mattermost-server/store"
// ReactionStore is an autogenerated mock type for the ReactionStore type
type ReactionStore struct {
mock.Mock
}
// Delete provides a mock function with given fields: reaction
func (_m *ReactionStore) Delete(reaction *model.Reaction) store.StoreChannel {
ret := _m.Called(reaction)
var r0 store.StoreChannel
if rf, ok := ret.Get(0).(func(*model.Reaction) store.StoreChannel); ok {
r0 = rf(reaction)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(store.StoreChannel)
}
}
return r0
}
// DeleteAllWithEmojiName provides a mock function with given fields: emojiName
func (_m *ReactionStore) DeleteAllWithEmojiName(emojiName string) store.StoreChannel {
ret := _m.Called(emojiName)
var r0 store.StoreChannel
if rf, ok := ret.Get(0).(func(string) store.StoreChannel); ok {
r0 = rf(emojiName)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(store.StoreChannel)
}
}
return r0
}
// GetForPost provides a mock function with given fields: postId, allowFromCache
func (_m *ReactionStore) GetForPost(postId string, allowFromCache bool) store.StoreChannel {
ret := _m.Called(postId, allowFromCache)
var r0 store.StoreChannel
if rf, ok := ret.Get(0).(func(string, bool) store.StoreChannel); ok {
r0 = rf(postId, allowFromCache)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(store.StoreChannel)
}
}
return r0
}
// PermanentDeleteBatch provides a mock function with given fields: endTime, limit
func (_m *ReactionStore) PermanentDeleteBatch(endTime int64, limit int64) store.StoreChannel {
ret := _m.Called(endTime, limit)
var r0 store.StoreChannel
if rf, ok := ret.Get(0).(func(int64, int64) store.StoreChannel); ok {
r0 = rf(endTime, limit)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(store.StoreChannel)
}
}
return r0
}
// Save provides a mock function with given fields: reaction
func (_m *ReactionStore) Save(reaction *model.Reaction) store.StoreChannel {
ret := _m.Called(reaction)
var r0 store.StoreChannel
if rf, ok := ret.Get(0).(func(*model.Reaction) store.StoreChannel); ok {
r0 = rf(reaction)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(store.StoreChannel)
}
}
return r0
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
google/cloud/dialogflow_v2/services/participants/client.py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflow_v2.services.participants import pagers
from google.cloud.dialogflow_v2.types import participant
from google.cloud.dialogflow_v2.types import participant as gcd_participant
from google.cloud.dialogflow_v2.types import session
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import ParticipantsTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import ParticipantsGrpcTransport
from .transports.grpc_asyncio import ParticipantsGrpcAsyncIOTransport
class ParticipantsClientMeta(type):
"""Metaclass for the Participants client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[ParticipantsTransport]]
_transport_registry["grpc"] = ParticipantsGrpcTransport
_transport_registry["grpc_asyncio"] = ParticipantsGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[ParticipantsTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ParticipantsClient(metaclass=ParticipantsClientMeta):
"""Service for managing
[Participants][google.cloud.dialogflow.v2.Participant].
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "dialogflow.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ParticipantsClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ParticipantsClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ParticipantsTransport:
"""Returns the transport used by the client instance.
Returns:
ParticipantsTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def context_path(project: str, session: str, context: str,) -> str:
"""Returns a fully-qualified context string."""
return "projects/{project}/agent/sessions/{session}/contexts/{context}".format(
project=project, session=session, context=context,
)
@staticmethod
def parse_context_path(path: str) -> Dict[str, str]:
"""Parses a context path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/agent/sessions/(?P<session>.+?)/contexts/(?P<context>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def intent_path(project: str, intent: str,) -> str:
"""Returns a fully-qualified intent string."""
return "projects/{project}/agent/intents/{intent}".format(
project=project, intent=intent,
)
@staticmethod
def parse_intent_path(path: str) -> Dict[str, str]:
"""Parses a intent path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/agent/intents/(?P<intent>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def message_path(project: str, conversation: str, message: str,) -> str:
"""Returns a fully-qualified message string."""
return "projects/{project}/conversations/{conversation}/messages/{message}".format(
project=project, conversation=conversation, message=message,
)
@staticmethod
def parse_message_path(path: str) -> Dict[str, str]:
"""Parses a message path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/conversations/(?P<conversation>.+?)/messages/(?P<message>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def participant_path(project: str, conversation: str, participant: str,) -> str:
"""Returns a fully-qualified participant string."""
return "projects/{project}/conversations/{conversation}/participants/{participant}".format(
project=project, conversation=conversation, participant=participant,
)
@staticmethod
def parse_participant_path(path: str) -> Dict[str, str]:
"""Parses a participant path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/conversations/(?P<conversation>.+?)/participants/(?P<participant>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def session_entity_type_path(project: str, session: str, entity_type: str,) -> str:
"""Returns a fully-qualified session_entity_type string."""
return "projects/{project}/agent/sessions/{session}/entityTypes/{entity_type}".format(
project=project, session=session, entity_type=entity_type,
)
@staticmethod
def parse_session_entity_type_path(path: str) -> Dict[str, str]:
"""Parses a session_entity_type path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/agent/sessions/(?P<session>.+?)/entityTypes/(?P<entity_type>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ParticipantsTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the participants client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ParticipantsTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ParticipantsTransport):
# transport is a ParticipantsTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
)
def create_participant(
self,
request: Union[gcd_participant.CreateParticipantRequest, dict] = None,
*,
parent: str = None,
participant: gcd_participant.Participant = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_participant.Participant:
r"""Creates a new participant in a conversation.
Args:
request (Union[google.cloud.dialogflow_v2.types.CreateParticipantRequest, dict]):
The request object. The request message for
[Participants.CreateParticipant][google.cloud.dialogflow.v2.Participants.CreateParticipant].
parent (str):
Required. Resource identifier of the conversation adding
the participant. Format:
``projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
participant (google.cloud.dialogflow_v2.types.Participant):
Required. The participant to create.
This corresponds to the ``participant`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.Participant:
Represents a conversation participant
(human agent, virtual agent, end-user).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, participant])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcd_participant.CreateParticipantRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcd_participant.CreateParticipantRequest):
request = gcd_participant.CreateParticipantRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if participant is not None:
request.participant = participant
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_participant]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_participant(
self,
request: Union[participant.GetParticipantRequest, dict] = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> participant.Participant:
r"""Retrieves a conversation participant.
Args:
request (Union[google.cloud.dialogflow_v2.types.GetParticipantRequest, dict]):
The request object. The request message for
[Participants.GetParticipant][google.cloud.dialogflow.v2.Participants.GetParticipant].
name (str):
Required. The name of the participant. Format:
``projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.Participant:
Represents a conversation participant
(human agent, virtual agent, end-user).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a participant.GetParticipantRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, participant.GetParticipantRequest):
request = participant.GetParticipantRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_participant]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_participants(
self,
request: Union[participant.ListParticipantsRequest, dict] = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListParticipantsPager:
r"""Returns the list of all participants in the specified
conversation.
Args:
request (Union[google.cloud.dialogflow_v2.types.ListParticipantsRequest, dict]):
The request object. The request message for
[Participants.ListParticipants][google.cloud.dialogflow.v2.Participants.ListParticipants].
parent (str):
Required. The conversation to list all participants
from. Format:
``projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.services.participants.pagers.ListParticipantsPager:
The response message for
[Participants.ListParticipants][google.cloud.dialogflow.v2.Participants.ListParticipants].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a participant.ListParticipantsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, participant.ListParticipantsRequest):
request = participant.ListParticipantsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_participants]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListParticipantsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def update_participant(
self,
request: Union[gcd_participant.UpdateParticipantRequest, dict] = None,
*,
participant: gcd_participant.Participant = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_participant.Participant:
r"""Updates the specified participant.
Args:
request (Union[google.cloud.dialogflow_v2.types.UpdateParticipantRequest, dict]):
The request object. The request message for
[Participants.UpdateParticipant][google.cloud.dialogflow.v2.Participants.UpdateParticipant].
participant (google.cloud.dialogflow_v2.types.Participant):
Required. The participant to update.
This corresponds to the ``participant`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The mask to specify which
fields to update.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.Participant:
Represents a conversation participant
(human agent, virtual agent, end-user).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([participant, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcd_participant.UpdateParticipantRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcd_participant.UpdateParticipantRequest):
request = gcd_participant.UpdateParticipantRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if participant is not None:
request.participant = participant
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_participant]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("participant.name", request.participant.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def analyze_content(
self,
request: Union[gcd_participant.AnalyzeContentRequest, dict] = None,
*,
participant: str = None,
text_input: session.TextInput = None,
event_input: session.EventInput = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_participant.AnalyzeContentResponse:
r"""Adds a text (chat, for example), or audio (phone recording, for
example) message from a participant into the conversation.
Note: Always use agent versions for production traffic sent to
virtual agents. See `Versions and
environments <https://cloud.google.com/dialogflow/es/docs/agents-versions>`__.
Args:
request (Union[google.cloud.dialogflow_v2.types.AnalyzeContentRequest, dict]):
The request object. The request message for
[Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent].
participant (str):
Required. The name of the participant this text comes
from. Format:
``projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>``.
This corresponds to the ``participant`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
text_input (google.cloud.dialogflow_v2.types.TextInput):
The natural language text to be
processed.
This corresponds to the ``text_input`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
event_input (google.cloud.dialogflow_v2.types.EventInput):
An input event to send to Dialogflow.
This corresponds to the ``event_input`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.AnalyzeContentResponse:
The response message for
[Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([participant, text_input, event_input])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcd_participant.AnalyzeContentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcd_participant.AnalyzeContentRequest):
request = gcd_participant.AnalyzeContentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if participant is not None:
request.participant = participant
if text_input is not None:
request.text_input = text_input
if event_input is not None:
request.event_input = event_input
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.analyze_content]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("participant", request.participant),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def suggest_articles(
self,
request: Union[participant.SuggestArticlesRequest, dict] = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> participant.SuggestArticlesResponse:
r"""Gets suggested articles for a participant based on
specific historical messages.
Args:
request (Union[google.cloud.dialogflow_v2.types.SuggestArticlesRequest, dict]):
The request object. The request message for
[Participants.SuggestArticles][google.cloud.dialogflow.v2.Participants.SuggestArticles].
parent (str):
Required. The name of the participant to fetch
suggestion for. Format:
``projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.SuggestArticlesResponse:
The response message for
[Participants.SuggestArticles][google.cloud.dialogflow.v2.Participants.SuggestArticles].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a participant.SuggestArticlesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, participant.SuggestArticlesRequest):
request = participant.SuggestArticlesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.suggest_articles]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def suggest_faq_answers(
self,
request: Union[participant.SuggestFaqAnswersRequest, dict] = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> participant.SuggestFaqAnswersResponse:
r"""Gets suggested faq answers for a participant based on
specific historical messages.
Args:
request (Union[google.cloud.dialogflow_v2.types.SuggestFaqAnswersRequest, dict]):
The request object. The request message for
[Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2.Participants.SuggestFaqAnswers].
parent (str):
Required. The name of the participant to fetch
suggestion for. Format:
``projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.SuggestFaqAnswersResponse:
The response message for
[Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2.Participants.SuggestFaqAnswers].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a participant.SuggestFaqAnswersRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, participant.SuggestFaqAnswersRequest):
request = participant.SuggestFaqAnswersRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.suggest_faq_answers]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflow",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ParticipantsClient",)
|
[] |
[] |
[
"GOOGLE_API_USE_MTLS_ENDPOINT",
"GOOGLE_API_USE_CLIENT_CERTIFICATE"
] |
[]
|
["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"]
|
python
| 2 | 0 | |
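The client above resolves its endpoint from GOOGLE_API_USE_MTLS_ENDPOINT and its certificate handling from GOOGLE_API_USE_CLIENT_CERTIFICATE before building a transport. Below is a minimal usage sketch, not code from the original file: the project and conversation IDs are placeholders, and credential discovery is assumed to come from the environment (for example via application default credentials).

# Hedged usage sketch -- resource names are placeholders, not real projects.
from google.cloud.dialogflow_v2.services.participants import ParticipantsClient
from google.cloud.dialogflow_v2.types import Participant

client = ParticipantsClient()  # endpoint/mTLS choice follows the env vars described above

# _get_default_mtls_endpoint simply rewrites the default host:
assert ParticipantsClient.DEFAULT_MTLS_ENDPOINT == "dialogflow.mtls.googleapis.com"

parent = "projects/my-project/conversations/my-conversation"  # hypothetical
created = client.create_participant(
    parent=parent,
    participant=Participant(role=Participant.Role.END_USER),
)
print(created.name)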
sfjf/sfjf/settings.py
|
"""
Django settings for sfjf project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'mu2pv_4n^w(a7jl#w^rfkbf2^xa@mj^@+r(9#^qi^==a3%wd!_')
if os.path.isfile(SECRET_KEY):
SECRET_KEY = open(SECRET_KEY).read().strip()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DJANGO_PRODUCTION') is None
ALLOWED_HOSTS = ['localhost', 'personalsite']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'markdownx',
'sfjf_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sfjf.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sfjf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = 'http://' + ALLOWED_HOSTS[0] + '/static/'
# Media directory
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = 'http://' + ALLOWED_HOSTS[0] + '/media/'
# Crispy Forms bootstrap version
CRISPY_TEMPLATE_PACK = 'bootstrap4'
|
[] |
[] |
[
"DJANGO_PRODUCTION",
"DJANGO_SECRET_KEY"
] |
[]
|
["DJANGO_PRODUCTION", "DJANGO_SECRET_KEY"]
|
python
| 2 | 0 | |
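Two environment variables drive these settings: DJANGO_PRODUCTION (any value turns DEBUG off) and DJANGO_SECRET_KEY, which may hold either the key itself or a path to a file containing the key. A small standalone sketch of that secret-key resolution, mirroring the logic in the settings module above:

import os

def resolve_secret_key(default: str) -> str:
    # Accept either a literal key or a path to a file holding the key.
    key = os.environ.get("DJANGO_SECRET_KEY", default)
    if os.path.isfile(key):
        with open(key) as fh:
            return fh.read().strip()
    return key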
enterprise/internal/insights/background/background.go
|
package background
import (
"context"
"database/sql"
"os"
"strconv"
"github.com/sourcegraph/sourcegraph/enterprise/internal/insights/discovery"
"github.com/sourcegraph/sourcegraph/enterprise/internal/insights/compression"
"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/sourcegraph/sourcegraph/enterprise/internal/insights/background/queryrunner"
"github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/workerutil"
"github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker"
)
// GetBackgroundJobs is the main entrypoint which starts background jobs for code insights. It is
// called from the worker service.
func GetBackgroundJobs(ctx context.Context, mainAppDB *sql.DB, insightsDB *sql.DB) []goroutine.BackgroundRoutine {
insightPermStore := store.NewInsightPermissionStore(mainAppDB)
insightsStore := store.New(insightsDB, insightPermStore)
// Create a base store to be used for storing worker state. We store this in the main app Postgres
// DB, not the TimescaleDB (which we use only for storing insights data.)
workerBaseStore := basestore.NewWithDB(mainAppDB, sql.TxOptions{})
// Create basic metrics for recording information about background jobs.
observationContext := &observation.Context{
Logger: log15.Root(),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}
queryRunnerWorkerMetrics, queryRunnerResetterMetrics := newWorkerMetrics(observationContext, "insights_search_queue")
insightsMetadataStore := store.NewInsightStore(insightsDB)
workerStore := queryrunner.CreateDBWorkerStore(workerBaseStore, observationContext)
// Start background goroutines for all of our workers.
routines := []goroutine.BackgroundRoutine{
// Register the background goroutine which discovers and enqueues insights work.
newInsightEnqueuer(ctx, workerBaseStore, insightsMetadataStore, observationContext),
// Register the query-runner worker and resetter, which executes search queries and records
// results to TimescaleDB.
queryrunner.NewWorker(ctx, workerStore, insightsStore, queryRunnerWorkerMetrics),
queryrunner.NewResetter(ctx, workerStore, queryRunnerResetterMetrics),
// disabling the cleaner job while we debug mismatched results from historical insights
queryrunner.NewCleaner(ctx, workerBaseStore, observationContext),
// TODO(slimsag): future: register another worker here for webhook querying.
}
// todo(insights) add setting to disable this indexer
routines = append(routines, compression.NewCommitIndexerWorker(ctx, mainAppDB, insightsDB, observationContext))
// Register the background goroutine which discovers historical gaps in data and enqueues
// work to fill them - if not disabled.
disableHistorical, _ := strconv.ParseBool(os.Getenv("DISABLE_CODE_INSIGHTS_HISTORICAL"))
if !disableHistorical {
routines = append(routines, newInsightHistoricalEnqueuer(ctx, workerBaseStore, insightsMetadataStore, insightsStore, observationContext))
}
routines = append(routines, discovery.NewMigrateSettingInsightsJob(ctx, mainAppDB, insightsDB))
return routines
}
// newWorkerMetrics returns a basic set of metrics to be used for a worker and its resetter:
//
// * WorkerMetrics records worker operations & number of jobs.
// * ResetterMetrics records the number of jobs that got reset because workers timed out / took too
// long.
//
// Individual insights workers may then _also_ want to register their own metrics, if desired, in
// their NewWorker functions.
func newWorkerMetrics(observationContext *observation.Context, workerName string) (workerutil.WorkerMetrics, dbworker.ResetterMetrics) {
workerMetrics := workerutil.NewMetrics(observationContext, workerName+"_processor")
resetterMetrics := dbworker.NewMetrics(observationContext, workerName)
return workerMetrics, *resetterMetrics
}
|
[
"\"DISABLE_CODE_INSIGHTS_HISTORICAL\""
] |
[] |
[
"DISABLE_CODE_INSIGHTS_HISTORICAL"
] |
[]
|
["DISABLE_CODE_INSIGHTS_HISTORICAL"]
|
go
| 1 | 0 | |
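The historical enqueuer is opt-out: it runs unless DISABLE_CODE_INSIGHTS_HISTORICAL parses as true, and because the error from strconv.ParseBool is discarded, an unset or malformed value silently leaves it enabled. A cross-language sketch of that decision, written in Python purely for illustration; the accepted spellings are those of Go's strconv.ParseBool:

import os

def historical_enqueuer_enabled() -> bool:
    # Values strconv.ParseBool treats as true; anything else (including unset
    # or malformed values) keeps the enqueuer running.
    truthy = {"1", "t", "T", "true", "TRUE", "True"}
    return os.getenv("DISABLE_CODE_INSIGHTS_HISTORICAL", "") not in truthy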
cmd/almond2mqtt/main.go
|
package main
import (
"log"
"os"
"github.com/cicloid/almond2mqtt"
)
func main() {
c := almond2mqtt.Config{
AlmondAddr: os.Getenv("ALMOND_ADDR"),
AlmondUser: os.Getenv("ALMOND_USER"),
AlmondPassword: os.Getenv("ALMOND_PASSWORD"),
MQTTURL: os.Getenv("MQTT_URL"),
MQTTPrefix: os.Getenv("MQTT_PREFIX"),
}
c.ApplyDefaults()
s, err := almond2mqtt.New(c)
if err != nil {
log.Fatal(err)
}
if err := s.Run(); err != nil {
log.Fatal(err)
}
}
|
[
"\"ALMOND_ADDR\"",
"\"ALMOND_USER\"",
"\"ALMOND_PASSWORD\"",
"\"MQTT_URL\"",
"\"MQTT_PREFIX\""
] |
[] |
[
"MQTT_PREFIX",
"MQTT_URL",
"ALMOND_PASSWORD",
"ALMOND_USER",
"ALMOND_ADDR"
] |
[]
|
["MQTT_PREFIX", "MQTT_URL", "ALMOND_PASSWORD", "ALMOND_USER", "ALMOND_ADDR"]
|
go
| 5 | 0 | |
python2_source/BlackBoxAuditing/data.py
|
from collections import OrderedDict
from splitters import split_by_percent
import csv
from test_data import preloaded
def is_int(string):
try:
int(string)
return True
except ValueError:
return False
def is_float(string):
try:
float(string)
return True
except ValueError:
return False
def get_types(data,correct_types,empty_symbol):
for i in range(len(correct_types)):
row = 0
while correct_types[i] is None:
if data[row][i] == empty_symbol:
row += 1
else:
if is_int(data[row][i]):
correct_types[i] = int
elif is_float(data[row][i]):
correct_types[i] = float
else:
correct_types[i] = str
return correct_types
def load_data(data):
if data not in preloaded:
raise KeyError("{} is not an available dataset".format(data))
if data == "DRP":
return load_DRP(data)
filename = preloaded[data]["filepath"]
testdata = preloaded[data]["testdata"]
correct_types = preloaded[data]["correct_types"]
train_percentage = preloaded[data]["train_percentage"]
response_header = preloaded[data]["response_header"]
features_to_ignore = preloaded[data]["features_to_ignore"]
with open(filename) as f:
reader = csv.reader(f)
data = [row for row in reader]
headers = data.pop(0)
for i, row in enumerate(data):
for j, correct_type in enumerate(correct_types):
data[i][j] = correct_type(row[j])
if testdata is None:
train, test = split_by_percent(data, train_percentage)
else:
train = data
with open(testdata) as f:
reader = csv.reader(f)
test = [row for row in reader][1:] # Ignore headers.
for i, row in enumerate(test):
for j, correct_type in enumerate(correct_types):
test[i][j] = correct_type(row[j])
return headers, train, test, response_header, features_to_ignore
def load_DRP(data):
train_filename = preloaded[data]["filepath"]
test_filename = preloaded[data]["testdata"]
train_percentage = preloaded[data]["train_percentage"]
header = preloaded[data]["response_header"]
features_to_ignore = preloaded[data]["features_to_ignore"]
header_types = OrderedDict()
data = []
with open(train_filename) as f:
for line in f:
if "@attribute" in line:
_, header, arff_type = line.split()
header_types[header] = float if arff_type=="numeric" else str
elif "@relation" in line or "@data" in line or line == "\n":
pass
else:
row = line[:-1].split(",") #TODO: This is a naive way of splitting, captain.
row = [header_types[h](v) for h,v in zip(header_types, row)]
data.append(row)
with open(test_filename) as f:
for line in f:
if "@attribute" not in line and "@relation" not in line and "@data" not in line and line != "\n":
row = line[:-1].split(",") #TODO: This is a naive way of splitting, captain.
row = [header_types[h](v) for h,v in zip(header_types, row)]
data.append(row)
headers = header_types.keys()
train, test = split_by_percent(data, train_percentage)
return headers, train, test, header, features_to_ignore
def load_from_file(datafile, testdata=None, correct_types=None, train_percentage=2.0/3.0,
response_header=None, features_to_ignore=None, missing_data_symbol=""):
with open(datafile) as f:
reader = csv.reader(f)
data = [row for row in reader]
headers = data.pop(0)
# Set defaults in case they are not handed in as arguments
if response_header is None:
response_header = headers[-1]
if features_to_ignore is None:
features_to_ignore = []
if correct_types is None:
correct_types = get_types(data, [None]*len(headers), missing_data_symbol)
for i, row in enumerate(data):
for j, correct_type in enumerate(correct_types):
data[i][j] = correct_type(row[j])
if testdata is None:
train, test = split_by_percent(data, train_percentage)
else:
train = data
with open(testdata) as f:
reader = csv.reader(f)
test = [row for row in reader][1:] # Ignore headers.
for i, row in enumerate(test):
for j, correct_type in enumerate(correct_types):
test[i][j] = correct_type(row[j])
return headers, train, test, response_header, features_to_ignore
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
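A hypothetical call into the load_from_file helper above; the file name, response header and ignored feature are placeholders rather than datasets shipped with the project.

headers, train, test, response, ignored = load_from_file(
    "adult.csv",                      # placeholder CSV with a header row
    train_percentage=0.8,
    response_header="income",
    features_to_ignore=["fnlwgt"],
)
print(len(train), len(test), response)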
chatbot_app/chatbot/botpredictor.py
|
# Copyright 2017 Bo Shao. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import nltk
import os
import string
import tensorflow as tf
from chatbot_app.chatbot.tokenizeddata import TokenizedData
from chatbot_app.chatbot.modelcreator import ModelCreator
from chatbot_app.chatbot.knowledgebase import KnowledgeBase
from chatbot_app.chatbot.sessiondata import SessionData
from chatbot_app.chatbot.patternutils import check_patterns_and_replace
from chatbot_app.chatbot.functiondata import call_function
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class BotPredictor(object):
def __init__(self, session, corpus_dir, knbase_dir, result_dir, result_file):
"""
Args:
session: The TensorFlow session.
corpus_dir: Name of the folder storing corpus files and vocab information.
knbase_dir: Name of the folder storing data files for the knowledge base.
result_dir: The folder containing the trained result files.
result_file: The file name of the trained model.
"""
self.session = session
# Prepare data and hyper parameters
print("# Prepare dataset placeholder and hyper parameters ...")
tokenized_data = TokenizedData(corpus_dir=corpus_dir, training=False)
self.knowledge_base = KnowledgeBase()
self.knowledge_base.load_knbase(knbase_dir)
self.session_data = SessionData()
self.hparams = tokenized_data.hparams
self.src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
src_dataset = tf.data.Dataset.from_tensor_slices(self.src_placeholder)
self.infer_batch = tokenized_data.get_inference_batch(src_dataset)
# Create model
print("# Creating inference model ...")
self.model = ModelCreator(training=False, tokenized_data=tokenized_data,
batch_input=self.infer_batch)
# Restore model weights
print("# Restoring model weights ...")
self.model.saver.restore(session, os.path.join(result_dir, result_file))
self.session.run(tf.tables_initializer())
def predict(self, session_id, question):
chat_session = self.session_data.get_session(session_id)
chat_session.before_prediction() # Reset before each prediction
if question.strip() == '':
answer = "Don't you want to say something to me?"
chat_session.after_prediction(question, answer)
return answer
pat_matched, new_sentence, para_list = check_patterns_and_replace(question)
for pre_time in range(2):
tokens = nltk.word_tokenize(new_sentence.lower())
tmp_sentence = [' '.join(tokens[:]).strip()]
self.session.run(self.infer_batch.initializer,
feed_dict={self.src_placeholder: tmp_sentence})
outputs, _ = self.model.infer(self.session)
if self.hparams.beam_width > 0:
outputs = outputs[0]
eos_token = self.hparams.eos_token.encode("utf-8")
outputs = outputs.tolist()[0]
if eos_token in outputs:
outputs = outputs[:outputs.index(eos_token)]
if pat_matched and pre_time == 0:
out_sentence, if_func_val = self._get_final_output(outputs, chat_session,
para_list=para_list)
if if_func_val:
chat_session.after_prediction(question, out_sentence)
return out_sentence
else:
new_sentence = question
else:
out_sentence, _ = self._get_final_output(outputs, chat_session)
chat_session.after_prediction(question, out_sentence)
return out_sentence
def _get_final_output(self, sentence, chat_session, para_list=None):
sentence = b' '.join(sentence).decode('utf-8')
if sentence == '':
return "I don't know what to say.", False
if_func_val = False
last_word = None
word_list = []
for word in sentence.split(' '):
word = word.strip()
if not word:
continue
if word.startswith('_func_val_'):
if_func_val = True
word = call_function(word[10:], knowledge_base=self.knowledge_base,
chat_session=chat_session, para_list=para_list)
if word is None or word == '':
continue
else:
if word in self.knowledge_base.upper_words:
word = self.knowledge_base.upper_words[word]
if (last_word is None or last_word in ['.', '!', '?']) and not word[0].isupper():
word = word.capitalize()
if not word.startswith('\'') and word != 'n\'t' \
and (word[0] not in string.punctuation or word in ['(', '[', '{', '``', '$']) \
and last_word not in ['(', '[', '{', '``', '$']:
word = ' ' + word
word_list.append(word)
last_word = word
return ''.join(word_list).strip(), if_func_val
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
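The predictor silences TensorFlow's C++ logging with TF_CPP_MIN_LOG_LEVEL. The variable has to be set before TensorFlow is imported; a minimal sketch of the knob and its levels:

import os

# "0" = all messages, "1" = hide INFO, "2" = also hide WARNING, "3" = also hide ERROR.
os.environ.setdefault("TF_CPP_MIN_LOG_LEVEL", "2")
import tensorflow as tf  # noqa: E402 -- deliberately imported after setting the env var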
analyze.py
|
import cannibalize
import xlsxwriter
import sys
import os
desktop = os.path.join(os.environ['USERPROFILE'], 'Desktop')
usage = "Kit Cannibaliztion\n" \
"usage: analyze.py kit_number serial1 serial2 serial3 ..."
if len(sys.argv) < 2:
print(usage)
else:
KIT = sys.argv[1]
SERIALS = [str(i) for i in sys.argv[2:]]
FILE_NAME = '{}\\cannibalization_report_{}.xlsx'.format(desktop,KIT)
kit_assembly_data = cannibalize.create_new_kit_assembly(KIT, SERIALS)
workbook = xlsxwriter.Workbook(FILE_NAME)
v_i_data = []
for r in kit_assembly_data['assembly']:
v_i_data.append([KIT, r['serial'], r['status'], str(len(r['build']))])
first_worksheet = workbook.add_worksheet('Report')
first_worksheet.set_column('A:C', 20)
first_worksheet.add_table('A1:C{}'.format(str(1 + len(v_i_data))),
{'data': v_i_data,
'columns': [{'header': 'kit_number'},
{'header': 'serial_number'},
{'header': 'status'},
{'header': 'components_in_kit'}
]})
for unique_serial in kit_assembly_data['assembly']:
worksheet = workbook.add_worksheet('Serial ~ {}'.format(unique_serial['serial']))
worksheet.set_column('A:B', 20)
worksheet.write(0, 0, 'Serial ~ {}'.format(unique_serial['serial']))
worksheet.write(0, 1, 'Status: {}'.format(unique_serial['status'].upper()))
table_data = []
for component_information in unique_serial['build']:
table_data.append([component_information['component'],
str(component_information['qty'])])
worksheet.add_table('A2:B{}'.format(str(1 + len(unique_serial['build']))),
{'data': table_data,
'columns': [{'header': 'component'},
{'header': 'qty_in_kit'}
]})
workbook.close()
|
[] |
[] |
[
"USERPROFILE"
] |
[]
|
["USERPROFILE"]
|
python
| 1 | 0 | |
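The report loop only touches a few fields of the structure returned by cannibalize.create_new_kit_assembly. Inferred from those accesses, the expected shape looks roughly like the sketch below; the values are illustrative, not real serials or part numbers.

kit_assembly_data = {
    "assembly": [
        {
            "serial": "SN001",
            "status": "complete",
            "build": [
                {"component": "C-100", "qty": 2},
                {"component": "C-200", "qty": 1},
            ],
        },
    ],
}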
cli/cmd/main_test.go
|
package cmd
import (
"flag"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/sergi/go-diff/diffmatchpatch"
)
var (
// updateFixtures is set by the `-update` flag.
updateFixtures bool
// prettyDiff is set by the `-pretty-diff` flag.
prettyDiff bool
)
// TestMain parses flags before running tests
func TestMain(m *testing.M) {
flag.BoolVar(&updateFixtures, "update", false, "update text fixtures in place")
prettyDiff = os.Getenv("LINKERD_TEST_PRETTY_DIFF") != ""
flag.BoolVar(&prettyDiff, "pretty-diff", prettyDiff, "display the full text when diffing")
flag.Parse()
os.Exit(m.Run())
}
// readTestdata reads a file and returns the contents of that file as a string.
func readTestdata(t *testing.T, fileName string) string {
file, err := os.Open(filepath.Join("testdata", fileName))
if err != nil {
t.Fatalf("Failed to open expected output file: %v", err)
}
fixture, err := ioutil.ReadAll(file)
if err != nil {
t.Fatalf("Failed to read expected output file: %v", err)
}
return string(fixture)
}
func writeTestdata(t *testing.T, fileName string, data []byte) {
p := filepath.Join("testdata", fileName)
if err := ioutil.WriteFile(p, data, 0644); err != nil {
t.Fatal(err)
}
}
// TODO: share this with integration tests
func diffTestdata(t *testing.T, path, actual string) {
expected := readTestdata(t, path)
if actual == expected {
return
}
dmp := diffmatchpatch.New()
diffs := dmp.DiffMain(expected, actual, true)
diffs = dmp.DiffCleanupSemantic(diffs)
var diff string
if prettyDiff {
diff = dmp.DiffPrettyText(diffs)
} else {
diff = dmp.PatchToText(dmp.PatchMake(diffs))
}
t.Errorf("mismatch: %s\n%s", path, diff)
if updateFixtures {
writeTestdata(t, path, []byte(actual))
}
}
|
[
"\"LINKERD_TEST_PRETTY_DIFF\""
] |
[] |
[
"LINKERD_TEST_PRETTY_DIFF"
] |
[]
|
["LINKERD_TEST_PRETTY_DIFF"]
|
go
| 1 | 0 | |
base/src/test/java/com/emc/mongoose/endurance/ParallelPipelineAndInfiniteLoopTest.java
|
package com.emc.mongoose.endurance;
import static com.emc.mongoose.util.TestCaseUtil.stepId;
import static com.emc.mongoose.util.docker.MongooseContainer.ENDURANCE_TEST_MEMORY_LIMIT;
import static com.emc.mongoose.util.docker.MongooseContainer.IMAGE_VERSION;
import static com.emc.mongoose.util.docker.MongooseEntryNodeContainer.enduranceTestContainerScenarioPath;
import static org.junit.Assert.fail;
import com.emc.mongoose.params.Concurrency;
import com.emc.mongoose.params.EnvParams;
import com.emc.mongoose.params.ItemSize;
import com.emc.mongoose.params.RunMode;
import com.emc.mongoose.params.StorageType;
import com.emc.mongoose.util.DirWithManyFilesDeleter;
import com.emc.mongoose.util.docker.HttpStorageMockContainer;
import com.emc.mongoose.util.docker.MongooseAdditionalNodeContainer;
import com.emc.mongoose.util.docker.MongooseEntryNodeContainer;
import com.github.akurilov.commons.concurrent.AsyncRunnableBase;
import java.io.IOException;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.commons.io.FileUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@RunWith(Parameterized.class)
public class ParallelPipelineAndInfiniteLoopTest {
@Parameterized.Parameters(name = "{0}, {1}, {2}, {3}")
public static List<Object[]> envParams() {
return EnvParams.PARAMS;
}
private final String scenarioPath = enduranceTestContainerScenarioPath(getClass());
private final Map<String, HttpStorageMockContainer> storageMocks = new HashMap<>();
private final Map<String, MongooseAdditionalNodeContainer> slaveNodes = new HashMap<>();
private final MongooseEntryNodeContainer testContainer;
private final String stepId;
private final StorageType storageType;
private final RunMode runMode;
private final Concurrency concurrency;
private final ItemSize itemSize;
public ParallelPipelineAndInfiniteLoopTest(
final StorageType storageType,
final RunMode runMode,
final Concurrency concurrency,
final ItemSize itemSize)
throws Exception {
stepId = stepId(getClass(), storageType, runMode, concurrency, itemSize);
try {
FileUtils.deleteDirectory(
Paths.get(MongooseEntryNodeContainer.HOST_LOG_PATH.toString(), stepId).toFile());
} catch (final IOException ignored) {
}
this.storageType = storageType;
this.runMode = runMode;
this.concurrency = concurrency;
this.itemSize = itemSize;
if (storageType.equals(StorageType.FS)) {
// TODO: clean up the test files; use the DirWithManyFilesDeleter.deleteExternal(...)
// method to delete a large number of test files
}
final List<String> env =
System.getenv().entrySet().stream()
.map(e -> e.getKey() + "=" + e.getValue())
.collect(Collectors.toList());
final List<String> args = new ArrayList<>();
switch (storageType) {
case SWIFT:
args.add("--storage-namespace=ns1");
case ATMOS:
case S3:
final HttpStorageMockContainer storageMock =
new HttpStorageMockContainer(
HttpStorageMockContainer.DEFAULT_PORT,
false,
null,
null,
Character.MAX_RADIX,
10 * HttpStorageMockContainer.DEFAULT_CAPACITY,
HttpStorageMockContainer.DEFAULT_CONTAINER_COUNT_LIMIT,
5 * HttpStorageMockContainer.DEFAULT_CONTAINER_CAPACITY,
HttpStorageMockContainer.DEFAULT_FAIL_CONNECT_EVERY,
HttpStorageMockContainer.DEFAULT_FAIL_RESPONSES_EVERY,
0);
final String addr = "127.0.0.1:" + HttpStorageMockContainer.DEFAULT_PORT;
storageMocks.put(addr, storageMock);
args.add(
"--storage-net-node-addrs="
+ storageMocks.keySet().stream().collect(Collectors.joining(",")));
break;
case FS:
try {
DirWithManyFilesDeleter.deleteExternal(
MongooseEntryNodeContainer.getHostItemOutputPath(stepId));
} catch (final Exception e) {
e.printStackTrace(System.err);
}
break;
}
switch (runMode) {
case DISTRIBUTED:
for (int i = 1; i < runMode.getNodeCount(); i++) {
final int port = MongooseAdditionalNodeContainer.DEFAULT_PORT + i;
final MongooseAdditionalNodeContainer nodeSvc =
new MongooseAdditionalNodeContainer(IMAGE_VERSION, port, ENDURANCE_TEST_MEMORY_LIMIT);
final String addr = "127.0.0.1:" + port;
slaveNodes.put(addr, nodeSvc);
}
args.add(
"--load-step-node-addrs="
+ slaveNodes.keySet().stream().collect(Collectors.joining(",")));
break;
}
testContainer =
new MongooseEntryNodeContainer(
IMAGE_VERSION,
stepId,
storageType,
runMode,
concurrency,
itemSize.getValue(),
scenarioPath,
env,
args,
true,
false,
false,
ENDURANCE_TEST_MEMORY_LIMIT);
}
@Before
public final void setUp() throws Exception {
storageMocks.values().forEach(AsyncRunnableBase::start);
slaveNodes.values().forEach(AsyncRunnableBase::start);
testContainer.start();
}
@After
public final void tearDown() throws Exception {
testContainer.close();
slaveNodes
.values()
.parallelStream()
.forEach(
node -> {
try {
node.close();
} catch (final Throwable t) {
t.printStackTrace(System.err);
}
});
storageMocks
.values()
.parallelStream()
.forEach(
storageMock -> {
try {
storageMock.close();
} catch (final Throwable t) {
t.printStackTrace(System.err);
}
});
}
@Test
public final void test() throws Exception {
while (true) {
if (testContainer.await(1, TimeUnit.MINUTES)) {
fail("Test container has exited with status code: " + testContainer.exitStatusCode());
}
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
src/api/config/cors/cors.go
|
package cors
import (
"io/ioutil"
"os"
log "github.com/Sirupsen/logrus"
yaml "gopkg.in/yaml.v2"
)
type corsYAML struct {
Cors Cors
}
// Cors configuration used during middleware setup
type Cors struct {
AllowedOrigins []string `yaml:"allowed_origins"`
AllowedHeaders []string `yaml:"allowed_headers"`
}
func defaultCors() (Cors, error) {
env := os.Getenv("ENVIRONMENT")
if env == "development" {
return Cors{
AllowedOrigins: []string{"*"},
}, nil
}
// Defaults
return Cors{
AllowedOrigins: []string{""},
AllowedHeaders: []string{"content-type", "x-xsrf-token"},
}, nil
}
// Config returns the CORS configuration for the environment
func Config(configFile string) (Cors, error) {
_, err := os.Stat(configFile)
if os.IsNotExist(err) {
log.Info("Loading default CORS config")
return defaultCors()
}
log.Info("Loading CORS from config file")
cors, err := loadCorsFromFile(configFile)
if err != nil {
return Cors{}, err
}
if len(cors.AllowedOrigins) == 0 && len(cors.AllowedHeaders) == 0 {
return defaultCors()
}
return cors, nil
}
func loadCorsFromFile(filePath string) (Cors, error) {
var yamlStruct corsYAML
bytes, err := ioutil.ReadFile(filePath)
if err != nil {
return Cors{}, err
}
if err := yaml.Unmarshal(bytes, &yamlStruct); err != nil {
return Cors{}, err
}
return yamlStruct.Cors, nil
}
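// Illustrative usage sketch (not part of this package's API): a server would typically load the
// CORS settings once at startup and hand them to whatever CORS middleware it uses. The config
// path and the applyCors helper below are assumptions for illustration only.
//
//	cfg, err := Config("config/cors.yaml")
//	if err != nil {
//		log.Fatalf("loading CORS config: %v", err)
//	}
//	handler := applyCors(cfg.AllowedOrigins, cfg.AllowedHeaders, mux)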
| ["\"ENVIRONMENT\""] | [] | ["ENVIRONMENT"] | [] | ["ENVIRONMENT"] | go | 1 | 0 | |
config.go
|
package randstr
// Config is the configuration to generate a random string.
type Config struct {
characters []rune
}
// WithCharacters sets the characters to use as an optional parameter.
func WithCharacters(str string) func(*Config) {
return func(conf *Config) {
conf.characters = removeDuplicates([]rune(str))
}
}
func removeDuplicates(chars []rune) []rune {
res := make([]rune, 0, len(chars))
enc := map[rune]bool{}
for _, c := range chars {
if !enc[c] {
enc[c] = true
res = append(res, c)
}
}
return res
}
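// Illustrative example (assumed caller, not defined in this file): WithCharacters is a functional
// option, so a generator in this package would apply it to a Config before drawing runes.
//
//	conf := &Config{}
//	WithCharacters("aabbcc123")(conf)
//	// conf.characters is now ['a' 'b' 'c' '1' '2' '3']: duplicates removed, order preserved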
| [] | [] | [] | [] | [] | go | null | null | null |
bosh/executor_test.go
|
package bosh_test
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"github.com/cloudfoundry/bosh-bootloader/bosh"
"github.com/cloudfoundry/bosh-bootloader/fakes"
"github.com/cloudfoundry/bosh-bootloader/fileio"
"github.com/cloudfoundry/bosh-bootloader/storage"
"github.com/spf13/afero"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Executor", func() {
var (
fs *afero.Afero
cli *fakes.BOSHCLI
stateDir string
deploymentDir string
varsDir string
relativeDeploymentDir string
relativeVarsDir string
relativeStateDir string
dirInput bosh.DirInput
executor bosh.Executor
)
BeforeEach(func() {
fs = &afero.Afero{afero.NewMemMapFs()}
cli = &fakes.BOSHCLI{}
cli.RunStub = func(stdout io.Writer, workingDirectory string, args []string) error {
stdout.Write([]byte("some-manifest"))
return nil
}
cli.GetBOSHPathCall.Returns.Path = "bosh-path"
var err error
stateDir, err = fs.TempDir("", "")
Expect(err).NotTo(HaveOccurred())
deploymentDir = filepath.Join(stateDir, "deployment")
err = fs.Mkdir(deploymentDir, os.ModePerm)
Expect(err).NotTo(HaveOccurred())
varsDir = filepath.Join(stateDir, "vars")
err = fs.Mkdir(varsDir, os.ModePerm)
Expect(err).NotTo(HaveOccurred())
relativeDeploymentDir = "${BBL_STATE_DIR}/deployment"
relativeVarsDir = "${BBL_STATE_DIR}/vars"
relativeStateDir = "${BBL_STATE_DIR}"
dirInput = bosh.DirInput{
VarsDir: varsDir,
StateDir: stateDir,
}
executor = bosh.NewExecutor(cli, fs)
})
Describe("PlanJumpbox", func() {
It("writes bosh-deployment assets to the deployment dir", func() {
err := executor.PlanJumpbox(dirInput, deploymentDir, "aws")
Expect(err).NotTo(HaveOccurred())
By("writing bosh-deployment assets to the deployment dir", func() {
simplePath := filepath.Join(deploymentDir, "no-external-ip.yml")
expectedContents := bosh.MustAsset("vendor/github.com/cloudfoundry/jumpbox-deployment/no-external-ip.yml")
contents, err := fs.ReadFile(simplePath)
Expect(err).NotTo(HaveOccurred())
Expect(contents).To(Equal(expectedContents))
nestedPath := filepath.Join(deploymentDir, "vsphere", "cpi.yml")
expectedContents = bosh.MustAsset("vendor/github.com/cloudfoundry/jumpbox-deployment/vsphere/cpi.yml")
contents, err = fs.ReadFile(nestedPath)
Expect(err).NotTo(HaveOccurred())
Expect(contents).To(Equal(expectedContents))
})
By("writing create-env and delete-env scripts", func() {
expectedArgs := []string{
fmt.Sprintf("%s/jumpbox.yml", relativeDeploymentDir),
"--state", fmt.Sprintf("%s/jumpbox-state.json", relativeVarsDir),
"--vars-store", fmt.Sprintf("%s/jumpbox-vars-store.yml", relativeVarsDir),
"--vars-file", fmt.Sprintf("%s/jumpbox-vars-file.yml", relativeVarsDir),
"-o", fmt.Sprintf("%s/aws/cpi.yml", relativeDeploymentDir),
"-v", `access_key_id="${BBL_AWS_ACCESS_KEY_ID}"`,
"-v", `secret_access_key="${BBL_AWS_SECRET_ACCESS_KEY}"`,
}
expectedScript := formatScript("create-env", stateDir, expectedArgs)
scriptPath := fmt.Sprintf("%s/create-jumpbox.sh", stateDir)
shellScript, err := fs.ReadFile(scriptPath)
Expect(err).NotTo(HaveOccurred())
fileinfo, err := fs.Stat(scriptPath)
Expect(err).NotTo(HaveOccurred())
Expect(fileinfo.Mode().String()).To(Equal("-rwxr-x---"))
Expect(string(shellScript)).To(Equal(expectedScript))
expectedScript = formatScript("delete-env", stateDir, expectedArgs)
scriptPath = fmt.Sprintf("%s/delete-jumpbox.sh", stateDir)
shellScript, err = fs.ReadFile(scriptPath)
Expect(err).NotTo(HaveOccurred())
fileinfo, err = fs.Stat(scriptPath)
Expect(err).NotTo(HaveOccurred())
Expect(fileinfo.Mode().String()).To(Equal("-rwxr-x---"))
Expect(err).NotTo(HaveOccurred())
Expect(string(shellScript)).To(Equal(expectedScript))
})
})
Context("on azure", func() {
It("generates create-env args for jumpbox", func() {
err := executor.PlanJumpbox(dirInput, deploymentDir, "azure")
Expect(err).NotTo(HaveOccurred())
expectedArgs := []string{
fmt.Sprintf("%s/jumpbox.yml", relativeDeploymentDir),
"--state", fmt.Sprintf("%s/jumpbox-state.json", relativeVarsDir),
"--vars-store", fmt.Sprintf("%s/jumpbox-vars-store.yml", relativeVarsDir),
"--vars-file", fmt.Sprintf("%s/jumpbox-vars-file.yml", relativeVarsDir),
"-o", fmt.Sprintf("%s/azure/cpi.yml", relativeDeploymentDir),
"-v", `subscription_id="${BBL_AZURE_SUBSCRIPTION_ID}"`,
"-v", `client_id="${BBL_AZURE_CLIENT_ID}"`,
"-v", `client_secret="${BBL_AZURE_CLIENT_SECRET}"`,
"-v", `tenant_id="${BBL_AZURE_TENANT_ID}"`,
}
By("writing the create-env args to a shell script", func() {
expectedScript := formatScript("create-env", stateDir, expectedArgs)
shellScript, err := fs.ReadFile(fmt.Sprintf("%s/create-jumpbox.sh", stateDir))
Expect(err).NotTo(HaveOccurred())
Expect(string(shellScript)).To(Equal(expectedScript))
})
By("writing the delete-env args to a shell script", func() {
expectedScript := formatScript("delete-env", stateDir, expectedArgs)
shellScript, err := fs.ReadFile(fmt.Sprintf("%s/delete-jumpbox.sh", stateDir))
Expect(err).NotTo(HaveOccurred())
Expect(string(shellScript)).To(Equal(expectedScript))
})
})
})
Context("on gcp", func() {
It("generates create-env args for jumpbox", func() {
err := executor.PlanJumpbox(dirInput, deploymentDir, "gcp")
Expect(err).NotTo(HaveOccurred())
expectedArgs := []string{
fmt.Sprintf("%s/jumpbox.yml", relativeDeploymentDir),
"--state", fmt.Sprintf("%s/jumpbox-state.json", relativeVarsDir),
"--vars-store", fmt.Sprintf("%s/jumpbox-vars-store.yml", relativeVarsDir),
"--vars-file", fmt.Sprintf("%s/jumpbox-vars-file.yml", relativeVarsDir),
"-o", fmt.Sprintf("%s/gcp/cpi.yml", relativeDeploymentDir),
"--var-file", `gcp_credentials_json="${BBL_GCP_SERVICE_ACCOUNT_KEY_PATH}"`,
"-v", `project_id="${BBL_GCP_PROJECT_ID}"`,
"-v", `zone="${BBL_GCP_ZONE}"`,
}
By("writing the create-env args to a shell script", func() {
expectedScript := formatScript("create-env", stateDir, expectedArgs)
shellScript, err := fs.ReadFile(fmt.Sprintf("%s/create-jumpbox.sh", stateDir))
Expect(err).NotTo(HaveOccurred())
Expect(string(shellScript)).To(Equal(expectedScript))
})
By("writing the delete-env args to a shell script", func() {
expectedScript := formatScript("delete-env", stateDir, expectedArgs)
shellScript, err := fs.ReadFile(fmt.Sprintf("%s/delete-jumpbox.sh", stateDir))
Expect(err).NotTo(HaveOccurred())
Expect(string(shellScript)).To(Equal(expectedScript))
})
})
})
Context("when the iaas is vsphere", func() {
It("generates create-env args for jumpbox", func() {
err := executor.PlanJumpbox(dirInput, deploymentDir, "vsphere")
Expect(err).NotTo(HaveOccurred())
expectedArgs := []string{
fmt.Sprintf("%s/jumpbox.yml", relativeDeploymentDir),
"--state", fmt.Sprintf("%s/jumpbox-state.json", relativeVarsDir),
"--vars-store", fmt.Sprintf("%s/jumpbox-vars-store.yml", relativeVarsDir),
"--vars-file", fmt.Sprintf("%s/jumpbox-vars-file.yml", relativeVarsDir),
"-o", fmt.Sprintf("%s/vsphere/cpi.yml", relativeDeploymentDir),
"-o", fmt.Sprintf("%s/vsphere/resource-pool.yml", relativeDeploymentDir),
"-o", fmt.Sprintf("%s/vsphere-jumpbox-network.yml", relativeDeploymentDir),
"-v", `vcenter_user="${BBL_VSPHERE_VCENTER_USER}"`,
"-v", `vcenter_password="${BBL_VSPHERE_VCENTER_PASSWORD}"`,
}
By("writing the jumpbox-network ops-file", func() {
opsfile, err := fs.ReadFile(fmt.Sprintf("%s/vsphere-jumpbox-network.yml", deploymentDir))
Expect(err).NotTo(HaveOccurred())
Expect(string(opsfile)).To(ContainSubstring("instance_groups/name=jumpbox/networks/name=public"))
})
By("writing the create-env args to a shell script", func() {
expectedScript := formatScript("create-env", stateDir, expectedArgs)
shellScript, err := fs.ReadFile(fmt.Sprintf("%s/create-jumpbox.sh", stateDir))
Expect(err).NotTo(HaveOccurred())
Expect(string(shellScript)).To(Equal(expectedScript))
})
By("writing the delete-env args to a shell script", func() {
expectedScript := formatScript("delete-env", stateDir, expectedArgs)
shellScript, err := fs.ReadFile(fmt.Sprintf("%s/delete-jumpbox.sh", stateDir))
Expect(err).NotTo(HaveOccurred())
Expect(string(shellScript)).To(Equal(expectedScript))
})
})
})
Context("openstack", func() {
It("generates create-env args for jumpbox", func() {
err := executor.PlanJumpbox(dirInput, deploymentDir, "openstack")
Expect(err).NotTo(HaveOccurred())
expectedArgs := []string{
fmt.Sprintf("%s/jumpbox.yml", relativeDeploymentDir),
"--state", fmt.Sprintf("%s/jumpbox-state.json", relativeVarsDir),
"--vars-store", fmt.Sprintf("%s/jumpbox-vars-store.yml", relativeVarsDir),
"--vars-file", fmt.Sprintf("%s/jumpbox-vars-file.yml", relativeVarsDir),
"-o", fmt.Sprintf("%s/openstack/cpi.yml", relativeDeploymentDir),
"-o", fmt.Sprintf("%s/openstack-keystone-v3-ops.yml", relativeDeploymentDir),
"-v", `openstack_username="${BBL_OPENSTACK_USERNAME}"`,
"-v", `openstack_password="${BBL_OPENSTACK_PASSWORD}"`,
}
By("writing the keystone v3 ops-file", func() {
opsfile, err := fs.ReadFile(fmt.Sprintf("%s/openstack-keystone-v3-ops.yml", deploymentDir))
Expect(err).NotTo(HaveOccurred())
Expect(string(opsfile)).To(ContainSubstring("/openstack/project?"))
})
By("writing the create-env args to a shell script", func() {
expectedScript := formatScript("create-env", stateDir, expectedArgs)
shellScript, err := fs.ReadFile(fmt.Sprintf("%s/create-jumpbox.sh", stateDir))
Expect(err).NotTo(HaveOccurred())
Expect(string(shellScript)).To(Equal(expectedScript))
})
By("writing the delete-env args to a shell script", func() {
expectedScript := formatScript("delete-env", stateDir, expectedArgs)
shellScript, err := fs.ReadFile(fmt.Sprintf("%s/delete-jumpbox.sh", stateDir))
Expect(err).NotTo(HaveOccurred())
Expect(string(shellScript)).To(Equal(expectedScript))
})
})
})
})
Describe("PlanDirector", func() {
It("writes bosh-deployment assets to the deployment dir", func() {
err := executor.PlanDirector(dirInput, deploymentDir, "warden")
Expect(err).NotTo(HaveOccurred())
simplePath := filepath.Join(deploymentDir, "LICENSE")
expectedContents := bosh.MustAsset("vendor/github.com/cloudfoundry/bosh-deployment/LICENSE")
contents, err := fs.ReadFile(simplePath)
Expect(err).NotTo(HaveOccurred())
Expect(contents).To(Equal(expectedContents))
nestedPath := filepath.Join(deploymentDir, "vsphere", "cpi.yml")
expectedContents = bosh.MustAsset("vendor/github.com/cloudfoundry/bosh-deployment/vsphere/cpi.yml")
contents, err = fs.ReadFile(nestedPath)
Expect(err).NotTo(HaveOccurred())
Expect(contents).To(Equal(expectedContents))
})
Context("aws", func() {
It("writes create-director.sh and delete-director.sh", func() {
expectedArgs := []string{
filepath.Join(relativeDeploymentDir, "bosh.yml"),
"--state", filepath.Join(relativeVarsDir, "bosh-state.json"),
"--vars-store", filepath.Join(relativeVarsDir, "director-vars-store.yml"),
"--vars-file", filepath.Join(relativeVarsDir, "director-vars-file.yml"),
"-o", filepath.Join(relativeDeploymentDir, "aws", "cpi.yml"),
"-o", filepath.Join(relativeDeploymentDir, "jumpbox-user.yml"),
"-o", filepath.Join(relativeDeploymentDir, "uaa.yml"),
"-o", filepath.Join(relativeDeploymentDir, "credhub.yml"),
"-o", filepath.Join(relativeStateDir, "bbl-ops-files", "aws", "bosh-director-ephemeral-ip-ops.yml"),
"-o", filepath.Join(relativeDeploymentDir, "aws", "iam-instance-profile.yml"),
"-o", filepath.Join(relativeDeploymentDir, "aws", "encrypted-disk.yml"),
"-v", `access_key_id="${BBL_AWS_ACCESS_KEY_ID}"`,
"-v", `secret_access_key="${BBL_AWS_SECRET_ACCESS_KEY}"`,
}
behavesLikePlan(expectedArgs, cli, fs, executor, dirInput, deploymentDir, "aws", stateDir)
})
It("writes aws-specific ops files", func() {
err := executor.PlanDirector(dirInput, deploymentDir, "aws")
Expect(err).NotTo(HaveOccurred())
ipOpsFile := filepath.Join(stateDir, "bbl-ops-files", "aws", "bosh-director-ephemeral-ip-ops.yml")
ipOpsFileContents, err := fs.ReadFile(ipOpsFile)
Expect(err).NotTo(HaveOccurred())
Expect(string(ipOpsFileContents)).To(Equal(`
- type: replace
path: /resource_pools/name=vms/cloud_properties/auto_assign_public_ip?
value: true
`))
})
})
Context("gcp", func() {
It("writes create-director.sh and delete-director.sh", func() {
expectedArgs := []string{
filepath.Join(relativeDeploymentDir, "bosh.yml"),
"--state", filepath.Join(relativeVarsDir, "bosh-state.json"),
"--vars-store", filepath.Join(relativeVarsDir, "director-vars-store.yml"),
"--vars-file", filepath.Join(relativeVarsDir, "director-vars-file.yml"),
"-o", filepath.Join(relativeDeploymentDir, "gcp", "cpi.yml"),
"-o", filepath.Join(relativeDeploymentDir, "jumpbox-user.yml"),
"-o", filepath.Join(relativeDeploymentDir, "uaa.yml"),
"-o", filepath.Join(relativeDeploymentDir, "credhub.yml"),
"-o", filepath.Join(relativeStateDir, "bbl-ops-files", "gcp", "bosh-director-ephemeral-ip-ops.yml"),
"--var-file", `gcp_credentials_json="${BBL_GCP_SERVICE_ACCOUNT_KEY_PATH}"`,
"-v", `project_id="${BBL_GCP_PROJECT_ID}"`,
"-v", `zone="${BBL_GCP_ZONE}"`,
}
behavesLikePlan(expectedArgs, cli, fs, executor, dirInput, deploymentDir, "gcp", stateDir)
})
It("writes gcp-specific ops files", func() {
err := executor.PlanDirector(dirInput, deploymentDir, "gcp")
Expect(err).NotTo(HaveOccurred())
ipOpsFile := filepath.Join(stateDir, "bbl-ops-files", "gcp", "bosh-director-ephemeral-ip-ops.yml")
ipOpsFileContents, err := fs.ReadFile(ipOpsFile)
Expect(err).NotTo(HaveOccurred())
Expect(string(ipOpsFileContents)).To(Equal(`
- type: replace
path: /networks/name=default/subnets/0/cloud_properties/ephemeral_external_ip?
value: true
`))
})
})
Context("azure", func() {
It("writes create-director.sh and delete-director.sh", func() {
expectedArgs := []string{
filepath.Join(relativeDeploymentDir, "bosh.yml"),
"--state", filepath.Join(relativeVarsDir, "bosh-state.json"),
"--vars-store", filepath.Join(relativeVarsDir, "director-vars-store.yml"),
"--vars-file", filepath.Join(relativeVarsDir, "director-vars-file.yml"),
"-o", filepath.Join(relativeDeploymentDir, "azure", "cpi.yml"),
"-o", filepath.Join(relativeDeploymentDir, "jumpbox-user.yml"),
"-o", filepath.Join(relativeDeploymentDir, "uaa.yml"),
"-o", filepath.Join(relativeDeploymentDir, "credhub.yml"),
"-v", `subscription_id="${BBL_AZURE_SUBSCRIPTION_ID}"`,
"-v", `client_id="${BBL_AZURE_CLIENT_ID}"`,
"-v", `client_secret="${BBL_AZURE_CLIENT_SECRET}"`,
"-v", `tenant_id="${BBL_AZURE_TENANT_ID}"`,
}
behavesLikePlan(expectedArgs, cli, fs, executor, dirInput, deploymentDir, "azure", stateDir)
})
})
Context("vsphere", func() {
It("writes create-director.sh and delete-director.sh", func() {
expectedArgs := []string{
filepath.Join(relativeDeploymentDir, "bosh.yml"),
"--state", filepath.Join(relativeVarsDir, "bosh-state.json"),
"--vars-store", filepath.Join(relativeVarsDir, "director-vars-store.yml"),
"--vars-file", filepath.Join(relativeVarsDir, "director-vars-file.yml"),
"-o", filepath.Join(relativeDeploymentDir, "vsphere", "cpi.yml"),
"-o", filepath.Join(relativeDeploymentDir, "jumpbox-user.yml"),
"-o", filepath.Join(relativeDeploymentDir, "uaa.yml"),
"-o", filepath.Join(relativeDeploymentDir, "credhub.yml"),
"-o", filepath.Join(relativeDeploymentDir, "vsphere", "resource-pool.yml"),
"-v", `vcenter_user="${BBL_VSPHERE_VCENTER_USER}"`,
"-v", `vcenter_password="${BBL_VSPHERE_VCENTER_PASSWORD}"`,
}
behavesLikePlan(expectedArgs, cli, fs, executor, dirInput, deploymentDir, "vsphere", stateDir)
})
})
Context("openstack", func() {
It("writes create-director.sh and delete-director.sh", func() {
expectedArgs := []string{
filepath.Join(relativeDeploymentDir, "bosh.yml"),
"--state", filepath.Join(relativeVarsDir, "bosh-state.json"),
"--vars-store", filepath.Join(relativeVarsDir, "director-vars-store.yml"),
"--vars-file", filepath.Join(relativeVarsDir, "director-vars-file.yml"),
"-o", filepath.Join(relativeDeploymentDir, "openstack", "cpi.yml"),
"-o", filepath.Join(relativeDeploymentDir, "jumpbox-user.yml"),
"-o", filepath.Join(relativeDeploymentDir, "uaa.yml"),
"-o", filepath.Join(relativeDeploymentDir, "credhub.yml"),
"-v", `openstack_username="${BBL_OPENSTACK_USERNAME}"`,
"-v", `openstack_password="${BBL_OPENSTACK_PASSWORD}"`,
}
behavesLikePlan(expectedArgs, cli, fs, executor, dirInput, deploymentDir, "openstack", stateDir)
})
})
})
Describe("WriteDeploymentVars", func() {
BeforeEach(func() {
dirInput.Deployment = "some-deployment"
})
It("writes the deployment vars yml file", func() {
err := executor.WriteDeploymentVars(dirInput, "some-deployment-vars")
Expect(err).NotTo(HaveOccurred())
deploymentVars, err := fs.ReadFile(filepath.Join(varsDir, "some-deployment-vars-file.yml"))
Expect(err).NotTo(HaveOccurred())
Expect(string(deploymentVars)).To(Equal("some-deployment-vars"))
})
})
Describe("CreateEnv", func() {
var (
cli *fakes.BOSHCLI
executor bosh.Executor
createEnvPath string
varsDir string
stateDir string
dirInput bosh.DirInput
state storage.State
)
BeforeEach(func() {
fs = &afero.Afero{afero.NewOsFs()} // real os fs so we can exec scripts...
cli = &fakes.BOSHCLI{}
var err error
varsDir, err = fs.TempDir("", "")
Expect(err).NotTo(HaveOccurred())
stateDir, err = fs.TempDir("", "")
Expect(err).NotTo(HaveOccurred())
executor = bosh.NewExecutor(cli, fs)
dirInput = bosh.DirInput{
Deployment: "some-deployment",
StateDir: stateDir,
VarsDir: varsDir,
}
createEnvPath = filepath.Join(stateDir, "create-some-deployment.sh")
createEnvContents := fmt.Sprintf("#!/bin/bash\necho 'some-vars-store-contents' > %s/some-deployment-vars-store.yml\n", varsDir)
fs.WriteFile(createEnvPath, []byte(createEnvContents), storage.ScriptMode)
})
AfterEach(func() {
fs.Remove(filepath.Join(varsDir, "some-deployment-vars-store.yml"))
fs.Remove(createEnvPath)
fs.Remove(filepath.Join(stateDir, "create-some-deployment-override.sh"))
os.Unsetenv("BBL_STATE_DIR")
})
Context("when the user provides a create-env override", func() {
BeforeEach(func() {
overridePath := filepath.Join(stateDir, "create-some-deployment-override.sh")
overrideContents := fmt.Sprintf("#!/bin/bash\necho 'override-vars-store-contents' > %s/some-deployment-vars-store.yml\n", varsDir)
fs.WriteFile(overridePath, []byte(overrideContents), storage.ScriptMode)
})
It("runs the create-env-override.sh script", func() {
vars, err := executor.CreateEnv(dirInput, state)
Expect(err).NotTo(HaveOccurred())
Expect(cli.RunCallCount()).To(Equal(0))
Expect(vars).To(ContainSubstring("override-vars-store-contents"))
})
})
Context("when the user provides a create-env override with flag-provided environment variable", func() {
BeforeEach(func() {
err := os.Unsetenv("BBL_IAAS")
Expect(err).NotTo(HaveOccurred())
state.IAAS = "some-fictional-iaas"
overrideContents := "#!/bin/bash\n [ \"${BBL_IAAS}\" = \"some-fictional-iaas\" ]"
overridePath := filepath.Join(stateDir, "create-some-deployment-override.sh")
fs.WriteFile(overridePath, []byte(overrideContents), storage.ScriptMode)
})
It("runs the create-env-override.sh script", func() {
_, err := executor.CreateEnv(dirInput, state)
Expect(err).NotTo(HaveOccurred())
Expect(cli.RunCallCount()).To(Equal(0))
})
})
It("runs the create-env script and returns the resulting vars-store contents", func() {
vars, err := executor.CreateEnv(dirInput, state)
Expect(err).NotTo(HaveOccurred())
Expect(cli.RunCallCount()).To(Equal(0))
Expect(vars).To(ContainSubstring("some-vars-store-contents"))
By("setting BBL_STATE_DIR environment variable", func() {
bblStateDirEnv := os.Getenv("BBL_STATE_DIR")
Expect(bblStateDirEnv).To(Equal(stateDir))
})
})
Context("when iaas credentials are provided", func() {
Context("on aws", func() {
BeforeEach(func() {
state.IAAS = "aws"
state.AWS = storage.AWS{
AccessKeyID: "some-access-key-id",
SecretAccessKey: "some-secret-access-key",
}
})
It("sets credentials in environment variables", func() {
_, err := executor.CreateEnv(dirInput, state)
Expect(err).NotTo(HaveOccurred())
Expect(os.Getenv("BBL_AWS_ACCESS_KEY_ID")).To(Equal("some-access-key-id"))
Expect(os.Getenv("BBL_AWS_SECRET_ACCESS_KEY")).To(Equal("some-secret-access-key"))
})
})
Context("on azure", func() {
BeforeEach(func() {
state.IAAS = "azure"
state.Azure = storage.Azure{
ClientID: "some-client-id",
ClientSecret: "some-client-secret",
SubscriptionID: "some-subscription-id",
TenantID: "some-tenant-id",
}
})
It("sets credentials in environment variables", func() {
_, err := executor.CreateEnv(dirInput, state)
Expect(err).NotTo(HaveOccurred())
Expect(os.Getenv("BBL_AZURE_CLIENT_ID")).To(Equal("some-client-id"))
Expect(os.Getenv("BBL_AZURE_CLIENT_SECRET")).To(Equal("some-client-secret"))
Expect(os.Getenv("BBL_AZURE_SUBSCRIPTION_ID")).To(Equal("some-subscription-id"))
Expect(os.Getenv("BBL_AZURE_TENANT_ID")).To(Equal("some-tenant-id"))
})
})
Context("on gcp", func() {
BeforeEach(func() {
state.IAAS = "gcp"
state.GCP = storage.GCP{
ServiceAccountKeyPath: "some-service-account-key-path",
Zone: "some-zone",
ProjectID: "some-project-id",
}
})
It("sets credentials in environment variables", func() {
_, err := executor.CreateEnv(dirInput, state)
Expect(err).NotTo(HaveOccurred())
Expect(os.Getenv("BBL_GCP_SERVICE_ACCOUNT_KEY_PATH")).To(Equal("some-service-account-key-path"))
Expect(os.Getenv("BBL_GCP_ZONE")).To(Equal("some-zone"))
Expect(os.Getenv("BBL_GCP_PROJECT_ID")).To(Equal("some-project-id"))
})
})
Context("on vsphere", func() {
It("sets credentials in environment variables", func() {
_, err := executor.CreateEnv(dirInput, storage.State{
IAAS: "vsphere",
VSphere: storage.VSphere{
VCenterUser: "some-user",
VCenterPassword: "some-password",
},
})
Expect(err).NotTo(HaveOccurred())
Expect(os.Getenv("BBL_VSPHERE_VCENTER_USER")).To(Equal("some-user"))
Expect(os.Getenv("BBL_VSPHERE_VCENTER_PASSWORD")).To(Equal("some-password"))
})
})
Context("on openstack", func() {
It("sets credentials in environment variables", func() {
_, err := executor.CreateEnv(dirInput, storage.State{
IAAS: "openstack",
OpenStack: storage.OpenStack{
Username: "some-user",
Password: "some-password",
},
})
Expect(err).NotTo(HaveOccurred())
Expect(os.Getenv("BBL_OPENSTACK_USERNAME")).To(Equal("some-user"))
Expect(os.Getenv("BBL_OPENSTACK_PASSWORD")).To(Equal("some-password"))
})
})
})
Context("when the create-env script returns an error", func() {
BeforeEach(func() {
createEnvContents := "#!/bin/bash\nexit 1\n"
fs.WriteFile(createEnvPath, []byte(createEnvContents), storage.ScriptMode)
})
It("returns an error", func() {
vars, err := executor.CreateEnv(dirInput, state)
Expect(err).To(MatchError(fmt.Sprintf("Running %s: exit status 1", createEnvPath)))
Expect(vars).To(Equal(""))
})
})
})
Describe("DeleteEnv", func() {
var (
executor bosh.Executor
deleteEnvPath string
varsDir string
stateDir string
dirInput bosh.DirInput
state storage.State
)
BeforeEach(func() {
fs = &afero.Afero{afero.NewOsFs()} // real os fs so we can exec scripts...
var err error
varsDir, err = fs.TempDir("", "")
Expect(err).NotTo(HaveOccurred())
stateDir, err = fs.TempDir("", "")
Expect(err).NotTo(HaveOccurred())
executor = bosh.NewExecutor(cli, fs)
dirInput = bosh.DirInput{
Deployment: "director",
VarsDir: varsDir,
StateDir: stateDir,
}
state = storage.State{
IAAS: "some-iaas",
}
deleteEnvPath = filepath.Join(stateDir, "delete-director.sh")
deleteEnvContents := "#!/bin/bash\necho delete-env > /dev/null\n"
fs.WriteFile(deleteEnvPath, []byte(deleteEnvContents), storage.ScriptMode)
deploymentStateJson := filepath.Join(varsDir, "bosh-state.json")
fs.WriteFile(deploymentStateJson, []byte("some: deployment"), storage.StateMode)
})
AfterEach(func() {
os.Unsetenv("BBL_STATE_DIR")
fs.Remove(filepath.Join(stateDir, "delete-director.sh"))
})
Context("when the user provides a delete-env override", func() {
BeforeEach(func() {
overridePath := filepath.Join(stateDir, "delete-director-override.sh")
overrideContents := fmt.Sprintf("#!/bin/bash\necho 'override' > %s/delete-env-output\n", varsDir)
fs.WriteFile(overridePath, []byte(overrideContents), storage.ScriptMode)
})
AfterEach(func() {
fs.Remove(filepath.Join(varsDir, "delete-env-output"))
fs.Remove(filepath.Join(stateDir, "delete-director-override.sh"))
})
It("runs the delete-env-override.sh script", func() {
err := executor.DeleteEnv(dirInput, state)
Expect(err).NotTo(HaveOccurred())
Expect(cli.RunCallCount()).To(Equal(0))
overrideOut, err := fs.ReadFile(filepath.Join(varsDir, "delete-env-output"))
Expect(err).NotTo(HaveOccurred())
Expect(overrideOut).To(ContainSubstring("override"))
})
})
Context("when the user tries to delete a jumpbox", func() {
BeforeEach(func() {
dirInput.Deployment = "jumpbox"
deleteEnvPath = filepath.Join(stateDir, "delete-jumpbox.sh")
deleteEnvContents := "#!/bin/bash\necho delete-env > /dev/null\n"
fs.WriteFile(deleteEnvPath, []byte(deleteEnvContents), storage.ScriptMode)
deploymentStateJson := filepath.Join(varsDir, "jumpbox-state.json")
fs.WriteFile(deploymentStateJson, []byte("some: deployment"), storage.StateMode)
})
AfterEach(func() {
fs.Remove(filepath.Join(stateDir, "delete-jumpbox.sh"))
fs.Remove(filepath.Join(stateDir, "jumpbox-state.json"))
})
It("deletes a bosh environment with the delete-env script", func() {
err := executor.DeleteEnv(dirInput, state)
Expect(err).NotTo(HaveOccurred())
Expect(cli.RunCallCount()).To(Equal(0))
By("setting BBL_STATE_DIR environment variable", func() {
bblStateDirEnv := os.Getenv("BBL_STATE_DIR")
Expect(bblStateDirEnv).To(Equal(stateDir))
})
})
})
Context("when the user tries to delete an unfamiliar deployment-type-thing", func() {
BeforeEach(func() {
dirInput.Deployment = "garbaggio-deployment"
})
It("errors reasonably", func() {
err := executor.DeleteEnv(dirInput, state)
Expect(err).To(HaveOccurred())
})
})
It("deletes a bosh environment with the delete-env script", func() {
err := executor.DeleteEnv(dirInput, state)
Expect(err).NotTo(HaveOccurred())
Expect(cli.RunCallCount()).To(Equal(0))
By("setting BBL_STATE_DIR environment variable", func() {
bblStateDirEnv := os.Getenv("BBL_STATE_DIR")
Expect(bblStateDirEnv).To(Equal(stateDir))
})
})
Context("when iaas credentials are provided", func() {
Context("on aws", func() {
BeforeEach(func() {
state.IAAS = "aws"
state.AWS = storage.AWS{
AccessKeyID: "some-access-key-id",
SecretAccessKey: "some-secret-access-key",
}
})
It("sets credentials in environment variables", func() {
err := executor.DeleteEnv(dirInput, state)
Expect(err).NotTo(HaveOccurred())
Expect(os.Getenv("BBL_AWS_ACCESS_KEY_ID")).To(Equal("some-access-key-id"))
Expect(os.Getenv("BBL_AWS_SECRET_ACCESS_KEY")).To(Equal("some-secret-access-key"))
})
})
Context("on azure", func() {
BeforeEach(func() {
state.IAAS = "azure"
state.Azure = storage.Azure{
ClientID: "some-client-id",
ClientSecret: "some-client-secret",
SubscriptionID: "some-subscription-id",
TenantID: "some-tenant-id",
}
})
It("sets credentials in environment variables", func() {
err := executor.DeleteEnv(dirInput, state)
Expect(err).NotTo(HaveOccurred())
Expect(os.Getenv("BBL_AZURE_CLIENT_ID")).To(Equal("some-client-id"))
Expect(os.Getenv("BBL_AZURE_CLIENT_SECRET")).To(Equal("some-client-secret"))
Expect(os.Getenv("BBL_AZURE_SUBSCRIPTION_ID")).To(Equal("some-subscription-id"))
Expect(os.Getenv("BBL_AZURE_TENANT_ID")).To(Equal("some-tenant-id"))
})
})
Context("on gcp", func() {
BeforeEach(func() {
state.IAAS = "gcp"
state.GCP = storage.GCP{
ServiceAccountKeyPath: "some-service-account-key-path",
Zone: "some-zone",
ProjectID: "some-project-id",
}
})
It("sets credentials in environment variables", func() {
err := executor.DeleteEnv(dirInput, state)
Expect(err).NotTo(HaveOccurred())
Expect(os.Getenv("BBL_GCP_SERVICE_ACCOUNT_KEY_PATH")).To(Equal("some-service-account-key-path"))
Expect(os.Getenv("BBL_GCP_ZONE")).To(Equal("some-zone"))
Expect(os.Getenv("BBL_GCP_PROJECT_ID")).To(Equal("some-project-id"))
})
})
Context("on vsphere", func() {
BeforeEach(func() {
state.IAAS = "vsphere"
state.VSphere = storage.VSphere{
VCenterUser: "some-user",
VCenterPassword: "some-password",
}
})
It("sets credentials in environment variables", func() {
err := executor.DeleteEnv(dirInput, state)
Expect(err).NotTo(HaveOccurred())
Expect(os.Getenv("BBL_VSPHERE_VCENTER_USER")).To(Equal("some-user"))
Expect(os.Getenv("BBL_VSPHERE_VCENTER_PASSWORD")).To(Equal("some-password"))
})
})
})
Context("when the create-env script returns an error", func() {
BeforeEach(func() {
deleteEnvContents := "#!/bin/bash\nexit 1\n"
fs.WriteFile(deleteEnvPath, []byte(deleteEnvContents), storage.ScriptMode)
})
It("returns an error", func() {
err := executor.DeleteEnv(dirInput, state)
Expect(err).To(MatchError("Run bosh delete-env director: exit status 1"))
})
})
})
Describe("Version", func() {
var (
cli *fakes.BOSHCLI
executor bosh.Executor
)
BeforeEach(func() {
cli = &fakes.BOSHCLI{}
cli.RunStub = func(stdout io.Writer, workingDirectory string, args []string) error {
stdout.Write([]byte("some-text version 1.1.1 some-other-text"))
return nil
}
executor = bosh.NewExecutor(cli, fs)
})
It("returns the correctly trimmed version", func() {
version, err := executor.Version()
Expect(err).NotTo(HaveOccurred())
_, _, args := cli.RunArgsForCall(0)
Expect(args).To(Equal([]string{"-v"}))
Expect(version).To(Equal("1.1.1"))
})
Context("when the run cli fails", func() {
BeforeEach(func() {
cli.RunStub = nil
cli.RunCall.Returns.Error = errors.New("banana")
})
It("returns an error", func() {
_, err := executor.Version()
Expect(err).To(MatchError("banana"))
})
})
Context("when the version cannot be parsed", func() {
BeforeEach(func() {
cli.RunStub = func(stdout io.Writer, workingDirectory string, args []string) error {
stdout.Write([]byte(""))
return nil
}
})
It("returns a bosh version error", func() {
_, err := executor.Version()
Expect(err).To(MatchError("BOSH version could not be parsed"))
})
})
})
})
func formatScript(command string, stateDir string, args []string) string {
script := fmt.Sprintf("#!/bin/sh\nbosh-path %s \\\n", command)
for _, arg := range args {
if arg[0] == '-' {
script = fmt.Sprintf("%s %s", script, arg)
} else {
script = fmt.Sprintf("%s %s \\\n", script, arg)
}
}
return fmt.Sprintf("%s\n", script[:len(script)-2])
}
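// For illustration, formatScript("create-env", stateDir, []string{"bosh.yml", "--state", "x.json"})
// produces a script of roughly this shape (flags stay on the same line as the value that follows them):
//
//	#!/bin/sh
//	bosh-path create-env \
//	 bosh.yml \
//	 --state x.json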
type behavesLikePlanFs interface {
fileio.FileReader
fileio.Stater
}
func behavesLikePlan(expectedArgs []string, cli *fakes.BOSHCLI, fs behavesLikePlanFs, executor bosh.Executor, input bosh.DirInput, deploymentDir, iaas, stateDir string) {
cli.RunStub = func(stdout io.Writer, workingDirectory string, args []string) error {
stdout.Write([]byte("some-manifest"))
return nil
}
err := executor.PlanDirector(input, deploymentDir, iaas)
Expect(err).NotTo(HaveOccurred())
Expect(cli.RunCallCount()).To(Equal(0))
By("writing the create-env args to a shell script", func() {
expectedScript := formatScript("create-env", stateDir, expectedArgs)
scriptPath := fmt.Sprintf("%s/create-director.sh", stateDir)
shellScript, err := fs.ReadFile(scriptPath)
Expect(err).NotTo(HaveOccurred())
fileinfo, err := fs.Stat(scriptPath)
Expect(err).NotTo(HaveOccurred())
Expect(fileinfo.Mode().String()).To(Equal("-rwxr-x---"))
Expect(err).NotTo(HaveOccurred())
Expect(string(shellScript)).To(Equal(expectedScript))
})
By("writing the delete-env args to a shell script", func() {
expectedScript := formatScript("delete-env", stateDir, expectedArgs)
scriptPath := fmt.Sprintf("%s/delete-director.sh", stateDir)
shellScript, err := fs.ReadFile(scriptPath)
Expect(err).NotTo(HaveOccurred())
fileinfo, err := fs.Stat(scriptPath)
Expect(err).NotTo(HaveOccurred())
Expect(fileinfo.Mode().String()).To(Equal("-rwxr-x---"))
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred())
Expect(string(shellScript)).To(Equal(expectedScript))
})
}
| ["\"BBL_STATE_DIR\"", "\"BBL_AWS_ACCESS_KEY_ID\"", "\"BBL_AWS_SECRET_ACCESS_KEY\"", "\"BBL_AZURE_CLIENT_ID\"", "\"BBL_AZURE_CLIENT_SECRET\"", "\"BBL_AZURE_SUBSCRIPTION_ID\"", "\"BBL_AZURE_TENANT_ID\"", "\"BBL_GCP_SERVICE_ACCOUNT_KEY_PATH\"", "\"BBL_GCP_ZONE\"", "\"BBL_GCP_PROJECT_ID\"", "\"BBL_VSPHERE_VCENTER_USER\"", "\"BBL_VSPHERE_VCENTER_PASSWORD\"", "\"BBL_OPENSTACK_USERNAME\"", "\"BBL_OPENSTACK_PASSWORD\"", "\"BBL_STATE_DIR\"", "\"BBL_STATE_DIR\"", "\"BBL_AWS_ACCESS_KEY_ID\"", "\"BBL_AWS_SECRET_ACCESS_KEY\"", "\"BBL_AZURE_CLIENT_ID\"", "\"BBL_AZURE_CLIENT_SECRET\"", "\"BBL_AZURE_SUBSCRIPTION_ID\"", "\"BBL_AZURE_TENANT_ID\"", "\"BBL_GCP_SERVICE_ACCOUNT_KEY_PATH\"", "\"BBL_GCP_ZONE\"", "\"BBL_GCP_PROJECT_ID\"", "\"BBL_VSPHERE_VCENTER_USER\"", "\"BBL_VSPHERE_VCENTER_PASSWORD\""] | [] | ["BBL_OPENSTACK_PASSWORD", "BBL_VSPHERE_VCENTER_USER", "BBL_GCP_ZONE", "BBL_VSPHERE_VCENTER_PASSWORD", "BBL_AWS_SECRET_ACCESS_KEY", "BBL_STATE_DIR", "BBL_AZURE_TENANT_ID", "BBL_AZURE_CLIENT_SECRET", "BBL_OPENSTACK_USERNAME", "BBL_AZURE_SUBSCRIPTION_ID", "BBL_AWS_ACCESS_KEY_ID", "BBL_GCP_PROJECT_ID", "BBL_GCP_SERVICE_ACCOUNT_KEY_PATH", "BBL_AZURE_CLIENT_ID"] | [] | ["BBL_OPENSTACK_PASSWORD", "BBL_VSPHERE_VCENTER_USER", "BBL_GCP_ZONE", "BBL_VSPHERE_VCENTER_PASSWORD", "BBL_AWS_SECRET_ACCESS_KEY", "BBL_STATE_DIR", "BBL_AZURE_TENANT_ID", "BBL_AZURE_CLIENT_SECRET", "BBL_OPENSTACK_USERNAME", "BBL_AZURE_SUBSCRIPTION_ID", "BBL_AWS_ACCESS_KEY_ID", "BBL_GCP_PROJECT_ID", "BBL_GCP_SERVICE_ACCOUNT_KEY_PATH", "BBL_AZURE_CLIENT_ID"] | go | 14 | 0 | |
scripts/update.py
|
import io
import os
import sys
import django
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "infomate.settings")
django.setup()
import re
import logging
from datetime import timedelta, datetime
from urllib.parse import urlparse
from time import mktime
import threading
import queue
import requests
import click
import feedparser
from bs4 import BeautifulSoup
from requests import RequestException
from newspaper import Article as NewspaperArticle, ArticleException
from boards.models import BoardFeed, Article, Board
from scripts.common import DEFAULT_REQUEST_HEADERS, DEFAULT_REQUEST_TIMEOUT, MAX_PARSABLE_CONTENT_LENGTH
DEFAULT_NUM_WORKER_THREADS = 5
DEFAULT_ENTRIES_LIMIT = 100
MIN_REFRESH_DELTA = timedelta(minutes=30)
log = logging.getLogger()
queue = queue.Queue()
@click.command()
@click.option("--num-workers", default=DEFAULT_NUM_WORKER_THREADS, help="Number of parser threads")
@click.option("--force", is_flag=True, help="Force to update all existing feeds")
@click.option("--feed", help="To update one particular feed")
def update(num_workers, force, feed):
if feed:
need_to_update_feeds = BoardFeed.objects.filter(rss=feed)
else:
never_updated_feeds = BoardFeed.objects.filter(refreshed_at__isnull=True)
if not force:
need_to_update_feeds = BoardFeed.objects.filter(
rss__isnull=False,
refreshed_at__lte=datetime.utcnow() - MIN_REFRESH_DELTA
)
else:
need_to_update_feeds = BoardFeed.objects.filter(rss__isnull=False)
need_to_update_feeds = list(never_updated_feeds) + list(need_to_update_feeds)
tasks = []
for feed in need_to_update_feeds:
tasks.append({
"id": feed.id,
"board_id": feed.board_id,
"name": feed.name,
"rss": feed.rss,
"conditions": feed.conditions,
"is_parsable": feed.is_parsable,
})
threads = []
for i in range(num_workers):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
# put tasks to the queue
for item in tasks:
queue.put(item)
# wait until tasks are done
queue.join()
# update timestamps
updated_boards = {feed.board_id for feed in need_to_update_feeds}
Board.objects.filter(id__in=updated_boards).update(refreshed_at=datetime.utcnow())
# stop workers
for i in range(num_workers):
queue.put(None)
for t in threads:
t.join()
def worker():
while True:
task = queue.get()
if task is None:
break
try:
refresh_feed(task)
except Exception:
# catch all to avoid infinite wait in .join()
log.exception("Error refreshing feed")
queue.task_done()
def refresh_feed(item):
print(f"Updating feed {item['name']}...")
feed = feedparser.parse(item['rss'])
print(f"Entries found: {len(feed.entries)}")
for entry in feed.entries[:DEFAULT_ENTRIES_LIMIT]:
entry_title = parse_title(entry)
entry_link = parse_link(entry)
if not entry_title or not entry_link:
print("No entry title or link. Skipped")
continue
print(f"- article: '{entry_title}' {entry_link}")
conditions = item.get("conditions")
if conditions:
is_valid = check_conditions(conditions, entry)
if not is_valid:
print(f"Condition {conditions} does not match. Skipped")
continue
article, is_created = Article.objects.get_or_create(
board_id=item["board_id"],
feed_id=item["id"],
uniq_id=entry.get("id") or entry.get("guid") or entry_link,
defaults=dict(
url=entry_link[:2000],
domain=parse_domain(entry_link)[:256],
created_at=parse_datetime(entry),
updated_at=datetime.utcnow(),
title=entry_title[:256],
image=str(parse_rss_image(entry) or "")[:512],
description=entry.get("summary"),
)
)
if is_created:
# parse heavy info
text, lead_image = parse_rss_text_and_image(entry)
if text:
article.description = text[:1000]
if lead_image:
article.image = lead_image[:512]
# get real url
real_url, content_type, content_length = resolve_url(entry_link)
# load and summarize article
if item["is_parsable"] and content_length <= MAX_PARSABLE_CONTENT_LENGTH \
and content_type.startswith("text/"): # to not try to parse podcasts :D
if real_url:
article.url = real_url[:2000]
article.domain = parse_domain(real_url)[:256]
try:
summary, summary_image = load_and_parse_full_article_text_and_image(article.url)
except ArticleException:
summary = None
summary_image = None
if summary:
article.summary = summary
if summary_image:
article.image = summary_image[:512]
article.save()
week_ago = datetime.utcnow() - timedelta(days=7)
frequency = Article.objects.filter(feed_id=item["id"], created_at__gte=week_ago).count()
last_article = Article.objects.filter(feed_id=item["id"]).order_by("-created_at").first()
BoardFeed.objects.filter(id=item["id"]).update(
refreshed_at=datetime.utcnow(),
last_article_at=last_article.created_at if last_article else None,
frequency=frequency or 0
)
def check_conditions(conditions, entry):
if not conditions:
return True
for condition in conditions:
if condition["type"] == "in":
if condition["in"] not in entry[condition["field"]]:
return False
return True
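# For illustration (hypothetical feed config): a conditions list such as
#   [{"type": "in", "field": "title", "in": "python"}]
# makes check_conditions return True only for entries whose title contains "python".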
def resolve_url(entry_link):
url = str(entry_link)
content_type = None
content_length = MAX_PARSABLE_CONTENT_LENGTH + 1 # don't parse null content-types
depth = 10
while depth > 0:
depth -= 1
try:
response = requests.head(url, timeout=DEFAULT_REQUEST_TIMEOUT, verify=False, stream=True)
except RequestException:
log.warning(f"Failed to resolve URL: {url}")
return None, content_type, content_length
if 300 < response.status_code < 400:
url = response.headers["location"] # follow redirect
else:
content_type = response.headers.get("content-type")
content_length = int(response.headers.get("content-length") or 0)
break
return url, content_type, content_length
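# For illustration (hypothetical values): resolve_url("https://feedproxy.example/abc") follows up to
# 10 HEAD redirects and might return ("https://example.com/post", "text/html; charset=utf-8", 48213).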
def parse_domain(url):
domain = urlparse(url).netloc
if domain.startswith("www."):
domain = domain[4:]
return domain
def parse_datetime(entry):
published_time = entry.get("published_parsed") or entry.get("updated_parsed")
if published_time:
return datetime.fromtimestamp(mktime(published_time))
return datetime.utcnow()
def parse_title(entry):
title = entry.get("title") or entry.get("description") or entry.get("summary")
return re.sub("<[^<]+?>", "", title).strip()
def parse_link(entry):
if entry.get("link"):
return entry["link"]
if entry.get("links"):
return entry["links"][0]["href"]
return None
def parse_rss_image(entry):
if entry.get("media_content"):
images = [m["url"] for m in entry["media_content"] if m.get("medium") == "image" and m.get("url")]
if images:
return images[0]
if entry.get("image"):
if isinstance(entry["image"], dict):
return entry["image"].get("href")
return entry["image"]
return None
def parse_rss_text_and_image(entry):
if not entry.get("summary"):
return "", ""
bs = BeautifulSoup(entry["summary"], features="lxml")
text = re.sub(r"\s\s+", " ", bs.text or "").strip()
img_tags = bs.findAll("img")
for img_tag in img_tags:
src = img_tag.get("src", None)
if src:
return text, src
return text, ""
def load_page_safe(url):
try:
response = requests.get(
url=url,
timeout=DEFAULT_REQUEST_TIMEOUT,
headers=DEFAULT_REQUEST_HEADERS,
stream=True # the most important part — stream response to prevent loading everything into memory
)
except RequestException as ex:
log.warning(f"Error parsing the page: {url} {ex}")
return ""
html = io.StringIO()
total_bytes = 0
for chunk in response.iter_content(chunk_size=100 * 1024, decode_unicode=True):
total_bytes += len(chunk)
if total_bytes >= MAX_PARSABLE_CONTENT_LENGTH:
return "" # reject too big pages
html.write(chunk)
return html.getvalue()
def load_and_parse_full_article_text_and_image(url):
article = NewspaperArticle(url)
article.set_html(load_page_safe(url)) # safer than article.download()
article.parse()
article.nlp()
return article.summary, article.top_image
if __name__ == '__main__':
update()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
Scene_Generator/random_object.py
|
import bpy
import os
import glob
from os.path import isdir, join
import sys
import random
import mathutils
from fuzzywuzzy import fuzz, process
from contextlib import contextmanager
import addon_utils
import math
scene_dir = os.environ['SCENE_DIR']
model_dir = os.environ['MODEL_DIR']
sub_dir = os.environ['SUB_DIR']
class SceneGenerator:
scene = bpy.context.scene
envmap_camera = bpy.data.objects["Envmap_Camera"]
render_camera = bpy.data.objects["Camera"]
camera_limits = [(0.8, 1.3), (-0.1, 0.1), (-3.14, 3.14)]
envmaps = glob.glob('{}/hdris/*'.format(scene_dir))
tables = [obj.name for obj in scene.objects if "Table" in obj.name]
imported_objects = []
table = {}
nodes = bpy.data.scenes[0].node_tree.nodes
car = None
def __init__(self):
self.models = find_scene_data()
self.material = bpy.data.materials['Mix']
self.normal_material = bpy.data.materials['Normals']
self.scene.use_nodes = False
def random_material(self, objects):
mat_prop(self.material, 'Base Color', random_colour())
mat_prop(self.material, 'Subsurface', random.uniform(0, 0.2))
mat_prop(self.material, 'Subsurface Color', random_colour())
mat_prop(self.material, 'Metallic', random.uniform(0, 1))
mat_prop(self.material, 'Specular', random.uniform(0.3, 1))
mat_prop(self.material, 'Roughness', random.uniform(0, 0.6))
for obj in objects:
if obj.type != 'CAMERA' and obj.type != 'LAMP':
if obj.data.materials:
obj.data.materials[0] = self.material
else:
obj.data.materials.append(self.material)
obj.active_material = self.material
bpy.context.scene.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.object.material_slot_assign()
bpy.ops.object.mode_set(mode='OBJECT')
obj.select = True
def surface_normals(self):
for obj in self.scene.objects:
if obj.type == 'MESH':
# obj.data.materials.append(self.normal_material)
bpy.context.scene.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
obj.data.materials.clear()
obj.data.materials.append(self.normal_material)
bpy.context.object.active_material_index = 0
bpy.ops.object.material_slot_assign()
# obj.active_material = self.normal_material
bpy.ops.object.mode_set(mode='OBJECT')
# bpy.context.scene.objects.active = obj
# bpy.ops.object.mode_set(mode='EDIT')
# bpy.context.object.active_material_index = 1
# bpy.ops.object.material_slot_assign()
# bpy.context.object.active_material_index = 0
# bpy.ops.object.mode_set(mode='OBJECT')
obj.select = True
def place_random_object(self, name):
path = random.choice(self.models)
bpy.ops.import_scene.obj(filepath=path)
for material in bpy.data.materials:
if material != self.material and material != self.normal_material:
bpy.data.materials.remove(material)
objects = bpy.context.selected_objects[:]
bpy.ops.transform.resize(value=(0.1, 0.1, 0.1), constraint_axis=(False, False, False),
constraint_orientation='GLOBAL', mirror=False, proportional='DISABLED',
proportional_edit_falloff='SMOOTH', proportional_size=1)
random_rotation(objects[0], self.camera_limits)
#text_file = open("{}/meta/{}.txt".format(scene_dir, name), "w+")
#text_file.write("Model: {}".format(path))
#text_file.close()
# bpy.ops.mesh.uv_texture_remove()
self.car = objects
return path
def clear_objects(self, objects):
bpy.ops.object.select_all(action='DESELECT')
for obj in bpy.data.objects:
if obj.type == 'MESH':
obj.select = True
bpy.ops.object.delete()
def random_render(self, name='test'):
self.scene.camera = self.render_camera
bpy.data.cameras[self.scene.camera.name].clip_start = 0.0001
bpy.data.cameras[self.scene.camera.name].clip_end = 1000
bpy.data.cameras[self.scene.camera.name].angle = 1.05
random_rotation(self.scene.camera, self.camera_limits)
bpy.context.scene.world.cycles_visibility.camera = False
self.random_material(self.car)
bpy.ops.view3d.camera_to_view_selected()
envmap_id = self.light_scene()
self.scene.view_settings.view_transform = 'Default'
self.scene.render.image_settings.file_format = 'PNG'
self.scene.render.resolution_percentage = 100
self.scene.render.resolution_x = 256
self.scene.render.resolution_y = 256
move_object(self.scene.camera, (-1, 0.0, 0.0))
bpy.context.scene.world.cycles_visibility.camera = False
bpy.data.scenes['Scene'].render.filepath = "{}/{}/left/{}".format(scene_dir, sub_dir,
'{}.png'.format(name))
bpy.ops.render.render(write_still=True)
move_object(self.scene.camera, (2, 0.0, 0.0))
bpy.data.scenes['Scene'].render.filepath = "{}/{}/right/{}".format(scene_dir, sub_dir,
'{}.png'.format(name))
bpy.ops.render.render(write_still=True)
bpy.context.scene.world.cycles_visibility.camera = True
for obj in self.car:
obj.hide_render = True
bpy.data.scenes['Scene'].render.filepath = "{}/{}/bg/{}".format(scene_dir, sub_dir,
'{}.png'.format(name))
bpy.ops.render.render(write_still=True)
bpy.context.scene.world.cycles_visibility.camera = False
for obj in self.car:
obj.hide_render = False
#self.scene.use_nodes = True
#self.scene.use_nodes = False
self.surface_normals()
bpy.data.scenes['Scene'].render.filepath = "{}/{}/norms/{}".format(scene_dir, sub_dir,
'{}.png'.format(name))
bpy.ops.render.render(write_still=True)
bpy.context.scene.render.layers["RenderLayer"].use_sky = True
def light_scene(self):
envmap = random.choice(self.envmaps)
bpy.data.images.load(envmap, check_existing=False)
self.scene.world.use_nodes = True
self.scene.world.node_tree.nodes['Environment Texture'].image = bpy.data.images[os.path.basename(envmap)]
return os.path.basename(envmap).split('.')[0]
def render_envmap(self, name='test'):
for obj in self.car:
obj.hide_render = True
self.scene.render.resolution_x = 64
self.scene.render.resolution_y = 64
bpy.context.scene.world.cycles_visibility.camera = True
self.scene.render.resolution_percentage = 100
self.envmap_camera.rotation_euler = self.render_camera.rotation_euler
self.scene.camera = self.envmap_camera
self.scene.view_settings.view_transform = 'Raw'
self.scene.render.image_settings.file_format = 'HDR'
bpy.data.scenes['Scene'].render.filepath = "{}/{}/envmaps/{}".format(scene_dir,sub_dir, '{}.hdr'.format(name))
bpy.ops.render.render(write_still=True)
for obj in self.car:
obj.hide_render = False
def mat_prop(mat, property, val):
mat.node_tree.nodes["Principled BSDF"].inputs[property].default_value = val
def random_colour():
return (random.uniform(0.0, 1.0), random.uniform(0.0, 1.0), random.uniform(0.0, 1.0), 1)
def move_object(object, vector):
rightvec = mathutils.Vector(vector)
inv = object.matrix_world.copy()
inv.invert()
vec_rot = rightvec * inv
object.location = object.location + vec_rot
def random_rotation(object, limits):
object.rotation_euler.x = random.uniform(limits[0][0], limits[0][1])
object.rotation_euler.y = random.uniform(limits[1][0], limits[1][1])
object.rotation_euler.z = random.uniform(limits[2][0], limits[2][1])
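# Note: limits is a list of (min, max) Euler-angle ranges in radians for the X, Y and Z axes respectively.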
def find_scene_data():
categories = dict()
models = glob.glob('{}/*/*.obj'.format(model_dir))
return models
def extract_material(category, materials, limit=4):
return [i[0] for i in process.extract(category, materials, limit=limit)]
def main():
prefix = 0
for arg in sys.argv:
if 'prefix' in arg:
prefix = int(arg.split('=')[1])
bpy.context.scene.render.engine = 'CYCLES'
try:
bpy.context.scene.cycles.device = 'GPU'
bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
bpy.context.scene.render.tile_x = 256
bpy.context.scene.render.tile_y = 256
except TypeError:
bpy.context.scene.render.tile_x = 32
bpy.context.scene.render.tile_y = 32
pass
# bpy.context.user_preferences.addons['cycles'].preferences.devices[1].use = True
prefs = bpy.context.user_preferences.addons['cycles'].preferences
# bpy.ops.wm.addon_enable(module='materials_utils')
print(prefs.compute_device_type)
generator = SceneGenerator()
filename = generator.place_random_object(prefix)
filename = os.path.dirname(filename).split('/')[-1]
for i in range(prefix, prefix + 10):
generator.random_render(str(i))
generator.render_envmap(str(i))
generator.clear_objects(generator.car)
if __name__ == "__main__":
main()
| [] | [] | ["SUB_DIR", "SCENE_DIR", "MODEL_DIR"] | [] | ["SUB_DIR", "SCENE_DIR", "MODEL_DIR"] | python | 3 | 0 | |
api/controllers/api_test.go
|
package controllers_test
import (
"net/http"
"net/http/httptest"
"os"
"testing"
"github.com/convox/rack/api/controllers"
"github.com/convox/rack/api/models"
"github.com/convox/rack/test"
"github.com/stretchr/testify/assert"
)
// Note: these tests don't use the api helpers to ensure a naked
// client can connect
func TestNoPassword(t *testing.T) {
models.TestProvider.On("SystemGet").Return(nil, nil)
aws := test.StubAws(test.DescribeConvoxStackCycle("convox-test"))
defer aws.Close()
defer os.Setenv("RACK", os.Getenv("RACK"))
os.Setenv("RACK", "convox-test")
assert.HTTPSuccess(t, controllers.HandlerFunc, "GET", "http://convox/system", nil)
}
func TestBasicAuth(t *testing.T) {
models.TestProvider.On("SystemGet").Return(nil, nil)
assert := assert.New(t)
aws := test.StubAws(test.DescribeConvoxStackCycle("convox-test"))
defer aws.Close()
defer os.Setenv("PASSWORD", os.Getenv("PASSWORD"))
defer os.Setenv("RACK", os.Getenv("RACK"))
os.Setenv("PASSWORD", "keymaster")
os.Setenv("RACK", "convox-test")
req, _ := http.NewRequest("GET", "http://convox/system", nil)
w := httptest.NewRecorder()
controllers.HandlerFunc(w, req)
if !assert.Equal(401, w.Code) {
return
}
w = httptest.NewRecorder()
req.SetBasicAuth("", "keymaster")
controllers.HandlerFunc(w, req)
assert.Equal(200, w.Code)
}
| ["\"RACK\"", "\"PASSWORD\"", "\"RACK\""] | [] | ["RACK", "PASSWORD"] | [] | ["RACK", "PASSWORD"] | go | 2 | 0 | |
mne/forward/forward.py
|
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Martin Luessi <[email protected]>
#
# License: BSD (3-clause)
from time import time
from copy import deepcopy
import re
import numpy as np
from scipy import linalg, sparse
import shutil
import os
from os import path as op
import tempfile
from ..io import RawArray, Info
from ..io.constants import FIFF
from ..io.open import fiff_open
from ..io.tree import dir_tree_find
from ..io.tag import find_tag, read_tag
from ..io.matrix import (_read_named_matrix, _transpose_named_matrix,
write_named_matrix)
from ..io.meas_info import read_bad_channels, write_info
from ..io.pick import (pick_channels_forward, pick_info, pick_channels,
pick_types)
from ..io.write import (write_int, start_block, end_block,
write_coord_trans, write_ch_info, write_name_list,
write_string, start_file, end_file, write_id)
from ..io.base import BaseRaw
from ..evoked import Evoked, EvokedArray
from ..epochs import BaseEpochs
from ..source_space import (_read_source_spaces_from_tree,
find_source_space_hemi, _set_source_space_vertices,
_write_source_spaces_to_fid)
from ..source_estimate import _BaseSourceEstimate
from ..transforms import (transform_surface_to, invert_transform,
write_trans)
from ..utils import (_check_fname, get_subjects_dir, has_mne_c, warn,
run_subprocess, check_fname, logger, verbose, fill_doc,
_validate_type, _check_compensation_grade, _check_option)
from ..label import Label
from ..fixes import einsum
class Forward(dict):
"""Forward class to represent info from forward solution."""
def copy(self):
"""Copy the Forward instance."""
return Forward(deepcopy(self))
def __repr__(self):
"""Summarize forward info instead of printing all."""
entr = '<Forward'
nchan = len(pick_types(self['info'], meg=True, eeg=False, exclude=[]))
entr += ' | ' + 'MEG channels: %d' % nchan
nchan = len(pick_types(self['info'], meg=False, eeg=True, exclude=[]))
entr += ' | ' + 'EEG channels: %d' % nchan
src_types = np.array([src['type'] for src in self['src']])
if (src_types == 'surf').all():
entr += (' | Source space: Surface with %d vertices'
% self['nsource'])
elif (src_types == 'vol').all():
entr += (' | Source space: Volume with %d grid points'
% self['nsource'])
elif (src_types == 'discrete').all():
entr += (' | Source space: Discrete with %d dipoles'
% self['nsource'])
else:
count_string = ''
if (src_types == 'surf').any():
count_string += '%d surface, ' % (src_types == 'surf').sum()
if (src_types == 'vol').any():
count_string += '%d volume, ' % (src_types == 'vol').sum()
if (src_types == 'discrete').any():
count_string += '%d discrete, ' \
% (src_types == 'discrete').sum()
count_string = count_string.rstrip(', ')
entr += (' | Source space: Mixed (%s) with %d vertices'
% (count_string, self['nsource']))
if self['source_ori'] == FIFF.FIFFV_MNE_UNKNOWN_ORI:
entr += (' | Source orientation: Unknown')
elif self['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
entr += (' | Source orientation: Fixed')
elif self['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
entr += (' | Source orientation: Free')
entr += '>'
return entr
def _block_diag(A, n):
"""Construct a block diagonal from a packed structure.
You have to try it on a matrix to see what it's doing.
If A is not sparse, then returns a sparse block diagonal "bd",
diagonalized from the
elements in "A".
"A" is ma x na, comprising bdn=(na/"n") blocks of submatrices.
Each submatrix is ma x "n", and these submatrices are
placed down the diagonal of the matrix.
If A is already sparse, then the operation is reversed, yielding
a block
row matrix, where each set of n columns corresponds to a block element
from the block diagonal.
Parameters
----------
A : array
The matrix
n : int
The block size
Returns
-------
bd : sparse matrix
The block diagonal matrix
"""
if sparse.issparse(A): # then make block sparse
raise NotImplementedError('sparse reversal not implemented yet')
ma, na = A.shape
bdn = na // int(n) # number of submatrices
if na % n > 0:
raise ValueError('Width of matrix must be a multiple of n')
tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)
tmp = np.tile(tmp, (1, n))
ii = tmp.ravel()
jj = np.arange(na, dtype=np.int)[None, :]
jj = jj * np.ones(ma, dtype=np.int)[:, None]
jj = jj.T.ravel() # column indices foreach sparse bd
bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()
return bd
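# Worked example (illustrative): for a 1 x 4 matrix split into n=2 column blocks,
#   _block_diag(np.array([[1., 2., 3., 4.]]), 2).toarray()
# gives
#   [[1., 2., 0., 0.],
#    [0., 0., 3., 4.]]
# i.e. each 1 x 2 block is placed down the diagonal of a 2 x 4 sparse matrix.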
def _inv_block_diag(A, n):
"""Construct an inverse block diagonal from a packed structure.
You have to try it on a matrix to see what it's doing.
"A" is ma x na, comprising bdn=(na/"n") blocks of submatrices.
Each submatrix is ma x "n", and the inverses of these submatrices
are placed down the diagonal of the matrix.
Parameters
----------
A : array
The matrix.
n : int
The block size.
Returns
-------
bd : sparse matrix
The block diagonal matrix.
"""
ma, na = A.shape
bdn = na // int(n) # number of submatrices
if na % n > 0:
raise ValueError('Width of matrix must be a multiple of n')
# modify A in-place to invert each n x n sub-block
A = A.copy()
for start in range(0, na, n):
# this is a view
A[:, start:start + n] = linalg.inv(A[:, start:start + n])
tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)
tmp = np.tile(tmp, (1, n))
ii = tmp.ravel()
jj = np.arange(na, dtype=np.int)[None, :]
jj = jj * np.ones(ma, dtype=np.int)[:, None]
jj = jj.T.ravel()  # column indices for each sparse bd
bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()
return bd
def _get_tag_int(fid, node, name, id_):
"""Check we have an appropriate tag."""
tag = find_tag(fid, node, id_)
if tag is None:
fid.close()
raise ValueError(name + ' tag not found')
return int(tag.data)
def _read_one(fid, node):
"""Read all interesting stuff for one forward solution."""
# This function assumes the fid is open as a context manager
if node is None:
return None
one = Forward()
one['source_ori'] = _get_tag_int(fid, node, 'Source orientation',
FIFF.FIFF_MNE_SOURCE_ORIENTATION)
one['coord_frame'] = _get_tag_int(fid, node, 'Coordinate frame',
FIFF.FIFF_MNE_COORD_FRAME)
one['nsource'] = _get_tag_int(fid, node, 'Number of sources',
FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
one['nchan'] = _get_tag_int(fid, node, 'Number of channels',
FIFF.FIFF_NCHAN)
try:
one['sol'] = _read_named_matrix(fid, node,
FIFF.FIFF_MNE_FORWARD_SOLUTION,
transpose=True)
one['_orig_sol'] = one['sol']['data'].copy()
except Exception:
logger.error('Forward solution data not found')
raise
try:
fwd_type = FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD
one['sol_grad'] = _read_named_matrix(fid, node, fwd_type,
transpose=True)
one['_orig_sol_grad'] = one['sol_grad']['data'].copy()
except Exception:
one['sol_grad'] = None
if one['sol']['data'].shape[0] != one['nchan'] or \
(one['sol']['data'].shape[1] != one['nsource'] and
one['sol']['data'].shape[1] != 3 * one['nsource']):
raise ValueError('Forward solution matrix has wrong dimensions')
if one['sol_grad'] is not None:
if one['sol_grad']['data'].shape[0] != one['nchan'] or \
(one['sol_grad']['data'].shape[1] != 3 * one['nsource'] and
one['sol_grad']['data'].shape[1] != 3 * 3 * one['nsource']):
raise ValueError('Forward solution gradient matrix has '
'wrong dimensions')
return one
def _read_forward_meas_info(tree, fid):
"""Read light measurement info from forward operator.
Parameters
----------
tree : tree
FIF tree structure.
fid : file id
The file id.
Returns
-------
info : instance of Info
The measurement info.
"""
# This function assumes fid is being used as a context manager
info = Info()
# Information from the MRI file
parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
if len(parent_mri) == 0:
raise ValueError('No parent MRI information found in operator')
parent_mri = parent_mri[0]
tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_FILE_NAME)
info['mri_file'] = tag.data if tag is not None else None
tag = find_tag(fid, parent_mri, FIFF.FIFF_PARENT_FILE_ID)
info['mri_id'] = tag.data if tag is not None else None
# Information from the MEG file
parent_meg = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
if len(parent_meg) == 0:
raise ValueError('No parent MEG information found in operator')
parent_meg = parent_meg[0]
tag = find_tag(fid, parent_meg, FIFF.FIFF_MNE_FILE_NAME)
info['meas_file'] = tag.data if tag is not None else None
tag = find_tag(fid, parent_meg, FIFF.FIFF_PARENT_FILE_ID)
info['meas_id'] = tag.data if tag is not None else None
# Add channel information
chs = list()
for k in range(parent_meg['nent']):
kind = parent_meg['directory'][k].kind
pos = parent_meg['directory'][k].pos
if kind == FIFF.FIFF_CH_INFO:
tag = read_tag(fid, pos)
chs.append(tag.data)
info['chs'] = chs
info._update_redundant()
# Get the MRI <-> head coordinate transformation
tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
coord_head = FIFF.FIFFV_COORD_HEAD
coord_mri = FIFF.FIFFV_COORD_MRI
coord_device = FIFF.FIFFV_COORD_DEVICE
coord_ctf_head = FIFF.FIFFV_MNE_COORD_CTF_HEAD
if tag is None:
raise ValueError('MRI/head coordinate transformation not found')
cand = tag.data
if cand['from'] == coord_mri and cand['to'] == coord_head:
info['mri_head_t'] = cand
else:
raise ValueError('MRI/head coordinate transformation not found')
# Get the MEG device <-> head coordinate transformation
tag = find_tag(fid, parent_meg, FIFF.FIFF_COORD_TRANS)
if tag is None:
raise ValueError('MEG/head coordinate transformation not found')
cand = tag.data
if cand['from'] == coord_device and cand['to'] == coord_head:
info['dev_head_t'] = cand
elif cand['from'] == coord_ctf_head and cand['to'] == coord_head:
info['ctf_head_t'] = cand
else:
raise ValueError('MEG/head coordinate transformation not found')
info['bads'] = read_bad_channels(fid, parent_meg)
# clean up our bad list, old versions could have non-existent bads
info['bads'] = [bad for bad in info['bads'] if bad in info['ch_names']]
# Check if a custom reference has been applied
tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_CUSTOM_REF)
if tag is None:
tag = find_tag(fid, parent_mri, 236) # Constant 236 used before v0.11
info['custom_ref_applied'] = bool(tag.data) if tag is not None else False
info._check_consistency()
return info
def _subject_from_forward(forward):
"""Get subject id from inverse operator."""
return forward['src'][0].get('subject_his_id', None)
@verbose
def _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=None):
"""Merge loaded MEG and EEG forward dicts into one dict."""
if megfwd is not None and eegfwd is not None:
if (megfwd['sol']['data'].shape[1] != eegfwd['sol']['data'].shape[1] or
megfwd['source_ori'] != eegfwd['source_ori'] or
megfwd['nsource'] != eegfwd['nsource'] or
megfwd['coord_frame'] != eegfwd['coord_frame']):
raise ValueError('The MEG and EEG forward solutions do not match')
fwd = megfwd
fwd['sol']['data'] = np.r_[fwd['sol']['data'], eegfwd['sol']['data']]
fwd['_orig_sol'] = np.r_[fwd['_orig_sol'], eegfwd['_orig_sol']]
fwd['sol']['nrow'] = fwd['sol']['nrow'] + eegfwd['sol']['nrow']
fwd['sol']['row_names'] = (fwd['sol']['row_names'] +
eegfwd['sol']['row_names'])
if fwd['sol_grad'] is not None:
fwd['sol_grad']['data'] = np.r_[fwd['sol_grad']['data'],
eegfwd['sol_grad']['data']]
fwd['_orig_sol_grad'] = np.r_[fwd['_orig_sol_grad'],
eegfwd['_orig_sol_grad']]
fwd['sol_grad']['nrow'] = (fwd['sol_grad']['nrow'] +
eegfwd['sol_grad']['nrow'])
fwd['sol_grad']['row_names'] = (fwd['sol_grad']['row_names'] +
eegfwd['sol_grad']['row_names'])
fwd['nchan'] = fwd['nchan'] + eegfwd['nchan']
logger.info(' MEG and EEG forward solutions combined')
elif megfwd is not None:
fwd = megfwd
else:
fwd = eegfwd
return fwd
@verbose
def read_forward_solution(fname, include=(), exclude=(), verbose=None):
"""Read a forward solution a.k.a. lead field.
Parameters
----------
fname : string
The file name, which should end with -fwd.fif or -fwd.fif.gz.
include : list, optional
List of names of channels to include. If empty all channels
are included.
exclude : list, optional
List of names of channels to exclude. If empty include all
channels.
%(verbose)s
Returns
-------
fwd : instance of Forward
The forward solution.
See Also
--------
write_forward_solution, make_forward_solution
Notes
-----
Forward solutions that are derived from an original forward solution with
free orientation are always stored on disk as a forward solution with free
orientation in X/Y/Z RAS coordinates. To apply any transformation to the
forward operator (surface orientation, fixed orientation) please apply
:func:`convert_forward_solution` after reading the forward solution with
:func:`read_forward_solution`.
Forward solutions that are derived from an original forward solution with
fixed orientation are stored on disk as a forward solution with fixed
surface-based orientations. Please note that the transformation to
surface-based, fixed orientation cannot be reverted after loading the
forward solution with :func:`read_forward_solution`.
"""
check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz',
'_fwd.fif', '_fwd.fif.gz'))
# Open the file, create directory
logger.info('Reading forward solution from %s...' % fname)
f, tree, _ = fiff_open(fname)
with f as fid:
# Find all forward solutions
fwds = dir_tree_find(tree, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
if len(fwds) == 0:
raise ValueError('No forward solutions in %s' % fname)
# Parent MRI data
parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
if len(parent_mri) == 0:
raise ValueError('No parent MRI information in %s' % fname)
parent_mri = parent_mri[0]
src = _read_source_spaces_from_tree(fid, tree, patch_stats=False)
for s in src:
s['id'] = find_source_space_hemi(s)
fwd = None
# Locate and read the forward solutions
megnode = None
eegnode = None
for k in range(len(fwds)):
tag = find_tag(fid, fwds[k], FIFF.FIFF_MNE_INCLUDED_METHODS)
if tag is None:
raise ValueError('Methods not listed for one of the forward '
'solutions')
if tag.data == FIFF.FIFFV_MNE_MEG:
megnode = fwds[k]
elif tag.data == FIFF.FIFFV_MNE_EEG:
eegnode = fwds[k]
megfwd = _read_one(fid, megnode)
if megfwd is not None:
if is_fixed_orient(megfwd):
ori = 'fixed'
else:
ori = 'free'
logger.info(' Read MEG forward solution (%d sources, '
'%d channels, %s orientations)'
% (megfwd['nsource'], megfwd['nchan'], ori))
eegfwd = _read_one(fid, eegnode)
if eegfwd is not None:
if is_fixed_orient(eegfwd):
ori = 'fixed'
else:
ori = 'free'
logger.info(' Read EEG forward solution (%d sources, '
'%d channels, %s orientations)'
% (eegfwd['nsource'], eegfwd['nchan'], ori))
fwd = _merge_meg_eeg_fwds(megfwd, eegfwd)
# Get the MRI <-> head coordinate transformation
tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
if tag is None:
raise ValueError('MRI/head coordinate transformation not found')
mri_head_t = tag.data
if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or
mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):
mri_head_t = invert_transform(mri_head_t)
if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or
mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):
fid.close()
raise ValueError('MRI/head coordinate transformation not '
'found')
fwd['mri_head_t'] = mri_head_t
#
# get parent MEG info
#
fwd['info'] = _read_forward_meas_info(tree, fid)
# MNE environment
parent_env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
if len(parent_env) > 0:
parent_env = parent_env[0]
tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_WORKING_DIR)
if tag is not None:
fwd['info']['working_dir'] = tag.data
tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_COMMAND_LINE)
if tag is not None:
fwd['info']['command_line'] = tag.data
# Transform the source spaces to the correct coordinate frame
# if necessary
# Make sure forward solution is in either the MRI or HEAD coordinate frame
if fwd['coord_frame'] not in (FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD):
raise ValueError('Only forward solutions computed in MRI or head '
'coordinates are acceptable')
# Transform each source space to the HEAD or MRI coordinate frame,
# depending on the coordinate frame of the forward solution
# NOTE: the function transform_surface_to will also work on discrete and
# volume sources
nuse = 0
for s in src:
try:
s = transform_surface_to(s, fwd['coord_frame'], mri_head_t)
except Exception as inst:
raise ValueError('Could not transform source space (%s)' % inst)
nuse += s['nuse']
# Make sure the number of sources match after transformation
if nuse != fwd['nsource']:
raise ValueError('Source spaces do not match the forward solution.')
logger.info(' Source spaces transformed to the forward solution '
'coordinate frame')
fwd['src'] = src
# Handle the source locations and orientations
fwd['source_rr'] = np.concatenate([ss['rr'][ss['vertno'], :]
for ss in src], axis=0)
# Store original source orientations
fwd['_orig_source_ori'] = fwd['source_ori']
# Deal with include and exclude
pick_channels_forward(fwd, include=include, exclude=exclude, copy=False)
if is_fixed_orient(fwd, orig=True):
fwd['source_nn'] = np.concatenate([_src['nn'][_src['vertno'], :]
for _src in fwd['src']], axis=0)
fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
fwd['surf_ori'] = True
else:
fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))
fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI
fwd['surf_ori'] = False
return Forward(fwd)
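# Hedged usage sketch (not part of the module): reading a forward solution
# from disk. The file name 'sample-fwd.fif' is a hypothetical placeholder;
# any orientation transform must be applied afterwards, per the Notes above.
def _read_forward_example():
    """Read a forward solution; it is returned in free X/Y/Z orientation."""
    fwd = read_forward_solution('sample-fwd.fif')
    # fwd['sol']['data'] has shape (n_channels, 3 * n_sources) for a
    # free-orientation operator
    return fwd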
@verbose
def convert_forward_solution(fwd, surf_ori=False, force_fixed=False,
copy=True, use_cps=True, verbose=None):
"""Convert forward solution between different source orientations.
Parameters
----------
fwd : Forward
The forward solution to modify.
surf_ori : bool, optional (default False)
Use surface-based source coordinate system? Note that force_fixed=True
implies surf_ori=True.
force_fixed : bool, optional (default False)
Force fixed source orientation mode?
copy : bool
Whether to return a new instance or modify in place.
use_cps : bool (default True)
Whether to use cortical patch statistics to define normal
orientations. Only used when surf_ori and/or force_fixed are True.
%(verbose)s
Returns
-------
fwd : Forward
The modified forward solution.
"""
fwd = fwd.copy() if copy else fwd
if force_fixed is True:
surf_ori = True
if any([src['type'] == 'vol' for src in fwd['src']]) and force_fixed:
raise ValueError(
'Forward operator was generated with sources from a '
'volume source space. Conversion to fixed orientation is not '
'possible. Consider using a discrete source space if you have '
'meaningful normal orientations.')
if surf_ori:
if use_cps:
if any(s.get('patch_inds') is not None for s in fwd['src']):
use_ave_nn = True
logger.info(' Average patch normals will be employed in '
'the rotation to the local surface coordinates..'
'..')
else:
use_ave_nn = False
logger.info(' No patch info available. The standard source '
'space normals will be employed in the rotation '
'to the local surface coordinates....')
else:
use_ave_nn = False
# We need to change these entries (only):
# 1. source_nn
# 2. sol['data']
# 3. sol['ncol']
# 4. sol_grad['data']
# 5. sol_grad['ncol']
# 6. source_ori
if is_fixed_orient(fwd, orig=True) or (force_fixed and not use_ave_nn):
# Fixed
fwd['source_nn'] = np.concatenate([s['nn'][s['vertno'], :]
for s in fwd['src']], axis=0)
if not is_fixed_orient(fwd, orig=True):
logger.info(' Changing to fixed-orientation forward '
'solution with surface-based source orientations...')
fix_rot = _block_diag(fwd['source_nn'].T, 1)
# newer versions of numpy require explicit casting here, so *= no
# longer works
fwd['sol']['data'] = (fwd['_orig_sol'] *
fix_rot).astype('float32')
fwd['sol']['ncol'] = fwd['nsource']
if fwd['sol_grad'] is not None:
x = sparse.block_diag([fix_rot] * 3)
fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod
fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
fwd['surf_ori'] = True
elif surf_ori: # Free, surf-oriented
# Rotate the local source coordinate systems
fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))
logger.info(' Converting to surface-based source orientations...')
# Actually determine the source orientations
pp = 0
for s in fwd['src']:
if s['type'] in ['surf', 'discrete']:
for p in range(s['nuse']):
# Project out the surface normal and compute SVD
if use_ave_nn and s.get('patch_inds') is not None:
nn = s['nn'][s['pinfo'][s['patch_inds'][p]], :]
nn = np.sum(nn, axis=0)[:, np.newaxis]
nn /= linalg.norm(nn)
else:
nn = s['nn'][s['vertno'][p], :][:, np.newaxis]
U, S, _ = linalg.svd(np.eye(3, 3) - nn * nn.T)
# Make sure that ez is in the direction of nn
if np.sum(nn.ravel() * U[:, 2].ravel()) < 0:
U *= -1.0
fwd['source_nn'][pp:pp + 3, :] = U.T
pp += 3
else:
pp += 3 * s['nuse']
# Rotate the solution components as well
if force_fixed:
fwd['source_nn'] = fwd['source_nn'][2::3, :]
fix_rot = _block_diag(fwd['source_nn'].T, 1)
# newer versions of numpy require explicit casting here, so *= no
# longer works
fwd['sol']['data'] = (fwd['_orig_sol'] *
fix_rot).astype('float32')
fwd['sol']['ncol'] = fwd['nsource']
if fwd['sol_grad'] is not None:
x = sparse.block_diag([fix_rot] * 3)
fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod
fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
fwd['surf_ori'] = True
else:
surf_rot = _block_diag(fwd['source_nn'].T, 3)
fwd['sol']['data'] = fwd['_orig_sol'] * surf_rot
fwd['sol']['ncol'] = 3 * fwd['nsource']
if fwd['sol_grad'] is not None:
x = sparse.block_diag([surf_rot] * 3)
fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod
fwd['sol_grad']['ncol'] = 9 * fwd['nsource']
fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI
fwd['surf_ori'] = True
else: # Free, cartesian
logger.info(' Cartesian source orientations...')
fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))
fwd['sol']['data'] = fwd['_orig_sol'].copy()
fwd['sol']['ncol'] = 3 * fwd['nsource']
if fwd['sol_grad'] is not None:
fwd['sol_grad']['data'] = fwd['_orig_sol_grad'].copy()
fwd['sol_grad']['ncol'] = 9 * fwd['nsource']
fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI
fwd['surf_ori'] = False
logger.info(' [done]')
return fwd
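# Hedged usage sketch (not part of the module): converting a freshly read,
# free-orientation forward operator, per the docstring above. `fwd` is
# assumed to come from read_forward_solution on a surface source space.
def _convert_forward_example(fwd):
    """Get surface-oriented and fixed-orientation copies of `fwd`."""
    fwd_surf = convert_forward_solution(fwd, surf_ori=True, force_fixed=False)
    fwd_fixed = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
                                         use_cps=True)
    return fwd_surf, fwd_fixed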
@verbose
def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
"""Write forward solution to a file.
Parameters
----------
fname : str
File name to save the forward solution to. It should end with -fwd.fif
or -fwd.fif.gz.
fwd : Forward
Forward solution.
overwrite : bool
If True, overwrite destination file (if it exists).
%(verbose)s
See Also
--------
read_forward_solution
Notes
-----
Forward solutions that are derived from an original forward solution with
free orientation are always stored on disk as a forward solution with free
orientation in X/Y/Z RAS coordinates. Transformations (surface orientation,
fixed orientation) will be reverted. To reapply any transformation to the
forward operator please apply :func:`convert_forward_solution` after
reading the forward solution with :func:`read_forward_solution`.
Forward solutions that are derived from an original forward solution with
fixed orientation are stored on disk as a forward solution with fixed
surface-based orientations. Please note that the transformation to
surface-based, fixed orientation cannot be reverted after loading the
forward solution with :func:`read_forward_solution`.
"""
check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz',
'_fwd.fif', '_fwd.fif.gz'))
# check for file existence
_check_fname(fname, overwrite)
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_MNE)
#
# MNE env
#
start_block(fid, FIFF.FIFFB_MNE_ENV)
write_id(fid, FIFF.FIFF_BLOCK_ID)
data = fwd['info'].get('working_dir', None)
if data is not None:
write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)
data = fwd['info'].get('command_line', None)
if data is not None:
write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)
end_block(fid, FIFF.FIFFB_MNE_ENV)
#
# Information from the MRI file
#
start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
write_string(fid, FIFF.FIFF_MNE_FILE_NAME, fwd['info']['mri_file'])
if fwd['info']['mri_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_FILE_ID, fwd['info']['mri_id'])
# store the MRI to HEAD transform in MRI file
write_coord_trans(fid, fwd['info']['mri_head_t'])
end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
# write measurement info
write_forward_meas_info(fid, fwd['info'])
# invert our original source space transform
src = list()
for s in fwd['src']:
s = deepcopy(s)
try:
# returns source space to original coordinate frame
# usually MRI
s = transform_surface_to(s, fwd['mri_head_t']['from'],
fwd['mri_head_t'])
except Exception as inst:
raise ValueError('Could not transform source space (%s)' % inst)
src.append(s)
#
# Write the source spaces (again)
#
_write_source_spaces_to_fid(fid, src)
n_vert = sum([ss['nuse'] for ss in src])
if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
n_col = n_vert
else:
n_col = 3 * n_vert
# Undo transformations
sol = fwd['_orig_sol'].copy()
if fwd['sol_grad'] is not None:
sol_grad = fwd['_orig_sol_grad'].copy()
else:
sol_grad = None
if fwd['surf_ori'] is True:
if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
warn('The forward solution, which is stored on disk now, is based '
'on a forward solution with fixed orientation. Please note '
'that the transformation to surface-based, fixed orientation '
'cannot be reverted after loading the forward solution with '
'read_forward_solution.', RuntimeWarning)
else:
warn('This forward solution is based on a forward solution with '
'free orientation. The original forward solution is stored '
'on disk in X/Y/Z RAS coordinates. Any transformation '
'(surface orientation or fixed orientation) will be '
'reverted. To reapply any transformation to the forward '
'operator please apply convert_forward_solution after '
'reading the forward solution with read_forward_solution.',
RuntimeWarning)
#
# MEG forward solution
#
picks_meg = pick_types(fwd['info'], meg=True, eeg=False, ref_meg=False,
exclude=[])
picks_eeg = pick_types(fwd['info'], meg=False, eeg=True, ref_meg=False,
exclude=[])
n_meg = len(picks_meg)
n_eeg = len(picks_eeg)
row_names_meg = [fwd['sol']['row_names'][p] for p in picks_meg]
row_names_eeg = [fwd['sol']['row_names'][p] for p in picks_eeg]
if n_meg > 0:
meg_solution = dict(data=sol[picks_meg], nrow=n_meg, ncol=n_col,
row_names=row_names_meg, col_names=[])
_transpose_named_matrix(meg_solution)
start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_MEG)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION,
fwd['_orig_source_ori'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)
write_int(fid, FIFF.FIFF_NCHAN, n_meg)
write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, meg_solution)
if sol_grad is not None:
meg_solution_grad = dict(data=sol_grad[picks_meg],
nrow=n_meg, ncol=n_col * 3,
row_names=row_names_meg, col_names=[])
_transpose_named_matrix(meg_solution_grad)
write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,
meg_solution_grad)
end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
#
# EEG forward solution
#
if n_eeg > 0:
eeg_solution = dict(data=sol[picks_eeg], nrow=n_eeg, ncol=n_col,
row_names=row_names_eeg, col_names=[])
_transpose_named_matrix(eeg_solution)
start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_EEG)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION,
fwd['_orig_source_ori'])
write_int(fid, FIFF.FIFF_NCHAN, n_eeg)
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)
write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, eeg_solution)
if sol_grad is not None:
eeg_solution_grad = dict(data=sol_grad[picks_eeg],
nrow=n_eeg, ncol=n_col * 3,
row_names=row_names_eeg, col_names=[])
_transpose_named_matrix(eeg_solution_grad)
write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,
eeg_solution_grad)
end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
end_block(fid, FIFF.FIFFB_MNE)
end_file(fid)
def is_fixed_orient(forward, orig=False):
"""Check if the forward operator is fixed orientation.
Parameters
----------
forward : instance of Forward
The forward.
orig : bool
If True, consider the original source orientation.
If False (default), consider the current source orientation.
Returns
-------
fixed_ori : bool
Whether or not it is fixed orientation.
"""
if orig: # if we want to know about the original version
fixed_ori = (forward['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)
else: # most of the time we want to know about the current version
fixed_ori = (forward['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)
return fixed_ori
def write_forward_meas_info(fid, info):
"""Write measurement info stored in forward solution.
Parameters
----------
fid : file id
The file id
info : instance of Info
The measurement info.
"""
info._check_consistency()
#
# Information from the MEG file
#
start_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
write_string(fid, FIFF.FIFF_MNE_FILE_NAME, info['meas_file'])
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
# get transformation from CTF and DEVICE to HEAD coordinate frame
meg_head_t = info.get('dev_head_t', info.get('ctf_head_t'))
if meg_head_t is None:
fid.close()
raise ValueError('Head<-->sensor transform not found')
write_coord_trans(fid, meg_head_t)
if 'chs' in info:
# Channel information
write_int(fid, FIFF.FIFF_NCHAN, len(info['chs']))
for k, c in enumerate(info['chs']):
# Scan numbers may have been messed up
c = deepcopy(c)
c['scanno'] = k + 1
write_ch_info(fid, c)
if 'bads' in info and len(info['bads']) > 0:
# Bad channels
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
end_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
def _select_orient_forward(forward, info, noise_cov=None, copy=True):
"""Prepare forward solution for inverse solvers."""
# fwd['sol']['row_names'] may be different order from fwd['info']['chs']
fwd_sol_ch_names = forward['sol']['row_names']
all_ch_names = set(fwd_sol_ch_names)
all_bads = set(info['bads'])
if noise_cov is not None:
all_ch_names &= set(noise_cov['names'])
all_bads |= set(noise_cov['bads'])
else:
noise_cov = dict(bads=info['bads'])
ch_names = [c['ch_name'] for c in info['chs']
if c['ch_name'] not in all_bads and
c['ch_name'] in all_ch_names]
if not len(info['bads']) == len(noise_cov['bads']) or \
not all(b in noise_cov['bads'] for b in info['bads']):
logger.info('info["bads"] and noise_cov["bads"] do not match, '
'excluding bad channels from both')
# check the compensation grade
_check_compensation_grade(forward['info'], info, 'forward')
n_chan = len(ch_names)
logger.info("Computing inverse operator with %d channels." % n_chan)
forward = pick_channels_forward(forward, ch_names, ordered=True,
copy=copy)
info_idx = [info['ch_names'].index(name) for name in ch_names]
info_picked = pick_info(info, info_idx)
forward['info']._check_consistency()
info_picked._check_consistency()
return forward, info_picked
@verbose
def compute_orient_prior(forward, loose=0.2, verbose=None):
"""Compute orientation prior.
Parameters
----------
forward : instance of Forward
Forward operator.
loose : float
The loose orientation parameter (between 0 and 1).
%(verbose)s
Returns
-------
orient_prior : ndarray, shape (n_vertices,)
Orientation priors.
See Also
--------
compute_depth_prior
"""
is_fixed_ori = is_fixed_orient(forward)
n_sources = forward['sol']['data'].shape[1]
loose = float(loose)
if not (0 <= loose <= 1):
raise ValueError('loose value should be between 0 and 1, '
'got %s.' % (loose,))
orient_prior = np.ones(n_sources, dtype=np.float)
if loose > 0.:
if is_fixed_ori:
raise ValueError('loose must be 0. with forward operator '
'with fixed orientation, got %s' % (loose,))
if loose < 1:
if not forward['surf_ori']:
raise ValueError('Forward operator is not oriented in surface '
'coordinates. loose parameter should be 1 '
'not %s.' % (loose,))
logger.info('Applying loose dipole orientations. Loose value '
'of %s.' % loose)
orient_prior[0::3] *= loose
orient_prior[1::3] *= loose
return orient_prior
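# Hedged sketch (not part of the module): the orientation prior for a free,
# surface-oriented forward operator. `fwd_surf` is assumed to be the output
# of convert_forward_solution(..., surf_ori=True).
def _orient_prior_example(fwd_surf):
    """Return the loose orientation prior for a surface-oriented forward."""
    prior = compute_orient_prior(fwd_surf, loose=0.2)
    # prior.reshape(-1, 3) repeats [0.2, 0.2, 1.0]: the two tangential
    # components are down-weighted relative to the surface normal
    return prior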
def _restrict_gain_matrix(G, info):
"""Restrict gain matrix entries for optimal depth weighting."""
# Figure out which ones have been used
if len(info['chs']) != G.shape[0]:
raise ValueError('G.shape[0] (%d) and length of info["chs"] (%d) '
'do not match' % (G.shape[0], len(info['chs'])))
for meg, eeg, kind in (
('grad', False, 'planar'),
('mag', False, 'magnetometer or axial gradiometer'),
(False, True, 'EEG')):
sel = pick_types(info, meg=meg, eeg=eeg, ref_meg=False, exclude=[])
if len(sel) > 0:
logger.info(' %d %s channels' % (len(sel), kind))
break
else:
warn('Could not find MEG or EEG channels to limit depth channels')
sel = slice(None)
return G[sel]
@verbose
def compute_depth_prior(forward, info, is_fixed_ori=None,
exp=0.8, limit=10.0,
patch_areas=None, limit_depth_chs=False,
combine_xyz='spectral', noise_cov=None, rank=None,
verbose=None):
"""Compute depth prior for depth weighting.
Parameters
----------
forward : instance of Forward
The forward solution.
info : instance of Info
The measurement info.
is_fixed_ori : bool | None
Deprecated, will be removed in 0.19.
exp : float
Exponent for the depth weighting, must be between 0 and 1.
limit : float | None
The upper bound on depth weighting.
Can be None to be bounded by the largest finite prior.
patch_areas : ndarray | None
Deprecated, will be removed in 0.19.
limit_depth_chs : bool | 'whiten'
How to deal with multiple channel types in depth weighting.
The default is True, which whitens based on the source sensitivity
of the highest-SNR channel type. See Notes for details.
.. versionchanged:: 0.18
Added the "whiten" option.
combine_xyz : 'spectral' | 'fro'
When a loose (or free) orientation is used, how the depth weighting
for each triplet should be calculated.
If 'spectral', use the squared spectral norm of Gk.
If 'fro', use the squared Frobenius norm of Gk.
.. versionadded:: 0.18
noise_cov : instance of Covariance | None
The noise covariance to use to whiten the gain matrix when
``limit_depth_chs='whiten'``.
.. versionadded:: 0.18
%(rank_None)s
.. versionadded:: 0.18
%(verbose)s
Returns
-------
depth_prior : ndarray, shape (n_vertices,)
The depth prior.
See Also
--------
compute_orient_prior
Notes
-----
The defaults used by the minimum norm code and sparse solvers differ.
In particular, the values for MNE are::
compute_depth_prior(..., limit=10., limit_depth_chs=True,
combine_xyz='spectral')
In sparse solvers and LCMV, the values are::
compute_depth_prior(..., limit=None, limit_depth_chs='whiten',
combine_xyz='fro')
The ``limit_depth_chs`` argument can take the following values:
* :data:`python:True` (default)
Use only grad channels in depth weighting (equivalent to MNE C
minimum-norm code). If grad channels aren't present, only mag
channels will be used (if no mag, then eeg). This makes the depth
prior dependent only on the sensor geometry (and relationship
to the sources).
* ``'whiten'``
Compute a whitener and apply it to the gain matrix before computing
the depth prior. In this case ``noise_cov`` must not be None.
Whitening the gain matrix makes the depth prior
depend on both sensor geometry and the data of interest captured
by the noise covariance (e.g., projections, SNR).
.. versionadded:: 0.18
* :data:`python:False`
Use all channels. Not recommended since the depth weighting will be
biased toward whichever channel type has the largest values in
SI units (such as EEG being orders of magnitude larger than MEG).
"""
from ..cov import Covariance, compute_whitener
if isinstance(forward, Forward):
patch_areas = forward.get('patch_areas', None)
is_fixed_ori = is_fixed_orient(forward)
G = forward['sol']['data']
else:
warn('Parameters G, is_fixed_ori, and patch_areas are '
'deprecated and will be removed in 0.19, pass in the forward '
'solution directly.', DeprecationWarning)
G = forward
_validate_type(is_fixed_ori, bool, 'is_fixed_ori')
logger.info('Creating the depth weighting matrix...')
_validate_type(noise_cov, (Covariance, None), 'noise_cov',
'Covariance or None')
_validate_type(limit_depth_chs, (str, bool), 'limit_depth_chs')
if isinstance(limit_depth_chs, str):
if limit_depth_chs != 'whiten':
raise ValueError('limit_depth_chs, if str, must be "whiten", got '
'%s' % (limit_depth_chs,))
if not isinstance(noise_cov, Covariance):
raise ValueError('With limit_depth_chs="whiten", noise_cov must be'
' a Covariance, got %s' % (type(noise_cov),))
if combine_xyz is not False: # private / expert option
_check_option('combine_xyz', combine_xyz, ('fro', 'spectral'))
# If possible, pick best depth-weighting channels
if limit_depth_chs is True:
G = _restrict_gain_matrix(G, info)
elif limit_depth_chs == 'whiten':
whitener, _ = compute_whitener(noise_cov, info, pca=True, rank=rank,
verbose=False)
G = np.dot(whitener, G)
# Compute the gain matrix
if is_fixed_ori or combine_xyz in ('fro', False):
d = np.sum(G ** 2, axis=0)
if not (is_fixed_ori or combine_xyz is False):
d = d.reshape(-1, 3).sum(axis=1)
# Spherical leadfield can be zero at the center
d[d == 0.] = np.min(d[d != 0.])
else: # 'spectral'
# n_pos = G.shape[1] // 3
# The following is equivalent to this, but 4-10x faster
# d = np.zeros(n_pos)
# for k in range(n_pos):
# Gk = G[:, 3 * k:3 * (k + 1)]
# x = np.dot(Gk.T, Gk)
# d[k] = linalg.svdvals(x)[0]
G.shape = (G.shape[0], -1, 3)
d = np.linalg.norm(einsum('svj,svk->vjk', G, G), # vector dot products
ord=2, axis=(1, 2)) # ord=2 spectral (largest s.v.)
G.shape = (G.shape[0], -1)
# XXX Currently the fwd solns never have "patch_areas" defined
if patch_areas is not None:
if not is_fixed_ori and combine_xyz is False:
patch_areas = np.repeat(patch_areas, 3)
d /= patch_areas ** 2
logger.info(' Patch areas taken into account in the depth '
'weighting')
w = 1.0 / d
if limit is not None:
ws = np.sort(w)
weight_limit = limit ** 2
if limit_depth_chs is False:
# match old mne-python behavior
# we used to do ind = np.argmin(ws), but this is 0 by sort above
n_limit = 0
limit = ws[0] * weight_limit
else:
# match C code behavior
limit = ws[-1]
n_limit = len(d)
if ws[-1] > weight_limit * ws[0]:
ind = np.where(ws > weight_limit * ws[0])[0][0]
limit = ws[ind]
n_limit = ind
logger.info(' limit = %d/%d = %f'
% (n_limit + 1, len(d),
np.sqrt(limit / ws[0])))
scale = 1.0 / limit
logger.info(' scale = %g exp = %g' % (scale, exp))
w = np.minimum(w / limit, 1)
depth_prior = w ** exp
if not (is_fixed_ori or combine_xyz is False):
depth_prior = np.repeat(depth_prior, 3)
return depth_prior
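# Hedged sketch (not part of the module): the two default styles of depth
# weighting described in the Notes above. `fwd`, `info`, and `cov` are
# assumed to be a matching Forward, Info, and noise Covariance.
def _depth_prior_example(fwd, info, cov):
    """Compute MNE-style and sparse-solver-style depth priors."""
    # Minimum-norm defaults: best channel type only, spectral norm per triplet
    w_mne = compute_depth_prior(fwd, info, exp=0.8, limit=10.,
                                limit_depth_chs=True, combine_xyz='spectral')
    # Sparse-solver / LCMV style: whiten the gain matrix with the covariance
    w_sparse = compute_depth_prior(fwd, info, exp=0.8, limit=None,
                                   limit_depth_chs='whiten',
                                   combine_xyz='fro', noise_cov=cov)
    return w_mne, w_sparse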
def _stc_src_sel(src, stc, on_missing='raise',
extra=', likely due to forward calculations'):
"""Select the vertex indices of a source space using a source estimate."""
if isinstance(stc, list):
vertices = stc
else:
assert isinstance(stc, _BaseSourceEstimate)
vertices = stc._vertices_list
del stc
if not len(src) == len(vertices):
raise RuntimeError('Mismatch between number of source spaces (%s) and '
'STC vertices (%s)' % (len(src), len(vertices)))
src_sels, stc_sels, out_vertices = [], [], []
src_offset = stc_offset = 0
for s, v in zip(src, vertices):
joint_sel = np.intersect1d(s['vertno'], v)
src_sels.append(np.searchsorted(s['vertno'], joint_sel) + src_offset)
src_offset += len(s['vertno'])
idx = np.searchsorted(v, joint_sel)
stc_sels.append(idx + stc_offset)
stc_offset += len(v)
out_vertices.append(np.array(v)[idx])
src_sel = np.concatenate(src_sels)
stc_sel = np.concatenate(stc_sels)
assert len(src_sel) == len(stc_sel) == sum(len(v) for v in out_vertices)
n_stc = sum(len(v) for v in vertices)
n_joint = len(src_sel)
if n_joint != n_stc:
msg = ('Only %i of %i SourceEstimate %s found in '
'source space%s'
% (n_joint, n_stc, 'vertex' if n_stc == 1 else 'vertices',
extra))
if on_missing == 'raise':
raise RuntimeError(msg)
elif on_missing == 'warn':
warn(msg)
else:
assert on_missing == 'ignore'
return src_sel, stc_sel, out_vertices
def _fill_measurement_info(info, fwd, sfreq):
"""Fill the measurement info of a Raw or Evoked object."""
sel = pick_channels(info['ch_names'], fwd['sol']['row_names'])
info = pick_info(info, sel)
info['bads'] = []
# this is probably correct based on what's done in meas_info.py...
info['meas_id'] = fwd['info']['meas_id']
info['file_id'] = info['meas_id']
now = time()
sec = np.floor(now)
usec = 1e6 * (now - sec)
info['meas_date'] = (int(sec), int(usec))
info['highpass'] = 0.0
info['lowpass'] = sfreq / 2.0
info['sfreq'] = sfreq
info['projs'] = []
return info
@verbose
def _apply_forward(fwd, stc, start=None, stop=None, on_missing='raise',
verbose=None):
"""Apply forward model and return data, times, ch_names."""
if not is_fixed_orient(fwd):
raise ValueError('Only fixed-orientation forward operators are '
'supported.')
if np.all(stc.data > 0):
warn('Source estimate only contains currents with positive values. '
'Use pick_ori="normal" when computing the inverse to compute '
'currents not current magnitudes.')
max_cur = np.max(np.abs(stc.data))
if max_cur > 1e-7: # 100 nAm threshold for warning
warn('The maximum current magnitude is %0.1f nAm, which is very large.'
' Are you trying to apply the forward model to noise-normalized '
'(dSPM, sLORETA, or eLORETA) values? The result will only be '
'correct if currents (in units of Am) are used.'
% (1e9 * max_cur))
src_sel, stc_sel, _ = _stc_src_sel(fwd['src'], stc, on_missing=on_missing)
gain = fwd['sol']['data'][:, src_sel]
# save some memory if possible
stc_sel = slice(None) if len(stc_sel) == len(stc.data) else stc_sel
logger.info('Projecting source estimate to sensor space...')
data = np.dot(gain, stc.data[stc_sel, start:stop])
logger.info('[done]')
times = deepcopy(stc.times[start:stop])
return data, times
@verbose
def apply_forward(fwd, stc, info, start=None, stop=None, use_cps=True,
on_missing='raise', verbose=None):
"""Project source space currents to sensor space using a forward operator.
The sensor space data is computed for all channels present in fwd. Use
pick_channels_forward or pick_types_forward to restrict the solution to a
subset of channels.
The function returns an Evoked object, which is constructed from the provided
info. The info should be from the same MEG system on which the original data
was acquired. An exception will be raised if the forward operator contains
channels that are not present in the info.
Parameters
----------
fwd : Forward
Forward operator to use.
stc : SourceEstimate
The source estimate from which the sensor space data is computed.
info : instance of Info
Measurement info to generate the evoked.
start : int, optional
Index of first time sample (index, not time in seconds).
stop : int, optional
Index of first time sample not to include (index, not time in seconds).
use_cps : bool (default True)
Whether to use cortical patch statistics to define normal
orientations when converting to fixed orientation (if necessary).
.. versionadded:: 0.15
%(on_missing)s Default is "raise".
.. versionadded:: 0.18
%(verbose)s
Returns
-------
evoked : Evoked
Evoked object with computed sensor space data.
See Also
--------
apply_forward_raw: Compute sensor space data and return a Raw object.
"""
# make sure info contains all channels in fwd
for ch_name in fwd['sol']['row_names']:
if ch_name not in info['ch_names']:
raise ValueError('Channel %s of forward operator not present in '
'info.' % ch_name)
# project the source estimate to the sensor space
if not is_fixed_orient(fwd):
fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=use_cps)
data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing)
# fill the measurement info
sfreq = float(1.0 / stc.tstep)
info_out = _fill_measurement_info(info, fwd, sfreq)
evoked = EvokedArray(data, info_out, times[0], nave=1)
evoked.times = times
evoked.first = int(np.round(evoked.times[0] * sfreq))
evoked.last = evoked.first + evoked.data.shape[1] - 1
return evoked
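# Hedged usage sketch (not part of the module): projecting a source estimate
# through the forward model to simulate evoked sensor data, as documented
# above. `fwd`, `stc`, and `info` are assumed to be mutually consistent.
def _apply_forward_example(fwd, stc, info):
    """Simulate an Evoked from a source estimate and a forward operator."""
    # A free-orientation operator is converted to fixed orientation internally
    evoked = apply_forward(fwd, stc, info, use_cps=True)
    return evoked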
@verbose
def apply_forward_raw(fwd, stc, info, start=None, stop=None,
on_missing='raise', verbose=None):
"""Project source space currents to sensor space using a forward operator.
The sensor space data is computed for all channels present in fwd. Use
pick_channels_forward or pick_types_forward to restrict the solution to a
subset of channels.
The function returns a Raw object, which is constructed using provided
info. The info object should be from the same MEG system on which the
original data was acquired. An exception will be raised if the forward
operator contains channels that are not present in the info.
Parameters
----------
fwd : Forward
Forward operator to use. Has to be fixed-orientation.
stc : SourceEstimate
The source estimate from which the sensor space data is computed.
info : instance of Info
The measurement info.
start : int, optional
Index of first time sample (index, not time in seconds).
stop : int, optional
Index of first time sample not to include (index, not time in seconds).
%(on_missing)s Default is "raise".
.. versionadded:: 0.18
%(verbose)s
Returns
-------
raw : Raw object
Raw object with computed sensor space data.
See Also
--------
apply_forward: Compute sensor space data and return an Evoked object.
"""
# make sure info contains all channels in fwd
for ch_name in fwd['sol']['row_names']:
if ch_name not in info['ch_names']:
raise ValueError('Channel %s of forward operator not present in '
'info.' % ch_name)
# project the source estimate to the sensor space
data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing)
sfreq = 1.0 / stc.tstep
info = _fill_measurement_info(info, fwd, sfreq)
info['projs'] = []
# store sensor data in Raw object using the info
raw = RawArray(data, info)
raw.preload = True
raw._first_samps = np.array([int(np.round(times[0] * sfreq))])
raw._last_samps = np.array([raw.first_samp + raw._data.shape[1] - 1])
raw._projector = None
raw._update_times()
return raw
@fill_doc
def restrict_forward_to_stc(fwd, stc, on_missing='ignore'):
"""Restrict forward operator to active sources in a source estimate.
Parameters
----------
fwd : instance of Forward
Forward operator.
stc : instance of SourceEstimate
Source estimate.
%(on_missing)s Default is "ignore".
.. versionadded:: 0.18
Returns
-------
fwd_out : instance of Forward
Restricted forward operator.
See Also
--------
restrict_forward_to_label
"""
_validate_type(on_missing, str, 'on_missing')
_check_option('on_missing', on_missing, ('ignore', 'warn', 'raise'))
src_sel, _, vertices = _stc_src_sel(fwd['src'], stc, on_missing=on_missing)
del stc
return _restrict_forward_to_src_sel(fwd, src_sel)
def _restrict_forward_to_src_sel(fwd, src_sel):
fwd_out = deepcopy(fwd)
# figure out the vertno we are keeping
idx_sel = np.concatenate([[[si] * len(s['vertno']), s['vertno']]
for si, s in enumerate(fwd['src'])], axis=-1)
assert idx_sel.ndim == 2 and idx_sel.shape[0] == 2
assert idx_sel.shape[1] == fwd['nsource']
idx_sel = idx_sel[:, src_sel]
fwd_out['source_rr'] = fwd['source_rr'][src_sel]
fwd_out['nsource'] = len(src_sel)
if is_fixed_orient(fwd):
idx = src_sel
if fwd['sol_grad'] is not None:
idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
if fwd['sol_grad'] is not None:
idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()
fwd_out['source_nn'] = fwd['source_nn'][idx]
fwd_out['sol']['data'] = fwd['sol']['data'][:, idx]
if fwd['sol_grad'] is not None:
fwd_out['sol_grad']['data'] = fwd['sol_grad']['data'][:, idx_grad]
fwd_out['sol']['ncol'] = len(idx)
if is_fixed_orient(fwd, orig=True):
idx = src_sel
if fwd['sol_grad'] is not None:
idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
if fwd['sol_grad'] is not None:
idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()
fwd_out['_orig_sol'] = fwd['_orig_sol'][:, idx]
if fwd['sol_grad'] is not None:
fwd_out['_orig_sol_grad'] = fwd['_orig_sol_grad'][:, idx_grad]
vertices = [idx_sel[1][idx_sel[0] == si]
for si in range(len(fwd_out['src']))]
_set_source_space_vertices(fwd_out['src'], vertices)
return fwd_out
def restrict_forward_to_label(fwd, labels):
"""Restrict forward operator to labels.
Parameters
----------
fwd : Forward
Forward operator.
labels : instance of Label | list
Label object or list of label objects.
Returns
-------
fwd_out : instance of Forward
Restricted forward operator.
See Also
--------
restrict_forward_to_stc
"""
vertices = [np.array([], int), np.array([], int)]
if not isinstance(labels, list):
labels = [labels]
# Get vertices separately of each hemisphere from all label
for label in labels:
_validate_type(label, Label, "label", "Label or list")
i = 0 if label.hemi == 'lh' else 1
vertices[i] = np.append(vertices[i], label.vertices)
# Remove duplicates and sort
vertices = [np.unique(vert_hemi) for vert_hemi in vertices]
fwd_out = deepcopy(fwd)
fwd_out['source_rr'] = np.zeros((0, 3))
fwd_out['nsource'] = 0
fwd_out['source_nn'] = np.zeros((0, 3))
fwd_out['sol']['data'] = np.zeros((fwd['sol']['data'].shape[0], 0))
fwd_out['_orig_sol'] = np.zeros((fwd['_orig_sol'].shape[0], 0))
if fwd['sol_grad'] is not None:
fwd_out['sol_grad']['data'] = np.zeros(
(fwd['sol_grad']['data'].shape[0], 0))
fwd_out['_orig_sol_grad'] = np.zeros(
(fwd['_orig_sol_grad'].shape[0], 0))
fwd_out['sol']['ncol'] = 0
nuse_lh = fwd['src'][0]['nuse']
for i in range(2):
fwd_out['src'][i]['vertno'] = np.array([], int)
fwd_out['src'][i]['nuse'] = 0
fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()
fwd_out['src'][i]['inuse'].fill(0)
fwd_out['src'][i]['use_tris'] = np.array([[]], int)
fwd_out['src'][i]['nuse_tri'] = np.array([0])
# src_sel is idx to cols in fwd that are in any label per hemi
src_sel = np.intersect1d(fwd['src'][i]['vertno'], vertices[i])
src_sel = np.searchsorted(fwd['src'][i]['vertno'], src_sel)
# Reconstruct each src
vertno = fwd['src'][i]['vertno'][src_sel]
fwd_out['src'][i]['inuse'][vertno] = 1
fwd_out['src'][i]['nuse'] += len(vertno)
fwd_out['src'][i]['vertno'] = np.where(fwd_out['src'][i]['inuse'])[0]
# Reconstruct part of fwd that is not sol data
src_sel += i * nuse_lh # Add column shift to right hemi
fwd_out['source_rr'] = np.vstack([fwd_out['source_rr'],
fwd['source_rr'][src_sel]])
fwd_out['nsource'] += len(src_sel)
if is_fixed_orient(fwd):
idx = src_sel
if fwd['sol_grad'] is not None:
idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
if fwd['sol_grad'] is not None:
idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()
fwd_out['source_nn'] = np.vstack(
[fwd_out['source_nn'], fwd['source_nn'][idx]])
fwd_out['sol']['data'] = np.hstack(
[fwd_out['sol']['data'], fwd['sol']['data'][:, idx]])
if fwd['sol_grad'] is not None:
fwd_out['sol_grad']['data'] = np.hstack(
[fwd_out['sol_grad']['data'],
fwd['sol_grad']['data'][:, idx_grad]])
fwd_out['sol']['ncol'] += len(idx)
if is_fixed_orient(fwd, orig=True):
idx = src_sel
if fwd['sol_grad'] is not None:
idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
if fwd['sol_grad'] is not None:
idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()
fwd_out['_orig_sol'] = np.hstack(
[fwd_out['_orig_sol'], fwd['_orig_sol'][:, idx]])
if fwd['sol_grad'] is not None:
fwd_out['_orig_sol_grad'] = np.hstack(
[fwd_out['_orig_sol_grad'],
fwd['_orig_sol_grad'][:, idx_grad]])
return fwd_out
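# Hedged usage sketch (not part of the module): restricting a forward
# operator to a set of labels, e.g. before simulating activity confined to a
# cortical region. `labels` is assumed to hold Label objects read elsewhere.
def _restrict_to_label_example(fwd, labels):
    """Keep only the gain columns whose sources fall inside `labels`."""
    fwd_label = restrict_forward_to_label(fwd, labels)
    # The channel dimension is unchanged; only the source dimension shrinks
    return fwd_label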
def _do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
mindist=None, bem=None, mri=None, trans=None,
eeg=True, meg=True, fixed=False, grad=False,
mricoord=False, overwrite=False, subjects_dir=None,
verbose=None):
"""Calculate a forward solution for a subject using MNE-C routines.
This is kept around for testing purposes.
This function wraps to mne_do_forward_solution, so the mne
command-line tools must be installed and accessible from Python.
Parameters
----------
subject : str
Name of the subject.
meas : Raw | Epochs | Evoked | str
If Raw or Epochs, a temporary evoked file will be created and
saved to a temporary directory. If str, then it should be a
filename to a file with measurement information the mne
command-line tools can understand (i.e., raw or evoked).
fname : str | None
Destination forward solution filename. If None, the solution
will be created in a temporary directory, loaded, and deleted.
src : str | None
Source space name. If None, the MNE default is used.
spacing : str
The spacing to use. Can be ``'#'`` for spacing in mm, ``'ico#'`` for a
recursively subdivided icosahedron, or ``'oct#'`` for a recursively
subdivided octahedron (e.g., ``spacing='ico4'``). Default is 7 mm.
mindist : float | str | None
Minimum distance of sources from inner skull surface (in mm).
If None, the MNE default value is used. If string, 'all'
indicates to include all points.
bem : str | None
Name of the BEM to use (e.g., "sample-5120-5120-5120"). If None
(Default), the MNE default will be used.
mri : str | None
The name of the trans file in FIF format.
If None, trans must not be None.
trans : dict | str | None
File name of the trans file in text format.
If None, mri must not be None.
eeg : bool
If True (Default), include EEG computations.
meg : bool
If True (Default), include MEG computations.
fixed : bool
If True, make a fixed-orientation forward solution (Default:
False). Note that fixed-orientation inverses can still be
created from free-orientation forward solutions.
grad : bool
If True, compute the gradient of the field with respect to the
dipole coordinates as well (Default: False).
mricoord : bool
If True, calculate in MRI coordinates (Default: False).
overwrite : bool
If True, the destination file (if it exists) will be overwritten.
If False (default), an error will be raised if the file exists.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
%(verbose)s
See Also
--------
make_forward_solution
Returns
-------
fwd : Forward
The generated forward solution.
"""
if not has_mne_c():
raise RuntimeError('mne command line tools could not be found')
# check for file existence
temp_dir = tempfile.mkdtemp()
if fname is None:
fname = op.join(temp_dir, 'temp-fwd.fif')
_check_fname(fname, overwrite)
_validate_type(subject, "str", "subject")
# check for meas to exist as string, or try to make evoked
if isinstance(meas, str):
if not op.isfile(meas):
raise IOError('measurement file "%s" could not be found' % meas)
elif isinstance(meas, (BaseRaw, BaseEpochs, Evoked)):
meas_file = op.join(temp_dir, 'info.fif')
write_info(meas_file, meas.info)
meas = meas_file
else:
raise ValueError('meas must be string, Raw, Epochs, or Evoked')
# deal with trans/mri
if mri is not None and trans is not None:
raise ValueError('trans and mri cannot both be specified')
if mri is None and trans is None:
# MNE allows this to default to a trans/mri in the subject's dir,
# but let's be safe here and force the user to pass us a trans/mri
raise ValueError('Either trans or mri must be specified')
if trans is not None:
_validate_type(trans, "str", "trans")
if not op.isfile(trans):
raise IOError('trans file "%s" not found' % trans)
if mri is not None:
# deal with trans
if not isinstance(mri, str):
if isinstance(mri, dict):
mri_data = deepcopy(mri)
mri = op.join(temp_dir, 'mri-trans.fif')
try:
write_trans(mri, mri_data)
except Exception:
raise IOError('mri was a dict, but could not be '
'written to disk as a transform file')
else:
raise ValueError('trans must be a string or dict (trans)')
if not op.isfile(mri):
raise IOError('trans file "%s" could not be found' % mri)
# deal with meg/eeg
if not meg and not eeg:
raise ValueError('meg or eeg (or both) must be True')
path, fname = op.split(fname)
if not op.splitext(fname)[1] == '.fif':
raise ValueError('Forward name does not end with .fif')
path = op.abspath(path)
# deal with mindist
if mindist is not None:
if isinstance(mindist, str):
if not mindist.lower() == 'all':
raise ValueError('mindist, if string, must be "all"')
mindist = ['--all']
else:
mindist = ['--mindist', '%g' % mindist]
# src, spacing, bem
for element, name in zip((src, spacing, bem), ("src", "spacing", "bem")):
if element is not None:
_validate_type(element, "str", name, "string or None")
# put together the actual call
cmd = ['mne_do_forward_solution',
'--subject', subject,
'--meas', meas,
'--fwd', fname,
'--destdir', path]
if src is not None:
cmd += ['--src', src]
if spacing is not None:
if spacing.isdigit():
pass # spacing in mm
else:
# allow both "ico4" and "ico-4" style values
match = re.match(r"(oct|ico)-?(\d+)$", spacing)
if match is None:
raise ValueError("Invalid spacing parameter: %r" % spacing)
spacing = '-'.join(match.groups())
cmd += ['--spacing', spacing]
if mindist is not None:
cmd += mindist
if bem is not None:
cmd += ['--bem', bem]
if mri is not None:
cmd += ['--mri', '%s' % mri]
if trans is not None:
cmd += ['--trans', '%s' % trans]
if not meg:
cmd.append('--eegonly')
if not eeg:
cmd.append('--megonly')
if fixed:
cmd.append('--fixed')
if grad:
cmd.append('--grad')
if mricoord:
cmd.append('--mricoord')
if overwrite:
cmd.append('--overwrite')
env = os.environ.copy()
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
env['SUBJECTS_DIR'] = subjects_dir
try:
logger.info('Running forward solution generation command with '
'subjects_dir %s' % subjects_dir)
run_subprocess(cmd, env=env)
except Exception:
raise
else:
fwd = read_forward_solution(op.join(path, fname), verbose=False)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
return fwd
@verbose
def average_forward_solutions(fwds, weights=None):
"""Average forward solutions.
Parameters
----------
fwds : list of Forward
Forward solutions to average. Each entry (dict) should be a
forward solution.
weights : array | None
Weights to apply to each forward solution in averaging. If None,
forward solutions will be equally weighted. Weights must be
non-negative, and will be adjusted to sum to one.
Returns
-------
fwd : Forward
The averaged forward solution.
"""
# check for fwds being a list
_validate_type(fwds, list, "fwds")
if not len(fwds) > 0:
raise ValueError('fwds must not be empty')
# check weights
if weights is None:
weights = np.ones(len(fwds))
weights = np.asanyarray(weights) # in case it's a list, convert it
if not np.all(weights >= 0):
raise ValueError('weights must be non-negative')
if not len(weights) == len(fwds):
raise ValueError('weights must be None or the same length as fwds')
w_sum = np.sum(weights)
if not w_sum > 0:
raise ValueError('weights cannot all be zero')
weights /= w_sum
# check our forward solutions
for fwd in fwds:
# check to make sure it's a forward solution
_validate_type(fwd, dict, "each entry in fwds", "dict")
# check to make sure the dict is actually a fwd
check_keys = ['info', 'sol_grad', 'nchan', 'src', 'source_nn', 'sol',
'source_rr', 'source_ori', 'surf_ori', 'coord_frame',
'mri_head_t', 'nsource']
if not all(key in fwd for key in check_keys):
raise KeyError('forward solution dict does not have all standard '
'entries, cannot compute average.')
# check forward solution compatibility
if any(fwd['sol'][k] != fwds[0]['sol'][k]
for fwd in fwds[1:] for k in ['nrow', 'ncol']):
raise ValueError('Forward solutions have incompatible dimensions')
if any(fwd[k] != fwds[0][k] for fwd in fwds[1:]
for k in ['source_ori', 'surf_ori', 'coord_frame']):
raise ValueError('Forward solutions have incompatible orientations')
# actually average them (solutions and gradients)
fwd_ave = deepcopy(fwds[0])
fwd_ave['sol']['data'] *= weights[0]
fwd_ave['_orig_sol'] *= weights[0]
for fwd, w in zip(fwds[1:], weights[1:]):
fwd_ave['sol']['data'] += w * fwd['sol']['data']
fwd_ave['_orig_sol'] += w * fwd['_orig_sol']
if fwd_ave['sol_grad'] is not None:
fwd_ave['sol_grad']['data'] *= weights[0]
fwd_ave['_orig_sol_grad'] *= weights[0]
for fwd, w in zip(fwds[1:], weights[1:]):
fwd_ave['sol_grad']['data'] += w * fwd['sol_grad']['data']
fwd_ave['_orig_sol_grad'] += w * fwd['_orig_sol_grad']
return fwd_ave
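# Hedged usage sketch (not part of the module): averaging two compatible
# forward solutions, e.g. computed for two head positions, with 60/40
# weights (normalized internally to sum to one).
def _average_forward_example(fwd_a, fwd_b):
    """Average two forward solutions with unequal weights."""
    return average_forward_solutions([fwd_a, fwd_b], weights=[0.6, 0.4])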
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
main.go
|
package main
import (
"context"
"flag"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"path/filepath"
"strings"
"time"
"github.com/adnaan/gomodest-starter/app"
"github.com/go-chi/chi"
"github.com/go-chi/valve"
)
// fileServer conveniently sets up a http.FileServer handler to serve
// static files from a http.FileSystem.
func fileServer(r chi.Router, path string, root http.FileSystem) {
if strings.ContainsAny(path, "{}*") {
panic("FileServer does not permit any URL parameters.")
}
if path != "/" && path[len(path)-1] != '/' {
r.Get(path, http.RedirectHandler(path+"/", 301).ServeHTTP)
path += "/"
}
path += "*"
r.Get(path, func(w http.ResponseWriter, r *http.Request) {
rctx := chi.RouteContext(r.Context())
pathPrefix := strings.TrimSuffix(rctx.RoutePattern(), "/*")
fs := http.StripPrefix(pathPrefix, http.FileServer(root))
fs.ServeHTTP(w, r)
})
}
func main() {
// Our graceful valve shut-off package to manage code preemption and
// shutdown signaling.
vv := valve.New()
baseCtx := vv.Context()
configFile := flag.String("config", "", "path to config file")
envPrefix := os.Getenv("ENV_PREFIX")
if envPrefix == "" {
envPrefix = "app"
}
flag.Parse()
cfg, err := app.LoadConfig(*configFile, envPrefix)
if err != nil {
log.Fatal(err)
}
r := app.Router(baseCtx, cfg)
workDir, _ := os.Getwd()
public := http.Dir(filepath.Join(workDir, "./", "public", "assets"))
fileServer(r, "/static", public)
srv := &http.Server{
ReadTimeout: time.Duration(cfg.ReadTimeoutSecs) * time.Second,
WriteTimeout: time.Duration(cfg.WriteTimeoutSecs) * time.Second,
Addr: fmt.Sprintf(":%d", cfg.Port),
Handler: r,
}
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for range c {
// sig is a ^C, handle it
fmt.Println("shutting down..")
// first, shut down the valve
vv.Shutdown(20 * time.Second)
// create context with timeout
ctx, cancel := context.WithTimeout(baseCtx, 20*time.Second)
defer cancel()
// start http shutdown
srv.Shutdown(ctx)
// verify, in worst case call cancel via defer
select {
case <-time.After(21 * time.Second):
fmt.Println("not all connections done")
case <-ctx.Done():
}
}
}()
log.Println("http server is listening...")
srv.ListenAndServe()
}
|
[
"\"ENV_PREFIX\""
] |
[] |
[
"ENV_PREFIX"
] |
[]
|
["ENV_PREFIX"]
|
go
| 1 | 0 | |
Algorithms/Implementation/Beautiful Triplets/Solution.java
|
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
public class Solution {
// Complete the beautifulTriplets function below.
static int beautifulTriplets(int d, int[] arr) {
HashMap<Integer, Integer> map = new HashMap<>();
for (int i = 0; i < arr.length; i++) {
if (map.containsKey(arr[i])) {
map.put(arr[i], map.get(arr[i]) + 1);
} else {
map.put(arr[i], 1);
}
}
int counter = 0;
// i < j < k
// find = d + a[i]
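// For each distinct arr[i] (the input is assumed sorted, per the problem
// statement), every combination of an occurrence of arr[i], arr[i] + d and
// arr[i] + 2d forms a beautiful triplet, hence the product of the three
// frequencies below.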
for (int i = 0; i < arr.length; i++) {
if (i == 0 || arr[i] != arr[i - 1]) {
int j = d + arr[i];
int k = d + j;
if (map.containsKey(j) && map.containsKey(k)) {
counter += map.get(arr[i]) * map.get(j) * map.get(k);
}
}
}
return counter;
}
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
String[] nd = scanner.nextLine().split(" ");
int n = Integer.parseInt(nd[0]);
int d = Integer.parseInt(nd[1]);
int[] arr = new int[n];
String[] arrItems = scanner.nextLine().split(" ");
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int i = 0; i < n; i++) {
int arrItem = Integer.parseInt(arrItems[i]);
arr[i] = arrItem;
}
int result = beautifulTriplets(d, arr);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
bufferedWriter.close();
scanner.close();
}
}
|
[
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
java
| 1 | 0 | |
pkg/object/gs.go
|
//go:build !nogs
// +build !nogs
/*
* JuiceFS, Copyright 2018 Juicedata, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package object
import (
"context"
"fmt"
"io"
"net/url"
"os"
"strings"
"cloud.google.com/go/compute/metadata"
"cloud.google.com/go/storage"
"github.com/pkg/errors"
"golang.org/x/oauth2/google"
"google.golang.org/api/iterator"
)
type gs struct {
DefaultObjectStorage
client *storage.Client
bucket string
region string
pageToken string
}
func (g *gs) String() string {
return fmt.Sprintf("gs://%s/", g.bucket)
}
func (g *gs) Create() error {
// check if the bucket already exists
if objs, err := g.List("", "", 1); err == nil && len(objs) > 0 {
return nil
}
projectID := os.Getenv("GOOGLE_CLOUD_PROJECT")
if projectID == "" {
projectID, _ = metadata.ProjectID()
}
if projectID == "" {
cred, err := google.FindDefaultCredentials(context.Background())
if err == nil {
projectID = cred.ProjectID
}
}
if projectID == "" {
return errors.New("GOOGLE_CLOUD_PROJECT environment variable must be set")
}
// Guess region when region is not provided
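// (e.g. zone "us-central1-a" becomes region "us-central1" once the two-character "-a" suffix is trimmed)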
if g.region == "" {
zone, err := metadata.Zone()
if err == nil && len(zone) > 2 {
g.region = zone[:len(zone)-2]
}
if g.region == "" {
return errors.New("Could not guess region to create bucket")
}
}
err := g.client.Bucket(g.bucket).Create(ctx, projectID, &storage.BucketAttrs{
Name: g.bucket,
StorageClass: "regional",
Location: g.region,
})
if err != nil && strings.Contains(err.Error(), "You already own this bucket") {
return nil
}
return err
}
func (g *gs) Head(key string) (Object, error) {
attrs, err := g.client.Bucket(g.bucket).Object(key).Attrs(ctx)
if err != nil {
if err == storage.ErrObjectNotExist {
err = os.ErrNotExist
}
return nil, err
}
return &obj{
key,
attrs.Size,
attrs.Updated,
strings.HasSuffix(key, "/"),
}, nil
}
func (g *gs) Get(key string, off, limit int64) (io.ReadCloser, error) {
reader, err := g.client.Bucket(g.bucket).Object(key).NewRangeReader(ctx, off, limit)
if err != nil {
return nil, err
}
return reader, nil
}
func (g *gs) Put(key string, data io.Reader) error {
writer := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx)
_, err := io.Copy(writer, data)
if err != nil {
return err
}
return writer.Close()
}
func (g *gs) Copy(dst, src string) error {
srcObj := g.client.Bucket(g.bucket).Object(src)
dstObj := g.client.Bucket(g.bucket).Object(dst)
_, err := dstObj.CopierFrom(srcObj).Run(ctx)
return err
}
func (g *gs) Delete(key string) error {
if err := g.client.Bucket(g.bucket).Object(key).Delete(ctx); err != storage.ErrObjectNotExist {
return err
}
return nil
}
func (g *gs) List(prefix, marker string, limit int64) ([]Object, error) {
if marker != "" && g.pageToken == "" {
// last page
return nil, nil
}
objectIterator := g.client.Bucket(g.bucket).Objects(ctx, &storage.Query{Prefix: prefix})
pager := iterator.NewPager(objectIterator, int(limit), g.pageToken)
var entries []*storage.ObjectAttrs
nextPageToken, err := pager.NextPage(&entries)
if err != nil {
return nil, err
}
g.pageToken = nextPageToken
n := len(entries)
objs := make([]Object, n)
for i := 0; i < n; i++ {
item := entries[i]
objs[i] = &obj{item.Name, item.Size, item.Updated, strings.HasSuffix(item.Name, "/")}
}
return objs, nil
}
func newGS(endpoint, accessKey, secretKey string) (ObjectStorage, error) {
if !strings.Contains(endpoint, "://") {
endpoint = fmt.Sprintf("gs://%s", endpoint)
}
uri, err := url.ParseRequestURI(endpoint)
if err != nil {
return nil, errors.Errorf("Invalid endpoint: %v, error: %v", endpoint, err)
}
hostParts := strings.Split(uri.Host, ".")
bucket := hostParts[0]
var region string
if len(hostParts) > 1 {
region = hostParts[1]
}
client, err := storage.NewClient(ctx)
if err != nil {
return nil, err
}
return &gs{client: client, bucket: bucket, region: region}, nil
}
func init() {
Register("gs", newGS)
}
|
[
"\"GOOGLE_CLOUD_PROJECT\""
] |
[] |
[
"GOOGLE_CLOUD_PROJECT"
] |
[]
|
["GOOGLE_CLOUD_PROJECT"]
|
go
| 1 | 0 | |
nflfan/config.py
|
from __future__ import absolute_import, division, print_function
import codecs
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import copy
import os
import os.path as path
import sys
import toml
import nfldb
import nflfan.provider as provider
import nflfan.score as score
_xdg_home = os.getenv('XDG_CONFIG_HOME')
"""XDG user configuration directory."""
if not _xdg_home:
home = os.getenv('HOME')
if not home:
_xdg_home = ''
else:
_xdg_home = path.join(home, '.config')
_data_paths = [
path.join(_xdg_home, 'nflfan'),
path.join(sys.prefix, 'share', 'nflfan'),
]
"""A list of paths to check for loading data files."""
builtin_providers = {
'yahoo': provider.Yahoo,
'espn': provider.ESPN,
}
"""The default set of providers defined by nflfan."""
def load_config(providers=builtin_providers, file_path=''):
"""
Reads and loads the configuration file containing fantasy football
league information.
The return value is a dictionary mapping provider name (e.g.,
`yahoo`) to a list of leagues for that provider. Each league
is guaranteed to have at least a `name`, `season`, `phase`
and `scoring` attributes filled in as values that are not
`None`. Providers also have their own specific mandatory fields:
If no configuration file can be found, then an `IOError` is raised.
"""
def prov_leagues(d):
return ((k, d[k]) for k in sorted(d.keys()) if isinstance(d[k], dict))
schema = {
'all': {
'req': provider.Provider.conf_required,
'opt': provider.Provider.conf_optional,
},
}
for prov in providers.values():
schema[prov.provider_name] = {
'req': prov.conf_required, 'opt': prov.conf_optional,
}
raw = toml.loads(get_data('config.toml', file_path=file_path))
scoring = merge(raw['scoring'])
conf = {'leagues': OrderedDict()}
for pname in sorted(raw.keys()):
prov = raw[pname]
if pname == 'scoring':
continue
if not isinstance(prov, dict):
conf[pname] = prov
continue
conf['leagues'][pname] = OrderedDict()
for lg_name, lg in prov_leagues(prov):
lg['league_name'] = lg_name
lg['provider_class'] = providers[pname]
apply_schema(schema, scoring, pname, prov, lg)
lg = provider.League(lg['season'], lg['phase'], lg['league_id'],
pname, lg_name, lg['scoring'], lg)
conf['leagues'][pname][lg_name] = lg
return conf
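# Usage sketch (illustrative only; assumes a config.toml is reachable via the
# default data paths):
#
#     conf = load_config()
#     for prov_name, leagues in conf['leagues'].items():
#         for lg_name, league in leagues.items():
#             print(prov_name, lg_name)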
def merge(s):
"""
Given a nesting of TOML dictionaries, return a flat list of each
scheme in `s`. This applies the inheritance used in configuration
files so that each scheme has each attribute fully resolved.
"""
def settings_and_subschemes(d, defaults):
settings, subs = {}, {}
for k, v in d.items():
if isinstance(v, dict):
subs[k] = v
else:
settings[k] = v
for k, v in defaults.items():
if k not in settings:
settings[k] = v
return copy.deepcopy(settings), subs
def merge(d, defaults, name):
settings, subs = settings_and_subschemes(d, defaults)
schemes[name] = settings
for subname, subscheme in subs.items():
fullname = '%s.%s' % (name, subname)
merge(subscheme, settings, fullname)
schemes = {}
for name, scheme in s.items():
merge(scheme, {}, name)
return schemes
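# Worked example (illustrative scheme names): given a [scoring] section such as
#     [scoring.standard]
#     passing_yds = 0.04
#     [scoring.standard.ppr]
#     reception = 1.0
# merge(raw['scoring']) returns
#     {'standard': {'passing_yds': 0.04},
#      'standard.ppr': {'passing_yds': 0.04, 'reception': 1.0}}
# i.e. each sub-scheme inherits every setting of its parent scheme.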
def get_data(name, file_path=''):
"""
Reads the contents of a configuration data file with name
`name`. If `file_path` is given, then it is used if it exists.
If no file can be found, then an `IOError` is raised.
"""
if file_path:
paths = [file_path] + _data_paths
else:
paths = _data_paths
for fp in map(lambda p: path.join(p, name), paths):
try:
with codecs.open(fp) as fp:
return fp.read()
except IOError:
pass
raise IOError("Could not find configuration file %s" % name)
def cache_path():
"""
Returns a file path to the cache directory. If a cache directory
does not exist, one is created.
If there is a problem creating a cache directory, an `IOError`
exception is raised.
"""
for fp in _data_paths:
if os.access(fp, os.R_OK):
cdir = path.join(fp, 'data')
if not os.access(cdir, os.R_OK):
try:
os.mkdir(cdir)
except IOError as e:
raise IOError(str(e) + ' (please create a cache directory)')
return cdir
raise IOError('could not find or create a cache directory')
def apply_schema(schema, scoring, prov_name, prov, lg):
"""
Applies the scheme for the provider `prov_name` to the league `lg`
while using `prov` as a dictionary of default values for `lg`.
`scoring` should be a dictionary mapping names to scoring schemes.
The `schema` should be a dictionary mapping provider name to its
set of required and optional fields. Namely, each value should be
a dictionary with two keys: `req` and `opt`, where each correspond
to a list of required and optional fields, respectively. There
must also be an `all` key in `schema` that specifies required and
optional fields for every provider.
If a required field in the provider's scheme is missing, then a
`ValueError` is raised.
"""
def get_scoring(ref):
try:
return score.ScoreSchema(ref, scoring[ref])
except KeyError:
raise KeyError("Scoring scheme %s does not exist." % ref)
def val(key, required=False):
v = lg.get(key, prov.get(key, None))
if required and v is None:
raise ValueError("Provider %s must have %s." % (prov_name, key))
elif key == 'scoring':
return get_scoring(v)
elif key == 'phase':
v = nfldb.Enums.season_phase[v.lower().title()]
return v
for r in schema['all']['req'] + schema[prov_name]['req']:
lg[r] = val(r, required=True)
for o in schema['all']['opt'] + schema[prov_name]['opt']:
lg[o] = val(o)
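# Illustrative shape of the `schema` argument (field names hypothetical; the real
# lists come from each provider class's conf_required/conf_optional attributes):
#     schema = {
#         'all':   {'req': ['season', 'phase', 'scoring'], 'opt': []},
#         'yahoo': {'req': ['league_id'], 'opt': []},
#     }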
|
[] |
[] |
[
"HOME",
"XDG_CONFIG_HOME"
] |
[]
|
["HOME", "XDG_CONFIG_HOME"]
|
python
| 2 | 0 | |
simplerest/wsgi.py
|
"""
WSGI config for simplerest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simplerest.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
generating_scripts/generate_by_word.py
|
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
from dante_by_word.data_preparation import build_vocab
from dante_by_word.text_processing import clean_comedy, prettify_text, special_tokens
from dante_by_word.generate_dante import generate_text
from utils import save_vocab, load_vocab
working_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'dante_by_word')
divine_comedy_file = os.path.join(os.path.dirname(working_dir), "divina_commedia", "divina_commedia_accent_UTF-8.txt")
with open(divine_comedy_file,"r") as f:
divine_comedy = f.read()
divine_comedy = clean_comedy(divine_comedy, special_tokens)
vocab, idx2word, word2idx = build_vocab(divine_comedy)
# Path where the vocab is saved
logs_dir = os.path.join(working_dir, 'logs')
os.makedirs(logs_dir, exist_ok = True)
vocab_file = os.path.join(logs_dir, 'vocab.json')
vocab, idx2word, word2idx = load_vocab(vocab_file)
# Length of the vocabulary
vocab_size = len(vocab)
# Path where the model is saved
models_dir = os.path.join(working_dir, 'models')
os.makedirs(models_dir, exist_ok = True)
model_file = os.path.join(models_dir, "dante_by_word_model.h5")
model = tf.keras.models.load_model(model_file)
SEQ_LENGTH = model.get_layer('embedding').output.shape[1]
EMBEDDING_DIM = model.get_layer('embedding').output.shape[2]
for l in model.layers:
if l.name == 'first_lstm':
RNN_TYPE = '2lstm'
break
if l.name == 'last_lstm':
RNN_TYPE = 'lstm'
break
if l.name == 'first_gru':
RNN_TYPE = '2gru'
break
if l.name == 'last_gru':
RNN_TYPE = 'gru'
break
if 'lstm' in RNN_TYPE:
RNN_UNITS = model.get_layer('last_lstm').output.shape[-1]
if 'gru' in RNN_TYPE:
RNN_UNITS = model.get_layer('last_gru').output.shape[-1]
model.summary()
model_filename = 'model_by_word_seq{}_emb{}_{}{}'.format(SEQ_LENGTH, EMBEDDING_DIM, RNN_TYPE, RNN_UNITS)
print("\nMODEL: {}\n".format(model_filename))
os.makedirs(os.path.join(logs_dir, model_filename), exist_ok = True)
output_file = os.path.join(logs_dir, model_filename, "output.txt")
raw_output_file = os.path.join(logs_dir, model_filename, "raw_output.txt")
divine_comedy = divine_comedy.split()
# index_eoc = divine_comedy.index(special_tokens['END_OF_CANTO']) + 1
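# Seed the generator with the SEQ_LENGTH words that precede a randomly chosen
# end-of-canto token, so generation starts fresh at a canto boundary.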
indexes = [i for i, x in enumerate(divine_comedy) if x == special_tokens['END_OF_CANTO'] and i > SEQ_LENGTH]
index_eoc = np.random.choice(indexes) + 1
start_idx = max(0, index_eoc - SEQ_LENGTH)
start_string = ' '.join(divine_comedy[start_idx:index_eoc])
#print(start_string)
generated_text = generate_text(model, special_tokens, vocab_size, word2idx, idx2word, SEQ_LENGTH, start_string, temperature=1.0)
#print(prettify_text(generated_text, special_tokens))
with open(output_file,"w") as f:
f.write(prettify_text(generated_text, special_tokens))
with open(raw_output_file,"w") as f:
f.write(generated_text)
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
test/e2e/vsphere_volume_diskformat.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"os"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stype "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
)
/*
Test to verify diskformat specified in storage-class is honored during volume creation.
Valid and supported options are eagerzeroedthick, zeroedthick and thin
Steps
1. Create StorageClass with diskformat set to valid type
2. Create PVC which uses the StorageClass created in step 1.
3. Wait for PV to be provisioned.
4. Wait for PVC's status to become Bound
5. Create pod using PVC on specific node.
6. Wait for Disk to be attached to the node.
7. Get node VM's devices and find PV's Volume Disk.
8. Get Backing Info of the Volume Disk and obtain EagerlyScrub and ThinProvisioned
9. Based on the value of EagerlyScrub and ThinProvisioned, verify diskformat is correct.
10. Delete pod and Wait for Volume Disk to be detached from the Node.
11. Delete PVC, PV and Storage Class
*/
var _ = framework.KubeDescribe("Volume Disk Format [Volumes]", func() {
f := framework.NewDefaultFramework("volume-disk-format")
var (
client clientset.Interface
namespace string
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
nodeLabelValue string
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
client = f.ClientSet
namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) != 0 {
nodeName = nodeList.Items[0].Name
} else {
framework.Failf("Unable to find ready and schedulable Node")
}
if !isNodeLabeled {
nodeLabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
nodeKeyValueLabel = make(map[string]string)
nodeKeyValueLabel["vsphere_e2e_label"] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(client, nodeName, "vsphere_e2e_label", nodeLabelValue)
isNodeLabeled = true
}
})
AddCleanupAction(func() {
if len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(client, nodeName, "vsphere_e2e_label")
}
})
It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() {
By("Invoking Test for diskformat: eagerzeroedthick")
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "eagerzeroedthick")
})
It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func() {
By("Invoking Test for diskformat: zeroedthick")
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "zeroedthick")
})
It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func() {
By("Invoking Test for diskformat: thin")
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "thin")
})
})
func invokeTest(f *framework.Framework, client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, diskFormat string) {
framework.Logf("Invoking Test for DiskFormat: %s", diskFormat)
scParameters := make(map[string]string)
scParameters["diskformat"] = diskFormat
By("Creating Storage Class With DiskFormat")
storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters)
storageclass, err := client.StorageV1beta1().StorageClasses().Create(storageClassSpec)
Expect(err).NotTo(HaveOccurred())
defer client.StorageV1beta1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec)
Expect(err).NotTo(HaveOccurred())
defer func() {
client.CoreV1().PersistentVolumeClaims(namespace).Delete(pvclaimSpec.Name, nil)
}()
By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
// Get new copy of the claim
pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bound PV
pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
/*
PV is required to be attached to the Node, so that using the govmomi API we can grab the Disk's Backing Info
to check the EagerlyScrub and ThinProvisioned properties
*/
By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node
podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nodeKeyValueLabel, "while true ; do sleep 2 ; done")
pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
Expect(err).NotTo(HaveOccurred())
vsp, err := vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
verifyVSphereDiskAttached(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
By("Waiting for pod to be running")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
Expect(verifyDiskFormat(nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed")
var volumePaths []string
volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
By("Delete pod and wait for volume to be detached from node")
deletePodAndWaitForVolumeToDetach(f, client, pod, vsp, nodeName, volumePaths)
}
func verifyDiskFormat(nodeName string, pvVolumePath string, diskFormat string) bool {
By("Verifing disk format")
eagerlyScrub := false
thinProvisioned := false
diskFound := false
pvvmdkfileName := filepath.Base(pvVolumePath) + filepath.Ext(pvVolumePath)
govMoMiClient, err := vsphere.GetgovmomiClient(nil)
Expect(err).NotTo(HaveOccurred())
f := find.NewFinder(govMoMiClient.Client, true)
ctx, _ := context.WithCancel(context.Background())
vm, err := f.VirtualMachine(ctx, os.Getenv("VSPHERE_WORKING_DIR")+nodeName)
Expect(err).NotTo(HaveOccurred())
vmDevices, err := vm.Device(ctx)
Expect(err).NotTo(HaveOccurred())
disks := vmDevices.SelectByType((*types.VirtualDisk)(nil))
for _, disk := range disks {
backing := disk.GetVirtualDevice().Backing.(*types.VirtualDiskFlatVer2BackingInfo)
backingFileName := filepath.Base(backing.FileName) + filepath.Ext(backing.FileName)
if backingFileName == pvvmdkfileName {
diskFound = true
if backing.EagerlyScrub != nil {
eagerlyScrub = *backing.EagerlyScrub
}
if backing.ThinProvisioned != nil {
thinProvisioned = *backing.ThinProvisioned
}
break
}
}
Expect(diskFound).To(BeTrue(), "Failed to find disk")
isDiskFormatCorrect := false
if diskFormat == "eagerzeroedthick" {
isDiskFormatCorrect = eagerlyScrub && !thinProvisioned
} else if diskFormat == "zeroedthick" {
isDiskFormatCorrect = !eagerlyScrub && !thinProvisioned
} else if diskFormat == "thin" {
isDiskFormatCorrect = !eagerlyScrub && thinProvisioned
}
return isDiskFormatCorrect
}
|
[
"\"VSPHERE_WORKING_DIR\""
] |
[] |
[
"VSPHERE_WORKING_DIR"
] |
[]
|
["VSPHERE_WORKING_DIR"]
|
go
| 1 | 0 | |
hiren/settings.py
|
"""
Django settings for hiren project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import json
import datetime
import raven
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
try:
with open(BASE_DIR + '/' + 'config.local.json') as f:
JSON_DATA = json.load(f)
except FileNotFoundError:
with open(BASE_DIR + '/' + 'config.json') as f:
JSON_DATA = json.load(f)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', JSON_DATA['secret_key'])
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', False)
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
"compressor",
'aurora'
]
if DEBUG is False:
INSTALLED_APPS += [
'raven.contrib.django.raven_compat',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'hiren.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hiren.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
if 'TRAVIS' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'travisci',
'USER': 'postgres',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
else:
DATABASES = {
'default': {
'NAME': JSON_DATA['db_name'],
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': JSON_DATA['db_user'],
'PASSWORD': JSON_DATA['db_password'],
'HOST': 'localhost',
'PORT': '',
'ATOMIC': True,
'CONN_MAX_AGE': 600,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Dhaka'
USE_I18N = False
USE_L10N = False
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
'compressor.finders.CompressorFinder',
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# django compress
COMPRESS_ROOT = os.path.join(BASE_DIR, "static")
COMPRESS_OFFLINE = True
# logger
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue'
}
},
'formatters': {
'main_formatter': {
'format': '%(levelname)s:%(name)s: %(message)s '
'(%(asctime)s; %(filename)s:%(lineno)d)',
'datefmt': "%Y-%m-%d %H:%M:%S",
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'main_formatter',
},
'production_file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': BASE_DIR + '/logs/main.log',
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 7,
'formatter': 'main_formatter',
'filters': ['require_debug_false'],
},
'debug_file': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': BASE_DIR + '/logs/main_debug.log',
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 7,
'formatter': 'main_formatter',
'filters': ['require_debug_true'],
},
'null': {
"class": 'logging.NullHandler',
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', 'console'],
'level': 'ERROR',
'propagate': True,
},
'django': {
'handlers': ['null', ],
},
'py.warnings': {
'handlers': ['null', ],
},
'': {
'handlers': ['console', 'production_file', 'debug_file'],
'level': "DEBUG",
},
}
}
# Login settings
LOGIN_URL = '/'
# django-debug-toolbar
INTERNAL_IPS = ['127.0.0.1']
# sentry.io
if not DEBUG:
RAVEN_CONFIG = {
'dsn': JSON_DATA['sentry_dsn'],
# If you are using git, you can also automatically configure the
# release based on the git info.
'release': raven.fetch_git_sha(os.path.dirname(os.pardir)),
}
|
[] |
[] |
[
"SECRET_KEY",
"DEBUG"
] |
[]
|
["SECRET_KEY", "DEBUG"]
|
python
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'oauthlocal.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
mutate.go
|
package main
import (
"encoding/json"
"fmt"
"log"
"os"
"strings"
"k8s.io/api/admission/v1beta1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func cleanName(name string) string {
return strings.ReplaceAll(name, "_", "-")
}
func useExternalVault(pod *v1.Pod) (bool, string) {
if os.Getenv("VAULT_ADDR_HTTPS") == "" {
return false, ""
}
// if val, ok := pod.ObjectMeta.Labels["sidecar.istio.io/inject"]; ok && val == "false" {
if _, ok := pod.ObjectMeta.Labels["workflows.argoproj.io/workflow"]; ok {
log.Printf("Will use external Vault address for workflow %s", pod.Name)
return true, os.Getenv("VAULT_ADDR_HTTPS")
}
return false, ""
}
func shouldInject(pod *v1.Pod) bool {
// Inject Minio credentials into notebook pods (condition: has notebook-name label)
if _, ok := pod.ObjectMeta.Labels["notebook-name"]; ok {
log.Printf("Found notebook name for %s/%s; injecting", pod.Namespace, pod.Name)
return true
}
// Inject Minio credentials into argo workflow pods (condition: has workflows.argoproj.io/workflow label)
if _, ok := pod.ObjectMeta.Labels["workflows.argoproj.io/workflow"]; ok {
log.Printf("Found argo workflow name for %s/%s; injecting", pod.Namespace, pod.Name)
return true
}
// Inject Minio credentials into pod requesting credentials (condition: has add-default-minio-creds annotation)
if _, ok := pod.ObjectMeta.Annotations["data.statcan.gc.ca/inject-minio-creds"]; ok {
log.Printf("Found minio credential annotation on %s/%s; injecting", pod.Namespace, pod.Name)
return true
}
return false
}
func mutate(request v1beta1.AdmissionRequest, instances []Instance) (v1beta1.AdmissionResponse, error) {
response := v1beta1.AdmissionResponse{}
// Default response
response.Allowed = true
response.UID = request.UID
// Decode the pod object
var err error
pod := v1.Pod{}
if err := json.Unmarshal(request.Object.Raw, &pod); err != nil {
return response, fmt.Errorf("unable to decode Pod %w", err)
}
// Identify the data classification of the pod, defaulting to unclassified if unset
dataClassification := "unclassified"
if val, ok := pod.ObjectMeta.Labels["data.statcan.gc.ca/classification"]; ok {
dataClassification = val
}
if shouldInject(&pod) {
patch := v1beta1.PatchTypeJSONPatch
response.PatchType = &patch
response.AuditAnnotations = map[string]string{
"minio-admission-controller": "Added minio credentials",
}
// Handle https://github.com/StatCan/aaw-minio-credential-injector/issues/10
var roleName string
if pod.Namespace != "" {
roleName = cleanName("profile-" + pod.Namespace)
} else if request.Namespace != "" {
roleName = cleanName("profile-" + request.Namespace)
} else {
return response, fmt.Errorf("pod and request namespace were empty. Cannot determine the namespace.")
}
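// Note: "~1" in the paths below is the JSON Pointer (RFC 6901) escape for "/",
// so "vault.hashicorp.com~1agent-inject" targets the
// "vault.hashicorp.com/agent-inject" annotation key.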
patches := []map[string]interface{}{
{
"op": "add",
"path": "/metadata/annotations/vault.hashicorp.com~1agent-inject",
"value": "true",
},
{
"op": "add",
"path": "/metadata/annotations/vault.hashicorp.com~1agent-pre-populate",
"value": "false",
},
{
"op": "add",
"path": "/metadata/annotations/vault.hashicorp.com~1role",
"value": roleName,
},
}
if useExternal, vaultAddr := useExternalVault(&pod); useExternal {
patches = append(patches, map[string]interface{}{
"op": "add",
"path": fmt.Sprintf("/metadata/annotations/vault.hashicorp.com~1service"),
"value": vaultAddr,
})
}
for _, instance := range instances {
// Only apply to the relevant instances
if instance.Classification != dataClassification {
continue
}
instanceId := strings.ReplaceAll(instance.Name, "_", "-")
patches = append(patches, map[string]interface{}{
"op": "add",
"path": fmt.Sprintf("/metadata/annotations/vault.hashicorp.com~1agent-inject-secret-%s", instanceId),
"value": fmt.Sprintf("%s/keys/%s", instance.Name, roleName),
})
patches = append(patches, map[string]interface{}{
"op": "add",
"path": fmt.Sprintf("/metadata/annotations/vault.hashicorp.com~1agent-inject-template-%s", instanceId),
"value": fmt.Sprintf(`
{{- with secret "%s/keys/%s" }}
export MINIO_URL="%s"
export MINIO_ACCESS_KEY="{{ .Data.accessKeyId }}"
export MINIO_SECRET_KEY="{{ .Data.secretAccessKey }}"
export AWS_ACCESS_KEY_ID="{{ .Data.accessKeyId }}"
export AWS_SECRET_ACCESS_KEY="{{ .Data.secretAccessKey }}"
{{- end }}
`, instance.Name, roleName, instance.ServiceUrl),
})
patches = append(patches, map[string]interface{}{
"op": "add",
"path": fmt.Sprintf("/metadata/annotations/vault.hashicorp.com~1agent-inject-secret-%s.json", instanceId),
"value": fmt.Sprintf("%s/keys/%s", instance.Name, roleName),
})
patches = append(patches, map[string]interface{}{
"op": "add",
"path": fmt.Sprintf("/metadata/annotations/vault.hashicorp.com~1agent-inject-template-%s.json", instanceId),
"value": fmt.Sprintf(`
{{- with secret "%s/keys/%s" }}
{
"MINIO_URL": "%s",
"MINIO_ACCESS_KEY": "{{ .Data.accessKeyId }}",
"MINIO_SECRET_KEY": "{{ .Data.secretAccessKey }}",
"AWS_ACCESS_KEY_ID": "{{ .Data.accessKeyId }}",
"AWS_SECRET_ACCESS_KEY": "{{ .Data.secretAccessKey }}"
}
{{- end }}
`, instance.Name, roleName, instance.ServiceUrl),
})
}
response.Patch, err = json.Marshal(patches)
if err != nil {
return response, err
}
response.Result = &metav1.Status{
Status: metav1.StatusSuccess,
}
} else {
log.Printf("Not injecting the pod %s/%s", pod.Namespace, pod.Name)
}
return response, nil
}
|
[
"\"VAULT_ADDR_HTTPS\"",
"\"VAULT_ADDR_HTTPS\""
] |
[] |
[
"VAULT_ADDR_HTTPS"
] |
[]
|
["VAULT_ADDR_HTTPS"]
|
go
| 1 | 0 | |
wsgi.py
|
"""
WSGI config for beaker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/worker/test_run.py
|
import os
import os.path
from datetime import datetime
from paperboy.worker import run
from paperboy.config import NotebookConfig, NotebookMetadataConfig
from paperboy.config import JobConfig, JobMetadataConfig
from paperboy.config import ReportConfig, ReportMetadataConfig
class TestRun:
def test_runlocal(self):
os.environ['IEX_TOKEN'] = 'Tpk_ecc89ddf30a611e9958142010a80043c'
os.makedirs(os.path.abspath(os.path.expanduser('~/Downloads')), exist_ok=True)
now = datetime.now()
notebook = NotebookConfig(name='MyNotebook',
id='Notebook-1',
meta=NotebookMetadataConfig(
notebook='{\n "cells": [\n {\n "cell_type": "code",\n "execution_count": null,\n "metadata": {\n "tags": [\n "parameters"\n ]\n },\n "outputs": [],\n "source": [\n "ticker = \'aapl\'"\n ]\n },\n {\n "cell_type": "code",\n "execution_count": null,\n "metadata": {},\n "outputs": [],\n "source": [\n "ticker = ticker.upper()\\n",\n "\\n",\n "from IPython.display import HTML\\n",\n "HTML(\'<h1>Report for {}</h1>\'.format(ticker))"\n ]\n },\n {\n "cell_type": "code",\n "execution_count": null,\n "metadata": {},\n "outputs": [],\n "source": [\n "%matplotlib inline\\n",\n "import pyEX\\n",\n "import pandas as pd\\n",\n "import seaborn as sns\\n",\n "\\n",\n "sns.set()"\n ]\n },\n {\n "cell_type": "code",\n "execution_count": null,\n "metadata": {},\n "outputs": [],\n "source": [\n "HTML(\'<h2>Performance</h2>\')"\n ]\n },\n {\n "cell_type": "code",\n "execution_count": null,\n "metadata": {},\n "outputs": [],\n "source": [\n "c = pyEX.Client(\'Tpk_ecc89ddf30a611e9958142010a80043c\', version=\'sandbox\')\\n",\n "df = c.chartDF(ticker)\\n",\n "df[[\'open\', \'high\', \'low\', \'close\']].plot()"\n ]\n },\n {\n "cell_type": "code",\n "execution_count": null,\n "metadata": {},\n "outputs": [],\n "source": [\n "HTML(\'<h2>Peer Correlation</h2>\')"\n ]\n },\n {\n "cell_type": "code",\n "execution_count": null,\n "metadata": {},\n "outputs": [],\n "source": [\n "peers = c.peers(ticker)\\n",\n "# doest work for test\\n",\n "peers = [\'AAPL\', \'IBM\', \'NFLX\', \'MSFT\', \'INTC\']\\n",\n "to_merge = {x: c.chartDF(x) for x in peers}\\n",\n "to_merge.update({ticker: df})\\n",\n "all = sorted(list(set(peers + [ticker])))\\n",\n "rets = pd.concat(to_merge)\\n",\n "rets = rets.unstack(0)[\'changePercent\'][all]\\n",\n "rets = rets.corr()\\n",\n "rets[\'symbol\'] = rets.index\\n",\n "sns.heatmap(rets.corr())"\n ]\n },\n {\n "cell_type": "code",\n "execution_count": null,\n "metadata": {},\n "outputs": [],\n "source": []\n }\n ],\n "metadata": {\n "kernelspec": {\n "display_name": "Python 3",\n "language": "python",\n "name": "python3"\n },\n "language_info": {\n "codemirror_mode": {\n "name": "ipython",\n "version": 3\n },\n "file_extension": ".py",\n "mimetype": "text/x-python",\n "name": "python",\n "nbconvert_exporter": "python",\n "pygments_lexer": "ipython3",\n "version": "3.7.3"\n }\n },\n "nbformat": 4,\n "nbformat_minor": 4\n}'
),
config=None
)
job = JobConfig(name='MyJob',
id='Job-1',
meta=JobMetadataConfig(
notebook=notebook,
interval='minutely',
level='production',
reports=6,
created=now,
modified=now,
config=None
),
config=None
)
reports = [
ReportConfig(name='MyJob-Report-0',
id='Report-0',
meta=ReportMetadataConfig(notebook=notebook,
job=job,
parameters='{"ticker": "AAPL"}',
type='convert',
output='html',
strip_code=True,
template='',
created=now,
modified=now,
config=None
),
config=None
)
]
run(job, reports)
|
[] |
[] |
[
"IEX_TOKEN"
] |
[]
|
["IEX_TOKEN"]
|
python
| 1 | 0 | |
tests/integration/modules/test_pip.py
|
# -*- coding: utf-8 -*-
"""
tests.integration.modules.pip
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import pprint
import re
import shutil
import sys
import tempfile
import pytest
import salt.utils.files
import salt.utils.path
import salt.utils.platform
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
from tests.support.case import ModuleCase
from tests.support.helpers import patched_environ, slowTest
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
@skipIf(
salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, "virtualenv not installed"
)
@pytest.mark.windows_whitelisted
class PipModuleTest(ModuleCase):
def setUp(self):
super(PipModuleTest, self).setUp()
self.venv_test_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
# Remove the venv test directory
self.addCleanup(shutil.rmtree, self.venv_test_dir, ignore_errors=True)
self.venv_dir = os.path.join(self.venv_test_dir, "venv")
self.pip_temp = os.path.join(self.venv_test_dir, ".pip-temp")
if not os.path.isdir(self.pip_temp):
os.makedirs(self.pip_temp)
self.patched_environ = patched_environ(
PIP_SOURCE_DIR="",
PIP_BUILD_DIR="",
__cleanup__=[k for k in os.environ if k.startswith("PIP_")],
)
self.patched_environ.__enter__()
self.addCleanup(self.patched_environ.__exit__)
def _create_virtualenv(self, path):
"""
The reason why the virtualenv creation is proxied by this function is mostly
because under Windows we can't seem to properly create a virtualenv off of
another virtualenv (we can on Linux), and also because we really don't want to
test virtualenv creation off of another virtualenv; we want a virtualenv created
from the original python.
Also, on Windows, we must point to the virtualenv binary outside the existing
virtualenv because it will fail otherwise.
"""
try:
if salt.utils.platform.is_windows():
python = os.path.join(sys.real_prefix, os.path.basename(sys.executable))
else:
python_binary_names = [
"python{}.{}".format(*sys.version_info),
"python{}".format(*sys.version_info),
"python",
]
for binary_name in python_binary_names:
python = os.path.join(sys.real_prefix, "bin", binary_name)
if os.path.exists(python):
break
else:
self.fail(
"Couldn't find a python binary name under '{}' matching: {}".format(
os.path.join(sys.real_prefix, "bin"), python_binary_names
)
)
# We're running off a virtualenv, and we don't want to create a virtualenv off of
# a virtualenv
kwargs = {"python": python}
except AttributeError:
# We're running off of the system python
kwargs = {}
self.run_function("virtualenv.create", [path], **kwargs)
def _check_download_error(self, ret):
"""
Checks to see if a download error looks transitory
"""
return any(w in ret for w in ["URLError", "Download error"])
def pip_successful_install(self, target, expect=("irc3-plugins-test", "pep8",)):
"""
isolate regex for extracting `successful install` message from pip
"""
expect = set(expect)
expect_str = "|".join(expect)
success = re.search(
r"^.*Successfully installed\s([^\n]+)(?:Clean.*)?", target, re.M | re.S
)
success_for = (
re.findall(
r"({0})(?:-(?:[\d\.-]))?".format(expect_str), success.groups()[0]
)
if success
else []
)
return expect.issubset(set(success_for))
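# Example (sketch, version numbers illustrative): for pip output containing
#     "Successfully installed irc3-plugins-test-0.0.5 pep8-1.7.1"
# this returns True with the default `expect` packages.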
@slowTest
def test_issue_2087_missing_pip(self):
# Let's create the testing virtualenv
self._create_virtualenv(self.venv_dir)
# Let's remove the pip binary
pip_bin = os.path.join(self.venv_dir, "bin", "pip")
site_dir = self.run_function(
"virtualenv.get_distribution_path", [self.venv_dir, "pip"]
)
if salt.utils.platform.is_windows():
pip_bin = os.path.join(self.venv_dir, "Scripts", "pip.exe")
site_dir = os.path.join(self.venv_dir, "lib", "site-packages")
if not os.path.isfile(pip_bin):
self.skipTest("Failed to find the pip binary to the test virtualenv")
os.remove(pip_bin)
# Also remove the pip dir from site-packages
# This is needed now that we're using python -m pip instead of the
# pip binary directly. python -m pip will still work even if the
# pip binary is missing
shutil.rmtree(os.path.join(site_dir, "pip"))
# Let's run a pip depending functions
for func in ("pip.freeze", "pip.list"):
ret = self.run_function(func, bin_env=self.venv_dir)
self.assertIn(
"Command required for '{0}' not found: "
"Could not find a `pip` binary".format(func),
ret,
)
@slowTest
def test_requirements_as_list_of_chains__cwd_set__absolute_file_path(self):
self._create_virtualenv(self.venv_dir)
# Create a requirements file that depends on another one.
req1_filename = os.path.join(self.venv_dir, "requirements1.txt")
req1b_filename = os.path.join(self.venv_dir, "requirements1b.txt")
req2_filename = os.path.join(self.venv_dir, "requirements2.txt")
req2b_filename = os.path.join(self.venv_dir, "requirements2b.txt")
with salt.utils.files.fopen(req1_filename, "w") as f:
f.write("-r requirements1b.txt\n")
with salt.utils.files.fopen(req1b_filename, "w") as f:
f.write("irc3-plugins-test\n")
with salt.utils.files.fopen(req2_filename, "w") as f:
f.write("-r requirements2b.txt\n")
with salt.utils.files.fopen(req2b_filename, "w") as f:
f.write("pep8\n")
requirements_list = [req1_filename, req2_filename]
ret = self.run_function(
"pip.install",
requirements=requirements_list,
bin_env=self.venv_dir,
cwd=self.venv_dir,
)
if not isinstance(ret, dict):
self.fail(
"The 'pip.install' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
self.assertEqual(ret["retcode"], 0)
found = self.pip_successful_install(ret["stdout"])
self.assertTrue(found)
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
@slowTest
def test_requirements_as_list_of_chains__cwd_not_set__absolute_file_path(self):
self._create_virtualenv(self.venv_dir)
# Create a requirements file that depends on another one.
req1_filename = os.path.join(self.venv_dir, "requirements1.txt")
req1b_filename = os.path.join(self.venv_dir, "requirements1b.txt")
req2_filename = os.path.join(self.venv_dir, "requirements2.txt")
req2b_filename = os.path.join(self.venv_dir, "requirements2b.txt")
with salt.utils.files.fopen(req1_filename, "w") as f:
f.write("-r requirements1b.txt\n")
with salt.utils.files.fopen(req1b_filename, "w") as f:
f.write("irc3-plugins-test\n")
with salt.utils.files.fopen(req2_filename, "w") as f:
f.write("-r requirements2b.txt\n")
with salt.utils.files.fopen(req2b_filename, "w") as f:
f.write("pep8\n")
requirements_list = [req1_filename, req2_filename]
ret = self.run_function(
"pip.install", requirements=requirements_list, bin_env=self.venv_dir
)
if not isinstance(ret, dict):
self.fail(
"The 'pip.install' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
self.assertEqual(ret["retcode"], 0)
found = self.pip_successful_install(ret["stdout"])
self.assertTrue(found)
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
@slowTest
def test_requirements_as_list__absolute_file_path(self):
self._create_virtualenv(self.venv_dir)
req1_filename = os.path.join(self.venv_dir, "requirements.txt")
req2_filename = os.path.join(self.venv_dir, "requirements2.txt")
with salt.utils.files.fopen(req1_filename, "w") as f:
f.write("irc3-plugins-test\n")
with salt.utils.files.fopen(req2_filename, "w") as f:
f.write("pep8\n")
requirements_list = [req1_filename, req2_filename]
ret = self.run_function(
"pip.install", requirements=requirements_list, bin_env=self.venv_dir
)
if not isinstance(ret, dict):
self.fail(
"The 'pip.install' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
self.assertEqual(ret["retcode"], 0)
found = self.pip_successful_install(ret["stdout"])
self.assertTrue(found)
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
@slowTest
def test_requirements_as_list__non_absolute_file_path(self):
self._create_virtualenv(self.venv_dir)
# Create a requirements file that depends on another one.
req1_filename = "requirements.txt"
req2_filename = "requirements2.txt"
req_cwd = self.venv_dir
req1_filepath = os.path.join(req_cwd, req1_filename)
req2_filepath = os.path.join(req_cwd, req2_filename)
with salt.utils.files.fopen(req1_filepath, "w") as f:
f.write("irc3-plugins-test\n")
with salt.utils.files.fopen(req2_filepath, "w") as f:
f.write("pep8\n")
requirements_list = [req1_filename, req2_filename]
ret = self.run_function(
"pip.install",
requirements=requirements_list,
bin_env=self.venv_dir,
cwd=req_cwd,
)
if not isinstance(ret, dict):
self.fail(
"The 'pip.install' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
self.assertEqual(ret["retcode"], 0)
found = self.pip_successful_install(ret["stdout"])
self.assertTrue(found)
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
@slowTest
def test_chained_requirements__absolute_file_path(self):
self._create_virtualenv(self.venv_dir)
# Create a requirements file that depends on another one.
req1_filename = os.path.join(self.venv_dir, "requirements.txt")
req2_filename = os.path.join(self.venv_dir, "requirements2.txt")
with salt.utils.files.fopen(req1_filename, "w") as f:
f.write("-r requirements2.txt")
with salt.utils.files.fopen(req2_filename, "w") as f:
f.write("pep8")
ret = self.run_function(
"pip.install", requirements=req1_filename, bin_env=self.venv_dir
)
if not isinstance(ret, dict):
self.fail(
"The 'pip.install' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
self.assertEqual(ret["retcode"], 0)
self.assertIn("installed pep8", ret["stdout"])
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
@slowTest
def test_chained_requirements__non_absolute_file_path(self):
self._create_virtualenv(self.venv_dir)
# Create a requirements file that depends on another one.
req_basepath = self.venv_dir
req1_filename = "requirements.txt"
req2_filename = "requirements2.txt"
req1_file = os.path.join(self.venv_dir, req1_filename)
req2_file = os.path.join(self.venv_dir, req2_filename)
with salt.utils.files.fopen(req1_file, "w") as f:
f.write("-r requirements2.txt")
with salt.utils.files.fopen(req2_file, "w") as f:
f.write("pep8")
ret = self.run_function(
"pip.install",
requirements=req1_filename,
cwd=req_basepath,
bin_env=self.venv_dir,
)
if not isinstance(ret, dict):
self.fail(
"The 'pip.install' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
self.assertEqual(ret["retcode"], 0)
self.assertIn("installed pep8", ret["stdout"])
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
@slowTest
def test_issue_4805_nested_requirements(self):
self._create_virtualenv(self.venv_dir)
# Create a requirements file that depends on another one.
req1_filename = os.path.join(self.venv_dir, "requirements.txt")
req2_filename = os.path.join(self.venv_dir, "requirements2.txt")
with salt.utils.files.fopen(req1_filename, "w") as f:
f.write("-r requirements2.txt")
with salt.utils.files.fopen(req2_filename, "w") as f:
f.write("pep8")
ret = self.run_function(
"pip.install",
requirements=req1_filename,
bin_env=self.venv_dir,
timeout=300,
)
if not isinstance(ret, dict):
self.fail(
"The 'pip.install' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
if self._check_download_error(ret["stdout"]):
self.skipTest("Test skipped due to pip download error")
self.assertEqual(ret["retcode"], 0)
self.assertIn("installed pep8", ret["stdout"])
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
@slowTest
def test_pip_uninstall(self):
# Let's create the testing virtualenv
self._create_virtualenv(self.venv_dir)
ret = self.run_function("pip.install", ["pep8"], bin_env=self.venv_dir)
if not isinstance(ret, dict):
self.fail(
"The 'pip.install' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
if self._check_download_error(ret["stdout"]):
self.skipTest("Test skipped due to pip download error")
self.assertEqual(ret["retcode"], 0)
self.assertIn("installed pep8", ret["stdout"])
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
ret = self.run_function("pip.uninstall", ["pep8"], bin_env=self.venv_dir)
if not isinstance(ret, dict):
self.fail(
"The 'pip.uninstall' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
self.assertEqual(ret["retcode"], 0)
self.assertIn("uninstalled pep8", ret["stdout"])
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
@slowTest
def test_pip_install_upgrade(self):
# Create the testing virtualenv
self._create_virtualenv(self.venv_dir)
ret = self.run_function("pip.install", ["pep8==1.3.4"], bin_env=self.venv_dir)
if not isinstance(ret, dict):
self.fail(
"The 'pip.install' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
if self._check_download_error(ret["stdout"]):
self.skipTest("Test skipped due to pip download error")
self.assertEqual(ret["retcode"], 0)
self.assertIn("installed pep8", ret["stdout"])
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
ret = self.run_function(
"pip.install", ["pep8"], bin_env=self.venv_dir, upgrade=True
)
if not isinstance(ret, dict):
self.fail(
"The 'pip.install' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
if self._check_download_error(ret["stdout"]):
self.skipTest("Test skipped due to pip download error")
self.assertEqual(ret["retcode"], 0)
self.assertIn("installed pep8", ret["stdout"])
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
ret = self.run_function("pip.uninstall", ["pep8"], bin_env=self.venv_dir)
if not isinstance(ret, dict):
self.fail(
"The 'pip.uninstall' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
self.assertEqual(ret["retcode"], 0)
self.assertIn("uninstalled pep8", ret["stdout"])
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
@slowTest
def test_pip_install_multiple_editables(self):
editables = [
"git+https://github.com/jek/blinker.git#egg=Blinker",
"git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting",
]
# Create the testing virtualenv
self._create_virtualenv(self.venv_dir)
ret = self.run_function(
"pip.install",
[],
editable="{0}".format(",".join(editables)),
bin_env=self.venv_dir,
)
if not isinstance(ret, dict):
self.fail(
"The 'pip.install' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
if self._check_download_error(ret["stdout"]):
self.skipTest("Test skipped due to pip download error")
self.assertEqual(ret["retcode"], 0)
self.assertIn("Successfully installed Blinker SaltTesting", ret["stdout"])
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
@slowTest
def test_pip_install_multiple_editables_and_pkgs(self):
editables = [
"git+https://github.com/jek/blinker.git#egg=Blinker",
"git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting",
]
# Create the testing virtualenv
self._create_virtualenv(self.venv_dir)
ret = self.run_function(
"pip.install",
["pep8"],
editable="{0}".format(",".join(editables)),
bin_env=self.venv_dir,
)
if not isinstance(ret, dict):
self.fail(
"The 'pip.install' command did not return the excepted dictionary. Output:\n{}".format(
ret
)
)
try:
if self._check_download_error(ret["stdout"]):
self.skipTest("Test skipped due to pip download error")
self.assertEqual(ret["retcode"], 0)
for package in ("Blinker", "SaltTesting", "pep8"):
self.assertRegex(
ret["stdout"],
r"(?:.*)(Successfully installed)(?:.*)({0})(?:.*)".format(package),
)
except KeyError as exc:
self.fail(
"The returned dictionary is missing an expected key. Error: '{}'. Dictionary: {}".format(
exc, pprint.pformat(ret)
)
)
@skipIf(not os.path.isfile("pip3"), "test where pip3 is installed")
@skipIf(
salt.utils.platform.is_windows(), "test specific for linux usage of /bin/python"
)
def test_system_pip3(self):
self.run_function(
"pip.install", pkgs=["lazyimport==0.0.1"], bin_env="/bin/pip3"
)
ret1 = self.run_function("cmd.run", "/bin/pip3 freeze | grep lazyimport")
self.run_function("pip.uninstall", pkgs=["lazyimport"], bin_env="/bin/pip3")
ret2 = self.run_function("cmd.run", "/bin/pip3 freeze | grep lazyimport")
assert "lazyimport==0.0.1" in ret1
assert ret2 == ""
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
logs/plugin_bot.go
|
package logs
import (
"context"
"database/sql"
"fmt"
"os"
"time"
"emperror.dev/errors"
"github.com/jonas747/yagpdb/bot/paginatedmessages"
"github.com/jonas747/yagpdb/common/config"
"github.com/jonas747/dcmd"
"github.com/jonas747/discordgo"
"github.com/jonas747/dstate"
"github.com/jonas747/yagpdb/bot"
"github.com/jonas747/yagpdb/bot/eventsystem"
"github.com/jonas747/yagpdb/commands"
"github.com/jonas747/yagpdb/common"
"github.com/jonas747/yagpdb/logs/models"
"github.com/volatiletech/null"
"github.com/volatiletech/sqlboiler/boil"
)
var _ bot.BotInitHandler = (*Plugin)(nil)
var _ commands.CommandProvider = (*Plugin)(nil)
func (p *Plugin) AddCommands() {
commands.AddRootCommands(p, cmdLogs, cmdWhois, cmdNicknames, cmdUsernames, cmdMigrate)
}
func (p *Plugin) BotInit() {
eventsystem.AddHandlerAsyncLastLegacy(p, bot.ConcurrentEventHandler(HandleQueueEvt), eventsystem.EventGuildMemberUpdate, eventsystem.EventGuildMemberAdd, eventsystem.EventMemberFetched)
// eventsystem.AddHandlerAsyncLastLegacy(bot.ConcurrentEventHandler(HandleGC), eventsystem.EventGuildCreate)
eventsystem.AddHandlerAsyncLast(p, HandleMsgDelete, eventsystem.EventMessageDelete, eventsystem.EventMessageDeleteBulk)
eventsystem.AddHandlerFirstLegacy(p, HandlePresenceUpdate, eventsystem.EventPresenceUpdate)
go EvtProcesser()
go EvtProcesserGCs()
}
var cmdLogs = &commands.YAGCommand{
Cooldown: 5,
CmdCategory: commands.CategoryTool,
Name: "Logs",
Aliases: []string{"log"},
Description: "Creates a log of the last messages in the current channel.",
LongDescription: "This includes deleted messages within an hour (or 12 hours for premium servers)",
Arguments: []*dcmd.ArgDef{
&dcmd.ArgDef{Name: "Count", Default: 100, Type: &dcmd.IntArg{Min: 2, Max: 250}},
},
RunFunc: func(cmd *dcmd.Data) (interface{}, error) {
num := cmd.Args[0].Int()
l, err := CreateChannelLog(cmd.Context(), nil, cmd.GS.ID, cmd.CS.ID, cmd.Msg.Author.Username, cmd.Msg.Author.ID, num)
if err != nil {
if err == ErrChannelBlacklisted {
return "This channel is blacklisted from creating message logs, this can be changed in the control panel.", nil
}
return "", err
}
return CreateLink(cmd.GS.ID, l.ID), err
},
}
var cmdWhois = &commands.YAGCommand{
CmdCategory: commands.CategoryTool,
Name: "Whois",
Description: "Shows information about a user",
Aliases: []string{"whoami"},
RunInDM: false,
Arguments: []*dcmd.ArgDef{
{Name: "User", Type: &commands.MemberArg{}},
},
RunFunc: func(parsed *dcmd.Data) (interface{}, error) {
config, err := GetConfig(common.PQ, parsed.Context(), parsed.GS.ID)
if err != nil {
return nil, err
}
member := commands.ContextMS(parsed.Context())
memberCPY := parsed.GS.MemberCopy(true, member.ID)
if memberCPY != nil {
member = memberCPY
}
if parsed.Args[0].Value != nil {
member = parsed.Args[0].Value.(*dstate.MemberState)
}
nick := ""
if member.Nick != "" {
nick = " (" + member.Nick + ")"
}
joinedAtStr := ""
joinedAtDurStr := ""
if !member.MemberSet {
joinedAtStr = "Couldn't find out"
joinedAtDurStr = "Couldn't find out"
} else {
joinedAtStr = member.JoinedAt.UTC().Format(time.RFC822)
dur := time.Since(member.JoinedAt)
joinedAtDurStr = common.HumanizeDuration(common.DurationPrecisionHours, dur)
}
if joinedAtDurStr == "" {
joinedAtDurStr = "Less than an hour ago"
}
t := bot.SnowflakeToTime(member.ID)
createdDurStr := common.HumanizeDuration(common.DurationPrecisionHours, time.Since(t))
if createdDurStr == "" {
createdDurStr = "Less than an hour ago"
}
var memberStatus string
state := [4]string{"Playing", "Streaming", "Listening", "Watching"}
if !member.PresenceSet || member.PresenceGame == nil {
memberStatus = fmt.Sprintf("Has no active status or is invisible/offline.")
} else {
if member.PresenceGame.Type == 4 {
memberStatus = fmt.Sprintf("%s: %s", member.PresenceGame.Name, member.PresenceGame.State)
} else {
memberStatus = fmt.Sprintf("%s: %s", state[member.PresenceGame.Type], member.PresenceGame.Name)
}
}
embed := &discordgo.MessageEmbed{
Title: fmt.Sprintf("%s#%04d%s", member.Username, member.Discriminator, nick),
Fields: []*discordgo.MessageEmbedField{
&discordgo.MessageEmbedField{
Name: "ID",
Value: discordgo.StrID(member.ID),
Inline: true,
},
&discordgo.MessageEmbedField{
Name: "Avatar",
Value: "[Link](" + discordgo.EndpointUserAvatar(member.ID, member.StrAvatar()) + ")",
Inline: true,
},
&discordgo.MessageEmbedField{
Name: "Account Created",
Value: t.UTC().Format(time.RFC822),
Inline: true,
},
&discordgo.MessageEmbedField{
Name: "Account Age",
Value: createdDurStr,
Inline: true,
},
&discordgo.MessageEmbedField{
Name: "Joined Server At",
Value: joinedAtStr,
Inline: true,
}, &discordgo.MessageEmbedField{
Name: "Join Server Age",
Value: joinedAtDurStr,
Inline: true,
},
&discordgo.MessageEmbedField{
Name: "Status",
Value: memberStatus,
Inline: true,
},
},
Thumbnail: &discordgo.MessageEmbedThumbnail{
URL: discordgo.EndpointUserAvatar(member.ID, member.StrAvatar()),
},
}
if config.UsernameLoggingEnabled.Bool {
usernames, err := GetUsernames(parsed.Context(), member.ID, 5, 0)
if err != nil {
return err, err
}
usernamesStr := "```\n"
for _, v := range usernames {
usernamesStr += fmt.Sprintf("%20s: %s\n", v.CreatedAt.Time.UTC().Format(time.RFC822), v.Username.String)
}
usernamesStr += "```"
embed.Fields = append(embed.Fields, &discordgo.MessageEmbedField{
Name: "5 last usernames",
Value: usernamesStr,
})
} else {
embed.Fields = append(embed.Fields, &discordgo.MessageEmbedField{
Name: "Usernames",
Value: "Username tracking disabled",
})
}
if config.NicknameLoggingEnabled.Bool {
nicknames, err := GetNicknames(parsed.Context(), member.ID, parsed.GS.ID, 5, 0)
if err != nil {
return err, err
}
nicknameStr := "```\n"
if len(nicknames) < 1 {
nicknameStr += "No nicknames tracked"
} else {
for _, v := range nicknames {
nicknameStr += fmt.Sprintf("%20s: %s\n", v.CreatedAt.Time.UTC().Format(time.RFC822), v.Nickname.String)
}
}
nicknameStr += "```"
embed.Fields = append(embed.Fields, &discordgo.MessageEmbedField{
Name: "5 last nicknames",
Value: nicknameStr,
})
} else {
embed.Fields = append(embed.Fields, &discordgo.MessageEmbedField{
Name: "Nicknames",
Value: "Nickname tracking disabled",
})
}
return embed, nil
},
}
var cmdUsernames = &commands.YAGCommand{
CmdCategory: commands.CategoryTool,
Name: "Usernames",
Description: "Shows past usernames of a user.",
Aliases: []string{"unames", "un"},
RunInDM: true,
Arguments: []*dcmd.ArgDef{
{Name: "User", Type: dcmd.User},
},
RunFunc: func(parsed *dcmd.Data) (interface{}, error) {
if parsed.GS != nil {
config, err := GetConfig(common.PQ, parsed.Context(), parsed.GS.ID)
if err != nil {
return nil, err
}
if !config.UsernameLoggingEnabled.Bool {
return "Username logging is disabled on this server", nil
}
}
_, err := paginatedmessages.CreatePaginatedMessage(parsed.GS.ID, parsed.CS.ID, 1, 0, func(p *paginatedmessages.PaginatedMessage, page int) (*discordgo.MessageEmbed, error) {
target := parsed.Msg.Author
if parsed.Args[0].Value != nil {
target = parsed.Args[0].Value.(*discordgo.User)
}
offset := (page - 1) * 15
usernames, err := GetUsernames(context.Background(), target.ID, 15, offset)
if err != nil {
return nil, err
}
if len(usernames) < 1 && page > 1 {
return nil, paginatedmessages.ErrNoResults
}
out := fmt.Sprintf("Past username of **%s#%s** ```\n", target.Username, target.Discriminator)
for _, v := range usernames {
out += fmt.Sprintf("%20s: %s\n", v.CreatedAt.Time.UTC().Format(time.RFC822), v.Username.String)
}
out += "```"
if len(usernames) < 1 {
out = `No logged usernames`
}
embed := &discordgo.MessageEmbed{
Color: 0x277ee3,
Title: "Usernames of " + target.Username + "#" + target.Discriminator,
Description: out,
}
return embed, nil
})
return nil, err
},
}
var cmdNicknames = &commands.YAGCommand{
CmdCategory: commands.CategoryTool,
Name: "Nicknames",
Description: "Shows past nicknames of a user.",
Aliases: []string{"nn"},
RunInDM: false,
Arguments: []*dcmd.ArgDef{
{Name: "User", Type: dcmd.User},
},
RunFunc: func(parsed *dcmd.Data) (interface{}, error) {
config, err := GetConfig(common.PQ, parsed.Context(), parsed.GS.ID)
if err != nil {
return nil, err
}
target := parsed.Msg.Author
if parsed.Args[0].Value != nil {
target = parsed.Args[0].Value.(*discordgo.User)
}
if !config.NicknameLoggingEnabled.Bool {
return "Nickname logging is disabled on this server", nil
}
_, err = paginatedmessages.CreatePaginatedMessage(parsed.GS.ID, parsed.CS.ID, 1, 0, func(p *paginatedmessages.PaginatedMessage, page int) (*discordgo.MessageEmbed, error) {
offset := (page - 1) * 15
nicknames, err := GetNicknames(context.Background(), target.ID, parsed.GS.ID, 15, offset)
if err != nil {
return nil, err
}
if page > 1 && len(nicknames) < 1 {
return nil, paginatedmessages.ErrNoResults
}
out := fmt.Sprintf("Past nicknames of **%s#%s** ```\n", target.Username, target.Discriminator)
for _, v := range nicknames {
out += fmt.Sprintf("%20s: %s\n", v.CreatedAt.Time.UTC().Format(time.RFC822), v.Nickname.String)
}
out += "```"
if len(nicknames) < 1 {
out = `No nicknames tracked`
}
embed := &discordgo.MessageEmbed{
Color: 0x277ee3,
Title: "Nicknames of " + target.Username + "#" + target.Discriminator,
Description: out,
}
return embed, nil
})
return nil, err
},
}
// Mark all log messages with this id as deleted
func HandleMsgDelete(evt *eventsystem.EventData) (retry bool, err error) {
if evt.Type == eventsystem.EventMessageDelete {
err := markLoggedMessageAsDeleted(evt.Context(), evt.MessageDelete().ID)
if err != nil {
return true, errors.WithStackIf(err)
}
return false, nil
}
for _, m := range evt.MessageDeleteBulk().Messages {
err := markLoggedMessageAsDeleted(evt.Context(), m)
if err != nil {
return true, errors.WithStackIf(err)
}
}
return false, nil
}
func markLoggedMessageAsDeleted(ctx context.Context, mID int64) error {
_, err := models.Messages2s(models.Messages2Where.ID.EQ(mID)).UpdateAllG(ctx,
models.M{"deleted": true})
return err
}
func HandlePresenceUpdate(evt *eventsystem.EventData) {
pu := evt.PresenceUpdate()
gs := evt.GS
gs.RLock()
defer gs.RUnlock()
ms := gs.Member(false, pu.User.ID)
if ms == nil || !ms.PresenceSet || !ms.MemberSet {
queueEvt(pu)
return
}
if pu.User.Username != "" && pu.User.Username != ms.Username {
queueEvt(pu)
return
}
if pu.Nick != ms.Nick {
queueEvt(pu)
return
}
}
// Presence updates are also sent when a user changes their username, so those events are queued for username tracking.
// The nickname is checked as well in case the user just came online.
func HandleQueueEvt(evt *eventsystem.EventData) {
queueEvt(evt.EvtInterface)
}
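// queueEvt pushes an event onto the username/nickname tracking queue, unless tracking is disabled
// via the YAGPDB_LOGS_DISABLE_USERNAME_TRACKING environment variable; if the buffered channel is full,
// the send is moved to a new goroutine so the caller never blocks.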
func queueEvt(evt interface{}) {
if os.Getenv("YAGPDB_LOGS_DISABLE_USERNAME_TRACKING") != "" {
return
}
select {
case evtChan <- evt:
return
default:
go func() {
evtChan <- evt
}()
}
}
func HandleGC(evt *eventsystem.EventData) {
gc := evt.GuildCreate()
evtChanGC <- &LightGC{
GuildID: gc.ID,
Members: gc.Members,
}
}
// type UsernameListing struct {
// gorm.Model
// UserID int64 `gorm:"index"`
// Username string
// }
// type NicknameListing struct {
// gorm.Model
// UserID int64 `gorm:"index"`
// GuildID string
// Nickname string
// }
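// CheckUsername stores a new username listing for the user if the current username differs from the
// last one recorded in the database.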
func CheckUsername(exec boil.ContextExecutor, ctx context.Context, usernameStmt *sql.Stmt, user *discordgo.User) error {
var lastUsername string
row := usernameStmt.QueryRow(user.ID)
err := row.Scan(&lastUsername)
if err == nil && lastUsername == user.Username {
// Not changed
return nil
}
if err != nil && err != sql.ErrNoRows {
// Other error
return nil
}
logger.Debug("User changed username, old:", lastUsername, " | new:", user.Username)
listing := &models.UsernameListing{
UserID: null.Int64From(user.ID),
Username: null.StringFrom(user.Username),
}
err = listing.Insert(ctx, exec, boil.Infer())
if err != nil {
logger.WithError(err).WithField("user", user.ID).Error("failed setting last username")
}
return err
}
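// CheckNickname stores a new nickname listing for the user in the given guild if the current nickname
// differs from the last one recorded.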
func CheckNickname(exec boil.ContextExecutor, ctx context.Context, nicknameStmt *sql.Stmt, userID, guildID int64, nickname string) error {
var lastNickname string
row := nicknameStmt.QueryRow(userID, guildID)
err := row.Scan(&lastNickname)
if err == sql.ErrNoRows && nickname == "" {
// no previous nickname record and the nickname is empty, so there is nothing to store yet
return nil
}
if err == nil && lastNickname == nickname {
// Not changed
return nil
}
if err != sql.ErrNoRows && err != nil {
return err
}
logger.Debug("User changed nickname, old:", lastNickname, " | new:", nickname)
listing := &models.NicknameListing{
UserID: null.Int64From(userID),
GuildID: null.StringFrom(discordgo.StrID(guildID)),
Nickname: null.StringFrom(nickname),
}
err = listing.Insert(ctx, exec, boil.Infer())
if err != nil {
logger.WithError(err).WithField("guild", guildID).WithField("user", userID).Error("failed setting last nickname")
}
return err
}
// func CheckNicknameBulk(gDB *gorm.DB, guildID int64, members []*discordgo.Member) {
// ids := make([]int64, 0, len(members))
// for _, v := range members {
// ids = append(ids, v.User.ID)
// }
// rows, err := gDB.CommonDB().Query(
// "select distinct on(user_id) nickname,user_id from nickname_listings where user_id = ANY ($1) AND guild_id=$2 order by user_id,id desc;", pq.Int64Array(ids), guildID)
// if err != nil {
// logger.WithError(err).Error("Failed querying current nicknames")
// }
// // Value is whether the nickname was identical
// queriedUsers := make(map[int64]bool)
// for rows.Next() {
// var nickname string
// var userID int64
// err = rows.Scan(&nickname, &userID)
// if err != nil {
// logger.WithError(err).Error("Error while scanning")
// continue
// }
// for _, member := range members {
// if member.User.ID == userID {
// if member.Nick == nickname {
// // Already have the last username tracked
// queriedUsers[userID] = true
// } else {
// queriedUsers[userID] = false
// logger.Debug("CHANGED Nick: ", nickname, " : ", member.Nick)
// }
// break
// }
// }
// }
// rows.Close()
// for _, member := range members {
// unchanged, queried := queriedUsers[member.User.ID]
// if queried && unchanged {
// continue
// }
// if !queried && member.Nick == "" {
// // don't need to be putting this in the database as the first record for the user
// continue
// }
// logger.Debug("User changed nickname, new: ", member.Nick)
// listing := NicknameListing{
// UserID: member.User.ID,
// GuildID: discordgo.StrID(guildID),
// Nickname: member.Nick,
// }
// err = gDB.Create(&listing).Error
// if err != nil {
// logger.WithError(err).Error("Failed setting nickname")
// }
// }
// }
// func CheckUsernameBulk(gDB *gorm.DB, users []*discordgo.User) {
// ids := make([]int64, 0, len(users))
// for _, v := range users {
// ids = append(ids, v.ID)
// }
// rows, err := gDB.CommonDB().Query(
// "select distinct on(user_id) username,user_id from username_listings where user_id = ANY ($1) order by user_id,id desc;", pq.Int64Array(ids))
// if err != nil {
// logger.WithError(err).Error("Failed querying current usernames")
// }
// unchangedUsers := make(map[int64]bool)
// for rows.Next() {
// var username string
// var userID int64
// err = rows.Scan(&username, &userID)
// if err != nil {
// logger.WithError(err).Error("Error while scanning")
// continue
// }
// // var foundUser *discordgo.User
// for _, user := range users {
// if user.ID == userID {
// if user.Username == username {
// // Already have the last username tracked
// unchangedUsers[userID] = true
// }
// break
// }
// }
// }
// rows.Close()
// for _, user := range users {
// if unchanged, ok := unchangedUsers[user.ID]; ok && unchanged {
// continue
// }
// logger.Debug("User changed username, new: ", user.Username)
// listing := UsernameListing{
// UserID: user.ID,
// Username: user.Username,
// }
// err = gDB.Create(&listing).Error
// if err != nil {
// logger.WithError(err).Error("Failed setting username")
// }
// }
// }
var (
evtChan = make(chan interface{}, 1000)
evtChanGC = make(chan *LightGC)
)
type UserGuildPair struct {
GuildID int64
User *discordgo.User
}
var confEnableUsernameTracking = config.RegisterOption("yagpdb.enable_username_tracking", "Enable username tracking", true)
// Queue up all the events and process them one by one, because of limited connections
func EvtProcesser() {
queuedMembers := make([]*discordgo.Member, 0)
queuedUsers := make([]*UserGuildPair, 0)
ticker := time.NewTicker(time.Second * 10)
enabled := confEnableUsernameTracking.GetBool()
for {
select {
case e := <-evtChan:
if !enabled {
continue
}
switch t := e.(type) {
case *discordgo.PresenceUpdate:
if t.User.Username == "" {
continue
}
queuedUsers = append(queuedUsers, &UserGuildPair{GuildID: t.GuildID, User: t.User})
case *discordgo.GuildMemberUpdate:
queuedMembers = append(queuedMembers, t.Member)
case *discordgo.GuildMemberAdd:
queuedMembers = append(queuedMembers, t.Member)
case *discordgo.Member:
queuedMembers = append(queuedMembers, t)
}
case <-ticker.C:
if !enabled {
continue
}
started := time.Now()
err := ProcessBatch(queuedUsers, queuedMembers)
logger.Debugf("Updated %d members and %d users in %s", len(queuedMembers), len(queuedUsers), time.Since(started).String())
if err == nil {
// reset the slices
queuedUsers = queuedUsers[:0]
queuedMembers = queuedMembers[:0]
} else {
logger.WithError(err).Error("failed batch updating usernames and nicknames")
}
}
}
}
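// ProcessBatch persists username and nickname changes for the queued users and members in a single
// transaction, skipping guilds where the relevant logging is disabled in the config.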
func ProcessBatch(users []*UserGuildPair, members []*discordgo.Member) error {
configs := make([]*models.GuildLoggingConfig, 0)
err := common.SqlTX(func(tx *sql.Tx) error {
nickStatement, err := tx.Prepare("select nickname from nickname_listings where user_id=$1 AND guild_id=$2 order by id desc limit 1;")
if err != nil {
return errors.WrapIf(err, "nick stmnt prepare")
}
usernameStatement, err := tx.Prepare("select username from username_listings where user_id=$1 order by id desc limit 1;")
if err != nil {
return errors.WrapIf(err, "username stmnt prepare")
}
// first find all the configs
OUTERUSERS:
for _, v := range users {
for _, c := range configs {
if c.GuildID == v.GuildID {
continue OUTERUSERS
}
}
config, err := GetConfigCached(tx, v.GuildID)
if err != nil {
return errors.WrapIf(err, "users_configs")
}
configs = append(configs, config)
}
OUTERMEMBERS:
for _, v := range members {
for _, c := range configs {
if c.GuildID == v.GuildID {
continue OUTERMEMBERS
}
}
config, err := GetConfigCached(tx, v.GuildID)
if err != nil {
return errors.WrapIf(err, "members_configs")
}
configs = append(configs, config)
}
// update users first
OUTERUSERS_UPDT:
for _, v := range users {
// check if username logging is disabled
for _, c := range configs {
if c.GuildID == v.GuildID {
if !c.UsernameLoggingEnabled.Bool {
continue OUTERUSERS_UPDT
}
break
}
}
err = CheckUsername(tx, context.Background(), usernameStatement, v.User)
if err != nil {
return errors.WrapIf(err, "user username check")
}
}
// update members
for _, v := range members {
checkNick := false
checkUser := false
// find config
for _, c := range configs {
if c.GuildID == v.GuildID {
checkNick = c.NicknameLoggingEnabled.Bool
checkUser = c.UsernameLoggingEnabled.Bool
break
}
}
if !checkNick && !checkUser {
continue
}
err = CheckUsername(tx, context.Background(), usernameStatement, v.User)
if err != nil {
return errors.WrapIf(err, "members username check")
}
err = CheckNickname(tx, context.Background(), nickStatement, v.User.ID, v.GuildID, v.Nick)
if err != nil {
return errors.WrapIf(err, "members nickname check")
}
}
return nil
})
return err
}
type LightGC struct {
GuildID int64
Members []*discordgo.Member
}
func EvtProcesserGCs() {
for {
<-evtChanGC
// tx := common.GORM.Begin()
// conf, err := GetConfig(gc.GuildID)
// if err != nil {
// logger.WithError(err).Error("Failed fetching config")
// continue
// }
// started := time.Now()
// users := make([]*discordgo.User, len(gc.Members))
// for i, m := range gc.Members {
// users[i] = m.User
// }
// if conf.NicknameLoggingEnabled {
// CheckNicknameBulk(tx, gc.GuildID, gc.Members)
// }
// if conf.UsernameLoggingEnabled {
// CheckUsernameBulk(tx, users)
// }
// err = tx.Commit().Error
// if err != nil {
// logger.WithError(err).Error("Failed committing transaction")
// continue
// }
// if len(gc.Members) > 100 {
// logger.Infof("Checked %d members in %s", len(gc.Members), time.Since(started).String())
// // Make sure this doesn't use all our resources
// time.Sleep(time.Second * 25)
// } else {
// time.Sleep(time.Second * 15)
// }
}
}
const CacheKeyConfig bot.GSCacheKey = "logs_config"
func GetConfigCached(exec boil.ContextExecutor, gID int64) (*models.GuildLoggingConfig, error) {
gs := bot.State.Guild(true, gID)
if gs == nil {
return GetConfig(exec, context.Background(), gID)
}
v, err := gs.UserCacheFetch(CacheKeyConfig, func() (interface{}, error) {
conf, err := GetConfig(exec, context.Background(), gID)
return conf, err
})
if err != nil {
return nil, err
}
return v.(*models.GuildLoggingConfig), nil
}
|
[
"\"YAGPDB_LOGS_DISABLE_USERNAME_TRACKING\""
] |
[] |
[
"YAGPDB_LOGS_DISABLE_USERNAME_TRACKING"
] |
[]
|
["YAGPDB_LOGS_DISABLE_USERNAME_TRACKING"]
|
go
| 1 | 0 | |
rgd/asgi.py
|
"""
ASGI config for ResonantGeoData project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rgd.settings')
application = get_asgi_application()
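# The exported callable above is what an ASGI server loads; for example, a typical local invocation
# (assuming uvicorn is installed) would be: uvicorn rgd.asgi:application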
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
core_req = ["requests", "numpy", "pandas", "appdirs>=1.4.4", "tqdm>=4.27.0", "plotly>=4.0.0"]
extras_req = {
"dev" : ["twine", "black", "pytest", "pytest-cov"],
"test" : ["pytest", "pytest-cov"],
"docs" : ["sphinx-rtd-theme>=0.5.0", "nbsphinx>=0.7.1"]
}
extras_req["all"] = [p for r in extras_req.values() for p in r]
if 'IS_VECTORAI_NIGHTLY' in os.environ.keys():
from datetime import datetime
name = 'vectorai-nightly'
version = '0.2.2' + '.' + datetime.today().date().__str__().replace('-', '.')
else:
name = 'vectorai'
version = '0.2.2'
setup(
name=name,
version=version,
author="OnSearch Pty Ltd",
author_email="[email protected]",
description="A Python framework for building vector based applications. Encode, query and analyse data using vectors.",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="vector, embeddings, machinelearning, ai, artificialintelligence, nlp, tensorflow, pytorch, nearestneighbors, search, analytics, clustering, dimensionalityreduction",
url="https://github.com/vector-ai/vectorai",
license="Apache",
packages=find_packages(exclude=["tests*"]),
python_requires=">=3",
install_requires=core_req,
extras_require=extras_req,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Intended Audience :: Information Technology",
"Intended Audience :: Financial and Insurance Industry",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Manufacturing",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Database",
"Topic :: Internet :: WWW/HTTP :: Indexing/Search",
"Topic :: Multimedia :: Sound/Audio :: Conversion",
"Topic :: Multimedia :: Video :: Conversion",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Image Recognition",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Software Development :: Libraries :: Application Frameworks",
],
)
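# As a usage note, the optional dependency groups declared in extras_req above can be installed with,
# for example: pip install vectorai[dev]  or  pip install vectorai[all]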
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
plugins/govppmux/plugin_impl_govppmux.go
|
// Copyright (c) 2019 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package govppmux
import (
"context"
"encoding/gob"
"fmt"
"os"
"sort"
"strings"
"sync"
"time"
"git.fd.io/govpp.git/adapter"
govppapi "git.fd.io/govpp.git/api"
govpp "git.fd.io/govpp.git/core"
"git.fd.io/govpp.git/proxy"
"github.com/pkg/errors"
"go.ligato.io/cn-infra/v2/datasync/resync"
"go.ligato.io/cn-infra/v2/health/statuscheck"
"go.ligato.io/cn-infra/v2/infra"
"go.ligato.io/cn-infra/v2/logging"
"go.ligato.io/cn-infra/v2/rpc/rest"
"go.ligato.io/vpp-agent/v3/plugins/govppmux/vppcalls"
"go.ligato.io/vpp-agent/v3/plugins/vpp"
"go.ligato.io/vpp-agent/v3/plugins/vpp/binapi"
_ "go.ligato.io/vpp-agent/v3/plugins/govppmux/vppcalls/vpp1904"
_ "go.ligato.io/vpp-agent/v3/plugins/govppmux/vppcalls/vpp1908"
_ "go.ligato.io/vpp-agent/v3/plugins/govppmux/vppcalls/vpp2001"
)
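// disabledSocketClient is controlled by the GOVPPMUX_NOSOCK environment variable; when set, the plugin
// falls back to the shared-memory binary API instead of the socket client (see Init below).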
var (
disabledSocketClient = os.Getenv("GOVPPMUX_NOSOCK") != ""
)
// Plugin is the govppmux plugin implementation.
type Plugin struct {
Deps
config *Config
vpeHandler vppcalls.VppCoreAPI
binapiVersion vpp.Version
vppConn *govpp.Connection
vppConChan chan govpp.ConnectionEvent
lastConnErr error
vppapiChan govppapi.Channel
statsMu sync.Mutex
statsAdapter adapter.StatsAPI
statsConn *govpp.StatsConnection
proxy *proxy.Server
// infoMu synchronizes access to fields
// vppInfo and lastEvent
infoMu sync.Mutex
vppInfo VPPInfo
lastEvent govpp.ConnectionEvent
cancel context.CancelFunc
wg sync.WaitGroup
}
// Deps defines dependencies for the govppmux plugin.
type Deps struct {
infra.PluginDeps
HTTPHandlers rest.HTTPHandlers
StatusCheck statuscheck.PluginStatusWriter
Resync *resync.Plugin
}
// Init is the entry point called by Agent Core. A single binary-API connection to VPP is established.
func (p *Plugin) Init() (err error) {
if p.config, err = p.loadConfig(); err != nil {
return err
}
p.Log.Debugf("config: %+v", p.config)
// set GoVPP config
govpp.HealthCheckProbeInterval = p.config.HealthCheckProbeInterval
govpp.HealthCheckReplyTimeout = p.config.HealthCheckReplyTimeout
govpp.HealthCheckThreshold = p.config.HealthCheckThreshold
govpp.DefaultReplyTimeout = p.config.ReplyTimeout
// register REST API handlers
p.registerHandlers(p.HTTPHandlers)
var address string
useShm := disabledSocketClient || p.config.ConnectViaShm || p.config.ShmPrefix != ""
if useShm {
address = p.config.ShmPrefix
} else {
address = p.config.BinAPISocketPath
}
// TODO: Async connect & automatic reconnect support is not yet implemented in the agent,
// so synchronously wait until connected to VPP.
startTime := time.Now()
p.Log.Debugf("connecting to VPP..")
vppAdapter := NewVppAdapter(address, useShm)
p.vppConn, p.vppConChan, err = govpp.AsyncConnect(vppAdapter, p.config.RetryConnectCount, p.config.RetryConnectTimeout)
if err != nil {
return err
}
// wait for connection event
for {
event, ok := <-p.vppConChan
if !ok {
return errors.Errorf("VPP connection state channel closed")
}
if event.State == govpp.Connected {
break
} else if event.State == govpp.Failed || event.State == govpp.Disconnected {
return errors.Errorf("unable to establish connection to VPP (%v)", event.Error)
} else {
p.Log.Debugf("VPP connection state: %+v", event)
}
}
took := time.Since(startTime)
p.Log.Debugf("connection to VPP established (took %s)", took.Round(time.Millisecond))
if err := p.updateVPPInfo(); err != nil {
return errors.WithMessage(err, "retrieving VPP info failed")
}
// Connect to VPP status socket
var statsSocket string
if p.config.StatsSocketPath != "" {
statsSocket = p.config.StatsSocketPath
} else {
statsSocket = adapter.DefaultStatsSocket
}
statsAdapter := NewStatsAdapter(statsSocket)
if statsAdapter == nil {
p.Log.Warnf("Unable to connect to the VPP statistics socket, nil stats adapter", err)
} else if p.statsConn, err = govpp.ConnectStats(statsAdapter); err != nil {
p.Log.Warnf("Unable to connect to the VPP statistics socket, %v", err)
p.statsAdapter = nil
}
if p.config.ProxyEnabled {
// register binapi messages to gob package (required for proxy)
msgList := binapi.Versions[p.binapiVersion]
for _, msg := range msgList.AllMessages() {
gob.Register(msg)
}
err := p.startProxy(NewVppAdapter(address, useShm), NewStatsAdapter(statsSocket))
if err != nil {
return err
}
p.Log.Infof("VPP proxy ready")
}
return nil
}
// AfterInit reports status check.
func (p *Plugin) AfterInit() error {
// Register providing status reports (push mode)
p.StatusCheck.Register(p.PluginName, nil)
p.StatusCheck.ReportStateChange(p.PluginName, statuscheck.OK, nil)
var ctx context.Context
ctx, p.cancel = context.WithCancel(context.Background())
p.wg.Add(1)
go p.handleVPPConnectionEvents(ctx)
return nil
}
// Close cleans up the resources allocated by the govppmux plugin.
func (p *Plugin) Close() error {
p.cancel()
p.wg.Wait()
defer func() {
if p.vppConn != nil {
p.vppConn.Disconnect()
}
if p.statsAdapter != nil {
if err := p.statsAdapter.Disconnect(); err != nil {
p.Log.Errorf("VPP statistics socket adapter disconnect error: %v", err)
}
}
}()
if p.proxy != nil {
p.proxy.DisconnectBinapi()
p.proxy.DisconnectStats()
}
return nil
}
func (p *Plugin) Version() vpp.Version {
return p.binapiVersion
}
func (p *Plugin) CheckCompatiblity(msgs ...govppapi.Message) error {
p.infoMu.Lock()
defer p.infoMu.Unlock()
if p.vppapiChan == nil {
apiChan, err := p.vppConn.NewAPIChannel()
if err != nil {
return err
}
p.vppapiChan = apiChan
}
return p.vppapiChan.CheckCompatiblity(msgs...)
}
func (p *Plugin) Stats() govppapi.StatsProvider {
if p.statsConn == nil {
return nil
}
return p
}
func (p *Plugin) BinapiVersion() vpp.Version {
return p.binapiVersion
}
// VPPInfo returns information about VPP session.
func (p *Plugin) VPPInfo() VPPInfo {
p.infoMu.Lock()
defer p.infoMu.Unlock()
return p.vppInfo
}
// IsPluginLoaded returns true if plugin is loaded.
func (p *Plugin) IsPluginLoaded(plugin string) bool {
p.infoMu.Lock()
defer p.infoMu.Unlock()
for _, p := range p.vppInfo.Plugins {
if p.Name == plugin {
return true
}
}
return false
}
func (p *Plugin) updateVPPInfo() (err error) {
if p.vppConn == nil {
return fmt.Errorf("VPP connection is nil")
}
p.vppapiChan, err = p.vppConn.NewAPIChannel()
if err != nil {
return err
}
p.binapiVersion, err = binapi.CompatibleVersion(p.vppapiChan)
if err != nil {
return err
}
p.vpeHandler, err = vppcalls.NewHandler(p)
if err != nil {
return errors.New("no compatible VPP handler found")
}
ctx := context.TODO()
version, err := p.vpeHandler.RunCli(ctx, "show version verbose")
if err != nil {
p.Log.Warnf("RunCli error: %v", err)
} else {
p.Log.Debugf("vpp# show version verbose\n%s", version)
}
cmdline, err := p.vpeHandler.RunCli(ctx, "show version cmdline")
if err != nil {
p.Log.Warnf("RunCli error: %v", err)
} else {
out := strings.Replace(cmdline, "\n", "", -1)
p.Log.Debugf("vpp# show version cmdline:\n%s", out)
}
ver, err := p.vpeHandler.GetVersion(ctx)
if err != nil {
return err
}
session, err := p.vpeHandler.GetSession(ctx)
if err != nil {
return err
}
p.Log.WithFields(logging.Fields{
"PID": session.PID,
"ClientID": session.ClientIdx,
}).Infof("VPP version: %v", ver.Version)
modules, err := p.vpeHandler.GetModules(ctx)
if err != nil {
return err
}
p.Log.Debugf("VPP has %d core modules: %v", len(modules), modules)
plugins, err := p.vpeHandler.GetPlugins(ctx)
if err != nil {
return err
}
sort.Slice(plugins, func(i, j int) bool { return plugins[i].Name < plugins[j].Name })
p.Log.Debugf("VPP loaded %d plugins", len(plugins))
for _, plugin := range plugins {
p.Log.Debugf(" - plugin: %v", plugin)
}
p.infoMu.Lock()
p.vppInfo = VPPInfo{
Connected: true,
VersionInfo: *ver,
SessionInfo: *session,
Plugins: plugins,
}
p.infoMu.Unlock()
p.Log.Debugf("found %d registered VPP handlers", len(vpp.GetHandlers()))
for name, handler := range vpp.GetHandlers() {
versions := handler.Versions()
p.Log.Debugf("- handler: %-10s has %d versions: %v", name, len(versions), versions)
}
return nil
}
// handleVPPConnectionEvents handles VPP connection events.
func (p *Plugin) handleVPPConnectionEvents(ctx context.Context) {
defer p.wg.Done()
for {
select {
case event, ok := <-p.vppConChan:
if !ok {
p.lastConnErr = errors.Errorf("VPP connection state channel closed")
p.StatusCheck.ReportStateChange(p.PluginName, statuscheck.Error, p.lastConnErr)
return
}
if event.State == govpp.Connected {
if err := p.updateVPPInfo(); err != nil {
p.Log.Errorf("updating VPP info failed: %v", err)
}
if p.config.ReconnectResync && p.lastConnErr != nil {
p.Log.Info("Starting resync after VPP reconnect")
if p.Resync != nil {
p.Resync.DoResync()
p.lastConnErr = nil
} else {
p.Log.Warn("Expected resync after VPP reconnect could not start because of missing Resync plugin")
}
}
p.StatusCheck.ReportStateChange(p.PluginName, statuscheck.OK, nil)
} else if event.State == govpp.Failed || event.State == govpp.Disconnected {
p.infoMu.Lock()
p.vppInfo.Connected = false
p.infoMu.Unlock()
p.lastConnErr = errors.Errorf("VPP connection lost (event: %+v)", event)
p.StatusCheck.ReportStateChange(p.PluginName, statuscheck.Error, p.lastConnErr)
} else {
p.Log.Debugf("VPP connection state: %+v", event)
}
p.infoMu.Lock()
p.lastEvent = event
p.infoMu.Unlock()
case <-ctx.Done():
return
}
}
}
func (p *Plugin) startProxy(vppapi adapter.VppAPI, statsapi adapter.StatsAPI) (err error) {
p.Log.Infof("starting VPP proxy")
p.proxy, err = proxy.NewServer()
if err != nil {
return errors.WithMessage(err, "creating proxy failed")
}
if err = p.proxy.ConnectBinapi(vppapi); err != nil {
return errors.WithMessage(err, "connecting binapi for proxy failed")
}
if err = p.proxy.ConnectStats(statsapi); err != nil {
return errors.WithMessage(err, "connecting stats for proxy failed")
}
return nil
}
|
[
"\"GOVPPMUX_NOSOCK\""
] |
[] |
[
"GOVPPMUX_NOSOCK"
] |
[]
|
["GOVPPMUX_NOSOCK"]
|
go
| 1 | 0 | |
buildchain/buildchain/lint.py
|
# coding: utf-8
"""Tasks for the linting.
This module runs the linting tools for several languages.
It provides a top level task to run all the linting tools, and each linting tool
is run in its own sub-task (so that you can run a single one and/or run several
linting tools in parallel).
Overview:
┌──────────────┐
╱───>│ lint:python │
╱ └──────────────┘
╱ ┌──────────────┐
╱ ───>│ lint:yaml │
┌────────┐╱ └──────────────┘
│ lint │
└────────┘╲ ┌──────────────┐
╲ ───>│ lint:shell │
╲ └──────────────┘
╲ ┌──────────────┐
╲───>│ lint:go │
└──────────────┘
"""
import os
import shlex
from pathlib import Path
import subprocess
from typing import Callable, Iterator, List, Optional, Tuple
import doit # type: ignore
from buildchain import config
from buildchain import constants
from buildchain import types
from buildchain import utils
def task_lint() -> Iterator[types.TaskDict]:
"""Run the linting tools."""
for create_lint_task in LINTERS:
yield create_lint_task()
# Python {{{
def lint_python() -> types.TaskDict:
"""Run Python linting."""
buildchain = constants.ROOT/'buildchain'
python_sources : List[Path] = [
buildchain/'dodo.py',
*buildchain.glob('buildchain/*.py'),
*buildchain.glob('buildchain/targets/*.py'),
]
cmd = ' '.join(map(shlex.quote, ['tox', '-e', 'lint-python']))
env = {'PATH': os.environ['PATH'], 'OSTYPE': os.uname().sysname}
return {
'name': 'python',
'title': utils.title_with_subtask_name('LINT'),
'doc': lint_python.__doc__,
'actions': [doit.action.CmdAction(cmd, env=env)],
'file_dep': python_sources,
}
# }}}
# Shell {{{
def lint_shell() -> types.TaskDict:
"""Run shell scripts linting."""
shell_scripts = [
filepath for filepath in utils.git_ls() if '.sh' in filepath.suffixes
]
return {
'name': 'shell',
'title': utils.title_with_subtask_name('LINT'),
'doc': lint_shell.__doc__,
'actions': [['tox', '-e', 'lint-shell']],
'file_dep': shell_scripts,
}
# }}}
# YAML {{{
def lint_yaml() -> types.TaskDict:
"""Run YAML linting."""
return {
'name': 'yaml',
'title': utils.title_with_subtask_name('LINT'),
'doc': lint_yaml.__doc__,
'actions': [['tox', '-e', 'lint-yaml']],
'file_dep': [
constants.ROOT/'eve/main.yml',
constants.ROOT/'salt/metalk8s/defaults.yaml'
],
}
# }}}
# Go {{{
def check_go_fmt() -> Optional[doit.exceptions.TaskError]:
"""Check if Go code is properly formatted."""
cwd = constants.STORAGE_OPERATOR_ROOT
cmd = [
config.ExtCommand.GOFMT.value, '-s', '-d',
*tuple(constants.STORAGE_OPERATOR_FMT_ARGS)
]
diff = subprocess.check_output(cmd, cwd=cwd)
if diff:
return doit.exceptions.TaskError(
msg='badly formatted Go code, please run `doit.sh format:go`'
)
return None
def check_go_codegen() -> Optional[doit.exceptions.TaskError]:
"""Check if the generated files are up to date."""
cwd = constants.STORAGE_OPERATOR_ROOT
git_diff = [config.ExtCommand.GIT.value, 'diff']
base = subprocess.check_output(git_diff)
for target in ('k8s', 'crds'):
cmd = [config.ExtCommand.OPERATOR_SDK.value, 'generate', target]
subprocess.check_call(cmd, cwd=cwd)
current = subprocess.check_output(git_diff)
# If the diff changed after running the code generation, that means that
# the generated files are not in sync with the "source" files.
if current != base:
return doit.exceptions.TaskError(
msg='outdated generated Go files, did you run `doit.sh codegen:go`?'
)
return None
def lint_go() -> types.TaskDict:
"""Run Go linting."""
return {
'name': 'go',
'title': utils.title_with_subtask_name('LINT'),
'doc': lint_go.__doc__,
'actions': [check_go_fmt, check_go_codegen],
'task_dep': [
'check_for:gofmt', 'check_for:operator-sdk', 'check_for:git'
],
'file_dep': list(constants.STORAGE_OPERATOR_SOURCES),
}
# }}}
# List of available linter tasks.
LINTERS : Tuple[Callable[[], types.TaskDict], ...] = (
lint_python,
lint_shell,
lint_yaml,
lint_go,
)
__all__ = utils.export_only_tasks(__name__)
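# As illustrated in the module docstring, each linter is exposed as a doit sub-task, so a single tool
# can be run on its own (e.g. `doit lint:python`) or all of them together via the top-level `doit lint`.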
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
celery-sqs/app.py
|
from celery import Celery
from kombu.utils.url import safequote
import os
AWS_ACCESS_KEY = os.getenv('AWS_ACCESS_KEY', 'ABCDEFGHIJKLMNOPQRST')
AWS_SECRET_KEY = os.getenv('AWS_SECRET_KEY', 'ZYXK7NiynG/TogH8Nj+P9nlE73sq3')
AWS_REGION = os.getenv('AWS_REGION', 'ap-northeast-2')
AWS_USER_ID= os.getenv('AWS_USER_ID', '191919191919')
SQS_NAME=os.getenv('SQS_NAME', 'test_sqs_1')
broker_url = "sqs://{AWS_ACCESS_KEY}:{AWS_SECRET_KEY}@".format(
AWS_ACCESS_KEY=safequote(AWS_ACCESS_KEY), AWS_SECRET_KEY=safequote(AWS_SECRET_KEY),
)
broker_transport_options = {
'region': AWS_REGION,
'visibility_timeout': 3600, # 1 hour
'polling_interval': 0.3,
'wait_time_seconds': 15,
'queue_name_prefix': '',
'predefined_queues': {
'test1': {
'url': 'https://{AWS_REGION}.queue.amazonaws.com/{AWS_USER_ID}/{SQS_NAME}'.format(
AWS_REGION=AWS_REGION,
AWS_USER_ID=AWS_USER_ID,
SQS_NAME=SQS_NAME,
),
'access_key_id': AWS_ACCESS_KEY,
'secret_access_key': AWS_SECRET_KEY,
}
}
}
app = Celery('tasks', broker=broker_url, backend='rpc://')
app.conf.broker_transport_options = broker_transport_options  # apply the SQS transport options defined above
@app.task
def add(x, y):
return x + y
if __name__ == '__main__':
r0 = add.delay(2,5)
r1 = add.delay(2,7)
r2 = add.delay(7,5)
r3 = add.delay(2,3)
print(r0.ready())
print(r0.ready())
print(r0.ready())
print(r0.ready())
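# Running this module directly only enqueues a few example tasks; a worker consuming them would
# typically be started separately, e.g.: celery -A app worker --loglevel=info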
|
[] |
[] |
[
"AWS_REGION",
"AWS_USER_ID",
"SQS_NAME",
"AWS_ACCESS_KEY",
"AWS_SECRET_KEY"
] |
[]
|
["AWS_REGION", "AWS_USER_ID", "SQS_NAME", "AWS_ACCESS_KEY", "AWS_SECRET_KEY"]
|
python
| 5 | 0 | |
Lib/lib-tk/Tkinter.py
|
"""Wrapper functions for Tcl/Tk.
Tkinter provides classes which allow the display, positioning and
control of widgets. Toplevel widgets are Tk and Toplevel. Other
widgets are Frame, Label, Entry, Text, Canvas, Button, Radiobutton,
Checkbutton, Scale, Listbox, Scrollbar, OptionMenu, Spinbox
LabelFrame and PanedWindow.
Properties of the widgets are specified with keyword arguments.
Keyword arguments have the same name as the corresponding resource
under Tk.
Widgets are positioned with one of the geometry managers Place, Pack
or Grid. These managers can be called with methods place, pack, grid
available in every Widget.
Actions are bound to events by resources (e.g. keyword argument
command) or with the method bind.
Example (Hello, World):
import Tkinter
from Tkconstants import *
tk = Tkinter.Tk()
frame = Tkinter.Frame(tk, relief=RIDGE, borderwidth=2)
frame.pack(fill=BOTH,expand=1)
label = Tkinter.Label(frame, text="Hello, World")
label.pack(fill=X, expand=1)
button = Tkinter.Button(frame,text="Exit",command=tk.destroy)
button.pack(side=BOTTOM)
tk.mainloop()
"""
__version__ = "$Revision: 81008 $"
import sys
if sys.platform == "win32":
# Attempt to configure Tcl/Tk without requiring PATH
import FixTk
import _tkinter # If this fails your Python may not be configured for Tk
tkinter = _tkinter # b/w compat for export
TclError = _tkinter.TclError
from types import *
from Tkconstants import *
wantobjects = 1
TkVersion = float(_tkinter.TK_VERSION)
TclVersion = float(_tkinter.TCL_VERSION)
READABLE = _tkinter.READABLE
WRITABLE = _tkinter.WRITABLE
EXCEPTION = _tkinter.EXCEPTION
# These are not always defined, e.g. not on Win32 with Tk 8.0 :-(
try: _tkinter.createfilehandler
except AttributeError: _tkinter.createfilehandler = None
try: _tkinter.deletefilehandler
except AttributeError: _tkinter.deletefilehandler = None
def _flatten(tuple):
"""Internal function."""
res = ()
for item in tuple:
if type(item) in (TupleType, ListType):
res = res + _flatten(item)
elif item is not None:
res = res + (item,)
return res
try: _flatten = _tkinter._flatten
except AttributeError: pass
def _cnfmerge(cnfs):
"""Internal function."""
if type(cnfs) is DictionaryType:
return cnfs
elif type(cnfs) in (NoneType, StringType):
return cnfs
else:
cnf = {}
for c in _flatten(cnfs):
try:
cnf.update(c)
except (AttributeError, TypeError), msg:
print "_cnfmerge: fallback due to:", msg
for k, v in c.items():
cnf[k] = v
return cnf
try: _cnfmerge = _tkinter._cnfmerge
except AttributeError: pass
class Event:
"""Container for the properties of an event.
Instances of this type are generated if one of the following events occurs:
KeyPress, KeyRelease - for keyboard events
ButtonPress, ButtonRelease, Motion, Enter, Leave, MouseWheel - for mouse events
Visibility, Unmap, Map, Expose, FocusIn, FocusOut, Circulate,
Colormap, Gravity, Reparent, Property, Destroy, Activate,
Deactivate - for window events.
If a callback function for one of these events is registered
using bind, bind_all, bind_class, or tag_bind, the callback is
called with an Event as first argument. It will have the
following attributes (in braces are the event types for which
the attribute is valid):
serial - serial number of event
num - mouse button pressed (ButtonPress, ButtonRelease)
focus - whether the window has the focus (Enter, Leave)
height - height of the exposed window (Configure, Expose)
width - width of the exposed window (Configure, Expose)
keycode - keycode of the pressed key (KeyPress, KeyRelease)
state - state of the event as a number (ButtonPress, ButtonRelease,
Enter, KeyPress, KeyRelease,
Leave, Motion)
state - state as a string (Visibility)
time - when the event occurred
x - x-position of the mouse
y - y-position of the mouse
x_root - x-position of the mouse on the screen
(ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)
y_root - y-position of the mouse on the screen
(ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)
char - pressed character (KeyPress, KeyRelease)
send_event - see X/Windows documentation
keysym - keysym of the event as a string (KeyPress, KeyRelease)
keysym_num - keysym of the event as a number (KeyPress, KeyRelease)
type - type of the event as a number
widget - widget in which the event occurred
delta - delta of wheel movement (MouseWheel)
"""
pass
_support_default_root = 1
_default_root = None
def NoDefaultRoot():
"""Inhibit setting of default root window.
Call this function to inhibit that the first instance of
Tk is used for windows without an explicit parent window.
"""
global _support_default_root
_support_default_root = 0
global _default_root
_default_root = None
del _default_root
def _tkerror(err):
"""Internal function."""
pass
def _exit(code='0'):
"""Internal function. Calling it will throw the exception SystemExit."""
raise SystemExit, code
_varnum = 0
class Variable:
"""Class to define value holders for e.g. buttons.
Subclasses StringVar, IntVar, DoubleVar, BooleanVar are specializations
that constrain the type of the value returned from get()."""
_default = ""
def __init__(self, master=None, value=None, name=None):
"""Construct a variable
MASTER can be given as master widget.
VALUE is an optional value (defaults to "")
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
global _varnum
if not master:
master = _default_root
self._master = master
self._tk = master.tk
if name:
self._name = name
else:
self._name = 'PY_VAR' + repr(_varnum)
_varnum += 1
if value is not None:
self.set(value)
elif not self._tk.call("info", "exists", self._name):
self.set(self._default)
def __del__(self):
"""Unset the variable in Tcl."""
self._tk.globalunsetvar(self._name)
def __str__(self):
"""Return the name of the variable in Tcl."""
return self._name
def set(self, value):
"""Set the variable to VALUE."""
return self._tk.globalsetvar(self._name, value)
def get(self):
"""Return value of variable."""
return self._tk.globalgetvar(self._name)
def trace_variable(self, mode, callback):
"""Define a trace callback for the variable.
MODE is one of "r", "w", "u" for read, write, undefine.
CALLBACK must be a function which is called when
the variable is read, written or undefined.
Return the name of the callback.
"""
cbname = self._master._register(callback)
self._tk.call("trace", "variable", self._name, mode, cbname)
return cbname
trace = trace_variable
def trace_vdelete(self, mode, cbname):
"""Delete the trace callback for a variable.
MODE is one of "r", "w", "u" for read, write, undefine.
CBNAME is the name of the callback returned from trace_variable or trace.
"""
self._tk.call("trace", "vdelete", self._name, mode, cbname)
self._master.deletecommand(cbname)
def trace_vinfo(self):
"""Return all trace callback information."""
return map(self._tk.split, self._tk.splitlist(
self._tk.call("trace", "vinfo", self._name)))
def __eq__(self, other):
"""Comparison for equality (==).
Note: if the Variable's master matters to behavior
also compare self._master == other._master
"""
return self.__class__.__name__ == other.__class__.__name__ \
and self._name == other._name
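# A minimal usage sketch (assuming a Tk root window already exists): v = Variable(root); v.set("spam");
# cbname = v.trace("w", callback); ...; v.trace_vdelete("w", cbname)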
class StringVar(Variable):
"""Value holder for strings variables."""
_default = ""
def __init__(self, master=None, value=None, name=None):
"""Construct a string variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to "")
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def get(self):
"""Return value of variable as string."""
value = self._tk.globalgetvar(self._name)
if isinstance(value, basestring):
return value
return str(value)
class IntVar(Variable):
"""Value holder for integer variables."""
_default = 0
def __init__(self, master=None, value=None, name=None):
"""Construct an integer variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to 0)
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def set(self, value):
"""Set the variable to value, converting booleans to integers."""
if isinstance(value, bool):
value = int(value)
return Variable.set(self, value)
def get(self):
"""Return the value of the variable as an integer."""
return getint(self._tk.globalgetvar(self._name))
class DoubleVar(Variable):
"""Value holder for float variables."""
_default = 0.0
def __init__(self, master=None, value=None, name=None):
"""Construct a float variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to 0.0)
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def get(self):
"""Return the value of the variable as a float."""
return getdouble(self._tk.globalgetvar(self._name))
class BooleanVar(Variable):
"""Value holder for boolean variables."""
_default = False
def __init__(self, master=None, value=None, name=None):
"""Construct a boolean variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to False)
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def get(self):
"""Return the value of the variable as a bool."""
return self._tk.getboolean(self._tk.globalgetvar(self._name))
def mainloop(n=0):
"""Run the main loop of Tcl."""
_default_root.tk.mainloop(n)
getint = int
getdouble = float
def getboolean(s):
"""Convert true and false to integer values 1 and 0."""
return _default_root.tk.getboolean(s)
# Methods defined on both toplevel and interior widgets
class Misc:
"""Internal class.
Base class which defines methods common for interior widgets."""
# XXX font command?
_tclCommands = None
def destroy(self):
"""Internal function.
Delete all Tcl commands created for
this widget in the Tcl interpreter."""
if self._tclCommands is not None:
for name in self._tclCommands:
#print '- Tkinter: deleted command', name
self.tk.deletecommand(name)
self._tclCommands = None
def deletecommand(self, name):
"""Internal function.
Delete the Tcl command provided in NAME."""
#print '- Tkinter: deleted command', name
self.tk.deletecommand(name)
try:
self._tclCommands.remove(name)
except ValueError:
pass
def tk_strictMotif(self, boolean=None):
"""Set Tcl internal variable, whether the look and feel
should adhere to Motif.
A parameter of 1 means adhere to Motif (e.g. no color
change if mouse passes over slider).
Returns the set value."""
return self.tk.getboolean(self.tk.call(
'set', 'tk_strictMotif', boolean))
def tk_bisque(self):
"""Change the color scheme to light brown as used in Tk 3.6 and before."""
self.tk.call('tk_bisque')
def tk_setPalette(self, *args, **kw):
"""Set a new color scheme for all widget elements.
A single color as argument will cause that all colors of Tk
widget elements are derived from this.
Alternatively several keyword parameters and its associated
colors can be given. The following keywords are valid:
activeBackground, foreground, selectColor,
activeForeground, highlightBackground, selectBackground,
background, highlightColor, selectForeground,
disabledForeground, insertBackground, troughColor."""
self.tk.call(('tk_setPalette',)
+ _flatten(args) + _flatten(kw.items()))
def tk_menuBar(self, *args):
"""Do not use. Needed in Tk 3.6 and earlier."""
pass # obsolete since Tk 4.0
def wait_variable(self, name='PY_VAR'):
"""Wait until the variable is modified.
A parameter of type IntVar, StringVar, DoubleVar or
BooleanVar must be given."""
self.tk.call('tkwait', 'variable', name)
waitvar = wait_variable # XXX b/w compat
def wait_window(self, window=None):
"""Wait until a WIDGET is destroyed.
If no parameter is given self is used."""
if window is None:
window = self
self.tk.call('tkwait', 'window', window._w)
def wait_visibility(self, window=None):
"""Wait until the visibility of a WIDGET changes
(e.g. it appears).
If no parameter is given self is used."""
if window is None:
window = self
self.tk.call('tkwait', 'visibility', window._w)
def setvar(self, name='PY_VAR', value='1'):
"""Set Tcl variable NAME to VALUE."""
self.tk.setvar(name, value)
def getvar(self, name='PY_VAR'):
"""Return value of Tcl variable NAME."""
return self.tk.getvar(name)
getint = int
getdouble = float
def getboolean(self, s):
"""Return a boolean value for Tcl boolean values true and false given as parameter."""
return self.tk.getboolean(s)
def focus_set(self):
"""Direct input focus to this widget.
If the application currently does not have the focus
this widget will get the focus if the application gets
the focus through the window manager."""
self.tk.call('focus', self._w)
focus = focus_set # XXX b/w compat?
def focus_force(self):
"""Direct input focus to this widget even if the
application does not have the focus. Use with
caution!"""
self.tk.call('focus', '-force', self._w)
def focus_get(self):
"""Return the widget which has currently the focus in the
application.
Use focus_displayof to allow working with several
displays. Return None if application does not have
the focus."""
name = self.tk.call('focus')
if name == 'none' or not name: return None
return self._nametowidget(name)
def focus_displayof(self):
"""Return the widget which has currently the focus on the
display where this widget is located.
Return None if the application does not have the focus."""
name = self.tk.call('focus', '-displayof', self._w)
if name == 'none' or not name: return None
return self._nametowidget(name)
def focus_lastfor(self):
"""Return the widget which would have the focus if top level
for this widget gets the focus from the window manager."""
name = self.tk.call('focus', '-lastfor', self._w)
if name == 'none' or not name: return None
return self._nametowidget(name)
def tk_focusFollowsMouse(self):
"""The widget under mouse will get automatically focus. Can not
be disabled easily."""
self.tk.call('tk_focusFollowsMouse')
def tk_focusNext(self):
"""Return the next widget in the focus order which follows
widget which has currently the focus.
The focus order first goes to the next child, then to
the children of the child recursively and then to the
next sibling which is higher in the stacking order. A
widget is omitted if it has the takefocus resource set
to 0."""
name = self.tk.call('tk_focusNext', self._w)
if not name: return None
return self._nametowidget(name)
def tk_focusPrev(self):
"""Return previous widget in the focus order. See tk_focusNext for details."""
name = self.tk.call('tk_focusPrev', self._w)
if not name: return None
return self._nametowidget(name)
def after(self, ms, func=None, *args):
"""Call function once after given time.
MS specifies the time in milliseconds. FUNC gives the
function which shall be called. Additional parameters
are given as parameters to the function call. Return
identifier to cancel scheduling with after_cancel."""
if not func:
# I'd rather use time.sleep(ms*0.001)
self.tk.call('after', ms)
else:
def callit():
try:
func(*args)
finally:
try:
self.deletecommand(name)
except TclError:
pass
name = self._register(callit)
return self.tk.call('after', ms, name)
def after_idle(self, func, *args):
"""Call FUNC once if the Tcl main loop has no event to
process.
Return an identifier to cancel the scheduling with
after_cancel."""
return self.after('idle', func, *args)
def after_cancel(self, id):
"""Cancel scheduling of function identified with ID.
Identifier returned by after or after_idle must be
given as first parameter."""
try:
data = self.tk.call('after', 'info', id)
# In Tk 8.3, splitlist returns: (script, type)
# In Tk 8.4, splitlist may return (script, type) or (script,)
script = self.tk.splitlist(data)[0]
self.deletecommand(script)
except TclError:
pass
self.tk.call('after', 'cancel', id)
def bell(self, displayof=0):
"""Ring a display's bell."""
self.tk.call(('bell',) + self._displayof(displayof))
# Clipboard handling:
def clipboard_get(self, **kw):
"""Retrieve data from the clipboard on window's display.
The window keyword defaults to the root window of the Tkinter
application.
The type keyword specifies the form in which the data is
to be returned and should be an atom name such as STRING
or FILE_NAME. Type defaults to STRING, except on X11, where the default
is to try UTF8_STRING and fall back to STRING.
This command is equivalent to:
selection_get(CLIPBOARD)
"""
if 'type' not in kw and self._windowingsystem == 'x11':
try:
kw['type'] = 'UTF8_STRING'
return self.tk.call(('clipboard', 'get') + self._options(kw))
except TclError:
del kw['type']
return self.tk.call(('clipboard', 'get') + self._options(kw))
def clipboard_clear(self, **kw):
"""Clear the data in the Tk clipboard.
A widget specified for the optional displayof keyword
argument specifies the target display."""
if 'displayof' not in kw: kw['displayof'] = self._w
self.tk.call(('clipboard', 'clear') + self._options(kw))
def clipboard_append(self, string, **kw):
"""Append STRING to the Tk clipboard.
A widget specified at the optional displayof keyword
argument specifies the target display. The clipboard
can be retrieved with selection_get."""
if 'displayof' not in kw: kw['displayof'] = self._w
self.tk.call(('clipboard', 'append') + self._options(kw)
+ ('--', string))
# XXX grab current w/o window argument
def grab_current(self):
"""Return widget which has currently the grab in this application
or None."""
name = self.tk.call('grab', 'current', self._w)
if not name: return None
return self._nametowidget(name)
def grab_release(self):
"""Release grab for this widget if currently set."""
self.tk.call('grab', 'release', self._w)
def grab_set(self):
"""Set grab for this widget.
A grab directs all events to this and descendant
widgets in the application."""
self.tk.call('grab', 'set', self._w)
def grab_set_global(self):
"""Set global grab for this widget.
A global grab directs all events to this and
descendant widgets on the display. Use with caution -
other applications do not get events anymore."""
self.tk.call('grab', 'set', '-global', self._w)
def grab_status(self):
"""Return None, "local" or "global" if this widget has
no, a local or a global grab."""
status = self.tk.call('grab', 'status', self._w)
if status == 'none': status = None
return status
def option_add(self, pattern, value, priority = None):
"""Set a VALUE (second parameter) for an option
PATTERN (first parameter).
An optional third parameter gives the numeric priority
(defaults to 80)."""
self.tk.call('option', 'add', pattern, value, priority)
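    # Option-database sketch (illustrative; the pattern and priority shown
    # are arbitrary examples, not defaults):
    #
    #     root.option_add("*Entry.background", "white", 60)
    #     # Widgets created afterwards pick the value up; it can also be
    #     # queried explicitly for an existing widget:
    #     value = entry.option_get("background", "Background")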
def option_clear(self):
"""Clear the option database.
It will be reloaded if option_add is called."""
self.tk.call('option', 'clear')
def option_get(self, name, className):
"""Return the value for an option NAME for this widget
with CLASSNAME.
Values with higher priority override lower values."""
return self.tk.call('option', 'get', self._w, name, className)
def option_readfile(self, fileName, priority = None):
"""Read file FILENAME into the option database.
An optional second parameter gives the numeric
priority."""
self.tk.call('option', 'readfile', fileName, priority)
def selection_clear(self, **kw):
"""Clear the current X selection."""
if 'displayof' not in kw: kw['displayof'] = self._w
self.tk.call(('selection', 'clear') + self._options(kw))
def selection_get(self, **kw):
"""Return the contents of the current X selection.
A keyword parameter selection specifies the name of
the selection and defaults to PRIMARY. A keyword
parameter displayof specifies a widget on the display
to use. A keyword parameter type specifies the form of data to be
fetched, defaulting to STRING except on X11, where UTF8_STRING is tried
before STRING."""
if 'displayof' not in kw: kw['displayof'] = self._w
if 'type' not in kw and self._windowingsystem == 'x11':
try:
kw['type'] = 'UTF8_STRING'
return self.tk.call(('selection', 'get') + self._options(kw))
except TclError:
del kw['type']
return self.tk.call(('selection', 'get') + self._options(kw))
def selection_handle(self, command, **kw):
"""Specify a function COMMAND to call if the X
selection owned by this widget is queried by another
application.
This function must return the contents of the
selection. The function will be called with the
arguments OFFSET and LENGTH which allows the chunking
of very long selections. The following keyword
parameters can be provided:
selection - name of the selection (default PRIMARY),
type - type of the selection (e.g. STRING, FILE_NAME)."""
name = self._register(command)
self.tk.call(('selection', 'handle') + self._options(kw)
+ (self._w, name))
def selection_own(self, **kw):
"""Become owner of X selection.
A keyword parameter selection specifies the name of
the selection (default PRIMARY)."""
self.tk.call(('selection', 'own') +
self._options(kw) + (self._w,))
def selection_own_get(self, **kw):
"""Return owner of X selection.
The following keyword parameter can
be provided:
selection - name of the selection (default PRIMARY),
type - type of the selection (e.g. STRING, FILE_NAME)."""
if 'displayof' not in kw: kw['displayof'] = self._w
name = self.tk.call(('selection', 'own') + self._options(kw))
if not name: return None
return self._nametowidget(name)
def send(self, interp, cmd, *args):
"""Send Tcl command CMD to different interpreter INTERP to be executed."""
return self.tk.call(('send', interp, cmd) + args)
def lower(self, belowThis=None):
"""Lower this widget in the stacking order."""
self.tk.call('lower', self._w, belowThis)
def tkraise(self, aboveThis=None):
"""Raise this widget in the stacking order."""
self.tk.call('raise', self._w, aboveThis)
lift = tkraise
def colormodel(self, value=None):
"""Useless. Not implemented in Tk."""
return self.tk.call('tk', 'colormodel', self._w, value)
def winfo_atom(self, name, displayof=0):
"""Return integer which represents atom NAME."""
args = ('winfo', 'atom') + self._displayof(displayof) + (name,)
return getint(self.tk.call(args))
def winfo_atomname(self, id, displayof=0):
"""Return name of atom with identifier ID."""
args = ('winfo', 'atomname') \
+ self._displayof(displayof) + (id,)
return self.tk.call(args)
def winfo_cells(self):
"""Return number of cells in the colormap for this widget."""
return getint(
self.tk.call('winfo', 'cells', self._w))
def winfo_children(self):
"""Return a list of all widgets which are children of this widget."""
result = []
for child in self.tk.splitlist(
self.tk.call('winfo', 'children', self._w)):
try:
# Tcl sometimes returns extra windows, e.g. for
# menus; those need to be skipped
result.append(self._nametowidget(child))
except KeyError:
pass
return result
def winfo_class(self):
"""Return window class name of this widget."""
return self.tk.call('winfo', 'class', self._w)
def winfo_colormapfull(self):
"""Return true if at the last color request the colormap was full."""
return self.tk.getboolean(
self.tk.call('winfo', 'colormapfull', self._w))
def winfo_containing(self, rootX, rootY, displayof=0):
"""Return the widget which is at the root coordinates ROOTX, ROOTY."""
args = ('winfo', 'containing') \
+ self._displayof(displayof) + (rootX, rootY)
name = self.tk.call(args)
if not name: return None
return self._nametowidget(name)
def winfo_depth(self):
"""Return the number of bits per pixel."""
return getint(self.tk.call('winfo', 'depth', self._w))
def winfo_exists(self):
"""Return true if this widget exists."""
return getint(
self.tk.call('winfo', 'exists', self._w))
def winfo_fpixels(self, number):
"""Return the number of pixels for the given distance NUMBER
(e.g. "3c") as float."""
return getdouble(self.tk.call(
'winfo', 'fpixels', self._w, number))
def winfo_geometry(self):
"""Return geometry string for this widget in the form "widthxheight+X+Y"."""
return self.tk.call('winfo', 'geometry', self._w)
def winfo_height(self):
"""Return height of this widget."""
return getint(
self.tk.call('winfo', 'height', self._w))
def winfo_id(self):
"""Return identifier ID for this widget."""
return self.tk.getint(
self.tk.call('winfo', 'id', self._w))
def winfo_interps(self, displayof=0):
"""Return the name of all Tcl interpreters for this display."""
args = ('winfo', 'interps') + self._displayof(displayof)
return self.tk.splitlist(self.tk.call(args))
def winfo_ismapped(self):
"""Return true if this widget is mapped."""
return getint(
self.tk.call('winfo', 'ismapped', self._w))
def winfo_manager(self):
"""Return the window mananger name for this widget."""
return self.tk.call('winfo', 'manager', self._w)
def winfo_name(self):
"""Return the name of this widget."""
return self.tk.call('winfo', 'name', self._w)
def winfo_parent(self):
"""Return the name of the parent of this widget."""
return self.tk.call('winfo', 'parent', self._w)
def winfo_pathname(self, id, displayof=0):
"""Return the pathname of the widget given by ID."""
args = ('winfo', 'pathname') \
+ self._displayof(displayof) + (id,)
return self.tk.call(args)
def winfo_pixels(self, number):
"""Rounded integer value of winfo_fpixels."""
return getint(
self.tk.call('winfo', 'pixels', self._w, number))
def winfo_pointerx(self):
"""Return the x coordinate of the pointer on the root window."""
return getint(
self.tk.call('winfo', 'pointerx', self._w))
def winfo_pointerxy(self):
"""Return a tuple of x and y coordinates of the pointer on the root window."""
return self._getints(
self.tk.call('winfo', 'pointerxy', self._w))
def winfo_pointery(self):
"""Return the y coordinate of the pointer on the root window."""
return getint(
self.tk.call('winfo', 'pointery', self._w))
def winfo_reqheight(self):
"""Return requested height of this widget."""
return getint(
self.tk.call('winfo', 'reqheight', self._w))
def winfo_reqwidth(self):
"""Return requested width of this widget."""
return getint(
self.tk.call('winfo', 'reqwidth', self._w))
def winfo_rgb(self, color):
"""Return tuple of decimal values for red, green, blue for
COLOR in this widget."""
return self._getints(
self.tk.call('winfo', 'rgb', self._w, color))
def winfo_rootx(self):
"""Return x coordinate of upper left corner of this widget on the
root window."""
return getint(
self.tk.call('winfo', 'rootx', self._w))
def winfo_rooty(self):
"""Return y coordinate of upper left corner of this widget on the
root window."""
return getint(
self.tk.call('winfo', 'rooty', self._w))
def winfo_screen(self):
"""Return the screen name of this widget."""
return self.tk.call('winfo', 'screen', self._w)
def winfo_screencells(self):
"""Return the number of the cells in the colormap of the screen
of this widget."""
return getint(
self.tk.call('winfo', 'screencells', self._w))
def winfo_screendepth(self):
"""Return the number of bits per pixel of the root window of the
screen of this widget."""
return getint(
self.tk.call('winfo', 'screendepth', self._w))
def winfo_screenheight(self):
"""Return the number of pixels of the height of the screen of this widget
in pixel."""
return getint(
self.tk.call('winfo', 'screenheight', self._w))
def winfo_screenmmheight(self):
"""Return the number of pixels of the height of the screen of
this widget in mm."""
return getint(
self.tk.call('winfo', 'screenmmheight', self._w))
def winfo_screenmmwidth(self):
"""Return the number of pixels of the width of the screen of
this widget in mm."""
return getint(
self.tk.call('winfo', 'screenmmwidth', self._w))
def winfo_screenvisual(self):
"""Return one of the strings directcolor, grayscale, pseudocolor,
staticcolor, staticgray, or truecolor for the default
colormodel of this screen."""
return self.tk.call('winfo', 'screenvisual', self._w)
def winfo_screenwidth(self):
"""Return the number of pixels of the width of the screen of
this widget in pixel."""
return getint(
self.tk.call('winfo', 'screenwidth', self._w))
def winfo_server(self):
"""Return information of the X-Server of the screen of this widget in
the form "XmajorRminor vendor vendorVersion"."""
return self.tk.call('winfo', 'server', self._w)
def winfo_toplevel(self):
"""Return the toplevel widget of this widget."""
return self._nametowidget(self.tk.call(
'winfo', 'toplevel', self._w))
def winfo_viewable(self):
"""Return true if the widget and all its higher ancestors are mapped."""
return getint(
self.tk.call('winfo', 'viewable', self._w))
def winfo_visual(self):
"""Return one of the strings directcolor, grayscale, pseudocolor,
staticcolor, staticgray, or truecolor for the
colormodel of this widget."""
return self.tk.call('winfo', 'visual', self._w)
def winfo_visualid(self):
"""Return the X identifier for the visual for this widget."""
return self.tk.call('winfo', 'visualid', self._w)
def winfo_visualsavailable(self, includeids=0):
"""Return a list of all visuals available for the screen
of this widget.
Each item in the list consists of a visual name (see winfo_visual), a
depth and if INCLUDEIDS=1 is given also the X identifier."""
data = self.tk.split(
self.tk.call('winfo', 'visualsavailable', self._w,
includeids and 'includeids' or None))
if type(data) is StringType:
data = [self.tk.split(data)]
return map(self.__winfo_parseitem, data)
def __winfo_parseitem(self, t):
"""Internal function."""
return t[:1] + tuple(map(self.__winfo_getint, t[1:]))
def __winfo_getint(self, x):
"""Internal function."""
return int(x, 0)
def winfo_vrootheight(self):
"""Return the height of the virtual root window associated with this
widget in pixels. If there is no virtual root window return the
height of the screen."""
return getint(
self.tk.call('winfo', 'vrootheight', self._w))
def winfo_vrootwidth(self):
"""Return the width of the virtual root window associated with this
widget in pixel. If there is no virtual root window return the
width of the screen."""
return getint(
self.tk.call('winfo', 'vrootwidth', self._w))
def winfo_vrootx(self):
"""Return the x offset of the virtual root relative to the root
window of the screen of this widget."""
return getint(
self.tk.call('winfo', 'vrootx', self._w))
def winfo_vrooty(self):
"""Return the y offset of the virtual root relative to the root
window of the screen of this widget."""
return getint(
self.tk.call('winfo', 'vrooty', self._w))
def winfo_width(self):
"""Return the width of this widget."""
return getint(
self.tk.call('winfo', 'width', self._w))
def winfo_x(self):
"""Return the x coordinate of the upper left corner of this widget
in the parent."""
return getint(
self.tk.call('winfo', 'x', self._w))
def winfo_y(self):
"""Return the y coordinate of the upper left corner of this widget
in the parent."""
return getint(
self.tk.call('winfo', 'y', self._w))
def update(self):
"""Enter event loop until all pending events have been processed by Tcl."""
self.tk.call('update')
def update_idletasks(self):
"""Enter event loop until all idle callbacks have been called. This
will update the display of windows but not process events caused by
the user."""
self.tk.call('update', 'idletasks')
def bindtags(self, tagList=None):
"""Set or get the list of bindtags for this widget.
With no argument return the list of all bindtags associated with
this widget. With a list of strings as argument the bindtags are
set to this list. The bindtags determine in which order events are
processed (see bind)."""
if tagList is None:
return self.tk.splitlist(
self.tk.call('bindtags', self._w))
else:
self.tk.call('bindtags', self._w, tagList)
def _bind(self, what, sequence, func, add, needcleanup=1):
"""Internal function."""
if type(func) is StringType:
self.tk.call(what + (sequence, func))
elif func:
funcid = self._register(func, self._substitute,
needcleanup)
cmd = ('%sif {"[%s %s]" == "break"} break\n'
%
(add and '+' or '',
funcid, self._subst_format_str))
self.tk.call(what + (sequence, cmd))
return funcid
elif sequence:
return self.tk.call(what + (sequence,))
else:
return self.tk.splitlist(self.tk.call(what))
def bind(self, sequence=None, func=None, add=None):
"""Bind to this widget at event SEQUENCE a call to function FUNC.
SEQUENCE is a string of concatenated event
patterns. An event pattern is of the form
<MODIFIER-MODIFIER-TYPE-DETAIL> where MODIFIER is one
of Control, Mod2, M2, Shift, Mod3, M3, Lock, Mod4, M4,
Button1, B1, Mod5, M5 Button2, B2, Meta, M, Button3,
B3, Alt, Button4, B4, Double, Button5, B5 Triple,
Mod1, M1. TYPE is one of Activate, Enter, Map,
ButtonPress, Button, Expose, Motion, ButtonRelease
FocusIn, MouseWheel, Circulate, FocusOut, Property,
Colormap, Gravity Reparent, Configure, KeyPress, Key,
Unmap, Deactivate, KeyRelease Visibility, Destroy,
Leave and DETAIL is the button number for ButtonPress,
ButtonRelease and DETAIL is the Keysym for KeyPress and
KeyRelease. Examples are
<Control-Button-1> for pressing Control and mouse button 1 or
<Alt-A> for pressing A and the Alt key (KeyPress can be omitted).
An event pattern can also be a virtual event of the form
<<AString>> where AString can be arbitrary. This
event can be generated by event_generate.
If events are concatenated they must appear shortly
after each other.
FUNC will be called if the event sequence occurs with an
instance of Event as argument. If the return value of FUNC is
"break" no further bound function is invoked.
An additional boolean parameter ADD specifies whether FUNC will
be called additionally to the other bound function or whether
it will replace the previous function.
Bind will return an identifier to allow deletion of the bound function with
unbind without memory leak.
If FUNC or SEQUENCE is omitted the bound function or list
of bound events are returned."""
return self._bind(('bind', self._w), sequence, func, add)
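    # A small binding sketch (illustrative; the widget and handler names are
    # placeholders).  The handler receives an Event instance built by
    # _substitute():
    #
    #     def on_click(event):
    #         print "clicked at", event.x, event.y
    #
    #     canvas.bind("<Button-1>", on_click)            # replace any old binding
    #     canvas.bind("<Button-1>", log_click, add="+")  # keep existing bindings
    #     funcid = canvas.bind("<Key>", on_key)
    #     canvas.unbind("<Key>", funcid)                 # also deletes the Tcl command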
def unbind(self, sequence, funcid=None):
"""Unbind for this widget for event SEQUENCE the
function identified with FUNCID."""
self.tk.call('bind', self._w, sequence, '')
if funcid:
self.deletecommand(funcid)
def bind_all(self, sequence=None, func=None, add=None):
"""Bind to all widgets at an event SEQUENCE a call to function FUNC.
An additional boolean parameter ADD specifies whether FUNC will
be called additionally to the other bound function or whether
it will replace the previous function. See bind for the return value."""
return self._bind(('bind', 'all'), sequence, func, add, 0)
def unbind_all(self, sequence):
"""Unbind for all widgets for event SEQUENCE all functions."""
self.tk.call('bind', 'all' , sequence, '')
def bind_class(self, className, sequence=None, func=None, add=None):
"""Bind to widgets with bindtag CLASSNAME at event
SEQUENCE a call of function FUNC. An additional
boolean parameter ADD specifies whether FUNC will be
called additionally to the other bound function or
whether it will replace the previous function. See bind for
the return value."""
return self._bind(('bind', className), sequence, func, add, 0)
def unbind_class(self, className, sequence):
"""Unbind for a all widgets with bindtag CLASSNAME for event SEQUENCE
all functions."""
self.tk.call('bind', className , sequence, '')
def mainloop(self, n=0):
"""Call the mainloop of Tk."""
self.tk.mainloop(n)
def quit(self):
"""Quit the Tcl interpreter. All widgets will be destroyed."""
self.tk.quit()
def _getints(self, string):
"""Internal function."""
if string:
return tuple(map(getint, self.tk.splitlist(string)))
def _getdoubles(self, string):
"""Internal function."""
if string:
return tuple(map(getdouble, self.tk.splitlist(string)))
def _getboolean(self, string):
"""Internal function."""
if string:
return self.tk.getboolean(string)
def _displayof(self, displayof):
"""Internal function."""
if displayof:
return ('-displayof', displayof)
if displayof is None:
return ('-displayof', self._w)
return ()
@property
def _windowingsystem(self):
"""Internal function."""
try:
return self._root()._windowingsystem_cached
except AttributeError:
ws = self._root()._windowingsystem_cached = \
self.tk.call('tk', 'windowingsystem')
return ws
def _options(self, cnf, kw = None):
"""Internal function."""
if kw:
cnf = _cnfmerge((cnf, kw))
else:
cnf = _cnfmerge(cnf)
res = ()
for k, v in cnf.items():
if v is not None:
if k[-1] == '_': k = k[:-1]
if hasattr(v, '__call__'):
v = self._register(v)
elif isinstance(v, (tuple, list)):
nv = []
for item in v:
if not isinstance(item, (basestring, int)):
break
elif isinstance(item, int):
nv.append('%d' % item)
else:
# format it to proper Tcl code if it contains space
nv.append(('{%s}' if ' ' in item else '%s') % item)
else:
v = ' '.join(nv)
res = res + ('-'+k, v)
return res
def nametowidget(self, name):
"""Return the Tkinter instance of a widget identified by
its Tcl name NAME."""
name = str(name).split('.')
w = self
if not name[0]:
w = w._root()
name = name[1:]
for n in name:
if not n:
break
w = w.children[n]
return w
_nametowidget = nametowidget
def _register(self, func, subst=None, needcleanup=1):
"""Return a newly created Tcl function. If this
function is called, the Python function FUNC will
be executed. An optional function SUBST can
be given which will be executed before FUNC."""
f = CallWrapper(func, subst, self).__call__
name = repr(id(f))
try:
func = func.im_func
except AttributeError:
pass
try:
name = name + func.__name__
except AttributeError:
pass
self.tk.createcommand(name, f)
if needcleanup:
if self._tclCommands is None:
self._tclCommands = []
self._tclCommands.append(name)
return name
register = _register
def _root(self):
"""Internal function."""
w = self
while w.master: w = w.master
return w
_subst_format = ('%#', '%b', '%f', '%h', '%k',
'%s', '%t', '%w', '%x', '%y',
'%A', '%E', '%K', '%N', '%W', '%T', '%X', '%Y', '%D')
_subst_format_str = " ".join(_subst_format)
def _substitute(self, *args):
"""Internal function."""
if len(args) != len(self._subst_format): return args
getboolean = self.tk.getboolean
getint = int
def getint_event(s):
"""Tk changed behavior in 8.4.2, returning "??" rather more often."""
try:
return int(s)
except ValueError:
return s
nsign, b, f, h, k, s, t, w, x, y, A, E, K, N, W, T, X, Y, D = args
# Missing: (a, c, d, m, o, v, B, R)
e = Event()
        # serial field: valid for all events
# number of button: ButtonPress and ButtonRelease events only
# height field: Configure, ConfigureRequest, Create,
# ResizeRequest, and Expose events only
# keycode field: KeyPress and KeyRelease events only
# time field: "valid for events that contain a time field"
# width field: Configure, ConfigureRequest, Create, ResizeRequest,
# and Expose events only
# x field: "valid for events that contain a x field"
# y field: "valid for events that contain a y field"
# keysym as decimal: KeyPress and KeyRelease events only
# x_root, y_root fields: ButtonPress, ButtonRelease, KeyPress,
        # KeyRelease, and Motion events
e.serial = getint(nsign)
e.num = getint_event(b)
try: e.focus = getboolean(f)
except TclError: pass
e.height = getint_event(h)
e.keycode = getint_event(k)
e.state = getint_event(s)
e.time = getint_event(t)
e.width = getint_event(w)
e.x = getint_event(x)
e.y = getint_event(y)
e.char = A
try: e.send_event = getboolean(E)
except TclError: pass
e.keysym = K
e.keysym_num = getint_event(N)
e.type = T
try:
e.widget = self._nametowidget(W)
except KeyError:
e.widget = W
e.x_root = getint_event(X)
e.y_root = getint_event(Y)
try:
e.delta = getint(D)
except ValueError:
e.delta = 0
return (e,)
def _report_exception(self):
"""Internal function."""
import sys
exc, val, tb = sys.exc_type, sys.exc_value, sys.exc_traceback
root = self._root()
root.report_callback_exception(exc, val, tb)
def _configure(self, cmd, cnf, kw):
"""Internal function."""
if kw:
cnf = _cnfmerge((cnf, kw))
elif cnf:
cnf = _cnfmerge(cnf)
if cnf is None:
cnf = {}
for x in self.tk.split(
self.tk.call(_flatten((self._w, cmd)))):
cnf[x[0][1:]] = (x[0][1:],) + x[1:]
return cnf
if type(cnf) is StringType:
x = self.tk.split(
self.tk.call(_flatten((self._w, cmd, '-'+cnf))))
return (x[0][1:],) + x[1:]
self.tk.call(_flatten((self._w, cmd)) + self._options(cnf))
# These used to be defined in Widget:
def configure(self, cnf=None, **kw):
"""Configure resources of a widget.
The values for resources are specified as keyword
arguments. To get an overview about
the allowed keyword arguments call the method keys.
"""
return self._configure('configure', cnf, kw)
config = configure
def cget(self, key):
"""Return the resource value for a KEY given as string."""
return self.tk.call(self._w, 'cget', '-' + key)
__getitem__ = cget
def __setitem__(self, key, value):
self.configure({key: value})
def __contains__(self, key):
raise TypeError("Tkinter objects don't support 'in' tests.")
def keys(self):
"""Return a list of all resource names of this widget."""
return map(lambda x: x[0][1:],
self.tk.split(self.tk.call(self._w, 'configure')))
def __str__(self):
"""Return the window path name of this widget."""
return self._w
# Pack methods that apply to the master
_noarg_ = ['_noarg_']
def pack_propagate(self, flag=_noarg_):
"""Set or get the status for propagation of geometry information.
A boolean argument specifies whether the geometry information
of the slaves will determine the size of this widget. If no argument
is given the current setting will be returned.
"""
if flag is Misc._noarg_:
return self._getboolean(self.tk.call(
'pack', 'propagate', self._w))
else:
self.tk.call('pack', 'propagate', self._w, flag)
propagate = pack_propagate
def pack_slaves(self):
"""Return a list of all slaves of this widget
in its packing order."""
return map(self._nametowidget,
self.tk.splitlist(
self.tk.call('pack', 'slaves', self._w)))
slaves = pack_slaves
# Place method that applies to the master
def place_slaves(self):
"""Return a list of all slaves of this widget
in its packing order."""
return map(self._nametowidget,
self.tk.splitlist(
self.tk.call(
'place', 'slaves', self._w)))
# Grid methods that apply to the master
def grid_bbox(self, column=None, row=None, col2=None, row2=None):
"""Return a tuple of integer coordinates for the bounding
box of this widget controlled by the geometry manager grid.
        If COLUMN, ROW is given the bounding box applies from
        the cell with row and column 0 to the specified
        cell. If COL2 and ROW2 are also given the bounding box
        extends from the cell COLUMN, ROW to the cell COL2, ROW2.
The returned integers specify the offset of the upper left
corner in the master widget and the width and height.
"""
args = ('grid', 'bbox', self._w)
if column is not None and row is not None:
args = args + (column, row)
if col2 is not None and row2 is not None:
args = args + (col2, row2)
return self._getints(self.tk.call(*args)) or None
bbox = grid_bbox
def _grid_configure(self, command, index, cnf, kw):
"""Internal function."""
if type(cnf) is StringType and not kw:
if cnf[-1:] == '_':
cnf = cnf[:-1]
if cnf[:1] != '-':
cnf = '-'+cnf
options = (cnf,)
else:
options = self._options(cnf, kw)
if not options:
res = self.tk.call('grid',
command, self._w, index)
words = self.tk.splitlist(res)
dict = {}
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
if not value:
value = None
elif '.' in value:
value = getdouble(value)
else:
value = getint(value)
dict[key] = value
return dict
res = self.tk.call(
('grid', command, self._w, index)
+ options)
if len(options) == 1:
if not res: return None
# In Tk 7.5, -width can be a float
if '.' in res: return getdouble(res)
return getint(res)
def grid_columnconfigure(self, index, cnf={}, **kw):
"""Configure column INDEX of a grid.
Valid resources are minsize (minimum size of the column),
        weight (how much additional space propagates to this column)
        and pad (how much extra space to add)."""
return self._grid_configure('columnconfigure', index, cnf, kw)
columnconfigure = grid_columnconfigure
def grid_location(self, x, y):
"""Return a tuple of column and row which identify the cell
at which the pixel at position X and Y inside the master
widget is located."""
return self._getints(
self.tk.call(
'grid', 'location', self._w, x, y)) or None
def grid_propagate(self, flag=_noarg_):
"""Set or get the status for propagation of geometry information.
A boolean argument specifies whether the geometry information
of the slaves will determine the size of this widget. If no argument
is given, the current setting will be returned.
"""
if flag is Misc._noarg_:
return self._getboolean(self.tk.call(
'grid', 'propagate', self._w))
else:
self.tk.call('grid', 'propagate', self._w, flag)
def grid_rowconfigure(self, index, cnf={}, **kw):
"""Configure row INDEX of a grid.
Valid resources are minsize (minimum size of the row),
        weight (how much additional space propagates to this row)
        and pad (how much extra space to add)."""
return self._grid_configure('rowconfigure', index, cnf, kw)
rowconfigure = grid_rowconfigure
def grid_size(self):
"""Return a tuple of the number of column and rows in the grid."""
return self._getints(
self.tk.call('grid', 'size', self._w)) or None
size = grid_size
def grid_slaves(self, row=None, column=None):
"""Return a list of all slaves of this widget
in its packing order."""
args = ()
if row is not None:
args = args + ('-row', row)
if column is not None:
args = args + ('-column', column)
return map(self._nametowidget,
self.tk.splitlist(self.tk.call(
('grid', 'slaves', self._w) + args)))
# Support for the "event" command, new in Tk 4.2.
# By Case Roole.
def event_add(self, virtual, *sequences):
"""Bind a virtual event VIRTUAL (of the form <<Name>>)
to an event SEQUENCE such that the virtual event is triggered
whenever SEQUENCE occurs."""
args = ('event', 'add', virtual) + sequences
self.tk.call(args)
def event_delete(self, virtual, *sequences):
"""Unbind a virtual event VIRTUAL from SEQUENCE."""
args = ('event', 'delete', virtual) + sequences
self.tk.call(args)
def event_generate(self, sequence, **kw):
"""Generate an event SEQUENCE. Additional
keyword arguments specify parameter of the event
(e.g. x, y, rootx, rooty)."""
args = ('event', 'generate', self._w, sequence)
for k, v in kw.items():
args = args + ('-%s' % k, str(v))
self.tk.call(args)
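    # Virtual-event sketch (illustrative; the <<Commit>> name and the save()
    # handler are made up for this example):
    #
    #     root.event_add("<<Commit>>", "<Control-Return>", "<KP_Enter>")
    #     entry.bind("<<Commit>>", lambda e: save())
    #     entry.event_generate("<<Commit>>")   # fire it programmatically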
def event_info(self, virtual=None):
"""Return a list of all virtual events or the information
about the SEQUENCE bound to the virtual event VIRTUAL."""
return self.tk.splitlist(
self.tk.call('event', 'info', virtual))
# Image related commands
def image_names(self):
"""Return a list of all existing image names."""
return self.tk.call('image', 'names')
def image_types(self):
"""Return a list of all available image types (e.g. phote bitmap)."""
return self.tk.call('image', 'types')
class CallWrapper:
"""Internal class. Stores function to call when some user
defined Tcl function is called e.g. after an event occurred."""
def __init__(self, func, subst, widget):
"""Store FUNC, SUBST and WIDGET as members."""
self.func = func
self.subst = subst
self.widget = widget
def __call__(self, *args):
"""Apply first function SUBST to arguments, than FUNC."""
try:
if self.subst:
args = self.subst(*args)
return self.func(*args)
except SystemExit, msg:
raise SystemExit, msg
except:
self.widget._report_exception()
class XView:
"""Mix-in class for querying and changing the horizontal position
of a widget's window."""
def xview(self, *args):
"""Query and change the horizontal position of the view."""
res = self.tk.call(self._w, 'xview', *args)
if not args:
return self._getdoubles(res)
def xview_moveto(self, fraction):
"""Adjusts the view in the window so that FRACTION of the
total width of the canvas is off-screen to the left."""
self.tk.call(self._w, 'xview', 'moveto', fraction)
def xview_scroll(self, number, what):
"""Shift the x-view according to NUMBER which is measured in "units"
or "pages" (WHAT)."""
self.tk.call(self._w, 'xview', 'scroll', number, what)
class YView:
"""Mix-in class for querying and changing the vertical position
of a widget's window."""
def yview(self, *args):
"""Query and change the vertical position of the view."""
res = self.tk.call(self._w, 'yview', *args)
if not args:
return self._getdoubles(res)
def yview_moveto(self, fraction):
"""Adjusts the view in the window so that FRACTION of the
total height of the canvas is off-screen to the top."""
self.tk.call(self._w, 'yview', 'moveto', fraction)
def yview_scroll(self, number, what):
"""Shift the y-view according to NUMBER which is measured in
"units" or "pages" (WHAT)."""
self.tk.call(self._w, 'yview', 'scroll', number, what)
class Wm:
"""Provides functions for the communication with the window manager."""
def wm_aspect(self,
minNumer=None, minDenom=None,
maxNumer=None, maxDenom=None):
"""Instruct the window manager to set the aspect ratio (width/height)
of this widget to be between MINNUMER/MINDENOM and MAXNUMER/MAXDENOM. Return a tuple
of the actual values if no argument is given."""
return self._getints(
self.tk.call('wm', 'aspect', self._w,
minNumer, minDenom,
maxNumer, maxDenom))
aspect = wm_aspect
def wm_attributes(self, *args):
"""This subcommand returns or sets platform specific attributes
The first form returns a list of the platform specific flags and
their values. The second form returns the value for the specific
option. The third form sets one or more of the values. The values
are as follows:
On Windows, -disabled gets or sets whether the window is in a
disabled state. -toolwindow gets or sets the style of the window
to toolwindow (as defined in the MSDN). -topmost gets or sets
whether this is a topmost window (displays above all other
windows).
On Macintosh, XXXXX
On Unix, there are currently no special attribute values.
"""
args = ('wm', 'attributes', self._w) + args
return self.tk.call(args)
attributes=wm_attributes
def wm_client(self, name=None):
"""Store NAME in WM_CLIENT_MACHINE property of this widget. Return
current value."""
return self.tk.call('wm', 'client', self._w, name)
client = wm_client
def wm_colormapwindows(self, *wlist):
"""Store list of window names (WLIST) into WM_COLORMAPWINDOWS property
of this widget. This list contains windows whose colormaps differ from their
parents. Return current list of widgets if WLIST is empty."""
if len(wlist) > 1:
wlist = (wlist,) # Tk needs a list of windows here
args = ('wm', 'colormapwindows', self._w) + wlist
return map(self._nametowidget, self.tk.call(args))
colormapwindows = wm_colormapwindows
def wm_command(self, value=None):
"""Store VALUE in WM_COMMAND property. It is the command
which shall be used to invoke the application. Return current
command if VALUE is None."""
return self.tk.call('wm', 'command', self._w, value)
command = wm_command
def wm_deiconify(self):
"""Deiconify this widget. If it was never mapped it will not be mapped.
On Windows it will raise this widget and give it the focus."""
return self.tk.call('wm', 'deiconify', self._w)
deiconify = wm_deiconify
def wm_focusmodel(self, model=None):
"""Set focus model to MODEL. "active" means that this widget will claim
the focus itself, "passive" means that the window manager shall give
the focus. Return current focus model if MODEL is None."""
return self.tk.call('wm', 'focusmodel', self._w, model)
focusmodel = wm_focusmodel
def wm_frame(self):
"""Return identifier for decorative frame of this widget if present."""
return self.tk.call('wm', 'frame', self._w)
frame = wm_frame
def wm_geometry(self, newGeometry=None):
"""Set geometry to NEWGEOMETRY of the form =widthxheight+x+y. Return
current value if None is given."""
return self.tk.call('wm', 'geometry', self._w, newGeometry)
geometry = wm_geometry
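    # Geometry sketch (illustrative).  The string has the form
    # "widthxheight+x+y"; calling without an argument reads the current value:
    #
    #     root.geometry("400x300+100+50")   # resize and move the toplevel
    #     print root.geometry()             # e.g. "400x300+100+50"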
def wm_grid(self,
baseWidth=None, baseHeight=None,
widthInc=None, heightInc=None):
"""Instruct the window manager that this widget shall only be
resized on grid boundaries. WIDTHINC and HEIGHTINC are the width and
height of a grid unit in pixels. BASEWIDTH and BASEHEIGHT are the
number of grid units requested in Tk_GeometryRequest."""
return self._getints(self.tk.call(
'wm', 'grid', self._w,
baseWidth, baseHeight, widthInc, heightInc))
grid = wm_grid
def wm_group(self, pathName=None):
"""Set the group leader widgets for related widgets to PATHNAME. Return
the group leader of this widget if None is given."""
return self.tk.call('wm', 'group', self._w, pathName)
group = wm_group
def wm_iconbitmap(self, bitmap=None, default=None):
"""Set bitmap for the iconified widget to BITMAP. Return
the bitmap if None is given.
Under Windows, the DEFAULT parameter can be used to set the icon
for the widget and any descendents that don't have an icon set
explicitly. DEFAULT can be the relative path to a .ico file
(example: root.iconbitmap(default='myicon.ico') ). See Tk
documentation for more information."""
if default:
return self.tk.call('wm', 'iconbitmap', self._w, '-default', default)
else:
return self.tk.call('wm', 'iconbitmap', self._w, bitmap)
iconbitmap = wm_iconbitmap
def wm_iconify(self):
"""Display widget as icon."""
return self.tk.call('wm', 'iconify', self._w)
iconify = wm_iconify
def wm_iconmask(self, bitmap=None):
"""Set mask for the icon bitmap of this widget. Return the
mask if None is given."""
return self.tk.call('wm', 'iconmask', self._w, bitmap)
iconmask = wm_iconmask
def wm_iconname(self, newName=None):
"""Set the name of the icon for this widget. Return the name if
None is given."""
return self.tk.call('wm', 'iconname', self._w, newName)
iconname = wm_iconname
def wm_iconposition(self, x=None, y=None):
"""Set the position of the icon of this widget to X and Y. Return
        a tuple of the current values of X and Y if None is given."""
return self._getints(self.tk.call(
'wm', 'iconposition', self._w, x, y))
iconposition = wm_iconposition
def wm_iconwindow(self, pathName=None):
"""Set widget PATHNAME to be displayed instead of icon. Return the current
value if None is given."""
return self.tk.call('wm', 'iconwindow', self._w, pathName)
iconwindow = wm_iconwindow
def wm_maxsize(self, width=None, height=None):
"""Set max WIDTH and HEIGHT for this widget. If the window is gridded
the values are given in grid units. Return the current values if None
is given."""
return self._getints(self.tk.call(
'wm', 'maxsize', self._w, width, height))
maxsize = wm_maxsize
def wm_minsize(self, width=None, height=None):
"""Set min WIDTH and HEIGHT for this widget. If the window is gridded
the values are given in grid units. Return the current values if None
is given."""
return self._getints(self.tk.call(
'wm', 'minsize', self._w, width, height))
minsize = wm_minsize
def wm_overrideredirect(self, boolean=None):
"""Instruct the window manager to ignore this widget
if BOOLEAN is given with 1. Return the current value if None
is given."""
return self._getboolean(self.tk.call(
'wm', 'overrideredirect', self._w, boolean))
overrideredirect = wm_overrideredirect
def wm_positionfrom(self, who=None):
"""Instruct the window manager that the position of this widget shall
be defined by the user if WHO is "user", and by its own policy if WHO is
"program"."""
return self.tk.call('wm', 'positionfrom', self._w, who)
positionfrom = wm_positionfrom
def wm_protocol(self, name=None, func=None):
"""Bind function FUNC to command NAME for this widget.
Return the function bound to NAME if None is given. NAME could be
e.g. "WM_SAVE_YOURSELF" or "WM_DELETE_WINDOW"."""
if hasattr(func, '__call__'):
command = self._register(func)
else:
command = func
return self.tk.call(
'wm', 'protocol', self._w, name, command)
protocol = wm_protocol
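    # Typical WM_DELETE_WINDOW handler (illustrative; on_close and
    # ask_save_changes are placeholder names):
    #
    #     def on_close():
    #         if ask_save_changes():
    #             root.destroy()
    #
    #     root.protocol("WM_DELETE_WINDOW", on_close)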
def wm_resizable(self, width=None, height=None):
"""Instruct the window manager whether this width can be resized
in WIDTH or HEIGHT. Both values are boolean values."""
return self.tk.call('wm', 'resizable', self._w, width, height)
resizable = wm_resizable
def wm_sizefrom(self, who=None):
"""Instruct the window manager that the size of this widget shall
be defined by the user if WHO is "user", and by its own policy if WHO is
"program"."""
return self.tk.call('wm', 'sizefrom', self._w, who)
sizefrom = wm_sizefrom
def wm_state(self, newstate=None):
"""Query or set the state of this widget as one of normal, icon,
iconic (see wm_iconwindow), withdrawn, or zoomed (Windows only)."""
return self.tk.call('wm', 'state', self._w, newstate)
state = wm_state
def wm_title(self, string=None):
"""Set the title of this widget."""
return self.tk.call('wm', 'title', self._w, string)
title = wm_title
def wm_transient(self, master=None):
"""Instruct the window manager that this widget is transient
with regard to widget MASTER."""
return self.tk.call('wm', 'transient', self._w, master)
transient = wm_transient
def wm_withdraw(self):
"""Withdraw this widget from the screen such that it is unmapped
and forgotten by the window manager. Re-draw it with wm_deiconify."""
return self.tk.call('wm', 'withdraw', self._w)
withdraw = wm_withdraw
class Tk(Misc, Wm):
"""Toplevel widget of Tk which represents mostly the main window
of an application. It has an associated Tcl interpreter."""
_w = '.'
def __init__(self, screenName=None, baseName=None, className='Tk',
useTk=1, sync=0, use=None):
"""Return a new Toplevel widget on screen SCREENNAME. A new Tcl interpreter will
be created. BASENAME will be used for the identification of the profile file (see
readprofile).
It is constructed from sys.argv[0] without extensions if None is given. CLASSNAME
is the name of the widget class."""
self.master = None
self.children = {}
self._tkloaded = 0
# to avoid recursions in the getattr code in case of failure, we
# ensure that self.tk is always _something_.
self.tk = None
if baseName is None:
import sys, os
baseName = os.path.basename(sys.argv[0])
baseName, ext = os.path.splitext(baseName)
if ext not in ('.py', '.pyc', '.pyo'):
baseName = baseName + ext
interactive = 0
self.tk = _tkinter.create(screenName, baseName, className, interactive, wantobjects, useTk, sync, use)
if useTk:
self._loadtk()
self.readprofile(baseName, className)
def loadtk(self):
if not self._tkloaded:
self.tk.loadtk()
self._loadtk()
def _loadtk(self):
self._tkloaded = 1
global _default_root
# Version sanity checks
tk_version = self.tk.getvar('tk_version')
if tk_version != _tkinter.TK_VERSION:
raise RuntimeError, \
"tk.h version (%s) doesn't match libtk.a version (%s)" \
% (_tkinter.TK_VERSION, tk_version)
# Under unknown circumstances, tcl_version gets coerced to float
tcl_version = str(self.tk.getvar('tcl_version'))
if tcl_version != _tkinter.TCL_VERSION:
raise RuntimeError, \
"tcl.h version (%s) doesn't match libtcl.a version (%s)" \
% (_tkinter.TCL_VERSION, tcl_version)
if TkVersion < 4.0:
raise RuntimeError, \
"Tk 4.0 or higher is required; found Tk %s" \
% str(TkVersion)
# Create and register the tkerror and exit commands
        # We need to inline parts of _register here; _register
# would register differently-named commands.
if self._tclCommands is None:
self._tclCommands = []
self.tk.createcommand('tkerror', _tkerror)
self.tk.createcommand('exit', _exit)
self._tclCommands.append('tkerror')
self._tclCommands.append('exit')
if _support_default_root and not _default_root:
_default_root = self
self.protocol("WM_DELETE_WINDOW", self.destroy)
def destroy(self):
"""Destroy this and all descendants widgets. This will
end the application of this Tcl interpreter."""
for c in self.children.values(): c.destroy()
self.tk.call('destroy', self._w)
Misc.destroy(self)
global _default_root
if _support_default_root and _default_root is self:
_default_root = None
def readprofile(self, baseName, className):
"""Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into
the Tcl Interpreter and calls execfile on BASENAME.py and CLASSNAME.py if
such a file exists in the home directory."""
import os
if 'HOME' in os.environ: home = os.environ['HOME']
else: home = os.curdir
class_tcl = os.path.join(home, '.%s.tcl' % className)
class_py = os.path.join(home, '.%s.py' % className)
base_tcl = os.path.join(home, '.%s.tcl' % baseName)
base_py = os.path.join(home, '.%s.py' % baseName)
dir = {'self': self}
exec 'from Tkinter import *' in dir
if os.path.isfile(class_tcl):
self.tk.call('source', class_tcl)
if os.path.isfile(class_py):
execfile(class_py, dir)
if os.path.isfile(base_tcl):
self.tk.call('source', base_tcl)
if os.path.isfile(base_py):
execfile(base_py, dir)
def report_callback_exception(self, exc, val, tb):
"""Internal function. It reports exception on sys.stderr."""
import traceback, sys
sys.stderr.write("Exception in Tkinter callback\n")
sys.last_type = exc
sys.last_value = val
sys.last_traceback = tb
traceback.print_exception(exc, val, tb)
def __getattr__(self, attr):
"Delegate attribute access to the interpreter object"
return getattr(self.tk, attr)
# Ideally, the classes Pack, Place and Grid disappear, the
# pack/place/grid methods are defined on the Widget class, and
# everybody uses w.pack_whatever(...) instead of Pack.whatever(w,
# ...), with pack(), place() and grid() being short for
# pack_configure(), place_configure() and grid_columnconfigure(), and
# forget() being short for pack_forget(). As a practical matter, I'm
# afraid that there is too much code out there that may be using the
# Pack, Place or Grid class, so I leave them intact -- but only as
# backwards compatibility features. Also note that those methods that
# take a master as argument (e.g. pack_propagate) have been moved to
# the Misc class (which now incorporates all methods common between
# toplevel and interior widgets). Again, for compatibility, these are
# copied into the Pack, Place or Grid class.
def Tcl(screenName=None, baseName=None, className='Tk', useTk=0):
return Tk(screenName, baseName, className, useTk)
class Pack:
"""Geometry manager Pack.
Base class to use the methods pack_* in every widget."""
def pack_configure(self, cnf={}, **kw):
"""Pack a widget in the parent widget. Use as options:
after=widget - pack it after you have packed widget
anchor=NSEW (or subset) - position widget according to
given direction
before=widget - pack it before you will pack widget
expand=bool - expand widget if parent size grows
fill=NONE or X or Y or BOTH - fill widget if widget grows
in=master - use master to contain this widget
in_=master - see 'in' option description
ipadx=amount - add internal padding in x direction
ipady=amount - add internal padding in y direction
padx=amount - add padding in x direction
pady=amount - add padding in y direction
side=TOP or BOTTOM or LEFT or RIGHT - where to add this widget.
"""
self.tk.call(
('pack', 'configure', self._w)
+ self._options(cnf, kw))
pack = configure = config = pack_configure
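    # Packing sketch (illustrative widget names): a toolbar and status bar
    # pinned to the edges, with the body taking the leftover space:
    #
    #     toolbar.pack(side=TOP, fill=X)
    #     status.pack(side=BOTTOM, fill=X)
    #     body.pack(side=TOP, fill=BOTH, expand=1)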
def pack_forget(self):
"""Unmap this widget and do not use it for the packing order."""
self.tk.call('pack', 'forget', self._w)
forget = pack_forget
def pack_info(self):
"""Return information about the packing options
for this widget."""
words = self.tk.splitlist(
self.tk.call('pack', 'info', self._w))
dict = {}
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
if value[:1] == '.':
value = self._nametowidget(value)
dict[key] = value
return dict
info = pack_info
propagate = pack_propagate = Misc.pack_propagate
slaves = pack_slaves = Misc.pack_slaves
class Place:
"""Geometry manager Place.
Base class to use the methods place_* in every widget."""
def place_configure(self, cnf={}, **kw):
"""Place a widget in the parent widget. Use as options:
in=master - master relative to which the widget is placed
in_=master - see 'in' option description
x=amount - locate anchor of this widget at position x of master
y=amount - locate anchor of this widget at position y of master
relx=amount - locate anchor of this widget between 0.0 and 1.0
relative to width of master (1.0 is right edge)
rely=amount - locate anchor of this widget between 0.0 and 1.0
relative to height of master (1.0 is bottom edge)
anchor=NSEW (or subset) - position anchor according to given direction
        width=amount - width of this widget in pixels
        height=amount - height of this widget in pixels
relwidth=amount - width of this widget between 0.0 and 1.0
relative to width of master (1.0 is the same width
as the master)
relheight=amount - height of this widget between 0.0 and 1.0
relative to height of master (1.0 is the same
height as the master)
bordermode="inside" or "outside" - whether to take border width of
master widget into account
"""
self.tk.call(
('place', 'configure', self._w)
+ self._options(cnf, kw))
place = configure = config = place_configure
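    # Placement sketch (illustrative): center a widget in its master and keep
    # it centered when the master resizes; relwidth/relheight size a widget
    # relative to the master:
    #
    #     label.place(relx=0.5, rely=0.5, anchor=CENTER)
    #     panel.place(x=10, y=10, relwidth=0.5, relheight=1.0)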
def place_forget(self):
"""Unmap this widget."""
self.tk.call('place', 'forget', self._w)
forget = place_forget
def place_info(self):
"""Return information about the placing options
for this widget."""
words = self.tk.splitlist(
self.tk.call('place', 'info', self._w))
dict = {}
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
if value[:1] == '.':
value = self._nametowidget(value)
dict[key] = value
return dict
info = place_info
slaves = place_slaves = Misc.place_slaves
class Grid:
"""Geometry manager Grid.
Base class to use the methods grid_* in every widget."""
# Thanks to Masazumi Yoshikawa ([email protected])
def grid_configure(self, cnf={}, **kw):
"""Position a widget in the parent widget in a grid. Use as options:
column=number - use cell identified with given column (starting with 0)
columnspan=number - this widget will span several columns
in=master - use master to contain this widget
in_=master - see 'in' option description
ipadx=amount - add internal padding in x direction
ipady=amount - add internal padding in y direction
padx=amount - add padding in x direction
pady=amount - add padding in y direction
row=number - use cell identified with given row (starting with 0)
rowspan=number - this widget will span several rows
        sticky=NSEW - if the cell is larger, on which sides this
                      widget should stick to the cell boundary
"""
self.tk.call(
('grid', 'configure', self._w)
+ self._options(cnf, kw))
grid = configure = config = grid_configure
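    # Grid sketch (illustrative): a label/entry row whose entry column grows
    # with the window:
    #
    #     Label(root, text="Name:").grid(row=0, column=0, sticky=E)
    #     entry = Entry(root)
    #     entry.grid(row=0, column=1, sticky=E+W, padx=4, pady=4)
    #     root.grid_columnconfigure(1, weight=1)   # column 1 absorbs extra width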
bbox = grid_bbox = Misc.grid_bbox
columnconfigure = grid_columnconfigure = Misc.grid_columnconfigure
def grid_forget(self):
"""Unmap this widget."""
self.tk.call('grid', 'forget', self._w)
forget = grid_forget
def grid_remove(self):
"""Unmap this widget but remember the grid options."""
self.tk.call('grid', 'remove', self._w)
def grid_info(self):
"""Return information about the options
for positioning this widget in a grid."""
words = self.tk.splitlist(
self.tk.call('grid', 'info', self._w))
dict = {}
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
if value[:1] == '.':
value = self._nametowidget(value)
dict[key] = value
return dict
info = grid_info
location = grid_location = Misc.grid_location
propagate = grid_propagate = Misc.grid_propagate
rowconfigure = grid_rowconfigure = Misc.grid_rowconfigure
size = grid_size = Misc.grid_size
slaves = grid_slaves = Misc.grid_slaves
class BaseWidget(Misc):
"""Internal class."""
def _setup(self, master, cnf):
"""Internal function. Sets up information about children."""
if _support_default_root:
global _default_root
if not master:
if not _default_root:
_default_root = Tk()
master = _default_root
self.master = master
self.tk = master.tk
name = None
if 'name' in cnf:
name = cnf['name']
del cnf['name']
if not name:
name = repr(id(self))
self._name = name
if master._w=='.':
self._w = '.' + name
else:
self._w = master._w + '.' + name
self.children = {}
if self._name in self.master.children:
self.master.children[self._name].destroy()
self.master.children[self._name] = self
def __init__(self, master, widgetName, cnf={}, kw={}, extra=()):
"""Construct a widget with the parent widget MASTER, a name WIDGETNAME
and appropriate options."""
if kw:
cnf = _cnfmerge((cnf, kw))
self.widgetName = widgetName
BaseWidget._setup(self, master, cnf)
if self._tclCommands is None:
self._tclCommands = []
classes = []
for k in cnf.keys():
if type(k) is ClassType:
classes.append((k, cnf[k]))
del cnf[k]
self.tk.call(
(widgetName, self._w) + extra + self._options(cnf))
for k, v in classes:
k.configure(self, v)
def destroy(self):
"""Destroy this and all descendants widgets."""
for c in self.children.values(): c.destroy()
self.tk.call('destroy', self._w)
if self._name in self.master.children:
del self.master.children[self._name]
Misc.destroy(self)
def _do(self, name, args=()):
# XXX Obsolete -- better use self.tk.call directly!
return self.tk.call((self._w, name) + args)
class Widget(BaseWidget, Pack, Place, Grid):
"""Internal class.
Base class for a widget which can be positioned with the geometry managers
Pack, Place or Grid."""
pass
class Toplevel(BaseWidget, Wm):
"""Toplevel widget, e.g. for dialogs."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a toplevel widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, class,
colormap, container, cursor, height, highlightbackground,
highlightcolor, highlightthickness, menu, relief, screen, takefocus,
use, visual, width."""
if kw:
cnf = _cnfmerge((cnf, kw))
extra = ()
for wmkey in ['screen', 'class_', 'class', 'visual',
'colormap']:
if wmkey in cnf:
val = cnf[wmkey]
# TBD: a hack needed because some keys
# are not valid as keyword arguments
if wmkey[-1] == '_': opt = '-'+wmkey[:-1]
else: opt = '-'+wmkey
extra = extra + (opt, val)
del cnf[wmkey]
BaseWidget.__init__(self, master, 'toplevel', cnf, {}, extra)
root = self._root()
self.iconname(root.iconname())
self.title(root.title())
self.protocol("WM_DELETE_WINDOW", self.destroy)
class Button(Widget):
"""Button widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a button widget with the parent MASTER.
STANDARD OPTIONS
activebackground, activeforeground, anchor,
background, bitmap, borderwidth, cursor,
disabledforeground, font, foreground
highlightbackground, highlightcolor,
highlightthickness, image, justify,
padx, pady, relief, repeatdelay,
repeatinterval, takefocus, text,
textvariable, underline, wraplength
WIDGET-SPECIFIC OPTIONS
command, compound, default, height,
overrelief, state, width
"""
Widget.__init__(self, master, 'button', cnf, kw)
def tkButtonEnter(self, *dummy):
self.tk.call('tkButtonEnter', self._w)
def tkButtonLeave(self, *dummy):
self.tk.call('tkButtonLeave', self._w)
def tkButtonDown(self, *dummy):
self.tk.call('tkButtonDown', self._w)
def tkButtonUp(self, *dummy):
self.tk.call('tkButtonUp', self._w)
def tkButtonInvoke(self, *dummy):
self.tk.call('tkButtonInvoke', self._w)
def flash(self):
"""Flash the button.
This is accomplished by redisplaying
the button several times, alternating between active and
normal colors. At the end of the flash the button is left
in the same normal/active state as when the command was
invoked. This command is ignored if the button's state is
disabled.
"""
self.tk.call(self._w, 'flash')
def invoke(self):
"""Invoke the command associated with the button.
The return value is the return value from the command,
or an empty string if there is no command associated with
the button. This command is ignored if the button's state
is disabled.
"""
return self.tk.call(self._w, 'invoke')
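    # A complete toy application using Button (illustrative only):
    #
    #     from Tkinter import Tk, Button
    #     root = Tk()
    #     def say_hi():
    #         print "hi"
    #     Button(root, text="Say hi", command=say_hi).pack()
    #     Button(root, text="Quit", command=root.quit).pack()
    #     root.mainloop()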
# Indices:
# XXX I don't like these -- take them away
def AtEnd():
return 'end'
def AtInsert(*args):
s = 'insert'
for a in args:
if a: s = s + (' ' + a)
return s
def AtSelFirst():
return 'sel.first'
def AtSelLast():
return 'sel.last'
def At(x, y=None):
if y is None:
return '@%r' % (x,)
else:
return '@%r,%r' % (x, y)
class Canvas(Widget, XView, YView):
"""Canvas widget to display graphical elements like lines or text."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a canvas widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, closeenough,
confine, cursor, height, highlightbackground, highlightcolor,
highlightthickness, insertbackground, insertborderwidth,
insertofftime, insertontime, insertwidth, offset, relief,
scrollregion, selectbackground, selectborderwidth, selectforeground,
state, takefocus, width, xscrollcommand, xscrollincrement,
yscrollcommand, yscrollincrement."""
Widget.__init__(self, master, 'canvas', cnf, kw)
def addtag(self, *args):
"""Internal function."""
self.tk.call((self._w, 'addtag') + args)
def addtag_above(self, newtag, tagOrId):
"""Add tag NEWTAG to all items above TAGORID."""
self.addtag(newtag, 'above', tagOrId)
def addtag_all(self, newtag):
"""Add tag NEWTAG to all items."""
self.addtag(newtag, 'all')
def addtag_below(self, newtag, tagOrId):
"""Add tag NEWTAG to all items below TAGORID."""
self.addtag(newtag, 'below', tagOrId)
def addtag_closest(self, newtag, x, y, halo=None, start=None):
"""Add tag NEWTAG to item which is closest to pixel at X, Y.
If several match take the top-most.
All items closer than HALO are considered overlapping (all are
        closest). If START is specified the next below this tag is taken."""
self.addtag(newtag, 'closest', x, y, halo, start)
def addtag_enclosed(self, newtag, x1, y1, x2, y2):
"""Add tag NEWTAG to all items in the rectangle defined
by X1,Y1,X2,Y2."""
self.addtag(newtag, 'enclosed', x1, y1, x2, y2)
def addtag_overlapping(self, newtag, x1, y1, x2, y2):
"""Add tag NEWTAG to all items which overlap the rectangle
defined by X1,Y1,X2,Y2."""
self.addtag(newtag, 'overlapping', x1, y1, x2, y2)
def addtag_withtag(self, newtag, tagOrId):
"""Add tag NEWTAG to all items with TAGORID."""
self.addtag(newtag, 'withtag', tagOrId)
def bbox(self, *args):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
which encloses all items with tags specified as arguments."""
return self._getints(
self.tk.call((self._w, 'bbox') + args)) or None
def tag_unbind(self, tagOrId, sequence, funcid=None):
"""Unbind for all items with TAGORID for event SEQUENCE the
function identified with FUNCID."""
self.tk.call(self._w, 'bind', tagOrId, sequence, '')
if funcid:
self.deletecommand(funcid)
def tag_bind(self, tagOrId, sequence=None, func=None, add=None):
"""Bind to all items with TAGORID at event SEQUENCE a call to function FUNC.
An additional boolean parameter ADD specifies whether FUNC will be
called additionally to the other bound function or whether it will
replace the previous function. See bind for the return value."""
return self._bind((self._w, 'bind', tagOrId),
sequence, func, add)
def canvasx(self, screenx, gridspacing=None):
"""Return the canvas x coordinate of pixel position SCREENX rounded
to nearest multiple of GRIDSPACING units."""
return getdouble(self.tk.call(
self._w, 'canvasx', screenx, gridspacing))
def canvasy(self, screeny, gridspacing=None):
"""Return the canvas y coordinate of pixel position SCREENY rounded
to nearest multiple of GRIDSPACING units."""
return getdouble(self.tk.call(
self._w, 'canvasy', screeny, gridspacing))
def coords(self, *args):
"""Return a list of coordinates for the item given in ARGS."""
# XXX Should use _flatten on args
return map(getdouble,
self.tk.splitlist(
self.tk.call((self._w, 'coords') + args)))
def _create(self, itemType, args, kw): # Args: (val, val, ..., cnf={})
"""Internal function."""
args = _flatten(args)
cnf = args[-1]
if type(cnf) in (DictionaryType, TupleType):
args = args[:-1]
else:
cnf = {}
return getint(self.tk.call(
self._w, 'create', itemType,
*(args + self._options(cnf, kw))))
def create_arc(self, *args, **kw):
"""Create arc shaped region with coordinates x1,y1,x2,y2."""
return self._create('arc', args, kw)
def create_bitmap(self, *args, **kw):
"""Create bitmap with coordinates x1,y1."""
return self._create('bitmap', args, kw)
def create_image(self, *args, **kw):
"""Create image item with coordinates x1,y1."""
return self._create('image', args, kw)
def create_line(self, *args, **kw):
"""Create line with coordinates x1,y1,...,xn,yn."""
return self._create('line', args, kw)
def create_oval(self, *args, **kw):
"""Create oval with coordinates x1,y1,x2,y2."""
return self._create('oval', args, kw)
def create_polygon(self, *args, **kw):
"""Create polygon with coordinates x1,y1,...,xn,yn."""
return self._create('polygon', args, kw)
def create_rectangle(self, *args, **kw):
"""Create rectangle with coordinates x1,y1,x2,y2."""
return self._create('rectangle', args, kw)
def create_text(self, *args, **kw):
"""Create text with coordinates x1,y1."""
return self._create('text', args, kw)
def create_window(self, *args, **kw):
"""Create window with coordinates x1,y1,x2,y2."""
return self._create('window', args, kw)
def dchars(self, *args):
"""Delete characters of text items identified by tag or id in ARGS (possibly
several times) from FIRST to LAST character (including)."""
self.tk.call((self._w, 'dchars') + args)
def delete(self, *args):
"""Delete items identified by all tag or ids contained in ARGS."""
self.tk.call((self._w, 'delete') + args)
def dtag(self, *args):
"""Delete tag or id given as last arguments in ARGS from items
identified by first argument in ARGS."""
self.tk.call((self._w, 'dtag') + args)
def find(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'find') + args)) or ()
def find_above(self, tagOrId):
"""Return items above TAGORID."""
return self.find('above', tagOrId)
def find_all(self):
"""Return all items."""
return self.find('all')
def find_below(self, tagOrId):
"""Return all items below TAGORID."""
return self.find('below', tagOrId)
def find_closest(self, x, y, halo=None, start=None):
"""Return item which is closest to pixel at X, Y.
If several match take the top-most.
All items closer than HALO are considered overlapping (all are
closests). If START is specified the next below this tag is taken."""
return self.find('closest', x, y, halo, start)
def find_enclosed(self, x1, y1, x2, y2):
"""Return all items in rectangle defined
by X1,Y1,X2,Y2."""
return self.find('enclosed', x1, y1, x2, y2)
def find_overlapping(self, x1, y1, x2, y2):
"""Return all items which overlap the rectangle
defined by X1,Y1,X2,Y2."""
return self.find('overlapping', x1, y1, x2, y2)
def find_withtag(self, tagOrId):
"""Return all items with TAGORID."""
return self.find('withtag', tagOrId)
def focus(self, *args):
"""Set focus to the first item specified in ARGS."""
return self.tk.call((self._w, 'focus') + args)
def gettags(self, *args):
"""Return tags associated with the first item specified in ARGS."""
return self.tk.splitlist(
self.tk.call((self._w, 'gettags') + args))
def icursor(self, *args):
"""Set cursor at position POS in the item identified by TAGORID.
In ARGS TAGORID must be first."""
self.tk.call((self._w, 'icursor') + args)
def index(self, *args):
"""Return position of cursor as integer in item specified in ARGS."""
return getint(self.tk.call((self._w, 'index') + args))
def insert(self, *args):
"""Insert TEXT in item TAGORID at position POS. ARGS must
be TAGORID POS TEXT."""
self.tk.call((self._w, 'insert') + args)
def itemcget(self, tagOrId, option):
"""Return the resource value for an OPTION for item TAGORID."""
return self.tk.call(
(self._w, 'itemcget') + (tagOrId, '-'+option))
def itemconfigure(self, tagOrId, cnf=None, **kw):
"""Configure resources of an item TAGORID.
The values for resources are specified as keyword
arguments. To get an overview about
the allowed keyword arguments call the method without arguments.
"""
return self._configure(('itemconfigure', tagOrId), cnf, kw)
itemconfig = itemconfigure
# lower, tkraise/lift hide Misc.lower, Misc.tkraise/lift,
# so the preferred name for them is tag_lower, tag_raise
# (similar to tag_bind, and similar to the Text widget);
# unfortunately can't delete the old ones yet (maybe in 1.6)
def tag_lower(self, *args):
"""Lower an item TAGORID given in ARGS
(optional below another item)."""
self.tk.call((self._w, 'lower') + args)
lower = tag_lower
def move(self, *args):
"""Move an item TAGORID given in ARGS."""
self.tk.call((self._w, 'move') + args)
def postscript(self, cnf={}, **kw):
"""Print the contents of the canvas to a postscript
file. Valid options: colormap, colormode, file, fontmap,
height, pageanchor, pageheight, pagewidth, pagex, pagey,
rotate, width, x, y."""
return self.tk.call((self._w, 'postscript') +
self._options(cnf, kw))
def tag_raise(self, *args):
"""Raise an item TAGORID given in ARGS
(optional above another item)."""
self.tk.call((self._w, 'raise') + args)
lift = tkraise = tag_raise
def scale(self, *args):
"""Scale item TAGORID with XORIGIN, YORIGIN, XSCALE, YSCALE."""
self.tk.call((self._w, 'scale') + args)
def scan_mark(self, x, y):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x, y)
def scan_dragto(self, x, y, gain=10):
"""Adjust the view of the canvas to GAIN times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x, y, gain)
def select_adjust(self, tagOrId, index):
"""Adjust the end of the selection near the cursor of an item TAGORID to index."""
self.tk.call(self._w, 'select', 'adjust', tagOrId, index)
def select_clear(self):
"""Clear the selection if it is in this widget."""
self.tk.call(self._w, 'select', 'clear')
def select_from(self, tagOrId, index):
"""Set the fixed end of a selection in item TAGORID to INDEX."""
self.tk.call(self._w, 'select', 'from', tagOrId, index)
def select_item(self):
"""Return the item which has the selection."""
return self.tk.call(self._w, 'select', 'item') or None
def select_to(self, tagOrId, index):
"""Set the variable end of a selection in item TAGORID to INDEX."""
self.tk.call(self._w, 'select', 'to', tagOrId, index)
def type(self, tagOrId):
"""Return the type of the item TAGORID."""
return self.tk.call(self._w, 'type', tagOrId) or None
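# --- Illustrative usage sketch (added commentary, not part of the original
# Canvas source). Assuming a usable display, canvas items, tags and bindings
# fit together roughly like this:
#
#     root = Tk()
#     canvas = Canvas(root, width=200, height=150, background='white')
#     canvas.pack()
#     canvas.create_rectangle(20, 20, 120, 80, fill='steelblue', tags='box')
#     canvas.tag_bind('box', '<Button-1>',
#                     lambda event: canvas.move('box', 10, 0))
#     root.mainloop()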
class Checkbutton(Widget):
"""Checkbutton widget which is either in on- or off-state."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a checkbutton widget with the parent MASTER.
Valid resource names: activebackground, activeforeground, anchor,
background, bd, bg, bitmap, borderwidth, command, cursor,
disabledforeground, fg, font, foreground, height,
highlightbackground, highlightcolor, highlightthickness, image,
indicatoron, justify, offvalue, onvalue, padx, pady, relief,
selectcolor, selectimage, state, takefocus, text, textvariable,
underline, variable, width, wraplength."""
Widget.__init__(self, master, 'checkbutton', cnf, kw)
def deselect(self):
"""Put the button in off-state."""
self.tk.call(self._w, 'deselect')
def flash(self):
"""Flash the button."""
self.tk.call(self._w, 'flash')
def invoke(self):
"""Toggle the button and invoke a command if given as resource."""
return self.tk.call(self._w, 'invoke')
def select(self):
"""Put the button in on-state."""
self.tk.call(self._w, 'select')
def toggle(self):
"""Toggle the button."""
self.tk.call(self._w, 'toggle')
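# --- Illustrative usage sketch (added commentary, not from the original
# source). A Checkbutton is usually paired with a Tk variable; the options
# used here follow the resource list documented above:
#
#     root = Tk()
#     state = IntVar()
#     check = Checkbutton(root, text='Enable logging', variable=state,
#                         onvalue=1, offvalue=0)
#     check.pack()
#     check.select()        # programmatically put the button in on-state
#     print state.get()     # -> 1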
class Entry(Widget, XView):
"""Entry widget which allows to display simple text."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct an entry widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, cursor,
exportselection, fg, font, foreground, highlightbackground,
highlightcolor, highlightthickness, insertbackground,
insertborderwidth, insertofftime, insertontime, insertwidth,
invalidcommand, invcmd, justify, relief, selectbackground,
selectborderwidth, selectforeground, show, state, takefocus,
textvariable, validate, validatecommand, vcmd, width,
xscrollcommand."""
Widget.__init__(self, master, 'entry', cnf, kw)
def delete(self, first, last=None):
"""Delete text from FIRST to LAST (not included)."""
self.tk.call(self._w, 'delete', first, last)
def get(self):
"""Return the text."""
return self.tk.call(self._w, 'get')
def icursor(self, index):
"""Insert cursor at INDEX."""
self.tk.call(self._w, 'icursor', index)
def index(self, index):
"""Return position of cursor."""
return getint(self.tk.call(
self._w, 'index', index))
def insert(self, index, string):
"""Insert STRING at INDEX."""
self.tk.call(self._w, 'insert', index, string)
def scan_mark(self, x):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x)
def scan_dragto(self, x):
"""Adjust the view of the canvas to 10 times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x)
def selection_adjust(self, index):
"""Adjust the end of the selection near the cursor to INDEX."""
self.tk.call(self._w, 'selection', 'adjust', index)
select_adjust = selection_adjust
def selection_clear(self):
"""Clear the selection if it is in this widget."""
self.tk.call(self._w, 'selection', 'clear')
select_clear = selection_clear
def selection_from(self, index):
"""Set the fixed end of a selection to INDEX."""
self.tk.call(self._w, 'selection', 'from', index)
select_from = selection_from
def selection_present(self):
"""Return True if there are characters selected in the entry, False
otherwise."""
return self.tk.getboolean(
self.tk.call(self._w, 'selection', 'present'))
select_present = selection_present
def selection_range(self, start, end):
"""Set the selection from START to END (not included)."""
self.tk.call(self._w, 'selection', 'range', start, end)
select_range = selection_range
def selection_to(self, index):
"""Set the variable end of a selection to INDEX."""
self.tk.call(self._w, 'selection', 'to', index)
select_to = selection_to
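# --- Illustrative usage sketch (added commentary, not from the original
# source), showing the insert/selection/cursor methods defined above:
#
#     root = Tk()
#     entry = Entry(root, width=30)
#     entry.pack()
#     entry.insert(0, 'hello')
#     entry.icursor(END)             # place the insertion cursor at the end
#     entry.selection_range(0, END)  # select the whole contents
#     print entry.get()              # -> 'hello'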
class Frame(Widget):
"""Frame widget which may contain other widgets and can have a 3D border."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a frame widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, class,
colormap, container, cursor, height, highlightbackground,
highlightcolor, highlightthickness, relief, takefocus, visual, width."""
cnf = _cnfmerge((cnf, kw))
extra = ()
if 'class_' in cnf:
extra = ('-class', cnf['class_'])
del cnf['class_']
elif 'class' in cnf:
extra = ('-class', cnf['class'])
del cnf['class']
Widget.__init__(self, master, 'frame', cnf, {}, extra)
class Label(Widget):
"""Label widget which can display text and bitmaps."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a label widget with the parent MASTER.
STANDARD OPTIONS
activebackground, activeforeground, anchor,
background, bitmap, borderwidth, cursor,
disabledforeground, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, image, justify,
padx, pady, relief, takefocus, text,
textvariable, underline, wraplength
WIDGET-SPECIFIC OPTIONS
height, state, width
"""
Widget.__init__(self, master, 'label', cnf, kw)
class Listbox(Widget, XView, YView):
"""Listbox widget which can display a list of strings."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a listbox widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, cursor,
exportselection, fg, font, foreground, height, highlightbackground,
highlightcolor, highlightthickness, relief, selectbackground,
selectborderwidth, selectforeground, selectmode, setgrid, takefocus,
width, xscrollcommand, yscrollcommand, listvariable."""
Widget.__init__(self, master, 'listbox', cnf, kw)
def activate(self, index):
"""Activate item identified by INDEX."""
self.tk.call(self._w, 'activate', index)
def bbox(self, *args):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
which encloses the item identified by index in ARGS."""
return self._getints(
self.tk.call((self._w, 'bbox') + args)) or None
def curselection(self):
"""Return list of indices of currently selected item."""
# XXX Ought to apply self._getints()...
return self.tk.splitlist(self.tk.call(
self._w, 'curselection'))
def delete(self, first, last=None):
"""Delete items from FIRST to LAST (not included)."""
self.tk.call(self._w, 'delete', first, last)
def get(self, first, last=None):
"""Get list of items from FIRST to LAST (not included)."""
if last:
return self.tk.splitlist(self.tk.call(
self._w, 'get', first, last))
else:
return self.tk.call(self._w, 'get', first)
def index(self, index):
"""Return index of item identified with INDEX."""
i = self.tk.call(self._w, 'index', index)
if i == 'none': return None
return getint(i)
def insert(self, index, *elements):
"""Insert ELEMENTS at INDEX."""
self.tk.call((self._w, 'insert', index) + elements)
def nearest(self, y):
"""Get index of item which is nearest to y coordinate Y."""
return getint(self.tk.call(
self._w, 'nearest', y))
def scan_mark(self, x, y):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x, y)
def scan_dragto(self, x, y):
"""Adjust the view of the listbox to 10 times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x, y)
def see(self, index):
"""Scroll such that INDEX is visible."""
self.tk.call(self._w, 'see', index)
def selection_anchor(self, index):
"""Set the fixed end oft the selection to INDEX."""
self.tk.call(self._w, 'selection', 'anchor', index)
select_anchor = selection_anchor
def selection_clear(self, first, last=None):
"""Clear the selection from FIRST to LAST (not included)."""
self.tk.call(self._w,
'selection', 'clear', first, last)
select_clear = selection_clear
def selection_includes(self, index):
"""Return 1 if INDEX is part of the selection."""
return self.tk.getboolean(self.tk.call(
self._w, 'selection', 'includes', index))
select_includes = selection_includes
def selection_set(self, first, last=None):
"""Set the selection from FIRST to LAST (not included) without
changing the currently selected elements."""
self.tk.call(self._w, 'selection', 'set', first, last)
select_set = selection_set
def size(self):
"""Return the number of elements in the listbox."""
return getint(self.tk.call(self._w, 'size'))
def itemcget(self, index, option):
"""Return the resource value for an ITEM and an OPTION."""
return self.tk.call(
(self._w, 'itemcget') + (index, '-'+option))
def itemconfigure(self, index, cnf=None, **kw):
"""Configure resources of an ITEM.
The values for resources are specified as keyword arguments.
To get an overview about the allowed keyword arguments
call the method without arguments.
Valid resource names: background, bg, foreground, fg,
selectbackground, selectforeground."""
return self._configure(('itemconfigure', index), cnf, kw)
itemconfig = itemconfigure
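# --- Illustrative usage sketch (added commentary, not from the original
# source). Listbox indices are integers or index strings; END appends:
#
#     root = Tk()
#     listbox = Listbox(root, selectmode=EXTENDED)
#     listbox.pack()
#     for colour in ('red', 'green', 'blue'):
#         listbox.insert(END, colour)
#     listbox.selection_set(0)          # select the first entry
#     print listbox.curselection()      # indices come back as Tcl strings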
class Menu(Widget):
"""Menu widget which allows to display menu bars, pull-down menus and pop-up menus."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct menu widget with the parent MASTER.
Valid resource names: activebackground, activeborderwidth,
activeforeground, background, bd, bg, borderwidth, cursor,
disabledforeground, fg, font, foreground, postcommand, relief,
selectcolor, takefocus, tearoff, tearoffcommand, title, type."""
Widget.__init__(self, master, 'menu', cnf, kw)
def tk_bindForTraversal(self):
pass # obsolete since Tk 4.0
def tk_mbPost(self):
self.tk.call('tk_mbPost', self._w)
def tk_mbUnpost(self):
self.tk.call('tk_mbUnpost')
def tk_traverseToMenu(self, char):
self.tk.call('tk_traverseToMenu', self._w, char)
def tk_traverseWithinMenu(self, char):
self.tk.call('tk_traverseWithinMenu', self._w, char)
def tk_getMenuButtons(self):
return self.tk.call('tk_getMenuButtons', self._w)
def tk_nextMenu(self, count):
self.tk.call('tk_nextMenu', count)
def tk_nextMenuEntry(self, count):
self.tk.call('tk_nextMenuEntry', count)
def tk_invokeMenu(self):
self.tk.call('tk_invokeMenu', self._w)
def tk_firstMenu(self):
self.tk.call('tk_firstMenu', self._w)
def tk_mbButtonDown(self):
self.tk.call('tk_mbButtonDown', self._w)
def tk_popup(self, x, y, entry=""):
"""Post the menu at position X,Y with entry ENTRY."""
self.tk.call('tk_popup', self._w, x, y, entry)
def activate(self, index):
"""Activate entry at INDEX."""
self.tk.call(self._w, 'activate', index)
def add(self, itemType, cnf={}, **kw):
"""Internal function."""
self.tk.call((self._w, 'add', itemType) +
self._options(cnf, kw))
def add_cascade(self, cnf={}, **kw):
"""Add hierarchical menu item."""
self.add('cascade', cnf or kw)
def add_checkbutton(self, cnf={}, **kw):
"""Add checkbutton menu item."""
self.add('checkbutton', cnf or kw)
def add_command(self, cnf={}, **kw):
"""Add command menu item."""
self.add('command', cnf or kw)
def add_radiobutton(self, cnf={}, **kw):
"""Addd radio menu item."""
self.add('radiobutton', cnf or kw)
def add_separator(self, cnf={}, **kw):
"""Add separator."""
self.add('separator', cnf or kw)
def insert(self, index, itemType, cnf={}, **kw):
"""Internal function."""
self.tk.call((self._w, 'insert', index, itemType) +
self._options(cnf, kw))
def insert_cascade(self, index, cnf={}, **kw):
"""Add hierarchical menu item at INDEX."""
self.insert(index, 'cascade', cnf or kw)
def insert_checkbutton(self, index, cnf={}, **kw):
"""Add checkbutton menu item at INDEX."""
self.insert(index, 'checkbutton', cnf or kw)
def insert_command(self, index, cnf={}, **kw):
"""Add command menu item at INDEX."""
self.insert(index, 'command', cnf or kw)
def insert_radiobutton(self, index, cnf={}, **kw):
"""Addd radio menu item at INDEX."""
self.insert(index, 'radiobutton', cnf or kw)
def insert_separator(self, index, cnf={}, **kw):
"""Add separator at INDEX."""
self.insert(index, 'separator', cnf or kw)
def delete(self, index1, index2=None):
"""Delete menu items between INDEX1 and INDEX2 (included)."""
if index2 is None:
index2 = index1
num_index1, num_index2 = self.index(index1), self.index(index2)
if (num_index1 is None) or (num_index2 is None):
num_index1, num_index2 = 0, -1
for i in range(num_index1, num_index2 + 1):
if 'command' in self.entryconfig(i):
c = str(self.entrycget(i, 'command'))
if c:
self.deletecommand(c)
self.tk.call(self._w, 'delete', index1, index2)
def entrycget(self, index, option):
"""Return the resource value of an menu item for OPTION at INDEX."""
return self.tk.call(self._w, 'entrycget', index, '-' + option)
def entryconfigure(self, index, cnf=None, **kw):
"""Configure a menu item at INDEX."""
return self._configure(('entryconfigure', index), cnf, kw)
entryconfig = entryconfigure
def index(self, index):
"""Return the index of a menu item identified by INDEX."""
i = self.tk.call(self._w, 'index', index)
if i == 'none': return None
return getint(i)
def invoke(self, index):
"""Invoke a menu item identified by INDEX and execute
the associated command."""
return self.tk.call(self._w, 'invoke', index)
def post(self, x, y):
"""Display a menu at position X,Y."""
self.tk.call(self._w, 'post', x, y)
def type(self, index):
"""Return the type of the menu item at INDEX."""
return self.tk.call(self._w, 'type', index)
def unpost(self):
"""Unmap a menu."""
self.tk.call(self._w, 'unpost')
def yposition(self, index):
"""Return the y-position of the topmost pixel of the menu item at INDEX."""
return getint(self.tk.call(
self._w, 'yposition', index))
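# --- Illustrative usage sketch (added commentary, not from the original
# source). A menubar is a Menu attached to the toplevel, with cascades for
# the individual pull-down menus:
#
#     root = Tk()
#     menubar = Menu(root)
#     filemenu = Menu(menubar, tearoff=0)
#     filemenu.add_command(label='Open...', command=root.quit)
#     filemenu.add_separator()
#     filemenu.add_command(label='Exit', command=root.destroy)
#     menubar.add_cascade(label='File', menu=filemenu)
#     root.config(menu=menubar)
#     root.mainloop()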
class Menubutton(Widget):
"""Menubutton widget, obsolete since Tk8.0."""
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'menubutton', cnf, kw)
class Message(Widget):
"""Message widget to display multiline text. Obsolete since Label does it too."""
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'message', cnf, kw)
class Radiobutton(Widget):
"""Radiobutton widget which shows only one of several buttons in on-state."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a radiobutton widget with the parent MASTER.
Valid resource names: activebackground, activeforeground, anchor,
background, bd, bg, bitmap, borderwidth, command, cursor,
disabledforeground, fg, font, foreground, height,
highlightbackground, highlightcolor, highlightthickness, image,
indicatoron, justify, padx, pady, relief, selectcolor, selectimage,
state, takefocus, text, textvariable, underline, value, variable,
width, wraplength."""
Widget.__init__(self, master, 'radiobutton', cnf, kw)
def deselect(self):
"""Put the button in off-state."""
self.tk.call(self._w, 'deselect')
def flash(self):
"""Flash the button."""
self.tk.call(self._w, 'flash')
def invoke(self):
"""Toggle the button and invoke a command if given as resource."""
return self.tk.call(self._w, 'invoke')
def select(self):
"""Put the button in on-state."""
self.tk.call(self._w, 'select')
class Scale(Widget):
"""Scale widget which can display a numerical scale."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a scale widget with the parent MASTER.
Valid resource names: activebackground, background, bigincrement, bd,
bg, borderwidth, command, cursor, digits, fg, font, foreground, from,
highlightbackground, highlightcolor, highlightthickness, label,
length, orient, relief, repeatdelay, repeatinterval, resolution,
showvalue, sliderlength, sliderrelief, state, takefocus,
tickinterval, to, troughcolor, variable, width."""
Widget.__init__(self, master, 'scale', cnf, kw)
def get(self):
"""Get the current value as integer or float."""
value = self.tk.call(self._w, 'get')
try:
return getint(value)
except ValueError:
return getdouble(value)
def set(self, value):
"""Set the value to VALUE."""
self.tk.call(self._w, 'set', value)
def coords(self, value=None):
"""Return a tuple (X,Y) of the point along the centerline of the
trough that corresponds to VALUE or the current value if None is
given."""
return self._getints(self.tk.call(self._w, 'coords', value))
def identify(self, x, y):
"""Return where the point X,Y lies. Valid return values are "slider",
"though1" and "though2"."""
return self.tk.call(self._w, 'identify', x, y)
class Scrollbar(Widget):
"""Scrollbar widget which displays a slider at a certain position."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a scrollbar widget with the parent MASTER.
Valid resource names: activebackground, activerelief,
background, bd, bg, borderwidth, command, cursor,
elementborderwidth, highlightbackground,
highlightcolor, highlightthickness, jump, orient,
relief, repeatdelay, repeatinterval, takefocus,
troughcolor, width."""
Widget.__init__(self, master, 'scrollbar', cnf, kw)
def activate(self, index):
"""Display the element at INDEX with activebackground and activerelief.
INDEX can be "arrow1","slider" or "arrow2"."""
self.tk.call(self._w, 'activate', index)
def delta(self, deltax, deltay):
"""Return the fractional change of the scrollbar setting if it
would be moved by DELTAX or DELTAY pixels."""
return getdouble(
self.tk.call(self._w, 'delta', deltax, deltay))
def fraction(self, x, y):
"""Return the fractional value which corresponds to a slider
position of X,Y."""
return getdouble(self.tk.call(self._w, 'fraction', x, y))
def identify(self, x, y):
"""Return the element under position X,Y as one of
"arrow1","slider","arrow2" or ""."""
return self.tk.call(self._w, 'identify', x, y)
def get(self):
"""Return the current fractional values (upper and lower end)
of the slider position."""
return self._getdoubles(self.tk.call(self._w, 'get'))
def set(self, *args):
"""Set the fractional values of the slider position (upper and
lower ends as value between 0 and 1)."""
self.tk.call((self._w, 'set') + args)
class Text(Widget, XView, YView):
"""Text widget which can display text in various forms."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a text widget with the parent MASTER.
STANDARD OPTIONS
background, borderwidth, cursor,
exportselection, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, insertbackground,
insertborderwidth, insertofftime,
insertontime, insertwidth, padx, pady,
relief, selectbackground,
selectborderwidth, selectforeground,
setgrid, takefocus,
xscrollcommand, yscrollcommand,
WIDGET-SPECIFIC OPTIONS
autoseparators, height, maxundo,
spacing1, spacing2, spacing3,
state, tabs, undo, width, wrap,
"""
Widget.__init__(self, master, 'text', cnf, kw)
def bbox(self, *args):
"""Return a tuple of (x,y,width,height) which gives the bounding
box of the visible part of the character at the index in ARGS."""
return self._getints(
self.tk.call((self._w, 'bbox') + args)) or None
def tk_textSelectTo(self, index):
self.tk.call('tk_textSelectTo', self._w, index)
def tk_textBackspace(self):
self.tk.call('tk_textBackspace', self._w)
def tk_textIndexCloser(self, a, b, c):
self.tk.call('tk_textIndexCloser', self._w, a, b, c)
def tk_textResetAnchor(self, index):
self.tk.call('tk_textResetAnchor', self._w, index)
def compare(self, index1, op, index2):
"""Return whether between index INDEX1 and index INDEX2 the
relation OP is satisfied. OP is one of <, <=, ==, >=, >, or !=."""
return self.tk.getboolean(self.tk.call(
self._w, 'compare', index1, op, index2))
def debug(self, boolean=None):
"""Turn on the internal consistency checks of the B-Tree inside the text
widget according to BOOLEAN."""
return self.tk.getboolean(self.tk.call(
self._w, 'debug', boolean))
def delete(self, index1, index2=None):
"""Delete the characters between INDEX1 and INDEX2 (not included)."""
self.tk.call(self._w, 'delete', index1, index2)
def dlineinfo(self, index):
"""Return tuple (x,y,width,height,baseline) giving the bounding box
and baseline position of the visible part of the line containing
the character at INDEX."""
return self._getints(self.tk.call(self._w, 'dlineinfo', index))
def dump(self, index1, index2=None, command=None, **kw):
"""Return the contents of the widget between index1 and index2.
The type of contents returned is filtered based on the keyword
parameters; if 'all', 'image', 'mark', 'tag', 'text', or 'window' are
given and true, then the corresponding items are returned. The result
is a list of triples of the form (key, value, index). If none of the
keywords are true then 'all' is used by default.
If the 'command' argument is given, it is called once for each element
of the list of triples, with the values of each triple serving as the
arguments to the function. In this case the list is not returned."""
args = []
func_name = None
result = None
if not command:
# Never call the dump command without the -command flag, since the
# output could involve Tcl quoting and would be a pain to parse
# right. Instead just set the command to build a list of triples
# as if we had done the parsing.
result = []
def append_triple(key, value, index, result=result):
result.append((key, value, index))
command = append_triple
try:
if not isinstance(command, str):
func_name = command = self._register(command)
args += ["-command", command]
for key in kw:
if kw[key]: args.append("-" + key)
args.append(index1)
if index2:
args.append(index2)
self.tk.call(self._w, "dump", *args)
return result
finally:
if func_name:
self.deletecommand(func_name)
## new in tk8.4
def edit(self, *args):
"""Internal method
This method controls the undo mechanism and
the modified flag. The exact behavior of the
command depends on the option argument that
follows the edit argument. The following forms
of the command are currently supported:
edit_modified, edit_redo, edit_reset, edit_separator
and edit_undo
"""
return self.tk.call(self._w, 'edit', *args)
def edit_modified(self, arg=None):
"""Get or Set the modified flag
If arg is not specified, returns the modified
flag of the widget. The insert, delete, edit undo and
edit redo commands or the user can set or clear the
modified flag. If arg is specified, sets the
modified flag of the widget to arg.
"""
return self.edit("modified", arg)
def edit_redo(self):
"""Redo the last undone edit
When the undo option is true, reapplies the last
undone edits provided no other edits were done since
then. Generates an error when the redo stack is empty.
Does nothing when the undo option is false.
"""
return self.edit("redo")
def edit_reset(self):
"""Clears the undo and redo stacks
"""
return self.edit("reset")
def edit_separator(self):
"""Inserts a separator (boundary) on the undo stack.
Does nothing when the undo option is false
"""
return self.edit("separator")
def edit_undo(self):
"""Undoes the last edit action
If the undo option is true. An edit action is defined
as all the insert and delete commands that are recorded
on the undo stack in between two separators. Generates
an error when the undo stack is empty. Does nothing
when the undo option is false
"""
return self.edit("undo")
def get(self, index1, index2=None):
"""Return the text from INDEX1 to INDEX2 (not included)."""
return self.tk.call(self._w, 'get', index1, index2)
# (Image commands are new in 8.0)
def image_cget(self, index, option):
"""Return the value of OPTION of an embedded image at INDEX."""
if option[:1] != "-":
option = "-" + option
if option[-1:] == "_":
option = option[:-1]
return self.tk.call(self._w, "image", "cget", index, option)
def image_configure(self, index, cnf=None, **kw):
"""Configure an embedded image at INDEX."""
return self._configure(('image', 'configure', index), cnf, kw)
def image_create(self, index, cnf={}, **kw):
"""Create an embedded image at INDEX."""
return self.tk.call(
self._w, "image", "create", index,
*self._options(cnf, kw))
def image_names(self):
"""Return all names of embedded images in this widget."""
return self.tk.call(self._w, "image", "names")
def index(self, index):
"""Return the index in the form line.char for INDEX."""
return str(self.tk.call(self._w, 'index', index))
def insert(self, index, chars, *args):
"""Insert CHARS before the characters at INDEX. An additional
tag can be given in ARGS. Additional CHARS and tags can follow in ARGS."""
self.tk.call((self._w, 'insert', index, chars) + args)
def mark_gravity(self, markName, direction=None):
"""Change the gravity of a mark MARKNAME to DIRECTION (LEFT or RIGHT).
Return the current value if None is given for DIRECTION."""
return self.tk.call(
(self._w, 'mark', 'gravity', markName, direction))
def mark_names(self):
"""Return all mark names."""
return self.tk.splitlist(self.tk.call(
self._w, 'mark', 'names'))
def mark_set(self, markName, index):
"""Set mark MARKNAME before the character at INDEX."""
self.tk.call(self._w, 'mark', 'set', markName, index)
def mark_unset(self, *markNames):
"""Delete all marks in MARKNAMES."""
self.tk.call((self._w, 'mark', 'unset') + markNames)
def mark_next(self, index):
"""Return the name of the next mark after INDEX."""
return self.tk.call(self._w, 'mark', 'next', index) or None
def mark_previous(self, index):
"""Return the name of the previous mark before INDEX."""
return self.tk.call(self._w, 'mark', 'previous', index) or None
def scan_mark(self, x, y):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x, y)
def scan_dragto(self, x, y):
"""Adjust the view of the text to 10 times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x, y)
def search(self, pattern, index, stopindex=None,
forwards=None, backwards=None, exact=None,
regexp=None, nocase=None, count=None, elide=None):
"""Search PATTERN beginning from INDEX until STOPINDEX.
Return the index of the first character of a match or an
empty string."""
args = [self._w, 'search']
if forwards: args.append('-forwards')
if backwards: args.append('-backwards')
if exact: args.append('-exact')
if regexp: args.append('-regexp')
if nocase: args.append('-nocase')
if elide: args.append('-elide')
if count: args.append('-count'); args.append(count)
if pattern and pattern[0] == '-': args.append('--')
args.append(pattern)
args.append(index)
if stopindex: args.append(stopindex)
return str(self.tk.call(tuple(args)))
def see(self, index):
"""Scroll such that the character at INDEX is visible."""
self.tk.call(self._w, 'see', index)
def tag_add(self, tagName, index1, *args):
"""Add tag TAGNAME to all characters between INDEX1 and index2 in ARGS.
Additional pairs of indices may follow in ARGS."""
self.tk.call(
(self._w, 'tag', 'add', tagName, index1) + args)
def tag_unbind(self, tagName, sequence, funcid=None):
"""Unbind for all characters with TAGNAME for event SEQUENCE the
function identified with FUNCID."""
self.tk.call(self._w, 'tag', 'bind', tagName, sequence, '')
if funcid:
self.deletecommand(funcid)
def tag_bind(self, tagName, sequence, func, add=None):
"""Bind to all characters with TAGNAME at event SEQUENCE a call to function FUNC.
An additional boolean parameter ADD specifies whether FUNC will be
called in addition to the other bound functions or whether it will
replace the previous function. See bind for the return value."""
return self._bind((self._w, 'tag', 'bind', tagName),
sequence, func, add)
def tag_cget(self, tagName, option):
"""Return the value of OPTION for tag TAGNAME."""
if option[:1] != '-':
option = '-' + option
if option[-1:] == '_':
option = option[:-1]
return self.tk.call(self._w, 'tag', 'cget', tagName, option)
def tag_configure(self, tagName, cnf=None, **kw):
"""Configure a tag TAGNAME."""
return self._configure(('tag', 'configure', tagName), cnf, kw)
tag_config = tag_configure
def tag_delete(self, *tagNames):
"""Delete all tags in TAGNAMES."""
self.tk.call((self._w, 'tag', 'delete') + tagNames)
def tag_lower(self, tagName, belowThis=None):
"""Change the priority of tag TAGNAME such that it is lower
than the priority of BELOWTHIS."""
self.tk.call(self._w, 'tag', 'lower', tagName, belowThis)
def tag_names(self, index=None):
"""Return a list of all tag names."""
return self.tk.splitlist(
self.tk.call(self._w, 'tag', 'names', index))
def tag_nextrange(self, tagName, index1, index2=None):
"""Return a list of start and end index for the first sequence of
characters between INDEX1 and INDEX2 which all have tag TAGNAME.
The text is searched forward from INDEX1."""
return self.tk.splitlist(self.tk.call(
self._w, 'tag', 'nextrange', tagName, index1, index2))
def tag_prevrange(self, tagName, index1, index2=None):
"""Return a list of start and end index for the first sequence of
characters between INDEX1 and INDEX2 which all have tag TAGNAME.
The text is searched backwards from INDEX1."""
return self.tk.splitlist(self.tk.call(
self._w, 'tag', 'prevrange', tagName, index1, index2))
def tag_raise(self, tagName, aboveThis=None):
"""Change the priority of tag TAGNAME such that it is higher
than the priority of ABOVETHIS."""
self.tk.call(
self._w, 'tag', 'raise', tagName, aboveThis)
def tag_ranges(self, tagName):
"""Return a list of ranges of text which have tag TAGNAME."""
return self.tk.splitlist(self.tk.call(
self._w, 'tag', 'ranges', tagName))
def tag_remove(self, tagName, index1, index2=None):
"""Remove tag TAGNAME from all characters between INDEX1 and INDEX2."""
self.tk.call(
self._w, 'tag', 'remove', tagName, index1, index2)
def window_cget(self, index, option):
"""Return the value of OPTION of an embedded window at INDEX."""
if option[:1] != '-':
option = '-' + option
if option[-1:] == '_':
option = option[:-1]
return self.tk.call(self._w, 'window', 'cget', index, option)
def window_configure(self, index, cnf=None, **kw):
"""Configure an embedded window at INDEX."""
return self._configure(('window', 'configure', index), cnf, kw)
window_config = window_configure
def window_create(self, index, cnf={}, **kw):
"""Create a window at INDEX."""
self.tk.call(
(self._w, 'window', 'create', index)
+ self._options(cnf, kw))
def window_names(self):
"""Return all names of embedded windows in this widget."""
return self.tk.splitlist(
self.tk.call(self._w, 'window', 'names'))
def yview_pickplace(self, *what):
"""Obsolete function, use see."""
self.tk.call((self._w, 'yview', '-pickplace') + what)
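# --- Illustrative usage sketch (added commentary, not from the original
# source). Text positions use "line.column" indices; tags carry display
# attributes and search() returns the index of the first match:
#
#     root = Tk()
#     text = Text(root, width=40, height=10, undo=True)
#     text.pack()
#     text.insert(END, 'Hello, world\n')
#     text.tag_add('title', '1.0', '1.5')             # tag the word "Hello"
#     text.tag_configure('title', foreground='red')
#     print text.search('world', '1.0', stopindex=END)   # -> '1.7'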
class _setit:
"""Internal class. It wraps the command in the widget OptionMenu."""
def __init__(self, var, value, callback=None):
self.__value = value
self.__var = var
self.__callback = callback
def __call__(self, *args):
self.__var.set(self.__value)
if self.__callback:
self.__callback(self.__value, *args)
class OptionMenu(Menubutton):
"""OptionMenu which allows the user to select a value from a menu."""
def __init__(self, master, variable, value, *values, **kwargs):
"""Construct an optionmenu widget with the parent MASTER, with
the resource textvariable set to VARIABLE, the initially selected
value VALUE, the other menu values VALUES and an additional
keyword argument command."""
kw = {"borderwidth": 2, "textvariable": variable,
"indicatoron": 1, "relief": RAISED, "anchor": "c",
"highlightthickness": 2}
Widget.__init__(self, master, "menubutton", kw)
self.widgetName = 'tk_optionMenu'
menu = self.__menu = Menu(self, name="menu", tearoff=0)
self.menuname = menu._w
# 'command' is the only supported keyword
callback = kwargs.get('command')
if 'command' in kwargs:
del kwargs['command']
if kwargs:
raise TclError, 'unknown option -'+kwargs.keys()[0]
menu.add_command(label=value,
command=_setit(variable, value, callback))
for v in values:
menu.add_command(label=v,
command=_setit(variable, v, callback))
self["menu"] = menu
def __getitem__(self, name):
if name == 'menu':
return self.__menu
return Widget.__getitem__(self, name)
def destroy(self):
"""Destroy this widget and the associated menu."""
Menubutton.destroy(self)
self.__menu = None
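# --- Illustrative usage sketch (added commentary, not from the original
# source). OptionMenu drives a Tk variable through the _setit callbacks
# created above:
#
#     root = Tk()
#     size = StringVar(root)
#     size.set('medium')                    # value shown on the button
#     option = OptionMenu(root, size, 'small', 'medium', 'large')
#     option.pack()
#     # size.get() later returns whichever entry the user picked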
class Image:
"""Base class for images."""
_last_id = 0
def __init__(self, imgtype, name=None, cnf={}, master=None, **kw):
self.name = None
if not master:
master = _default_root
if not master:
raise RuntimeError, 'Too early to create image'
self.tk = master.tk
if not name:
Image._last_id += 1
name = "pyimage%r" % (Image._last_id,) # tk itself would use image<x>
# The following is needed for systems where id(x)
# can return a negative number, such as Linux/m68k:
if name[0] == '-': name = '_' + name[1:]
if kw and cnf: cnf = _cnfmerge((cnf, kw))
elif kw: cnf = kw
options = ()
for k, v in cnf.items():
if hasattr(v, '__call__'):
v = self._register(v)
options = options + ('-'+k, v)
self.tk.call(('image', 'create', imgtype, name,) + options)
self.name = name
def __str__(self): return self.name
def __del__(self):
if self.name:
try:
self.tk.call('image', 'delete', self.name)
except TclError:
# May happen if the root was destroyed
pass
def __setitem__(self, key, value):
self.tk.call(self.name, 'configure', '-'+key, value)
def __getitem__(self, key):
return self.tk.call(self.name, 'configure', '-'+key)
def configure(self, **kw):
"""Configure the image."""
res = ()
for k, v in _cnfmerge(kw).items():
if v is not None:
if k[-1] == '_': k = k[:-1]
if hasattr(v, '__call__'):
v = self._register(v)
res = res + ('-'+k, v)
self.tk.call((self.name, 'config') + res)
config = configure
def height(self):
"""Return the height of the image."""
return getint(
self.tk.call('image', 'height', self.name))
def type(self):
"""Return the type of the imgage, e.g. "photo" or "bitmap"."""
return self.tk.call('image', 'type', self.name)
def width(self):
"""Return the width of the image."""
return getint(
self.tk.call('image', 'width', self.name))
class PhotoImage(Image):
"""Widget which can display colored images in GIF, PPM/PGM format."""
def __init__(self, name=None, cnf={}, master=None, **kw):
"""Create an image with NAME.
Valid resource names: data, format, file, gamma, height, palette,
width."""
Image.__init__(self, 'photo', name, cnf, master, **kw)
def blank(self):
"""Display a transparent image."""
self.tk.call(self.name, 'blank')
def cget(self, option):
"""Return the value of OPTION."""
return self.tk.call(self.name, 'cget', '-' + option)
# XXX config
def __getitem__(self, key):
return self.tk.call(self.name, 'cget', '-' + key)
# XXX copy -from, -to, ...?
def copy(self):
"""Return a new PhotoImage with the same image as this widget."""
destImage = PhotoImage()
self.tk.call(destImage, 'copy', self.name)
return destImage
def zoom(self,x,y=''):
"""Return a new PhotoImage with the same image as this widget
but zoom it with X and Y."""
destImage = PhotoImage()
if y=='': y=x
self.tk.call(destImage, 'copy', self.name, '-zoom',x,y)
return destImage
def subsample(self,x,y=''):
"""Return a new PhotoImage based on the same image as this widget
but use only every Xth or Yth pixel."""
destImage = PhotoImage()
if y=='': y=x
self.tk.call(destImage, 'copy', self.name, '-subsample',x,y)
return destImage
def get(self, x, y):
"""Return the color (red, green, blue) of the pixel at X,Y."""
return self.tk.call(self.name, 'get', x, y)
def put(self, data, to=None):
"""Put row formatted colors to image starting from
position TO, e.g. image.put("{red green} {blue yellow}", to=(4,6))"""
args = (self.name, 'put', data)
if to:
if to[0] == '-to':
to = to[1:]
args = args + ('-to',) + tuple(to)
self.tk.call(args)
# XXX read
def write(self, filename, format=None, from_coords=None):
"""Write image to file FILENAME in FORMAT starting from
position FROM_COORDS."""
args = (self.name, 'write', filename)
if format:
args = args + ('-format', format)
if from_coords:
args = args + ('-from',) + tuple(from_coords)
self.tk.call(args)
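# --- Illustrative usage sketch (added commentary, not from the original
# source), using the put/get pixel interface documented above:
#
#     root = Tk()
#     img = PhotoImage(width=64, height=64)
#     img.put('{red red} {blue blue}', to=(0, 0))   # paint a 2x2 block
#     Label(root, image=img).pack()
#     print img.get(0, 0)   # pixel colour, e.g. '255 0 0'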
class BitmapImage(Image):
"""Widget which can display a bitmap."""
def __init__(self, name=None, cnf={}, master=None, **kw):
"""Create a bitmap with NAME.
Valid resource names: background, data, file, foreground, maskdata, maskfile."""
Image.__init__(self, 'bitmap', name, cnf, master, **kw)
def image_names(): return _default_root.tk.call('image', 'names')
def image_types(): return _default_root.tk.call('image', 'types')
class Spinbox(Widget, XView):
"""spinbox widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a spinbox widget with the parent MASTER.
STANDARD OPTIONS
activebackground, background, borderwidth,
cursor, exportselection, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, insertbackground,
insertborderwidth, insertofftime,
insertontime, insertwidth, justify, relief,
repeatdelay, repeatinterval,
selectbackground, selectborderwidth,
selectforeground, takefocus, textvariable,
xscrollcommand.
WIDGET-SPECIFIC OPTIONS
buttonbackground, buttoncursor,
buttondownrelief, buttonuprelief,
command, disabledbackground,
disabledforeground, format, from,
invalidcommand, increment,
readonlybackground, state, to,
validate, validatecommand, values,
width, wrap,
"""
Widget.__init__(self, master, 'spinbox', cnf, kw)
def bbox(self, index):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a
rectangle which encloses the character given by index.
The first two elements of the list give the x and y
coordinates of the upper-left corner of the screen
area covered by the character (in pixels relative
to the widget) and the last two elements give the
width and height of the character, in pixels. The
bounding box may refer to a region outside the
visible area of the window.
"""
return self.tk.call(self._w, 'bbox', index)
def delete(self, first, last=None):
"""Delete one or more elements of the spinbox.
First is the index of the first character to delete,
and last is the index of the character just after
the last one to delete. If last isn't specified it
defaults to first+1, i.e. a single character is
deleted. This command returns an empty string.
"""
return self.tk.call(self._w, 'delete', first, last)
def get(self):
"""Returns the spinbox's string"""
return self.tk.call(self._w, 'get')
def icursor(self, index):
"""Alter the position of the insertion cursor.
The insertion cursor will be displayed just before
the character given by index. Returns an empty string
"""
return self.tk.call(self._w, 'icursor', index)
def identify(self, x, y):
"""Returns the name of the widget at position x, y
Return value is one of: none, buttondown, buttonup, entry
"""
return self.tk.call(self._w, 'identify', x, y)
def index(self, index):
"""Returns the numerical index corresponding to index
"""
return self.tk.call(self._w, 'index', index)
def insert(self, index, s):
"""Insert string s at index
Returns an empty string.
"""
return self.tk.call(self._w, 'insert', index, s)
def invoke(self, element):
"""Causes the specified element to be invoked
The element could be buttondown or buttonup
triggering the action associated with it.
"""
return self.tk.call(self._w, 'invoke', element)
def scan(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'scan') + args)) or ()
def scan_mark(self, x):
"""Records x and the current view in the spinbox window;
used in conjunction with later scan dragto commands.
Typically this command is associated with a mouse button
press in the widget. It returns an empty string.
"""
return self.scan("mark", x)
def scan_dragto(self, x):
"""Compute the difference between the given x argument
and the x argument to the last scan mark command
It then adjusts the view left or right by 10 times the
difference in x-coordinates. This command is typically
associated with mouse motion events in the widget, to
produce the effect of dragging the spinbox at high speed
through the window. The return value is an empty string.
"""
return self.scan("dragto", x)
def selection(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'selection') + args)) or ()
def selection_adjust(self, index):
"""Locate the end of the selection nearest to the character
given by index,
Then adjust that end of the selection to be at index
(i.e including but not going beyond index). The other
end of the selection is made the anchor point for future
select to commands. If the selection isn't currently in
the spinbox, then a new selection is created to include
the characters between index and the most recent selection
anchor point, inclusive. Returns an empty string.
"""
return self.selection("adjust", index)
def selection_clear(self):
"""Clear the selection
If the selection isn't in this widget then the
command has no effect. Returns an empty string.
"""
return self.selection("clear")
def selection_element(self, element=None):
"""Sets or gets the currently selected element.
If a spinbutton element is specified, it will be
displayed depressed
"""
return self.selection("element", element)
###########################################################################
class LabelFrame(Widget):
"""labelframe widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a labelframe widget with the parent MASTER.
STANDARD OPTIONS
borderwidth, cursor, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, padx, pady, relief,
takefocus, text
WIDGET-SPECIFIC OPTIONS
background, class, colormap, container,
height, labelanchor, labelwidget,
visual, width
"""
Widget.__init__(self, master, 'labelframe', cnf, kw)
########################################################################
class PanedWindow(Widget):
"""panedwindow widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a panedwindow widget with the parent MASTER.
STANDARD OPTIONS
background, borderwidth, cursor, height,
orient, relief, width
WIDGET-SPECIFIC OPTIONS
handlepad, handlesize, opaqueresize,
sashcursor, sashpad, sashrelief,
sashwidth, showhandle,
"""
Widget.__init__(self, master, 'panedwindow', cnf, kw)
def add(self, child, **kw):
"""Add a child widget to the panedwindow in a new pane.
The child argument is the name of the child widget
followed by pairs of arguments that specify how to
manage the windows. The possible options and values
are the ones accepted by the paneconfigure method.
"""
self.tk.call((self._w, 'add', child) + self._options(kw))
def remove(self, child):
"""Remove the pane containing child from the panedwindow
All geometry management options for child will be forgotten.
"""
self.tk.call(self._w, 'forget', child)
forget=remove
def identify(self, x, y):
"""Identify the panedwindow component at point x, y
If the point is over a sash or a sash handle, the result
is a two element list containing the index of the sash or
handle, and a word indicating whether it is over a sash
or a handle, such as {0 sash} or {2 handle}. If the point
is over any other part of the panedwindow, the result is
an empty list.
"""
return self.tk.call(self._w, 'identify', x, y)
def proxy(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'proxy') + args)) or ()
def proxy_coord(self):
"""Return the x and y pair of the most recent proxy location
"""
return self.proxy("coord")
def proxy_forget(self):
"""Remove the proxy from the display.
"""
return self.proxy("forget")
def proxy_place(self, x, y):
"""Place the proxy at the given x and y coordinates.
"""
return self.proxy("place", x, y)
def sash(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'sash') + args)) or ()
def sash_coord(self, index):
"""Return the current x and y pair for the sash given by index.
Index must be an integer between 0 and 1 less than the
number of panes in the panedwindow. The coordinates given are
those of the top left corner of the region containing the sash.
"""
return self.sash("coord", index)
def sash_mark(self, index):
"""Records x and y for the sash given by index;
Used in conjunction with later dragto commands to move the sash.
"""
return self.sash("mark", index)
def sash_place(self, index, x, y):
"""Place the sash given by index at the given coordinates
"""
return self.sash("place", index, x, y)
def panecget(self, child, option):
"""Query a management option for window.
Option may be any value allowed by the paneconfigure subcommand
"""
return self.tk.call(
(self._w, 'panecget') + (child, '-'+option))
def paneconfigure(self, tagOrId, cnf=None, **kw):
"""Query or modify the management options for window.
If no option is specified, returns a list describing all
of the available options for pathName. If option is
specified with no value, then the command returns a list
describing the one named option (this list will be identical
to the corresponding sublist of the value returned if no
option is specified). If one or more option-value pairs are
specified, then the command modifies the given widget
option(s) to have the given value(s); in this case the
command returns an empty string. The following options
are supported:
after window
Insert the window after the window specified. window
should be the name of a window already managed by pathName.
before window
Insert the window before the window specified. window
should be the name of a window already managed by pathName.
height size
Specify a height for the window. The height will be the
outer dimension of the window including its border, if
any. If size is an empty string, or if -height is not
specified, then the height requested internally by the
window will be used initially; the height may later be
adjusted by the movement of sashes in the panedwindow.
Size may be any value accepted by Tk_GetPixels.
minsize n
Specifies that the size of the window cannot be made
less than n. This constraint only affects the size of
the widget in the paned dimension -- the x dimension
for horizontal panedwindows, the y dimension for
vertical panedwindows. May be any value accepted by
Tk_GetPixels.
padx n
Specifies a non-negative value indicating how much
extra space to leave on each side of the window in
the X-direction. The value may have any of the forms
accepted by Tk_GetPixels.
pady n
Specifies a non-negative value indicating how much
extra space to leave on each side of the window in
the Y-direction. The value may have any of the forms
accepted by Tk_GetPixels.
sticky style
If a window's pane is larger than the requested
dimensions of the window, this option may be used
to position (or stretch) the window within its pane.
Style is a string that contains zero or more of the
characters n, s, e or w. The string can optionally
contain spaces or commas, but they are ignored. Each
letter refers to a side (north, south, east, or west)
that the window will "stick" to. If both n and s
(or e and w) are specified, the window will be
stretched to fill the entire height (or width) of
its cavity.
width size
Specify a width for the window. The width will be
the outer dimension of the window including its
border, if any. If size is an empty string, or
if -width is not specified, then the width requested
internally by the window will be used initially; the
width may later be adjusted by the movement of sashes
in the panedwindow. Size may be any value accepted by
Tk_GetPixels.
"""
if cnf is None and not kw:
cnf = {}
for x in self.tk.split(
self.tk.call(self._w,
'paneconfigure', tagOrId)):
cnf[x[0][1:]] = (x[0][1:],) + x[1:]
return cnf
if type(cnf) == StringType and not kw:
x = self.tk.split(self.tk.call(
self._w, 'paneconfigure', tagOrId, '-'+cnf))
return (x[0][1:],) + x[1:]
self.tk.call((self._w, 'paneconfigure', tagOrId) +
self._options(cnf, kw))
paneconfig = paneconfigure
def panes(self):
"""Returns an ordered list of the child panes."""
return self.tk.call(self._w, 'panes')
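# --- Illustrative usage sketch (added commentary, not from the original
# source). Children are added as panes; per-pane options use the
# paneconfigure vocabulary documented above:
#
#     root = Tk()
#     paned = PanedWindow(root, orient=HORIZONTAL, showhandle=True)
#     paned.pack(fill=BOTH, expand=1)
#     paned.add(Label(paned, text='left'), minsize=80)
#     paned.add(Label(paned, text='right'), sticky='nsew')
#     print paned.panes()   # ordered list of the child panes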
######################################################################
# Extensions:
class Studbutton(Button):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'studbutton', cnf, kw)
self.bind('<Any-Enter>', self.tkButtonEnter)
self.bind('<Any-Leave>', self.tkButtonLeave)
self.bind('<1>', self.tkButtonDown)
self.bind('<ButtonRelease-1>', self.tkButtonUp)
class Tributton(Button):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'tributton', cnf, kw)
self.bind('<Any-Enter>', self.tkButtonEnter)
self.bind('<Any-Leave>', self.tkButtonLeave)
self.bind('<1>', self.tkButtonDown)
self.bind('<ButtonRelease-1>', self.tkButtonUp)
self['fg'] = self['bg']
self['activebackground'] = self['bg']
######################################################################
# Test:
def _test():
root = Tk()
text = "This is Tcl/Tk version %s" % TclVersion
if TclVersion >= 8.1:
try:
text = text + unicode("\nThis should be a cedilla: \347",
"iso-8859-1")
except NameError:
pass # no unicode support
label = Label(root, text=text)
label.pack()
test = Button(root, text="Click me!",
command=lambda root=root: root.test.configure(
text="[%s]" % root.test['text']))
test.pack()
root.test = test
quit = Button(root, text="QUIT", command=root.destroy)
quit.pack()
# The following three commands are needed so the window pops
# up on top on Windows...
root.iconify()
root.update()
root.deiconify()
root.mainloop()
if __name__ == '__main__':
_test()
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
flink-ml-framework/python/setup.py
|
# Copyright 2019 The flink-ai-extended Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import platform
import re
import subprocess
import sys
from distutils.version import LooseVersion
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
this_directory = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(this_directory, 'flink_ml_framework/version.py')
try:
exec(open(version_file).read())
except IOError:
print("Failed to load flink_ml_framework version file for packaging. " +
"'%s' not found!" % version_file,
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)',
out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
cfg.upper(),
extdir)]
if sys.maxsize > 2 ** 32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
if platform.system() == "Linux":
build_args += ['-lpthread']
env = os.environ.copy()
env[
'CXXFLAGS'] = '{} -D_GLIBCXX_USE_CXX11_ABI=0 -DVERSION_INFO=\\"{}\\"'.format(
env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,
cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args,
cwd=self.build_temp)
setup(
name='flink_ml_framework',
version=VERSION,
include_package_data=True,
packages=find_packages(),
ext_modules=[CMakeExtension('flink_ml_framework/flink_ml_framework')],
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
url='https://github.com/flink-extended/dl-on-flink',
license='https://www.apache.org/licenses/LICENSE-2.0'
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
project/core/celery.py
|
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
app = Celery('core')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
vendor/github.com/mitchellh/packer/builder/openstack/ssh.go
|
package openstack
import (
"errors"
"fmt"
"log"
"net"
"os"
"time"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips"
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
packerssh "github.com/hashicorp/packer/communicator/ssh"
"github.com/hashicorp/packer/helper/multistep"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
// CommHost looks up the host for the communicator.
func CommHost(
client *gophercloud.ServiceClient,
sshinterface string,
sshipversion string) func(multistep.StateBag) (string, error) {
return func(state multistep.StateBag) (string, error) {
s := state.Get("server").(*servers.Server)
// If we have a specific interface, try that
if sshinterface != "" {
if addr := sshAddrFromPool(s, sshinterface, sshipversion); addr != "" {
log.Printf("[DEBUG] Using IP address %s from specified interface %s to connect", addr, sshinterface)
return addr, nil
}
}
// If we have a floating IP, use that
ip := state.Get("access_ip").(*floatingips.FloatingIP)
if ip != nil && ip.IP != "" {
log.Printf("[DEBUG] Using floating IP %s to connect", ip.IP)
return ip.IP, nil
}
if s.AccessIPv4 != "" {
log.Printf("[DEBUG] Using AccessIPv4 %s to connect", s.AccessIPv4)
return s.AccessIPv4, nil
}
// Try to get it from the requested interface
if addr := sshAddrFromPool(s, sshinterface, sshipversion); addr != "" {
log.Printf("[DEBUG] Using IP address %s to connect", addr)
return addr, nil
}
s, err := servers.Get(client, s.ID).Extract()
if err != nil {
return "", err
}
state.Put("server", s)
time.Sleep(1 * time.Second)
return "", errors.New("couldn't determine IP address for server")
}
}
// SSHConfig returns a function that can be used for the SSH communicator
// config for connecting to the instance created over SSH using a private key
// or a password.
func SSHConfig(useAgent bool, username, password string) func(multistep.StateBag) (*ssh.ClientConfig, error) {
return func(state multistep.StateBag) (*ssh.ClientConfig, error) {
if useAgent {
authSock := os.Getenv("SSH_AUTH_SOCK")
if authSock == "" {
return nil, fmt.Errorf("SSH_AUTH_SOCK is not set")
}
sshAgent, err := net.Dial("unix", authSock)
if err != nil {
return nil, fmt.Errorf("Cannot connect to SSH Agent socket %q: %s", authSock, err)
}
return &ssh.ClientConfig{
User: username,
Auth: []ssh.AuthMethod{
ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}, nil
}
privateKey, hasKey := state.GetOk("privateKey")
if hasKey {
signer, err := ssh.ParsePrivateKey([]byte(privateKey.(string)))
if err != nil {
return nil, fmt.Errorf("Error setting up SSH config: %s", err)
}
return &ssh.ClientConfig{
User: username,
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}, nil
} else {
return &ssh.ClientConfig{
User: username,
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Auth: []ssh.AuthMethod{
ssh.Password(password),
ssh.KeyboardInteractive(
packerssh.PasswordKeyboardInteractive(password)),
}}, nil
}
}
}
func sshAddrFromPool(s *servers.Server, desired string, sshIPVersion string) string {
// Get all the addresses associated with this server. This
// was taken directly from Terraform.
for pool, networkAddresses := range s.Addresses {
// If we have an SSH interface specified, skip it if no match
if desired != "" && pool != desired {
log.Printf(
"[INFO] Skipping pool %s, doesn't match requested %s",
pool, desired)
continue
}
elements, ok := networkAddresses.([]interface{})
if !ok {
log.Printf(
"[ERROR] Unknown return type for address field: %#v",
networkAddresses)
continue
}
for _, element := range elements {
var addr string
address := element.(map[string]interface{})
if address["OS-EXT-IPS:type"] == "floating" {
addr = address["addr"].(string)
} else if sshIPVersion == "4" {
if address["version"].(float64) == 4 {
addr = address["addr"].(string)
}
} else if sshIPVersion == "6" {
if address["version"].(float64) == 6 {
addr = fmt.Sprintf("[%s]", address["addr"].(string))
}
} else {
if address["version"].(float64) == 6 {
addr = fmt.Sprintf("[%s]", address["addr"].(string))
} else {
addr = address["addr"].(string)
}
}
if addr != "" {
log.Printf("[DEBUG] Detected address: %s", addr)
return addr
}
}
}
return ""
}
|
[
"\"SSH_AUTH_SOCK\""
] |
[] |
[
"SSH_AUTH_SOCK"
] |
[]
|
["SSH_AUTH_SOCK"]
|
go
| 1 | 0 | |
process.go
|
package main
/*
#cgo pkg-config: vips
#cgo LDFLAGS: -s -w
#cgo CFLAGS: -O3
#include "vips.h"
*/
import "C"
import (
"context"
"errors"
"math"
"os"
"runtime"
"time"
"unsafe"
"golang.org/x/sync/errgroup"
)
var (
vipsSupportSmartcrop bool
vipsTypeSupportLoad = make(map[imageType]bool)
vipsTypeSupportSave = make(map[imageType]bool)
watermark *C.VipsImage
errSmartCropNotSupported = errors.New("Smart crop is not supported by used version of libvips")
)
type cConfig struct {
JpegProgressive C.int
PngInterlaced C.int
WatermarkOpacity C.double
}
var cConf cConfig
var cstrings = make(map[string]*C.char)
func initVips() {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
if err := C.vips_initialize(); err != 0 {
C.vips_shutdown()
logFatal("unable to start vips!")
}
// Disable the libvips cache. The processing pipeline is finely tuned, so we wouldn't gain much from it.
// Enabled cache can cause SIGSEGV on Musl-based systems like Alpine.
C.vips_cache_set_max_mem(0)
C.vips_cache_set_max(0)
C.vips_concurrency_set(1)
if len(os.Getenv("IMGPROXY_VIPS_LEAK_CHECK")) > 0 {
C.vips_leak_set(C.gboolean(1))
}
if len(os.Getenv("IMGPROXY_VIPS_CACHE_TRACE")) > 0 {
C.vips_cache_set_trace(C.gboolean(1))
}
vipsSupportSmartcrop = C.vips_support_smartcrop() == 1
if int(C.vips_type_find_load_go(C.int(imageTypeJPEG))) != 0 {
vipsTypeSupportLoad[imageTypeJPEG] = true
}
if int(C.vips_type_find_load_go(C.int(imageTypePNG))) != 0 {
vipsTypeSupportLoad[imageTypePNG] = true
}
if int(C.vips_type_find_load_go(C.int(imageTypeWEBP))) != 0 {
vipsTypeSupportLoad[imageTypeWEBP] = true
}
if int(C.vips_type_find_load_go(C.int(imageTypeGIF))) != 0 {
vipsTypeSupportLoad[imageTypeGIF] = true
}
if int(C.vips_type_find_load_go(C.int(imageTypeSVG))) != 0 {
vipsTypeSupportLoad[imageTypeSVG] = true
}
// we load ICO with github.com/mat/besticon/ico and send decoded data to vips
vipsTypeSupportLoad[imageTypeICO] = true
if int(C.vips_type_find_save_go(C.int(imageTypeJPEG))) != 0 {
vipsTypeSupportSave[imageTypeJPEG] = true
}
if int(C.vips_type_find_save_go(C.int(imageTypePNG))) != 0 {
vipsTypeSupportSave[imageTypePNG] = true
}
if int(C.vips_type_find_save_go(C.int(imageTypeWEBP))) != 0 {
vipsTypeSupportSave[imageTypeWEBP] = true
}
if int(C.vips_type_find_save_go(C.int(imageTypeGIF))) != 0 {
vipsTypeSupportSave[imageTypeGIF] = true
}
if int(C.vips_type_find_save_go(C.int(imageTypeICO))) != 0 {
vipsTypeSupportSave[imageTypeICO] = true
}
if conf.JpegProgressive {
cConf.JpegProgressive = C.int(1)
}
if conf.PngInterlaced {
cConf.PngInterlaced = C.int(1)
}
cConf.WatermarkOpacity = C.double(conf.WatermarkOpacity)
if err := vipsPrepareWatermark(); err != nil {
logFatal(err.Error())
}
collectVipsMetrics()
}
func shutdownVips() {
C.clear_image(&watermark)
C.vips_shutdown()
}
func collectVipsMetrics() {
if prometheusEnabled {
go func() {
for range time.Tick(5 * time.Second) {
prometheusVipsMemory.Set(float64(C.vips_tracked_get_mem()))
prometheusVipsMaxMemory.Set(float64(C.vips_tracked_get_mem_highwater()))
prometheusVipsAllocs.Set(float64(C.vips_tracked_get_allocs()))
}
}()
}
}
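// cachedCString returns a C string for the given Go string, caching the result so
// repeated lookups don't allocate a new C string every time.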
func cachedCString(str string) *C.char {
if cstr, ok := cstrings[str]; ok {
return cstr
}
cstr := C.CString(str)
cstrings[str] = cstr
return cstr
}
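// extractMeta returns the image width, height, rotation angle, and whether a horizontal
// flip is needed, taking the EXIF orientation into account.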
func extractMeta(img *C.VipsImage) (int, int, int, bool) {
width := int(img.Xsize)
height := int(img.Ysize)
angle := C.VIPS_ANGLE_D0
flip := false
orientation := C.vips_get_exif_orientation(img)
if orientation >= 5 && orientation <= 8 {
width, height = height, width
}
if orientation == 3 || orientation == 4 {
angle = C.VIPS_ANGLE_D180
}
if orientation == 5 || orientation == 6 {
angle = C.VIPS_ANGLE_D90
}
if orientation == 7 || orientation == 8 {
angle = C.VIPS_ANGLE_D270
}
if orientation == 2 || orientation == 4 || orientation == 5 || orientation == 7 {
flip = true
}
return width, height, angle, flip
}
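// calcScale computes the scale factor to apply based on the requested size, resize mode,
// DPR, and whether enlarging is allowed.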
func calcScale(width, height int, po *processingOptions, imgtype imageType) float64 {
// If we're only going to crop, we only need to scale down to the DPR.
// Scaling up while cropping is not optimal at this stage; we'll do it later if needed.
if po.Resize == resizeCrop {
if po.Dpr < 1 {
return po.Dpr
}
return 1
}
var scale float64
srcW, srcH := float64(width), float64(height)
if (po.Width == 0 || po.Width == width) && (po.Height == 0 || po.Height == height) {
scale = 1
} else {
wr := float64(po.Width) / srcW
hr := float64(po.Height) / srcH
if po.Width == 0 {
scale = hr
} else if po.Height == 0 {
scale = wr
} else if po.Resize == resizeFit {
scale = math.Min(wr, hr)
} else {
scale = math.Max(wr, hr)
}
}
scale = scale * po.Dpr
if !po.Enlarge && scale > 1 && imgtype != imageTypeSVG {
return 1
}
if srcW*scale < 1 {
scale = 1 / srcW
}
if srcH*scale < 1 {
scale = 1 / srcH
}
return scale
}
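// calcShrink converts a scale factor into an integer shrink factor for shrink-on-load;
// only WEBP and JPEG support it, everything else returns 1.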
func calcShrink(scale float64, imgtype imageType) int {
switch imgtype {
case imageTypeWEBP:
return int(1.0 / scale)
case imageTypeJPEG:
shrink := int(1.0 / scale)
switch {
case shrink >= 16:
return 8
case shrink >= 8:
return 4
case shrink >= 4:
return 2
}
}
return 1
}
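// calcCrop returns the left and top offsets of the crop area for the given gravity,
// including focus-point gravity.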
func calcCrop(width, height, cropWidth, cropHeight int, gravity *gravityOptions) (left, top int) {
if gravity.Type == gravityFocusPoint {
pointX := int(float64(width) * gravity.X)
pointY := int(float64(height) * gravity.Y)
left = maxInt(0, minInt(pointX-cropWidth/2, width-cropWidth))
top = maxInt(0, minInt(pointY-cropHeight/2, height-cropHeight))
return
}
left = (width - cropWidth + 1) / 2
top = (height - cropHeight + 1) / 2
if gravity.Type == gravityNorth || gravity.Type == gravityNorthEast || gravity.Type == gravityNorthWest {
top = 0
}
if gravity.Type == gravityEast || gravity.Type == gravityNorthEast || gravity.Type == gravitySouthEast {
left = width - cropWidth
}
if gravity.Type == gravitySouth || gravity.Type == gravitySouthEast || gravity.Type == gravitySouthWest {
top = height - cropHeight
}
if gravity.Type == gravityWest || gravity.Type == gravityNorthWest || gravity.Type == gravitySouthWest {
left = 0
}
return
}
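// transformImage applies the processing pipeline (scale, rotate, crop, extend, flatten,
// blur, sharpen, watermark) to a single image or GIF frame.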
func transformImage(ctx context.Context, img **C.VipsImage, data []byte, po *processingOptions, imgtype imageType) error {
var err error
imgWidth, imgHeight, angle, flip := extractMeta(*img)
hasAlpha := vipsImageHasAlpha(*img)
scale := calcScale(imgWidth, imgHeight, po, imgtype)
if scale != 1 {
if imgtype == imageTypeSVG && data != nil {
// Load SVG with desired scale
if tmp, err := vipsLoadImage(data, imgtype, 1, scale, false); err == nil {
C.swap_and_clear(img, tmp)
} else {
return err
}
scale = 1
} else {
// Do some shrink-on-load
if scale < 1.0 && data != nil {
if shrink := calcShrink(scale, imgtype); shrink != 1 {
if tmp, err := vipsLoadImage(data, imgtype, shrink, 1.0, false); err == nil {
C.swap_and_clear(img, tmp)
} else {
return err
}
scale = scale * float64(shrink)
}
}
}
}
if err = vipsRad2Float(img); err != nil {
return err
}
if err = vipsImportColourProfile(img); err != nil {
return err
}
if err = vipsFixColourspace(img); err != nil {
return err
}
if scale != 1 {
if err = vipsResize(img, scale, hasAlpha); err != nil {
return err
}
}
// Update actual image size after resize
imgWidth, imgHeight, _, _ = extractMeta(*img)
checkTimeout(ctx)
if angle != C.VIPS_ANGLE_D0 || flip {
if err = vipsImageCopyMemory(img); err != nil {
return err
}
if angle != C.VIPS_ANGLE_D0 {
if err = vipsRotate(img, angle); err != nil {
return err
}
}
if flip {
if err = vipsFlip(img); err != nil {
return err
}
}
}
checkTimeout(ctx)
cropW, cropH := po.Width, po.Height
if po.Dpr < 1 || (po.Dpr > 1 && po.Resize != resizeCrop) {
cropW = int(float64(cropW) * po.Dpr)
cropH = int(float64(cropH) * po.Dpr)
}
if cropW == 0 {
cropW = imgWidth
} else {
cropW = minInt(cropW, imgWidth)
}
if cropH == 0 {
cropH = imgHeight
} else {
cropH = minInt(cropH, imgHeight)
}
if cropW < imgWidth || cropH < imgHeight {
if po.Gravity.Type == gravitySmart {
if err = vipsImageCopyMemory(img); err != nil {
return err
}
if err = vipsSmartCrop(img, cropW, cropH); err != nil {
return err
}
// Applying additional modifications after smart crop causes SIGSEGV on Alpine
// so we have to copy memory after it
if err = vipsImageCopyMemory(img); err != nil {
return err
}
} else {
left, top := calcCrop(imgWidth, imgHeight, cropW, cropH, &po.Gravity)
if err = vipsCrop(img, left, top, cropW, cropH); err != nil {
return err
}
}
checkTimeout(ctx)
}
if po.Enlarge && po.Resize == resizeCrop && po.Dpr > 1 {
// We didn't enlarge the image earlier because it wasn't optimal. Now it's time to do it
if err = vipsResize(img, po.Dpr, hasAlpha); err != nil {
return err
}
if err = vipsImageCopyMemory(img); err != nil {
return err
}
}
if po.Expand && (po.Width > int((*img).Xsize) || po.Height > int((*img).Ysize)) {
if err = vipsEnsureAlpha(img); err != nil {
return err
}
hasAlpha = true
if err = vipsEmbed(img, gravityCenter, C.int(po.Width), C.int(po.Height), 0, 0); err != nil {
return err
}
}
if hasAlpha && (po.Flatten || po.Format == imageTypeJPEG) {
if err = vipsFlatten(img, po.Background); err != nil {
return err
}
}
if po.Blur > 0 {
if err = vipsBlur(img, po.Blur); err != nil {
return err
}
}
if po.Sharpen > 0 {
if err = vipsSharpen(img, po.Sharpen); err != nil {
return err
}
}
checkTimeout(ctx)
if po.Watermark.Enabled {
if err = vipsApplyWatermark(img, &po.Watermark); err != nil {
return err
}
}
return vipsFixColourspace(img)
}
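// transformGif splits an animated GIF into frames, transforms each frame concurrently,
// and joins the frames back together.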
func transformGif(ctx context.Context, img **C.VipsImage, po *processingOptions) error {
imgWidth := int((*img).Xsize)
imgHeight := int((*img).Ysize)
// Double check dimensions because gif may have many frames
if err := checkDimensions(imgWidth, imgHeight); err != nil {
return err
}
frameHeight, err := vipsGetInt(*img, "page-height")
if err != nil {
return err
}
delay, err := vipsGetInt(*img, "gif-delay")
if err != nil {
return err
}
loop, err := vipsGetInt(*img, "gif-loop")
if err != nil {
return err
}
framesCount := minInt(imgHeight/frameHeight, conf.MaxGifFrames)
frames := make([]*C.VipsImage, framesCount)
defer func() {
for _, frame := range frames {
C.clear_image(&frame)
}
}()
var errg errgroup.Group
for i := 0; i < framesCount; i++ {
ind := i
errg.Go(func() error {
var frame *C.VipsImage
if err := vipsExtract(*img, &frame, 0, ind*frameHeight, imgWidth, frameHeight); err != nil {
return err
}
if err := transformImage(ctx, &frame, nil, po, imageTypeGIF); err != nil {
return err
}
frames[ind] = frame
return nil
})
}
if err := errg.Wait(); err != nil {
return err
}
checkTimeout(ctx)
if err := vipsArrayjoin(frames, img); err != nil {
return err
}
vipsSetInt(*img, "page-height", int(frames[0].Ysize))
vipsSetInt(*img, "gif-delay", delay)
vipsSetInt(*img, "gif-loop", loop)
return nil
}
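// processImage loads the source image from the request context, transforms it, and
// saves it in the requested format.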
func processImage(ctx context.Context) ([]byte, context.CancelFunc, error) {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
if newRelicEnabled {
newRelicCancel := startNewRelicSegment(ctx, "Processing image")
defer newRelicCancel()
}
if prometheusEnabled {
defer startPrometheusDuration(prometheusProcessingDuration)()
}
defer C.vips_cleanup()
po := getProcessingOptions(ctx)
data := getImageData(ctx).Bytes()
imgtype := getImageType(ctx)
if po.Gravity.Type == gravitySmart && !vipsSupportSmartcrop {
return nil, func() {}, errSmartCropNotSupported
}
if po.Format == imageTypeUnknown {
if vipsTypeSupportSave[imgtype] {
po.Format = imgtype
} else {
po.Format = imageTypeJPEG
}
}
img, err := vipsLoadImage(data, imgtype, 1, 1.0, po.Format == imageTypeGIF)
if err != nil {
return nil, func() {}, err
}
defer C.clear_image(&img)
if imgtype == imageTypeGIF && po.Format == imageTypeGIF && vipsIsAnimatedGif(img) {
if err := transformGif(ctx, &img, po); err != nil {
return nil, func() {}, err
}
} else {
if err := transformImage(ctx, &img, data, po, imgtype); err != nil {
return nil, func() {}, err
}
}
checkTimeout(ctx)
if po.Format == imageTypeGIF {
if err := vipsCastUchar(&img); err != nil {
return nil, func() {}, err
}
checkTimeout(ctx)
}
C.vips_strip_meta(img)
return vipsSaveImage(img, po.Format, po.Quality)
}
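// vipsPrepareWatermark loads the configured watermark image, applies the configured
// opacity, and keeps the result in memory for reuse.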
func vipsPrepareWatermark() error {
data, imgtype, cancel, err := watermarkData()
defer cancel()
if err != nil {
return err
}
if data == nil {
return nil
}
watermark, err = vipsLoadImage(data, imgtype, 1, 1.0, false)
if err != nil {
return err
}
var tmp *C.VipsImage
if C.vips_apply_opacity(watermark, &tmp, C.double(conf.WatermarkOpacity)) != 0 {
return vipsError()
}
C.swap_and_clear(&watermark, tmp)
if tmp = C.vips_image_copy_memory(watermark); tmp == nil {
return vipsError()
}
C.swap_and_clear(&watermark, tmp)
return nil
}
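// vipsLoadImage decodes the raw image data with the loader matching imgtype; shrink and
// svgScale enable shrink-on-load, and allPages controls whether all GIF frames are loaded.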
func vipsLoadImage(data []byte, imgtype imageType, shrink int, svgScale float64, allPages bool) (*C.VipsImage, error) {
var img *C.VipsImage
err := C.int(0)
switch imgtype {
case imageTypeJPEG:
err = C.vips_jpegload_go(unsafe.Pointer(&data[0]), C.size_t(len(data)), C.int(shrink), &img)
case imageTypePNG:
err = C.vips_pngload_go(unsafe.Pointer(&data[0]), C.size_t(len(data)), &img)
case imageTypeWEBP:
err = C.vips_webpload_go(unsafe.Pointer(&data[0]), C.size_t(len(data)), C.int(shrink), &img)
case imageTypeGIF:
pages := C.int(1)
if allPages {
pages = -1
}
err = C.vips_gifload_go(unsafe.Pointer(&data[0]), C.size_t(len(data)), pages, &img)
case imageTypeSVG:
err = C.vips_svgload_go(unsafe.Pointer(&data[0]), C.size_t(len(data)), C.double(svgScale), &img)
case imageTypeICO:
rawData, width, height, icoErr := icoData(data)
if icoErr != nil {
return nil, icoErr
}
img = C.vips_image_new_from_memory_copy(unsafe.Pointer(&rawData[0]), C.size_t(width*height*4), C.int(width), C.int(height), 4, C.VIPS_FORMAT_UCHAR)
}
if err != 0 {
return nil, vipsError()
}
return img, nil
}
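// vipsSaveImage encodes the image in the requested format and returns the bytes along
// with a cancel function that frees the underlying buffer.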
func vipsSaveImage(img *C.VipsImage, imgtype imageType, quality int) ([]byte, context.CancelFunc, error) {
var ptr unsafe.Pointer
cancel := func() {
C.g_free_go(&ptr)
}
err := C.int(0)
imgsize := C.size_t(0)
switch imgtype {
case imageTypeJPEG:
err = C.vips_jpegsave_go(img, &ptr, &imgsize, C.int(quality), cConf.JpegProgressive)
case imageTypePNG:
if err = C.vips_pngsave_go(img, &ptr, &imgsize, cConf.PngInterlaced, 1); err != 0 {
C.g_free_go(&ptr)
logWarning("Failed to save PNG; Trying not to embed icc profile")
err = C.vips_pngsave_go(img, &ptr, &imgsize, cConf.PngInterlaced, 0)
}
case imageTypeWEBP:
err = C.vips_webpsave_go(img, &ptr, &imgsize, C.int(quality))
case imageTypeGIF:
err = C.vips_gifsave_go(img, &ptr, &imgsize)
case imageTypeICO:
err = C.vips_icosave_go(img, &ptr, &imgsize)
}
if err != 0 {
C.g_free_go(&ptr)
return nil, cancel, vipsError()
}
const maxBufSize = ^uint32(0)
b := (*[maxBufSize]byte)(ptr)[:int(imgsize):int(imgsize)]
return b, cancel, nil
}
func vipsArrayjoin(in []*C.VipsImage, out **C.VipsImage) error {
var tmp *C.VipsImage
if C.vips_arrayjoin_go(&in[0], &tmp, C.int(len(in))) != 0 {
return vipsError()
}
C.swap_and_clear(out, tmp)
return nil
}
func vipsIsAnimatedGif(img *C.VipsImage) bool {
return C.vips_is_animated_gif(img) > 0
}
func vipsImageHasAlpha(img *C.VipsImage) bool {
return C.vips_image_hasalpha_go(img) > 0
}
func vipsGetInt(img *C.VipsImage, name string) (int, error) {
var i C.int
if C.vips_image_get_int(img, cachedCString(name), &i) != 0 {
return 0, vipsError()
}
return int(i), nil
}
func vipsSetInt(img *C.VipsImage, name string, value int) {
C.vips_image_set_int(img, cachedCString(name), C.int(value))
}
func vipsCastUchar(img **C.VipsImage) error {
var tmp *C.VipsImage
if C.vips_image_get_format(*img) != C.VIPS_FORMAT_UCHAR {
if C.vips_cast_go(*img, &tmp, C.VIPS_FORMAT_UCHAR) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
}
return nil
}
func vipsRad2Float(img **C.VipsImage) error {
var tmp *C.VipsImage
if C.vips_image_get_coding(*img) == C.VIPS_CODING_RAD {
if C.vips_rad2float_go(*img, &tmp) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
}
return nil
}
func vipsResize(img **C.VipsImage, scale float64, hasAlpha bool) error {
var tmp *C.VipsImage
if hasAlpha {
if C.vips_resize_with_premultiply(*img, &tmp, C.double(scale)) != 0 {
return vipsError()
}
} else {
if C.vips_resize_go(*img, &tmp, C.double(scale)) != 0 {
return vipsError()
}
}
C.swap_and_clear(img, tmp)
return nil
}
func vipsRotate(img **C.VipsImage, angle int) error {
var tmp *C.VipsImage
if C.vips_rot_go(*img, &tmp, C.VipsAngle(angle)) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
return nil
}
func vipsFlip(img **C.VipsImage) error {
var tmp *C.VipsImage
if C.vips_flip_horizontal_go(*img, &tmp) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
return nil
}
func vipsCrop(img **C.VipsImage, left, top, width, height int) error {
var tmp *C.VipsImage
if C.vips_extract_area_go(*img, &tmp, C.int(left), C.int(top), C.int(width), C.int(height)) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
return nil
}
func vipsExtract(in *C.VipsImage, out **C.VipsImage, left, top, width, height int) error {
if C.vips_extract_area_go(in, out, C.int(left), C.int(top), C.int(width), C.int(height)) != 0 {
return vipsError()
}
return nil
}
func vipsSmartCrop(img **C.VipsImage, width, height int) error {
var tmp *C.VipsImage
if C.vips_smartcrop_go(*img, &tmp, C.int(width), C.int(height)) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
return nil
}
func vipsEnsureAlpha(img **C.VipsImage) error {
var tmp *C.VipsImage
if C.vips_ensure_alpha(*img, &tmp) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
return nil
}
func vipsFlatten(img **C.VipsImage, bg rgbColor) error {
var tmp *C.VipsImage
if C.vips_flatten_go(*img, &tmp, C.double(bg.R), C.double(bg.G), C.double(bg.B)) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
return nil
}
func vipsBlur(img **C.VipsImage, sigma float32) error {
var tmp *C.VipsImage
if C.vips_gaussblur_go(*img, &tmp, C.double(sigma)) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
return nil
}
func vipsSharpen(img **C.VipsImage, sigma float32) error {
var tmp *C.VipsImage
if C.vips_sharpen_go(*img, &tmp, C.double(sigma)) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
return nil
}
func vipsImportColourProfile(img **C.VipsImage) error {
var tmp *C.VipsImage
if C.vips_need_icc_import(*img) > 0 {
profile, err := cmykProfilePath()
if err != nil {
return err
}
if C.vips_icc_import_go(*img, &tmp, cachedCString(profile)) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
}
return nil
}
func vipsFixColourspace(img **C.VipsImage) error {
var tmp *C.VipsImage
if C.vips_image_guess_interpretation(*img) != C.VIPS_INTERPRETATION_sRGB {
if C.vips_colourspace_go(*img, &tmp, C.VIPS_INTERPRETATION_sRGB) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
}
return nil
}
func vipsImageCopyMemory(img **C.VipsImage) error {
var tmp *C.VipsImage
if tmp = C.vips_image_copy_memory(*img); tmp == nil {
return vipsError()
}
C.swap_and_clear(img, tmp)
return nil
}
func vipsReplicate(img **C.VipsImage, width, height C.int) error {
var tmp *C.VipsImage
if C.vips_replicate_go(*img, &tmp, width, height) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
return nil
}
func vipsEmbed(img **C.VipsImage, gravity gravityType, width, height C.int, offX, offY C.int) error {
wmWidth := (*img).Xsize
wmHeight := (*img).Ysize
left := (width-wmWidth+1)/2 + offX
top := (height-wmHeight+1)/2 + offY
if gravity == gravityNorth || gravity == gravityNorthEast || gravity == gravityNorthWest {
top = offY
}
if gravity == gravityEast || gravity == gravityNorthEast || gravity == gravitySouthEast {
left = width - wmWidth - offX
}
if gravity == gravitySouth || gravity == gravitySouthEast || gravity == gravitySouthWest {
top = height - wmHeight - offY
}
if gravity == gravityWest || gravity == gravityNorthWest || gravity == gravitySouthWest {
left = offX
}
if left > width {
left = width - wmWidth
} else if left < -wmWidth {
left = 0
}
if top > height {
top = height - wmHeight
} else if top < -wmHeight {
top = 0
}
var tmp *C.VipsImage
if C.vips_embed_go(*img, &tmp, left, top, width, height) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
return nil
}
func vipsResizeWatermark(width, height int) (wm *C.VipsImage, err error) {
wmW := float64(watermark.Xsize)
wmH := float64(watermark.Ysize)
wr := float64(width) / wmW
hr := float64(height) / wmH
scale := math.Min(wr, hr)
if wmW*scale < 1 {
scale = 1 / wmW
}
if wmH*scale < 1 {
scale = 1 / wmH
}
if C.vips_resize_with_premultiply(watermark, &wm, C.double(scale)) != 0 {
err = vipsError()
}
return
}
func vipsApplyWatermark(img **C.VipsImage, opts *watermarkOptions) error {
if watermark == nil {
return nil
}
var wm, tmp *C.VipsImage
defer C.clear_image(&wm)
var err error
imgW := (*img).Xsize
imgH := (*img).Ysize
if opts.Scale == 0 {
if C.vips_copy_go(watermark, &wm) != 0 {
return vipsError()
}
} else {
wmW := maxInt(int(float64(imgW)*opts.Scale), 1)
wmH := maxInt(int(float64(imgH)*opts.Scale), 1)
if wm, err = vipsResizeWatermark(wmW, wmH); err != nil {
return err
}
}
if opts.Replicate {
if err = vipsReplicate(&wm, imgW, imgH); err != nil {
return err
}
} else {
if err = vipsEmbed(&wm, opts.Gravity, imgW, imgH, C.int(opts.OffsetX), C.int(opts.OffsetY)); err != nil {
return err
}
}
if C.vips_apply_watermark(*img, wm, &tmp, C.double(opts.Opacity)) != 0 {
return vipsError()
}
C.swap_and_clear(img, tmp)
return nil
}
func vipsError() error {
return errors.New(C.GoString(C.vips_error_buffer()))
}
|
[
"\"IMGPROXY_VIPS_LEAK_CHECK\"",
"\"IMGPROXY_VIPS_CACHE_TRACE\""
] |
[] |
[
"IMGPROXY_VIPS_CACHE_TRACE",
"IMGPROXY_VIPS_LEAK_CHECK"
] |
[]
|
["IMGPROXY_VIPS_CACHE_TRACE", "IMGPROXY_VIPS_LEAK_CHECK"]
|
go
| 2 | 0 | |
test/e2e/kubectl-ko/ko.go
|
package kubectl_ko
import (
"context"
"fmt"
"os"
"os/exec"
"github.com/kubeovn/kube-ovn/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var _ = Describe("[kubectl-ko]", func() {
f := framework.NewFramework("kubectl-ko", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME")))
It("nb show", func() {
output, err := exec.Command("kubectl", "ko", "nbctl", "show").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
})
It("sb show", func() {
output, err := exec.Command("kubectl", "ko", "sbctl", "show").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
})
It("vsctl show", func() {
nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodes.Items {
output, err := exec.Command("kubectl", "ko", "vsctl", node.Name, "show").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
}
})
It("ofctl show", func() {
nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodes.Items {
output, err := exec.Command("kubectl", "ko", "ofctl", node.Name, "show", "br-int").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
}
})
It("dpctl show", func() {
nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodes.Items {
output, err := exec.Command("kubectl", "ko", "dpctl", node.Name, "show").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
}
})
It("appctl list-commands", func() {
nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodes.Items {
output, err := exec.Command("kubectl", "ko", "appctl", node.Name, "list-commands").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
}
})
It("tcpdump", func() {
pods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: " app=kube-ovn-pinger"})
Expect(err).NotTo(HaveOccurred())
pod := pods.Items[0]
output, err := exec.Command("kubectl", "ko", "tcpdump", fmt.Sprintf("kube-system/%s", pod.Name), "-c", "1").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
})
It("trace", func() {
pods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: " app=kube-ovn-pinger"})
Expect(err).NotTo(HaveOccurred())
pod := pods.Items[0]
output, err := exec.Command("kubectl", "ko", "trace", fmt.Sprintf("kube-system/%s", pod.Name), "114.114.114.114", "icmp").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
output, err = exec.Command("kubectl", "ko", "trace", fmt.Sprintf("kube-system/%s", pod.Name), "114.114.114.114", "tcp", "80").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
output, err = exec.Command("kubectl", "ko", "trace", fmt.Sprintf("kube-system/%s", pod.Name), "114.114.114.114", "udp", "53").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
})
It("nb/sb operation", func() {
output, err := exec.Command("kubectl", "ko", "nb", "status").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
output, err = exec.Command("kubectl", "ko", "sb", "status").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
output, err = exec.Command("kubectl", "ko", "nb", "backup").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
output, err = exec.Command("kubectl", "ko", "sb", "backup").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
})
})
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
distributed/tests/test_utils.py
|
import array
import datetime
from functools import partial
import io
import os
import queue
import socket
import sys
from time import sleep
import traceback
import numpy as np
import pytest
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from distributed.metrics import time
from distributed.utils import (
All,
Log,
Logs,
sync,
is_kernel,
is_valid_xml,
ensure_ip,
str_graph,
truncate_exception,
get_traceback,
_maybe_complex,
read_block,
seek_delimiter,
funcname,
ensure_bytes,
open_port,
get_ip_interface,
nbytes,
set_thread_state,
thread_state,
LoopRunner,
parse_bytes,
parse_timedelta,
warn_on_duration,
format_dashboard_link,
LRU,
)
from distributed.utils_test import loop, loop_in_thread # noqa: F401
from distributed.utils_test import div, has_ipv6, inc, throws, gen_test, captured_logger
def test_All(loop):
@gen.coroutine
def throws():
1 / 0
@gen.coroutine
def slow():
yield gen.sleep(10)
@gen.coroutine
def inc(x):
raise gen.Return(x + 1)
@gen.coroutine
def f():
results = yield All([inc(i) for i in range(10)])
assert results == list(range(1, 11))
start = time()
for tasks in [[throws(), slow()], [slow(), throws()]]:
try:
yield All(tasks)
assert False
except ZeroDivisionError:
pass
end = time()
assert end - start < 10
loop.run_sync(f)
def test_sync_error(loop_in_thread):
loop = loop_in_thread
try:
result = sync(loop, throws, 1)
except Exception as exc:
f = exc
assert "hello" in str(exc)
tb = get_traceback()
L = traceback.format_tb(tb)
assert any("throws" in line for line in L)
def function1(x):
return function2(x)
def function2(x):
return throws(x)
try:
result = sync(loop, function1, 1)
except Exception as exc:
assert "hello" in str(exc)
tb = get_traceback()
L = traceback.format_tb(tb)
assert any("function1" in line for line in L)
assert any("function2" in line for line in L)
def test_sync_timeout(loop_in_thread):
loop = loop_in_thread
with pytest.raises(gen.TimeoutError):
sync(loop_in_thread, gen.sleep, 0.5, callback_timeout=0.05)
def test_sync_closed_loop():
loop = IOLoop.current()
loop.close()
IOLoop.clear_current()
IOLoop.clear_instance()
with pytest.raises(RuntimeError) as exc_info:
sync(loop, inc, 1)
exc_info.match("IOLoop is clos(ed|ing)")
def test_is_kernel():
pytest.importorskip("IPython")
assert is_kernel() is False
# @pytest.mark.leaking('fds')
# def test_zzz_leaks(l=[]):
# import os, subprocess
# l.append(b"x" * (17 * 1024**2))
# os.open(__file__, os.O_RDONLY)
# subprocess.Popen('sleep 100', shell=True, stdin=subprocess.DEVNULL)
def test_ensure_ip():
assert ensure_ip("localhost") in ("127.0.0.1", "::1")
assert ensure_ip("123.123.123.123") == "123.123.123.123"
assert ensure_ip("8.8.8.8") == "8.8.8.8"
if has_ipv6():
assert ensure_ip("2001:4860:4860::8888") == "2001:4860:4860::8888"
assert ensure_ip("::1") == "::1"
def test_get_ip_interface():
if sys.platform == "darwin":
assert get_ip_interface("lo0") == "127.0.0.1"
elif sys.platform.startswith("linux"):
assert get_ip_interface("lo") == "127.0.0.1"
else:
pytest.skip("test needs to be enhanced for platform %r" % (sys.platform,))
non_existent_interface = "__non-existent-interface"
expected_error_message = "{!r}.+network interface.+".format(non_existent_interface)
if sys.platform == "darwin":
expected_error_message += "'lo0'"
elif sys.platform.startswith("linux"):
expected_error_message += "'lo'"
with pytest.raises(ValueError, match=expected_error_message):
get_ip_interface(non_existent_interface)
def test_truncate_exception():
e = ValueError("a" * 1000)
assert len(str(e)) >= 1000
f = truncate_exception(e, 100)
assert type(f) == type(e)
assert len(str(f)) < 200
assert "aaaa" in str(f)
e = ValueError("a")
assert truncate_exception(e) is e
def test_get_traceback():
def a(x):
return div(x, 0)
def b(x):
return a(x)
def c(x):
return b(x)
try:
c(1)
except Exception as e:
tb = get_traceback()
assert type(tb).__name__ == "traceback"
def test_str_graph():
dsk = {"x": 1}
assert str_graph(dsk) == dsk
dsk = {("x", 1): (inc, 1)}
assert str_graph(dsk) == {str(("x", 1)): (inc, 1)}
dsk = {("x", 1): (inc, 1), ("x", 2): (inc, ("x", 1))}
assert str_graph(dsk) == {
str(("x", 1)): (inc, 1),
str(("x", 2)): (inc, str(("x", 1))),
}
dsks = [
{"x": 1},
{("x", 1): (inc, 1), ("x", 2): (inc, ("x", 1))},
{("x", 1): (sum, [1, 2, 3]), ("x", 2): (sum, [("x", 1), ("x", 1)])},
]
for dsk in dsks:
sdsk = str_graph(dsk)
keys = list(dsk)
skeys = [str(k) for k in keys]
assert all(isinstance(k, str) for k in sdsk)
assert dask.get(dsk, keys) == dask.get(sdsk, skeys)
def test_maybe_complex():
assert not _maybe_complex(1)
assert not _maybe_complex("x")
assert _maybe_complex((inc, 1))
assert _maybe_complex([(inc, 1)])
assert _maybe_complex([(inc, 1)])
assert _maybe_complex({"x": (inc, 1)})
def test_read_block():
delimiter = b"\n"
data = delimiter.join([b"123", b"456", b"789"])
f = io.BytesIO(data)
assert read_block(f, 1, 2) == b"23"
assert read_block(f, 0, 1, delimiter=b"\n") == b"123\n"
assert read_block(f, 0, 2, delimiter=b"\n") == b"123\n"
assert read_block(f, 0, 3, delimiter=b"\n") == b"123\n"
assert read_block(f, 0, 5, delimiter=b"\n") == b"123\n456\n"
assert read_block(f, 0, 8, delimiter=b"\n") == b"123\n456\n789"
assert read_block(f, 0, 100, delimiter=b"\n") == b"123\n456\n789"
assert read_block(f, 1, 1, delimiter=b"\n") == b""
assert read_block(f, 1, 5, delimiter=b"\n") == b"456\n"
assert read_block(f, 1, 8, delimiter=b"\n") == b"456\n789"
for ols in [[(0, 3), (3, 3), (6, 3), (9, 2)], [(0, 4), (4, 4), (8, 4)]]:
out = [read_block(f, o, l, b"\n") for o, l in ols]
assert b"".join(filter(None, out)) == data
def test_seek_delimiter_endline():
f = io.BytesIO(b"123\n456\n789")
# if at zero, stay at zero
seek_delimiter(f, b"\n", 5)
assert f.tell() == 0
# choose the first block
for bs in [1, 5, 100]:
f.seek(1)
seek_delimiter(f, b"\n", blocksize=bs)
assert f.tell() == 4
# handle long delimiters well, even with short blocksizes
f = io.BytesIO(b"123abc456abc789")
for bs in [1, 2, 3, 4, 5, 6, 10]:
f.seek(1)
seek_delimiter(f, b"abc", blocksize=bs)
assert f.tell() == 6
# End at the end
f = io.BytesIO(b"123\n456")
f.seek(5)
seek_delimiter(f, b"\n", 5)
assert f.tell() == 7
def test_funcname():
def f():
pass
assert funcname(f) == "f"
assert funcname(partial(f)) == "f"
assert funcname(partial(partial(f))) == "f"
def test_ensure_bytes():
data = [b"1", "1", memoryview(b"1"), bytearray(b"1"), array.array("b", [49])]
for d in data:
result = ensure_bytes(d)
assert isinstance(result, bytes)
assert result == b"1"
def test_ensure_bytes_ndarray():
result = ensure_bytes(np.arange(12))
assert isinstance(result, bytes)
def test_ensure_bytes_pyarrow_buffer():
pa = pytest.importorskip("pyarrow")
buf = pa.py_buffer(b"123")
result = ensure_bytes(buf)
assert isinstance(result, bytes)
def test_nbytes():
def check(obj, expected):
assert nbytes(obj) == expected
assert nbytes(memoryview(obj)) == expected
check(b"123", 3)
check(bytearray(b"4567"), 4)
multi_dim = np.ones(shape=(10, 10))
scalar = np.array(1)
check(multi_dim, multi_dim.nbytes)
check(scalar, scalar.nbytes)
def test_open_port():
port = open_port()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", port))
s.close()
def test_set_thread_state():
with set_thread_state(x=1):
assert thread_state.x == 1
assert not hasattr(thread_state, "x")
def assert_running(loop):
"""
Raise if the given IOLoop is not running.
"""
q = queue.Queue()
loop.add_callback(q.put, 42)
assert q.get(timeout=1) == 42
def assert_not_running(loop):
"""
Raise if the given IOLoop is running.
"""
q = queue.Queue()
try:
loop.add_callback(q.put, 42)
except RuntimeError:
# On AsyncIOLoop, can't add_callback() after the loop is closed
pass
else:
with pytest.raises(queue.Empty):
q.get(timeout=0.02)
def test_loop_runner(loop_in_thread):
# Implicit loop
loop = IOLoop()
loop.make_current()
runner = LoopRunner()
assert runner.loop not in (loop, loop_in_thread)
assert not runner.is_started()
assert_not_running(runner.loop)
runner.start()
assert runner.is_started()
assert_running(runner.loop)
runner.stop()
assert not runner.is_started()
assert_not_running(runner.loop)
# Explicit loop
loop = IOLoop()
runner = LoopRunner(loop=loop)
assert runner.loop is loop
assert not runner.is_started()
assert_not_running(loop)
runner.start()
assert runner.is_started()
assert_running(loop)
runner.stop()
assert not runner.is_started()
assert_not_running(loop)
# Explicit loop, already started
runner = LoopRunner(loop=loop_in_thread)
assert not runner.is_started()
assert_running(loop_in_thread)
runner.start()
assert runner.is_started()
assert_running(loop_in_thread)
runner.stop()
assert not runner.is_started()
assert_running(loop_in_thread)
# Implicit loop, asynchronous=True
loop = IOLoop()
loop.make_current()
runner = LoopRunner(asynchronous=True)
assert runner.loop is loop
assert not runner.is_started()
assert_not_running(runner.loop)
runner.start()
assert runner.is_started()
assert_not_running(runner.loop)
runner.stop()
assert not runner.is_started()
assert_not_running(runner.loop)
# Explicit loop, asynchronous=True
loop = IOLoop()
runner = LoopRunner(loop=loop, asynchronous=True)
assert runner.loop is loop
assert not runner.is_started()
assert_not_running(runner.loop)
runner.start()
assert runner.is_started()
assert_not_running(runner.loop)
runner.stop()
assert not runner.is_started()
assert_not_running(runner.loop)
def test_two_loop_runners(loop_in_thread):
# Loop runners tied to the same loop should cooperate
# ABCCBA
loop = IOLoop()
a = LoopRunner(loop=loop)
b = LoopRunner(loop=loop)
assert_not_running(loop)
a.start()
assert_running(loop)
c = LoopRunner(loop=loop)
b.start()
assert_running(loop)
c.start()
assert_running(loop)
c.stop()
assert_running(loop)
b.stop()
assert_running(loop)
a.stop()
assert_not_running(loop)
# ABCABC
loop = IOLoop()
a = LoopRunner(loop=loop)
b = LoopRunner(loop=loop)
assert_not_running(loop)
a.start()
assert_running(loop)
b.start()
assert_running(loop)
c = LoopRunner(loop=loop)
c.start()
assert_running(loop)
a.stop()
assert_running(loop)
b.stop()
assert_running(loop)
c.stop()
assert_not_running(loop)
# Explicit loop, already started
a = LoopRunner(loop=loop_in_thread)
b = LoopRunner(loop=loop_in_thread)
assert_running(loop_in_thread)
a.start()
assert_running(loop_in_thread)
b.start()
assert_running(loop_in_thread)
a.stop()
assert_running(loop_in_thread)
b.stop()
assert_running(loop_in_thread)
@gen_test()
def test_loop_runner_gen():
runner = LoopRunner(asynchronous=True)
assert runner.loop is IOLoop.current()
assert not runner.is_started()
yield gen.sleep(0.01)
runner.start()
assert runner.is_started()
yield gen.sleep(0.01)
runner.stop()
assert not runner.is_started()
yield gen.sleep(0.01)
def test_parse_bytes():
assert parse_bytes("100") == 100
assert parse_bytes("100 MB") == 100000000
assert parse_bytes("100M") == 100000000
assert parse_bytes("5kB") == 5000
assert parse_bytes("5.4 kB") == 5400
assert parse_bytes("1kiB") == 1024
assert parse_bytes("1Mi") == 2 ** 20
assert parse_bytes("1e6") == 1000000
assert parse_bytes("1e6 kB") == 1000000000
assert parse_bytes("MB") == 1000000
def test_parse_timedelta():
for text, value in [
("1s", 1),
("100ms", 0.1),
("5S", 5),
("5.5s", 5.5),
("5.5 s", 5.5),
("1 second", 1),
("3.3 seconds", 3.3),
("3.3 milliseconds", 0.0033),
("3500 us", 0.0035),
("1 ns", 1e-9),
("2m", 120),
("2 minutes", 120),
(datetime.timedelta(seconds=2), 2),
(datetime.timedelta(milliseconds=100), 0.1),
]:
result = parse_timedelta(text)
assert abs(result - value) < 1e-14
assert parse_timedelta("1ms", default="seconds") == 0.001
assert parse_timedelta("1", default="seconds") == 1
assert parse_timedelta("1", default="ms") == 0.001
assert parse_timedelta(1, default="ms") == 0.001
@gen_test()
def test_all_exceptions_logging():
@gen.coroutine
def throws():
raise Exception("foo1234")
with captured_logger("") as sio:
try:
yield All([throws() for _ in range(5)], quiet_exceptions=Exception)
except Exception:
pass
import gc
gc.collect()
yield gen.sleep(0.1)
assert "foo1234" not in sio.getvalue()
def test_warn_on_duration():
with pytest.warns(None) as record:
with warn_on_duration("10s", "foo"):
pass
assert not record
with pytest.warns(None) as record:
with warn_on_duration("1ms", "foo"):
sleep(0.100)
assert record
assert any("foo" in str(rec.message) for rec in record)
def test_format_bytes_compat():
# moved to dask, but exported here for compatibility
from distributed.utils import format_bytes # noqa
def test_logs():
d = Logs({"123": Log("Hello"), "456": Log("World!")})
text = d._repr_html_()
assert is_valid_xml("<div>" + text + "</div>")
assert "Hello" in text
assert "456" in text
def test_is_valid_xml():
assert is_valid_xml("<a>foo</a>")
with pytest.raises(Exception):
assert is_valid_xml("<a>foo")
def test_format_dashboard_link():
with dask.config.set({"distributed.dashboard.link": "foo"}):
assert format_dashboard_link("host", 1234) == "foo"
assert "host" in format_dashboard_link("host", 1234)
assert "1234" in format_dashboard_link("host", 1234)
try:
os.environ["host"] = "hello"
assert "hello" not in format_dashboard_link("host", 1234)
finally:
del os.environ["host"]
def test_lru():
l = LRU(maxsize=3)
l["a"] = 1
l["b"] = 2
l["c"] = 3
assert list(l.keys()) == ["a", "b", "c"]
# Use "a" and ensure it becomes the most recently used item
l["a"]
assert list(l.keys()) == ["b", "c", "a"]
# Ensure maxsize is respected
l["d"] = 4
assert len(l) == 3
assert list(l.keys()) == ["c", "a", "d"]
|
[] |
[] |
[
"host"
] |
[]
|
["host"]
|
python
| 1 | 0 | |
tests/test_hda.py
|
# Copyright 2021 European Centre for Medium-Range Weather Forecasts (ECMWF)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import os
import pytest
from hda import Client
NO_HDARC = not os.path.exists(os.path.expanduser("~/.hdarc")) and (
"HDA_USER" not in os.environ or "HDA_PASSWORD" not in os.environ
)
@pytest.mark.skipif(NO_HDARC, reason="No access to HDA")
def test_hda_1():
c = Client(url="https://wekeo-broker.apps.mercator.dpi.wekeo.eu/databroker")
r = {
"datasetId": "EO:EUM:DAT:SENTINEL-3:OL_1_EFR___",
"boundingBoxValues": [
{
"name": "bbox",
"bbox": [
1.2653132076552462,
43.50759094045819,
1.575030022744999,
43.711525020845585,
],
}
],
"dateRangeSelectValues": [
{
"name": "position",
"start": "2021-07-03T00:00:00.000Z",
"end": "2021-07-04T00:00:00.000Z",
}
],
"stringChoiceValues": [
{"name": "platformname", "value": "Sentinel-3"},
{"name": "producttype", "value": "OL_1_EFR___"},
],
}
matches = c.search(r)
print(matches)
assert len(matches.results) == 2, matches
# Too large to download
# matches.download()
@pytest.mark.skipif(NO_HDARC, reason="No access to HDA")
def test_hda_2():
c = Client(url="https://wekeo-broker.apps.mercator.dpi.wekeo.eu/databroker")
r = {
"datasetId": "EO:ECMWF:DAT:ERA5_HOURLY_VARIABLES_ON_PRESSURE_LEVELS",
"stringChoiceValues": [{"name": "format", "value": "grib"}],
"multiStringSelectValues": [
{"name": "variable", "value": ["temperature"]},
{"name": "pressure_level", "value": ["500"]},
{"name": "product_type", "value": ["ensemble_mean"]},
{"name": "year", "value": ["2014"]},
{"name": "month", "value": ["11"]},
{"name": "day", "value": ["10"]},
{"name": "time", "value": ["12:00"]},
],
}
matches = c.search(r)
assert len(matches.results) == 1, matches
matches.download()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
nodeup/pkg/model/protokube.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"bytes"
"fmt"
"os"
"path/filepath"
"strings"
kopsbase "k8s.io/kops"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/pkg/systemd"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"github.com/blang/semver"
"github.com/golang/glog"
"k8s.io/kops/pkg/assets"
)
// ProtokubeBuilder configures protokube
type ProtokubeBuilder struct {
*NodeupModelContext
}
var _ fi.ModelBuilder = &ProtokubeBuilder{}
// Build is responsible for generating the options for protokube
func (t *ProtokubeBuilder) Build(c *fi.ModelBuilderContext) error {
useGossip := dns.IsGossipHostname(t.Cluster.Spec.MasterInternalName)
// skip provisioning if this node is not a master and we are not using gossip (https://github.com/kubernetes/kops/pull/3091)
if !t.IsMaster && !useGossip {
glog.V(2).Infof("skipping the provisioning of protokube on the nodes")
return nil
}
if t.IsMaster {
kubeconfig, err := t.buildPKIKubeconfig("kops")
if err != nil {
return err
}
c.AddTask(&nodetasks.File{
Path: "/var/lib/kops/kubeconfig",
Contents: fi.NewStringResource(kubeconfig),
Type: nodetasks.FileType_File,
Mode: s("0400"),
})
// retrieve the etcd peer certificates and private keys from the keystore
if t.UseEtcdTLS() {
for _, x := range []string{"etcd", "etcd-client"} {
if err := t.BuildCertificateTask(c, x, fmt.Sprintf("%s.pem", x)); err != nil {
return err
}
}
for _, x := range []string{"etcd", "etcd-client"} {
if err := t.BuildPrivateTask(c, x, fmt.Sprintf("%s-key.pem", x)); err != nil {
return err
}
}
}
}
service, err := t.buildSystemdService()
if err != nil {
return err
}
c.AddTask(service)
return nil
}
// buildSystemdService generates the manifest for the protokube service
func (t *ProtokubeBuilder) buildSystemdService() (*nodetasks.Service, error) {
k8sVersion, err := util.ParseKubernetesVersion(t.Cluster.Spec.KubernetesVersion)
if err != nil || k8sVersion == nil {
return nil, fmt.Errorf("unable to parse KubernetesVersion %q", t.Cluster.Spec.KubernetesVersion)
}
protokubeFlags, err := t.ProtokubeFlags(*k8sVersion)
if err != nil {
return nil, err
}
protokubeFlagsArgs, err := flagbuilder.BuildFlags(protokubeFlags)
if err != nil {
return nil, err
}
dockerArgs := []string{
"/usr/bin/docker", "run",
"-v", "/:/rootfs/",
"-v", "/var/run/dbus:/var/run/dbus",
"-v", "/run/systemd:/run/systemd",
}
// add kubectl only if a master
// path changes depending on distro, and always mount it on /opt/kops/bin
// kubectl is downloaded and installed by other tasks
if t.IsMaster {
dockerArgs = append(dockerArgs, []string{
"-v", t.KubectlPath() + ":/opt/kops/bin:ro",
"--env", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/kops/bin",
}...)
}
dockerArgs = append(dockerArgs, []string{
"--net=host",
"--pid=host", // Needed for mounting in a container (when using systemd mounting?)
"--privileged", // We execute in the host namespace
"--env", "KUBECONFIG=/rootfs/var/lib/kops/kubeconfig",
t.ProtokubeEnvironmentVariables(),
t.ProtokubeImageName(),
"/usr/bin/protokube",
}...)
protokubeCommand := strings.Join(dockerArgs, " ") + " " + protokubeFlagsArgs
manifest := &systemd.Manifest{}
manifest.Set("Unit", "Description", "Kubernetes Protokube Service")
manifest.Set("Unit", "Documentation", "https://github.com/kubernetes/kops")
manifest.Set("Service", "ExecStartPre", t.ProtokubeImagePullCommand())
manifest.Set("Service", "ExecStart", protokubeCommand)
manifest.Set("Service", "Restart", "always")
manifest.Set("Service", "RestartSec", "2s")
manifest.Set("Service", "StartLimitInterval", "0")
manifest.Set("Install", "WantedBy", "multi-user.target")
manifestString := manifest.Render()
glog.V(8).Infof("Built service manifest %q\n%s", "protokube", manifestString)
service := &nodetasks.Service{
Name: "protokube.service",
Definition: s(manifestString),
}
service.InitDefaults()
return service, nil
}
// ProtokubeImageName returns the docker image for protokube
func (t *ProtokubeBuilder) ProtokubeImageName() string {
name := ""
if t.NodeupConfig.ProtokubeImage != nil && t.NodeupConfig.ProtokubeImage.Name != "" {
name = t.NodeupConfig.ProtokubeImage.Name
}
if name == "" {
// use current default corresponding to this version of nodeup
name = kopsbase.DefaultProtokubeImageName()
}
return name
}
// ProtokubeImagePullCommand returns the command to pull the image
func (t *ProtokubeBuilder) ProtokubeImagePullCommand() string {
source := ""
if t.NodeupConfig.ProtokubeImage != nil {
source = t.NodeupConfig.ProtokubeImage.Source
}
if source == "" {
// Nothing to pull; return dummy value
return "/bin/true"
}
if strings.HasPrefix(source, "http:") || strings.HasPrefix(source, "https:") || strings.HasPrefix(source, "s3:") {
// We preloaded the image; return a dummy value
return "/bin/true"
}
return "/usr/bin/docker pull " + t.NodeupConfig.ProtokubeImage.Source
}
// ProtokubeFlags are the flags for protokube
type ProtokubeFlags struct {
ApplyTaints *bool `json:"applyTaints,omitempty" flag:"apply-taints"`
Channels []string `json:"channels,omitempty" flag:"channels"`
Cloud *string `json:"cloud,omitempty" flag:"cloud"`
// ClusterID flag is required only for vSphere cloud type, to pass cluster id information to protokube. AWS and GCE workflows ignore this flag.
ClusterID *string `json:"cluster-id,omitempty" flag:"cluster-id"`
Containerized *bool `json:"containerized,omitempty" flag:"containerized"`
DNSInternalSuffix *string `json:"dnsInternalSuffix,omitempty" flag:"dns-internal-suffix"`
DNSProvider *string `json:"dnsProvider,omitempty" flag:"dns"`
DNSServer *string `json:"dns-server,omitempty" flag:"dns-server"`
EtcdBackupImage string `json:"etcd-backup-image,omitempty" flag:"etcd-backup-image"`
EtcdBackupStore string `json:"etcd-backup-store,omitempty" flag:"etcd-backup-store"`
EtcdImage *string `json:"etcd-image,omitempty" flag:"etcd-image"`
EtcdLeaderElectionTimeout *string `json:"etcd-election-timeout,omitempty" flag:"etcd-election-timeout"`
EtcdHeartbeatInterval *string `json:"etcd-heartbeat-interval,omitempty" flag:"etcd-heartbeat-interval"`
InitializeRBAC *bool `json:"initializeRBAC,omitempty" flag:"initialize-rbac"`
LogLevel *int32 `json:"logLevel,omitempty" flag:"v"`
Master *bool `json:"master,omitempty" flag:"master"`
PeerTLSCaFile *string `json:"peer-ca,omitempty" flag:"peer-ca"`
PeerTLSCertFile *string `json:"peer-cert,omitempty" flag:"peer-cert"`
PeerTLSKeyFile *string `json:"peer-key,omitempty" flag:"peer-key"`
TLSAuth *bool `json:"tls-auth,omitempty" flag:"tls-auth"`
TLSCAFile *string `json:"tls-ca,omitempty" flag:"tls-ca"`
TLSCertFile *string `json:"tls-cert,omitempty" flag:"tls-cert"`
TLSKeyFile *string `json:"tls-key,omitempty" flag:"tls-key"`
Zone []string `json:"zone,omitempty" flag:"zone"`
// ManageEtcd is true if protokube should manage etcd; being replaced by etcd-manager
ManageEtcd bool `json:"manageEtcd,omitempty" flag:"manage-etcd"`
}
// ProtokubeFlags is responsible for building the command line flags for protokube
func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) (*ProtokubeFlags, error) {
imageVersion := t.Cluster.Spec.EtcdClusters[0].Version
// overrides imageVersion if set
etcdContainerImage := t.Cluster.Spec.EtcdClusters[0].Image
var leaderElectionTimeout string
var heartbeatInterval string
if v := t.Cluster.Spec.EtcdClusters[0].LeaderElectionTimeout; v != nil {
leaderElectionTimeout = convEtcdSettingsToMs(v)
}
if v := t.Cluster.Spec.EtcdClusters[0].HeartbeatInterval; v != nil {
heartbeatInterval = convEtcdSettingsToMs(v)
}
f := &ProtokubeFlags{
Channels: t.NodeupConfig.Channels,
Containerized: fi.Bool(true),
EtcdLeaderElectionTimeout: s(leaderElectionTimeout),
EtcdHeartbeatInterval: s(heartbeatInterval),
LogLevel: fi.Int32(4),
Master: b(t.IsMaster),
}
f.ManageEtcd = false
if len(t.NodeupConfig.EtcdManifests) == 0 {
glog.V(4).Infof("no EtcdManifests; protokube will manage etcd")
f.ManageEtcd = true
}
for _, e := range t.Cluster.Spec.EtcdClusters {
// Because we can only specify a single EtcdBackupStore at the moment, we only back up main, not events
if e.Name != "main" {
continue
}
if e.Backups != nil {
if f.EtcdBackupImage == "" {
f.EtcdBackupImage = e.Backups.Image
}
if f.EtcdBackupStore == "" {
f.EtcdBackupStore = e.Backups.BackupStore
}
}
}
// TODO: this duplicates code in the etcd model
image := fmt.Sprintf("k8s.gcr.io/etcd:%s", imageVersion)
// override image if set as API value
if etcdContainerImage != "" {
image = etcdContainerImage
}
assets := assets.NewAssetBuilder(t.Cluster, "")
remapped, err := assets.RemapImage(image)
if err != nil {
return nil, fmt.Errorf("unable to remap container %q: %v", image, err)
} else {
image = remapped
}
f.EtcdImage = s(image)
// initialize rbac on Kubernetes >= 1.6 and master
if k8sVersion.Major == 1 && k8sVersion.Minor >= 6 {
f.InitializeRBAC = fi.Bool(true)
}
// check if we are using tls and add the options to protokube
if t.UseEtcdTLS() {
f.PeerTLSCaFile = s(filepath.Join(t.PathSrvKubernetes(), "ca.crt"))
f.PeerTLSCertFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd.pem"))
f.PeerTLSKeyFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd-key.pem"))
f.TLSCAFile = s(filepath.Join(t.PathSrvKubernetes(), "ca.crt"))
f.TLSCertFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd.pem"))
f.TLSKeyFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd-key.pem"))
}
if t.UseTLSAuth() {
enableAuth := true
f.TLSAuth = b(enableAuth)
}
zone := t.Cluster.Spec.DNSZone
if zone != "" {
if strings.Contains(zone, ".") {
// match by name
f.Zone = append(f.Zone, zone)
} else {
// match by id
f.Zone = append(f.Zone, "*/"+zone)
}
} else {
glog.Warningf("DNSZone not specified; protokube won't be able to update DNS")
// @TODO: Should we permit wildcard updates if zone is not specified?
//argv = append(argv, "--zone=*/*")
}
if dns.IsGossipHostname(t.Cluster.Spec.MasterInternalName) {
glog.Warningf("MasterInternalName %q implies gossip DNS", t.Cluster.Spec.MasterInternalName)
f.DNSProvider = fi.String("gossip")
// @TODO: This is hacky, but we want it so that we can have a different internal & external name
internalSuffix := t.Cluster.Spec.MasterInternalName
internalSuffix = strings.TrimPrefix(internalSuffix, "api.")
f.DNSInternalSuffix = fi.String(internalSuffix)
}
if t.Cluster.Spec.CloudProvider != "" {
f.Cloud = fi.String(t.Cluster.Spec.CloudProvider)
if f.DNSProvider == nil {
switch kops.CloudProviderID(t.Cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
f.DNSProvider = fi.String("aws-route53")
case kops.CloudProviderDO:
f.DNSProvider = fi.String("digitalocean")
f.ClusterID = fi.String(t.Cluster.Name)
case kops.CloudProviderGCE:
f.DNSProvider = fi.String("google-clouddns")
case kops.CloudProviderVSphere:
f.DNSProvider = fi.String("coredns")
f.ClusterID = fi.String(t.Cluster.ObjectMeta.Name)
f.DNSServer = fi.String(*t.Cluster.Spec.CloudConfig.VSphereCoreDNSServer)
default:
glog.Warningf("Unknown cloudprovider %q; won't set DNS provider", t.Cluster.Spec.CloudProvider)
}
}
}
if f.DNSInternalSuffix == nil {
f.DNSInternalSuffix = fi.String(".internal." + t.Cluster.ObjectMeta.Name)
}
if k8sVersion.Major == 1 && k8sVersion.Minor <= 5 {
f.ApplyTaints = fi.Bool(true)
}
return f, nil
}
// ProtokubeEnvironmentVariables generates the environment variables for docker
func (t *ProtokubeBuilder) ProtokubeEnvironmentVariables() string {
var buffer bytes.Buffer
// TODO write out an environment file for this. This is getting a tad long.
// Pass in required credentials when using user-defined s3 endpoint
if os.Getenv("AWS_REGION") != "" {
buffer.WriteString(" ")
buffer.WriteString("-e 'AWS_REGION=")
buffer.WriteString(os.Getenv("AWS_REGION"))
buffer.WriteString("'")
buffer.WriteString(" ")
}
if os.Getenv("S3_ENDPOINT") != "" {
buffer.WriteString(" ")
buffer.WriteString("-e S3_ENDPOINT=")
buffer.WriteString("'")
buffer.WriteString(os.Getenv("S3_ENDPOINT"))
buffer.WriteString("'")
buffer.WriteString(" -e S3_REGION=")
buffer.WriteString("'")
buffer.WriteString(os.Getenv("S3_REGION"))
buffer.WriteString("'")
buffer.WriteString(" -e S3_ACCESS_KEY_ID=")
buffer.WriteString("'")
buffer.WriteString(os.Getenv("S3_ACCESS_KEY_ID"))
buffer.WriteString("'")
buffer.WriteString(" -e S3_SECRET_ACCESS_KEY=")
buffer.WriteString("'")
buffer.WriteString(os.Getenv("S3_SECRET_ACCESS_KEY"))
buffer.WriteString("'")
buffer.WriteString(" ")
}
if kops.CloudProviderID(t.Cluster.Spec.CloudProvider) == kops.CloudProviderDO && os.Getenv("DIGITALOCEAN_ACCESS_TOKEN") != "" {
buffer.WriteString(" ")
buffer.WriteString("-e 'DIGITALOCEAN_ACCESS_TOKEN=")
buffer.WriteString(os.Getenv("DIGITALOCEAN_ACCESS_TOKEN"))
buffer.WriteString("'")
buffer.WriteString(" ")
}
t.writeProxyEnvVars(&buffer)
return buffer.String()
}
func (t *ProtokubeBuilder) writeProxyEnvVars(buffer *bytes.Buffer) {
for _, envVar := range getProxyEnvVars(t.Cluster.Spec.EgressProxy) {
buffer.WriteString(" -e ")
buffer.WriteString(envVar.Name)
buffer.WriteString("=")
buffer.WriteString(envVar.Value)
buffer.WriteString(" ")
}
}
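// Editorial note (a sketch, not part of the original file): the two functions above emit
// docker "-e" flags, so with AWS_REGION=us-east-1 exported on the host the generated
// argument string contains ` -e 'AWS_REGION=us-east-1' `, and any proxy variables returned
// by getProxyEnvVars are appended as ` -e NAME=value `.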
|
[
"\"AWS_REGION\"",
"\"AWS_REGION\"",
"\"S3_ENDPOINT\"",
"\"S3_ENDPOINT\"",
"\"S3_REGION\"",
"\"S3_ACCESS_KEY_ID\"",
"\"S3_SECRET_ACCESS_KEY\"",
"\"DIGITALOCEAN_ACCESS_TOKEN\"",
"\"DIGITALOCEAN_ACCESS_TOKEN\""
] |
[] |
[
"AWS_REGION",
"DIGITALOCEAN_ACCESS_TOKEN",
"S3_SECRET_ACCESS_KEY",
"S3_ACCESS_KEY_ID",
"S3_REGION",
"S3_ENDPOINT"
] |
[]
|
["AWS_REGION", "DIGITALOCEAN_ACCESS_TOKEN", "S3_SECRET_ACCESS_KEY", "S3_ACCESS_KEY_ID", "S3_REGION", "S3_ENDPOINT"]
|
go
| 6 | 0 | |
homeassistant/bootstrap.py
|
"""Provide methods to bootstrap a Home Assistant instance."""
import asyncio
import contextlib
import logging
import logging.handlers
import os
import sys
from time import monotonic
from typing import Any, Dict, Optional, Set
from async_timeout import timeout
import voluptuous as vol
from homeassistant import config as conf_util, config_entries, core, loader
from homeassistant.components import http
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP,
REQUIRED_NEXT_PYTHON_DATE,
REQUIRED_NEXT_PYTHON_VER,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.typing import ConfigType
from homeassistant.setup import DATA_SETUP, async_setup_component
from homeassistant.util.logging import async_activate_log_queue_handler
from homeassistant.util.package import async_get_user_site, is_virtual_env
from homeassistant.util.yaml import clear_secret_cache
_LOGGER = logging.getLogger(__name__)
ERROR_LOG_FILENAME = "home-assistant.log"
# hass.data key for logging information.
DATA_LOGGING = "logging"
DEBUGGER_INTEGRATIONS = {"ptvsd"}
CORE_INTEGRATIONS = ("homeassistant", "persistent_notification")
LOGGING_INTEGRATIONS = {"logger", "system_log", "sentry"}
STAGE_1_INTEGRATIONS = {
# To record data
"recorder",
# To make sure we forward data to other instances
"mqtt_eventstream",
# To provide account link implementations
"cloud",
}
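# Editorial note (a sketch): these constants drive the staged startup implemented in
# _async_set_up_integrations below: debugger integrations start first, then
# LOGGING_INTEGRATIONS, then STAGE_1_INTEGRATIONS, and finally the remaining domains
# ordered by their after_dependencies.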
async def async_setup_hass(
*,
config_dir: str,
verbose: bool,
log_rotate_days: int,
log_file: str,
log_no_color: bool,
skip_pip: bool,
safe_mode: bool,
) -> Optional[core.HomeAssistant]:
"""Set up Home Assistant."""
hass = core.HomeAssistant()
hass.config.config_dir = config_dir
async_enable_logging(hass, verbose, log_rotate_days, log_file, log_no_color)
hass.config.skip_pip = skip_pip
if skip_pip:
_LOGGER.warning(
"Skipping pip installation of required modules. This may cause issues"
)
if not await conf_util.async_ensure_config_exists(hass):
_LOGGER.error("Error getting configuration path")
return None
_LOGGER.info("Config directory: %s", config_dir)
config_dict = None
basic_setup_success = False
if not safe_mode:
await hass.async_add_executor_job(conf_util.process_ha_config_upgrade, hass)
try:
config_dict = await conf_util.async_hass_config_yaml(hass)
except HomeAssistantError as err:
_LOGGER.error(
"Failed to parse configuration.yaml: %s. Activating safe mode", err,
)
else:
if not is_virtual_env():
await async_mount_local_lib_path(config_dir)
basic_setup_success = (
await async_from_config_dict(config_dict, hass) is not None
)
finally:
clear_secret_cache()
if config_dict is None:
safe_mode = True
elif not basic_setup_success:
_LOGGER.warning("Unable to set up core integrations. Activating safe mode")
safe_mode = True
elif (
"frontend" in hass.data.get(DATA_SETUP, {})
and "frontend" not in hass.config.components
):
_LOGGER.warning("Detected that frontend did not load. Activating safe mode")
# Ask integrations to shut down. It's messy but we can't
# do a clean stop without knowing what is broken
hass.async_track_tasks()
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP, {})
with contextlib.suppress(asyncio.TimeoutError):
async with timeout(10):
await hass.async_block_till_done()
safe_mode = True
hass = core.HomeAssistant()
hass.config.config_dir = config_dir
if safe_mode:
_LOGGER.info("Starting in safe mode")
hass.config.safe_mode = True
http_conf = (await http.async_get_last_config(hass)) or {}
await async_from_config_dict(
{"safe_mode": {}, "http": http_conf}, hass,
)
return hass
async def async_from_config_dict(
config: ConfigType, hass: core.HomeAssistant
) -> Optional[core.HomeAssistant]:
"""Try to configure Home Assistant from a configuration dictionary.
Dynamically loads required components and their dependencies.
This method is a coroutine.
"""
start = monotonic()
hass.config_entries = config_entries.ConfigEntries(hass, config)
await hass.config_entries.async_initialize()
# Set up core.
_LOGGER.debug("Setting up %s", CORE_INTEGRATIONS)
if not all(
await asyncio.gather(
*(
async_setup_component(hass, domain, config)
for domain in CORE_INTEGRATIONS
)
)
):
_LOGGER.error("Home Assistant core failed to initialize. ")
return None
_LOGGER.debug("Home Assistant core initialized")
core_config = config.get(core.DOMAIN, {})
try:
await conf_util.async_process_ha_core_config(hass, core_config)
except vol.Invalid as config_err:
conf_util.async_log_exception(config_err, "homeassistant", core_config, hass)
return None
except HomeAssistantError:
_LOGGER.error(
"Home Assistant core failed to initialize. "
"Further initialization aborted"
)
return None
await _async_set_up_integrations(hass, config)
stop = monotonic()
_LOGGER.info("Home Assistant initialized in %.2fs", stop - start)
if REQUIRED_NEXT_PYTHON_DATE and sys.version_info[:3] < REQUIRED_NEXT_PYTHON_VER:
msg = (
"Support for the running Python version "
f"{'.'.join(str(x) for x in sys.version_info[:3])} is deprecated and will "
f"be removed in the first release after {REQUIRED_NEXT_PYTHON_DATE}. "
"Please upgrade Python to "
f"{'.'.join(str(x) for x in REQUIRED_NEXT_PYTHON_VER)} or "
"higher."
)
_LOGGER.warning(msg)
hass.components.persistent_notification.async_create(
msg, "Python version", "python_version"
)
return hass
@core.callback
def async_enable_logging(
hass: core.HomeAssistant,
verbose: bool = False,
log_rotate_days: Optional[int] = None,
log_file: Optional[str] = None,
log_no_color: bool = False,
) -> None:
"""Set up the logging.
This method must be run in the event loop.
"""
fmt = "%(asctime)s %(levelname)s (%(threadName)s) [%(name)s] %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
if not log_no_color:
try:
# pylint: disable=import-outside-toplevel
from colorlog import ColoredFormatter
# basicConfig must be called after importing colorlog in order to
# ensure that the handlers it sets up wrap the correct streams.
logging.basicConfig(level=logging.INFO)
colorfmt = f"%(log_color)s{fmt}%(reset)s"
logging.getLogger().handlers[0].setFormatter(
ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
},
)
)
except ImportError:
pass
# If the above initialization failed for any reason, set up the default
# formatting. If the above succeeds, this will result in a no-op.
logging.basicConfig(format=fmt, datefmt=datefmt, level=logging.INFO)
# Suppress overly verbose logs from libraries that aren't helpful
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
sys.excepthook = lambda *args: logging.getLogger(None).exception(
"Uncaught exception", exc_info=args # type: ignore
)
# Log errors to a file if we have write access to file or config dir
if log_file is None:
err_log_path = hass.config.path(ERROR_LOG_FILENAME)
else:
err_log_path = os.path.abspath(log_file)
err_path_exists = os.path.isfile(err_log_path)
err_dir = os.path.dirname(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or (
not err_path_exists and os.access(err_dir, os.W_OK)
):
if log_rotate_days:
err_handler: logging.FileHandler = logging.handlers.TimedRotatingFileHandler(
err_log_path, when="midnight", backupCount=log_rotate_days
)
else:
err_handler = logging.FileHandler(err_log_path, mode="w", delay=True)
err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
err_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))
logger = logging.getLogger("")
logger.addHandler(err_handler)
logger.setLevel(logging.INFO)
# Save the log file location for access by other components.
hass.data[DATA_LOGGING] = err_log_path
else:
_LOGGER.error("Unable to set up error log %s (access denied)", err_log_path)
async_activate_log_queue_handler(hass)
async def async_mount_local_lib_path(config_dir: str) -> str:
"""Add local library to Python Path.
This function is a coroutine.
"""
deps_dir = os.path.join(config_dir, "deps")
lib_dir = await async_get_user_site(deps_dir)
if lib_dir not in sys.path:
sys.path.insert(0, lib_dir)
return deps_dir
@core.callback
def _get_domains(hass: core.HomeAssistant, config: Dict[str, Any]) -> Set[str]:
"""Get domains of components to set up."""
# Filter out the repeating and common config section [homeassistant]
domains = {key.split(" ")[0] for key in config.keys() if key != core.DOMAIN}
# Add config entry domains
if not hass.config.safe_mode:
domains.update(hass.config_entries.async_domains())
# Make sure the Hass.io component is loaded
if "HASSIO" in os.environ:
domains.add("hassio")
return domains
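# Editorial note (a sketch): configuration.yaml allows a domain key to repeat with a
# suffix, e.g. "automation old:" alongside "automation:", so key.split(" ")[0] above
# collapses both entries to the single "automation" domain before setup.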
async def _async_set_up_integrations(
hass: core.HomeAssistant, config: Dict[str, Any]
) -> None:
"""Set up all the integrations."""
async def async_setup_multi_components(domains: Set[str]) -> None:
"""Set up multiple domains. Log on failure."""
futures = {
domain: hass.async_create_task(async_setup_component(hass, domain, config))
for domain in domains
}
await asyncio.wait(futures.values())
errors = [domain for domain in domains if futures[domain].exception()]
for domain in errors:
exception = futures[domain].exception()
_LOGGER.error(
"Error setting up integration %s - received exception",
domain,
exc_info=(type(exception), exception, exception.__traceback__),
)
domains = _get_domains(hass, config)
# Start up debuggers. Start these first in case they want to wait.
debuggers = domains & DEBUGGER_INTEGRATIONS
if debuggers:
_LOGGER.debug("Starting up debuggers %s", debuggers)
await async_setup_multi_components(debuggers)
domains -= DEBUGGER_INTEGRATIONS
# Resolve all dependencies of all components so we can find the logging
# and integrations that need faster initialization.
resolved_domains_task = asyncio.gather(
*(loader.async_component_dependencies(hass, domain) for domain in domains),
return_exceptions=True,
)
# Finish resolving domains
for dep_domains in await resolved_domains_task:
# Result is either a set or an exception. We ignore exceptions;
# they will be properly handled during setup of the domain.
if isinstance(dep_domains, set):
domains.update(dep_domains)
# setup components
logging_domains = domains & LOGGING_INTEGRATIONS
stage_1_domains = domains & STAGE_1_INTEGRATIONS
stage_2_domains = domains - logging_domains - stage_1_domains
if logging_domains:
_LOGGER.info("Setting up %s", logging_domains)
await async_setup_multi_components(logging_domains)
# Kick off loading the registries. They don't need to be awaited.
asyncio.gather(
hass.helpers.device_registry.async_get_registry(),
hass.helpers.entity_registry.async_get_registry(),
hass.helpers.area_registry.async_get_registry(),
)
if stage_1_domains:
await async_setup_multi_components(stage_1_domains)
# Load all integrations
after_dependencies: Dict[str, Set[str]] = {}
for int_or_exc in await asyncio.gather(
*(loader.async_get_integration(hass, domain) for domain in stage_2_domains),
return_exceptions=True,
):
# Exceptions are handled in async_setup_component.
if isinstance(int_or_exc, loader.Integration) and int_or_exc.after_dependencies:
after_dependencies[int_or_exc.domain] = set(int_or_exc.after_dependencies)
last_load = None
while stage_2_domains:
domains_to_load = set()
for domain in stage_2_domains:
after_deps = after_dependencies.get(domain)
# Load if integration has no after_dependencies or they are
# all loaded
if not after_deps or not after_deps - hass.config.components:
domains_to_load.add(domain)
if not domains_to_load or domains_to_load == last_load:
break
_LOGGER.debug("Setting up %s", domains_to_load)
await async_setup_multi_components(domains_to_load)
last_load = domains_to_load
stage_2_domains -= domains_to_load
# These are stage 2 domains that never have their after_dependencies
# satisfied.
if stage_2_domains:
_LOGGER.debug("Final set up: %s", stage_2_domains)
await async_setup_multi_components(stage_2_domains)
# Wrap up startup
await hass.async_block_till_done()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
disentanglement_lib/data/ground_truth/mpi3d.py
|
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MPI3D data set."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from disentanglement_lib.data.ground_truth import ground_truth_data
from disentanglement_lib.data.ground_truth import util
import numpy as np
import tensorflow as tf
class MPI3D(ground_truth_data.GroundTruthData):
"""MPI3D dataset.
MPI3D datasets have been introduced as part of the NEURIPS 2019 Disentanglement
Competition (http://www.disentanglement-challenge.com).
There are three different datasets:
1. Simplistic rendered images (mpi3d_toy).
2. Realistic rendered images (mpi3d_realistic).
3. Real world images (mpi3d_real).
Currently, mpi3d_toy and mpi3d_realistic are publicly available. More details about this
dataset can be found in "On the Transfer of Inductive Bias from Simulation to
the Real World: a New Disentanglement Dataset"
(https://arxiv.org/abs/1906.03292).
The ground-truth factors of variation in the dataset are:
0 - Object color (4 different values)
1 - Object shape (4 different values)
2 - Object size (2 different values)
3 - Camera height (3 different values)
4 - Background colors (3 different values)
5 - First DOF (40 different values)
6 - Second DOF (40 different values)
"""
def __init__(self, mode="mpi3d_toy"):
if mode == "mpi3d_toy":
mpi3d_path = os.path.join(
os.environ.get("DISENTANGLEMENT_LIB_DATA", "."), "mpi3d_toy",
"mpi3d_toy.npz")
if not tf.io.gfile.exists(mpi3d_path):
raise ValueError(
"Dataset '{}' not found. Make sure the dataset is publicly available and downloaded correctly."
.format(mode))
else:
with tf.io.gfile.GFile(mpi3d_path, "rb") as f:
data = np.load(f)
elif mode == "mpi3d_realistic":
mpi3d_path = os.path.join(
os.environ.get("DISENTANGLEMENT_LIB_DATA", "."), "mpi3d_realistic",
"mpi3d_realistic.npz")
if not tf.io.gfile.exists(mpi3d_path):
raise ValueError(
"Dataset '{}' not found. Make sure the dataset is publicly available and downloaded correctly."
.format(mode))
else:
with tf.io.gfile.GFile(mpi3d_path, "rb") as f:
data = np.load(f)
elif mode == "mpi3d_real":
mpi3d_path = os.path.join(
os.environ.get("DISENTANGLEMENT_LIB_DATA", "."), "mpi3d_real",
"mpi3d_real.npz")
if not tf.io.gfile.exists(mpi3d_path):
raise ValueError(
"Dataset '{}' not found. Make sure the dataset is publicly available and downloaded correctly."
.format(mode))
else:
with tf.io.gfile.GFile(mpi3d_path, "rb") as f:
data = np.load(f)
else:
raise ValueError("Unknown mode provided.")
self.images = data["images"]
self.factor_sizes = [4, 4, 2, 3, 3, 40, 40]
self.latent_factor_indices = [0, 1, 2, 3, 4, 5, 6]
self.num_total_factors = 7
self.state_space = util.SplitDiscreteStateSpace(self.factor_sizes,
self.latent_factor_indices)
self.factor_bases = np.prod(self.factor_sizes) / np.cumprod(
self.factor_sizes)
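# Editorial note (a sketch): factor_bases forms a mixed-radix positional encoding.
# With factor_sizes [4, 4, 2, 3, 3, 40, 40] the bases are
# [115200, 28800, 14400, 4800, 1600, 40, 1], so a factor vector like
# [1, 0, 0, 0, 0, 0, 0] maps to flat index 1 * 115200 in self.images
# (see sample_observations_from_factors below).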
@property
def num_factors(self):
return self.state_space.num_latent_factors
@property
def factors_num_values(self):
return self.factor_sizes
@property
def observation_shape(self):
return [64, 64, 3]
def sample_factors(self, num, random_state):
"""Sample a batch of factors Y."""
return self.state_space.sample_latent_factors(num, random_state)
def sample_observations_from_factors(self, factors, random_state):
all_factors = self.state_space.sample_all_factors(factors, random_state)
indices = np.array(np.dot(all_factors, self.factor_bases), dtype=np.int64)
return self.images[indices] / 255.
|
[] |
[] |
[
"DISENTANGLEMENT_LIB_DATA"
] |
[]
|
["DISENTANGLEMENT_LIB_DATA"]
|
python
| 1 | 0 | |
migrations/main.go
|
package main
import (
"flag"
"fmt"
"log"
"os"
"github.com/go-pg/migrations"
"github.com/go-pg/pg"
"github.com/joho/godotenv"
)
const usageText = `This program runs command on the db. Supported commands are:
- init - creates version info table in the database
- up - runs all available migrations.
- up [target] - runs available migrations up to the target one.
- down - reverts last migration.
- reset - reverts all migrations.
- version - prints current db version.
- set_version [version] - sets db version without running migrations.
Usage:
go run *.go <command> [args]
`
func init() {
if err := godotenv.Load(); err != nil {
log.Print("No .env file found")
}
}
func main() {
flag.Usage = usage
flag.Parse()
options, err := pg.ParseURL(os.Getenv("DATABASE_URL"))
if err != nil {
fmt.Printf("unable to parse database url")
exitf(err.Error())
}
db := pg.Connect(options)
oldVersion, newVersion, err := migrations.Run(db, flag.Args()...)
if err != nil {
exitf(err.Error())
}
if newVersion != oldVersion {
fmt.Printf("migrated from version %d to %d\n", oldVersion, newVersion)
} else {
fmt.Printf("version is %d\n", oldVersion)
}
}
func usage() {
fmt.Print(usageText)
flag.PrintDefaults()
os.Exit(2)
}
func errorf(s string, args ...interface{}) {
fmt.Fprintf(os.Stderr, s+"\n", args...)
}
func exitf(s string, args ...interface{}) {
errorf(s, args...)
os.Exit(1)
}
|
[
"\"DATABASE_URL\""
] |
[] |
[
"DATABASE_URL"
] |
[]
|
["DATABASE_URL"]
|
go
| 1 | 0 | |
ddpm_proteins/utils.py
|
import os
from PIL import Image
import seaborn as sn
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from sidechainnet.utils.sequence import ProteinVocabulary
from einops import rearrange
# general functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def broadcat(tensors, dim = -1):
num_tensors = len(tensors)
shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
shape_len = list(shape_lens)[0]
dim = (dim + shape_len) if dim < 0 else dim
dims = list(zip(*map(lambda t: list(t.shape), tensors)))
expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatenation'
max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
expanded_dims.insert(dim, (dim, dims[dim]))
expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
return torch.cat(tensors, dim = dim)
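# Editorial note (a sketch with hypothetical shapes): broadcat expands every
# non-concatenation dimension to a common size before concatenating, so tensors of
# shape (1, 8, 64) and (4, 8, 32) can be joined along dim = -1 into a (4, 8, 96) result:
#   out = broadcat([torch.randn(1, 8, 64), torch.randn(4, 8, 32)], dim = -1)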
# singleton msa transformer
msa_instances = None
def get_msa_transformer():
global msa_instances
if not exists(msa_instances):
msa_model, alphabet = torch.hub.load("facebookresearch/esm", "esm_msa1_t12_100M_UR50S")
batch_converter = alphabet.get_batch_converter()
msa_instances = (msa_model, batch_converter) # cache the loaded model and batch converter so repeat calls reuse them
return msa_instances
return msa_instances
# MSA embedding related functions
VOCAB = ProteinVocabulary()
def ids_to_aa_str(x):
assert isinstance(x, list), 'input must be a list'
id2aa = VOCAB._int2char
is_char = lambda c: isinstance(c, str) and len(c) == 1
out = []
for el in x:
if isinstance(el, list):
out.append(ids_to_aa_str(el))
elif isinstance(el, int):
out.append(id2aa[el])
else:
raise TypeError('type must be either list or character')
if all(map(is_char, out)):
return ''.join(out)
return out
def aa_str_to_embed_input(x):
assert isinstance(x, list), 'input must be a list'
out = []
for el in x:
if isinstance(el, list):
out.append(aa_str_to_embed_input(el))
elif isinstance(el, str):
out.append((None, el))
else:
raise TypeError('type must be either list or string')
return out
def apc(x):
a1 = x.sum(-1, keepdims=True)
a2 = x.sum(-2, keepdims=True)
a12 = x.sum((-1, -2), keepdims=True)
avg = a1 * a2
avg.div_(a12)
normalized = x - avg
return normalized
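# Editorial note (a sketch): apc is the average-product correction commonly applied
# to contact/attention maps; element-wise it computes
#   apc(x)[i, j] = x[i, j] - row_sum(i) * col_sum(j) / total_sum
# and is used below as apc(symmetrize(attentions)).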
def symmetrize(x):
return x + x.transpose(-1, -2)
def pad_image_to(tensor, size, value = 0.):
remainder = size - tensor.shape[-1]
tensor = F.pad(tensor, (0, remainder, 0, remainder), value = value)
return tensor
# getting a single MSA attention embedding, with caching
CACHE_PATH = default(os.getenv('CACHE_PATH'), os.path.expanduser('~/.cache.ddpm-proteins'))
FETCH_FROM_CACHE = not exists(os.getenv('CLEAR_CACHE'))
os.makedirs(CACHE_PATH, exist_ok = True)
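# Editorial note (a sketch, hypothetical invocation): running e.g.
#   CACHE_PATH=/tmp/msa-cache python train.py
# stores one <id>.pt attention tensor per sequence under /tmp/msa-cache, while setting
# CLEAR_CACHE to any non-empty value makes the code recompute embeddings instead of
# loading previously cached ones.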
@torch.no_grad()
def get_msa_attention_embedding(
model,
batch_converter,
aa_str,
id,
fetch_msas_fn = lambda t: [],
cache = True
):
device = next(model.parameters()).device
cache_full_path = os.path.join(CACHE_PATH, f'{id}.pt')
if cache and FETCH_FROM_CACHE and os.path.exists(cache_full_path):
try:
loaded = torch.load(cache_full_path).to(device)
except Exception: # a corrupt or unreadable cache file just falls through to recomputation
loaded = None
if exists(loaded):
return loaded
msas = default(fetch_msas_fn(aa_str), [])
seq_with_msas = [aa_str, *msas]
embed_inputs = aa_str_to_embed_input(seq_with_msas)
_, _, msa_batch_tokens = batch_converter(embed_inputs)
results = model(msa_batch_tokens.to(device), need_head_weights = True)
attentions = results['row_attentions']
attentions = attentions[..., 1:, 1:]
attentions = rearrange(attentions, 'b l h m n -> b (l h) m n')
attentions = apc(symmetrize(attentions))
if cache:
print(f'caching to {cache_full_path}')
torch.save(attentions, cache_full_path)
return attentions
def get_msa_attention_embeddings(
model,
batch_converter,
seqs,
ids,
fetch_msas_fn = lambda t: [],
cache = True
):
n = seqs.shape[1]
seqs = rearrange(seqs, 'b n -> b () n')
aa_strs = ids_to_aa_str(seqs.cpu().tolist())
embeds_list = [get_msa_attention_embedding(model, batch_converter, aa, seq_id, cache = cache) for aa, seq_id in zip(aa_strs, ids)]
embeds_list = [pad_image_to(embed, n) for embed in embeds_list]
embeds = torch.cat(embeds_list, dim = 0)
return embeds
# training utils
def cycle(loader, thres = 256):
while True:
for data in loader:
if data.seqs.shape[1] <= thres:
yield data
def save_heatmap(tensor, filepath, dpi = 200, return_image = False):
heatmap = sn.heatmap(tensor.cpu().numpy())
figure = heatmap.get_figure()
figure.savefig(filepath, dpi = dpi)
plt.clf()
if not return_image:
return
return Image.open(filepath)
|
[] |
[] |
[
"CLEAR_CACHE",
"CACHE_PATH"
] |
[]
|
["CLEAR_CACHE", "CACHE_PATH"]
|
python
| 2 | 0 | |
main.go
|
package main
import (
"fmt"
"net"
"net/http"
"os"
"time"
)
func getColor() string {
return os.Getenv("COLOR")
}
func tcpHandler(c net.Conn) {
for {
c.Write([]byte(fmt.Sprintf("The color is #%s", getColor())))
c.Write([]byte(fmt.Sprintln()))
time.Sleep(5 * time.Second)
}
}
func serveTCP() {
ln, err := net.Listen("tcp", ":8081")
if err != nil {
// handle error but not today
}
for {
conn, err := ln.Accept()
if err != nil {
// handle error but not today
}
go tcpHandler(conn)
}
}
func httpHandler(w http.ResponseWriter, r *http.Request) {
color := getColor()
fmt.Printf("Serving color: #%s", color)
fmt.Println()
fmt.Fprintf(w, "<body bgcolor=\"#%s\"><h1>#%s</h1></body>", color, color)
}
func main() {
color := getColor()
fmt.Printf("Booted with color: #%s", color)
fmt.Println()
go serveTCP()
http.HandleFunc("/", httpHandler)
fmt.Println("listening with http on :8080 and tcp on :8081")
http.ListenAndServe(":8080", nil)
}
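// Editorial note (a sketch): running `COLOR=ff0000 go run main.go` serves a red page at
// http://localhost:8080 and, on tcp port 8081 (e.g. `nc localhost 8081`), prints
// "The color is #ff0000" every five seconds.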
|
[
"\"COLOR\""
] |
[] |
[
"COLOR"
] |
[]
|
["COLOR"]
|
go
| 1 | 0 | |
neutron/tests/unit/objects/test_objects.py
|
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
from oslo_versionedobjects import fixture
from neutron import objects
from neutron.objects import base
from neutron.tests import base as test_base
# NOTE: The hashes in this list should only be changed if they come with a
# corresponding version bump in the affected objects. Please keep the list in
# alphabetic order.
object_data = {
'AddressScope': '1.0-dd0dfdb67775892d3adc090e28e43bd8',
'Agent': '1.1-64b670752d57b3c7602cb136e0338507',
'AllowedAddressPair': '1.0-9f9186b6f952fbf31d257b0458b852c0',
'AutoAllocatedTopology': '1.0-74642e58c53bf3610dc224c59f81b242',
'ConntrackHelper': '1.0-b1a50cfe18178db50c7f206e75613f4b',
'DefaultSecurityGroup': '1.0-971520cb2e0ec06d747885a0cf78347f',
'DistributedPortBinding': '1.0-39c0d17b281991dcb66716fee5a8bef2',
'DNSNameServer': '1.0-bf87a85327e2d812d1666ede99d9918b',
'ExternalNetwork': '1.0-53d885e033cb931f9bb3bdd6bbe3f0ce',
'DVRMacAddress': '1.0-d3c61a8338d20da74db2364d4d6554f2',
'ExtraDhcpOpt': '1.0-632f689cbeb36328995a7aed1d0a78d3',
'FlatAllocation': '1.0-bf666f24f4642b047eeca62311fbcb41',
'Flavor': '1.0-82194de5c9aafce08e8527bb7977f5c6',
'FlavorServiceProfileBinding': '1.0-a2c8731e16cefdac4571f80abf1f8930',
'FloatingIP': '1.0-0205cc99ec79e8089d641ed1b565ddae',
'FloatingIPDNS': '1.0-ee3db848500fa1825235f701828c06d5',
'GeneveAllocation': '1.0-d5f76e8eac60a778914d61dd8e23e90f',
'GeneveEndpoint': '1.0-040f026996b5952e2ae4ccd40ac61ca6',
'GreAllocation': '1.0-9ee1bbc4d999bea84c99425484b11ac5',
'GreEndpoint': '1.0-040f026996b5952e2ae4ccd40ac61ca6',
'IPAllocation': '1.0-47251b4c6d45c3b5feb0297fe5c461f2',
'IPAllocationPool': '1.0-371016a6480ed0b4299319cb46d9215d',
'IpamAllocation': '1.0-ace65431abd0a7be84cc4a5f32d034a3',
'IpamAllocationPool': '1.0-c4fa1460ed1b176022ede7af7d1510d5',
'IpamSubnet': '1.0-713de401682a70f34891e13af645fa08',
'L3HARouterAgentPortBinding': '1.0-d1d7ee13f35d56d7e225def980612ee5',
'L3HARouterNetwork': '1.0-87acea732853f699580179a94d2baf91',
'L3HARouterVRIdAllocation': '1.0-37502aebdbeadc4f9e3bd5e9da714ab9',
'MeteringLabel': '1.0-cc4b620a3425222447cbe459f62de533',
'MeteringLabelRule': '1.0-b5c5717e7bab8d1af1623156012a5842',
'Log': '1.0-6391351c0f34ed34375a19202f361d24',
'Network': '1.0-f2f6308f79731a767b92b26b0f4f3849',
'NetworkDhcpAgentBinding': '1.0-6eeceb5fb4335cd65a305016deb41c68',
'NetworkDNSDomain': '1.0-420db7910294608534c1e2e30d6d8319',
'NetworkPortSecurity': '1.0-b30802391a87945ee9c07582b4ff95e3',
'NetworkRBAC': '1.2-192845c5ed0718e1c54fac36936fcd7d',
'NetworkSegment': '1.0-57b7f2960971e3b95ded20cbc59244a8',
'NetworkSegmentRange': '1.0-bdec1fffc9058ea676089b1f2f2b3cf3',
'Port': '1.5-98f35183d876c9beb188f4bf44d4d886',
'PortBinding': '1.0-3306deeaa6deb01e33af06777d48d578',
'PortBindingLevel': '1.1-50d47f63218f87581b6cd9a62db574e5',
'PortDataPlaneStatus': '1.0-25be74bda46c749653a10357676c0ab2',
'PortDNS': '1.1-c5ca2dc172bdd5fafee3fc986d1d7023',
'PortForwarding': '1.1-db61273978c497239be5389a8aeb1c61',
'PortSecurity': '1.0-b30802391a87945ee9c07582b4ff95e3',
'PortUplinkStatusPropagation': '1.0-3cfb3f7da716ca9687e4f04ca72b081d',
'ProviderResourceAssociation': '1.0-05ab2d5a3017e5ce9dd381328f285f34',
'ProvisioningBlock': '1.0-c19d6d05bfa8143533471c1296066125',
'QosBandwidthLimitRule': '1.3-51b662b12a8d1dfa89288d826c6d26d3',
'QosDscpMarkingRule': '1.3-0313c6554b34fd10c753cb63d638256c',
'QosMinimumBandwidthRule': '1.3-314c3419f4799067cc31cc319080adff',
'QosPolicyRBAC': '1.1-192845c5ed0718e1c54fac36936fcd7d',
'QosRuleType': '1.3-7286188edeb3a0386f9cf7979b9700fc',
'QosRuleTypeDriver': '1.0-7d8cb9f0ef661ac03700eae97118e3db',
'QosPolicy': '1.8-4adb0cde3102c10d8970ec9487fd7fe7',
'QosPolicyDefault': '1.0-59e5060eedb1f06dd0935a244d27d11c',
'QosPolicyFloatingIPBinding': '1.0-5625df4205a18778cd6aa40f99be024e',
'QosPolicyRouterGatewayIPBinding': '1.0-da064fbfe5ee18c950b905b483bf59e3',
'QosPolicyNetworkBinding': '1.0-df53a1e0f675aab8d27a1ccfed38dc42',
'QosPolicyPortBinding': '1.0-66cb364ac99aa64523ade07f9f868ea6',
'Quota': '1.0-6bb6a0f1bd5d66a2134ffa1a61873097',
'QuotaUsage': '1.0-6fbf820368681aac7c5d664662605cf9',
'Reservation': '1.0-49929fef8e82051660342eed51b48f2a',
'ResourceDelta': '1.0-a980b37e0a52618b5af8db29af18be76',
'Route': '1.0-a9883a63b416126f9e345523ec09483b',
'Router': '1.0-adb984d9b73aa11566d40abbeb790df1',
'RouterExtraAttributes': '1.0-ef8d61ae2864f0ec9af0ab7939cab318',
'RouterL3AgentBinding': '1.0-c5ba6c95e3a4c1236a55f490cd67da82',
'RouterPort': '1.0-c8c8f499bcdd59186fcd83f323106908',
'RouterRoute': '1.0-07fc5337c801fb8c6ccfbcc5afb45907',
'SecurityGroup': '1.1-f712265418f154f7c080e02857ffe2ef',
'SecurityGroupPortBinding': '1.0-6879d5c0af80396ef5a72934b6a6ef20',
'SecurityGroupRBAC': '1.0-192845c5ed0718e1c54fac36936fcd7d',
'SecurityGroupRule': '1.0-e9b8dace9d48b936c62ad40fe1f339d5',
'SegmentHostMapping': '1.0-521597cf82ead26217c3bd10738f00f0',
'ServiceProfile': '1.0-9beafc9e7d081b8258f3c5cb66ac5eed',
'StandardAttribute': '1.0-617d4f46524c4ce734a6fc1cc0ac6a0b',
'Subnet': '1.0-927155c1fdd5a615cbcb981dda97bce4',
'SubnetPool': '1.0-a0e03895d1a6e7b9d4ab7b0ca13c3867',
'SubnetPoolPrefix': '1.0-13c15144135eb869faa4a76dc3ee3b6c',
'SubnetServiceType': '1.0-05ae4cdb2a9026a697b143926a1add8c',
'SubPort': '1.0-72c8471068db1f0491b5480fe49b52bb',
'Tag': '1.0-1a0d20379920ffa3cebfd3e016d2f7a0',
'Trunk': '1.1-aa3922b39e37fbb89886c2ee8715cf49',
'VlanAllocation': '1.0-72636c1b7d5c8eef987bd09666e64f3e',
'VxlanAllocation': '1.0-934638cd32d00f81d6fbf93c8eb5755a',
'VxlanEndpoint': '1.0-40522eafdcf838758711dfa886cbdb2e',
}
class TestObjectVersions(test_base.BaseTestCase):
def setUp(self):
super(TestObjectVersions, self).setUp()
# NOTE(ihrachys): seed registry with all objects under neutron.objects
# before validating the hashes
objects.register_objects()
def test_versions(self):
checker = fixture.ObjectVersionChecker(
base.NeutronObjectRegistry.obj_classes())
fingerprints = checker.get_hashes()
if os.getenv('GENERATE_HASHES'):
with open('object_hashes.txt', 'w') as hashes_file:
hashes_file.write(pprint.pformat(fingerprints))
expected, actual = checker.test_hashes(object_data)
self.assertEqual(expected, actual,
'Some objects have changed; please make sure the '
'versions have been bumped, and then update their '
'hashes in the object_data map in this test module.')
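# Editorial note (a sketch): when an object version is intentionally bumped, the new
# fingerprints can be regenerated by running this test with the GENERATE_HASHES
# environment variable set (e.g. GENERATE_HASHES=1), which writes them to
# object_hashes.txt so the object_data map above can be updated.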
|
[] |
[] |
[
"GENERATE_HASHES"
] |
[]
|
["GENERATE_HASHES"]
|
python
| 1 | 0 | |
core/internal/cltest/cltest.go
|
package cltest
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math/big"
"math/rand"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/smartcontractkit/chainlink/core/store/dialects"
"github.com/smartcontractkit/chainlink/core/services"
"github.com/smartcontractkit/chainlink/core/services/job"
"github.com/smartcontractkit/chainlink/core/static"
p2ppeer "github.com/libp2p/go-libp2p-core/peer"
"github.com/smartcontractkit/chainlink/core/assets"
"github.com/smartcontractkit/chainlink/core/auth"
"github.com/smartcontractkit/chainlink/core/cmd"
"github.com/smartcontractkit/chainlink/core/gracefulpanic"
"github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/flux_aggregator_wrapper"
"github.com/smartcontractkit/chainlink/core/internal/mocks"
"github.com/smartcontractkit/chainlink/core/logger"
"github.com/smartcontractkit/chainlink/core/services/bulletprooftxmanager"
"github.com/smartcontractkit/chainlink/core/services/chainlink"
"github.com/smartcontractkit/chainlink/core/services/eth"
"github.com/smartcontractkit/chainlink/core/services/pipeline"
"github.com/smartcontractkit/chainlink/core/services/postgres"
strpkg "github.com/smartcontractkit/chainlink/core/store"
"github.com/smartcontractkit/chainlink/core/store/models"
"github.com/smartcontractkit/chainlink/core/store/orm"
"github.com/smartcontractkit/chainlink/core/store/presenters"
"github.com/smartcontractkit/chainlink/core/utils"
"github.com/smartcontractkit/chainlink/core/web"
webpresenters "github.com/smartcontractkit/chainlink/core/web/presenters"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting/types"
"github.com/DATA-DOG/go-txdb"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
"github.com/gin-gonic/gin"
"github.com/gobuffalo/packr"
"github.com/gorilla/securecookie"
"github.com/gorilla/sessions"
"github.com/gorilla/websocket"
"github.com/manyminds/api2go/jsonapi"
"github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/tidwall/gjson"
"go.uber.org/zap/zapcore"
null "gopkg.in/guregu/null.v4"
"gorm.io/gorm"
)
const (
// RootDir the root directory for cltest
RootDir = "/tmp/chainlink_test"
// APIKey of the fixture API user
APIKey = "2d25e62eaf9143e993acaf48691564b2"
// APISecret of the fixture API user.
APISecret = "1eCP/w0llVkchejFaoBpfIGaLRxZK54lTXBCT22YLW+pdzE4Fafy/XO5LoJ2uwHi"
// APIEmail is the email of the fixture API user
APIEmail = "[email protected]"
// Password just a password we use everywhere for testing
Password = "p4SsW0rD1!@#_"
// SessionSecret is the hardcoded secret solely used for test
SessionSecret = "clsession_test_secret"
// DefaultKeyAddress is the ETH address of the fixture key
DefaultKeyAddress = "0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4"
// DefaultKeyFixtureFileName is the filename of the fixture key
DefaultKeyFixtureFileName = "testkey-0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4.json"
// DefaultKeyJSON is the JSON for the default key encrypted with fast scrypt and password 'password' (used for fixture file)
DefaultKeyJSON = `{"address":"F67D0290337bca0847005C7ffD1BC75BA9AAE6e4","crypto":{"cipher":"aes-128-ctr","ciphertext":"9c3565050ba4e10ea388bcd17d77c141441ce1be5db339f0201b9ed733d780c6","cipherparams":{"iv":"f968fc947495646ee8b5dbaadb242ec0"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"33ad88742a983dfeb8adcc9a39fdde4cb47f7e23ea2ef80b35723d940959e3fd"},"mac":"b3747959cbbb9b26f861ab82d69154b4ec8108bbac017c1341f6fd3295beceaf"},"id":"8c79a654-96b1-45d9-8978-3efa07578011","version":3}`
// AllowUnstarted enables an application that can be used in tests without being started
AllowUnstarted = "allow_unstarted"
// DefaultPeerID is the peer ID of the fixture p2p key
DefaultPeerID = "12D3KooWApUJaQB2saFjyEUfq6BmysnsSnhLnY5CF9tURYVKgoXK"
// A peer ID without an associated p2p key.
NonExistentPeerID = "12D3KooWAdCzaesXyezatDzgGvCngqsBqoUqnV9PnVc46jsVt2i9"
// DefaultOCRKeyBundleID is the ID of the fixture ocr key bundle
DefaultOCRKeyBundleID = "7f993fb701b3410b1f6e8d4d93a7462754d24609b9b31a4fe64a0cb475a4d934"
)
var (
DefaultP2PPeerID models.PeerID
NonExistentP2PPeerID models.PeerID
// DefaultOCRKeyBundleIDSha256 is the ID of the fixture ocr key bundle
DefaultOCRKeyBundleIDSha256 models.Sha256Hash
FluxAggAddress = common.HexToAddress("0x3cCad4715152693fE3BC4460591e3D3Fbd071b42")
storeCounter uint64
minimumContractPayment = assets.NewLink(100)
)
func init() {
gin.SetMode(gin.TestMode)
gomega.SetDefaultEventuallyTimeout(3 * time.Second)
lvl := logLevelFromEnv()
logger.SetLogger(logger.CreateTestLogger(lvl))
// Register txdb as dialect wrapping postgres
// See: DialectTransactionWrappedPostgres
config := orm.NewConfig()
parsed := config.DatabaseURL()
if parsed.Path == "" {
msg := fmt.Sprintf("invalid DATABASE_URL: `%s`. You must set DATABASE_URL env var to point to your test database. Note that the test database MUST end in `_test` to differentiate from a possible production DB. HINT: Try DATABASE_URL=postgresql://postgres@localhost:5432/chainlink_test?sslmode=disable", parsed.String())
panic(msg)
}
if !strings.HasSuffix(parsed.Path, "_test") {
msg := fmt.Sprintf("cannot run tests against database named `%s`. Note that the test database MUST end in `_test` to differentiate from a possible production DB. HINT: Try DATABASE_URL=postgresql://postgres@localhost:5432/chainlink_test?sslmode=disable", parsed.Path[1:])
panic(msg)
}
// Disable SavePoints because they cause random errors for reasons I cannot fathom
// Perhaps txdb's built-in transaction emulation is broken in some subtle way?
// NOTE: That this will cause transaction BEGIN/ROLLBACK to effectively be
// a no-op, this should have no negative impact on normal test operation.
// If you MUST test BEGIN/ROLLBACK behaviour, you will have to configure your
// store to use the raw DialectPostgres dialect and setup a one-use database.
// See BootstrapThrowawayORM() as a convenience function to help you do this.
txdb.Register(string(dialects.TransactionWrappedPostgres), string(dialects.Postgres), parsed.String(), txdb.SavePointOption(nil))
// Seed the random number generator, otherwise separate modules will take
// the same advisory locks when tested with `go test -p N` for N > 1
seed := time.Now().UTC().UnixNano()
logger.Debugf("Using seed: %v", seed)
rand.Seed(seed)
defaultP2PPeerID, err := p2ppeer.Decode(DefaultPeerID)
if err != nil {
panic(err)
}
DefaultP2PPeerID = models.PeerID(defaultP2PPeerID)
nonExistentP2PPeerID, err := p2ppeer.Decode(NonExistentPeerID)
if err != nil {
panic(err)
}
NonExistentP2PPeerID = models.PeerID(nonExistentP2PPeerID)
DefaultOCRKeyBundleIDSha256, err = models.Sha256HashFromHex(DefaultOCRKeyBundleID)
if err != nil {
panic(err)
}
}
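// Editorial note (a sketch): the checks above expect a dedicated test database, e.g.
//   export DATABASE_URL=postgresql://postgres@localhost:5432/chainlink_test?sslmode=disable
// Any database whose name does not end in `_test` causes init() to panic before tests run.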
func logLevelFromEnv() zapcore.Level {
lvl := zapcore.ErrorLevel
if env := os.Getenv("LOG_LEVEL"); env != "" {
_ = lvl.Set(env)
}
return lvl
}
// TestConfig struct with test store and wsServer
type TestConfig struct {
t testing.TB
*orm.Config
wsServer *httptest.Server
}
// NewConfig returns a new TestConfig
func NewConfig(t testing.TB) (*TestConfig, func()) {
t.Helper()
wsserver, url, cleanup := newWSServer()
config := NewConfigWithWSServer(t, url, wsserver)
// Tests almost always want to make requests to localhost, so it's easier to set this here
config.Set("DEFAULT_HTTP_ALLOW_UNRESTRICTED_NETWORK_ACCESS", true)
// Disable gas updater for application tests
config.Set("GAS_UPDATER_ENABLED", false)
// Disable tx re-sending for application tests
config.Set("ETH_TX_RESEND_AFTER_THRESHOLD", 0)
return config, cleanup
}
func NewRandomInt64() int64 {
id := rand.Int63()
return id
}
func MustRandomBytes(t *testing.T, l int) (b []byte) {
t.Helper()
b = make([]byte, l)
/* #nosec G404 */
_, err := rand.Read(b)
if err != nil {
t.Fatal(err)
}
return b
}
// NewTestConfig returns a test configuration
func NewTestConfig(t testing.TB, options ...interface{}) *TestConfig {
t.Helper()
count := atomic.AddUint64(&storeCounter, 1)
rootdir := filepath.Join(RootDir, fmt.Sprintf("%d-%d", time.Now().UnixNano(), count))
rawConfig := orm.NewConfig()
rawConfig.Dialect = dialects.TransactionWrappedPostgres
for _, opt := range options {
switch v := opt.(type) {
case dialects.DialectName:
rawConfig.Dialect = v
}
}
// Unique advisory lock is required otherwise all tests will block each other
rawConfig.AdvisoryLockID = NewRandomInt64()
rawConfig.Set("BRIDGE_RESPONSE_URL", "http://localhost:6688")
rawConfig.Set("ETH_CHAIN_ID", 3)
rawConfig.Set("CHAINLINK_DEV", true)
rawConfig.Set("ETH_GAS_BUMP_THRESHOLD", 3)
rawConfig.Set("MIGRATE_DATABASE", false)
rawConfig.Set("MINIMUM_SERVICE_DURATION", "24h")
rawConfig.Set("MIN_INCOMING_CONFIRMATIONS", 1)
rawConfig.Set("MIN_OUTGOING_CONFIRMATIONS", 6)
rawConfig.Set("MINIMUM_CONTRACT_PAYMENT", minimumContractPayment.Text(10))
rawConfig.Set("ROOT", rootdir)
rawConfig.Set("SESSION_TIMEOUT", "2m")
rawConfig.Set("INSECURE_FAST_SCRYPT", "true")
rawConfig.Set("BALANCE_MONITOR_ENABLED", "false")
rawConfig.Set("P2P_LISTEN_PORT", "12345")
rawConfig.Set("P2P_PEER_ID", DefaultP2PPeerID.String())
rawConfig.Set("DATABASE_TIMEOUT", "5s")
rawConfig.Set("GLOBAL_LOCK_RETRY_INTERVAL", "10ms")
rawConfig.Set("ORM_MAX_OPEN_CONNS", "5")
rawConfig.Set("ORM_MAX_IDLE_CONNS", "2")
rawConfig.SecretGenerator = mockSecretGenerator{}
config := TestConfig{t: t, Config: rawConfig}
return &config
}
// NewConfigWithWSServer returns a new config with the specified wsserver
func NewConfigWithWSServer(t testing.TB, url string, wsserver *httptest.Server) *TestConfig {
t.Helper()
config := NewTestConfig(t)
config.Set("ETH_URL", url)
config.wsServer = wsserver
return config
}
func NewPipelineORM(t testing.TB, config *TestConfig, db *gorm.DB) (pipeline.ORM, postgres.EventBroadcaster, func()) {
t.Helper()
eventBroadcaster := postgres.NewEventBroadcaster(config.DatabaseURL(), 0, 0)
eventBroadcaster.Start()
return pipeline.NewORM(db, config, eventBroadcaster), eventBroadcaster, func() {
eventBroadcaster.Stop()
}
}
func NewEthBroadcaster(t testing.TB, store *strpkg.Store, config *TestConfig) (bulletprooftxmanager.EthBroadcaster, func()) {
t.Helper()
eventBroadcaster := postgres.NewEventBroadcaster(config.DatabaseURL(), 0, 0)
eventBroadcaster.Start()
return bulletprooftxmanager.NewEthBroadcaster(store, config, eventBroadcaster), func() {
eventBroadcaster.Stop()
}
}
// TestApplication holds the test application and test servers
type TestApplication struct {
t testing.TB
*chainlink.ChainlinkApplication
Config *TestConfig
Server *httptest.Server
wsServer *httptest.Server
connectedChannel chan struct{}
Started bool
Backend *backends.SimulatedBackend
Key models.Key
allowUnstarted bool
}
func newWSServer() (*httptest.Server, string, func()) {
return NewWSServer("", nil)
}
// NewWSServer returns a new wsserver
func NewWSServer(msg string, callback func(data []byte)) (*httptest.Server, string, func()) {
upgrader := websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool { return true },
}
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
conn, err := upgrader.Upgrade(w, r, nil)
logger.PanicIf(err)
for {
_, data, err := conn.ReadMessage()
if err != nil {
break
}
if callback != nil {
callback(data)
}
err = conn.WriteMessage(websocket.BinaryMessage, []byte(msg))
if err != nil {
break
}
}
})
server := httptest.NewServer(handler)
u, err := url.Parse(server.URL)
logger.PanicIf(err)
u.Scheme = "ws"
return server, u.String(), func() {
server.Close()
}
}
// NewApplication creates a New TestApplication along with a NewConfig
// It mocks the keystore with no keys or accounts by default
func NewApplication(t testing.TB, flagsAndDeps ...interface{}) (*TestApplication, func()) {
t.Helper()
c, cfgCleanup := NewConfig(t)
app, cleanup := NewApplicationWithConfig(t, c, flagsAndDeps...)
kst := new(mocks.KeyStoreInterface)
kst.On("Accounts").Return([]accounts.Account{})
app.Store.KeyStore = kst
return app, func() {
cleanup()
cfgCleanup()
}
}
// NewApplicationWithKey creates a new TestApplication along with a new config
// It uses the native keystore and will load any keys that are in the database
func NewApplicationWithKey(t testing.TB, flagsAndDeps ...interface{}) (*TestApplication, func()) {
t.Helper()
config, cfgCleanup := NewConfig(t)
app, cleanup := NewApplicationWithConfigAndKey(t, config, flagsAndDeps...)
return app, func() {
cleanup()
cfgCleanup()
}
}
// NewApplicationWithConfigAndKey creates a new TestApplication with the given testconfig
// it will also provide an unlocked account on the keystore
func NewApplicationWithConfigAndKey(t testing.TB, tc *TestConfig, flagsAndDeps ...interface{}) (*TestApplication, func()) {
t.Helper()
app, cleanup := NewApplicationWithConfig(t, tc, flagsAndDeps...)
for _, dep := range flagsAndDeps {
switch v := dep.(type) {
case models.Key:
MustAddKeyToKeystore(t, &v, app.Store)
app.Key = v
}
}
if app.Key.Address.Address() == utils.ZeroAddress {
app.Key, _ = MustAddRandomKeyToKeystore(t, app.Store, 0)
}
require.NoError(t, app.Store.KeyStore.Unlock(Password))
return app, cleanup
}
// NewApplicationWithConfig creates a New TestApplication with specified test config
func NewApplicationWithConfig(t testing.TB, tc *TestConfig, flagsAndDeps ...interface{}) (*TestApplication, func()) {
t.Helper()
var ethClient eth.Client = ð.NullClient{}
var advisoryLocker postgres.AdvisoryLocker = &postgres.NullAdvisoryLocker{}
var externalInitiatorManager chainlink.ExternalInitiatorManager = &services.NullExternalInitiatorManager{}
for _, flag := range flagsAndDeps {
switch dep := flag.(type) {
case eth.Client:
ethClient = dep
case postgres.AdvisoryLocker:
advisoryLocker = dep
case chainlink.ExternalInitiatorManager:
externalInitiatorManager = dep
}
}
ta := &TestApplication{t: t, connectedChannel: make(chan struct{}, 1)}
appInstance, err := chainlink.NewApplication(tc.Config, ethClient, advisoryLocker, strpkg.InsecureKeyStoreGen, externalInitiatorManager, func(app chainlink.Application) {
ta.connectedChannel <- struct{}{}
})
require.NoError(t, err)
app := appInstance.(*chainlink.ChainlinkApplication)
ta.ChainlinkApplication = app
server := newServer(ta)
tc.Config.Set("CLIENT_NODE_URL", server.URL)
app.Store.Config = tc.Config
for _, flag := range flagsAndDeps {
if flag == AllowUnstarted {
ta.allowUnstarted = true
}
}
ta.Config = tc
ta.Server = server
ta.wsServer = tc.wsServer
return ta, func() {
ta.StopIfStarted()
}
}
func NewEthMocks(t testing.TB) (*mocks.RPCClient, *mocks.GethClient, *mocks.Subscription, func()) {
r := new(mocks.RPCClient)
g := new(mocks.GethClient)
s := new(mocks.Subscription)
var assertMocksCalled func()
switch tt := t.(type) {
case *testing.T:
assertMocksCalled = func() {
r.AssertExpectations(tt)
g.AssertExpectations(tt)
s.AssertExpectations(tt)
}
case *testing.B:
assertMocksCalled = func() {}
}
return r, g, s, assertMocksCalled
}
func NewEthMocksWithStartupAssertions(t testing.TB) (*mocks.RPCClient, *mocks.GethClient, *mocks.Subscription, func()) {
r, g, s, assertMocksCalled := NewEthMocks(t)
g.On("ChainID", mock.Anything).Return(NewTestConfig(t).ChainID(), nil)
g.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil).Maybe()
r.On("EthSubscribe", mock.Anything, mock.Anything, "newHeads").Return(EmptyMockSubscription(), nil)
s.On("Err").Return(nil).Maybe()
s.On("Unsubscribe").Return(nil).Maybe()
return r, g, s, assertMocksCalled
}
func newServer(app chainlink.Application) *httptest.Server {
engine := web.Router(app)
return httptest.NewServer(engine)
}
func (ta *TestApplication) NewBox() packr.Box {
ta.t.Helper()
return packr.NewBox("../fixtures/operator_ui/dist")
}
func (ta *TestApplication) Start() error {
ta.t.Helper()
ta.Started = true
err := ta.ChainlinkApplication.Start()
return err
}
func (ta *TestApplication) StartAndConnect() error {
ta.t.Helper()
err := ta.Start()
if err != nil {
return err
}
return ta.waitForConnection()
}
// waitForConnection wait for the StartAndConnect callback to be called
func (ta *TestApplication) waitForConnection() error {
select {
case <-time.After(4 * time.Second):
return errors.New("TestApplication#StartAndConnect() timed out")
case <-ta.connectedChannel:
return nil
}
}
// Stop will stop the test application and perform cleanup
func (ta *TestApplication) Stop() error {
ta.t.Helper()
if !ta.Started {
if ta.allowUnstarted {
return nil
}
ta.t.Fatal("TestApplication Stop() called on an unstarted application")
}
// TODO: Here we double close, which is less than ideal.
// We would prefer to invoke a method on an interface that
// cleans up only in test.
ta.ChainlinkApplication.StopIfStarted()
cleanUpStore(ta.t, ta.Store)
if ta.Server != nil {
ta.Server.Close()
}
if ta.wsServer != nil {
ta.wsServer.Close()
}
return nil
}
func (ta *TestApplication) MustSeedNewSession() string {
session := NewSession()
require.NoError(ta.t, ta.Store.SaveSession(&session))
return session.ID
}
// ImportKey adds private key to the application disk keystore, not database.
func (ta *TestApplication) ImportKey(content string) {
_, err := ta.Store.KeyStore.Import([]byte(content), Password)
require.NoError(ta.t, err)
require.NoError(ta.t, ta.Store.KeyStore.Unlock(Password))
}
func (ta *TestApplication) NewHTTPClient() HTTPClientCleaner {
ta.t.Helper()
sessionID := ta.MustSeedNewSession()
return HTTPClientCleaner{
HTTPClient: NewMockAuthenticatedHTTPClient(ta.Config, sessionID),
t: ta.t,
}
}
// NewClientAndRenderer creates a new cmd.Client for the test application
func (ta *TestApplication) NewClientAndRenderer() (*cmd.Client, *RendererMock) {
sessionID := ta.MustSeedNewSession()
r := &RendererMock{}
client := &cmd.Client{
Renderer: r,
Config: ta.Config.Config,
AppFactory: seededAppFactory{ta.ChainlinkApplication},
KeyStoreAuthenticator: CallbackAuthenticator{func(*strpkg.Store, string) (string, error) { return Password, nil }},
FallbackAPIInitializer: &MockAPIInitializer{},
Runner: EmptyRunner{},
HTTP: NewMockAuthenticatedHTTPClient(ta.Config, sessionID),
CookieAuthenticator: MockCookieAuthenticator{},
FileSessionRequestBuilder: &MockSessionRequestBuilder{},
PromptingSessionRequestBuilder: &MockSessionRequestBuilder{},
ChangePasswordPrompter: &MockChangePasswordPrompter{},
}
return client, r
}
func (ta *TestApplication) NewAuthenticatingClient(prompter cmd.Prompter) *cmd.Client {
cookieAuth := cmd.NewSessionCookieAuthenticator(ta.Config.Config, &cmd.MemoryCookieStore{})
client := &cmd.Client{
Renderer: &RendererMock{},
Config: ta.Config.Config,
AppFactory: seededAppFactory{ta.ChainlinkApplication},
KeyStoreAuthenticator: CallbackAuthenticator{func(*strpkg.Store, string) (string, error) { return Password, nil }},
FallbackAPIInitializer: &MockAPIInitializer{},
Runner: EmptyRunner{},
HTTP: cmd.NewAuthenticatedHTTPClient(ta.Config, cookieAuth, models.SessionRequest{}),
CookieAuthenticator: cookieAuth,
FileSessionRequestBuilder: cmd.NewFileSessionRequestBuilder(),
PromptingSessionRequestBuilder: cmd.NewPromptingSessionRequestBuilder(prompter),
ChangePasswordPrompter: &MockChangePasswordPrompter{},
}
return client
}
func (ta *TestApplication) MustCreateJobRun(txHashBytes []byte, blockHashBytes []byte) *models.JobRun {
job := NewJobWithWebInitiator()
err := ta.Store.CreateJob(&job)
require.NoError(ta.t, err)
jr := NewJobRun(job)
txHash := common.BytesToHash(txHashBytes)
jr.RunRequest.TxHash = &txHash
blockHash := common.BytesToHash(blockHashBytes)
jr.RunRequest.BlockHash = &blockHash
err = ta.Store.CreateJobRun(&jr)
require.NoError(ta.t, err)
return &jr
}
// NewStoreWithConfig creates a new store with given config
func NewStoreWithConfig(t testing.TB, config *TestConfig, flagsAndDeps ...interface{}) (*strpkg.Store, func()) {
t.Helper()
var advisoryLocker postgres.AdvisoryLocker = &postgres.NullAdvisoryLocker{}
for _, flag := range flagsAndDeps {
switch dep := flag.(type) {
case postgres.AdvisoryLocker:
advisoryLocker = dep
}
}
s, err := strpkg.NewInsecureStore(config.Config, ð.NullClient{}, advisoryLocker, gracefulpanic.NewSignal())
if err != nil {
require.NoError(t, err)
}
return s, func() {
cleanUpStore(config.t, s)
}
}
// NewStore creates a new store
func NewStore(t testing.TB, flagsAndDeps ...interface{}) (*strpkg.Store, func()) {
t.Helper()
c, cleanup := NewConfig(t)
store, storeCleanup := NewStoreWithConfig(t, c, flagsAndDeps...)
return store, func() {
storeCleanup()
cleanup()
}
}
func cleanUpStore(t testing.TB, store *strpkg.Store) {
t.Helper()
defer func() {
if err := os.RemoveAll(store.Config.RootDir()); err != nil {
logger.Warn("unable to clear test store:", err)
}
}()
logger.Sync()
require.NoError(t, store.Close())
}
func ParseJSON(t testing.TB, body io.Reader) models.JSON {
t.Helper()
b, err := ioutil.ReadAll(body)
require.NoError(t, err)
return models.JSON{Result: gjson.ParseBytes(b)}
}
func ParseJSONAPIErrors(t testing.TB, body io.Reader) *models.JSONAPIErrors {
t.Helper()
b, err := ioutil.ReadAll(body)
require.NoError(t, err)
var respJSON models.JSONAPIErrors
json.Unmarshal(b, &respJSON)
return &respJSON
}
// MustReadFile loads a file but should never fail
func MustReadFile(t testing.TB, file string) []byte {
t.Helper()
content, err := ioutil.ReadFile(file)
require.NoError(t, err)
return content
}
type HTTPClientCleaner struct {
HTTPClient cmd.HTTPClient
t testing.TB
}
func (r *HTTPClientCleaner) Get(path string, headers ...map[string]string) (*http.Response, func()) {
resp, err := r.HTTPClient.Get(path, headers...)
return bodyCleaner(r.t, resp, err)
}
func (r *HTTPClientCleaner) Post(path string, body io.Reader) (*http.Response, func()) {
resp, err := r.HTTPClient.Post(path, body)
return bodyCleaner(r.t, resp, err)
}
func (r *HTTPClientCleaner) Put(path string, body io.Reader) (*http.Response, func()) {
resp, err := r.HTTPClient.Put(path, body)
return bodyCleaner(r.t, resp, err)
}
func (r *HTTPClientCleaner) Patch(path string, body io.Reader, headers ...map[string]string) (*http.Response, func()) {
resp, err := r.HTTPClient.Patch(path, body, headers...)
return bodyCleaner(r.t, resp, err)
}
func (r *HTTPClientCleaner) Delete(path string) (*http.Response, func()) {
resp, err := r.HTTPClient.Delete(path)
return bodyCleaner(r.t, resp, err)
}
func bodyCleaner(t testing.TB, resp *http.Response, err error) (*http.Response, func()) {
t.Helper()
require.NoError(t, err)
return resp, func() { require.NoError(t, resp.Body.Close()) }
}
// ParseResponseBody will parse the given response into a byte slice
func ParseResponseBody(t testing.TB, resp *http.Response) []byte {
t.Helper()
b, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
return b
}
// ParseJSONAPIResponse parses the response and returns the JSONAPI resource.
func ParseJSONAPIResponse(t testing.TB, resp *http.Response, resource interface{}) error {
t.Helper()
input := ParseResponseBody(t, resp)
err := jsonapi.Unmarshal(input, resource)
if err != nil {
return fmt.Errorf("web: unable to unmarshal data, %+v", err)
}
return nil
}
// ParseJSONAPIResponseMeta parses the bytes of the root document and returns a
// map of *json.RawMessage's within the 'meta' key.
func ParseJSONAPIResponseMeta(input []byte) (map[string]*json.RawMessage, error) {
var root map[string]*json.RawMessage
err := json.Unmarshal(input, &root)
if err != nil {
return root, err
}
var meta map[string]*json.RawMessage
err = json.Unmarshal(*root["meta"], &meta)
return meta, err
}
// ParseJSONAPIResponseMetaCount parses the bytes of the root document and
// returns the value of the 'count' key from the 'meta' section.
func ParseJSONAPIResponseMetaCount(input []byte) (int, error) {
meta, err := ParseJSONAPIResponseMeta(input)
if err != nil {
return -1, err
}
var metaCount int
err = json.Unmarshal(*meta["count"], &metaCount)
return metaCount, err
}
// ReadLogs returns the contents of the application's log file as a string
func ReadLogs(config orm.ConfigReader) (string, error) {
logFile := fmt.Sprintf("%s/log.jsonl", config.RootDir())
b, err := ioutil.ReadFile(logFile)
return string(b), err
}
func FindServiceAgreement(t testing.TB, s *strpkg.Store, id string) models.ServiceAgreement {
t.Helper()
sa, err := s.FindServiceAgreement(id)
require.NoError(t, err)
return sa
}
// CreateJobSpecViaWeb creates a jobspec via web using /v2/specs
func CreateJobSpecViaWeb(t testing.TB, app *TestApplication, job models.JobSpec) models.JobSpec {
t.Helper()
marshaled, err := json.Marshal(&job)
assert.NoError(t, err)
return CreateSpecViaWeb(t, app, string(marshaled))
}
// CreateSpecViaWeb creates a jobspec via web using /v2/specs
func CreateSpecViaWeb(t testing.TB, app *TestApplication, spec string) models.JobSpec {
t.Helper()
client := app.NewHTTPClient()
resp, cleanup := client.Post("/v2/specs", bytes.NewBufferString(spec))
defer cleanup()
AssertServerResponse(t, resp, http.StatusOK)
var createdJob models.JobSpec
err := ParseJSONAPIResponse(t, resp, &createdJob)
require.NoError(t, err)
return createdJob
}
func CreateJobViaWeb(t testing.TB, app *TestApplication, spec string) job.Job {
t.Helper()
client := app.NewHTTPClient()
resp, cleanup := client.Post("/v2/jobs", bytes.NewBufferString(spec))
defer cleanup()
AssertServerResponse(t, resp, http.StatusOK)
var createdJob job.Job
err := ParseJSONAPIResponse(t, resp, &createdJob)
require.NoError(t, err)
return createdJob
}
func CreateJobViaWeb2(t testing.TB, app *TestApplication, spec string) webpresenters.JobResource {
t.Helper()
client := app.NewHTTPClient()
resp, cleanup := client.Post("/v2/jobs", bytes.NewBufferString(spec))
defer cleanup()
AssertServerResponse(t, resp, http.StatusOK)
var jobResponse webpresenters.JobResource
err := ParseJSONAPIResponse(t, resp, &jobResponse)
require.NoError(t, err)
return jobResponse
}
// CreateJobRunViaWeb creates a JobRun via web using /v2/specs/ID/runs
func CreateJobRunViaWeb(t testing.TB, app *TestApplication, j models.JobSpec, body ...string) models.JobRun {
t.Helper()
bodyBuffer := &bytes.Buffer{}
if len(body) > 0 {
bodyBuffer = bytes.NewBufferString(body[0])
}
client := app.NewHTTPClient()
resp, cleanup := client.Post("/v2/specs/"+j.ID.String()+"/runs", bodyBuffer)
defer cleanup()
AssertServerResponse(t, resp, http.StatusOK)
var jr models.JobRun
err := ParseJSONAPIResponse(t, resp, &jr)
require.NoError(t, err)
assert.Equal(t, j.ID, jr.JobSpecID)
return jr
}
func CreateJobRunViaExternalInitiator(
t testing.TB,
app *TestApplication,
j models.JobSpec,
eia auth.Token,
body string,
) models.JobRun {
t.Helper()
headers := make(map[string]string)
headers[static.ExternalInitiatorAccessKeyHeader] = eia.AccessKey
headers[static.ExternalInitiatorSecretHeader] = eia.Secret
url := app.Config.ClientNodeURL() + "/v2/specs/" + j.ID.String() + "/runs"
bodyBuf := bytes.NewBufferString(body)
resp, cleanup := UnauthenticatedPost(t, url, bodyBuf, headers)
defer cleanup()
AssertServerResponse(t, resp, 200)
var jr models.JobRun
err := ParseJSONAPIResponse(t, resp, &jr)
require.NoError(t, err)
assert.Equal(t, j.ID, jr.JobSpecID)
return jr
}
// CreateHelloWorldJobViaWeb creates a HelloWorld JobSpec with the given MockServer URL
func CreateHelloWorldJobViaWeb(t testing.TB, app *TestApplication, url string) models.JobSpec {
t.Helper()
buffer := MustReadFile(t, "testdata/hello_world_job.json")
var job models.JobSpec
err := json.Unmarshal(buffer, &job)
require.NoError(t, err)
data, err := models.Merge(job.Tasks[0].Params, JSONFromString(t, `{"get":"%v"}`, url))
require.NoError(t, err)
job.Tasks[0].Params = data
return CreateJobSpecViaWeb(t, app, job)
}
// UpdateJobRunViaWeb updates a JobRun via web using /v2/runs/ID
func UpdateJobRunViaWeb(
t testing.TB,
app *TestApplication,
jr models.JobRun,
bta *models.BridgeTypeAuthentication,
body string,
) models.JobRun {
t.Helper()
client := app.NewHTTPClient()
headers := map[string]string{"Authorization": "Bearer " + bta.IncomingToken}
resp, cleanup := client.Patch("/v2/runs/"+jr.ID.String(), bytes.NewBufferString(body), headers)
defer cleanup()
AssertServerResponse(t, resp, http.StatusOK)
var respJobRun presenters.JobRun
assert.NoError(t, ParseJSONAPIResponse(t, resp, &respJobRun))
assert.Equal(t, jr.ID, respJobRun.ID)
jr = respJobRun.JobRun
return jr
}
// CreateBridgeTypeViaWeb creates a bridgetype via web using /v2/bridge_types
func CreateBridgeTypeViaWeb(
t testing.TB,
app *TestApplication,
payload string,
) *models.BridgeTypeAuthentication {
t.Helper()
client := app.NewHTTPClient()
resp, cleanup := client.Post(
"/v2/bridge_types",
bytes.NewBufferString(payload),
)
defer cleanup()
AssertServerResponse(t, resp, http.StatusOK)
bt := &models.BridgeTypeAuthentication{}
err := ParseJSONAPIResponse(t, resp, bt)
require.NoError(t, err)
return bt
}
// CreateExternalInitiatorViaWeb creates an external initiator via web using /v2/external_initiators
func CreateExternalInitiatorViaWeb(
t testing.TB,
app *TestApplication,
payload string,
) *presenters.ExternalInitiatorAuthentication {
t.Helper()
client := app.NewHTTPClient()
resp, cleanup := client.Post(
"/v2/external_initiators",
bytes.NewBufferString(payload),
)
defer cleanup()
AssertServerResponse(t, resp, http.StatusCreated)
ei := &presenters.ExternalInitiatorAuthentication{}
err := ParseJSONAPIResponse(t, resp, ei)
require.NoError(t, err)
return ei
}
const (
// DBWaitTimeout is how long we wait by default for something to appear in
// the DB. It needs to be fairly long because integration
// tests rely on it.
DBWaitTimeout = 20 * time.Second
// DBPollingInterval can't be too short to avoid DOSing the test database
DBPollingInterval = 100 * time.Millisecond
// AsertNoActionTimeout shouldn't be too long, or it will slow down tests
AsertNoActionTimeout = 3 * time.Second
)
// WaitForJobRunToComplete waits for a JobRun to reach Completed Status
func WaitForJobRunToComplete(
t testing.TB,
store *strpkg.Store,
jr models.JobRun,
) models.JobRun {
t.Helper()
return WaitForJobRunStatus(t, store, jr, models.RunStatusCompleted)
}
// WaitForJobRunToPendBridge waits for a JobRun to reach PendingBridge Status
func WaitForJobRunToPendBridge(
t testing.TB,
store *strpkg.Store,
jr models.JobRun,
) models.JobRun {
t.Helper()
return WaitForJobRunStatus(t, store, jr, models.RunStatusPendingBridge)
}
// WaitForJobRunToPendIncomingConfirmations waits for a JobRun to reach PendingIncomingConfirmations Status
func WaitForJobRunToPendIncomingConfirmations(
t testing.TB,
store *strpkg.Store,
jr models.JobRun,
) models.JobRun {
t.Helper()
return WaitForJobRunStatus(t, store, jr, models.RunStatusPendingIncomingConfirmations)
}
// WaitForJobRunToPendOutgoingConfirmations waits for a JobRun to reach PendingOutgoingConfirmations Status
func WaitForJobRunToPendOutgoingConfirmations(
t testing.TB,
store *strpkg.Store,
jr models.JobRun,
) models.JobRun {
t.Helper()
return WaitForJobRunStatus(t, store, jr, models.RunStatusPendingOutgoingConfirmations)
}
func SendBlocksUntilComplete(
t testing.TB,
store *strpkg.Store,
jr models.JobRun,
blockCh chan<- *models.Head,
start int64,
gethClient *mocks.GethClient,
) models.JobRun {
t.Helper()
var err error
block := start
gomega.NewGomegaWithT(t).Eventually(func() models.RunStatus {
h := models.NewHead(big.NewInt(block), NewHash(), NewHash(), 0)
blockCh <- &h
block++
jr, err = store.Unscoped().FindJobRun(jr.ID)
assert.NoError(t, err)
st := jr.GetStatus()
return st
}, DBWaitTimeout, DBPollingInterval).Should(gomega.Equal(models.RunStatusCompleted))
return jr
}
// WaitForJobRunStatus waits for a JobRun to reach given status
func WaitForJobRunStatus(
t testing.TB,
store *strpkg.Store,
jr models.JobRun,
status models.RunStatus,
) models.JobRun {
t.Helper()
var err error
gomega.NewGomegaWithT(t).Eventually(func() models.RunStatus {
jr, err = store.Unscoped().FindJobRun(jr.ID)
assert.NoError(t, err)
st := jr.GetStatus()
return st
}, DBWaitTimeout, DBPollingInterval).Should(gomega.Equal(status))
return jr
}
// JobRunStays tests if a JobRun will consistently stay at the specified status
func JobRunStays(
t testing.TB,
store *strpkg.Store,
jr models.JobRun,
status models.RunStatus,
optionalDuration ...time.Duration,
) models.JobRun {
t.Helper()
duration := time.Second
if len(optionalDuration) > 0 {
duration = optionalDuration[0]
}
var err error
gomega.NewGomegaWithT(t).Consistently(func() models.RunStatus {
jr, err = store.FindJobRun(jr.ID)
assert.NoError(t, err)
return jr.GetStatus()
}, duration, DBPollingInterval).Should(gomega.Equal(status))
return jr
}
// JobRunStaysPendingIncomingConfirmations tests if a JobRun will stay at the PendingIncomingConfirmations Status
func JobRunStaysPendingIncomingConfirmations(
t testing.TB,
store *strpkg.Store,
jr models.JobRun,
) models.JobRun {
t.Helper()
return JobRunStays(t, store, jr, models.RunStatusPendingIncomingConfirmations)
}
// WaitForSpecError polls until the passed in jobID has count number
// of job spec errors.
func WaitForSpecError(t *testing.T, store *strpkg.Store, jobID models.JobID, count int) []models.JobSpecError {
t.Helper()
g := gomega.NewGomegaWithT(t)
var jse []models.JobSpecError
g.Eventually(func() []models.JobSpecError {
err := store.DB.
Where("job_spec_id = ?", jobID.String()).
Find(&jse).Error
assert.NoError(t, err)
return jse
}, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(count))
return jse
}
// WaitForSpecErrorV2 polls until the passed in jobID has count number
// of job spec errors.
func WaitForSpecErrorV2(t *testing.T, store *strpkg.Store, jobID int32, count int) []job.SpecError {
t.Helper()
g := gomega.NewGomegaWithT(t)
var jse []job.SpecError
g.Eventually(func() []job.SpecError {
err := store.DB.
Where("job_id = ?", jobID).
Find(&jse).Error
assert.NoError(t, err)
return jse
}, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(count))
return jse
}
// WaitForRuns waits for the wanted number of runs then returns a slice of the JobRuns
func WaitForRuns(t testing.TB, j models.JobSpec, store *strpkg.Store, want int) []models.JobRun {
t.Helper()
g := gomega.NewGomegaWithT(t)
var jrs []models.JobRun
var err error
if want == 0 {
g.Consistently(func() []models.JobRun {
jrs, err = store.JobRunsFor(j.ID)
assert.NoError(t, err)
return jrs
}, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(want))
} else {
g.Eventually(func() []models.JobRun {
jrs, err = store.JobRunsFor(j.ID)
assert.NoError(t, err)
return jrs
}, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(want))
}
return jrs
}
func WaitForPipelineComplete(t testing.TB, nodeID int, jobID int32, jo job.ORM, timeout, poll time.Duration) pipeline.Run {
t.Helper()
g := gomega.NewGomegaWithT(t)
var pr pipeline.Run
g.Eventually(func() *pipeline.Run {
prs, _, err := jo.PipelineRunsByJobID(jobID, 0, 1000)
assert.NoError(t, err)
for i := range prs {
if !prs[i].Outputs.Null {
if prs[i].Errors.HasError() {
return nil
}
pr = prs[i]
return &prs[i]
}
}
return nil
}, timeout, poll).ShouldNot(gomega.BeNil(), fmt.Sprintf("job %d on node %d not complete", jobID, nodeID))
return pr
}
// AssertRunsStays asserts that the number of job runs for a particular job remains at the provided value
func AssertRunsStays(t testing.TB, j models.JobSpec, store *strpkg.Store, want int) []models.JobRun {
t.Helper()
g := gomega.NewGomegaWithT(t)
var jrs []models.JobRun
var err error
g.Consistently(func() []models.JobRun {
jrs, err = store.JobRunsFor(j.ID)
assert.NoError(t, err)
return jrs
}, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(want))
return jrs
}
// AssertPipelineRunsStays asserts that the number of pipeline runs for a particular job remains at the provided value
func AssertPipelineRunsStays(t testing.TB, pipelineSpecID int32, store *strpkg.Store, want int) []pipeline.Run {
t.Helper()
g := gomega.NewGomegaWithT(t)
var prs []pipeline.Run
g.Consistently(func() []pipeline.Run {
err := store.DB.
Where("pipeline_spec_id = ?", pipelineSpecID).
Find(&prs).Error
assert.NoError(t, err)
return prs
}, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(want))
return prs
}
// WaitForRunsAtLeast waits for at least the passed number of runs to start.
func WaitForRunsAtLeast(t testing.TB, j models.JobSpec, store *strpkg.Store, want int) {
t.Helper()
g := gomega.NewGomegaWithT(t)
if want == 0 {
t.Fatal("must want more than 0 runs when waiting")
} else {
g.Eventually(func() int {
jrs, err := store.JobRunsFor(j.ID)
require.NoError(t, err)
return len(jrs)
}, DBWaitTimeout, DBPollingInterval).Should(gomega.BeNumerically(">=", want))
}
}
func WaitForEthTxAttemptsForEthTx(t testing.TB, store *strpkg.Store, ethTx models.EthTx) []models.EthTxAttempt {
t.Helper()
g := gomega.NewGomegaWithT(t)
var attempts []models.EthTxAttempt
var err error
g.Eventually(func() int {
err = store.DB.Order("created_at desc").Where("eth_tx_id = ?", ethTx.ID).Find(&attempts).Error
assert.NoError(t, err)
return len(attempts)
}, DBWaitTimeout, DBPollingInterval).Should(gomega.BeNumerically(">", 0))
return attempts
}
func WaitForEthTxAttemptCount(t testing.TB, store *strpkg.Store, want int) []models.EthTxAttempt {
t.Helper()
g := gomega.NewGomegaWithT(t)
var txas []models.EthTxAttempt
var err error
g.Eventually(func() []models.EthTxAttempt {
err = store.DB.Find(&txas).Error
assert.NoError(t, err)
return txas
}, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(want))
return txas
}
// AssertEthTxAttemptCountStays asserts that the number of tx attempts remains at the provided value
func AssertEthTxAttemptCountStays(t testing.TB, store *strpkg.Store, want int) []models.EthTxAttempt {
t.Helper()
g := gomega.NewGomegaWithT(t)
var txas []models.EthTxAttempt
var err error
g.Consistently(func() []models.EthTxAttempt {
err = store.DB.Find(&txas).Error
assert.NoError(t, err)
return txas
}, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(want))
return txas
}
// WaitForSyncEventCount checks if the sync event count eventually reaches
// the amount specified in the want parameter.
func WaitForSyncEventCount(
t testing.TB,
orm *orm.ORM,
want int,
) {
t.Helper()
gomega.NewGomegaWithT(t).Eventually(func() int {
count, err := orm.CountOf(&models.SyncEvent{})
assert.NoError(t, err)
return count
}, DBWaitTimeout, DBPollingInterval).Should(gomega.Equal(want))
}
// AssertSyncEventCountStays ensures that the event sync count stays consistent
// for a period of time
func AssertSyncEventCountStays(
t testing.TB,
orm *orm.ORM,
want int,
) {
t.Helper()
gomega.NewGomegaWithT(t).Consistently(func() int {
count, err := orm.CountOf(&models.SyncEvent{})
assert.NoError(t, err)
return count
}, DBWaitTimeout, DBPollingInterval).Should(gomega.Equal(want))
}
// ParseISO8601 parses the given time string, failing the test on error
func ParseISO8601(t testing.TB, s string) time.Time {
t.Helper()
tm, err := time.Parse(time.RFC3339Nano, s)
require.NoError(t, err)
return tm
}
// NullableTime will return a valid nullable time given time.Time
func NullableTime(t time.Time) null.Time {
return null.TimeFrom(t)
}
// ParseNullableTime parses the given time string into a null.Time, failing the test on error
func ParseNullableTime(t testing.TB, s string) null.Time {
t.Helper()
return NullableTime(ParseISO8601(t, s))
}
// Head converts the given value into a *models.Head
func Head(val interface{}) *models.Head {
var h models.Head
time := uint64(0)
switch t := val.(type) {
case int:
h = models.NewHead(big.NewInt(int64(t)), NewHash(), NewHash(), time)
case uint64:
h = models.NewHead(big.NewInt(int64(t)), NewHash(), NewHash(), time)
case int64:
h = models.NewHead(big.NewInt(t), NewHash(), NewHash(), time)
case *big.Int:
h = models.NewHead(t, NewHash(), NewHash(), time)
default:
logger.Panicf("Could not convert %v of type %T to Head", val, val)
}
return &h
}
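// Illustrative note (added commentary, not part of the original helpers):
// Head(42), Head(uint64(42)), Head(int64(42)) and Head(big.NewInt(42)) all
// yield a *models.Head with Number 42 and freshly generated random block and
// parent hashes.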
// TransactionsFromGasPrices returns transactions matching the given gas prices
func TransactionsFromGasPrices(gasPrices ...int64) []types.Transaction {
txs := make([]types.Transaction, len(gasPrices))
for i, gasPrice := range gasPrices {
txs[i] = *types.NewTransaction(0, common.Address{}, nil, 0, big.NewInt(gasPrice), nil)
}
return txs
}
// BlockWithTransactions returns a new ethereum block with transactions
// matching the given gas prices
func BlockWithTransactions(gasPrices ...int64) *types.Block {
txs := make([]*types.Transaction, len(gasPrices))
for i, gasPrice := range gasPrices {
txs[i] = types.NewTransaction(0, common.Address{}, nil, 0, big.NewInt(gasPrice), nil)
}
return types.NewBlock(&types.Header{}, txs, nil, nil, new(trie.Trie))
}
func StringToHash(s string) common.Hash {
return common.BytesToHash([]byte(s))
}
// AssertServerResponse is used to match against a client response and will report
// any errors returned if the request fails.
func AssertServerResponse(t testing.TB, resp *http.Response, expectedStatusCode int) {
t.Helper()
if resp.StatusCode == expectedStatusCode {
return
}
t.Logf("expected status code %s got %s", http.StatusText(expectedStatusCode), http.StatusText(resp.StatusCode))
if resp.StatusCode >= 300 && resp.StatusCode < 600 {
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
assert.FailNowf(t, "Unable to read body", err.Error())
}
var result map[string][]string
err = json.Unmarshal(b, &result)
if err != nil {
assert.FailNowf(t, fmt.Sprintf("Unable to unmarshal json from body '%s'", string(b)), err.Error())
}
assert.FailNowf(t, "Request failed", "Expected %d response, got %d with errors: %s", expectedStatusCode, resp.StatusCode, result["errors"])
} else {
assert.FailNowf(t, "Unexpected response", "Expected %d response, got %d", expectedStatusCode, resp.StatusCode)
}
}
func DecodeSessionCookie(value string) (string, error) {
var decrypted map[interface{}]interface{}
codecs := securecookie.CodecsFromPairs([]byte(SessionSecret))
err := securecookie.DecodeMulti(web.SessionName, value, &decrypted, codecs...)
if err != nil {
return "", err
}
value, ok := decrypted[web.SessionIDKey].(string)
if !ok {
return "", fmt.Errorf("decrypted[web.SessionIDKey] is not a string (%v)", value)
}
return value, nil
}
func MustGenerateSessionCookie(value string) *http.Cookie {
decrypted := map[interface{}]interface{}{web.SessionIDKey: value}
codecs := securecookie.CodecsFromPairs([]byte(SessionSecret))
encoded, err := securecookie.EncodeMulti(web.SessionName, decrypted, codecs...)
if err != nil {
logger.Panic(err)
}
return sessions.NewCookie(web.SessionName, encoded, &sessions.Options{})
}
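// Illustrative sketch (added commentary, not from the original file): the two
// helpers above round-trip a session ID through the secure cookie codec, since
// both use the package's SessionSecret, e.g.
//   cookie := MustGenerateSessionCookie("my-session-id")
//   id, err := DecodeSessionCookie(cookie.Value)
//   // id == "my-session-id", err == nil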
func NormalizedJSON(t testing.TB, input []byte) string {
t.Helper()
normalized, err := utils.NormalizedJSON(input)
require.NoError(t, err)
return normalized
}
func AssertError(t testing.TB, want bool, err error) {
t.Helper()
if want {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
}
func UnauthenticatedPost(t testing.TB, url string, body io.Reader, headers map[string]string) (*http.Response, func()) {
t.Helper()
client := http.Client{}
request, err := http.NewRequest("POST", url, body)
require.NoError(t, err)
request.Header.Set("Content-Type", "application/json")
for key, value := range headers {
request.Header.Add(key, value)
}
resp, err := client.Do(request)
require.NoError(t, err)
return resp, func() { resp.Body.Close() }
}
func UnauthenticatedPatch(t testing.TB, url string, body io.Reader, headers map[string]string) (*http.Response, func()) {
t.Helper()
client := http.Client{}
request, err := http.NewRequest("PATCH", url, body)
require.NoError(t, err)
request.Header.Set("Content-Type", "application/json")
for key, value := range headers {
request.Header.Add(key, value)
}
resp, err := client.Do(request)
require.NoError(t, err)
return resp, func() { resp.Body.Close() }
}
func MustParseDuration(t testing.TB, durationStr string) time.Duration {
t.Helper()
duration, err := time.ParseDuration(durationStr)
require.NoError(t, err)
return duration
}
func NewSession(optionalSessionID ...string) models.Session {
session := models.NewSession()
if len(optionalSessionID) > 0 {
session.ID = optionalSessionID[0]
}
return session
}
func AllExternalInitiators(t testing.TB, store *strpkg.Store) []models.ExternalInitiator {
t.Helper()
var all []models.ExternalInitiator
err := store.RawDBWithAdvisoryLock(func(db *gorm.DB) error {
return db.Find(&all).Error
})
require.NoError(t, err)
return all
}
func AllJobs(t testing.TB, store *strpkg.Store) []models.JobSpec {
t.Helper()
var all []models.JobSpec
err := store.ORM.RawDBWithAdvisoryLock(func(db *gorm.DB) error {
return db.Find(&all).Error
})
require.NoError(t, err)
return all
}
func MustAllJobsWithStatus(t testing.TB, store *strpkg.Store, statuses ...models.RunStatus) []*models.JobRun {
t.Helper()
var runs []*models.JobRun
err := store.UnscopedJobRunsWithStatus(func(jr *models.JobRun) {
runs = append(runs, jr)
}, statuses...)
require.NoError(t, err)
return runs
}
func GetLastEthTxAttempt(t testing.TB, store *strpkg.Store) models.EthTxAttempt {
t.Helper()
var txa models.EthTxAttempt
var count int64
err := store.ORM.RawDBWithAdvisoryLock(func(db *gorm.DB) error {
return db.Order("created_at desc").First(&txa).Count(&count).Error
})
require.NoError(t, err)
require.NotEqual(t, 0, count)
return txa
}
type Awaiter chan struct{}
func NewAwaiter() Awaiter { return make(Awaiter) }
func (a Awaiter) ItHappened() { close(a) }
func (a Awaiter) AwaitOrFail(t testing.TB, durationParams ...time.Duration) {
duration := 10 * time.Second
if len(durationParams) > 0 {
duration = durationParams[0]
}
select {
case <-a:
case <-time.After(duration):
t.Fatal("timed out waiting for Awaiter to get ItHappened")
}
}
func CallbackOrTimeout(t testing.TB, msg string, callback func(), durationParams ...time.Duration) {
t.Helper()
duration := 100 * time.Millisecond
if len(durationParams) > 0 {
duration = durationParams[0]
}
done := make(chan struct{})
go func() {
callback()
close(done)
}()
select {
case <-done:
case <-time.After(duration):
t.Fatal(fmt.Sprintf("CallbackOrTimeout: %s timed out", msg))
}
}
func MustParseURL(input string) *url.URL {
u, err := url.Parse(input)
if err != nil {
logger.Panic(err)
}
return u
}
func MustResultString(t *testing.T, input models.RunResult) string {
result := input.Data.Get("result")
require.Equal(t, gjson.String, result.Type, fmt.Sprintf("result type %s is not string", result.Type))
return result.String()
}
// GenericEncode eth encodes values based on the provided types
func GenericEncode(types []string, values ...interface{}) ([]byte, error) {
if len(values) != len(types) {
return nil, errors.New("must include same number of values as types")
}
var args abi.Arguments
for _, t := range types {
ty, _ := abi.NewType(t, "", nil)
args = append(args, abi.Argument{Type: ty})
}
out, err := args.PackValues(values)
if err != nil {
return nil, err
}
return out, nil
}
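// Illustrative usage sketch (added commentary; the argument values are made up):
//   encoded, err := GenericEncode(
//       []string{"uint256", "address"},
//       big.NewInt(42),
//       common.HexToAddress("0x0000000000000000000000000000000000000001"),
//   )
// would ABI-encode the two values into a single 64-byte payload.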
func MustGenericEncode(types []string, values ...interface{}) []byte {
if len(values) != len(types) {
panic("must include same number of values as types")
}
var args abi.Arguments
for _, t := range types {
ty, _ := abi.NewType(t, "", nil)
args = append(args, abi.Argument{Type: ty})
}
out, err := args.PackValues(values)
if err != nil {
panic(err)
}
return out
}
func MakeRoundStateReturnData(
roundID uint64,
eligible bool,
answer, startAt, timeout, availableFunds, paymentAmount, oracleCount uint64,
) []byte {
var data []byte
if eligible {
data = append(data, utils.EVMWordUint64(1)...)
} else {
data = append(data, utils.EVMWordUint64(0)...)
}
data = append(data, utils.EVMWordUint64(roundID)...)
data = append(data, utils.EVMWordUint64(answer)...)
data = append(data, utils.EVMWordUint64(startAt)...)
data = append(data, utils.EVMWordUint64(timeout)...)
data = append(data, utils.EVMWordUint64(availableFunds)...)
data = append(data, utils.EVMWordUint64(oracleCount)...)
data = append(data, utils.EVMWordUint64(paymentAmount)...)
return data
}
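// Note (added commentary): each appended value above is a 32-byte EVM word, so
// the returned slice is 8 words (256 bytes) laid out as eligible, roundID,
// answer, startAt, timeout, availableFunds, oracleCount, paymentAmount, which
// presumably mirrors the FluxAggregator round state return layout.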
var fluxAggregatorABI = eth.MustGetABI(flux_aggregator_wrapper.FluxAggregatorABI)
func MockFluxAggCall(client *mocks.GethClient, address common.Address, funcName string) *mock.Call {
funcSig := hexutil.Encode(fluxAggregatorABI.Methods[funcName].ID)
if len(funcSig) != 10 {
panic(fmt.Sprintf("Unable to find FluxAgg function with name %s", funcName))
}
return client.On(
"CallContract",
mock.Anything,
mock.MatchedBy(func(callArgs ethereum.CallMsg) bool {
return *callArgs.To == address &&
hexutil.Encode(callArgs.Data)[0:10] == funcSig
}),
mock.Anything)
}
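// Note (added commentary): funcSig above is the 0x-prefixed 4-byte function
// selector (hence the length-10 check), so the mock only intercepts
// CallContract requests whose calldata starts with that selector and whose
// destination matches the given aggregator address.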
// EthereumLogIterator is the interface provided by gethwrapper representations of EVM
// logs.
type EthereumLogIterator interface{ Next() bool }
// GetLogs drains the given iterator of EVM log representations. Since those log
// representations don't fit into a type hierarchy, this API is a bit awkward.
// It returns the logs as a slice of blank interface{}s, and if rv is non-nil,
// it must be a pointer to a slice for elements of the same type as the logs,
// in which case GetLogs will append the logs to it.
func GetLogs(t *testing.T, rv interface{}, logs EthereumLogIterator) []interface{} {
v := reflect.ValueOf(rv)
require.True(t, rv == nil ||
v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Slice,
"must pass a slice to receive logs")
var e reflect.Value
if rv != nil {
e = v.Elem()
}
var irv []interface{}
for logs.Next() {
log := reflect.Indirect(reflect.ValueOf(logs)).FieldByName("Event")
if v.Kind() == reflect.Ptr {
e.Set(reflect.Append(e, log))
}
irv = append(irv, log.Interface())
}
return irv
}
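// Illustrative usage sketch (added commentary; the wrapper type names below are
// hypothetical examples of gethwrapper-generated types):
//   var rounds []*flux_aggregator_wrapper.FluxAggregatorNewRound
//   raw := GetLogs(t, &rounds, iter) // iter: the corresponding event iterator
//   // rounds now holds the typed events; raw holds the same values as []interface{}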
func MakeConfigDigest(t *testing.T) ocrtypes.ConfigDigest {
t.Helper()
b := make([]byte, 16)
/* #nosec G404 */
_, err := rand.Read(b)
if err != nil {
t.Fatal(err)
}
return MustBytesToConfigDigest(t, b)
}
func MustBytesToConfigDigest(t *testing.T, b []byte) ocrtypes.ConfigDigest {
t.Helper()
configDigest, err := ocrtypes.BytesToConfigDigest(b)
if err != nil {
t.Fatal(err)
}
return configDigest
}
// MockApplicationEthCalls mocks all the eth calls that the chainlink application
// makes as standard when starting and stopping
func MockApplicationEthCalls(t *testing.T, app *TestApplication, ethClient *mocks.Client) (verify func()) {
t.Helper()
// Start
ethClient.On("Dial", mock.Anything).Return(nil)
sub := new(mocks.Subscription)
sub.On("Err").Return(nil)
ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil)
ethClient.On("ChainID", mock.Anything).Return(app.Store.Config.ChainID(), nil)
ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil).Maybe()
// Stop
sub.On("Unsubscribe").Return(nil)
return func() {
ethClient.AssertExpectations(t)
}
}
func MockSubscribeToLogsCh(gethClient *mocks.GethClient, sub *mocks.Subscription) chan chan<- models.Log {
logsCh := make(chan chan<- models.Log, 1)
gethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything).
Return(sub, nil).
Run(func(args mock.Arguments) { // context.Context, ethereum.FilterQuery, chan<- types.Log
logsCh <- args.Get(2).(chan<- types.Log)
})
return logsCh
}
func MustNewJSONSerializable(t *testing.T, s string) pipeline.JSONSerializable {
t.Helper()
js := new(pipeline.JSONSerializable)
err := js.UnmarshalJSON([]byte(s))
require.NoError(t, err)
return *js
}
func BatchElemMatchesHash(req rpc.BatchElem, hash common.Hash) bool {
return req.Method == "eth_getTransactionReceipt" &&
len(req.Args) == 1 && req.Args[0] == hash
}
type SimulateIncomingHeadsArgs struct {
StartBlock, EndBlock int64
BackfillDepth int64
Interval time.Duration
Timeout time.Duration
HeadTrackables []strpkg.HeadTrackable
Hashes map[int64]common.Hash
}
func SimulateIncomingHeads(t *testing.T, args SimulateIncomingHeadsArgs) (cleanup func()) {
t.Helper()
if args.BackfillDepth == 0 {
t.Fatal("BackfillDepth must be > 0")
}
// Build the full chain of heads
heads := make(map[int64]*models.Head)
first := args.StartBlock - args.BackfillDepth
if first < 0 {
first = 0
}
last := args.EndBlock
if last == 0 {
last = args.StartBlock + 300 // If no .EndBlock is provided, assume we want 300 heads
}
for i := first; i <= last; i++ {
// If a particular block should have a particular
// hash, use that. Otherwise, generate a random one.
var hash common.Hash
if args.Hashes != nil {
if h, exists := args.Hashes[i]; exists {
hash = h
}
}
if hash == (common.Hash{}) {
hash = NewHash()
}
heads[i] = &models.Head{Hash: hash, Number: i}
if i > first {
heads[i].Parent = heads[i-1]
}
}
if args.Timeout == 0 {
args.Timeout = 60 * time.Second
}
if args.Interval == 0 {
args.Interval = 250 * time.Millisecond
}
ctx, cancel := context.WithTimeout(context.Background(), args.Timeout)
defer cancel()
chTimeout := time.After(args.Timeout)
chDone := make(chan struct{})
go func() {
current := int64(args.StartBlock)
for {
select {
case <-chDone:
return
case <-chTimeout:
return
default:
// Trim chain to backfill depth
ptr := heads[current]
for i := int64(0); i < args.BackfillDepth && ptr.Parent != nil; i++ {
ptr = ptr.Parent
}
ptr.Parent = nil
for _, ht := range args.HeadTrackables {
ht.OnNewLongestChain(ctx, *heads[current])
}
if args.EndBlock >= 0 && current == args.EndBlock {
return
}
current++
time.Sleep(args.Interval)
}
}
}()
var once sync.Once
return func() {
once.Do(func() {
close(chDone)
cancel()
})
}
}
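// Illustrative usage sketch (added commentary; headTracker is a hypothetical
// strpkg.HeadTrackable implementation):
//   cleanup := SimulateIncomingHeads(t, SimulateIncomingHeadsArgs{
//       StartBlock:     10,
//       EndBlock:       20,
//       BackfillDepth:  5,
//       Interval:       50 * time.Millisecond,
//       HeadTrackables: []strpkg.HeadTrackable{headTracker},
//   })
//   defer cleanup()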
type HeadTrackableFunc func(context.Context, models.Head)
func (HeadTrackableFunc) Connect(*models.Head) error { return nil }
func (HeadTrackableFunc) Disconnect() {}
func (fn HeadTrackableFunc) OnNewLongestChain(ctx context.Context, head models.Head) {
fn(ctx, head)
}
type testifyExpectationsAsserter interface {
AssertExpectations(t mock.TestingT) bool
}
type fakeT struct{}
func (ft fakeT) Logf(format string, args ...interface{}) {}
func (ft fakeT) Errorf(format string, args ...interface{}) {}
func (ft fakeT) FailNow() {}
func EventuallyExpectationsMet(t *testing.T, mock testifyExpectationsAsserter, timeout time.Duration, interval time.Duration) {
t.Helper()
chTimeout := time.After(timeout)
for {
var ft fakeT
success := mock.AssertExpectations(ft)
if success {
return
}
select {
case <-chTimeout:
mock.AssertExpectations(t)
t.FailNow()
default:
time.Sleep(interval)
}
}
}
func AssertCount(t *testing.T, store *strpkg.Store, model interface{}, expected int64) {
t.Helper()
var count int64
err := store.DB.Model(model).Count(&count).Error
require.NoError(t, err)
require.Equal(t, expected, count)
}
func WaitForCount(t testing.TB, store *strpkg.Store, model interface{}, want int64) {
t.Helper()
g := gomega.NewGomegaWithT(t)
var count int64
var err error
g.Eventually(func() int64 {
err = store.DB.Model(model).Count(&count).Error
assert.NoError(t, err)
return count
}, DBWaitTimeout, DBPollingInterval).Should(gomega.Equal(want))
}
func AssertCountStays(t testing.TB, store *strpkg.Store, model interface{}, want int64) {
t.Helper()
g := gomega.NewGomegaWithT(t)
var count int64
var err error
g.Consistently(func() int64 {
err = store.DB.Model(model).Count(&count).Error
assert.NoError(t, err)
return count
}, AsertNoActionTimeout, DBPollingInterval).Should(gomega.Equal(want))
}
func AssertRecordEventually(t *testing.T, store *strpkg.Store, model interface{}, check func() bool) {
t.Helper()
g := gomega.NewGomegaWithT(t)
g.Eventually(func() bool {
err := store.DB.Find(model).Error
require.NoError(t, err, "unable to find record in DB")
return check()
}, DBWaitTimeout, DBPollingInterval).Should(gomega.BeTrue())
}
|
[
"\"LOG_LEVEL\""
] |
[] |
[
"LOG_LEVEL"
] |
[]
|
["LOG_LEVEL"]
|
go
| 1 | 0 | |
dask_kubernetes/core.py
|
import asyncio
import copy
import getpass
import logging
import os
import string
import time
import uuid
from weakref import finalize
try:
import yaml
except ImportError:
yaml = False
import dask
import dask.distributed
import distributed.security
from distributed.deploy import SpecCluster, ProcessInterface
from distributed.utils import Log, Logs
import kubernetes_asyncio as kubernetes
from kubernetes_asyncio.client.rest import ApiException
from .objects import (
make_pod_from_dict,
make_service_from_dict,
make_pdb_from_dict,
clean_pod_template,
clean_service_template,
clean_pdb_template,
)
from .auth import ClusterAuth
logger = logging.getLogger(__name__)
SCHEDULER_PORT = 8786
class Pod(ProcessInterface):
"""A superclass for Kubernetes Pods
See Also
--------
Worker
Scheduler
"""
def __init__(
self,
cluster,
core_api,
policy_api,
pod_template,
namespace,
loop=None,
**kwargs
):
self._pod = None
self.cluster = cluster
self.core_api = core_api
self.policy_api = policy_api
self.pod_template = copy.deepcopy(pod_template)
self.base_labels = self.pod_template.metadata.labels
self.namespace = namespace
self.name = None
self.loop = loop
self.kwargs = kwargs
super().__init__()
@property
def cluster_name(self):
return self.pod_template.metadata.labels["dask.org/cluster-name"]
async def start(self, **kwargs):
retry_count = 0 # Retry 10 times
while True:
try:
self._pod = await self.core_api.create_namespaced_pod(
self.namespace, self.pod_template
)
return await super().start(**kwargs)
except ApiException as e:
if retry_count < 10:
logger.debug("Error when creating pod, retrying... - %s", str(e))
await asyncio.sleep(1)
retry_count += 1
else:
raise e
async def close(self, **kwargs):
if self._pod:
name, namespace = self._pod.metadata.name, self.namespace
try:
await self.core_api.delete_namespaced_pod(name, namespace)
except ApiException as e:
if e.reason == "Not Found":
logger.debug(
"Pod %s in namespace %s has been deleated already.",
name,
namespace,
)
else:
raise
await super().close(**kwargs)
async def logs(self):
try:
log = await self.core_api.read_namespaced_pod_log(
self._pod.metadata.name, self.namespace
)
except ApiException as e:
if "waiting to start" in str(e):
log = ""
else:
raise e
return Log(log)
async def describe_pod(self):
self._pod = await self.core_api.read_namespaced_pod(
self._pod.metadata.name, self.namespace
)
return self._pod
def __repr__(self):
return "<Pod %s: status=%s>" % (type(self).__name__, self.status)
class Worker(Pod):
"""A Remote Dask Worker controled by Kubernetes
Parameters
----------
scheduler: str
The address of the scheduler
name (optional):
The name passed to the dask-worker CLI at creation time.
"""
def __init__(self, scheduler: str, name=None, **kwargs):
super().__init__(**kwargs)
self.scheduler = scheduler
self.pod_template.metadata.labels["dask.org/component"] = "worker"
self.pod_template.spec.containers[0].env.append(
kubernetes.client.V1EnvVar(
name="DASK_SCHEDULER_ADDRESS", value=self.scheduler
)
)
if name is not None:
worker_name_args = ["--name", str(name)]
self.pod_template.spec.containers[0].args += worker_name_args
class Scheduler(Pod):
"""A Remote Dask Scheduler controled by Kubernetes
Parameters
----------
idle_timeout: str, optional
The scheduler task will exit after this amount of time
if there are no requests from the client. Default is to
never timeout.
service_wait_timeout_s: int (optional)
Timeout, in seconds, to wait for the remote scheduler service to be ready.
Defaults to 30 seconds.
Set to 0 to disable the timeout (not recommended).
"""
def __init__(self, idle_timeout: str, service_wait_timeout_s: int = None, **kwargs):
super().__init__(**kwargs)
self.cluster._log("Creating scheduler pod on cluster. This may take some time.")
self.service = None
self._idle_timeout = idle_timeout
self._service_wait_timeout_s = service_wait_timeout_s
if self._idle_timeout is not None:
self.pod_template.spec.containers[0].args += [
"--idle-timeout",
self._idle_timeout,
]
self.pdb = None
async def start(self, **kwargs):
await super().start(**kwargs)
while (await self.describe_pod()).status.phase == "Pending":
await asyncio.sleep(0.1)
while self.address is None:
logs = await self.logs()
for line in logs.splitlines():
if "Scheduler at:" in line:
self.address = line.split("Scheduler at:")[1].strip()
await asyncio.sleep(0.1)
self.service = await self._create_service()
self.address = "tcp://{name}.{namespace}:{port}".format(
name=self.service.metadata.name,
namespace=self.namespace,
port=SCHEDULER_PORT,
)
if self.service.spec.type == "LoadBalancer":
# Wait for load balancer to be assigned
start = time.time()
while self.service.status.load_balancer.ingress is None:
if (
self._service_wait_timeout_s > 0
and time.time() > start + self._service_wait_timeout_s
):
raise asyncio.TimeoutError(
"Timed out waiting for Load Balancer to be provisioned."
)
self.service = await self.core_api.read_namespaced_service(
self.cluster_name, self.namespace
)
await asyncio.sleep(0.2)
[loadbalancer_ingress] = self.service.status.load_balancer.ingress
loadbalancer_host = loadbalancer_ingress.hostname or loadbalancer_ingress.ip
self.external_address = "tcp://{host}:{port}".format(
host=loadbalancer_host, port=SCHEDULER_PORT
)
# FIXME Set external address when using nodeport service type
# FIXME Create an optional Ingress just in case folks want to configure one
self.pdb = await self._create_pdb()
async def close(self, **kwargs):
if self.service:
await self.core_api.delete_namespaced_service(
self.cluster_name, self.namespace
)
if self.pdb:
await self.policy_api.delete_namespaced_pod_disruption_budget(
self.cluster_name, self.namespace
)
await super().close(**kwargs)
async def _create_service(self):
service_template_dict = dask.config.get("kubernetes.scheduler-service-template")
self.service_template = clean_service_template(
make_service_from_dict(service_template_dict)
)
self.service_template.metadata.name = self.cluster_name
self.service_template.metadata.labels = copy.deepcopy(self.base_labels)
self.service_template.spec.selector["dask.org/cluster-name"] = self.cluster_name
if self.service_template.spec.type is None:
self.service_template.spec.type = dask.config.get(
"kubernetes.scheduler-service-type"
)
await self.core_api.create_namespaced_service(
self.namespace, self.service_template
)
return await self.core_api.read_namespaced_service(
self.cluster_name, self.namespace
)
async def _create_pdb(self):
pdb_template_dict = dask.config.get("kubernetes.scheduler-pdb-template")
self.pdb_template = clean_pdb_template(make_pdb_from_dict(pdb_template_dict))
self.pdb_template.metadata.name = self.cluster_name
self.pdb_template.spec.labels = copy.deepcopy(self.base_labels)
self.pdb_template.spec.selector.match_labels[
"dask.org/cluster-name"
] = self.cluster_name
await self.policy_api.create_namespaced_pod_disruption_budget(
self.namespace, self.pdb_template
)
return await self.policy_api.read_namespaced_pod_disruption_budget(
self.cluster_name, self.namespace
)
class KubeCluster(SpecCluster):
"""Launch a Dask cluster on Kubernetes
This starts a local Dask scheduler and then dynamically launches
Dask workers on a Kubernetes cluster. The Kubernetes cluster is taken
to be either the current one on which this code is running, or as a
fallback, the default one configured in a kubeconfig file.
**Environments**
Your worker pod image should have a similar environment to your local
environment, including versions of Python, dask, cloudpickle, and any
libraries that you may wish to use (like NumPy, Pandas, or Scikit-Learn).
See examples below for suggestions on how to manage and check for this.
**Network**
Since the Dask scheduler is launched locally, for it to work, we need to
be able to open network connections between this local node and all the
worker nodes on the Kubernetes cluster. If the current process is not
already on a Kubernetes node, some network configuration will likely be
required to make this work.
**Resources**
Your Kubernetes resource limits and requests should match the
``--memory-limit`` and ``--nthreads`` parameters given to the
``dask-worker`` command.
Parameters
----------
pod_template: kubernetes.client.V1Pod
A Kubernetes specification for a Pod for a dask worker.
scheduler_pod_template: kubernetes.client.V1Pod (optional)
A Kubernetes specification for a Pod for a dask scheduler.
Defaults to the pod_template.
name: str (optional)
Name given to the pods. Defaults to ``dask-$USER-random``
namespace: str (optional)
Namespace in which to launch the workers.
Defaults to current namespace if available or "default"
n_workers: int
Number of workers on initial launch.
Use ``scale`` to change this number in the future
env: Dict[str, str]
Dictionary of environment variables to pass to worker pod
host: str
Listen address for local scheduler. Defaults to 0.0.0.0
port: int
Port of local scheduler
auth: List[ClusterAuth] (optional)
Configuration methods to attempt in order. Defaults to
``[InCluster(), KubeConfig()]``.
idle_timeout: str (optional)
The scheduler task will exit after this amount of time
if there are no requests from the client. Default is to
never timeout.
scheduler_service_wait_timeout: int (optional)
Timeout, in seconds, to wait for the remote scheduler service to be ready.
Defaults to 30 seconds.
Set to 0 to disable the timeout (not recommended).
deploy_mode: str (optional)
Run the scheduler as "local" or "remote".
Defaults to ``"local"``.
**kwargs: dict
Additional keyword arguments to pass to LocalCluster
Examples
--------
>>> from dask_kubernetes import KubeCluster, make_pod_spec
>>> pod_spec = make_pod_spec(image='daskdev/dask:latest',
... memory_limit='4G', memory_request='4G',
... cpu_limit=1, cpu_request=1,
... env={'EXTRA_PIP_PACKAGES': 'fastparquet git+https://github.com/dask/distributed'})
>>> cluster = KubeCluster(pod_spec)
>>> cluster.scale(10)
You can also create clusters with worker pod specifications as dictionaries
or stored in YAML files
>>> cluster = KubeCluster.from_yaml('worker-template.yml')
>>> cluster = KubeCluster.from_dict({...})
Rather than explicitly setting a number of workers you can also ask the
cluster to allocate workers dynamically based on current workload
>>> cluster.adapt()
You can pass this cluster directly to a Dask client
>>> from dask.distributed import Client
>>> client = Client(cluster)
You can verify that your local environment matches your worker environments
by calling ``client.get_versions(check=True)``. This will raise an
informative error if versions do not match.
>>> client.get_versions(check=True)
The ``daskdev/dask`` docker images support ``EXTRA_PIP_PACKAGES``,
``EXTRA_APT_PACKAGES`` and ``EXTRA_CONDA_PACKAGES`` environment variables
to help with small adjustments to the worker environments. We recommend
the use of pip over conda in this case due to a much shorter startup time.
These environment variables can be modified directly from the KubeCluster
constructor methods using the ``env=`` keyword. You may list as many
packages as you like in a single string like the following:
>>> pip = 'pyarrow gcsfs git+https://github.com/dask/distributed'
>>> conda = '-c conda-forge scikit-learn'
>>> KubeCluster.from_yaml(..., env={'EXTRA_PIP_PACKAGES': pip,
... 'EXTRA_CONDA_PACKAGES': conda})
You can also start a KubeCluster with no arguments *if* the worker template
is specified in the Dask config files, either as a full template in
``kubernetes.worker-template`` or a path to a YAML file in
``kubernetes.worker-template-path``.
See https://docs.dask.org/en/latest/configuration.html for more
information about setting configuration values.::
$ export DASK_KUBERNETES__WORKER_TEMPLATE_PATH=worker_template.yaml
>>> cluster = KubeCluster() # automatically finds 'worker_template.yaml'
See Also
--------
KubeCluster.from_yaml
KubeCluster.from_dict
KubeCluster.adapt
"""
def __init__(
self,
pod_template=None,
name=None,
namespace=None,
n_workers=None,
host=None,
port=None,
env=None,
auth=ClusterAuth.DEFAULT,
idle_timeout=None,
deploy_mode=None,
interface=None,
protocol=None,
dashboard_address=None,
security=None,
scheduler_service_wait_timeout=None,
scheduler_pod_template=None,
**kwargs
):
self.pod_template = pod_template
self.scheduler_pod_template = scheduler_pod_template
self._generate_name = name
self._namespace = namespace
self._n_workers = n_workers
self._idle_timeout = idle_timeout
self._deploy_mode = deploy_mode
self._protocol = protocol
self._interface = interface
self._dashboard_address = dashboard_address
self._scheduler_service_wait_timeout = scheduler_service_wait_timeout
self.security = security
if self.security and not isinstance(
self.security, distributed.security.Security
):
raise RuntimeError(
"Security object is not a valid distributed.security.Security object"
)
self.host = host
self.port = port
self.env = env
self.auth = auth
self.kwargs = kwargs
super().__init__(**self.kwargs)
def _get_pod_template(self, pod_template, pod_type):
if not pod_template and dask.config.get(
"kubernetes.{}-template".format(pod_type), None
):
d = dask.config.get("kubernetes.{}-template".format(pod_type))
d = dask.config.expand_environment_variables(d)
pod_template = make_pod_from_dict(d)
if not pod_template and dask.config.get(
"kubernetes.{}-template-path".format(pod_type), None
):
import yaml
fn = dask.config.get("kubernetes.{}-template-path".format(pod_type))
fn = fn.format(**os.environ)
with open(fn) as f:
d = yaml.safe_load(f)
d = dask.config.expand_environment_variables(d)
pod_template = make_pod_from_dict(d)
return pod_template
def _fill_pod_templates(self, pod_template, pod_type):
pod_template = copy.deepcopy(pod_template)
# Default labels that can't be overwritten
pod_template.metadata.labels["dask.org/cluster-name"] = self._generate_name
pod_template.metadata.labels["dask.org/component"] = pod_type
pod_template.metadata.labels["user"] = escape(getpass.getuser())
pod_template.metadata.labels["app"] = "dask"
pod_template.metadata.namespace = self._namespace
if self.env:
pod_template.spec.containers[0].env.extend(
[
kubernetes.client.V1EnvVar(name=k, value=str(v))
for k, v in self.env.items()
]
)
pod_template.metadata.generate_name = self._generate_name
return pod_template
async def _start(self):
self._generate_name = self._generate_name or dask.config.get("kubernetes.name")
self._namespace = self._namespace or dask.config.get("kubernetes.namespace")
self._idle_timeout = self._idle_timeout or dask.config.get(
"kubernetes.idle-timeout"
)
self._scheduler_service_wait_timeout = (
self._scheduler_service_wait_timeout
or dask.config.get("kubernetes.scheduler-service-wait-timeout")
)
self._deploy_mode = self._deploy_mode or dask.config.get(
"kubernetes.deploy-mode"
)
self._n_workers = (
self._n_workers
if self._n_workers is not None
else dask.config.get("kubernetes.count.start")
)
self.host = self.host or dask.config.get("kubernetes.host")
self.port = (
self.port if self.port is not None else dask.config.get("kubernetes.port")
)
self._protocol = self._protocol or dask.config.get("kubernetes.protocol")
self._interface = self._interface or dask.config.get("kubernetes.interface")
self._dashboard_address = self._dashboard_address or dask.config.get(
"kubernetes.dashboard_address"
)
self.env = (
self.env if self.env is not None else dask.config.get("kubernetes.env")
)
self.pod_template = self._get_pod_template(self.pod_template, pod_type="worker")
self.scheduler_pod_template = self._get_pod_template(
self.scheduler_pod_template, pod_type="scheduler"
)
if not self.pod_template:
msg = (
"Worker pod specification not provided. See KubeCluster "
"docstring for ways to specify workers"
)
raise ValueError(msg)
base_pod_template = self.pod_template
self.pod_template = clean_pod_template(self.pod_template, pod_type="worker")
if not self.scheduler_pod_template:
self.scheduler_pod_template = base_pod_template
self.scheduler_pod_template.spec.containers[0].args = ["dask-scheduler"]
self.scheduler_pod_template = clean_pod_template(
self.scheduler_pod_template, pod_type="scheduler"
)
await ClusterAuth.load_first(self.auth)
self.core_api = kubernetes.client.CoreV1Api()
self.policy_api = kubernetes.client.PolicyV1beta1Api()
if self._namespace is None:
self._namespace = _namespace_default()
self._generate_name = self._generate_name.format(
user=getpass.getuser(), uuid=str(uuid.uuid4())[:10], **os.environ
)
self._generate_name = escape(self._generate_name)
self.pod_template = self._fill_pod_templates(
self.pod_template, pod_type="worker"
)
self.scheduler_pod_template = self._fill_pod_templates(
self.scheduler_pod_template, pod_type="scheduler"
)
finalize(
self, _cleanup_resources, self._namespace, self.pod_template.metadata.labels
)
common_options = {
"cluster": self,
"core_api": self.core_api,
"policy_api": self.policy_api,
"namespace": self._namespace,
"loop": self.loop,
}
if self._deploy_mode == "local":
self.scheduler_spec = {
"cls": dask.distributed.Scheduler,
"options": {
"protocol": self._protocol,
"interface": self._interface,
"host": self.host,
"port": self.port,
"dashboard_address": self._dashboard_address,
"security": self.security,
},
}
elif self._deploy_mode == "remote":
self.scheduler_spec = {
"cls": Scheduler,
"options": {
"idle_timeout": self._idle_timeout,
"service_wait_timeout_s": self._scheduler_service_wait_timeout,
"pod_template": self.scheduler_pod_template,
**common_options,
},
}
else:
raise RuntimeError("Unknown deploy mode %s" % self._deploy_mode)
self.new_spec = {
"cls": Worker,
"options": {"pod_template": self.pod_template, **common_options},
}
self.worker_spec = {i: self.new_spec for i in range(self._n_workers)}
await super()._start()
@classmethod
def from_dict(cls, pod_spec, **kwargs):
"""Create cluster with worker pod spec defined by Python dictionary
Examples
--------
>>> spec = {
... 'metadata': {},
... 'spec': {
... 'containers': [{
... 'args': ['dask-worker', '$(DASK_SCHEDULER_ADDRESS)',
... '--nthreads', '1',
... '--death-timeout', '60'],
... 'command': None,
... 'image': 'daskdev/dask:latest',
... 'name': 'dask-worker',
... }],
... 'restartPolicy': 'Never',
... }
... }
>>> cluster = KubeCluster.from_dict(spec, namespace='my-ns') # doctest: +SKIP
See Also
--------
KubeCluster.from_yaml
"""
return cls(make_pod_from_dict(pod_spec), **kwargs)
@classmethod
def from_yaml(cls, yaml_path, **kwargs):
"""Create cluster with worker pod spec defined by a YAML file
We can start a cluster with pods defined in an accompanying YAML file
like the following:
.. code-block:: yaml
kind: Pod
metadata:
labels:
foo: bar
baz: quux
spec:
containers:
- image: daskdev/dask:latest
name: dask-worker
args: [dask-worker, $(DASK_SCHEDULER_ADDRESS), --nthreads, '2', --memory-limit, 8GB]
restartPolicy: Never
Examples
--------
>>> cluster = KubeCluster.from_yaml('pod.yaml', namespace='my-ns') # doctest: +SKIP
See Also
--------
KubeCluster.from_dict
"""
if not yaml:
raise ImportError(
"PyYaml is required to use yaml functionality, please install it!"
)
with open(yaml_path) as f:
d = yaml.safe_load(f)
d = dask.config.expand_environment_variables(d)
return cls.from_dict(d, **kwargs)
@property
def namespace(self):
return self.pod_template.metadata.namespace
@property
def name(self):
return self.pod_template.metadata.generate_name
def scale(self, n):
# A shim to maintain backward compatibility
# https://github.com/dask/distributed/issues/3054
maximum = dask.config.get("kubernetes.count.max")
if maximum is not None and maximum < n:
logger.info(
"Tried to scale beyond maximum number of workers %d > %d", n, maximum
)
n = maximum
return super().scale(n)
async def _logs(self, scheduler=True, workers=True):
"""Return logs for the scheduler and workers
Parameters
----------
scheduler : boolean
Whether or not to collect logs for the scheduler
workers : boolean or Iterable[str], optional
A list of worker addresses to select.
Defaults to all workers if `True` or no workers if `False`
Returns
-------
logs: Dict[str]
A dictionary of logs, with one item for the scheduler and one for
each worker
"""
logs = Logs()
if scheduler:
logs["Scheduler"] = await self.scheduler.logs()
if workers:
worker_logs = await asyncio.gather(
*[w.logs() for w in self.workers.values()]
)
for key, log in zip(self.workers, worker_logs):
logs[key] = log
return logs
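# Illustrative sketch (added commentary; assumes a reachable Kubernetes cluster
# and a valid worker template, as described in the KubeCluster docstring above):
#   cluster = KubeCluster.from_yaml("worker-template.yml", deploy_mode="remote")
#   cluster.scale(5)
#   client = Client(cluster)  # dask.distributed.Client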
def _cleanup_resources(namespace, labels):
""" Remove all pods with these labels in this namespace """
import kubernetes
core_api = kubernetes.client.CoreV1Api()
pods = core_api.list_namespaced_pod(namespace, label_selector=format_labels(labels))
for pod in pods.items:
try:
core_api.delete_namespaced_pod(pod.metadata.name, namespace)
logger.info("Deleted pod: %s", pod.metadata.name)
except kubernetes.client.rest.ApiException as e:
# ignore error if pod is already removed
if e.status != 404:
raise
services = core_api.list_namespaced_service(
namespace, label_selector=format_labels(labels)
)
for service in services.items:
try:
core_api.delete_namespaced_service(service.metadata.name, namespace)
logger.info("Deleted service: %s", service.metadata.name)
except kubernetes.client.rest.ApiException as e:
# ignore error if service is already removed
if e.status != 404:
raise
def format_labels(labels):
""" Convert a dictionary of labels into a comma separated string """
if labels:
return ",".join(["{}={}".format(k, v) for k, v in labels.items()])
else:
return ""
def _namespace_default():
"""
Get current namespace if running in a k8s cluster
If not in a k8s cluster with service accounts enabled, default to
'default'
Taken from https://github.com/jupyterhub/kubespawner/blob/master/kubespawner/spawner.py#L125
"""
ns_path = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
if os.path.exists(ns_path):
with open(ns_path) as f:
return f.read().strip()
return "default"
def escape(s):
valid_characters = string.ascii_letters + string.digits + "-"
return "".join(c for c in s if c in valid_characters).lower()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
watchman/integration/test_path_generator.py
|
# vim:ts=4:sw=4:et:
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# no unicode literals
from __future__ import absolute_import, division, print_function
import os
import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestPathGenerator(WatchmanTestCase.WatchmanTestCase):
def test_path_generator_dot(self):
root = self.mkdtemp()
self.watchmanCommand("watch", root)
self.assertFileListsEqual(
self.watchmanCommand("query", root, {"path": ["."]})["files"], []
)
self.assertFileListsEqual(
self.watchmanCommand("query", root, {"relative_root": ".", "path": ["."]})[
"files"
],
[],
)
def test_path_generator_case(self):
root = self.mkdtemp()
os.mkdir(os.path.join(root, "foo"))
self.touchRelative(root, "foo", "bar")
self.watchmanCommand("watch", root)
self.assertFileListsEqual(
self.watchmanCommand("query", root, {"fields": ["name"], "path": ["foo"]})[
"files"
],
["foo/bar"],
)
if self.isCaseInsensitive():
os.rename(os.path.join(root, "foo"), os.path.join(root, "Foo"))
self.assertFileListsEqual(
self.watchmanCommand(
"query", root, {"fields": ["name"], "path": ["foo"]} # not Foo!
)["files"],
[],
message="Case insensitive matching not implemented \
for path generator",
)
def test_path_generator_relative_root(self):
root = self.mkdtemp()
os.mkdir(os.path.join(root, "foo"))
self.touchRelative(root, "foo", "bar")
self.watchmanCommand("watch", root)
self.assertFileListsEqual(
self.watchmanCommand(
"query",
root,
{"fields": ["name"], "relative_root": "foo", "path": ["bar"]},
)["files"],
["bar"],
)
self.assertFileListsEqual(
self.watchmanCommand(
"query",
root,
{
"fields": ["name"],
"relative_root": "foo",
"path": [{"path": "bar", "depth": -1}],
},
)["files"],
["bar"],
)
if self.isCaseInsensitive():
os.rename(os.path.join(root, "foo"), os.path.join(root, "Foo"))
self.assertFileListsEqual(
self.watchmanCommand(
"query", root, {"fields": ["name"], "path": ["foo"]} # not Foo!
)["files"],
[],
message="Case insensitive matching not implemented \
for path relative_root",
)
def test_path_generator_empty(self):
"""Specifying no input paths should return no results."""
root = self.mkdtemp()
os.mkdir(os.path.join(root, "mydir"))
self.touchRelative(root, "myfile")
self.watchmanCommand("watch", root)
self.assertFileListsEqual(
self.watchmanCommand("query", root, {"fields": ["name"], "path": []})[
"files"
],
[],
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
sdk/servicebus/azure-servicebus/samples/async_samples/session_send_receive_async.py
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
Example to show sending message(s) to and receiving messages from a Service Bus Queue with session enabled asynchronously.
"""
# pylint: disable=C0111
import os
import asyncio
from azure.servicebus import Message
from azure.servicebus.aio import ServiceBusClient
CONNECTION_STR = os.environ['SERVICE_BUS_CONNECTION_STR']
SESSION_QUEUE_NAME = os.environ["SERVICE_BUS_SESSION_QUEUE_NAME"]
SESSION_ID = os.environ['SERVICE_BUS_SESSION_ID']
async def send_single_message(sender):
message = Message("Single session message", session_id=SESSION_ID)
await sender.send_messages(message)
async def send_a_list_of_messages(sender):
messages = [Message("Session Message in list", session_id=SESSION_ID) for _ in range(10)]
await sender.send_messages(messages)
async def send_batch_message(sender):
batch_message = await sender.create_batch()
for _ in range(10):
try:
batch_message.add(Message("Session Message inside a BatchMessage", session_id=SESSION_ID))
except ValueError:
# BatchMessage object reaches max_size.
# New BatchMessage object can be created here to send more data.
break
await sender.send_messages(batch_message)
async def receive_batch_messages(receiver):
session = receiver.session
await session.set_session_state("START")
print("Session state:", await session.get_session_state())
received_msgs = await receiver.receive_messages(max_batch_size=10, max_wait_time=5)
for msg in received_msgs:
print(str(msg))
await msg.complete()
await session.renew_lock()
await session.set_session_state("END")
print("Session state:", await session.get_session_state())
async def main():
servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR)
async with servicebus_client:
sender = servicebus_client.get_queue_sender(queue_name=SESSION_QUEUE_NAME)
async with sender:
await send_single_message(sender)
await send_a_list_of_messages(sender)
await send_batch_message(sender)
print("Send message is done.")
receiver = servicebus_client.get_queue_session_receiver(queue_name=SESSION_QUEUE_NAME, session_id=SESSION_ID)
async with receiver:
await receive_batch_messages(receiver)
print("Receive is done.")
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
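# Illustrative way to run this sample (the values below are placeholders, not part of the original):
#   export SERVICE_BUS_CONNECTION_STR="Endpoint=sb://<namespace>.servicebus.windows.net/;..."
#   export SERVICE_BUS_SESSION_QUEUE_NAME="<session-enabled-queue>"
#   export SERVICE_BUS_SESSION_ID="session-1"
#   python session_send_receive_async.py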
|
[] |
[] |
[
"SERVICE_BUS_CONNECTION_STR",
"SERVICE_BUS_SESSION_ID",
"SERVICE_BUS_SESSION_QUEUE_NAME"
] |
[]
|
["SERVICE_BUS_CONNECTION_STR", "SERVICE_BUS_SESSION_ID", "SERVICE_BUS_SESSION_QUEUE_NAME"]
|
python
| 3 | 0 | |
internal/config/config.go
|
package config
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"github.com/infracost/infracost/internal/version"
"github.com/joho/godotenv"
"github.com/kelseyhightower/envconfig"
"github.com/sirupsen/logrus"
)
// ConfigSpec contains the mapping of environment variable names to config values
type ConfigSpec struct { // nolint:golint
NoColor bool `yaml:"no_color,omitempty"`
LogLevel string `yaml:"log_level,omitempty" envconfig:"INFRACOST_LOG_LEVEL"`
APIKey string `yaml:"api_key,omitempty" envconfig:"INFRACOST_API_KEY"`
PricingAPIEndpoint string `yaml:"pricing_api_endpoint,omitempty" envconfig:"INFRACOST_PRICING_API_ENDPOINT"`
DefaultPricingAPIEndpoint string `yaml:"default_pricing_api_endpoint,omitempty" envconfig:"INFRACOST_DEFAULT_PRICING_API_ENDPOINT"`
DashboardAPIEndpoint string `yaml:"dashboard_api_endpoint,omitempty" envconfig:"INFRACOST_DASHBOARD_API_ENDPOINT"`
TerraformCloudHost string `yaml:"terraform_cloud_host,omitempty" envconfig:"TERRAFORM_CLOUD_HOST"`
TerraformCloudToken string `yaml:"terraform_cloud_token,omitempty" envconfig:"TERRAFORM_CLOUD_TOKEN"`
}
var Config *ConfigSpec
func init() {
log.SetFlags(0)
Config = loadConfig()
}
func defaultConfigSpec() ConfigSpec {
return ConfigSpec{
NoColor: false,
DefaultPricingAPIEndpoint: "https://pricing.api.infracost.io",
PricingAPIEndpoint: "https://pricing.api.infracost.io",
DashboardAPIEndpoint: "https://dashboard.api.infracost.io",
}
}
func (c *ConfigSpec) SetLogLevel(l string) error {
c.LogLevel = l
// Disable logging if no log level is set
if c.LogLevel == "" {
logrus.SetOutput(ioutil.Discard)
return nil
}
logrus.SetOutput(os.Stderr)
level, err := logrus.ParseLevel(c.LogLevel)
if err != nil {
return err
}
logrus.SetLevel(level)
return nil
}
func (c *ConfigSpec) IsLogging() bool {
return c.LogLevel != ""
}
func LogSortingFunc(keys []string) {
// Put message at the end
for i, key := range keys {
if key == "msg" && i != len(keys)-1 {
keys[i], keys[len(keys)-1] = keys[len(keys)-1], keys[i]
break
}
}
}
func RootDir() string {
_, b, _, _ := runtime.Caller(0)
return filepath.Join(filepath.Dir(b), "../..")
}
func fileExists(path string) bool {
info, err := os.Stat(path)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
// loadConfig loads the config spec from the config file and environment variables.
// Config is loaded in the following order, with any later ones overriding the previous ones:
// * Default values
// * Config file
// * .env
// * .env.local
// * ENV variables
// * Any command line flags (e.g. --log-level)
func loadConfig() *ConfigSpec {
config := defaultConfigSpec()
err := mergeConfigFileIfExists(&config)
if err != nil {
log.Fatal(err)
}
envLocalPath := filepath.Join(RootDir(), ".env.local")
if fileExists(envLocalPath) {
err = godotenv.Load(envLocalPath)
if err != nil {
log.Fatal(err)
}
}
if fileExists(".env") {
err = godotenv.Load()
if err != nil {
log.Fatal(err)
}
}
err = envconfig.Process("", &config)
if err != nil {
log.Fatal(err)
}
logrus.SetFormatter(&logrus.TextFormatter{
FullTimestamp: true,
DisableColors: true,
SortingFunc: LogSortingFunc,
})
err = config.SetLogLevel(config.LogLevel)
if err != nil {
log.Fatal(err)
}
return &config
}
func GetUserAgent() string {
userAgent := "infracost"
if version.Version != "" {
userAgent += fmt.Sprintf("-%s", version.Version)
}
infracostEnv := getInfracostEnv()
if infracostEnv != "" {
userAgent += fmt.Sprintf(" (%s)", infracostEnv)
}
return userAgent
}
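// Illustrative result (the version string is hypothetical, not part of the original source):
// running under GitHub Actions with version.Version == "v0.8.6" yields
//   "infracost-v0.8.6 (github_actions)"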
func getInfracostEnv() string {
if IsTest() {
return "test"
} else if IsDev() {
return "dev"
} else if IsTruthy(os.Getenv("GITHUB_ACTIONS")) {
return "github_actions"
} else if IsTruthy(os.Getenv("GITLAB_CI")) {
return "gitlab_ci"
} else if IsTruthy(os.Getenv("CIRCLECI")) {
return "circleci"
} else {
envKeys := os.Environ()
sort.Strings(envKeys)
for _, k := range envKeys {
if strings.HasPrefix(k, "ATLANTIS_") {
return "atlantis"
} else if strings.HasPrefix(k, "BITBUCKET_") {
return "bitbucket"
} else if strings.HasPrefix(k, "JENKINS_") {
return "jenkins"
} else if strings.HasPrefix(k, "CONCOURSE_") {
return "concourse"
}
}
if IsTruthy(os.Getenv("CI")) {
return "ci"
}
}
return ""
}
func IsTest() bool {
return os.Getenv("INFRACOST_ENV") == "test" || strings.HasSuffix(os.Args[0], ".test")
}
func IsDev() bool {
return os.Getenv("INFRACOST_ENV") == "dev"
}
func IsTruthy(s string) bool {
return s == "1" || strings.EqualFold(s, "true")
}
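// Illustrative behaviour (examples added for clarity, not part of the original source):
//   IsTruthy("1")    == true
//   IsTruthy("TRUE") == true   // strings.EqualFold is case-insensitive
//   IsTruthy("yes")  == false
//   IsTruthy("")     == false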
|
[
"\"GITHUB_ACTIONS\"",
"\"GITLAB_CI\"",
"\"CIRCLECI\"",
"\"CI\"",
"\"INFRACOST_ENV\"",
"\"INFRACOST_ENV\""
] |
[] |
[
"GITLAB_CI",
"CIRCLECI",
"CI",
"GITHUB_ACTIONS",
"INFRACOST_ENV"
] |
[]
|
["GITLAB_CI", "CIRCLECI", "CI", "GITHUB_ACTIONS", "INFRACOST_ENV"]
|
go
| 5 | 0 | |
s3-file-delete-listener/src/main.go
|
package main
import (
"context"
"os"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
log "github.com/sirupsen/logrus"
)
var (
region = os.Getenv("aws_region")
bucketName = os.Getenv("s3_bucket_name")
)
func main() {
lambda.Start(handler)
}
func handler(ctx context.Context, snsEvent events.SNSEvent) {
sess, err := session.NewSession(&aws.Config{
Region: aws.String(region)},
)
if err != nil {
log.Println("creation of session error: ", err.Error())
}
svc := s3.New(sess)
for _, record := range snsEvent.Records {
log.Println("File name to be deleted: " + record.SNS.Message)
resp, err := svc.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(record.SNS.Message),
})
if err != nil {
log.Println("Panic Error in deleting file for S3: ", err.Error())
}
log.Println("RESP", resp)
}
}
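// Illustrative note (added for clarity, not part of the original source): the handler expects
// the Lambda environment variables aws_region and s3_bucket_name to be set, and each SNS
// record's Message field to contain the S3 object key that should be deleted.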
|
[
"\"aws_region\"",
"\"s3_bucket_name\""
] |
[] |
[
"aws_region",
"s3_bucket_name"
] |
[]
|
["aws_region", "s3_bucket_name"]
|
go
| 2 | 0 | |
tests/integration/test_utils_vcs_git.py
|
from __future__ import annotations
import os
import uuid
from copy import deepcopy
from hashlib import sha1
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from dulwich.client import HTTPUnauthorized
from dulwich.client import get_transport_and_path
from dulwich.repo import Repo
from poetry.core.pyproject.toml import PyProjectTOML
from poetry.console.exceptions import PoetrySimpleConsoleException
from poetry.utils.authenticator import Authenticator
from poetry.vcs.git import Git
from poetry.vcs.git.backend import GitRefSpec
if TYPE_CHECKING:
from _pytest.tmpdir import TempdirFactory
from dulwich.client import FetchPackResult
from dulwich.client import GitClient
from pytest_mock import MockerFixture
from tests.conftest import Config
# these tests are marked as integration tests because they rely on an external repository
# see `source_url` fixture
pytestmark = pytest.mark.integration
@pytest.fixture(autouse=True)
def git_mock() -> None:
pass
@pytest.fixture(autouse=True)
def setup(config: Config) -> None:
pass
REVISION_TO_VERSION_MAP = {
"b6204750a763268e941cec1f05f8986b6c66913e": "0.1.0", # Annotated Tag
"18d3ff247d288da701fc7f9ce2ec718388fca266": "0.1.1-alpha.0",
"dd07e8d4efb82690e7975b289917a7782fbef29b": "0.2.0-alpha.0",
"7263819922b4cd008afbb447f425a562432dad7d": "0.2.0-alpha.1",
}
BRANCH_TO_REVISION_MAP = {"0.1": "18d3ff247d288da701fc7f9ce2ec718388fca266"}
TAG_TO_REVISION_MAP = {"v0.1.0": "b6204750a763268e941cec1f05f8986b6c66913e"}
REF_TO_REVISION_MAP = {
"branch": BRANCH_TO_REVISION_MAP,
"tag": TAG_TO_REVISION_MAP,
}
@pytest.fixture
def use_system_git_client(config: Config) -> None:
config.merge({"experimental": {"system-git-client": True}})
@pytest.fixture(scope="module")
def source_url() -> str:
return "https://github.com/python-poetry/test-fixture-vcs-repository.git"
@pytest.fixture(scope="module")
def source_directory_name(source_url: str) -> str:
return Git.get_name_from_source_url(url=source_url)
@pytest.fixture(scope="module")
def local_repo(tmpdir_factory: TempdirFactory, source_directory_name: str) -> Repo:
with Repo.init(
tmpdir_factory.mktemp("src") / source_directory_name, mkdir=True
) as repo:
yield repo
@pytest.fixture(scope="module")
def _remote_refs(source_url: str, local_repo: Repo) -> FetchPackResult:
client: GitClient
path: str
client, path = get_transport_and_path(source_url)
return client.fetch(
path, local_repo, determine_wants=local_repo.object_store.determine_wants_all
)
@pytest.fixture
def remote_refs(_remote_refs: FetchPackResult) -> FetchPackResult:
return deepcopy(_remote_refs)
@pytest.fixture(scope="module")
def remote_default_ref(_remote_refs: FetchPackResult) -> bytes:
return _remote_refs.symrefs[b"HEAD"]
@pytest.fixture(scope="module")
def remote_default_branch(remote_default_ref: bytes) -> str:
return remote_default_ref.decode("utf-8").replace("refs/heads/", "")
def test_git_local_info(
source_url: str, remote_refs: FetchPackResult, remote_default_ref: bytes
) -> None:
with Git.clone(url=source_url) as repo:
info = Git.info(repo=repo)
assert info.origin == source_url
assert info.revision == remote_refs.refs[remote_default_ref].decode("utf-8")
def test_git_clone_default_branch_head(
source_url: str,
remote_refs: FetchPackResult,
remote_default_ref: bytes,
mocker: MockerFixture,
):
spy = mocker.spy(Git, "_clone")
spy_legacy = mocker.spy(Git, "_clone_legacy")
with Git.clone(url=source_url) as repo:
assert remote_refs.refs[remote_default_ref] == repo.head()
spy_legacy.assert_not_called()
spy.assert_called()
def test_git_clone_fails_for_non_existent_branch(source_url: str):
branch = uuid.uuid4().hex
with pytest.raises(PoetrySimpleConsoleException) as e:
Git.clone(url=source_url, branch=branch)
assert f"Failed to clone {source_url} at '{branch}'" in str(e.value)
def test_git_clone_fails_for_non_existent_revision(source_url: str):
revision = sha1(uuid.uuid4().bytes).hexdigest()
with pytest.raises(PoetrySimpleConsoleException) as e:
Git.clone(url=source_url, revision=revision)
assert f"Failed to clone {source_url} at '{revision}'" in str(e.value)
def assert_version(repo: Repo, expected_revision: str) -> None:
version = PyProjectTOML(
path=Path(repo.path).joinpath("pyproject.toml")
).poetry_config["version"]
revision = Git.get_revision(repo=repo)
assert revision == expected_revision
assert revision in REVISION_TO_VERSION_MAP
assert version == REVISION_TO_VERSION_MAP[revision]
def test_git_clone_when_branch_is_ref(source_url: str) -> None:
with Git.clone(url=source_url, branch="refs/heads/0.1") as repo:
assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"])
@pytest.mark.parametrize("branch", [*BRANCH_TO_REVISION_MAP.keys()])
def test_git_clone_branch(
source_url: str, remote_refs: FetchPackResult, branch: str
) -> None:
with Git.clone(url=source_url, branch=branch) as repo:
assert_version(repo, BRANCH_TO_REVISION_MAP[branch])
@pytest.mark.parametrize("tag", [*TAG_TO_REVISION_MAP.keys()])
def test_git_clone_tag(source_url: str, remote_refs: FetchPackResult, tag: str) -> None:
with Git.clone(url=source_url, tag=tag) as repo:
assert_version(repo, TAG_TO_REVISION_MAP[tag])
def test_git_clone_multiple_times(
source_url: str, remote_refs: FetchPackResult
) -> None:
for revision in REVISION_TO_VERSION_MAP:
with Git.clone(url=source_url, revision=revision) as repo:
assert_version(repo, revision)
def test_git_clone_revision_is_branch(
source_url: str, remote_refs: FetchPackResult
) -> None:
with Git.clone(url=source_url, revision="0.1") as repo:
assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"])
def test_git_clone_revision_is_ref(
source_url: str, remote_refs: FetchPackResult
) -> None:
with Git.clone(url=source_url, revision="refs/heads/0.1") as repo:
assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"])
@pytest.mark.parametrize(
("revision", "expected_revision"),
[
("0.1", BRANCH_TO_REVISION_MAP["0.1"]),
("v0.1.0", TAG_TO_REVISION_MAP["v0.1.0"]),
*zip(REVISION_TO_VERSION_MAP, REVISION_TO_VERSION_MAP),
],
)
def test_git_clone_revision_is_tag(
source_url: str, remote_refs: FetchPackResult, revision: str, expected_revision: str
) -> None:
with Git.clone(url=source_url, revision=revision) as repo:
assert_version(repo, expected_revision)
def test_git_clone_clones_submodules(source_url: str) -> None:
with Git.clone(url=source_url) as repo:
submodule_package_directory = (
Path(repo.path) / "submodules" / "sample-namespace-packages"
)
assert submodule_package_directory.exists()
assert submodule_package_directory.joinpath("README.md").exists()
assert len(list(submodule_package_directory.glob("*"))) > 1
def test_system_git_fallback_on_http_401(
mocker: MockerFixture,
source_url: str,
) -> None:
spy = mocker.spy(Git, "_clone_legacy")
mocker.patch.object(Git, "_clone", side_effect=HTTPUnauthorized(None, None))
with Git.clone(url=source_url, branch="0.1") as repo:
path = Path(repo.path)
assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"])
spy.assert_called_with(
url="https://github.com/python-poetry/test-fixture-vcs-repository.git",
target=path,
refspec=GitRefSpec(branch="0.1", revision=None, tag=None, ref=b"HEAD"),
)
spy.assert_called_once()
GIT_USERNAME = os.environ.get("POETRY_TEST_INTEGRATION_GIT_USERNAME")
GIT_PASSWORD = os.environ.get("POETRY_TEST_INTEGRATION_GIT_PASSWORD")
HTTP_AUTH_CREDENTIALS_AVAILABLE = not (GIT_USERNAME and GIT_PASSWORD)
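# Note (added for clarity): despite its name, this flag is True when the credentials are
# *missing*, so the skipif below skips the test when no credentials are configured.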
@pytest.mark.skipif(
HTTP_AUTH_CREDENTIALS_AVAILABLE,
reason="HTTP authentication credentials not available",
)
def test_configured_repository_http_auth(
mocker: MockerFixture, source_url: str, config: Config
) -> None:
from poetry.vcs.git import backend
spy_clone_legacy = mocker.spy(Git, "_clone_legacy")
spy_get_transport_and_path = mocker.spy(backend, "get_transport_and_path")
config.merge(
{
"repositories": {"git-repo": {"url": source_url}},
"http-basic": {
"git-repo": {
"username": GIT_USERNAME,
"password": GIT_PASSWORD,
}
},
}
)
mocker.patch(
"poetry.vcs.git.backend.get_default_authenticator",
return_value=Authenticator(config=config),
)
with Git.clone(url=source_url, branch="0.1") as repo:
assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"])
spy_clone_legacy.assert_not_called()
spy_get_transport_and_path.assert_called_with(
location=source_url,
username=GIT_USERNAME,
password=GIT_PASSWORD,
)
spy_get_transport_and_path.assert_called_once()
def test_username_password_parameter_is_not_passed_to_dulwich(
mocker: MockerFixture, source_url: str, config: Config
) -> None:
from poetry.vcs.git import backend
spy_clone = mocker.spy(Git, "_clone")
spy_get_transport_and_path = mocker.spy(backend, "get_transport_and_path")
with Git.clone(url=source_url, branch="0.1") as repo:
assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"])
spy_clone.assert_called_once()
spy_get_transport_and_path.assert_called_with(
location=source_url,
)
spy_get_transport_and_path.assert_called_once()
def test_system_git_called_when_configured(
mocker: MockerFixture, source_url: str, use_system_git_client: None
) -> None:
spy_legacy = mocker.spy(Git, "_clone_legacy")
spy = mocker.spy(Git, "_clone")
with Git.clone(url=source_url, branch="0.1") as repo:
path = Path(repo.path)
assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"])
spy.assert_not_called()
spy_legacy.assert_called_once()
spy_legacy.assert_called_with(
url=source_url,
target=path,
refspec=GitRefSpec(branch="0.1", revision=None, tag=None, ref=b"HEAD"),
)
|
[] |
[] |
[
"POETRY_TEST_INTEGRATION_GIT_PASSWORD",
"POETRY_TEST_INTEGRATION_GIT_USERNAME"
] |
[]
|
["POETRY_TEST_INTEGRATION_GIT_PASSWORD", "POETRY_TEST_INTEGRATION_GIT_USERNAME"]
|
python
| 2 | 0 | |
Source/ThirdParty/gyp/pylib/gyp/generator/msvs.py
|
#!/usr/bin/python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ntpath
import posixpath
import os
import re
import subprocess
import sys
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSVersion as MSVSVersion
import gyp.MSVSSettings as MSVSSettings
import gyp.common
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile('^[A-F0-9\-]+$')
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)/obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
'LIB_DIR': '$(OutDir)/lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
]
# List of precompiled header related keys.
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
cached_username = None
cached_domain = None
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
""" Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if len(path) > 0 and path[-1] == '\\':
path = path[:-1]
return path
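# Illustrative examples (added for clarity, not part of the original source), assuming
# fixpath_prefix is unset:
#   _FixPath('foo/bar/baz.cc')  -> 'foo\\bar\\baz.cc'
#   _FixPath('$(IntDir)/obj/')  -> '$(IntDir)\\obj'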
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
excluded_result = []
folders = dict()
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
else:
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
# Add a folder for excluded files.
if excluded_result:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
# Populate all the folders.
for f in folders:
contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
excluded=excluded)
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name)-1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
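# Illustrative example (added for clarity, not part of the original source):
#   _ConfigFullName('Debug_x64', {'msvs_configuration_platform': 'x64'}) -> 'Debug|x64'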
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
quote_cmd):
if cygwin_shell:
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Prepare command.
direct_cmd = cmd
direct_cmd = [i.replace('$(IntDir)',
'`cygpath -m "${INTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(OutDir)',
'`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
if has_input_path:
direct_cmd = [i.replace('$(InputPath)',
'`cygpath -m "${INPUTPATH}"`')
for i in direct_cmd]
direct_cmd = ['"%s"' % i for i in direct_cmd]
direct_cmd = [i.replace('"', '\\"') for i in direct_cmd]
#direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
direct_cmd = ' '.join(direct_cmd)
# TODO(quote): regularize quoting path names throughout the module
cmd = (
'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
'set CYGWIN=nontsec&& ')
if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
if direct_cmd.find('INTDIR') >= 0:
cmd += 'set INTDIR=$(IntDir)&& '
if direct_cmd.find('OUTDIR') >= 0:
cmd += 'set OUTDIR=$(OutDir)&& '
if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
cmd += 'set INPUTPATH=$(InputPath) && '
cmd += (
'bash -c "%(cmd)s"')
cmd = cmd % {'cygwin_dir': cygwin_dir,
'cmd': direct_cmd}
return cmd
else:
# Convert cat --> type to mimic unix.
if cmd[0] == 'cat':
command = ['type']
else:
command = [cmd[0].replace('/', '\\')]
# Fix the paths
# If the argument starts with a slash, it's probably a command line switch
arguments = [i.startswith('/') and i or _FixPath(i) for i in cmd[1:]]
if quote_cmd:
# Support a mode for using cmd directly.
# Convert any paths to native form (first element is used directly).
# TODO(quote): regularize quoting path names throughout the module
arguments = ['"%s"' % i for i in arguments]
# Collapse into a single command.
return ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path):
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Currently this weird argument munging is used to duplicate the way a
# python script would need to be run as part of the chrome tree.
# Eventually we should add some sort of rule_default option to set this
# per project. For now the behavior chrome needs is the default.
mcs = rule.get('msvs_cygwin_shell')
if mcs is None:
mcs = int(spec.get('msvs_cygwin_shell', 1))
elif isinstance(mcs, str):
mcs = int(mcs)
quote_cmd = int(rule.get('msvs_quote_cmd', 1))
return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path,
quote_cmd)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
inputs, outputs, description, cmd):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
"""
inputs = [_FixPath(i) for i in inputs]
outputs = [_FixPath(i) for i in outputs]
tool = MSVSProject.Tool(
'VCCustomBuildTool', {
'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd,
})
# Add to the properties of primary input for each config.
for config_name, c_data in spec['configurations'].iteritems():
p.AddFileConfig(_FixPath(primary_input),
_ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
"""Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
"""
for input in actions_dict:
inputs = set()
outputs = set()
descriptions = []
commands = []
for action in actions_dict[input]:
inputs.update(set(action['inputs']))
outputs.update(set(action['outputs']))
descriptions.append(action['description'])
commands.append(action['command'])
# Add the custom build step for one input file.
description = ', and also '.join(descriptions)
command = '\r\n'.join(commands)
_AddCustomBuildToolForMSVS(p, spec,
primary_input=input,
inputs=inputs,
outputs=outputs,
description=description,
cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
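# Illustrative example (added for clarity, not part of the original source): with
# input_file 'dir/foo.idl', '$(InputName)_h.h' expands to 'foo_h.h' and
# '$(InputPath)' expands to 'dir/foo.idl'.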
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
rule_ext = rule['extension']
return [s for s in sources if s.endswith('.' + rule_ext)]
def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question
    trigger_file: the source file that triggered this rule
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = [_FixPath(i) for i in rule.get('inputs', [])]
raw_outputs = [_FixPath(i) for i in rule.get('outputs', [])]
inputs = set()
outputs = set()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename))
rules_file.Create(spec['target_name'])
# Add each rule.
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = [_FixPath(i) for i in r.get('inputs', [])]
outputs = [_FixPath(i) for i in r.get('outputs', [])]
cmd = _BuildCommandLineForRule(spec, r, has_input_path=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
# Write out rules file.
rules_file.Write()
# Add rules file to project.
p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
options: global generator options
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
# Find cygwin style versions of some paths.
file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
# Gather stuff needed to emit all: target.
all_inputs = set()
all_outputs = set()
all_output_dirs = set()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(set(inputs))
all_outputs.update(set(outputs))
# Only use one target from each rule as the dependency for
# 'all' so we don't try to build each rule multiple times.
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
file.write('\tmkdir -p %s\n' % od)
file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
file.write('\t%s\n\n' % cmd)
# Close up the file.
file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True)
# Insert makefile as 0'th input, so it gets the action attached there,
# as this is easier to understand from in the IDE.
all_inputs = list(all_inputs)
all_inputs.insert(0, filename)
_AddActionStep(actions_to_add,
inputs=[_FixPath(i) for i in all_inputs],
outputs=[_FixPath(i) for i in all_outputs],
description='Running %s' % cmd,
command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this."""
s = s.replace('%', '%%')
return s
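# Illustrative example (added for clarity, not part of the original source):
#   _EscapeEnvironmentVariableExpansion('%PATH%') -> '%%PATH%%'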
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument, so that the Win32
CommandLineToArgv function will turn the escaped result back into the
original string. See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this."""
def replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
"""The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention."""
def replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
list = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(list), 2):
list[i] = delimiters_replacer_regex.sub(replace, list[i])
# Concatenate back into a single string
s = '"'.join(list)
if len(list) % 2 == 0:
# String ends while still quoted according to VCProj's convention. This
# means the delimiter and the next list item that follow this one in the
# .vcproj file will be misinterpreted as part of this item. There is nothing
# we can do about this. Adding an extra quote would correct the problem in
# the VCProj but cause the same problem on the final command-line. Moving
# the item to the end of the list does works, but that's only possible if
# there's only one such item. Let's just warn the user.
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s)
return s
def _EscapeCppDefineForMSVS(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSVS(s)
s = _EscapeVCProjCommandLineArgListItem(s)
return s
def _GenerateRulesForMSVS(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources)
def _AdjustSourcesForRules(rules, sources, excluded_sources):
# Add outputs generated by each rule (if applicable).
for rule in rules:
# Done if not processing outputs as sources.
if int(rule.get('process_outputs_as_sources', False)):
# Add in the outputs from this rule.
trigger_files = _FindRuleTriggerFiles(rule, sources)
for trigger_file in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
inputs = set([_FixPath(i) for i in inputs])
outputs = set([_FixPath(i) for i in outputs])
inputs.remove(_FixPath(trigger_file))
sources.update(inputs)
excluded_sources.update(inputs)
sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
"""Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
"""
must_keep = set([_FixPath(s) for s in actions_to_add.keys()])
return [s for s in excluded_sources if s not in must_keep]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
"""Get the guid for the project
Arguments:
proj_path: Path of the vcproj file to generate.
spec: The target dictionary containing the properties of the target.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
# Decide the guid of the project.
guid = default_config.get('msvs_guid')
if guid:
if VALID_MSVS_GUID_CHARS.match(guid) == None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
guid = guid or MSVSNew.MakeGuid(proj_path)
return guid
def _GenerateProject(project, options, version):
"""Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
"""
default_config = _GetDefaultConfiguration(project.spec)
# Skip emitting anything if told to with msvs_existing_vcproj option.
if default_config.get('msvs_existing_vcproj'):
return
_GenerateMSVSProject(project, options, version)
def _GenerateMSVSProject(project, options, version):
"""Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
"""
spec = project.spec
vcproj_dir = os.path.dirname(project.path)
if vcproj_dir and not os.path.exists(vcproj_dir):
os.makedirs(vcproj_dir)
platforms = _GetUniquePlatforms(spec)
p = MSVSProject.Writer(project.path, version=version)
p.Create(spec['target_name'], guid=project.guid, platforms=platforms)
# Get directory project file is in.
gyp_dir = os.path.split(project.path)[0]
gyp_file = posixpath.split(project.build_file)[1]
gyp_path = _NormalizedSource(gyp_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, gyp_dir)
config_type = _GetMSVSConfigurationType(spec, project.build_file)
for config_name, config in spec['configurations'].iteritems():
_AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
# Prepare list of sources and excluded sources.
sources, excluded_sources = _PrepareListOfSources(project, spec,
relative_path_of_gyp_file)
# Add rules.
actions_to_add = {}
_GenerateRulesForMSVS(p, gyp_dir, options, spec,
sources, excluded_sources,
actions_to_add)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources))
# Add in files.
p.AddFiles(sources)
_AddToolFilesToMSVS(p, spec)
_HandlePreCompileHeaderStubs(p, spec)
_AddActions(actions_to_add, spec, relative_path_of_gyp_file)
_AddCopies(actions_to_add, spec)
_WriteMSVSUserFile(project.path, version, spec)
# NOTE: this stanza must appear after all actions have been decided.
# Don't excluded sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
_ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl)
_AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
# Write it out.
p.Write()
def _GetUniquePlatforms(spec):
"""Return the list of unique platforms for this spec, e.g ['win32', ...]
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
    The list of unique platform names used by this target's configurations.
"""
# Gather list of unique platforms.
platforms = set()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
"""Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version=version)
user_file.Create(spec['target_name'])
return user_file
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project. It's a number defined
by Microsoft. May raise an exception.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
'dummy_executable': '1', # .exe
}[spec['type']]
except KeyError, e:
if spec.get('type'):
raise Exception('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise Exception('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
"""Many settings in a vcproj file are specific to a configuration. This
  function generates the main part of the vcproj file that's configuration specific.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
    config: The dictionary that defines the special processing to be done
for this configuration.
"""
# Get the information for this configuration
include_dirs, resource_include_dirs = _GetIncludeDirs(config)
libraries = _GetLibraries(config, spec)
out_file, vc_tool = _GetOutputFilePathAndTool(spec)
defines = _GetDefines(config)
defines = [_EscapeCppDefineForMSVS(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(config)
prebuild = config.get('msvs_prebuild')
postbuild = config.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = config.get('msvs_precompiled_header')
# Prepare the list of tools as a dictionary.
tools = dict()
# Add in user specified msvs_settings.
msvs_settings = config.get('msvs_settings', {})
MSVSSettings.ValidateMSVSSettings(msvs_settings)
for tool in msvs_settings:
settings = config['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
# Add the information to the appropriate tool
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
if out_file:
_ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
# Add defines.
_ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
defines)
# Change program database directory to prevent collisions.
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)\\$(ProjectName)\\vc80.pdb')
# Add disabled warnings.
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
# Add Pre-build.
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
# Add Post-build.
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', precompiled_header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
_AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
"""Returns the list of directories to be used for #include directives.
Arguments:
    config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
config.get('include_dirs', []) +
config.get('msvs_system_include_dirs', []))
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
include_dirs = [_FixPath(i) for i in include_dirs]
resource_include_dirs = [_FixPath(i) for i in resource_include_dirs]
return include_dirs, resource_include_dirs
def _GetLibraries(config, spec):
"""Returns the list of libraries for this configuration.
Arguments:
    config: The dictionary that defines the special processing to be done
for this configuration.
spec: The target dictionary containing the properties of the target.
Returns:
The list of directory paths.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
return [re.sub('^(\-l)', '', lib) for lib in libraries]
def _GetOutputFilePathAndTool(spec):
"""Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A pair of (file path, name of the tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', '$(OutDir)\\', '.exe'),
'shared_library': ('VCLinkerTool', '$(OutDir)\\', '.dll'),
'loadable_module': ('VCLinkerTool', '$(OutDir)\\', '.dll'),
'static_library': ('VCLibrarianTool', '$(OutDir)\\lib\\', '.lib'),
'dummy_executable': ('VCLinkerTool', '$(IntDir)\\', '.junk'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, out_dir, suffix = output_file_props
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool
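# Illustrative example (added for clarity, not part of the original source): for a spec of
# {'type': 'executable'} with no product overrides this returns
#   ('$(OutDir)\\$(ProjectName).exe', 'VCLinkerTool')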
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
    config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ""
if spec['type'] in ['shared_library', 'loadable_module']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError('Multiple module definition files in one target, '
'target %s lists multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
""" Convert the content of the tools array to a form expected by
VisualStudio.
Arguments:
    tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
"""
tool_list = []
for tool, settings in tools.iteritems():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.iteritems():
if type(value) == list:
if ((tool == 'VCLinkerTool' and
setting == 'AdditionalDependencies') or
setting == 'AdditionalOptions'):
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
"""Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
    tools: A dictionary of settings; the tool name is the key.
    config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
"""
attributes = _GetMSVSAttributes(spec, config, config_type)
# Add in this configuration.
tool_list = _ConvertToolsToExpectedForm(tools)
p.AddConfig(_ConfigFullName(config_name, config),
attrs=attributes, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
# Prepare configuration attributes.
prepared_attrs = {}
source_attrs = config.get('msvs_configuration_attributes', {})
for a in source_attrs:
prepared_attrs[a] = source_attrs[a]
# Add props files.
vsprops_dirs = config.get('msvs_props', [])
vsprops_dirs = [_FixPath(i) for i in vsprops_dirs]
if vsprops_dirs:
prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
# Set configuration type.
prepared_attrs['ConfigurationType'] = config_type
if not prepared_attrs.has_key('OutputDirectory'):
prepared_attrs['OutputDirectory'] = '$(SolutionDir)$(ConfigurationName)'
if not prepared_attrs.has_key('IntermediateDirectory'):
intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
prepared_attrs['IntermediateDirectory'] = intermediate
return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
sources = [_NormalizedSource(s) for s in sources_array]
sources_set.update(set(sources))
def _PrepareListOfSources(project, spec, relative_path_of_gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
Arguments:
project: the MSVSProject object.
spec: The target dictionary containing the properties of the target.
relative_path_of_gyp_file: The relative path of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources)
"""
sources = set()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = set()
# Add in the gyp file.
sources.add(relative_path_of_gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a.get('inputs', [])
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = set(inputs)
sources.update(inputs)
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources):
"""Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
spec: The target dictionary containing the properties of the target.
options: Global generator options.
gyp_dir: The path to the gyp file being processed.
sources: A set of sources to be included for this project.
    excluded_sources: A set of sources to be excluded for this project.
Returns:
A trio of (list of sources, list of excluded sources,
path of excluded IDL file)
"""
# Exclude excluded sources coming into the generator.
excluded_sources.update(set(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper windows form.
# NOTE: sources goes from being a set to a list here.
# NOTE: excluded_sources goes from being a set to a list here.
sources = [_FixPath(i) for i in sources]
# Convert to proper windows form.
excluded_sources = [_FixPath(i) for i in excluded_sources]
excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
precompiled_related = _GetPrecompileRelatedFiles(spec)
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded)
# Add in dummy file for type none.
if spec['type'] == 'dummy_executable':
# Pull in a dummy main so it can link successfully.
dummy_relpath = gyp.common.RelativePath(
options.depth + '\\tools\\gyp\\gyp_dummy.c', gyp_dir)
sources.append(dummy_relpath)
return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
# Gather a list of precompiled header related sources.
precompiled_related = []
for config_name, config in spec['configurations'].iteritems():
for k in precomp_keys:
f = config.get(k)
if f:
precompiled_related.append(_FixPath(f))
return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl):
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
for file_name, excluded_configs in exclusions.iteritems():
for config_name, config in excluded_configs:
p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
{'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
exclusions = {}
# Exclude excluded sources from being built.
for f in excluded_sources:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
# If any non-native rules use 'idl' as an extension exclude idl files.
# Exclude them now.
for f in excluded_idl:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
return exclusions
def _AddToolFilesToMSVS(p, spec):
# Add in tool files (rules).
tool_files = set()
for config_name, config in spec['configurations'].iteritems():
for f in config.get('msvs_tool_files', []):
tool_files.add(f)
for f in tool_files:
p.AddToolFile(f)
def _HandlePreCompileHeaderStubs(p, spec):
# Handle pre-compiled headers source stubs specially.
for config_name, config in spec['configurations'].iteritems():
source = config.get('msvs_precompiled_source')
if source:
source = _FixPath(source)
# UsePrecompiledHeader=1 when using precompiled headers.
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '1'})
p.AddFileConfig(source, _ConfigFullName(config_name, config),
{}, tools=[tool])
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
# Add actions.
actions = spec.get('actions', [])
for a in actions:
cmd = _BuildCommandLineForRule(spec, a, has_input_path=False)
# Attach actions to the gyp file if nothing else is there.
inputs = a.get('inputs') or [relative_path_of_gyp_file]
# Add the action.
_AddActionStep(actions_to_add,
inputs=inputs,
outputs=a.get('outputs', []),
description=a.get('message', a['action_name']),
command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
# Add run_as and test targets.
if 'run_as' in spec:
run_as = spec['run_as']
action = run_as.get('action', [])
environment = run_as.get('environment', [])
working_directory = run_as.get('working_directory', '.')
elif int(spec.get('test', 0)):
action = ['$(TargetPath)', '--gtest_print_time']
environment = []
working_directory = '.'
else:
return # Nothing to add
# Write out the user file.
user_file = _CreateMSVSUserFile(project_path, version, spec)
for config_name, c_data in spec['configurations'].iteritems():
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
action, environment, working_directory)
user_file.Write()
def _AddCopies(actions_to_add, spec):
copies = _GetCopies(spec)
for inputs, outputs, cmd, description in copies:
_AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
description=description, command=cmd)
def _GetCopies(spec):
copies = []
# Add copies.
for cpy in spec.get('copies', []):
for src in cpy.get('files', []):
dst = os.path.join(cpy['destination'], os.path.basename(src))
# _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
# outputs, so do the same for our generated command line.
if src.endswith('/'):
src_bare = src[:-1]
base_dir = posixpath.split(src_bare)[0]
outer_dir = posixpath.split(src_bare)[1]
cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
_FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
copies.append(([src], ['dummy_copies', dst], cmd,
'Copying %s to %s' % (src, dst)))
else:
cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
_FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
return copies
def _GetPathDict(root, path):
if path == '':
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
# Recursively explore the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node.keys():
node[child] = _CollapseSingles(child, node[child])
return node
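# Worked example (added illustration, not part of upstream gyp): _CollapseSingles()
# hoists a project that is the only entry in a folder of the same name. The
# 'project-object' value below is a stand-in for an MSVSProject instance.
#
#   >>> tree = {'base': {'base.vcproj': 'project-object'}}
#   >>> _CollapseSingles('', tree)
#   {'base': 'project-object'}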
def _GatherSolutionFolders(sln_projects, project_objects, flat):
root = {}
# Convert into a tree of dicts on path.
for p in sln_projects:
gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
gyp_dir = os.path.dirname(gyp_file)
path_dict = _GetPathDict(root, gyp_dir)
path_dict[target + '.vcproj'] = project_objects[p]
# Walk down from the top until we hit a folder that has more than one entry.
# In practice, this strips the top-level "src/" dir from the hierarchy in
# the solution.
while len(root) == 1 and type(root[root.keys()[0]]) == dict:
root = root[root.keys()[0]]
# Collapse singles.
root = _CollapseSingles('', root)
# Merge buckets until everything is a root entry.
return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
default_config = _GetDefaultConfiguration(spec)
proj_filename = default_config.get('msvs_existing_vcproj')
if not proj_filename:
proj_filename = (spec['target_name'] + options.suffix +
msvs_version.ProjectExtension())
build_file = gyp.common.BuildFile(qualified_target)
proj_path = os.path.join(os.path.split(build_file)[0], proj_filename)
fixpath_prefix = None
if options.generator_output:
projectDirPath = os.path.dirname(os.path.abspath(proj_path))
proj_path = os.path.join(options.generator_output, proj_path)
fixpath_prefix = gyp.common.RelativePath(projectDirPath,
os.path.dirname(proj_path))
return proj_path, fixpath_prefix
def _GetPlatformOverridesOfProject(spec):
# Prepare a dict indicating which project configurations are used for which
# solution configurations for this target.
config_platform_overrides = {}
for config_name, c in spec['configurations'].iteritems():
config_fullname = _ConfigFullName(config_name, c)
platform = c.get('msvs_target_platform', _ConfigPlatform(c))
fixed_config_fullname = '%s|%s' % (
_ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
config_platform_overrides[config_fullname] = fixed_config_fullname
return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
"""Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
msvs_version: the MSVSVersion object.
Returns:
A dictionary of created projects, keyed by target.
"""
global fixpath_prefix
# Generate each project.
projects = {}
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
options, msvs_version)
guid = _GetGuidOfProject(proj_path, spec)
overrides = _GetPlatformOverridesOfProject(spec)
build_file = gyp.common.BuildFile(qualified_target)
# Create object for this project.
obj = MSVSNew.MSVSProject(
_FixPath(proj_path),
name=spec['target_name'],
guid=guid,
spec=spec,
build_file=build_file,
config_platform_overrides=overrides,
fixpath_prefix=fixpath_prefix)
projects[qualified_target] = obj
# Set all the dependencies
for project in projects.values():
deps = project.spec.get('dependencies', [])
deps = [projects[d] for d in deps]
project.set_dependencies(deps)
return projects
def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
# Select project file format version (if unset, default to auto detecting).
msvs_version = \
MSVSVersion.SelectVisualStudioVersion(generator_flags.get('msvs_version',
'auto'))
# Stash msvs_version for later (so we don't have to probe the system twice).
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running through WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
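# Hedged standalone sketch of the word-size probe described above (added for
# illustration only; the generator itself uses the inline check in
# CalculateVariables). Under WOW64 a 32-bit process typically sees
# PROCESSOR_ARCHITECTURE=x86 and PROCESSOR_ARCHITEW6432=AMD64, so this returns 64.
def _probe_msvs_os_bits(environ=None):
  environ = environ if environ is not None else os.environ
  arch = environ.get('PROCESSOR_ARCHITECTURE', '')
  arch_wow64 = environ.get('PROCESSOR_ARCHITEW6432', '')
  return 64 if ('64' in arch or '64' in arch_wow64) else 32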
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
"""
global fixpath_prefix
options = params['options']
generator_flags = params.get('generator_flags', {})
# Get the project file format version back out of where we stashed it in
# GeneratorCalculatedVariables.
msvs_version = params['msvs_version']
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
_GenerateProject(project, options, msvs_version)
fixpath_prefix = None
for build_file in data.keys():
# Validate build_file extension
if build_file[-4:] != '.gyp':
continue
sln_path = build_file[:-4] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
|
[] |
[] |
[
"USERNAME",
"PROCESSOR_ARCHITEW6432",
"USERDOMAIN",
"PROCESSOR_ARCHITECTURE"
] |
[]
|
["USERNAME", "PROCESSOR_ARCHITEW6432", "USERDOMAIN", "PROCESSOR_ARCHITECTURE"]
|
python
| 4 | 0 | |
rpsa/upstream.py
|
import pandas as pd
import tweepy
import os
from typing import List
def fetch_sample(screen_names: List[str], size: int = 100):
consumer_key = os.environ["TWITTER_CONSUMER_KEY"]
consumer_secret = os.environ["TWITTER_CONSUMER_SECRET"]
access_token = os.environ["TWITTER_ACCESS_TOKEN"]
access_token_secret = os.environ["TWITTER_ACCESS_TOKEN_SECRET"]
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
query = " OR ".join(f"@{sn}" for sn in screen_names)
query += " -filter:retweets"
api_kwargs = {
"q": query,
"lang": "en",
"result_type": "recent",
"tweet_mode": "extended",
"count": min((size, 100)),
}
tweets = {
"id": [],
"text": [],
}
for tweet in tweepy.Cursor(api.search, **api_kwargs).items(size):
tweets["id"].append(tweet.id)
tweets["text"].append(tweet.full_text)
df_dtypes = {"id": "int64", "text": "string"}
df = pd.DataFrame(tweets)
df = df.astype(dtype=df_dtypes, copy=False)
df.set_index("id", inplace=True)
return df
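# Hedged usage sketch (not part of the original module): the screen names and
# sample size below are placeholders, and the four TWITTER_* variables read in
# fetch_sample() must already be set in the environment for authentication.
if __name__ == "__main__":
    example_handles = ["exampleaccount1", "exampleaccount2"]  # hypothetical handles
    sample_df = fetch_sample(example_handles, size=50)
    print(sample_df.head())  # DataFrame indexed by tweet id with a 'text' column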
|
[] |
[] |
[
"TWITTER_CONSUMER_SECRET",
"TWITTER_ACCESS_TOKEN",
"TWITTER_ACCESS_TOKEN_SECRET",
"TWITTER_CONSUMER_KEY"
] |
[]
|
["TWITTER_CONSUMER_SECRET", "TWITTER_ACCESS_TOKEN", "TWITTER_ACCESS_TOKEN_SECRET", "TWITTER_CONSUMER_KEY"]
|
python
| 4 | 0 | |
test/functional/test_framework/test_framework.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import argparse
import configparser
from enum import Enum
import logging
import os
import pdb
import random
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .mininode import NetworkThread
from .util import (
assert_equal,
check_json_precision,
connect_nodes,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
MAX_NODES,
p2p_port,
PortSeed,
rpc_port,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
# Timestamp is Dec. 1st, 2019 at 00:00:00
TIMESTAMP_IN_THE_PAST = 1575158400
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class BitcoinTestMetaClass(type):
"""Metaclass for BitcoinTestFramework.
Ensures that any attempt to register a subclass of `BitcoinTestFramework`
adheres to a standard whereby the subclass overrides `set_test_params` and
`run_test` but DOES NOT override either `__init__` or `main`. If any of
those standards are violated, a ``TypeError`` is raised."""
def __new__(cls, clsname, bases, dct):
if not clsname == 'BitcoinTestFramework':
if not ('run_test' in dct and 'set_test_params' in dct):
raise TypeError("BitcoinTestFramework subclasses must override "
"'run_test' and 'set_test_params'")
if '__init__' in dct or 'main' in dct:
raise TypeError("BitcoinTestFramework subclasses may not override "
"'__init__' or 'main'")
return super().__new__(cls, clsname, bases, dct)
class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.chain = 'regtest'
self.setup_clean_chain = False
self.nodes = []
self.network_thread = None
# Wait for up to 60 seconds for the RPC server to respond
self.rpc_timeout = 60
self.supports_cli = False
self.bind_to_localhost_only = True
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir",
help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile", default=os.path.abspath(os.path.dirname(os.path.realpath(
__file__)) + "/../../config.ini"), help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
parser.add_argument("--perf", dest="perf", default=False, action="store_true",
help="profile running nodes with perf for the duration of the test")
parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true",
help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required")
parser.add_argument("--randomseed", type=int,
help="set a random seed for deterministically reproducing a previous test run")
parser.add_argument("--with-axionactivation", dest="axionactivation", default=False, action="store_true",
help="Activate axion update on timestamp {}".format(TIMESTAMP_IN_THE_PAST))
self.add_options(parser)
self.options = parser.parse_args()
self.set_test_params()
assert hasattr(
self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile, encoding='utf-8'))
self.config = config
self.options.bitcoind = os.getenv(
"BITCOIND", default=config["environment"]["BUILDDIR"] + '/src/bitcoind' + config["environment"]["EXEEXT"])
self.options.bitcoincli = os.getenv(
"BITCOINCLI", default=config["environment"]["BUILDDIR"] + '/src/bitcoin-cli' + config["environment"]["EXEEXT"])
self.options.emulator = config["environment"]["EMULATOR"] or None
os.environ['PATH'] = config['environment']['BUILDDIR'] + os.pathsep + \
config['environment']['BUILDDIR'] + os.path.sep + "qt" + os.pathsep + \
os.environ['PATH']
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
# Seed the PRNG. Note that test runs are reproducible if and only if
# a single thread accesses the PRNG. For more information, see
# https://docs.python.org/3/library/random.html#notes-on-reproducibility.
# The network thread shouldn't access random. If we need to change the
# network thread to access randomness, it should instantiate its own
# random.Random object.
seed = self.options.randomseed
if seed is None:
seed = random.randrange(sys.maxsize)
else:
self.log.debug("User supplied random seed {}".format(seed))
random.seed(seed)
self.log.debug("PRNG seed is: {}".format(seed))
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
success = TestStatus.FAILED
try:
if self.options.usecli:
if not self.supports_cli:
raise SkipTest(
"--usecli specified but test does not support using CLI")
self.skip_if_no_cli()
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: {}".format(e.message))
success = TestStatus.SKIPPED
except AssertionError:
self.log.exception("Assertion failed")
except KeyError:
self.log.exception("Key error")
except Exception:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info(
"Note: bitcoinds were not stopped and may still be running")
should_clean_up = (
not self.options.nocleanup and
not self.options.noshutdown and
success != TestStatus.FAILED and
not self.options.perf
)
if should_clean_up:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
elif self.options.perf:
self.log.warning(
"Not cleaning up dir {} due to perf data".format(
self.options.tmpdir))
cleanup_tree_on_exit = False
else:
self.log.warning(
"Not cleaning up dir {}".format(self.options.tmpdir))
cleanup_tree_on_exit = False
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error(
"Test failed. Test logging available at {}/test_framework.log".format(self.options.tmpdir))
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(
os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
#
# Topology looks like this:
# node0 <-- node1 <-- node2 <-- node3
#
# If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
# ensure block propagation, all nodes will establish outgoing connections toward node0.
# See fPreferredDownload in net_processing.
#
# If further outbound connections are needed, they can be added at the beginning of the test with e.g.
# connect_nodes(self.nodes[1], 2)
for i in range(self.num_nodes - 1):
connect_nodes(self.nodes[i + 1], self.nodes[i])
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
if not self.setup_clean_chain:
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# To ensure that all nodes are out of IBD, the most recent block
# must have a timestamp not too old (see IsInitialBlockDownload()).
self.log.debug('Generate a block with current time')
block_hash = self.nodes[0].generate(1)[0]
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
for n in self.nodes:
n.submitblock(block)
chain_info = n.getblockchaininfo()
assert_equal(chain_info["blocks"], 200)
assert_equal(chain_info["initialblockdownload"], False)
def import_deterministic_coinbase_privkeys(self):
for n in self.nodes:
try:
n.getwalletinfo()
except JSONRPCException as e:
assert str(e).startswith('Method not found')
continue
n.importprivkey(
privkey=n.get_deterministic_priv_key().key,
label='coinbase')
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test
# scripts.
def add_nodes(self, num_nodes, extra_args=None,
*, host=None, binary=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
set_test_params()."""
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [self.options.bitcoind] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(
i,
get_datadir_path(self.options.tmpdir, i),
chain=self.chain,
host=host,
rpc_port=rpc_port(i),
p2p_port=p2p_port(i),
timewait=self.rpc_timeout,
bitcoind=binary[i],
bitcoin_cli=self.options.bitcoincli,
coverage_dir=self.options.coveragedir,
cwd=self.options.tmpdir,
extra_conf=extra_confs[i],
extra_args=extra_args[i],
use_cli=self.options.usecli,
emulator=self.options.emulator,
start_perf=self.options.perf,
use_valgrind=self.options.valgrind,
))
if self.options.axionactivation:
self.nodes[i].extend_default_args(
["-axionactivationtime={}".format(TIMESTAMP_IN_THE_PAST)])
def start_node(self, i, *args, **kwargs):
"""Start a bitcoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple bitcoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except BaseException:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(
self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr='', wait=0):
"""Stop a bitcoind test node"""
self.nodes[i].stop_node(expected_stderr, wait=wait)
self.nodes[i].wait_until_stopped()
def stop_nodes(self, wait=0):
"""Stop multiple bitcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node(wait=wait)
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], self.nodes[2])
disconnect_nodes(self.nodes[2], self.nodes[1])
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes(self.nodes[1], self.nodes[2])
self.sync_all()
def sync_blocks(self, nodes=None, **kwargs):
sync_blocks(nodes or self.nodes, **kwargs)
def sync_mempools(self, nodes=None, **kwargs):
sync_mempools(nodes or self.nodes, **kwargs)
def sync_all(self, nodes=None, **kwargs):
self.sync_blocks(nodes, **kwargs)
self.sync_mempools(nodes, **kwargs)
# Private helper methods. These should not be accessed by the subclass
# test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(
self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this
# logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel
# was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit(
) else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so
# log files can be concatenated and sorted)
formatter = logging.Formatter(
fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 199-block-long chain
Afterward, create num_nodes copies from the cache."""
# Use node 0 to create the cache for all other nodes
CACHE_NODE_ID = 0
cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
assert self.num_nodes <= MAX_NODES
if not os.path.isdir(cache_node_dir):
self.log.debug(
"Creating cache directory {}".format(cache_node_dir))
initialize_datadir(
self.options.cachedir,
CACHE_NODE_ID,
self.chain)
self.nodes.append(
TestNode(
CACHE_NODE_ID,
cache_node_dir,
chain=self.chain,
extra_conf=["bind=127.0.0.1"],
extra_args=['-disablewallet'],
host=None,
rpc_port=rpc_port(CACHE_NODE_ID),
p2p_port=p2p_port(CACHE_NODE_ID),
timewait=self.rpc_timeout,
bitcoind=self.options.bitcoind,
bitcoin_cli=self.options.bitcoincli,
coverage_dir=None,
cwd=self.options.tmpdir,
emulator=self.options.emulator,
))
if self.options.axionactivation:
self.nodes[CACHE_NODE_ID].extend_default_args(
["-axionactivationtime={}".format(TIMESTAMP_IN_THE_PAST)])
self.start_node(CACHE_NODE_ID)
cache_node = self.nodes[CACHE_NODE_ID]
# Wait for RPC connections to be ready
cache_node.wait_for_rpc_connection()
# Set a time in the past, so that blocks don't end up in the future
cache_node.setmocktime(
cache_node.getblockheader(
cache_node.getbestblockhash())['time'])
# Create a 199-block-long chain; each of the first 4 nodes
# gets 25 mature blocks and 25 immature.
# The 4th node gets only 24 immature blocks so that the very last
# block in the cache does not age too much (have an old tip age).
# This is needed so that we are out of IBD when the test starts,
# see the tip age check in IsInitialBlockDownload().
for i in range(8):
cache_node.generatetoaddress(
nblocks=25 if i != 7 else 24,
address=TestNode.PRIV_KEYS[i % 4].address,
)
assert_equal(cache_node.getblockchaininfo()["blocks"], 199)
# Shut it down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
def cache_path(*paths):
return os.path.join(cache_node_dir, "regtest", *paths)
# Remove empty wallets dir
os.rmdir(cache_path('wallets'))
for entry in os.listdir(cache_path()):
# Only keep chainstate and blocks folder
if entry not in ['chainstate', 'blocks']:
os.remove(cache_path(entry))
for i in range(self.num_nodes):
self.log.debug(
"Copy cache directory {} to node {}".format(
cache_node_dir, i))
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(cache_node_dir, to_dir)
# Overwrite port/rpcport in bitcoin.conf
initialize_datadir(self.options.tmpdir, i, self.chain)
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i, self.chain)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_bitcoind_zmq(self):
"""Skip the running test if bitcoind has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("bitcoind has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
def skip_if_no_wallet_tool(self):
"""Skip the running test if bitcoin-wallet has not been compiled."""
if not self.is_wallet_tool_compiled():
raise SkipTest("bitcoin-wallet has not been compiled")
def skip_if_no_cli(self):
"""Skip the running test if bitcoin-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("bitcoin-cli has not been compiled.")
def is_cli_compiled(self):
"""Checks whether bitcoin-cli was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile, encoding='utf-8'))
return config["components"].getboolean("ENABLE_CLI")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile, encoding='utf-8'))
return config["components"].getboolean("ENABLE_WALLET")
def is_wallet_tool_compiled(self):
"""Checks whether bitcoin-wallet was compiled."""
return self.config["components"].getboolean("ENABLE_WALLET_TOOL")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile, encoding='utf-8'))
return config["components"].getboolean("ENABLE_ZMQ")
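# Hedged illustration (not part of the upstream framework): a minimal subclass
# that satisfies the contract enforced by BitcoinTestMetaClass -- it overrides
# set_test_params() and run_test() and leaves __init__()/main() alone. The node
# count and RPC calls used here are assumptions chosen for the example.
class _ExampleTestSketch(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def run_test(self):
        self.nodes[0].generate(10)
        assert_equal(self.nodes[0].getblockcount(), 10)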
|
[] |
[] |
[
"BITCOINCLI",
"PATH",
"BITCOIND"
] |
[]
|
["BITCOINCLI", "PATH", "BITCOIND"]
|
python
| 3 | 0 | |
sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_receipts.py
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_recognize_receipts.py
DESCRIPTION:
This sample demonstrates how to recognize US sales receipts from a file.
USAGE:
python sample_recognize_receipts.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
"""
import os
class RecognizeReceiptsSample(object):
def recognize_receipts(self):
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "./sample_forms/receipt/contoso-allinone.jpg"))
# [START recognize_receipts]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormRecognizerClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
with open(path_to_sample_forms, "rb") as f:
poller = form_recognizer_client.begin_recognize_receipts(receipt=f)
receipts = poller.result()
for idx, receipt in enumerate(receipts):
print("--------Recognizing receipt #{}--------".format(idx))
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {} has confidence: {}".format(receipt_type.value, receipt_type.confidence))
merchant_name = receipt.fields.get("MerchantName")
if merchant_name:
print("Merchant Name: {} has confidence: {}".format(merchant_name.value, merchant_name.confidence))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {} has confidence: {}".format(transaction_date.value, transaction_date.confidence))
print("Receipt items:")
for idx, item in enumerate(receipt.fields.get("Items").value):
print("...Item #{}".format(idx))
item_name = item.value.get("Name")
if item_name:
print("......Item Name: {} has confidence: {}".format(item_name.value, item_name.confidence))
item_quantity = item.value.get("Quantity")
if item_quantity:
print("......Item Quantity: {} has confidence: {}".format(item_quantity.value, item_quantity.confidence))
item_price = item.value.get("Price")
if item_price:
print("......Individual Item Price: {} has confidence: {}".format(item_price.value, item_price.confidence))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("......Total Item Price: {} has confidence: {}".format(item_total_price.value, item_total_price.confidence))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} has confidence: {}".format(subtotal.value, subtotal.confidence))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {} has confidence: {}".format(tax.value, tax.confidence))
tip = receipt.fields.get("Tip")
if tip:
print("Tip: {} has confidence: {}".format(tip.value, tip.confidence))
total = receipt.fields.get("Total")
if total:
print("Total: {} has confidence: {}".format(total.value, total.confidence))
print("--------------------------------------")
# [END recognize_receipts]
if __name__ == '__main__':
sample = RecognizeReceiptsSample()
sample.recognize_receipts()
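# Hedged illustration (not part of the Azure sample): the two variables named in
# the USAGE docstring can be exported in the shell, or set with placeholder
# values for a quick local experiment before running this script, e.g.:
#
#   import os
#   os.environ.setdefault("AZURE_FORM_RECOGNIZER_ENDPOINT", "https://<your-resource>.cognitiveservices.azure.com/")
#   os.environ.setdefault("AZURE_FORM_RECOGNIZER_KEY", "<your-key>")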
|
[] |
[] |
[
"AZURE_FORM_RECOGNIZER_KEY",
"AZURE_FORM_RECOGNIZER_ENDPOINT"
] |
[]
|
["AZURE_FORM_RECOGNIZER_KEY", "AZURE_FORM_RECOGNIZER_ENDPOINT"]
|
python
| 2 | 0 | |
dgqlTemplate/dgqlTemplate/wsgi.py
|
"""
WSGI config for dgqlTemplate project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dgqlTemplate.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
bootstrap/ldap.go
|
/*
* Copyright 2017-2019 Kopano and its licensors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package bootstrap
import (
"fmt"
"os"
"strings"
"stash.kopano.io/kc/konnect/identifier"
identifierBackends "stash.kopano.io/kc/konnect/identifier/backends"
ldapDefinitions "stash.kopano.io/kc/konnect/identifier/backends/ldap"
"stash.kopano.io/kc/konnect/identity"
identityManagers "stash.kopano.io/kc/konnect/identity/managers"
)
func newLDAPIdentityManager(bs *bootstrap) (identity.Manager, error) {
logger := bs.cfg.Logger
if bs.authorizationEndpointURI.String() != "" {
return nil, fmt.Errorf("ldap backend is incompatible with authorization-endpoint-uri parameter")
}
bs.authorizationEndpointURI.Path = bs.makeURIPath(apiTypeSignin, "/identifier/_/authorize")
if bs.endSessionEndpointURI.String() != "" {
return nil, fmt.Errorf("ldap backend is incompatible with endsession-endpoint-uri parameter")
}
bs.endSessionEndpointURI.Path = bs.makeURIPath(apiTypeSignin, "/identifier/_/endsession")
if bs.signInFormURI.EscapedPath() == "" {
bs.signInFormURI.Path = bs.makeURIPath(apiTypeSignin, "/identifier")
}
if bs.signedOutURI.EscapedPath() == "" {
bs.signedOutURI.Path = bs.makeURIPath(apiTypeSignin, "/goodbye")
}
// Default LDAP attribute mappings.
attributeMapping := map[string]string{
ldapDefinitions.AttributeLogin: os.Getenv("LDAP_LOGIN_ATTRIBUTE"),
ldapDefinitions.AttributeEmail: os.Getenv("LDAP_EMAIL_ATTRIBUTE"),
ldapDefinitions.AttributeName: os.Getenv("LDAP_NAME_ATTRIBUTE"),
ldapDefinitions.AttributeFamilyName: os.Getenv("LDAP_FAMILY_NAME_ATTRIBUTE"),
ldapDefinitions.AttributeGivenName: os.Getenv("LDAP_GIVEN_NAME_ATTRIBUTE"),
ldapDefinitions.AttributeUUID: os.Getenv("LDAP_UUID_ATTRIBUTE"),
fmt.Sprintf("%s_type", ldapDefinitions.AttributeUUID): os.Getenv("LDAP_UUID_ATTRIBUTE_TYPE"),
}
// Add optional LDAP attribute mappings.
if numericUIDAttribute := os.Getenv("LDAP_UIDNUMBER_ATTRIBUTE"); numericUIDAttribute != "" {
attributeMapping[ldapDefinitions.AttributeNumericUID] = numericUIDAttribute
}
// Sub from LDAP attribute mappings.
var subMapping []string
if subMappingString := os.Getenv("LDAP_SUB_ATTRIBUTES"); subMappingString != "" {
subMapping = strings.Split(subMappingString, " ")
}
identifierBackend, identifierErr := identifierBackends.NewLDAPIdentifierBackend(
bs.cfg,
bs.tlsClientConfig,
os.Getenv("LDAP_URI"),
os.Getenv("LDAP_BINDDN"),
os.Getenv("LDAP_BINDPW"),
os.Getenv("LDAP_BASEDN"),
os.Getenv("LDAP_SCOPE"),
os.Getenv("LDAP_FILTER"),
subMapping,
attributeMapping,
)
if identifierErr != nil {
return nil, fmt.Errorf("failed to create identifier backend: %v", identifierErr)
}
fullAuthorizationEndpointURL := withSchemeAndHost(bs.authorizationEndpointURI, bs.issuerIdentifierURI)
fullSignInFormURL := withSchemeAndHost(bs.signInFormURI, bs.issuerIdentifierURI)
fullSignedOutEndpointURL := withSchemeAndHost(bs.signedOutURI, bs.issuerIdentifierURI)
activeIdentifier, err := identifier.NewIdentifier(&identifier.Config{
Config: bs.cfg,
BaseURI: bs.issuerIdentifierURI,
PathPrefix: bs.makeURIPath(apiTypeSignin, ""),
StaticFolder: bs.identifierClientPath,
LogonCookieName: "__Secure-KKT", // Kopano-Konnect-Token
ScopesConf: bs.identifierScopesConf,
WebAppDisabled: bs.identifierClientDisabled,
AuthorizationEndpointURI: fullAuthorizationEndpointURL,
SignedOutEndpointURI: fullSignedOutEndpointURL,
Backend: identifierBackend,
})
if err != nil {
return nil, fmt.Errorf("failed to create identifier: %v", err)
}
err = activeIdentifier.SetKey(bs.encryptionSecret)
if err != nil {
return nil, fmt.Errorf("invalid --encryption-secret parameter value for identifier: %v", err)
}
identityManagerConfig := &identity.Config{
SignInFormURI: fullSignInFormURL,
SignedOutURI: fullSignedOutEndpointURL,
Logger: logger,
ScopesSupported: bs.cfg.AllowedScopes,
}
identifierIdentityManager := identityManagers.NewIdentifierIdentityManager(identityManagerConfig, activeIdentifier)
logger.Infoln("using identifier backed identity manager")
return identifierIdentityManager, nil
}
|
[
"\"LDAP_LOGIN_ATTRIBUTE\"",
"\"LDAP_EMAIL_ATTRIBUTE\"",
"\"LDAP_NAME_ATTRIBUTE\"",
"\"LDAP_FAMILY_NAME_ATTRIBUTE\"",
"\"LDAP_GIVEN_NAME_ATTRIBUTE\"",
"\"LDAP_UUID_ATTRIBUTE\"",
"\"LDAP_UUID_ATTRIBUTE_TYPE\"",
"\"LDAP_UIDNUMBER_ATTRIBUTE\"",
"\"LDAP_SUB_ATTRIBUTES\"",
"\"LDAP_URI\"",
"\"LDAP_BINDDN\"",
"\"LDAP_BINDPW\"",
"\"LDAP_BASEDN\"",
"\"LDAP_SCOPE\"",
"\"LDAP_FILTER\""
] |
[] |
[
"LDAP_BINDDN",
"LDAP_UUID_ATTRIBUTE",
"LDAP_URI",
"LDAP_FAMILY_NAME_ATTRIBUTE",
"LDAP_BASEDN",
"LDAP_SCOPE",
"LDAP_EMAIL_ATTRIBUTE",
"LDAP_FILTER",
"LDAP_SUB_ATTRIBUTES",
"LDAP_UUID_ATTRIBUTE_TYPE",
"LDAP_UIDNUMBER_ATTRIBUTE",
"LDAP_NAME_ATTRIBUTE",
"LDAP_BINDPW",
"LDAP_LOGIN_ATTRIBUTE",
"LDAP_GIVEN_NAME_ATTRIBUTE"
] |
[]
|
["LDAP_BINDDN", "LDAP_UUID_ATTRIBUTE", "LDAP_URI", "LDAP_FAMILY_NAME_ATTRIBUTE", "LDAP_BASEDN", "LDAP_SCOPE", "LDAP_EMAIL_ATTRIBUTE", "LDAP_FILTER", "LDAP_SUB_ATTRIBUTES", "LDAP_UUID_ATTRIBUTE_TYPE", "LDAP_UIDNUMBER_ATTRIBUTE", "LDAP_NAME_ATTRIBUTE", "LDAP_BINDPW", "LDAP_LOGIN_ATTRIBUTE", "LDAP_GIVEN_NAME_ATTRIBUTE"]
|
go
| 15 | 0 | |
src/main/java/com/sikulix/core/SX.java
|
/*
* Copyright (c) 2017 - sikulix.com - MIT license
*/
package com.sikulix.core;
//import com.sikulix.scripting.JythonHelper;
import com.sikulix.devices.local.LocalDevice;
import org.apache.commons.cli.*;
import org.apache.commons.configuration2.PropertiesConfiguration;
import org.apache.commons.configuration2.builder.fluent.Configurations;
import org.apache.commons.configuration2.ex.ConfigurationException;
import java.awt.*;
import java.io.*;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.net.URL;
import java.util.*;
import java.util.List;
import java.util.stream.Collectors;
import static com.sikulix.core.SX.NATIVES.HOTKEY;
import static com.sikulix.core.SX.NATIVES.OPENCV;
import static com.sikulix.core.SX.NATIVES.SYSUTIL;
public class SX {
private static long startTime = new Date().getTime();
//<editor-fold desc="00*** logging">
public static final int INFO = 1;
public static final int DEBUG = 3;
public static final int TRACE = 4;
public static final int ERROR = -1;
public static final int FATAL = -2;
private static final SXLog log = new SXLog();
private static void info(String message, Object... args) {
log.info(message, args);
}
public static void debug(String message, Object... args) {
log.debug(message, args);
}
public static void trace(String message, Object... args) {
log.trace(message, args);
}
public static void error(String message, Object... args) {
log.error(message, args);
}
public static void terminate(int retval, String message, Object... args) {
if (retval != 0) {
log.fatal(message, args);
} else {
info(message, args);
}
System.exit(retval);
}
public static void p(String msg, Object... args) {
log.p(msg, args);
}
public static SXLog getSXLog(String className) {
return getSXLog(className, null, -1);
}
public static SXLog getSXLog(String className, int level) {
return getSXLog(className, null, level);
}
public static SXLog getSXLog(String className, String[] args) {
return getSXLog(className, args, -1);
}
public static SXLog getSXLog(String className, String[] args, int level) {
return new SXLog(className, args, level);
}
//</editor-fold>
//<editor-fold desc="01*** init">
private static String sxInstance = null;
private static boolean shouldLock = false;
private static FileOutputStream isRunningFile = null;
static final Class sxGlobalClassReference = SX.class;
static void sxinit(String[] args) {
if (null == sxInstance) {
sxInstance = "SX INIT DONE";
//<editor-fold desc="*** shutdown hook">
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
if (shouldLock && isSet(isRunningFile)) {
try {
isRunningFile.close();
} catch (IOException ex) {
}
}
for (File f : new File(getSXSYSTEMP()).listFiles(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
File aFile = new File(dir, name);
boolean isObsolete = false;
long lastTime = aFile.lastModified();
if (lastTime == 0) {
return false;
}
if (lastTime < ((new Date().getTime()) - 7 * 24 * 60 * 60 * 1000)) {
isObsolete = true;
}
if (name.contains("BridJExtractedLibraries") && isObsolete) {
return true;
}
if (name.toLowerCase().contains("sikuli")) {
if (name.contains("Sikulix_")) {
if (isObsolete || aFile.equals(new File(getSXTEMP()))) {
return true;
}
} else {
return true;
}
}
return false;
}
})) {
trace("cleanTemp: " + f.getName());
Content.deleteFileOrFolder("#" + f.getAbsolutePath());
}
}
});
//</editor-fold>
// TODO Content class must be initialized for lock in shutdown
Content.start();
//<editor-fold desc="*** sx lock (not active)">
if (shouldLock) {
File fLock = new File(getSXSYSTEMP(), "SikuliX2-i-s-r-u-n-n-i-n-g");
String shouldTerminate = "";
try {
fLock.createNewFile();
isRunningFile = new FileOutputStream(fLock);
if (isNull(isRunningFile.getChannel().tryLock())) {
shouldTerminate = "SikuliX2 already running";
isRunningFile = null;
}
} catch (Exception ex) {
shouldTerminate = "cannot access SX2 lock: " + ex.toString();
isRunningFile = null;
}
if (isSet(shouldTerminate)) {
terminate(1, shouldTerminate);
}
}
//</editor-fold>
// *** command line args
if (!isNull(args)) {
checkArgs(args);
}
trace("!sxinit: entry");
// *** getAll SX options
loadOptions();
// *** getAll the version info
getSXVERSION();
// *** check how we are running
APPTYPE = "from a jar";
String base = Content.whereIs(sxGlobalClassReference);
if (isSet(base)) {
SXBASEJAR = base;
File jarBase = new File(base);
String jarBaseName = jarBase.getName();
File fJarBase = jarBase.getParentFile();
trace("sxRunningAs: runs as %s in: %s", jarBaseName, fJarBase.getAbsolutePath());
if (jarBaseName.contains("classes")) {
SXPROJEKTf = fJarBase.getParentFile().getParentFile();
trace("sxRunningAs: not jar - supposing Maven project: %s", SXPROJEKTf);
APPTYPE = "in Maven project from classes";
} else if ("target".equals(fJarBase.getName())) {
SXPROJEKTf = fJarBase.getParentFile().getParentFile();
trace("sxRunningAs: folder target detected - supposing Maven project: %s", SXPROJEKTf);
APPTYPE = "in Maven project from some jar";
} else {
if (isWindows()) {
if (jarBaseName.endsWith(".exe")) {
setSXRUNNINGASAPP(true);
APPTYPE = "as application .exe";
}
} else if (isMac()) {
if (fJarBase.getAbsolutePath().contains("SikuliX.app/Content")) {
setSXRUNNINGASAPP(true);
APPTYPE = "as application .app";
if (!fJarBase.getAbsolutePath().startsWith("/Applications")) {
APPTYPE += " (not from /Applications folder)";
}
}
}
}
} else {
terminate(1, "sxRunningAs: no valid Java context for SikuliX available "
+ "(java.security.CodeSource.getLocation() is null)");
}
//TODO i18n SXGlobal_sxinit_complete=complete %.3f
trace("!sxinit: exit %.3f (%s)", (new Date().getTime() - startTime) / 1000.0f, APPTYPE);
}
}
//</editor-fold>
//<editor-fold desc="02*** command line args">
private static List<String> sxArgs = new ArrayList<String>();
private static List<String> userArgs = new ArrayList<String>();
private static CommandLine sxCommandArgs = null;
static void checkArgs(String[] args) {
boolean hasUserArgs = false;
for (String arg : args) {
if ("--".equals(arg)) {
hasUserArgs = true;
continue;
}
if (hasUserArgs) {
trace("checkargs: user: %s", arg);
userArgs.add(arg);
} else {
trace("checkargs: --sx: %s", arg);
sxArgs.add(arg);
}
}
if (sxArgs.size() > 0) {
CommandLineParser parser = new PosixParser();
Options opts = new Options();
opts.addOption(OptionBuilder.hasOptionalArg().create('d'));
opts.addOption(OptionBuilder.hasArg().create('o'));
opts.addOption(OptionBuilder.hasArgs().create('r'));
opts.addOption(OptionBuilder.hasArgs().create('t'));
opts.addOption(OptionBuilder.hasArg(false).create('c'));
opts.addOption(OptionBuilder.hasArg(false).create('q'));
try {
sxCommandArgs = parser.parse(opts, sxArgs.toArray(new String[0]));
} catch (ParseException e) {
terminate(1, "checkArgs: %s", e.getMessage());
}
if (!isNull(sxCommandArgs)) {
if (isArg("q")) {
log.globalStop();
} else if (isArg("d")) {
log.globalOn(log.DEBUG);
}
}
}
//TODO make options from SX args
}
private static boolean isArg(String arg) {
return sxCommandArgs != null && sxCommandArgs.hasOption(arg);
}
private static String getArg(String arg) {
if (sxCommandArgs != null && sxCommandArgs.hasOption(arg)) {
String val = sxCommandArgs.getOptionValue(arg);
return val == null ? "" : val;
}
return null;
}
public static String[] getUserArgs() {
return userArgs.toArray(new String[0]);
}
//</editor-fold>
//<editor-fold desc="03*** check how we are running">
private static String APPTYPE = "?APPTYPE?";
private static String SXBASEJAR = null;
private static File SXPROJEKTf;
private static String SXJYTHONMAVEN;
private static String SXJYTHONLOCAL;
private static String SXJRUBYMAVEN;
private static String SXJRUBYLOCAL;
private static Map<String, String> SXTESSDATAS = new HashMap<String, String>();
private static String SXMAVEN = "https://repo1.maven.org/maven2/";
private static String SXOSSRH = "https://oss.sonatype.org/content/groups/public/";
private static String BASECLASS = "";
public static void setSXBASECLASS() {
log.trace("setBaseClass: start");
StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
boolean takeit = false;
for (StackTraceElement traceElement : stackTrace) {
String tName = traceElement.getClassName();
if (takeit) {
BASECLASS = tName;
break;
}
if (tName.equals(SX.class.getName())) {
takeit = true;
}
}
}
public static String getSXBASECLASS() {
return BASECLASS;
}
//</editor-fold>
//<editor-fold desc="04*** getAll SX options at startup">
private static File fOptions = null;
private static String fnOptions = "sxoptions.txt";
private static PropertiesConfiguration SXOPTIONS = null;
private static void loadOptions() {
boolean success = true;
URL urlOptions = SX.class.getClassLoader().getResource("Settings/sxoptions.txt");
if (!isNull(urlOptions)) {
Configurations configs = new Configurations();
try {
SXOPTIONS = configs.properties(urlOptions);
} catch (ConfigurationException cex) {
success = false;
}
} else {
success = false;
}
if (!success) {
terminate(1, "loadOptions: SX Options not available: %s", urlOptions);
}
PropertiesConfiguration extraOptions = null;
File aFile = null;
String argFile = getArg("o");
if (!isNull(argFile)) {
aFile = Content.asFile(argFile);
if (!aFile.isDirectory()) {
if (aFile.exists()) {
fOptions = aFile;
trace("loadOptions: arg: %s (from arg -o)", aFile);
} else {
fnOptions = aFile.getName();
trace("loadOptions: file name given: %s (from arg -o)", fnOptions);
}
}
}
if (isNull(fOptions)) {
for (String sFile : new String[]{getSXUSERWORK(), getSXUSERHOME(), getSXSTORE()}) {
if (isNull(sFile)) {
continue;
}
aFile = Content.asFile(sFile);
trace("loadOptions: check: %s", aFile);
fOptions = new File(aFile, fnOptions);
if (fOptions.exists()) {
break;
} else {
fOptions = null;
}
}
}
if (fOptions != null) {
trace("loadOptions: found Options file at: %s", fOptions);
Configurations configs = new Configurations();
try {
extraOptions = configs.properties(fOptions);
} catch (ConfigurationException cex) {
error("loadOptions: Options not valid: %s", cex.getMessage());
}
if (!isNull(extraOptions)) {
mergeExtraOptions(SXOPTIONS, extraOptions);
}
} else {
trace("loadOptions: no extra Options file found");
}
}
private static void mergeExtraOptions(PropertiesConfiguration baseOptions, PropertiesConfiguration extraOptions) {
if (isNull(extraOptions) || extraOptions.size() == 0) {
return;
}
trace("loadOptions: have to merge extra Options");
Iterator<String> allKeys = extraOptions.getKeys();
while (allKeys.hasNext()) {
String key = allKeys.next();
if ("sxversion".equals(key)) {
baseOptions.setProperty("sxversion_saved", extraOptions.getProperty(key));
continue;
}
if ("sxbuild".equals(key)) {
baseOptions.setProperty("sxbuild_saved", extraOptions.getProperty(key));
continue;
}
Object value = baseOptions.getProperty(key);
if (isNull(value)) {
baseOptions.addProperty(key, extraOptions.getProperty(key));
trace("Option added: %s", key);
} else {
Object extraValue = extraOptions.getProperty(key);
if (!value.getClass().getName().equals(extraValue.getClass().getName()) ||
!value.toString().equals(extraValue.toString())) {
baseOptions.setProperty(key, extraValue);
trace("Option changed: %s = %s", key, extraValue);
}
}
}
}
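  // Illustrative example of the merge behaviour above (option names and values are made up):
  // if the bundled Settings/sxoptions.txt contains "sxversion = 2.0.0" and a user file found
  // via -o or in one of the searched folders contains "sxversion = 1.9.9" and "mykey = 42",
  // then after mergeExtraOptions() the base options still hold sxversion = 2.0.0, additionally
  // hold sxversion_saved = 1.9.9 (the user's value is preserved under the *_saved key) and
  // gain mykey = 42, because that key was not present in the base options.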
//</editor-fold>
//<editor-fold desc="05*** handle options at runtime">
public static void loadOptions(String fpOptions) {
error("loadOptions: not yet implemented");
}
public static boolean saveOptions(String fpOptions) {
error("saveOptions: not yet implemented");
return false;
}
public static boolean saveOptions() {
try {
SXOPTIONS.write(new FileWriter(Content.asFile(SX.getSXSTORE(), "sxoptions.txt")));
} catch (Exception e) {
log.error("saveOptions: %s", e);
}
return false;
}
public static boolean hasOptions() {
return SXOPTIONS != null && SXOPTIONS.size() > 0;
}
public static boolean isOption(String pName) {
return isOption(pName, false);
}
public static boolean isOption(String pName, Boolean bDefault) {
if (SXOPTIONS == null) {
return bDefault;
}
String pVal = SXOPTIONS.getString(pName, bDefault.toString()).toLowerCase();
if (pVal.contains("yes") || pVal.contains("true") || pVal.contains("on")) {
return true;
}
return false;
}
public static String getOption(String pName) {
return getOption(pName, "");
}
public static String getOption(String pName, String sDefault) {
if (!hasOptions()) {
return "";
}
return SXOPTIONS.getString(pName, sDefault);
}
public static void setOption(String pName, String sValue) {
SXOPTIONS.setProperty(pName, sValue);
}
public static double getOptionNumber(String pName) {
return getOptionNumber(pName, 0);
}
public static double getOptionNumber(String pName, double nDefault) {
double nVal = SXOPTIONS.getDouble(pName, nDefault);
return nVal;
}
public static Map<String, String> getOptions() {
Map<String, String> mapOptions = new HashMap<String, String>();
if (hasOptions()) {
Iterator<String> allKeys = SXOPTIONS.getKeys();
while (allKeys.hasNext()) {
String key = allKeys.next();
mapOptions.put(key, getOption(key));
}
}
return mapOptions;
}
public static void dumpOptions() {
if (hasOptions()) {
p("*** options dump");
for (String sOpt : getOptions().keySet()) {
p("%s = %s", sOpt, getOption(sOpt));
}
p("*** options dump end");
}
}
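  // Illustrative usage of the runtime option API (option names and values are hypothetical):
  //   SX.setOption("myapp.feature", "on");
  //   if (SX.isOption("myapp.feature")) { ... }          // "on", "yes" and "true" count as true
  //   String mode = SX.getOption("myapp.mode", "auto");  // returns "auto" if the key is missing
  //   double wait = SX.getOptionNumber("myapp.wait", 3.0);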
//</editor-fold>
//<editor-fold desc="06*** system/java version info">
/**
   * @return path separator : or ;
*/
public static String getSeparator() {
if (isWindows()) {
return ";";
}
return ":";
}
static enum theSystem {
WIN, MAC, LUX, FOO
}
/**
* ***** Property SXSYSTEM *****
*
* @return info about the system running on
*/
public static String getSXSYSTEM() {
if (isNotSet(SYSTEM)) {
String osName = System.getProperty("os.name");
String osVersion = System.getProperty("os.version");
if (osName.toLowerCase().startsWith("windows")) {
SYS = theSystem.WIN;
osName = "Windows";
} else if (osName.toLowerCase().startsWith("mac")) {
SYS = theSystem.MAC;
osName = "Mac OSX";
} else if (osName.toLowerCase().startsWith("linux")) {
SYS = theSystem.LUX;
osName = "Linux";
} else {
terminate(-1, "running on not supported System: %s (%s)", osName, osVersion);
}
SYSTEMVERSION = osVersion;
SYSTEM = String.format("%s (%s)", osName, SYSTEMVERSION);
}
return SYSTEM;
}
static String SYSTEM = "";
static theSystem SYS = theSystem.FOO;
private static String getSYSGENERIC() {
getSXSYSTEM();
if (isWindows()) {
return "windows";
}
if (isMac()) {
return "mac";
}
if (isLinux()) {
return "linux";
}
return "unknown";
}
/**
* ***** Property SXSYSTEMVERSION *****
*
* @return the running system's version info
*/
public static String getSXSYSTEMVERSION() {
if (isNotSet(SYSTEMVERSION)) {
getSXSYSTEM();
}
return SYSTEMVERSION;
}
static String SYSTEMVERSION = "";
/**
* @return true/false
*/
public static boolean isWindows() {
getSXSYSTEM();
return theSystem.WIN.equals(SYS);
}
/**
* @return true/false
*/
public static boolean isLinux() {
getSXSYSTEM();
return theSystem.LUX.equals(SYS);
}
/**
* @return true/false
*/
public static boolean isMac() {
getSXSYSTEM();
return theSystem.MAC.equals(SYS);
}
public static boolean isOSX10() {
return getSXSYSTEMVERSION().startsWith("10.10.") || getSXSYSTEMVERSION().startsWith("10.11.");
}
/**
* ***** Property RUNNINGASAPP *****
*
   * @return whether running as .exe/.app
*/
public static boolean isSXRUNNINGASAPP() {
if (isNotSet(RUNNINGASAPP)) {
//TODO getASAPP detect running as .exe/.app
setSXRUNNINGASAPP(false);
}
return RUNNINGASAPP;
}
static Boolean RUNNINGASAPP = null;
public static boolean setSXRUNNINGASAPP(boolean val) {
RUNNINGASAPP = val;
return RUNNINGASAPP;
}
/**
* ***** Property JAVAHOME *****
*
* @return the Java installation path
*/
public static String getSXJAVAHOME() {
if (isNotSet(JAVAHOME)) {
String jhome = System.getProperty("java.home");
if (isSet(jhome)) {
JAVAHOME = jhome;
}
}
return JAVAHOME;
}
static String JAVAHOME = "";
/**
* ***** Property JAVAVERSION *****
*
* @return Java version info
*/
public static String getSXJAVAVERSION() {
if (isNotSet(JAVAVERSION)) {
String vJava = System.getProperty("java.runtime.version");
String vVM = System.getProperty("java.vm.version");
String vClass = System.getProperty("java.class.version");
String vSysArch = System.getProperty("os.arch");
int javaVersion = 0;
if (vSysArch == null || !vSysArch.contains("64")) {
terminate(1, "Java arch not 64-Bit or not detected: JavaSystemProperty::os.arch = %s", vSysArch);
}
try {
javaVersion = Integer.parseInt(vJava.substring(2, 3));
JAVAVERSION = String.format("Java %s vm %s class %s arch %s", vJava, vVM, vClass, vSysArch);
} catch (Exception ex) {
terminate(1, "Java version not detected: JavaSystemProperty::java.runtime.version = %s", vJava);
}
if (javaVersion < 7 || javaVersion > 8) {
terminate(1, "Java version must be 7 or 8");
}
}
return JAVAVERSION;
}
static String JAVAVERSION = "";
/**
* ***** Property JAVAVERSIONNUMBER *****
*
* @return Java version number
*/
public static int getSXJAVAVERSIONNUMBER() {
if (isNotSet(JAVAVERSIONNUMBER)) {
      // JAVAVERSION has the form "Java 1.8.0_xx vm ..."; the major version digit follows "1."
      JAVAVERSIONNUMBER = Integer.parseInt(getSXJAVAVERSION().substring(7, 8));
}
return JAVAVERSIONNUMBER;
}
static Integer JAVAVERSIONNUMBER = null;
public static boolean isJava8() {
return getSXJAVAVERSIONNUMBER() > 7;
}
public static boolean isJava7() {
return getSXJAVAVERSIONNUMBER() > 6;
}
//</editor-fold>
//<editor-fold desc="07*** temp folders">
/**
* ***** Property SYSTEMP *****
*
* @return the path for temporary stuff according to JavaSystemProperty::java.io.tmpdir
*/
public static String getSXSYSTEMP() {
if (isNotSet(SYSTEMP)) {
String tmpdir = System.getProperty("java.io.tmpdir");
if (!Content.existsFile(tmpdir)) {
terminate(1, "JavaSystemProperty::java.io.tmpdir not valid");
}
SYSTEMP = Content.asFile(tmpdir).getAbsolutePath();
}
return SYSTEMP;
}
static String SYSTEMP = "";
/**
* ***** Property TEMP *****
*
* @return the path to the area where Sikulix stores temporary stuff (located in SYSTEMP)
*/
public static String getSXTEMP() {
if (isNotSet(TEMP)) {
      // use the getter so SYSTEMP is initialized and resolve the entry against the temp folder
      for (String aFile : Content.asFile(getSXSYSTEMP()).list()) {
        if ((aFile.startsWith("Sikulix") && (new File(getSXSYSTEMP(), aFile).isFile()))
|| (aFile.startsWith("jffi") && aFile.endsWith(".tmp"))) {
Content.deleteFileOrFolder(new File(getSXSYSTEMP(), aFile));
}
}
File fSXTempPath = Content.asFolder(getSXSYSTEMP(), String.format("Sikulix_%d", getRandomInt()));
if (!Content.existsFile(fSXTempPath)) {
terminate(1, "getTEMP: could not create: %s", fSXTempPath.getAbsolutePath());
}
TEMP = fSXTempPath.getAbsolutePath();
}
return TEMP;
}
static String TEMP = "";
/**
* @return a positive random int > 0 using Java's Random().nextInt()
*/
static int getRandomInt() {
    // nextInt(bound) yields a value in 0..bound-1, so the result is always in 1..Integer.MAX_VALUE
    // (1 + nextInt() could overflow to Integer.MIN_VALUE, which negation cannot make positive)
    return 1 + new Random().nextInt(Integer.MAX_VALUE);
}
//</editor-fold>
//<editor-fold desc="08*** user/work/appdata folder">
/**
* ***** Property USERHOME *****
*
* @return the system specific User's home folder
*/
public static String getSXUSERHOME() {
if (isNotSet(USERHOME)) {
String aFolder = System.getProperty("user.home");
if (!Content.existsFile(aFolder)) {
terminate(-1, "getUSERHOME: JavaSystemProperty::user.home not valid");
}
USERHOME = Content.asFile(aFolder).getAbsolutePath();
}
return USERHOME;
}
static String USERHOME = "";
/**
* ***** Property USERWORK *****
*
* @return the working folder from JavaSystemProperty::user.dir
*/
public static String getSXUSERWORK() {
if (isNotSet(USERWORK)) {
String aFolder = System.getProperty("user.dir");
if (!Content.existsFile(aFolder)) {
terminate(-1, "getUSERWORK: JavaSystemProperty::user.dir not valid");
}
USERWORK = Content.asFolder(aFolder).getAbsolutePath();
}
return USERWORK;
}
static String USERWORK = "";
/**
* ***** Property SYSAPPDATA *****
*
* @return the system specific path to the users application storage area
*/
public static String getSXSYSAPPDATA() {
if (isNotSet(SYSAPPDATA)) {
String appDataMsg = "";
File fSysAppPath = null;
if (isWindows()) {
String sDir = System.getenv("APPDATA");
if (isNotSet(sDir)) {
terminate(1, "setSYSAPP: Windows: %s not valid", "%APPDATA%");
}
fSysAppPath = Content.asFile(sDir);
} else if (isMac()) {
fSysAppPath = Content.asFile(getSXUSERHOME(), "Library/Application Support");
} else if (isLinux()) {
fSysAppPath = Content.asFile(getSXUSERHOME());
SXAPPdefault = ".Sikulix/SX2";
}
SYSAPPDATA = fSysAppPath.getAbsolutePath();
}
return SYSAPPDATA;
}
static String SYSAPPDATA = "";
//</editor-fold>
//<editor-fold desc="09*** SX app data folder">
public static String getSXWEBHOME() {
if (isNotSet(SXWEBHOME)) {
SXWEBHOME = SXWEBHOMEdefault;
}
return SXWEBHOME;
}
static String SXWEBHOME = "";
static String SXWEBHOMEdefault = "http://sikulix.com";
public static String getSXWEBDOWNLOAD() {
if (isNotSet(SXWEBDOWNLOAD)) {
SXWEBDOWNLOAD = SXWEBDOWNLOADdefault;
}
return SXWEBDOWNLOAD;
}
static String SXWEBDOWNLOAD = "";
static String SXWEBDOWNLOADdefault = "http://download.sikulix.com";
/**
* ***** Property SXAPPSTORE *****
*
* @return the path to the area in SYSAPPDATA where Sikulix stores all stuff
*/
public static String getSXAPP() {
if (isNotSet(SXAPPSTORE)) {
File fDir = Content.asFolder(getSXSYSAPPDATA(), SXAPPdefault);
if (!Content.existsFile(fDir)) {
terminate(1, "setSXAPP: folder not available or cannot be created: %s", fDir);
}
SXAPPSTORE = fDir.getAbsolutePath();
}
return SXAPPSTORE;
}
static String SXAPPSTORE = "";
static String SXAPPdefault = "Sikulix/SX2";
/**
* ***** Property SXDOWNLOADS *****
*
* @return path where Sikulix stores downloaded stuff
*/
public static String getSXDOWNLOADS() {
if (isNotSet(SXDOWNLOADS)) {
setSXDOWNLOADS(getSXAPP(), SXDOWNLOADSdefault);
}
return SXDOWNLOADS;
}
static String SXDOWNLOADS = "";
static String SXDOWNLOADSdefault = "Downloads";
public static String setSXDOWNLOADS(Object... dirs) {
File fDir = Content.asFolder(dirs);
if (!Content.existsFile(fDir)) {
terminate(1, "setSXDOWNLOADS: not posssible or not valid: %s", fDir);
}
SXDOWNLOADS = fDir.getAbsolutePath();
return SXDOWNLOADS;
}
/**
* ***** Property SXNATIVE *****
*
* @return path where Sikulix stores the native stuff
*/
public static String getSXNATIVE() {
if (isNotSet(SXNATIVE)) {
setSXNATIVE(getSXAPP(), SXNATIVEdefault);
}
return SXNATIVE;
}
static String SXNATIVE = "";
static String SXNATIVEdefault = "Native";
public static String setSXNATIVE(Object... dirs) {
File fDir = Content.asFolder(dirs);
if (isNotSet(fDir) || !Content.existsFile(fDir) || !fDir.isDirectory()) {
terminate(1, "setSXNATIVE: not posssible or not valid: %s", fDir);
}
SXNATIVE = fDir.getAbsolutePath();
return SXNATIVE;
}
/**
* ***** Property SXLIB *****
*
* @return path to folder containing complementary stuff for scripting languages
*/
public static String getSXLIB() {
if (isNotSet(SXLIB)) {
setSXLIB(getSXAPP(), SXLIBdefault);
}
return SXLIB;
}
static String SXLIB = "";
static String SXLIBdefault = "LIB";
public static String setSXLIB(Object... dirs) {
File fDir = Content.asFolder(dirs, null);
if (!Content.existsFile(fDir)) {
terminate(1, "setSXLIB: not posssible or not valid: %s", fDir);
}
SXLIB = fDir.getAbsolutePath();
return SXLIB;
}
/**
* ***** Property SXSTORE *****
*
* @return path where other stuff is found or stored at runtime (options, logs, ...)
*/
public static String getSXSTORE() {
if (isNotSet(SXSTORE)) {
setSXSTORE(getSXAPP(), SXSTOREdefault);
}
return SXSTORE;
}
static String SXSTORE = "";
static String SXSTOREdefault = "Store";
public static String setSXSTORE(Object... dirs) {
File fDir = Content.asFolder(dirs);
if (!Content.existsFile(fDir)) {
terminate(1, "setSXSTORE: not posssible or not valid: %s", fDir);
}
SXSTORE = fDir.getAbsolutePath();
return SXSTORE;
}
/**
* ***** Property SXEDITOR *****
*
* @return path to folder containing supporting stuff for Sikulix IDE
*/
public static String getSXEDITOR() {
if (isNotSet(SXEDITOR)) {
setSXEDITOR(getSXAPP(), SXEDITORdefault);
}
return SXEDITOR;
}
static String SXEDITOR = "";
static String SXEDITORdefault = "Extensions/SXEditor";
public static String setSXEDITOR(Object... dirs) {
File fDir = Content.asFolder(dirs);
if (!Content.existsFile(fDir)) {
terminate(1, "setSXEDITOR: not posssible or not valid: %s", fDir);
}
SXEDITOR = fDir.getAbsolutePath();
return SXEDITOR;
}
/**
* ***** Property SXTESSERACT *****
*
* @return path to folder for stuff supporting Tesseract
*/
public static String getSXTESSERACT() {
if (isNotSet(SXTESSERACT)) {
setSXTESSERACT(getSXAPP(), SXTESSERACTdefault);
}
return SXTESSERACT;
}
static String SXTESSERACT = "";
static String SXTESSERACTdefault = "TESSERACT";
public static String setSXTESSERACT(Object... dirs) {
File fDir = Content.asFolder(dirs);
if (!Content.existsFile(fDir)) {
terminate(1, "setSXTESSERACT: not posssible or not valid: %s", fDir);
}
SXTESSERACT = fDir.getAbsolutePath();
return SXTESSERACT;
}
/**
* ***** Property EXTENSIONSFOLDER *****
*
   * @return path to folder containing extensions or plugins
*/
public static String getSXEXTENSIONSFOLDER() {
if (isNotSet(EXTENSIONSFOLDER)) {
setSXEXTENSIONS(getSXAPP(), EXTENSIONSdefault);
}
return EXTENSIONSFOLDER;
}
static String EXTENSIONSFOLDER = "";
static String EXTENSIONSdefault = "Extensions";
static String[] theExtensions = new String[]{"selenium4sikulix"};
public static String setSXEXTENSIONS(Object... dirs) {
File fDir = Content.asFolder(dirs);
if (!Content.existsFile(fDir)) {
terminate(1, "setSXEXTENSIONS: not posssible or not valid: %s", fDir);
}
EXTENSIONSFOLDER = fDir.getAbsolutePath();
return EXTENSIONSFOLDER;
}
/**
* ***** Property SXIMAGES *****
*
   * @return path where Sikulix stores images
*/
public static String getSXIMAGES() {
if (isNotSet(SXIMAGES)) {
setSXIMAGES(getSXAPP(), SXIMAGESdefault);
}
return SXIMAGES;
}
static String SXIMAGES = "";
static String SXIMAGESdefault = "Images";
public static String setSXIMAGES(Object... dirs) {
File fDir = Content.asFolder(dirs);
if (!Content.existsFile(fDir)) {
terminate(1, "setSXIMAGES: not posssible or not valid: %s", fDir);
}
SXIMAGES = fDir.getAbsolutePath();
return SXIMAGES;
}
//</editor-fold>
//<editor-fold desc="10*** SX version info">
/**
* ***** Property VERSION *****
*
* @return Sikulix version
*/
public static String getSXVERSION() {
if (isNotSet(VERSION)) {
String sxVersion = "?sxVersion?";
String sxBuild = "?sxBuild?";
String sxVersionShow = "?sxVersionShow?";
String sxStamp = "?sxStamp?";
sxVersion = SXOPTIONS.getString("sxversion");
sxBuild = SXOPTIONS.getString("sxbuild");
sxBuild = sxBuild.replaceAll("\\-", "");
sxBuild = sxBuild.replaceAll("_", "");
sxBuild = sxBuild.replaceAll("\\:", "");
String sxlocalrepo = Content.slashify(SXOPTIONS.getString("sxlocalrepo"), true);
String sxJythonVersion = SXOPTIONS.getString("sxjython");
String sxJRubyVersion = SXOPTIONS.getString("sxjruby");
debug("getVERSION: version: %s build: %s", sxVersion, sxBuild);
sxStamp = String.format("%s_%s", sxVersion, sxBuild);
// used for download of production versions
String dlProdLink = "https://launchpad.net/raiman/sikulix2013+/";
String dlProdLinkSuffix = "/+download/";
// used for download of development versions (nightly builds)
String dlDevLink = "http://nightly.sikuli.de/";
SXJYTHONMAVEN = "org/python/jython-standalone/"
+ sxJythonVersion + "/jython-standalone-" + sxJythonVersion + ".jar";
SXJYTHONLOCAL = sxlocalrepo + SXJYTHONMAVEN;
SXJRUBYMAVEN = "org/jruby/jruby-complete/"
+ sxJRubyVersion + "/jruby-complete-" + sxJRubyVersion + ".jar";
SXJRUBYLOCAL = sxlocalrepo + SXJRUBYMAVEN;
SXTESSDATAS.put("eng", "http://download.sikulix.com/tesseract-ocr-3.02.eng.tar.gz");
sxLibsCheckName = String.format(sxLibsCheckStamp, sxStamp);
VERSION = sxVersion;
BUILD = sxBuild;
VERSIONSHOW = String.format("%s (%s)", sxVersion, sxBuild);
STAMP = sxStamp;
}
return VERSION;
}
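  // Worked example for the values assembled above (all numbers are hypothetical): with
  // sxversion = 2.0.0, sxbuild = 2017-08-01_10:00 and sxjython = 2.7.1, the build string is
  // stripped to 201708011000, STAMP becomes 2.0.0_201708011000, sxLibsCheckName becomes
  // MadeForSikuliX_2.0.0_201708011000 and SXJYTHONMAVEN becomes
  // org/python/jython-standalone/2.7.1/jython-standalone-2.7.1.jar
  // (SXJYTHONLOCAL prefixes the same path with the local repository folder).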
static String VERSION = "";
/**
* ***** Property BUILD *****
*
* @return Sikulix build timestamp
*/
public static String getSXBUILD() {
if (isNotSet(BUILD)) {
getSXVERSION();
}
return BUILD;
}
static String BUILD = "";
/**
* ***** Property VERSIONSHOW *****
*
* @return Version (Build)
*/
public static String getSXVERSIONSHOW() {
if (isNotSet(VERSIONSHOW)) {
getSXVERSION();
}
return VERSIONSHOW;
}
static String VERSIONSHOW = "";
/**
* ***** Property STAMP *****
*
* @return Version_Build
*/
public static String getSXSTAMP() {
if (isNotSet(STAMP)) {
getSXVERSION();
}
return STAMP;
}
static String STAMP = "";
public static boolean isSnapshot() {
return getSXVERSION().endsWith("-SNAPSHOT");
}
//</editor-fold>
//<editor-fold desc="11*** monitor / local device">
/**
   * checks whether Java runs with a valid GraphicsEnvironment (usually meaning real screens are connected)
*
* @return false if Java thinks it has access to screen(s), true otherwise
*/
public static boolean isHeadless() {
return GraphicsEnvironment.isHeadless();
}
public static boolean isTravisCI() {
return SX.isSet(System.getenv("TRAVIS"), "true");
}
/**
* ***** Property LOCALDEVICE *****
*
   * @return the LocalDevice instance (started on first access)
*/
public static LocalDevice getSXLOCALDEVICE() {
if (isNotSet(LOCALDEVICE)) {
LOCALDEVICE = (LocalDevice) new LocalDevice().start();
}
return LOCALDEVICE;
}
public static boolean isSetSXLOCALDEVICE() {
return SX.isNotNull(LOCALDEVICE);
}
public static void setSXLOCALDEVICE(LocalDevice LOCALDEVICE) {
SX.LOCALDEVICE = LOCALDEVICE;
}
private static LocalDevice LOCALDEVICE = null;
//</editor-fold>
//<editor-fold desc="12*** handle native libs">
public static File fLibsProvided;
public static boolean useLibsProvided;
public static String linuxNeededLibs = "";
public static String linuxAppSupport = "";
static boolean areLibsExported = false;
static String fpJarLibs = null;
static Map<NATIVES, Boolean> libsLoaded = new HashMap<NATIVES, Boolean>();
static String sxLibsCheckStamp = "MadeForSikuliX_%s";
static String sflibsCheckFileStored = "MadeForSikuliX2";
public static String sxLibsCheckName = "";
public static String sfLibOpencvJava = "_ext_opencv_java";
public static String sfLibJXGrabKey = "_ext_JXGrabKey";
public static String sfLibJIntellitype = "_ext_JIntellitype";
public static String sfLibWinUtil = "_ext_WinUtil";
public static String sfLibMacUtil = "_ext_MacUtil";
public static String sfLibMacHotkey = "_ext_MacHotkeyManager";
static class LibsFilter implements FilenameFilter {
String sAccept = "";
public LibsFilter(String toAccept) {
sAccept = toAccept;
}
@Override
public boolean accept(File dir, String name) {
if (dir.getPath().contains(sAccept)) {
return true;
}
return false;
}
}
static void addToWindowsSystemPath(File fLibsFolder) {
String syspath = SXJNA.WinKernel32.getEnvironmentVariable("PATH");
if (syspath == null) {
terminate(1, "addToWindowsSystemPath: cannot access system path");
} else {
      // use the literal String.replace here: replaceAll() would treat the lone backslash in the
      // replacement string as an incomplete escape and throw at runtime
      String libsPath = (fLibsFolder.getAbsolutePath()).replace("/", "\\");
if (!syspath.toUpperCase().contains(libsPath.toUpperCase())) {
if (!SXJNA.WinKernel32.setEnvironmentVariable("PATH", libsPath + ";" + syspath)) {
terminate(999, "", "");
}
syspath = SXJNA.WinKernel32.getEnvironmentVariable("PATH");
if (!syspath.toUpperCase().contains(libsPath.toUpperCase())) {
terminate(1, "addToWindowsSystemPath: did not work: %s", syspath);
}
debug("addToWindowsSystemPath: added: %s", libsPath);
}
}
}
static boolean checkJavaUsrPath(File fLibsFolder) {
String fpLibsFolder = fLibsFolder.getAbsolutePath();
Field usrPathsField = null;
boolean contained = false;
try {
usrPathsField = ClassLoader.class.getDeclaredField("usr_paths");
} catch (NoSuchFieldException ex) {
error("checkJavaUsrPath: getAll (%s)", ex);
} catch (SecurityException ex) {
error("checkJavaUsrPath: getAll (%s)", ex);
}
if (usrPathsField != null) {
usrPathsField.setAccessible(true);
try {
//getAll array of paths
String[] javapaths = (String[]) usrPathsField.get(null);
//check if the path to add is already present
for (String p : javapaths) {
if (new File(p).equals(fLibsFolder)) {
contained = true;
break;
}
}
//add the new path
if (!contained) {
final String[] newPaths = Arrays.copyOf(javapaths, javapaths.length + 1);
newPaths[newPaths.length - 1] = fpLibsFolder;
usrPathsField.set(null, newPaths);
debug("checkJavaUsrPath: added to ClassLoader.usrPaths");
contained = true;
}
} catch (IllegalAccessException ex) {
error("checkJavaUsrPath: set (%s)", ex);
} catch (IllegalArgumentException ex) {
error("checkJavaUsrPath: set (%s)", ex);
}
return contained;
}
return false;
}
static void exportLibraries() {
if (areLibsExported) {
return;
}
File fSXNative = Content.asFile(getSXNATIVE());
if (!new File(fSXNative, sxLibsCheckName).exists()) {
debug("exportLibraries: folder empty or has wrong content");
Content.deleteFileOrFolder(fSXNative);
}
if (fSXNative.exists()) {
debug("exportLibraries: folder exists: %s", fSXNative);
} else {
fSXNative.mkdirs();
if (!fSXNative.exists()) {
terminate(1, "exportLibraries: folder not available: %s", fSXNative);
}
debug("exportLibraries: new folder: %s", fSXNative);
fpJarLibs = "/Native/" + getSYSGENERIC();
extractLibraries(sxGlobalClassReference, fpJarLibs, fSXNative);
try {
extractLibraries(Class.forName("com.sikulix.opencv.Sikulix"), fpJarLibs, fSXNative);
} catch (ClassNotFoundException e) {
log.error("exportLibraries: package com.sikulix.opencv not on classpath");
}
if (!new File(fSXNative, sflibsCheckFileStored).exists()) {
terminate(1, "exportLibraries: did not work");
}
new File(fSXNative, sflibsCheckFileStored).renameTo(new File(fSXNative, sxLibsCheckName));
if (!new File(fSXNative, sxLibsCheckName).exists()) {
terminate(1, "exportLibraries: did not work");
}
}
for (String aFile : fSXNative.list()) {
if (aFile.contains("opencv_java")) {
sfLibOpencvJava = aFile;
} else if (aFile.contains("JXGrabKey")) {
sfLibJXGrabKey = aFile;
} else if (aFile.contains("JIntellitype")) {
sfLibJIntellitype = aFile;
} else if (aFile.contains("WinUtil")) {
sfLibWinUtil = aFile;
} else if (aFile.contains("MacUtil")) {
sfLibMacUtil = aFile;
} else if (aFile.contains("MacHotkey")) {
sfLibMacHotkey = aFile;
}
}
areLibsExported = true;
}
private static void extractLibraries(Class classRef, String from, File fTo) {
String classLocation = Content.whereIs(classRef);
List<String> libraries;
String source = classLocation;
String sourceType = " from jar";
if (classLocation.endsWith(".jar")) {
libraries = Content.extractResourcesToFolderFromJar(classLocation, from, fTo, null);
} else {
URL uLibsFrom = classRef.getResource(from);
libraries = Content.extractResourcesToFolder(from, fTo, null);
source = uLibsFrom.toString();
sourceType = "";
}
int libCount = libraries.size();
if (libCount == 0) {
error("extractLibraries: (none)%s: %s", sourceType, source);
} else {
if (libraries.contains("MadeForSikuliX2")) {
libCount--;
}
trace("extractLibraries: (%d)%s: %s", libCount, sourceType, source);
}
}
public static enum NATIVES {
OPENCV, TESSERACT, SYSUTIL, HOTKEY
}
public static boolean loadNative(NATIVES type) {
boolean success = true;
if (libsLoaded.isEmpty()) {
for (NATIVES nType : NATIVES.values()) {
libsLoaded.put(nType, false);
}
exportLibraries();
if (isWindows()) {
addToWindowsSystemPath(Content.asFile(getSXNATIVE()));
if (!checkJavaUsrPath(Content.asFile(getSXNATIVE()))) {
error("exportLibraries: JavaUserPath: see errors - might not work and crash later");
}
String lib = "jawt.dll";
File fJawtDll = new File(Content.asFile(getSXNATIVE()), lib);
Content.deleteFileOrFolder(fJawtDll);
Content.xcopy(new File(getSXJAVAHOME() + "/bin/" + lib), fJawtDll);
if (!fJawtDll.exists()) {
terminate(1, "exportLibraries: problem copying %s", fJawtDll);
}
}
}
if (OPENCV.equals(type) && !libsLoaded.get(OPENCV)) {
loadNativeLibrary(sfLibOpencvJava);
} else if (SYSUTIL.equals(type) && !libsLoaded.get(SYSUTIL)) {
if (isWindows()) {
loadNativeLibrary(sfLibWinUtil);
} else if (isMac()) {
loadNativeLibrary(sfLibMacUtil);
}
} else if (HOTKEY.equals(type) && !libsLoaded.get(HOTKEY)) {
if (isWindows()) {
loadNativeLibrary(sfLibJIntellitype);
} else if (isMac()) {
loadNativeLibrary(sfLibMacHotkey);
} else if (isLinux()) {
loadNativeLibrary(sfLibJXGrabKey);
}
} else {
success = false;
}
if (success) {
libsLoaded.put(type, true);
}
return success;
}
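  // Illustrative usage (a sketch, not called from within this class): native support is loaded
  // lazily per feature; the first call also exports the bundled libraries and, on Windows,
  // extends the system PATH. A repeated call for an already loaded type returns false and does
  // not reload anything, because libsLoaded tracks what has been loaded.
  //   SX.loadNative(SX.NATIVES.OPENCV);   // image matching support
  //   SX.loadNative(SX.NATIVES.HOTKEY);   // global hotkey support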
static void loadNativeLibrary(String aLib) {
try {
if (aLib.startsWith("_ext_")) {
error("loadNativeLibrary: loading external library not implemented: %s", aLib);
} else {
String sf_aLib = new File(getSXNATIVE(), aLib).getAbsolutePath();
System.load(sf_aLib);
trace("loadNativeLibrary: bundled: %s", aLib);
}
} catch (UnsatisfiedLinkError ex) {
terminate(1, "loadNativeLibrary: loading library error: %s (%s)", aLib, ex.getMessage());
}
}
//</editor-fold>
//<editor-fold desc="13*** global helper methods">
public static Map<String, String> listPublicMethods(Class clazz) {
return listPublicMethods(clazz, true);
}
public static Map<String, String> listPublicMethods(Class clazz, boolean silent) {
Method[] declaredMethods = clazz.getDeclaredMethods();
Map<String, String> publicMethods = new HashMap<>();
for (Method method : declaredMethods) {
int modifiers = method.getModifiers();
if (Modifier.isPublic(modifiers)) {
int parameterCount = method.getParameterCount();
Class<?>[] exceptionTypes = method.getExceptionTypes();
String throwsException = "";
if (exceptionTypes.length > 0) {
throwsException = "x";
}
String name = method.getName();
String prefix = "";
if (name.startsWith("getAll")) {
prefix = "getAll";
} else if (name.startsWith("get")) {
prefix = "get";
} else if (name.startsWith("set")) {
prefix = "set";
} else if (name.startsWith("isSet")) {
prefix = "isSet";
} else if (name.startsWith("is")) {
prefix = "is";
} else if (name.startsWith("on")) {
prefix = "on";
} else if (name.startsWith("isValid")) {
prefix = "isValid";
} else if (name.startsWith("has")) {
prefix = "has";
} else if (name.startsWith("as")) {
prefix = "as";
} else if (name.startsWith("load")) {
prefix = "load";
} else if (name.startsWith("save")) {
prefix = "save";
} else if (name.startsWith("dump")) {
prefix = "dump";
} else if (name.startsWith("make")) {
prefix = "make";
} else if (name.startsWith("eval")) {
prefix = "eval";
} else if (name.startsWith("exists")) {
prefix = "exists";
} else if (name.startsWith("equals")) {
prefix = "equals";
} else if (name.startsWith("list")) {
prefix = "list";
}
name = name.substring(prefix.length());
publicMethods.put(String.format("%s%s-%d%s", name, SX.isSet(prefix) ? "-" + prefix : "",
parameterCount, throwsException), method.toString());
}
}
if (!silent) {
List<String> publicMethodsKeys = publicMethods.keySet().stream().sorted().collect(Collectors.toList());
for (String entry : publicMethodsKeys) {
if (entry.startsWith("SX") || entry.startsWith("Option")) continue;
log.p("%s", entry);
}
}
return publicMethods;
}
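  // Illustrative usage (a sketch): print the sorted public API of a class to the log,
  // excluding the SX* and Option* entries that the silent=false branch skips:
  //   SX.listPublicMethods(SX.class, false);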
/**
   * checks whether the given object is in JSON format such as ["ID", ...]
   *
   * @param json the object to check
* @return true if object is in JSON format, false otherwise
*/
public static boolean isJSON(Object json) {
if (json instanceof String) {
return ((String) json).trim().startsWith("[\"") || ((String) json).trim().startsWith("{\"");
}
return false;
}
public static void dumpSysProps() {
dumpSysProps(null);
}
public static void dumpSysProps(String filter) {
filter = filter == null ? "" : filter;
p("*** system properties dump " + filter);
Properties sysProps = System.getProperties();
ArrayList<String> keysProp = new ArrayList<String>();
Integer nL = 0;
String entry;
for (Object e : sysProps.keySet()) {
entry = (String) e;
if (entry.length() > nL) {
nL = entry.length();
}
if (filter.isEmpty() || !filter.isEmpty() && entry.contains(filter)) {
keysProp.add(entry);
}
}
Collections.sort(keysProp);
String form = "%-" + nL.toString() + "s = %s";
for (Object e : keysProp) {
p(form, e, sysProps.get(e));
}
p("*** system properties dump end" + filter);
}
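  // Illustrative usage (a sketch): dump only the Java related system properties.
  //   SX.dumpSysProps("java.");   // prints every property whose key contains "java."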
public static void show() {
if (hasOptions()) {
dumpOptions();
}
p("***** show environment (%s)", getSXVERSIONSHOW());
p("user.home: %s", getSXUSERHOME());
p("user.dir (work dir): %s", getSXUSERWORK());
p("java.io.tmpdir: %s", getSXSYSTEMP());
p("running on %s", getSXSYSTEM());
p(getSXJAVAVERSION());
p("app data folder: %s", getSXAPP());
p("libs folder: %s", getSXNATIVE());
if (isSet(SXBASEJAR)) {
p("executing jar: %s", SXBASEJAR);
}
Content.dumpClasspath("sikulix");
//TODO ScriptingHelper
// if (isJythonReady) {
// JythonHelper.getAll().showSysPath();
// }
p("***** show environment end");
}
public static boolean isNull(Object obj) {
return null == obj;
}
public static boolean isNotNull(Object obj) {
return null != obj;
}
public static boolean isNotSet(Object obj) {
if (null != obj && obj instanceof String) {
if (((String) obj).isEmpty()) {
return true;
} else {
return false;
}
}
return null == obj;
}
public static boolean isSet(Object obj) {
if (null != obj && obj instanceof String) {
if (((String) obj).isEmpty()) {
return false;
} else {
return true;
}
}
return null != obj;
}
public static boolean isSet(String var, String val) {
if (null != var && null != val) {
if (var.isEmpty()) {
return false;
} else {
return val.equals(var);
}
}
return false;
}
public static void pause(int time) {
try {
Thread.sleep(time * 1000);
} catch (InterruptedException ex) {
}
}
public static void pause(float time) {
try {
Thread.sleep((int) (time * 1000));
} catch (InterruptedException ex) {
}
}
public static void pause(double time) {
try {
Thread.sleep((int) (time * 1000));
} catch (InterruptedException ex) {
}
}
//</editor-fold>
}
|
[
"\"APPDATA\"",
"\"TRAVIS\""
] |
[] |
[
"APPDATA",
"TRAVIS"
] |
[]
|
["APPDATA", "TRAVIS"]
|
java
| 2 | 0 | |
web_app/http.py
|
import os
def slash_join(*args: str) -> str:
"""
Joins ``args`` with a single "/"
>>> slash_join('http://example.com', 'a/', '/foo/')
'http://example.com/a/foo/'
"""
if not args:
return ''
append_slash = args[-1].endswith('/')
url = '/'.join([arg.strip('/') for arg in args])
return url + '/' if append_slash else url
def base_url() -> str:
"""
Returns the FHIR API base URL based on environment variables.
"""
https_commcarehq_org = os.environ['CCHQ_BASE_URL']
proj = os.environ['CCHQ_PROJECT_SPACE']
return slash_join(https_commcarehq_org, f'/a/{proj}/fhir/R4/')
def auth_header() -> dict:
"""
Returns the API Key auth header based on environment variables.
"""
username = os.environ['CCHQ_USERNAME']
api_key = os.environ['CCHQ_API_KEY']
return {'Authorization': f'ApiKey {username}:{api_key}'}
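# A minimal usage sketch (not part of the module's API): the CCHQ_* values below are
# placeholders and 'Patient' is only an example FHIR resource path.
if __name__ == '__main__':
    os.environ.setdefault('CCHQ_BASE_URL', 'https://www.commcarehq.org')
    os.environ.setdefault('CCHQ_PROJECT_SPACE', 'demo-project')
    os.environ.setdefault('CCHQ_USERNAME', 'user@example.com')
    os.environ.setdefault('CCHQ_API_KEY', 'abc123')
    print(slash_join(base_url(), 'Patient'))
    # -> https://www.commcarehq.org/a/demo-project/fhir/R4/Patient
    print(auth_header())
    # -> {'Authorization': 'ApiKey user@example.com:abc123'}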
|
[] |
[] |
[
"CCHQ_API_KEY",
"CCHQ_BASE_URL",
"CCHQ_PROJECT_SPACE",
"CCHQ_USERNAME"
] |
[]
|
["CCHQ_API_KEY", "CCHQ_BASE_URL", "CCHQ_PROJECT_SPACE", "CCHQ_USERNAME"]
|
python
| 4 | 0 | |
tests/plugins/test_cluster_integration.py
|
import os
from unittest import mock
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.plugins import DDP2Plugin, DDPPlugin, DDPShardedPlugin, DeepSpeedPlugin, RPCSequentialPlugin
from pytorch_lightning.plugins.environments import LightningEnvironment, SLURMEnvironment, TorchElasticEnvironment
from pytorch_lightning.utilities import rank_zero_only
from tests.helpers.runif import RunIf
def environment_combinations():
expected = dict(global_rank=3, local_rank=1, node_rank=1, world_size=4)
# Lightning
variables = {
"CUDA_VISIBLE_DEVICES": "0,1,2,4",
"LOCAL_RANK": "1",
"NODE_RANK": "1",
"WORLD_SIZE": "8",
}
environment = LightningEnvironment()
yield environment, variables, expected
# SLURM
variables = {
"CUDA_VISIBLE_DEVICES": "0,1,2,4",
"SLURM_JOB_NAME": "SOME_NAME",
"SLURM_LOCALID": "1",
"SLURM_NODEID": "1",
"SLURM_PROCID": "3",
"SLURM_NTASKS": "4",
}
environment = SLURMEnvironment()
yield environment, variables, expected
# TorchElastic
variables = {
"CUDA_VISIBLE_DEVICES": "0,1,2,4",
"LOCAL_RANK": "1",
"GROUP_RANK": "1",
"RANK": "3",
"WORLD_SIZE": "4",
"LOCAL_WORLD_SIZE": "2",
}
environment = TorchElasticEnvironment()
yield environment, variables, expected
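# Reading aid for the variables above (not used by the tests themselves): SLURMEnvironment
# derives global_rank from SLURM_PROCID, local_rank from SLURM_LOCALID, node_rank from
# SLURM_NODEID and world_size from SLURM_NTASKS; TorchElasticEnvironment reads RANK,
# LOCAL_RANK, GROUP_RANK and WORLD_SIZE; LightningEnvironment only supplies LOCAL_RANK and
# NODE_RANK here, so the expected global_rank (3) and world_size (4) come from the plugin
# itself (2 devices per node x 2 nodes).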
@pytest.mark.parametrize(
"plugin_cls", [
DDPPlugin,
DDPShardedPlugin,
DDP2Plugin,
pytest.param(DeepSpeedPlugin, marks=RunIf(deepspeed=True)),
pytest.param(RPCSequentialPlugin, marks=RunIf(fairscale_pipe=True)),
]
)
def test_ranks_available_manual_plugin_selection(plugin_cls):
""" Test that the rank information is readily available after Trainer initialization. """
num_nodes = 2
for cluster, variables, expected in environment_combinations():
if plugin_cls == DDP2Plugin:
expected.update(global_rank=expected["node_rank"], world_size=num_nodes)
with mock.patch.dict(os.environ, variables):
plugin = plugin_cls(
parallel_devices=[torch.device("cuda", 1), torch.device("cuda", 2)],
num_nodes=num_nodes,
cluster_environment=cluster,
)
trainer = Trainer(plugins=[plugin])
assert rank_zero_only.rank == expected["global_rank"]
assert trainer.global_rank == expected["global_rank"]
assert trainer.local_rank == expected["local_rank"]
assert trainer.node_rank == expected["node_rank"]
assert trainer.world_size == expected["world_size"]
@pytest.mark.parametrize(
"trainer_kwargs", [
dict(accelerator="ddp", gpus=[1, 2]),
dict(accelerator="ddp_sharded", gpus=[1, 2]),
dict(accelerator="ddp2", gpus=[1, 2]),
dict(accelerator="ddp_cpu", num_processes=2),
dict(accelerator="ddp_spawn", gpus=[1, 2]),
]
)
@mock.patch("torch.cuda.is_available", return_value=True)
@mock.patch("torch.cuda.device_count", return_value=4)
def test_ranks_available_automatic_plugin_selection(mock0, mock1, trainer_kwargs):
""" Test that the rank information is readily available after Trainer initialization. """
num_nodes = 2
trainer_kwargs.update(num_nodes=num_nodes)
for cluster, variables, expected in environment_combinations():
if trainer_kwargs["accelerator"] == "ddp2":
expected.update(global_rank=expected["node_rank"], world_size=num_nodes)
if trainer_kwargs["accelerator"] in ("ddp_cpu", "ddp_spawn"):
if isinstance(cluster, (SLURMEnvironment, TorchElasticEnvironment)):
# slurm and torchelastic do not work with spawn plugins
continue
# when using spawn, we don't reach rank > 0 until we call Trainer.fit()
expected.update(global_rank=(expected["node_rank"] * 2), local_rank=0)
with mock.patch.dict(os.environ, variables):
trainer = Trainer(**trainer_kwargs)
assert type(trainer.training_type_plugin.cluster_environment) == type(cluster)
assert rank_zero_only.rank == expected["global_rank"]
assert trainer.global_rank == expected["global_rank"]
assert trainer.local_rank == expected["local_rank"]
assert trainer.node_rank == expected["node_rank"]
assert trainer.world_size == expected["world_size"]
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
google/cloud/dataplex_v1/services/metadata_service/client.py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.dataplex_v1.services.metadata_service import pagers
from google.cloud.dataplex_v1.types import metadata_
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import MetadataServiceGrpcTransport
from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport
class MetadataServiceClientMeta(type):
"""Metaclass for the MetadataService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[MetadataServiceTransport]]
_transport_registry["grpc"] = MetadataServiceGrpcTransport
_transport_registry["grpc_asyncio"] = MetadataServiceGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[MetadataServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class MetadataServiceClient(metaclass=MetadataServiceClientMeta):
"""Metadata service manages metadata resources such as tables,
filesets and partitions.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "dataplex.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MetadataServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MetadataServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> MetadataServiceTransport:
"""Returns the transport used by the client instance.
Returns:
MetadataServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def entity_path(
project: str, location: str, lake: str, zone: str, entity: str,
) -> str:
"""Returns a fully-qualified entity string."""
return "projects/{project}/locations/{location}/lakes/{lake}/zones/{zone}/entities/{entity}".format(
project=project, location=location, lake=lake, zone=zone, entity=entity,
)
@staticmethod
def parse_entity_path(path: str) -> Dict[str, str]:
"""Parses a entity path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/lakes/(?P<lake>.+?)/zones/(?P<zone>.+?)/entities/(?P<entity>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def partition_path(
project: str, location: str, lake: str, zone: str, entity: str, partition: str,
) -> str:
"""Returns a fully-qualified partition string."""
return "projects/{project}/locations/{location}/lakes/{lake}/zones/{zone}/entities/{entity}/partitions/{partition}".format(
project=project,
location=location,
lake=lake,
zone=zone,
entity=entity,
partition=partition,
)
@staticmethod
def parse_partition_path(path: str) -> Dict[str, str]:
"""Parses a partition path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/lakes/(?P<lake>.+?)/zones/(?P<zone>.+?)/entities/(?P<entity>.+?)/partitions/(?P<partition>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def zone_path(project: str, location: str, lake: str, zone: str,) -> str:
"""Returns a fully-qualified zone string."""
return "projects/{project}/locations/{location}/lakes/{lake}/zones/{zone}".format(
project=project, location=location, lake=lake, zone=zone,
)
@staticmethod
def parse_zone_path(path: str) -> Dict[str, str]:
"""Parses a zone path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/lakes/(?P<lake>.+?)/zones/(?P<zone>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
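    # Illustrative call (a sketch, not part of the generated surface): with default client
    # options this returns ("dataplex.googleapis.com", None); the mTLS endpoint is only
    # chosen when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a client certificate
    # source is available, or when GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    #
    #   endpoint, cert_source = MetadataServiceClient.get_mtls_endpoint_and_cert_source()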
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, MetadataServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the metadata service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, MetadataServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, MetadataServiceTransport):
# transport is a MetadataServiceTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def get_entity(
self,
request: Union[metadata_.GetEntityRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_.Entity:
r"""Get a metadata entity.
Args:
request (Union[google.cloud.dataplex_v1.types.GetEntityRequest, dict]):
The request object. Get metadata entity request.
name (str):
Required. The resource name of the entity:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}.``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.types.Entity:
Represents tables and fileset
metadata contained within a zone.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metadata_.GetEntityRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metadata_.GetEntityRequest):
request = metadata_.GetEntityRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_entity]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_entities(
self,
request: Union[metadata_.ListEntitiesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListEntitiesPager:
r"""List metadata entities in a zone.
Args:
request (Union[google.cloud.dataplex_v1.types.ListEntitiesRequest, dict]):
The request object. List metadata entities request.
parent (str):
Required. The resource name of the parent zone:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.services.metadata_service.pagers.ListEntitiesPager:
List metadata entities response.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metadata_.ListEntitiesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metadata_.ListEntitiesRequest):
request = metadata_.ListEntitiesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_entities]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListEntitiesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_partition(
self,
request: Union[metadata_.GetPartitionRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_.Partition:
r"""Get a metadata partition of an entity.
Args:
request (Union[google.cloud.dataplex_v1.types.GetPartitionRequest, dict]):
The request object. Get metadata partition request.
name (str):
Required. The resource name of the partition:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}/partitions/{partition_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.types.Partition:
Represents partition metadata
contained within entity instances.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metadata_.GetPartitionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metadata_.GetPartitionRequest):
request = metadata_.GetPartitionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_partition]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_partitions(
self,
request: Union[metadata_.ListPartitionsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPartitionsPager:
r"""List metadata partitions of an entity.
Args:
request (Union[google.cloud.dataplex_v1.types.ListPartitionsRequest, dict]):
The request object. List metadata partitions request.
parent (str):
Required. The resource name of the parent entity:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.services.metadata_service.pagers.ListPartitionsPager:
List metadata partitions response.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a metadata_.ListPartitionsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, metadata_.ListPartitionsRequest):
request = metadata_.ListPartitionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_partitions]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPartitionsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
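# Illustrative sketch of the context-manager pattern described above (the
# resource name is a placeholder): leaving the block closes the transport, so
# only use this pattern when the transport is not shared with other clients.
#
#   with MetadataServiceClient() as client:
#       partition = client.get_partition(
#           name="projects/my-project/locations/us-central1/lakes/my-lake/zones/my-zone/entities/my-entity/partitions/my-partition"
#       )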
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-dataplex",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("MetadataServiceClient",)
|
[] |
[] |
[
"GOOGLE_API_USE_MTLS_ENDPOINT",
"GOOGLE_API_USE_CLIENT_CERTIFICATE"
] |
[]
|
["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"]
|
python
| 2 | 0 | |
tests/e2e/gc_block_resize_retain_policy.go
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"math/rand"
"os"
"strconv"
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
cnstypes "github.com/vmware/govmomi/cns/types"
"github.com/vmware/govmomi/object"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
fnodes "k8s.io/kubernetes/test/e2e/framework/node"
fpod "k8s.io/kubernetes/test/e2e/framework/pod"
fpv "k8s.io/kubernetes/test/e2e/framework/pv"
)
var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclamation policy retain", func() {
f := framework.NewDefaultFramework("gc-resize-reclaim-policy-retain")
var (
client clientset.Interface
clientNewGc clientset.Interface
namespace string
fcdID string
namespaceNewGC string
storagePolicyName string
storageclass *storagev1.StorageClass
pvclaim *v1.PersistentVolumeClaim
err error
volHandle string
svcPVCName string
pv *v1.PersistentVolume
pvcDeleted bool
pvcDeletedInSvc bool
pvDeleted bool
cmd []string
cmd2 []string
pandoraSyncWaitTime int
defaultDatastore *object.Datastore
restConfig *restclient.Config
deleteFCDRequired bool
)
ginkgo.BeforeEach(func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
client = f.ClientSet
namespace = f.Namespace.Name
bootstrap()
ginkgo.By("Getting ready nodes on GC 1")
nodeList, err := fnodes.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err, "Unable to find ready and schedulable Node")
if !(len(nodeList.Items) > 0) {
framework.Failf("Unable to find ready and schedulable Node")
}
storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores)
scParameters := make(map[string]string)
scParameters[scParamFsType] = ext4FSType
// Set resource quota.
ginkgo.By("Set Resource quota for GC")
svcClient, svNamespace := getSvcClientAndNamespace()
setResourceQuota(svcClient, svNamespace, rqLimit)
// Create Storage class and PVC.
ginkgo.By("Creating Storage Class and PVC with allowVolumeExpansion = true")
scParameters[svStorageClassName] = storagePolicyName
storageclass, err = createStorageClass(client, scParameters, nil, v1.PersistentVolumeReclaimRetain, "", true, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvclaim, err = createPVC(client, namespace, nil, "", storageclass, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Waiting for PVC to be bound.
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for all claims to be in bound state")
persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pv = persistentvolumes[0]
volHandle = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle)
gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
svcPVCName = pv.Spec.CSI.VolumeHandle
pvcDeleted = false
pvcDeletedInSvc = false
pvDeleted = false
// Replace second element with pod.Name.
cmd = []string{"exec", "", fmt.Sprintf("--namespace=%v", namespace),
"--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"}
// Set up default pandora sync wait time.
pandoraSyncWaitTime = defaultPandoraSyncWaitTime
defaultDatastore = getDefaultDatastore(ctx)
// Get restConfig.
restConfig = getRestConfigClient()
deleteFCDRequired = false
})
ginkgo.AfterEach(func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if !pvcDeleted {
err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvclaim.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
if !pvDeleted {
err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
if !pvcDeletedInSvc {
svcClient, svcNamespace := getSvcClientAndNamespace()
err := svcClient.CoreV1().PersistentVolumeClaims(svcNamespace).Delete(ctx,
svcPVCName, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
if deleteFCDRequired {
ginkgo.By(fmt.Sprintf("Deleting FCD: %s", fcdID))
err := e2eVSphere.deleteFCD(ctx, fcdID, defaultDatastore.Reference())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
err = e2eVSphere.waitForCNSVolumeToBeDeleted(pv.Spec.CSI.VolumeHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is deleted in Supervisor Cluster")
volumeExists := verifyVolumeExistInSupervisorCluster(svcPVCName)
gomega.Expect(volumeExists).To(gomega.BeFalse())
err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
svcClient, svNamespace := getSvcClientAndNamespace()
setResourceQuota(svcClient, svNamespace, defaultrqLimit)
})
// Combined:
// PV with reclaim policy retain can be resized using new GC PVC.
// PV with reclaim policy retain can be resized using new GC PVC with pod.
// Steps:
// 1. Create a SC with allowVolumeExpansion set to 'true' and with reclaim
// policy set to 'Retain'.
// 2. Create a GC PVC using the SC created in step 1 and wait for binding
// with PV.
// 3. Create a pod in GC to use the PVC created in step 2 and initialize the file system.
// 4. Delete the GC pod created in step 3.
// 5. Delete the GC PVC created in step 2.
// 6. Verify the GC PVC is removed but the SVC PVC, SVC PV and GC PV still exist.
// 7. Remove claimRef from the PV lingering in GC to get it to Available
// state (see the sketch after this list).
// 8. Create a new PVC in GC using the PV lingering in GC and the same SC
// from step 1.
// 9. Verify the same SVC PVC is reused.
// 10. Resize the PVC in GC.
// 11. Wait for the PVC in GC to reach "FilesystemResizePending" state.
// 12. Check using CNS query that the size has been updated to what was used in
// step 10.
// 13. Verify the size of the PV in SVC and GC is the same as the one used in step 10.
// 14. Create a pod in GC to use the PVC created in step 8.
// 15. Wait for FS resize.
// 16. Verify the sizes of the PVC in SVC and GC are equal and bigger than what
// they were after step 3.
// 17. Delete the pod created in step 14.
// 18. Delete the PVC created in step 8.
// 19. Delete the PV leftover in GC.
// 20. Delete the SC created in step 1.
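// The crux of steps 7-9 above is clearing claimRef on the released PV so that
// a new PVC can bind to it again; a minimal sketch of that operation as used
// further below (ctx, client and pv are assumed to be in scope, error handling
// elided):
//
//	pv.Spec.ClaimRef = nil
//	pv, err = client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{})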
ginkgo.It("PV with reclaim policy can be reused and resized with pod", func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create a Pod to use this PVC, and verify volume has been attached.
ginkgo.By("Creating pod to attach PV to the node")
pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s",
pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
vmUUID, err := getVMUUIDFromNodeName(pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node")
ginkgo.By("Verify the volume is accessible and filesystem type is as expected")
cmd[1] = pod.Name
lastOutput := framework.RunKubectlOrDie(namespace, cmd...)
gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse())
ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion")
originalFsSize, err := getFSSizeMb(f, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Delete POD.
ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace))
err = fpod.DeletePodWithWait(client, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is detached from the node")
isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client,
pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskDetached).To(gomega.BeTrue(),
fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
ginkgo.By("Delete PVC in GC")
err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvclaim.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvcDeleted = true
ginkgo.By("Check GC PV exists and is released")
pv, err = waitForPvToBeReleased(ctx, client, pv.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
oldPvUID := string(pv.UID)
fmt.Println("PV uuid", oldPvUID)
ginkgo.By("Check SVC PVC exists")
_ = getPVCFromSupervisorCluster(svcPVCName)
ginkgo.By("Remove claimRef from GC PVC")
pv.Spec.ClaimRef = nil
pv, err = client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Creating the PVC in guest cluster")
pvclaim = getPersistentVolumeClaimSpec(namespace, nil, pv.Name)
pvclaim.Spec.StorageClassName = &storageclass.Name
pvclaim, err = client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvclaim, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvclaim.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
ginkgo.By("Wait for the PVC in guest cluster to bind the lingering pv")
framework.ExpectNoError(fpv.WaitOnPVandPVC(client, framework.NewTimeoutContextWithDefaults(),
namespace, pv, pvclaim))
// Modify PVC spec to trigger volume expansion.
// We expand the PVC while no pod is using it to ensure offline expansion.
ginkgo.By("Expanding current pvc")
currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage]
newSize := currentPvcSize.DeepCopy()
newSize.Add(resource.MustParse("1Gi"))
framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
pvclaim, err = expandPVCSize(pvclaim, newSize, client)
framework.ExpectNoError(err, "While updating pvc for more size")
gomega.Expect(pvclaim).NotTo(gomega.BeNil())
pvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvclaim.Name)
}
ginkgo.By("Checking for PVC request size change on SVC PVC")
b, err := verifyPvcRequestedSizeUpdateInSupervisorWithWait(svcPVCName, newSize)
gomega.Expect(b).To(gomega.BeTrue())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Waiting for controller volume resize to finish")
err = waitForPvResizeForGivenPvc(pvclaim, client, totalResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for pvc resize to finish")
ginkgo.By("Checking for resize on SVC PV")
verifyPVSizeinSupervisor(svcPVCName, newSize)
ginkgo.By("Checking for conditions on pvc")
pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Checking for 'FileSystemResizePending' status condition on SVC PVC")
_, err = checkSvcPvcHasGivenStatusCondition(svcPVCName, true, v1.PersistentVolumeClaimFileSystemResizePending)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle))
queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if len(queryResult.Volumes) == 0 {
err = fmt.Errorf("QueryCNSVolumeWithResult returned no volume")
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verifying disk size requested in volume expansion is honored")
newSizeInMb := convertGiStrToMibInt64(newSize)
if queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb != newSizeInMb {
err = fmt.Errorf("got wrong disk size after volume expansion")
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Create a new Pod to use this PVC, and verify volume has been attached.
ginkgo.By("Creating a new pod to attach PV again to the node")
pod, err = createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Verify volume after expansion: %s is attached to the node: %s",
pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node")
ginkgo.By("Verify after expansion the filesystem type is as expected")
cmd[1] = pod.Name
lastOutput = framework.RunKubectlOrDie(namespace, cmd...)
gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse())
ginkgo.By("Waiting for file system resize to finish")
pvclaim, err = waitForFSResize(pvclaim, client)
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvclaim.Status.Conditions
expectEqual(len(pvcConditions), 0, "pvc should not have conditions")
ginkgo.By("Verify filesystem size for mount point /mnt/volume1 after expansion")
fsSize, err := getFSSizeMb(f, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Filesystem size may be smaller than the size of the block volume.
// Here since filesystem was already formatted on the original volume,
// we can compare the new filesystem size with the original filesystem size.
if fsSize <= originalFsSize {
framework.Failf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize)
}
ginkgo.By("File system resize finished successfully in GC")
ginkgo.By("Checking for PVC resize completion on SVC PVC")
gomega.Expect(verifyResizeCompletedInSupervisor(svcPVCName)).To(gomega.BeTrue())
// Delete POD.
ginkgo.By(fmt.Sprintf("Deleting the new pod %s in namespace %s after expansion", pod.Name, namespace))
err = fpod.DeletePodWithWait(client, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is detached from the node after expansion")
isDiskDetached, err = e2eVSphere.waitForVolumeDetachedFromNode(client,
pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskDetached).To(gomega.BeTrue(),
fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
})
// PV with reclaim policy retain can be resized when used in a fresh GC.
// Steps:
// 1. Create a SC with allowVolumeExpansion set to 'true' in GC1.
// 2. Create a GC1 PVC using the SC created in step 1 and wait for binding
// with PV with reclaim policy set to 'Retain'.
// 3. Delete the GC1 PVC.
// 4. Verify the GC1 PVC is removed but the SVC PV, SVC PVC and GC1 PV still exist.
// 5. Delete the GC1 PV. SVC PV and PVC still exist.
// 6. Create a new GC GC2.
// 7. Create a SC in GC2 similar to the SC created in step 1 but with reclaim
// policy set to delete.
// 8. Create a new PV in GC2 using the SVC PVC from step 5 and the SC created in
// step 7 (see the sketch after this list).
// 9. Create a new PVC in GC2 using the PV created in step 8.
// 10. Verify a new PVC API object is created.
// 11. Resize the PVC from step 9 in GC2.
// 12. Wait for the PVC in GC2 and SVC to reach "FilesystemResizePending" state.
// 13. Check using CNS query that the size has been updated to what was used in
// step 11.
// 14. Verify the size of the PVs in SVC and GC is the same as the one used in step 11.
// 15. Delete the PVC created in step 9.
// 16. Delete the SCs created in step 1 and step 7.
// 17. Delete GC2.
// Steps 6 and 17 need to be run manually before and after this suite.
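// The reuse in steps 8-9 above works because the new GC2 PV points at the
// existing supervisor PVC through its CSI volume handle; a minimal sketch of
// how the test below wires that up (pvtemp is a template PV from this suite,
// error handling elided):
//
//	pvNew := getPersistentVolumeSpec(svcPVCName, v1.PersistentVolumeReclaimDelete, nil)
//	pvNew.Spec.CSI = pvtemp.Spec.CSI
//	pvNew.Spec.CSI.VolumeHandle = svcPVCName
//	pvNew, err = clientNewGc.CoreV1().PersistentVolumes().Create(ctx, pvNew, metav1.CreateOptions{})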
ginkgo.It("PV with reclaim policy retain can be resized when used in a fresh GC", func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
newGcKubconfigPath := os.Getenv("NEW_GUEST_CLUSTER_KUBE_CONFIG")
if newGcKubconfigPath == "" {
ginkgo.Skip("Env NEW_GUEST_CLUSTER_KUBE_CONFIG is missing")
}
clientNewGc, err = createKubernetesClientFromConfig(newGcKubconfigPath)
gomega.Expect(err).NotTo(gomega.HaveOccurred(),
fmt.Sprintf("Error creating k8s client with %v: %v", newGcKubconfigPath, err))
ginkgo.By("Creating namespace on second GC")
ns, err := framework.CreateTestingNS(f.BaseName, clientNewGc, map[string]string{
"e2e-framework": f.BaseName,
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Error creating namespace on second GC")
namespaceNewGC = ns.Name
framework.Logf("Created namespace on second GC %v", namespaceNewGC)
defer func() {
err := clientNewGc.CoreV1().Namespaces().Delete(ctx, namespaceNewGC, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
ginkgo.By("Getting ready nodes on GC 2")
nodeList, err := fnodes.GetReadySchedulableNodes(clientNewGc)
framework.ExpectNoError(err, "Unable to find ready and schedulable Node")
gomega.Expect(len(nodeList.Items)).NotTo(gomega.BeZero(), "Unable to find ready and schedulable Node")
ginkgo.By("Delete PVC and PV form orignal GC")
err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvclaim.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvcDeleted = true
err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvDeleted = true
ginkgo.By("Check SVC PVC still exists")
_ = getPVCFromSupervisorCluster(svcPVCName)
scParameters := make(map[string]string)
scParameters[scParamFsType] = ext4FSType
scParameters[svStorageClassName] = storagePolicyName
storageclassNewGC, err := createStorageClass(clientNewGc,
scParameters, nil, v1.PersistentVolumeReclaimDelete, "", true, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvc, err := createPVC(clientNewGc, namespaceNewGC, nil, "", storageclassNewGC, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
var pvcs []*v1.PersistentVolumeClaim
pvcs = append(pvcs, pvc)
ginkgo.By("Waiting for all claims to be in bound state")
pvs, err := fpv.WaitForPVClaimBoundPhase(clientNewGc, pvcs, framework.ClaimProvisionTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvtemp := pvs[0]
defer func() {
err = clientNewGc.CoreV1().PersistentVolumeClaims(namespaceNewGC).Delete(ctx,
pvc.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = clientNewGc.StorageV1().StorageClasses().Delete(ctx,
storageclassNewGC.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = e2eVSphere.waitForCNSVolumeToBeDeleted(pvtemp.Spec.CSI.VolumeHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is deleted in Supervisor Cluster")
volumeExists := verifyVolumeExistInSupervisorCluster(pvtemp.Spec.CSI.VolumeHandle)
gomega.Expect(volumeExists).To(gomega.BeFalse())
}()
volumeID := getVolumeIDFromSupervisorCluster(svcPVCName)
gomega.Expect(volumeID).NotTo(gomega.BeEmpty())
ginkgo.By("Creating the PV")
pvNew := getPersistentVolumeSpec(svcPVCName, v1.PersistentVolumeReclaimDelete, nil)
pvNew.Annotations = pvtemp.Annotations
pvNew.Spec.StorageClassName = pvtemp.Spec.StorageClassName
pvNew.Spec.CSI = pvtemp.Spec.CSI
pvNew.Spec.CSI.VolumeHandle = svcPVCName
pvNew, err = clientNewGc.CoreV1().PersistentVolumes().Create(ctx, pvNew, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Creating the PVC")
pvcNew := getPersistentVolumeClaimSpec(namespaceNewGC, nil, pvNew.Name)
pvcNew.Spec.StorageClassName = &pvtemp.Spec.StorageClassName
pvcNew, err = clientNewGc.CoreV1().PersistentVolumeClaims(namespaceNewGC).Create(ctx,
pvcNew, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Wait for PV and PVC to Bind.
framework.ExpectNoError(fpv.WaitOnPVandPVC(clientNewGc,
framework.NewTimeoutContextWithDefaults(), namespaceNewGC, pvNew, pvcNew))
ginkgo.By("Expanding current pvc")
currentPvcSize := pvcNew.Spec.Resources.Requests[v1.ResourceStorage]
newSize := currentPvcSize.DeepCopy()
newSize.Add(resource.MustParse("1Gi"))
framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
pvcNew, err = expandPVCSize(pvcNew, newSize, clientNewGc)
framework.ExpectNoError(err, "While updating pvc for more size")
gomega.Expect(pvcNew).NotTo(gomega.BeNil())
pvcSize := pvcNew.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvcNew.Name)
}
ginkgo.By("Checking for PVC request size change on SVC PVC")
b, err := verifyPvcRequestedSizeUpdateInSupervisorWithWait(svcPVCName, newSize)
gomega.Expect(b).To(gomega.BeTrue())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Waiting for controller volume resize to finish")
err = waitForPvResizeForGivenPvc(pvcNew, clientNewGc, totalResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for pvc resize to finish")
ginkgo.By("Checking for resize on SVC PV")
verifyPVSizeinSupervisor(svcPVCName, newSize)
ginkgo.By("Checking for conditions on pvc")
pvcNew, err = waitForPVCToReachFileSystemResizePendingCondition(clientNewGc,
namespaceNewGC, pvcNew.Name, pollTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Checking for 'FileSystemResizePending' status condition on SVC PVC")
_, err = checkSvcPvcHasGivenStatusCondition(svcPVCName, true, v1.PersistentVolumeClaimFileSystemResizePending)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle))
queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if len(queryResult.Volumes) == 0 {
err = fmt.Errorf("QueryCNSVolumeWithResult returned no volume")
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verifying disk size requested in volume expansion is honored")
newSizeInMb := convertGiStrToMibInt64(newSize)
if queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb != newSizeInMb {
err = fmt.Errorf("got wrong disk size after volume expansion")
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Create a new Pod to use this PVC, and verify volume has been attached.
ginkgo.By("Creating a pod to attach PV again to the node")
pod, err := createPod(clientNewGc, namespaceNewGC, nil, []*v1.PersistentVolumeClaim{pvcNew}, false, execCommand)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Verify volume after expansion: %s is attached to the node: %s",
pvNew.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
vmUUID, err := getVMUUIDFromNodeName(pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(clientNewGc, volHandle, vmUUID)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node")
ginkgo.By("Verify after expansion the filesystem type is as expected")
oldKubeConfig := framework.TestContext.KubeConfig
framework.TestContext.KubeConfig = newGcKubconfigPath
defer func() {
framework.TestContext.KubeConfig = oldKubeConfig
}()
cmd2 = []string{"exec", pod.Name, fmt.Sprintf("--namespace=%v", namespaceNewGC),
"--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"}
lastOutput := framework.RunKubectlOrDie(namespaceNewGC, cmd2...)
gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse())
ginkgo.By("Waiting for file system resize to finish")
pvcNew, err = waitForFSResize(pvcNew, clientNewGc)
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvcNew.Status.Conditions
expectEqual(len(pvcConditions), 0, "pvc should not have conditions")
ginkgo.By("File system resize finished successfully in GC")
ginkgo.By("Checking for PVC resize completion on SVC PVC")
gomega.Expect(verifyResizeCompletedInSupervisor(svcPVCName)).To(gomega.BeTrue())
// Delete POD.
ginkgo.By(fmt.Sprintf("Deleting the new pod %s in namespace %s after expansion", pod.Name, namespaceNewGC))
err = fpod.DeletePodWithWait(clientNewGc, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is detached from the node after expansion")
isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(clientNewGc,
pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskDetached).To(gomega.BeTrue(),
fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
ginkgo.By("Deleting the PV Claim")
framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(clientNewGc, pvcNew.Name, namespaceNewGC),
"Failed to delete PVC ", pvcNew.Name)
pvcNew = nil
ginkgo.By("Verify PV should be deleted automatically")
framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(clientNewGc, pvNew.Name, poll, pollTimeoutShort))
pvNew = nil
ginkgo.By("Verify volume is deleted in Supervisor Cluster")
volumeExists := verifyVolumeExistInSupervisorCluster(svcPVCName)
gomega.Expect(volumeExists).To(gomega.BeFalse())
pvcDeletedInSvc = true
})
/* Verify deleting a GC PVC during online volume expansion when the reclaim policy in the SC is set to retain.
Reuse the PV, create a PVC and perform volume expansion.
1. Create a SC with allowVolumeExpansion set to 'true' and with reclaim policy set to 'Retain'.
2. Create a GC PVC using the SC created in step 1 and wait for binding with PV.
3. Delete the GC PVC.
4. Verify the GC PVC is removed but the SVC PV, SVC PVC and GC PV still exist.
5. Remove claimRef from the PV lingering in GC to get it to Available state.
6. Create a new PVC in GC using the PV lingering in GC and the SC created in step 1.
7. Verify the same SVC PVC is reused.
8. Create a POD using the PVC created in step 6.
9. Resize the PVC in GC (see the sketch after this comment block).
10. Check using CNS query that the sizes of the SVC PVC and GC PVC are the same.
11. Wait for resize to complete and verify that "FilesystemResizePending" is removed from the SVC PVC and GC PVC.
12. Check the size of the GC PVC and SVC PVC using CNS query.
13. Verify data is intact on the PV mounted on the pod.
14. Delete the POD.
15. Delete the PVC in GC.
16. Delete the leftover PV in GC.
17. Delete the SC.
*/
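// Online expansion (step 9 above) is triggered the same way as offline
// expansion, by raising spec.resources.requests.storage on the PVC; the only
// difference is that a pod is still consuming the volume while the resize
// happens. Minimal sketch using helpers from this suite (error handling
// elided):
//
//	currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage]
//	newSize := currentPvcSize.DeepCopy()
//	newSize.Add(resource.MustParse("1Gi"))
//	pvclaim, err = expandPVCSize(pvclaim, newSize, client)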
ginkgo.It("Verify online volume expansion when PV with reclaim policy is reused to create PVC", func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ginkgo.By("Delete PVC in GC")
err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvclaim.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvcDeleted = true
ginkgo.By("Check GC PV exists and is released")
pv, err = waitForPvToBeReleased(ctx, client, pv.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
oldPvUID := string(pv.UID)
fmt.Println("PV uuid", oldPvUID)
ginkgo.By("Check SVC PVC exists")
_ = getPVCFromSupervisorCluster(svcPVCName)
ginkgo.By("Remove claimRef from GC PVC")
pv.Spec.ClaimRef = nil
pv, err = client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Creating the PVC in guest cluster")
pvclaim = getPersistentVolumeClaimSpec(namespace, nil, pv.Name)
pvclaim.Spec.StorageClassName = &storageclass.Name
pvclaim, err = client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvclaim, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvclaim.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
ginkgo.By("Wait for the PVC in guest cluster to bind the lingering pv")
err = fpv.WaitOnPVandPVC(client, framework.NewTimeoutContextWithDefaults(), namespace, pv, pvclaim)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Create a POD to use this PVC, and verify volume has been attached.
ginkgo.By("Creating pod to attach PV to the node")
pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s",
pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
vmUUID, err := getVMUUIDFromNodeName(pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node")
ginkgo.By("Verify the volume is accessible and filesystem type is as expected")
cmd[1] = pod.Name
lastOutput := framework.RunKubectlOrDie(namespace, cmd...)
gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse())
ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion")
originalFsSize, err := getFSSizeMb(f, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
// Delete POD.
ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace))
err = fpod.DeletePodWithWait(client, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is detached from the node")
isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client,
pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskDetached).To(gomega.BeTrue(),
fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
}()
// Modify PVC spec to trigger volume expansion.
// The pod created above is still using the PVC, so this exercises online expansion.
ginkgo.By("Expanding current pvc")
currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage]
newSize := currentPvcSize.DeepCopy()
newSize.Add(resource.MustParse("1Gi"))
framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
pvclaim, err = expandPVCSize(pvclaim, newSize, client)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(pvclaim).NotTo(gomega.BeNil())
ginkgo.By("Checking for PVC request size change on SVC PVC")
b, err := verifyPvcRequestedSizeUpdateInSupervisorWithWait(svcPVCName, newSize)
gomega.Expect(b).To(gomega.BeTrue())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Waiting for controller volume resize to finish")
err = waitForPvResizeForGivenPvc(pvclaim, client, totalResizeWaitPeriod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Checking for resize on SVC PV")
verifyPVSizeinSupervisor(svcPVCName, newSize)
ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle))
queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if len(queryResult.Volumes) == 0 {
err = fmt.Errorf("QueryCNSVolumeWithResult returned no volume")
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verifying disk size requested in volume expansion is honored")
newSizeInMb := convertGiStrToMibInt64(newSize)
if queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb != newSizeInMb {
err = fmt.Errorf("got wrong disk size after volume expansion")
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Waiting for file system resize to finish")
pvclaim, err = waitForFSResize(pvclaim, client)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvcConditions := pvclaim.Status.Conditions
expectEqual(len(pvcConditions), 0, "pvc should not have conditions")
ginkgo.By("Verify filesystem size for mount point /mnt/volume1 after expansion")
fsSize, err := getFSSizeMb(f, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Filesystem size may be smaller than the size of the block volume.
// Here since filesystem was already formatted on the original volume,
// we can compare the new filesystem size with the original filesystem size.
gomega.Expect(fsSize).Should(gomega.BeNumerically(">", originalFsSize),
fmt.Sprintf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize))
ginkgo.By("File system resize finished successfully")
ginkgo.By("File system resize finished successfully in GC")
ginkgo.By("Checking for PVC resize completion on SVC PVC")
gomega.Expect(verifyResizeCompletedInSupervisor(svcPVCName)).To(gomega.BeTrue())
})
// Online volume expansion: PV with reclaim policy retain can be resized when used in a fresh GC.
// Steps:
// 1. Create a SC with allowVolumeExpansion set to 'true' in GC2.
// 2. Create a GC2 PVC using the SC created in step 1 and wait for binding
// with PV with reclaim policy set to 'Retain'.
// 3. Delete the GC2 PVC.
// 4. Verify the GC2 PVC is removed but the SVC PV, SVC PVC and GC2 PV still exist.
// 5. Delete the GC2 PV. SVC PV and PVC still exist.
// 6. Create a new GC GC2.
// 7. Create a SC in GC1 similar to the SC created in step 1 but with reclaim
// policy set to delete.
// 8. Create a new PV in GC1 using the SVC PVC from step 5 and the SC created in
// step 7.
// 9. Create a new PVC in GC1 using the PV created in step 8.
// 10. Verify a new PVC API object is created.
// 11. Create a POD.
// 12. Trigger online volume expansion on the GC1 PVC.
// 13. Check CNS query for the newly updated size.
// 14. Check using CNS query that the size has been updated to what was used in step 12.
// 15. Verify the size of the PVs in SVC and GC is the same as the one used in step 12.
// 16. Delete the PVC created in step 9.
// 17. Delete the SCs created in step 1 and step 7.
// 18. Delete GC1.
// (A sketch of the supervisor-side FCD import used by this test follows below.)
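// Before the steps above are exercised, the test below first imports an
// existing FCD into the supervisor cluster through a CnsRegisterVolume CR; a
// minimal sketch using helpers from this suite (error handling elided):
//
//	cnsRegisterVolume := getCNSRegisterVolumeSpec(ctx, svNamespace, fcdID, "", svpvcName, v1.ReadWriteOnce)
//	err = createCNSRegisterVolume(ctx, restConfig, cnsRegisterVolume)
//	err = waitForCNSRegisterVolumeToGetCreated(ctx, restConfig, svNamespace, cnsRegisterVolume, poll, supervisorClusterOperationsTimeout)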
ginkgo.It("online volume expansion-PV with reclaim policy retain can be resized when used in a fresh GC", func() {
var err error
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
curtime := time.Now().Unix()
randomValue := rand.Int()
val := strconv.FormatInt(int64(randomValue), 10)
val = string(val[1:3])
curtimestring := strconv.FormatInt(curtime, 10)
svpvcName := "cns-pvc-" + curtimestring + val
framework.Logf("pvc name :%s", svpvcName)
namespace = getNamespaceToRunTests(f)
newGcKubconfigPath := os.Getenv("NEW_GUEST_CLUSTER_KUBE_CONFIG")
if newGcKubconfigPath == "" {
ginkgo.Skip("Env NEW_GUEST_CLUSTER_KUBE_CONFIG is missing")
}
clientNewGc, err = createKubernetesClientFromConfig(newGcKubconfigPath)
gomega.Expect(err).NotTo(gomega.HaveOccurred(),
fmt.Sprintf("Error creating k8s client with %v: %v", newGcKubconfigPath, err))
ginkgo.By("Creating namespace on second GC")
ns, err := framework.CreateTestingNS(f.BaseName, clientNewGc, map[string]string{
"e2e-framework": f.BaseName,
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Error creating namespace on second GC")
f.AddNamespacesToDelete(ns)
namespaceNewGC = ns.Name
framework.Logf("Created namespace on second GC %v", namespaceNewGC)
defer func() {
err := clientNewGc.CoreV1().Namespaces().Delete(ctx, namespaceNewGC, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
ginkgo.By("Getting ready nodes on GC 2")
nodeList, err := fnodes.GetReadySchedulableNodes(clientNewGc)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(len(nodeList.Items)).NotTo(gomega.BeZero(), "Unable to find ready and schedulable Node in new GC")
_, storageclass, profileID := staticProvisioningPreSetUpUtil(ctx, f, client, storagePolicyName)
// Get supervisor cluster client.
svcClient, svNamespace := getSvcClientAndNamespace()
ginkgo.By("Creating FCD (CNS Volume)")
fcdID, err = e2eVSphere.createFCDwithValidProfileID(ctx,
"staticfcd"+curtimestring, profileID, diskSizeInMb, defaultDatastore.Reference())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora",
pandoraSyncWaitTime, fcdID))
time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second)
deleteFCDRequired = true
ginkgo.By("Create CNS register volume with above created FCD")
cnsRegisterVolume := getCNSRegisterVolumeSpec(ctx, svNamespace, fcdID, "", svpvcName, v1.ReadWriteOnce)
err = createCNSRegisterVolume(ctx, restConfig, cnsRegisterVolume)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = waitForCNSRegisterVolumeToGetCreated(ctx,
restConfig, svNamespace, cnsRegisterVolume, poll, supervisorClusterOperationsTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
cnsRegisterVolumeName := cnsRegisterVolume.GetName()
framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName)
ginkgo.By("verify created PV, PVC and check the bidirectional reference")
svcPVC, err := svcClient.CoreV1().PersistentVolumeClaims(svNamespace).Get(ctx, svpvcName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
svcPV := getPvFromClaim(svcClient, svNamespace, svpvcName)
verifyBidirectionalReferenceOfPVandPVC(ctx, svcClient, svcPVC, svcPV, fcdID)
// Create PVC and PV in GC2.
gcPVC, gcPV := createStaticPVandPVCinGuestCluster(clientNewGc, ctx, namespaceNewGC, svpvcName,
diskSize, storageclass, v1.PersistentVolumeReclaimRetain)
isGC2PVCCreated := true
isGC2PVCreated := true
defer func() {
if isGC2PVCCreated {
ginkgo.By("Deleting the gc2 PVC")
err = fpv.DeletePersistentVolumeClaim(clientNewGc, gcPVC.Name, namespaceNewGC)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
if isGC2PVCreated {
ginkgo.By("Deleting the gc2 PV")
err = fpv.DeletePersistentVolume(clientNewGc, gcPV.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}()
ginkgo.By("Deleting the gc2 PVC")
err = fpv.DeletePersistentVolumeClaim(clientNewGc, gcPVC.Name, namespaceNewGC)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isGC2PVCCreated = false
ginkgo.By("Deleting the gc2 PV")
err = fpv.DeletePersistentVolume(clientNewGc, gcPV.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = fpv.WaitForPersistentVolumeDeleted(clientNewGc, gcPV.Name, poll, pollTimeoutShort)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isGC2PVCreated = false
scParameters := make(map[string]string)
scParameters[scParamFsType] = ext4FSType
scParameters[svStorageClassName] = storagePolicyName
storageclassInGC1, err := createStorageClass(client,
scParameters, nil, v1.PersistentVolumeReclaimDelete, "", true, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvcNew, pvNew, pod, _ := createStaticPVandPVCandPODinGuestCluster(client, ctx, namespace, svpvcName, diskSize,
storageclassInGC1, v1.PersistentVolumeReclaimRetain)
defer func() {
ginkgo.By("Deleting the gc PVC")
err = fpv.DeletePersistentVolumeClaim(client, pvcNew.Name, namespace)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Deleting the gc PV")
err = fpv.DeletePersistentVolume(client, pvNew.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
defer func() {
ginkgo.By("Deleting the pod")
err = fpod.DeletePodWithWait(client, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is detached from the node")
isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client,
pvNew.Spec.CSI.VolumeHandle, pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskDetached).To(gomega.BeTrue(),
fmt.Sprintf("Volume %q is not detached from the node %q", pvNew.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
}()
volHandle = getVolumeIDFromSupervisorCluster(pvNew.Spec.CSI.VolumeHandle)
framework.Logf("Volume Handle :%s", volHandle)
onlineVolumeResizeCheck(f, client, namespace, svpvcName, volHandle, pvcNew, pod)
})
// 1. Create a SC with allowVolumeExpansion set to 'true' in GC1.
// 2. Create a GC1 PVC using the SC created in step 1 and wait for binding with a PV with reclaim policy set to 'Retain'.
// 3. Trigger offline volume expansion on the PVC.
// 4. Delete the PVC and PV created in step 2.
// 5. The SVC PVC is still present for the PVC created above.
// 6. Statically create a PVC and PV in GC1 pointing to the SVC PVC volume handle.
// 7. Wait for the PVC and PV to be in bound state.
// 8. Create a POD using the newly created PVC.
// 9. The SVC PVC will complete offline volume expansion and the same will be reflected on the GC PVC created in step 6 (see the note after this list).
// 10. Trigger online volume expansion on the PVC.
// 11. Verify the file system size.
// 12. Delete the POD, PVC, PV and SC.
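// Step 9 above relies on the standard Kubernetes offline-expansion flow: once
// the controller-side resize has finished, the PVC carries the
// FileSystemResizePending condition, and the filesystem is grown only when a
// pod mounts the volume. Illustrative wait using a helper from this suite
// (error handling elided):
//
//	pvcNew, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvcNew.Name, pollTimeout)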
ginkgo.It("Offline resize of PVC in GC1, Delete PVC and PV in GC1. Statically "+
"prov same PVC and PV in GC1 and deploy a Pod and trigger online volume expansion", func() {
var err error
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
curtime := time.Now().Unix()
randomValue := rand.Int()
val := strconv.FormatInt(int64(randomValue), 10)
val = string(val[1:3])
curtimestring := strconv.FormatInt(curtime, 10)
svpvcName := "cns-pvc-" + curtimestring + val
framework.Logf("pvc name :%s", svpvcName)
namespace = getNamespaceToRunTests(f)
_, storageclass, profileID := staticProvisioningPreSetUpUtil(ctx, f, client, storagePolicyName)
// Get supervisor cluster client.
svcClient, svNamespace := getSvcClientAndNamespace()
ginkgo.By("Creating FCD (CNS Volume)")
fcdID, err = e2eVSphere.createFCDwithValidProfileID(ctx,
"staticfcd"+curtimestring, profileID, diskSizeInMb, defaultDatastore.Reference())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora",
pandoraSyncWaitTime, fcdID))
time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second)
deleteFCDRequired = true
ginkgo.By("Create CNS register volume with above created FCD")
cnsRegisterVolume := getCNSRegisterVolumeSpec(ctx, svNamespace, fcdID, "", svpvcName, v1.ReadWriteOnce)
err = createCNSRegisterVolume(ctx, restConfig, cnsRegisterVolume)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = waitForCNSRegisterVolumeToGetCreated(ctx,
restConfig, svNamespace, cnsRegisterVolume, poll, supervisorClusterOperationsTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
cnsRegisterVolumeName := cnsRegisterVolume.GetName()
framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName)
ginkgo.By("verify created PV, PVC and check the bidirectional reference")
svcPVC, err := svcClient.CoreV1().PersistentVolumeClaims(svNamespace).Get(ctx, svpvcName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
svcPV := getPvFromClaim(svcClient, svNamespace, svpvcName)
verifyBidirectionalReferenceOfPVandPVC(ctx, svcClient, svcPVC, svcPV, fcdID)
pvcNew, pvNew := createStaticPVandPVCinGuestCluster(client, ctx, namespace, svpvcName,
diskSize, storageclass, v1.PersistentVolumeReclaimRetain)
isGC1pvcCreated := true
isGC1pvCreated := true
defer func() {
if isGC1pvcCreated {
ginkgo.By("Deleting the gc PVC")
err = fpv.DeletePersistentVolumeClaim(client, pvcNew.Name, namespace)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
if isGC1pvCreated {
ginkgo.By("Deleting the gc PV")
err = fpv.DeletePersistentVolume(client, pvNew.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = fpv.WaitForPersistentVolumeDeleted(client, pvNew.Name, poll, pollTimeoutShort)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}()
ginkgo.By("Trigger offline expansion")
currentPvcSize := pvcNew.Spec.Resources.Requests[v1.ResourceStorage]
newSize := currentPvcSize.DeepCopy()
newSize.Add(resource.MustParse("1Gi"))
framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
pvcNew, err = expandPVCSize(pvcNew, newSize, client)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(pvcNew).NotTo(gomega.BeNil())
ginkgo.By("Waiting for controller volume resize to finish")
err = waitForPvResizeForGivenPvc(pvcNew, client, totalResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for pvc resize to finish")
ginkgo.By("Checking for conditions on pvc")
pvcNew, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvcNew.Name, pollTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Deleting the gc PVC")
err = fpv.DeletePersistentVolumeClaim(client, pvcNew.Name, namespace)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isGC1pvcCreated = false
ginkgo.By("Deleting the gc PV")
err = fpv.DeletePersistentVolume(client, pvNew.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = fpv.WaitForPersistentVolumeDeleted(client, pvNew.Name, poll, pollTimeoutShort)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isGC1pvCreated = false
ginkgo.By("Check SVC PVC exists")
svcPVC = getPVCFromSupervisorCluster(svcPVC.Name)
newsizeAfterTriggeringOfflineExpansion := svcPVC.Spec.Resources.Requests[v1.ResourceStorage]
newsize := sizeInMb(newsizeAfterTriggeringOfflineExpansion)
size := strconv.FormatInt(newsize, 10)
framework.Logf("newsizeAfterTriggeringOfflineExpansion size: %s", size)
pvcNew, pvNew, pod, _ := createStaticPVandPVCandPODinGuestCluster(client, ctx, namespace, svcPVC.Name, "3Gi",
storageclass, v1.PersistentVolumeReclaimDelete)
defer func() {
ginkgo.By("Deleting the gc PVC")
err = fpv.DeletePersistentVolumeClaim(client, pvcNew.Name, namespace)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Deleting the gc PV")
err = fpv.DeletePersistentVolume(client, pvNew.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = fpv.WaitForPersistentVolumeDeleted(client, pvNew.Name, poll, pollTimeoutShort)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
defer func() {
ginkgo.By("Deleting the pod")
err = fpod.DeletePodWithWait(client, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is detached from the node")
isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client,
pvNew.Spec.CSI.VolumeHandle, pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskDetached).To(gomega.BeTrue(),
fmt.Sprintf("Volume %q is not detached from the node %q", pvNew.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
}()
volHandle = getVolumeIDFromSupervisorCluster(pvNew.Spec.CSI.VolumeHandle)
framework.Logf("Volume Handle :%s", volHandle)
onlineVolumeResizeCheck(f, client, namespace, svcPVC.Name, volHandle, pvcNew, pod)
})
})
func waitForPvToBeReleased(ctx context.Context, client clientset.Interface,
pvName string) (*v1.PersistentVolume, error) {
var pv *v1.PersistentVolume
var err error
waitErr := wait.PollImmediate(resizePollInterval, pollTimeoutShort, func() (bool, error) {
pv, err = client.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if pv.Status.Phase == v1.VolumeReleased {
return true, nil
}
return false, nil
})
return pv, waitErr
}
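// Illustrative call site, matching how the tests above use this helper:
//
//	pv, err = waitForPvToBeReleased(ctx, client, pv.Name)
//	gomega.Expect(err).NotTo(gomega.HaveOccurred())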
|
[
"\"NEW_GUEST_CLUSTER_KUBE_CONFIG\"",
"\"NEW_GUEST_CLUSTER_KUBE_CONFIG\""
] |
[] |
[
"NEW_GUEST_CLUSTER_KUBE_CONFIG"
] |
[]
|
["NEW_GUEST_CLUSTER_KUBE_CONFIG"]
|
go
| 1 | 0 | |
ipn/ipnlocal/local.go
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ipnlocal
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"runtime"
"strings"
"sync"
"time"
"golang.org/x/oauth2"
"inet.af/netaddr"
"tailscale.com/control/controlclient"
"tailscale.com/internal/deepprint"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
"tailscale.com/ipn/policy"
"tailscale.com/net/interfaces"
"tailscale.com/net/tsaddr"
"tailscale.com/portlist"
"tailscale.com/tailcfg"
"tailscale.com/types/empty"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/netmap"
"tailscale.com/types/persist"
"tailscale.com/types/wgkey"
"tailscale.com/util/systemd"
"tailscale.com/version"
"tailscale.com/wgengine"
"tailscale.com/wgengine/filter"
"tailscale.com/wgengine/router"
"tailscale.com/wgengine/router/dns"
"tailscale.com/wgengine/tsdns"
"tailscale.com/wgengine/wgcfg"
"tailscale.com/wgengine/wgcfg/nmcfg"
)
var controlDebugFlags = getControlDebugFlags()
func getControlDebugFlags() []string {
if e := os.Getenv("TS_DEBUG_CONTROL_FLAGS"); e != "" {
return strings.Split(e, ",")
}
return nil
}
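// For example (hypothetical flag values): running the daemon with
// TS_DEBUG_CONTROL_FLAGS=flag1,flag2 makes controlDebugFlags equal to
// []string{"flag1", "flag2"}, while leaving the variable unset leaves it nil.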
// LocalBackend is the glue between the major pieces of the Tailscale
// network software: the cloud control plane (via controlclient), the
// network data plane (via wgengine), and the user-facing UIs and CLIs
// (collectively called "frontends", via LocalBackend's implementation
// of the Backend interface).
//
// LocalBackend implements the overall state machine for the Tailscale
// application. Frontends, controlclient and wgengine can feed events
// into LocalBackend to advance the state machine, and advancing the
// state machine generates events back out to zero or more components.
type LocalBackend struct {
// Elements that are thread-safe or constant after construction.
ctx context.Context // canceled by Close
ctxCancel context.CancelFunc // cancels ctx
logf logger.Logf // general logging
keyLogf logger.Logf // for printing list of peers on change
statsLogf logger.Logf // for printing peers stats on change
e wgengine.Engine
store ipn.StateStore
backendLogID string
portpoll *portlist.Poller // may be nil
portpollOnce sync.Once // guards starting readPoller
gotPortPollRes chan struct{} // closed upon first readPoller result
serverURL string // tailcontrol URL
newDecompressor func() (controlclient.Decompressor, error)
filterHash string
// The mutex protects the following elements.
mu sync.Mutex
notify func(ipn.Notify)
c *controlclient.Client
stateKey ipn.StateKey // computed in part from user-provided value
userID string // current controlling user ID (for Windows, primarily)
prefs *ipn.Prefs
inServerMode bool
machinePrivKey wgkey.Private
state ipn.State
// hostinfo is mutated in-place while mu is held.
hostinfo *tailcfg.Hostinfo
// netMap is not mutated in-place once set.
netMap *netmap.NetworkMap
nodeByAddr map[netaddr.IP]*tailcfg.Node
activeLogin string // last logged LoginName from netMap
engineStatus ipn.EngineStatus
endpoints []string
blocked bool
authURL string
interact bool
prevIfState *interfaces.State
// statusLock must be held before calling statusChanged.Wait() or
// statusChanged.Broadcast().
statusLock sync.Mutex
statusChanged *sync.Cond
}
// NewLocalBackend returns a new LocalBackend that is ready to run,
// but is not actually running.
func NewLocalBackend(logf logger.Logf, logid string, store ipn.StateStore, e wgengine.Engine) (*LocalBackend, error) {
if e == nil {
panic("ipn.NewLocalBackend: wgengine must not be nil")
}
// Default filter blocks everything, until Start() is called.
e.SetFilter(filter.NewAllowNone(logf))
ctx, cancel := context.WithCancel(context.Background())
portpoll, err := portlist.NewPoller()
if err != nil {
logf("skipping portlist: %s", err)
}
b := &LocalBackend{
ctx: ctx,
ctxCancel: cancel,
logf: logf,
keyLogf: logger.LogOnChange(logf, 5*time.Minute, time.Now),
statsLogf: logger.LogOnChange(logf, 5*time.Minute, time.Now),
e: e,
store: store,
backendLogID: logid,
state: ipn.NoState,
portpoll: portpoll,
gotPortPollRes: make(chan struct{}),
}
e.SetLinkChangeCallback(b.linkChange)
b.statusChanged = sync.NewCond(&b.statusLock)
return b, nil
}
// linkChange is called (in a new goroutine) by wgengine when its link monitor
// detects a network change.
func (b *LocalBackend) linkChange(major bool, ifst *interfaces.State) {
b.mu.Lock()
defer b.mu.Unlock()
hadPAC := b.prevIfState.HasPAC()
b.prevIfState = ifst
networkUp := ifst.AnyInterfaceUp()
if b.c != nil {
go b.c.SetPaused(b.state == ipn.Stopped || !networkUp)
}
// If the PAC-ness of the network changed, reconfig wireguard+route to
// add/remove subnets.
if hadPAC != ifst.HasPAC() {
b.logf("linkChange: in state %v; PAC changed from %v->%v", b.state, hadPAC, ifst.HasPAC())
switch b.state {
case ipn.NoState, ipn.Stopped:
// Do nothing.
default:
go b.authReconfig()
}
}
}
// Shutdown halts the backend and all its sub-components. The backend
// can no longer be used after Shutdown returns.
func (b *LocalBackend) Shutdown() {
b.mu.Lock()
cli := b.c
b.mu.Unlock()
if cli != nil {
cli.Shutdown()
}
b.ctxCancel()
b.e.Close()
b.e.Wait()
}
// Status returns the latest status of the backend and its
// sub-components.
func (b *LocalBackend) Status() *ipnstate.Status {
sb := new(ipnstate.StatusBuilder)
b.UpdateStatus(sb)
return sb.Status()
}
// UpdateStatus implements ipnstate.StatusUpdater.
func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) {
b.e.UpdateStatus(sb)
b.mu.Lock()
defer b.mu.Unlock()
sb.SetBackendState(b.state.String())
// TODO: hostinfo, and its networkinfo
// TODO: EngineStatus copy (and deprecate it?)
if b.netMap != nil {
sb.SetMagicDNSSuffix(b.netMap.MagicDNSSuffix())
for id, up := range b.netMap.UserProfiles {
sb.AddUser(id, up)
}
for _, p := range b.netMap.Peers {
var lastSeen time.Time
if p.LastSeen != nil {
lastSeen = *p.LastSeen
}
var tailAddr string
for _, addr := range p.Addresses {
// The peer struct currently only allows a single
// Tailscale IP address. For compatibility with the
// old display, make sure it's the IPv4 address.
if addr.IP.Is4() && addr.IsSingleIP() && tsaddr.IsTailscaleIP(addr.IP) {
tailAddr = addr.IP.String()
break
}
}
sb.AddPeer(key.Public(p.Key), &ipnstate.PeerStatus{
InNetworkMap: true,
UserID: p.User,
TailAddr: tailAddr,
HostName: p.Hostinfo.Hostname,
DNSName: p.Name,
OS: p.Hostinfo.OS,
KeepAlive: p.KeepAlive,
Created: p.Created,
LastSeen: lastSeen,
ShareeNode: p.Hostinfo.ShareeNode,
ExitNode: p.StableID != "" && p.StableID == b.prefs.ExitNodeID,
})
}
}
}
// WhoIs reports the node and user who owns the node with the given IP.
// If ok == true, n and u are valid.
func (b *LocalBackend) WhoIs(ip netaddr.IP) (n *tailcfg.Node, u tailcfg.UserProfile, ok bool) {
b.mu.Lock()
defer b.mu.Unlock()
n, ok = b.nodeByAddr[ip]
if !ok {
return nil, u, false
}
u, ok = b.netMap.UserProfiles[n.User]
if !ok {
return nil, u, false
}
return n, u, true
}
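// Illustrative sketch, not part of the original source: typical WhoIs use,
// assuming b is a running *LocalBackend and ipStr came from an incoming request.
//
//	if ip, err := netaddr.ParseIP(ipStr); err == nil {
//		if node, user, ok := b.WhoIs(ip); ok {
//			b.logf("%v is node %q owned by %q", ip, node.Name, user.LoginName)
//		}
//	}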
// SetDecompressor sets a decompression function, which must be a zstd
// reader.
//
// This exists because the iOS/Mac NetworkExtension is very resource
// constrained, and the zstd package is too heavy to fit in the
// constrained RSS limit.
func (b *LocalBackend) SetDecompressor(fn func() (controlclient.Decompressor, error)) {
b.newDecompressor = fn
}
// setClientStatus is the callback invoked by the control client whenever it posts a new status.
// Among other things, this is where we update the netmap, packet filters, DNS and DERP maps.
func (b *LocalBackend) setClientStatus(st controlclient.Status) {
// The following do not depend on any data for which we need to lock b.
if st.Err != "" {
// TODO(crawshaw): display in the UI.
if st.Err == "EOF" {
b.logf("[v1] Received error: EOF")
} else {
b.logf("Received error: %v", st.Err)
}
return
}
if st.LoginFinished != nil {
// Auth completed, unblock the engine
b.blockEngineUpdates(false)
b.authReconfig()
b.send(ipn.Notify{LoginFinished: &empty.Message{}})
}
prefsChanged := false
// Lock b once and do only the things that require locking.
b.mu.Lock()
prefs := b.prefs
stateKey := b.stateKey
netMap := b.netMap
interact := b.interact
if st.Persist != nil {
if !b.prefs.Persist.Equals(st.Persist) {
prefsChanged = true
b.prefs.Persist = st.Persist.Clone()
}
}
if temporarilySetMachineKeyInPersist() && b.prefs.Persist != nil &&
b.prefs.Persist.LegacyFrontendPrivateMachineKey.IsZero() {
b.prefs.Persist.LegacyFrontendPrivateMachineKey = b.machinePrivKey
prefsChanged = true
}
if st.NetMap != nil {
if b.keepOneExitNodeLocked(st.NetMap) {
prefsChanged = true
}
b.setNetMapLocked(st.NetMap)
}
if st.URL != "" {
b.authURL = st.URL
}
if b.state == ipn.NeedsLogin {
if !b.prefs.WantRunning {
prefsChanged = true
}
b.prefs.WantRunning = true
}
// Prefs will be written out; this is not safe unless locked or cloned.
if prefsChanged {
prefs = b.prefs.Clone()
}
b.mu.Unlock()
// Now complete the lock-free parts of what we started while locked.
if prefsChanged {
if stateKey != "" {
if err := b.store.WriteState(stateKey, prefs.ToBytes()); err != nil {
b.logf("Failed to save new controlclient state: %v", err)
}
}
b.send(ipn.Notify{Prefs: prefs})
}
if st.NetMap != nil {
if netMap != nil {
diff := st.NetMap.ConciseDiffFrom(netMap)
if strings.TrimSpace(diff) == "" {
b.logf("[v1] netmap diff: (none)")
} else {
b.logf("netmap diff:\n%v", diff)
}
}
b.updateFilter(st.NetMap, prefs)
b.e.SetNetworkMap(st.NetMap)
if !dnsMapsEqual(st.NetMap, netMap) {
b.updateDNSMap(st.NetMap)
}
b.e.SetDERPMap(st.NetMap.DERPMap)
b.send(ipn.Notify{NetMap: st.NetMap})
}
if st.URL != "" {
b.logf("Received auth URL: %.20v...", st.URL)
if interact {
b.popBrowserAuthNow()
}
}
b.stateMachine()
// This is currently (2020-07-28) necessary; conditionally disabling it is fragile!
// This is where netmap information gets propagated to router and magicsock.
b.authReconfig()
}
// keepOneExitNodeLocked edits nm to retain only the default
// routes provided by the exit node specified in b.prefs. It returns
// whether prefs was mutated as part of the process, due to an exit
// node IP being converted into a node ID.
func (b *LocalBackend) keepOneExitNodeLocked(nm *netmap.NetworkMap) (prefsChanged bool) {
// If we have a desired IP on file, try to find the corresponding
// node.
if !b.prefs.ExitNodeIP.IsZero() {
// IP takes precedence over ID, so if both are set, clear ID.
if b.prefs.ExitNodeID != "" {
b.prefs.ExitNodeID = ""
prefsChanged = true
}
peerLoop:
for _, peer := range nm.Peers {
for _, addr := range peer.Addresses {
if !addr.IsSingleIP() || addr.IP != b.prefs.ExitNodeIP {
continue
}
// Found the node being referenced, upgrade prefs to
// reference it directly for next time.
b.prefs.ExitNodeID = peer.StableID
b.prefs.ExitNodeIP = netaddr.IP{}
prefsChanged = true
break peerLoop
}
}
}
// At this point, we have a node ID if the requested node is in
// the netmap. If not, the ID will be empty, and we'll strip out
// all default routes.
for _, peer := range nm.Peers {
out := peer.AllowedIPs[:0]
for _, allowedIP := range peer.AllowedIPs {
if allowedIP.Bits == 0 && peer.StableID != b.prefs.ExitNodeID {
continue
}
out = append(out, allowedIP)
}
peer.AllowedIPs = out
}
return prefsChanged
}
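// Illustrative note, not part of the original source: the loop above strips
// default routes (prefixes with Bits == 0, i.e. 0.0.0.0/0 and ::/0) from every
// peer other than the selected exit node. For example, a non-exit peer
// advertising {100.64.0.1/32, 0.0.0.0/0} is left with just {100.64.0.1/32}.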
// setWgengineStatus is the callback invoked by the wireguard engine whenever it posts a new status.
// This updates the endpoints both in the backend and in the control client.
func (b *LocalBackend) setWgengineStatus(s *wgengine.Status, err error) {
if err != nil {
b.logf("wgengine status error: %v", err)
return
}
if s == nil {
b.logf("[unexpected] non-error wgengine update with status=nil: %v", s)
return
}
b.mu.Lock()
es := b.parseWgStatusLocked(s)
c := b.c
b.engineStatus = es
b.endpoints = append([]string{}, s.LocalAddrs...)
b.mu.Unlock()
if c != nil {
c.UpdateEndpoints(0, s.LocalAddrs)
}
b.stateMachine()
b.statusLock.Lock()
b.statusChanged.Broadcast()
b.statusLock.Unlock()
b.send(ipn.Notify{Engine: &es})
}
// Start applies the configuration specified in opts, and starts the
// state machine.
//
// TODO(danderson): this function is trying to do too many things at
// once: it loads state, or imports it, or updates prefs sometimes,
// contains some settings that are one-shot things done by `tailscale
// up` because we had nowhere else to put them, and there's no clear
// guarantee that switching from one user's state to another is
// actually a supported operation (it should be, but it's very unclear
// from the following whether or not that is a safe transition).
func (b *LocalBackend) Start(opts ipn.Options) error {
if opts.Prefs == nil && opts.StateKey == "" {
return errors.New("no state key or prefs provided")
}
if opts.Prefs != nil {
b.logf("Start: %v", opts.Prefs.Pretty())
} else {
b.logf("Start")
}
hostinfo := controlclient.NewHostinfo()
hostinfo.BackendLogID = b.backendLogID
hostinfo.FrontendLogID = opts.FrontendLogID
b.mu.Lock()
if b.c != nil {
// TODO(apenwarr): avoid the need to reinit controlclient.
// This will trigger a full relogin/reconfigure cycle every
// time a Handle reconnects to the backend. Ideally, we
// would send the new Prefs and everything would get back
// into sync with the minimal changes. But that's not how it
// is right now, which is a sign that the code is still too
// complicated.
b.c.Shutdown()
}
if b.hostinfo != nil {
hostinfo.Services = b.hostinfo.Services // keep any previous session and netinfo
hostinfo.NetInfo = b.hostinfo.NetInfo
}
b.hostinfo = hostinfo
b.state = ipn.NoState
if err := b.loadStateLocked(opts.StateKey, opts.Prefs, opts.LegacyConfigPath); err != nil {
b.mu.Unlock()
return fmt.Errorf("loading requested state: %v", err)
}
b.inServerMode = b.prefs.ForceDaemon
b.serverURL = b.prefs.ControlURL
hostinfo.RoutableIPs = append(hostinfo.RoutableIPs, b.prefs.AdvertiseRoutes...)
hostinfo.RequestTags = append(hostinfo.RequestTags, b.prefs.AdvertiseTags...)
if b.inServerMode || runtime.GOOS == "windows" {
b.logf("Start: serverMode=%v", b.inServerMode)
}
applyPrefsToHostinfo(hostinfo, b.prefs)
b.notify = opts.Notify
b.setNetMapLocked(nil)
persistv := b.prefs.Persist
machinePrivKey := b.machinePrivKey
b.mu.Unlock()
b.updateFilter(nil, nil)
if b.portpoll != nil {
b.portpollOnce.Do(func() {
go b.portpoll.Run(b.ctx)
go b.readPoller()
// Give the poller a second to get results to
// prevent it from restarting our map poll
// HTTP request (via doSetHostinfoFilterServices >
// cli.SetHostinfo). In practice this is very quick.
t0 := time.Now()
timer := time.NewTimer(time.Second)
select {
case <-b.gotPortPollRes:
b.logf("got initial portlist info in %v", time.Since(t0).Round(time.Millisecond))
timer.Stop()
case <-timer.C:
b.logf("timeout waiting for initial portlist")
}
})
}
var discoPublic tailcfg.DiscoKey
if controlclient.Debug.Disco {
discoPublic = b.e.DiscoPublicKey()
}
var err error
if persistv == nil {
// let controlclient initialize it
persistv = &persist.Persist{}
}
cli, err := controlclient.New(controlclient.Options{
MachinePrivateKey: machinePrivKey,
Logf: logger.WithPrefix(b.logf, "control: "),
Persist: *persistv,
ServerURL: b.serverURL,
AuthKey: opts.AuthKey,
Hostinfo: hostinfo,
KeepAlive: true,
NewDecompressor: b.newDecompressor,
HTTPTestClient: opts.HTTPTestClient,
DiscoPublicKey: discoPublic,
DebugFlags: controlDebugFlags,
})
if err != nil {
return err
}
b.mu.Lock()
b.c = cli
endpoints := b.endpoints
b.mu.Unlock()
if endpoints != nil {
cli.UpdateEndpoints(0, endpoints)
}
cli.SetStatusFunc(b.setClientStatus)
b.e.SetStatusCallback(b.setWgengineStatus)
b.e.SetNetInfoCallback(b.setNetInfo)
b.mu.Lock()
prefs := b.prefs.Clone()
if temporarilySetMachineKeyInPersist() && prefs.Persist != nil &&
prefs.Persist.LegacyFrontendPrivateMachineKey.IsZero() {
prefs.Persist.LegacyFrontendPrivateMachineKey = b.machinePrivKey
}
b.mu.Unlock()
blid := b.backendLogID
b.logf("Backend: logs: be:%v fe:%v", blid, opts.FrontendLogID)
b.send(ipn.Notify{BackendLogID: &blid})
b.send(ipn.Notify{Prefs: prefs})
cli.Login(nil, controlclient.LoginDefault)
return nil
}
// updateFilter updates the packet filter in wgengine based on the
// given netMap and user preferences.
func (b *LocalBackend) updateFilter(netMap *netmap.NetworkMap, prefs *ipn.Prefs) {
// NOTE(danderson): keep change detection as the first thing in
// this function. Don't try to optimize by returning early, more
// likely than not you'll just end up breaking the change
// detection and end up with the wrong filter installed. This is
// quite hard to debug, so save yourself the trouble.
var (
haveNetmap = netMap != nil
addrs []netaddr.IPPrefix
packetFilter []filter.Match
localNetsB netaddr.IPSetBuilder
shieldsUp = prefs == nil || prefs.ShieldsUp // Be conservative when not ready
)
if haveNetmap {
addrs = netMap.Addresses
for _, p := range addrs {
localNetsB.AddPrefix(p)
}
packetFilter = netMap.PacketFilter
}
if prefs != nil {
for _, r := range prefs.AdvertiseRoutes {
// TODO: when advertising default routes, trim out local
// nets.
localNetsB.AddPrefix(r)
}
}
localNets := localNetsB.IPSet()
changed := deepprint.UpdateHash(&b.filterHash, haveNetmap, addrs, packetFilter, localNets.Ranges(), shieldsUp)
if !changed {
return
}
if !haveNetmap {
b.logf("netmap packet filter: (not ready yet)")
b.e.SetFilter(filter.NewAllowNone(b.logf))
return
}
oldFilter := b.e.GetFilter()
if shieldsUp {
b.logf("netmap packet filter: (shields up)")
b.e.SetFilter(filter.NewShieldsUpFilter(localNets, oldFilter, b.logf))
} else {
b.logf("netmap packet filter: %v", packetFilter)
b.e.SetFilter(filter.New(packetFilter, localNets, oldFilter, b.logf))
}
}
// dnsCIDRsEqual determines whether two CIDR lists are equal
// for DNS map construction purposes (that is, only the first entry counts).
func dnsCIDRsEqual(newAddr, oldAddr []netaddr.IPPrefix) bool {
if len(newAddr) != len(oldAddr) {
return false
}
if len(newAddr) == 0 || newAddr[0] == oldAddr[0] {
return true
}
return false
}
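// Illustrative sketch, not part of the original source: only the first entry
// is compared, so these two lists are considered equal for DNS purposes.
//
//	a := []netaddr.IPPrefix{netaddr.MustParseIPPrefix("100.64.0.1/32"), netaddr.MustParseIPPrefix("10.0.0.0/8")}
//	b := []netaddr.IPPrefix{netaddr.MustParseIPPrefix("100.64.0.1/32"), netaddr.MustParseIPPrefix("192.168.0.0/16")}
//	_ = dnsCIDRsEqual(a, b) // true: same length, same first entry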
// dnsMapsEqual determines whether the new and the old network map
// induce the same DNS map. It does so without allocating memory,
// at the expense of giving false negatives if peers are reordered.
func dnsMapsEqual(new, old *netmap.NetworkMap) bool {
if (old == nil) != (new == nil) {
return false
}
if old == nil && new == nil {
return true
}
if len(new.Peers) != len(old.Peers) {
return false
}
if new.Name != old.Name {
return false
}
if !dnsCIDRsEqual(new.Addresses, old.Addresses) {
return false
}
for i, newPeer := range new.Peers {
oldPeer := old.Peers[i]
if newPeer.Name != oldPeer.Name {
return false
}
if !dnsCIDRsEqual(newPeer.Addresses, oldPeer.Addresses) {
return false
}
}
return true
}
// updateDNSMap updates the domain map in the DNS resolver in wgengine
// based on the given netMap and user preferences.
func (b *LocalBackend) updateDNSMap(netMap *netmap.NetworkMap) {
if netMap == nil {
b.logf("dns map: (not ready)")
return
}
nameToIP := make(map[string]netaddr.IP)
set := func(name string, addrs []netaddr.IPPrefix) {
if len(addrs) == 0 || name == "" {
return
}
nameToIP[name] = addrs[0].IP
}
for _, peer := range netMap.Peers {
set(peer.Name, peer.Addresses)
}
set(netMap.Name, netMap.Addresses)
dnsMap := tsdns.NewMap(nameToIP, magicDNSRootDomains(netMap))
// map diff will be logged in tsdns.Resolver.SetMap.
b.e.SetDNSMap(dnsMap)
}
// readPoller is a goroutine that receives service lists from
// b.portpoll and propagates them into the controlclient's HostInfo.
func (b *LocalBackend) readPoller() {
n := 0
for {
ports, ok := <-b.portpoll.C
if !ok {
return
}
sl := []tailcfg.Service{}
for _, p := range ports {
s := tailcfg.Service{
Proto: tailcfg.ServiceProto(p.Proto),
Port: p.Port,
Description: p.Process,
}
if policy.IsInterestingService(s, version.OS()) {
sl = append(sl, s)
}
}
b.mu.Lock()
if b.hostinfo == nil {
b.hostinfo = new(tailcfg.Hostinfo)
}
b.hostinfo.Services = sl
hi := b.hostinfo
b.mu.Unlock()
b.doSetHostinfoFilterServices(hi)
n++
if n == 1 {
close(b.gotPortPollRes)
}
}
}
// send delivers n to the connected frontend. If no frontend is
// connected, the notification is dropped without being delivered.
func (b *LocalBackend) send(n ipn.Notify) {
b.mu.Lock()
notify := b.notify
b.mu.Unlock()
if notify != nil {
n.Version = version.Long
notify(n)
} else {
b.logf("nil notify callback; dropping %+v", n)
}
}
// popBrowserAuthNow shuts down the data plane and sends an auth URL
// to the connected frontend, if any.
func (b *LocalBackend) popBrowserAuthNow() {
b.mu.Lock()
url := b.authURL
b.interact = false
b.authURL = ""
b.mu.Unlock()
b.logf("popBrowserAuthNow: url=%v", url != "")
b.blockEngineUpdates(true)
b.stopEngineAndWait()
b.send(ipn.Notify{BrowseToURL: &url})
if b.State() == ipn.Running {
b.enterState(ipn.Starting)
}
}
// initMachineKeyLocked is called to initialize b.machinePrivKey.
//
// b.prefs must already be initialized.
// b.stateKey should be set too, but just for nicer log messages.
// b.mu must be held.
func (b *LocalBackend) initMachineKeyLocked() (err error) {
if temporarilySetMachineKeyInPersist() {
defer func() {
if err != nil {
return
}
if b.prefs != nil && b.prefs.Persist != nil {
b.prefs.Persist.LegacyFrontendPrivateMachineKey = b.machinePrivKey
}
}()
}
if !b.machinePrivKey.IsZero() {
// Already set.
return nil
}
var legacyMachineKey wgkey.Private
if b.prefs.Persist != nil {
legacyMachineKey = b.prefs.Persist.LegacyFrontendPrivateMachineKey
}
keyText, err := b.store.ReadState(ipn.MachineKeyStateKey)
if err == nil {
if err := b.machinePrivKey.UnmarshalText(keyText); err != nil {
return fmt.Errorf("invalid key in %s key of %v: %w", ipn.MachineKeyStateKey, b.store, err)
}
if b.machinePrivKey.IsZero() {
return fmt.Errorf("invalid zero key stored in %v key of %v", ipn.MachineKeyStateKey, b.store)
}
if !legacyMachineKey.IsZero() && !bytes.Equal(legacyMachineKey[:], b.machinePrivKey[:]) {
b.logf("frontend-provided legacy machine key ignored; used value from server state")
}
return nil
}
if err != ipn.ErrStateNotExist {
return fmt.Errorf("error reading %v key of %v: %w", ipn.MachineKeyStateKey, b.store, err)
}
// If we didn't find one already on disk and the prefs already
// have a legacy machine key, use that. Otherwise generate a
// new one.
if !legacyMachineKey.IsZero() {
if b.stateKey == "" {
b.logf("using frontend-provided legacy machine key")
} else {
b.logf("using legacy machine key from state key %q", b.stateKey)
}
b.machinePrivKey = legacyMachineKey
} else {
b.logf("generating new machine key")
var err error
b.machinePrivKey, err = wgkey.NewPrivate()
if err != nil {
return fmt.Errorf("initializing new machine key: %w", err)
}
}
keyText, _ = b.machinePrivKey.MarshalText()
if err := b.store.WriteState(ipn.MachineKeyStateKey, keyText); err != nil {
b.logf("error writing machine key to store: %v", err)
return err
}
b.logf("machine key written to store")
return nil
}
// writeServerModeStartState stores the ServerModeStartKey value based on the current
// user and prefs. If userID is blank or prefs is blank, no work is done.
//
// b.mu may either be held or not.
func (b *LocalBackend) writeServerModeStartState(userID string, prefs *ipn.Prefs) {
if userID == "" || prefs == nil {
return
}
if prefs.ForceDaemon {
stateKey := ipn.StateKey("user-" + userID)
if err := b.store.WriteState(ipn.ServerModeStartKey, []byte(stateKey)); err != nil {
b.logf("WriteState error: %v", err)
}
// It's important we do this here too, even if it looks
// redundant with the one in the 'if stateKey != ""'
// check block above. That one won't fire in the case
// where the Windows client started up in client mode.
// This happens when we transition into server mode:
if err := b.store.WriteState(stateKey, prefs.ToBytes()); err != nil {
b.logf("WriteState error: %v", err)
}
} else {
if err := b.store.WriteState(ipn.ServerModeStartKey, nil); err != nil {
b.logf("WriteState error: %v", err)
}
}
}
// loadStateLocked sets b.prefs and b.stateKey based on a complex
// combination of key, prefs, and legacyPath. b.mu must be held when
// calling.
func (b *LocalBackend) loadStateLocked(key ipn.StateKey, prefs *ipn.Prefs, legacyPath string) (err error) {
if prefs == nil && key == "" {
panic("state key and prefs are both unset")
}
// Optimistically set stateKey (for initMachineKeyLocked's
// logging), but revert it if we return an error so a later SetPrefs
// call can't pick it up if it's bogus.
b.stateKey = key
defer func() {
if err != nil {
b.stateKey = ""
}
}()
if key == "" {
// Frontend owns the state, we just need to obey it.
//
// If the frontend (e.g. on Windows) supplied the
// optional/legacy machine key then it's used as the
// value instead of making up a new one.
b.logf("using frontend prefs: %s", prefs.Pretty())
b.prefs = prefs.Clone()
if err := b.initMachineKeyLocked(); err != nil {
return fmt.Errorf("initMachineKeyLocked: %w", err)
}
b.writeServerModeStartState(b.userID, b.prefs)
return nil
}
if prefs != nil {
// Backend owns the state, but frontend is trying to migrate
// state into the backend.
b.logf("importing frontend prefs into backend store; frontend prefs: %s", prefs.Pretty())
if err := b.store.WriteState(key, prefs.ToBytes()); err != nil {
return fmt.Errorf("store.WriteState: %v", err)
}
}
b.logf("using backend prefs")
bs, err := b.store.ReadState(key)
if err != nil {
if errors.Is(err, ipn.ErrStateNotExist) {
if legacyPath != "" {
b.prefs, err = ipn.LoadPrefs(legacyPath)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
b.logf("failed to load legacy prefs: %v", err)
}
b.prefs = ipn.NewPrefs()
} else {
b.logf("imported prefs from relaynode for %q: %v", key, b.prefs.Pretty())
}
} else {
b.prefs = ipn.NewPrefs()
b.logf("created empty state for %q: %s", key, b.prefs.Pretty())
}
if err := b.initMachineKeyLocked(); err != nil {
return fmt.Errorf("initMachineKeyLocked: %w", err)
}
return nil
}
return fmt.Errorf("store.ReadState(%q): %v", key, err)
}
b.prefs, err = ipn.PrefsFromBytes(bs, false)
if err != nil {
return fmt.Errorf("PrefsFromBytes: %v", err)
}
b.logf("backend prefs for %q: %s", key, b.prefs.Pretty())
if err := b.initMachineKeyLocked(); err != nil {
return fmt.Errorf("initMachineKeyLocked: %w", err)
}
return nil
}
// State returns the backend state machine's current state.
func (b *LocalBackend) State() ipn.State {
b.mu.Lock()
defer b.mu.Unlock()
return b.state
}
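// InServerMode reports whether the backend is currently running in
// server ("always-on") mode, as set from prefs.ForceDaemon.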
func (b *LocalBackend) InServerMode() bool {
b.mu.Lock()
defer b.mu.Unlock()
return b.inServerMode
}
// getEngineStatus returns a copy of b.engineStatus.
//
// TODO(bradfitz): remove this and use Status() throughout.
func (b *LocalBackend) getEngineStatus() ipn.EngineStatus {
b.mu.Lock()
defer b.mu.Unlock()
return b.engineStatus
}
// Login implements Backend.
func (b *LocalBackend) Login(token *oauth2.Token) {
b.mu.Lock()
b.assertClientLocked()
c := b.c
b.mu.Unlock()
c.Login(token, controlclient.LoginInteractive)
}
// StartLoginInteractive implements Backend. It requests a new
// interactive login from controlclient, unless such a flow is already
// in progress, in which case StartLoginInteractive attempts to pick
// up the in-progress flow where it left off.
func (b *LocalBackend) StartLoginInteractive() {
b.mu.Lock()
b.assertClientLocked()
b.interact = true
url := b.authURL
c := b.c
b.mu.Unlock()
b.logf("StartLoginInteractive: url=%v", url != "")
if url != "" {
b.popBrowserAuthNow()
} else {
c.Login(nil, controlclient.LoginInteractive)
}
}
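// Illustrative note, not part of the original source: the usual interactive
// login sequence as seen from a frontend. StartLoginInteractive either pops
// the already-cached auth URL immediately, or controlclient later posts a
// status with st.URL set, at which point setClientStatus records it and
// popBrowserAuthNow sends ipn.Notify{BrowseToURL: &url} to the frontend.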
// FakeExpireAfter implements Backend.
func (b *LocalBackend) FakeExpireAfter(x time.Duration) {
b.logf("FakeExpireAfter: %v", x)
b.mu.Lock()
defer b.mu.Unlock()
if b.netMap == nil {
return
}
// This function is called very rarely,
// so we prefer to fully copy the netmap rather than introduce in-place modification here.
mapCopy := *b.netMap
e := mapCopy.Expiry
if e.IsZero() || time.Until(e) > x {
mapCopy.Expiry = time.Now().Add(x)
}
b.setNetMapLocked(&mapCopy)
b.send(ipn.Notify{NetMap: b.netMap})
}
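// Ping parses ipStr and, if it is a valid IP, asks wgengine to ping it,
// delivering the result to the connected frontend as a PingResult notification.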
func (b *LocalBackend) Ping(ipStr string) {
ip, err := netaddr.ParseIP(ipStr)
if err != nil {
b.logf("ignoring Ping request to invalid IP %q", ipStr)
return
}
b.e.Ping(ip, func(pr *ipnstate.PingResult) {
b.send(ipn.Notify{PingResult: pr})
})
}
// parseWgStatusLocked returns an EngineStatus based on s.
//
// b.mu must be held; mostly because the caller already holds it anyway, and doing so
// gives slightly better guarantees that the two peer-stats log lines are not
// intermixed if there are concurrent calls to our caller.
func (b *LocalBackend) parseWgStatusLocked(s *wgengine.Status) (ret ipn.EngineStatus) {
var peerStats, peerKeys strings.Builder
ret.LiveDERPs = s.DERPs
ret.LivePeers = map[tailcfg.NodeKey]ipnstate.PeerStatusLite{}
for _, p := range s.Peers {
if !p.LastHandshake.IsZero() {
fmt.Fprintf(&peerStats, "%d/%d ", p.RxBytes, p.TxBytes)
fmt.Fprintf(&peerKeys, "%s ", p.NodeKey.ShortString())
ret.NumLive++
ret.LivePeers[p.NodeKey] = p
}
ret.RBytes += p.RxBytes
ret.WBytes += p.TxBytes
}
// [GRINDER STATS LINES] - please don't remove (used for log parsing)
if peerStats.Len() > 0 {
b.keyLogf("[v1] peer keys: %s", strings.TrimSpace(peerKeys.String()))
b.statsLogf("[v1] v%v peers: %v", version.Long, strings.TrimSpace(peerStats.String()))
}
return ret
}
// shouldUploadServices reports whether this node should include services
// in Hostinfo. When the user preferences currently request "shields up"
// mode, all inbound connections are refused, so services are not reported.
// Otherwise, shouldUploadServices respects NetMap.CollectServices.
func (b *LocalBackend) shouldUploadServices() bool {
b.mu.Lock()
defer b.mu.Unlock()
if b.prefs == nil || b.netMap == nil {
return false // default to safest setting
}
return !b.prefs.ShieldsUp && b.netMap.CollectServices
}
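// SetCurrentUserID records the ID of the user currently controlling this
// backend (primarily meaningful on Windows).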
func (b *LocalBackend) SetCurrentUserID(uid string) {
b.mu.Lock()
b.userID = uid
b.mu.Unlock()
}
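// SetWantRunning sets the WantRunning preference, if it changed, and
// propagates it via SetPrefs.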
func (b *LocalBackend) SetWantRunning(wantRunning bool) {
b.mu.Lock()
new := b.prefs.Clone()
b.mu.Unlock()
if new.WantRunning == wantRunning {
return
}
new.WantRunning = wantRunning
b.logf("SetWantRunning: %v", wantRunning)
b.SetPrefs(new)
}
// SetPrefs saves new user preferences and propagates them throughout
// the system. Implements Backend.
func (b *LocalBackend) SetPrefs(newp *ipn.Prefs) {
if newp == nil {
panic("SetPrefs got nil prefs")
}
b.mu.Lock()
netMap := b.netMap
stateKey := b.stateKey
oldp := b.prefs
newp.Persist = oldp.Persist // caller isn't allowed to override this
b.prefs = newp
b.inServerMode = newp.ForceDaemon
// Clone the new prefs so the work below can run without holding the lock.
newp = b.prefs.Clone()
oldHi := b.hostinfo
newHi := oldHi.Clone()
newHi.RoutableIPs = append([]netaddr.IPPrefix(nil), b.prefs.AdvertiseRoutes...)
applyPrefsToHostinfo(newHi, newp)
b.hostinfo = newHi
hostInfoChanged := !oldHi.Equal(newHi)
userID := b.userID
b.mu.Unlock()
if stateKey != "" {
if err := b.store.WriteState(stateKey, newp.ToBytes()); err != nil {
b.logf("Failed to save new controlclient state: %v", err)
}
}
b.writeServerModeStartState(userID, newp)
// [GRINDER STATS LINE] - please don't remove (used for log parsing)
b.logf("SetPrefs: %v", newp.Pretty())
if netMap != nil {
if login := netMap.UserProfiles[netMap.User].LoginName; login != "" {
if newp.Persist == nil {
b.logf("active login: %s", login)
} else if newp.Persist.LoginName != login {
// Corp issue 461: sometimes the wrong prefs are
// logged; the frontend isn't always getting
// notified (to update its prefs/persist) on
// account switch. Log this while we figure it
// out.
b.logf("active login: %s ([unexpected] corp#461, not %s)", newp.Persist.LoginName)
}
}
}
if oldp.ShieldsUp != newp.ShieldsUp || hostInfoChanged {
b.doSetHostinfoFilterServices(newHi)
}
b.updateFilter(netMap, newp)
if netMap != nil {
b.e.SetDERPMap(netMap.DERPMap)
}
if oldp.WantRunning != newp.WantRunning {
b.stateMachine()
} else {
b.authReconfig()
}
b.send(ipn.Notify{Prefs: newp})
}
// doSetHostinfoFilterServices calls SetHostinfo on the controlclient,
// possibly after mangling the given hostinfo.
//
// TODO(danderson): we shouldn't be mangling hostinfo here after
// painstakingly constructing it in twelvety other places.
func (b *LocalBackend) doSetHostinfoFilterServices(hi *tailcfg.Hostinfo) {
hi2 := *hi
if !b.shouldUploadServices() {
hi2.Services = []tailcfg.Service{}
}
b.mu.Lock()
cli := b.c
b.mu.Unlock()
// b.c might not be started yet
if cli != nil {
cli.SetHostinfo(&hi2)
}
}
// NetMap returns the latest cached network map received from
// controlclient, or nil if no network map was received yet.
func (b *LocalBackend) NetMap() *netmap.NetworkMap {
b.mu.Lock()
defer b.mu.Unlock()
return b.netMap
}
// blockEngineUpdates sets b.blocked to block, while holding b.mu. Its
// indirect effect is to turn b.authReconfig() into a no-op if block
// is true.
func (b *LocalBackend) blockEngineUpdates(block bool) {
b.logf("blockEngineUpdates(%v)", block)
b.mu.Lock()
b.blocked = block
b.mu.Unlock()
}
// authReconfig pushes a new configuration into wgengine, if engine
// updates are not currently blocked, based on the cached netmap and
// user prefs.
func (b *LocalBackend) authReconfig() {
b.mu.Lock()
blocked := b.blocked
uc := b.prefs
nm := b.netMap
hasPAC := b.prevIfState.HasPAC()
disableSubnetsIfPAC := nm != nil && nm.Debug != nil && nm.Debug.DisableSubnetsIfPAC.EqualBool(true)
b.mu.Unlock()
if blocked {
b.logf("authReconfig: blocked, skipping.")
return
}
if nm == nil {
b.logf("authReconfig: netmap not yet valid. Skipping.")
return
}
if !uc.WantRunning {
b.logf("authReconfig: skipping because !WantRunning.")
return
}
var flags netmap.WGConfigFlags
if uc.RouteAll {
flags |= netmap.AllowSubnetRoutes
}
if uc.AllowSingleHosts {
flags |= netmap.AllowSingleHosts
}
if hasPAC && disableSubnetsIfPAC {
if flags&netmap.AllowSubnetRoutes != 0 {
b.logf("authReconfig: have PAC; disabling subnet routes")
flags &^= netmap.AllowSubnetRoutes
}
}
cfg, err := nmcfg.WGCfg(nm, b.logf, flags)
if err != nil {
b.logf("wgcfg: %v", err)
return
}
rcfg := routerConfig(cfg, uc)
// If CorpDNS is false, rcfg.DNS remains the zero value.
if uc.CorpDNS {
proxied := nm.DNS.Proxied
if proxied && len(nm.DNS.Nameservers) == 0 {
b.logf("[unexpected] dns proxied but no nameservers")
proxied = false
}
rcfg.DNS = dns.Config{
Nameservers: nm.DNS.Nameservers,
Domains: nm.DNS.Domains,
PerDomain: nm.DNS.PerDomain,
Proxied: proxied,
}
}
err = b.e.Reconfig(cfg, rcfg)
if err == wgengine.ErrNoChanges {
return
}
b.logf("[v1] authReconfig: ra=%v dns=%v 0x%02x: %v", uc.RouteAll, uc.CorpDNS, flags, err)
}
// magicDNSRootDomains returns the subset of nm.DNS.Domains that are the search domains for MagicDNS.
// Each entry has a trailing period.
func magicDNSRootDomains(nm *netmap.NetworkMap) []string {
if v := nm.MagicDNSSuffix(); v != "" {
return []string{strings.Trim(v, ".") + "."}
}
return nil
}
var (
ipv4Default = netaddr.MustParseIPPrefix("0.0.0.0/0")
ipv6Default = netaddr.MustParseIPPrefix("::/0")
)
// routerConfig produces a router.Config from a wireguard config and IPN prefs.
func routerConfig(cfg *wgcfg.Config, prefs *ipn.Prefs) *router.Config {
rs := &router.Config{
LocalAddrs: unmapIPPrefixes(cfg.Addresses),
SubnetRoutes: unmapIPPrefixes(prefs.AdvertiseRoutes),
SNATSubnetRoutes: !prefs.NoSNAT,
NetfilterMode: prefs.NetfilterMode,
}
for _, peer := range cfg.Peers {
rs.Routes = append(rs.Routes, unmapIPPrefixes(peer.AllowedIPs)...)
}
// Sanity check: we expect the control server to program both a v4
// and a v6 default route, if default routing is on. Fill in
// blackhole routes appropriately if we're missing some. This is
// likely to break some functionality, but if the user expressed a
// preference for routing remotely, we want to avoid leaking
// traffic at the expense of functionality.
if prefs.ExitNodeID != "" || !prefs.ExitNodeIP.IsZero() {
var default4, default6 bool
for _, route := range rs.Routes {
if route == ipv4Default {
default4 = true
} else if route == ipv6Default {
default6 = true
}
if default4 && default6 {
break
}
}
if !default4 {
rs.Routes = append(rs.Routes, ipv4Default)
}
if !default6 {
rs.Routes = append(rs.Routes, ipv6Default)
}
}
rs.Routes = append(rs.Routes, netaddr.IPPrefix{
IP: tsaddr.TailscaleServiceIP(),
Bits: 32,
})
return rs
}
func unmapIPPrefixes(ippsList ...[]netaddr.IPPrefix) (ret []netaddr.IPPrefix) {
for _, ipps := range ippsList {
for _, ipp := range ipps {
ret = append(ret, netaddr.IPPrefix{IP: ipp.IP.Unmap(), Bits: ipp.Bits})
}
}
return ret
}
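// Illustrative sketch, not part of the original source: unmapIPPrefixes is
// variadic over slices of prefixes, so several lists can be flattened at once.
//
//	all := unmapIPPrefixes(cfg.Addresses, prefs.AdvertiseRoutes)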
func applyPrefsToHostinfo(hi *tailcfg.Hostinfo, prefs *ipn.Prefs) {
if h := prefs.Hostname; h != "" {
hi.Hostname = h
}
if v := prefs.OSVersion; v != "" {
hi.OSVersion = v
}
if m := prefs.DeviceModel; m != "" {
hi.DeviceModel = m
}
hi.ShieldsUp = prefs.ShieldsUp
}
// enterState transitions the backend into newState, updating internal
// state and propagating events out as needed.
//
// TODO(danderson): while this isn't a lie, exactly, a ton of other
// places twiddle IPN internal state without going through here, so
// really this is more "one of several places in which random things
// happen".
func (b *LocalBackend) enterState(newState ipn.State) {
b.mu.Lock()
state := b.state
b.state = newState
prefs := b.prefs
notify := b.notify
bc := b.c
networkUp := b.prevIfState.AnyInterfaceUp()
activeLogin := b.activeLogin
authURL := b.authURL
b.mu.Unlock()
if state == newState {
return
}
b.logf("Switching ipn state %v -> %v (WantRunning=%v)",
state, newState, prefs.WantRunning)
if notify != nil {
b.send(ipn.Notify{State: &newState})
}
if bc != nil {
bc.SetPaused(newState == ipn.Stopped || !networkUp)
}
switch newState {
case ipn.NeedsLogin:
systemd.Status("Needs login: %s", authURL)
b.blockEngineUpdates(true)
fallthrough
case ipn.Stopped:
err := b.e.Reconfig(&wgcfg.Config{}, &router.Config{})
if err != nil {
b.logf("Reconfig(down): %v", err)
}
if authURL == "" {
systemd.Status("Stopped; run 'tailscale up' to log in")
}
case ipn.Starting, ipn.NeedsMachineAuth:
b.authReconfig()
// Needed so that UpdateEndpoints can run
b.e.RequestStatus()
case ipn.Running:
var addrs []string
for _, addr := range b.netMap.Addresses {
addrs = append(addrs, addr.IP.String())
}
systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrs, " "))
default:
b.logf("[unexpected] unknown newState %#v", newState)
}
}
// nextState returns the state the backend seems to be in, based on
// its internal state.
func (b *LocalBackend) nextState() ipn.State {
b.mu.Lock()
b.assertClientLocked()
var (
c = b.c
netMap = b.netMap
state = b.state
wantRunning = b.prefs.WantRunning
)
b.mu.Unlock()
switch {
case netMap == nil:
if c.AuthCantContinue() {
// Auth was interrupted or waiting for URL visit,
// so it won't proceed without human help.
return ipn.NeedsLogin
} else {
// Auth or map request needs to finish
return state
}
case !wantRunning:
return ipn.Stopped
case !netMap.Expiry.IsZero() && time.Until(netMap.Expiry) <= 0:
return ipn.NeedsLogin
case netMap.MachineStatus != tailcfg.MachineAuthorized:
// TODO(crawshaw): handle tailcfg.MachineInvalid
return ipn.NeedsMachineAuth
case state == ipn.NeedsMachineAuth:
// (if we get here, we know MachineAuthorized == true)
return ipn.Starting
case state == ipn.Starting:
if st := b.getEngineStatus(); st.NumLive > 0 || st.LiveDERPs > 0 {
return ipn.Running
} else {
return state
}
case state == ipn.Running:
return ipn.Running
default:
return ipn.Starting
}
}
// RequestEngineStatus implements Backend.
func (b *LocalBackend) RequestEngineStatus() {
b.e.RequestStatus()
}
// RequestStatus implements Backend.
func (b *LocalBackend) RequestStatus() {
st := b.Status()
b.send(ipn.Notify{Status: st})
}
// stateMachine updates the state machine state based on other things
// that have happened. It is invoked from the various callbacks that
// feed events into LocalBackend.
//
// TODO(apenwarr): use a channel or something to prevent re-entrancy?
// Or maybe just call the state machine from fewer places.
func (b *LocalBackend) stateMachine() {
b.enterState(b.nextState())
}
// stopEngineAndWait deconfigures the local network data plane, and
// waits for it to deliver a status update before returning.
//
// TODO(danderson): this may be racy. We could unblock upon receiving
// a status update that predates the "I've shut down" update.
func (b *LocalBackend) stopEngineAndWait() {
b.logf("stopEngineAndWait...")
b.e.Reconfig(&wgcfg.Config{}, &router.Config{})
b.requestEngineStatusAndWait()
b.logf("stopEngineAndWait: done.")
}
// requestEngineStatusAndWait requests the wgengine status and does not return
// until the status has been delivered (to the usual callback).
func (b *LocalBackend) requestEngineStatusAndWait() {
b.logf("requestEngineStatusAndWait")
b.statusLock.Lock()
go b.e.RequestStatus()
b.logf("requestEngineStatusAndWait: waiting...")
b.statusChanged.Wait() // temporarily releases lock while waiting
b.logf("requestEngineStatusAndWait: got status update.")
b.statusLock.Unlock()
}
// Logout tells the controlclient that we want to log out, and transitions the local engine to the logged-out state without waiting for controlclient to be in that state.
//
// TODO(danderson): controlclient Logout does nothing useful, and we
// shouldn't be transitioning to a state based on what we believe
// controlclient may have done.
//
// NOTE(apenwarr): No easy way to persist logged-out status.
// Maybe that's for the better; if someone logs out accidentally,
// rebooting will fix it.
func (b *LocalBackend) Logout() {
b.mu.Lock()
c := b.c
b.setNetMapLocked(nil)
b.mu.Unlock()
if c == nil {
// Double Logout can happen via repeated IPN
// connections to ipnserver making it repeatedly
// transition from 1->0 total connections, which on
// Windows by default ("client mode") causes a Logout
// on the transition to zero.
// Previously this crashed when we asserted that c was non-nil
// here.
return
}
c.Logout()
b.mu.Lock()
b.setNetMapLocked(nil)
b.mu.Unlock()
b.stateMachine()
}
// assertClientLocked crashes if there is no controlclient in this backend.
func (b *LocalBackend) assertClientLocked() {
if b.c == nil {
panic("LocalBackend.assertClient: b.c == nil")
}
}
// setNetInfo sets b.hostinfo.NetInfo to ni, and passes ni along to the
// controlclient, if one exists.
func (b *LocalBackend) setNetInfo(ni *tailcfg.NetInfo) {
b.mu.Lock()
c := b.c
if b.hostinfo != nil {
b.hostinfo.NetInfo = ni.Clone()
}
b.mu.Unlock()
if c == nil {
return
}
c.SetNetInfo(ni)
}
func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) {
var login string
if nm != nil {
login = nm.UserProfiles[nm.User].LoginName
if login == "" {
login = "<missing-profile>"
}
}
b.netMap = nm
if login != b.activeLogin {
b.logf("active login: %v", login)
b.activeLogin = login
}
if nm == nil {
b.nodeByAddr = nil
return
}
// Update the nodeByAddr index.
if b.nodeByAddr == nil {
b.nodeByAddr = map[netaddr.IP]*tailcfg.Node{}
}
// First pass, mark everything unwanted.
for k := range b.nodeByAddr {
b.nodeByAddr[k] = nil
}
addNode := func(n *tailcfg.Node) {
for _, ipp := range n.Addresses {
if ipp.IsSingleIP() {
b.nodeByAddr[ipp.IP] = n
}
}
}
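// Second pass, add (and thereby re-mark as wanted) every node in the new netmap.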
if nm.SelfNode != nil {
addNode(nm.SelfNode)
}
for _, p := range nm.Peers {
addNode(p)
}
// Third pass, actually delete the unwanted items.
for k, v := range b.nodeByAddr {
if v == nil {
delete(b.nodeByAddr, k)
}
}
}
// TestOnlyPublicKeys returns the current machine and node public
// keys. Used in tests only to facilitate automated node authorization
// in the test harness.
func (b *LocalBackend) TestOnlyPublicKeys() (machineKey tailcfg.MachineKey, nodeKey tailcfg.NodeKey) {
b.mu.Lock()
prefs := b.prefs
machinePrivKey := b.machinePrivKey
b.mu.Unlock()
if prefs == nil || machinePrivKey.IsZero() {
return
}
mk := machinePrivKey.Public()
nk := prefs.Persist.PrivateNodeKey.Public()
return tailcfg.MachineKey(mk), tailcfg.NodeKey(nk)
}
// temporarilySetMachineKeyInPersist reports whether we should set
// the machine key in Prefs.Persist.LegacyFrontendPrivateMachineKey
// for the frontend to write out to its preferences for use later.
//
// TODO: remove this in Tailscale 1.3.x (so it effectively always
// returns false). It just exists so users can downgrade from 1.2.x to
// 1.0.x. But eventually we want to stop sending the machine key to
// clients. We can't do that until 1.0.x is no longer supported.
func temporarilySetMachineKeyInPersist() bool {
switch runtime.GOOS {
case "darwin", "ios", "android":
// iOS, macOS, Android users can't downgrade anyway.
return false
}
return true
}
|
[
"\"TS_DEBUG_CONTROL_FLAGS\""
] |
[] |
[
"TS_DEBUG_CONTROL_FLAGS"
] |
[]
|
["TS_DEBUG_CONTROL_FLAGS"]
|
go
| 1 | 0 | |
pkg/testing/integration/program.go
|
// Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
cryptorand "crypto/rand"
"encoding/hex"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"testing"
"time"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/pkg/v3/backend/filestate"
"github.com/pulumi/pulumi/pkg/v3/engine"
"github.com/pulumi/pulumi/pkg/v3/operations"
"github.com/pulumi/pulumi/pkg/v3/resource/stack"
"github.com/pulumi/pulumi/sdk/v3/go/common/apitype"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource/config"
pulumi_testing "github.com/pulumi/pulumi/sdk/v3/go/common/testing"
"github.com/pulumi/pulumi/sdk/v3/go/common/tokens"
"github.com/pulumi/pulumi/sdk/v3/go/common/tools"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/ciutil"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/contract"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/fsutil"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/retry"
"github.com/pulumi/pulumi/sdk/v3/go/common/workspace"
"github.com/stretchr/testify/assert"
user "github.com/tweekmonster/luser"
)
const PythonRuntime = "python"
const NodeJSRuntime = "nodejs"
const GoRuntime = "go"
const DotNetRuntime = "dotnet"
const windowsOS = "windows"
// RuntimeValidationStackInfo contains details related to the stack that runtime validation logic may want to use.
type RuntimeValidationStackInfo struct {
StackName tokens.QName
Deployment *apitype.DeploymentV3
RootResource apitype.ResourceV3
Outputs map[string]interface{}
Events []apitype.EngineEvent
}
// EditDir is an optional edit to apply to the example, as subsequent deployments.
type EditDir struct {
Dir string
ExtraRuntimeValidation func(t *testing.T, stack RuntimeValidationStackInfo)
// Additive is true if Dir should be copied *on top* of the test directory.
// Otherwise Dir *replaces* the test directory, except we keep .pulumi/ and Pulumi.yaml and Pulumi.<stack>.yaml.
Additive bool
// ExpectFailure is true if we expect this test to fail. This is very coarse grained, and will essentially
// tolerate *any* failure in the program (IDEA: in the future, offer a way to narrow this down more).
ExpectFailure bool
// ExpectNoChanges is true if the edit is expected to not propose any changes.
ExpectNoChanges bool
// Stdout is the writer to use for all stdout messages.
Stdout io.Writer
// Stderr is the writer to use for all stderr messages.
Stderr io.Writer
// Verbose may be set to true to print messages as they occur, rather than buffering and showing upon failure.
Verbose bool
// Run program directory in query mode.
QueryMode bool
}
// TestCommandStats is a collection of data related to running a single command during a test.
type TestCommandStats struct {
// StartTime is the time at which the command was started
StartTime string `json:"startTime"`
// EndTime is the time at which the command exited
EndTime string `json:"endTime"`
// ElapsedSeconds is the number of seconds the command took to complete
ElapsedSeconds float64 `json:"elapsedSeconds"`
// StackName is the name of the stack
StackName string `json:"stackName"`
// TestID is the unique ID of the test run
TestID string `json:"testId"`
// StepName is the name of the step which was invoked
StepName string `json:"stepName"`
// CommandLine is the command line which was invoked
CommandLine string `json:"commandLine"`
// TestName is the name of the directory in which the test was executed
TestName string `json:"testName"`
// IsError is true if the command failed
IsError bool `json:"isError"`
// The Cloud that the test was run against, or empty for local deployments
CloudURL string `json:"cloudURL"`
}
// TestStatsReporter reports results and metadata from a test run.
type TestStatsReporter interface {
ReportCommand(stats TestCommandStats)
}
// ConfigValue is used to provide config values to a test program.
type ConfigValue struct {
// The config key to pass to `pulumi config`.
Key string
// The config value to pass to `pulumi config`.
Value string
// Secret indicates that the `--secret` flag should be specified when calling `pulumi config`.
Secret bool
// Path indicates that the `--path` flag should be specified when calling `pulumi config`.
Path bool
}
// ProgramTestOptions provides options for ProgramTest
type ProgramTestOptions struct {
// Dir is the program directory to test.
Dir string
// Array of NPM packages which must be `yarn linked` (e.g. {"pulumi", "@pulumi/aws"})
Dependencies []string
// Map of package names to versions. The test will use the specified versions of these packages instead of what
// is declared in `package.json`.
Overrides map[string]string
// Map of config keys and values to set (e.g. {"aws:region": "us-east-2"}).
Config map[string]string
// Map of secure config keys and values to set (e.g. {"aws:region": "us-east-2"}).
Secrets map[string]string
// List of config keys and values to set in order, including Secret and Path options.
OrderedConfig []ConfigValue
// SecretsProvider is the optional custom secrets provider to use instead of the default.
SecretsProvider string
// EditDirs is an optional list of edits to apply to the example, as subsequent deployments.
EditDirs []EditDir
// ExtraRuntimeValidation is an optional callback for additional validation, called before applying edits.
ExtraRuntimeValidation func(t *testing.T, stack RuntimeValidationStackInfo)
// RelativeWorkDir is an optional path relative to `Dir` which should be used as working directory during tests.
RelativeWorkDir string
// AllowEmptyPreviewChanges is true if we expect that this test's no-op preview may propose changes (e.g.
// because the test is sensitive to the exact contents of its working directory and those contents change
// incidentally between the initial update and the empty update).
AllowEmptyPreviewChanges bool
// AllowEmptyUpdateChanges is true if we expect that this test's no-op update may perform changes (e.g.
// because the test is sensitive to the exact contents of its working directory and those contents change
// incidentally between the initial update and the empty update).
AllowEmptyUpdateChanges bool
// ExpectFailure is true if we expect this test to fail. This is very coarse grained, and will essentially
// tolerate *any* failure in the program (IDEA: in the future, offer a way to narrow this down more).
ExpectFailure bool
// ExpectRefreshChanges may be set to true if a test is expected to have changes yielded by an immediate refresh.
// This could occur, for example, if a resource's state is constantly changing outside of Pulumi (e.g., timestamps).
ExpectRefreshChanges bool
// RetryFailedSteps indicates that failed updates, refreshes, and destroys should be retried after a brief
// intermission. A maximum of 3 retries will be attempted.
RetryFailedSteps bool
// SkipRefresh indicates that the refresh step should be skipped entirely.
SkipRefresh bool
// SkipPreview indicates that the preview step should be skipped entirely.
SkipPreview bool
// SkipUpdate indicates that the update step should be skipped entirely.
SkipUpdate bool
// SkipExportImport skips testing that exporting and importing the stack works properly.
SkipExportImport bool
// SkipEmptyPreviewUpdate skips the no-change preview/update that is performed that validates
// that no changes happen.
SkipEmptyPreviewUpdate bool
// SkipStackRemoval indicates that the stack should not be removed. (And so the test's results could be inspected
// in the Pulumi Service after the test has completed.)
SkipStackRemoval bool
// Quick implies SkipPreview, SkipExportImport and SkipEmptyPreviewUpdate
Quick bool
// PreviewCommandlineFlags specifies flags to add to the `pulumi preview` command line (e.g. "--color=raw")
PreviewCommandlineFlags []string
// UpdateCommandlineFlags specifies flags to add to the `pulumi up` command line (e.g. "--color=raw")
UpdateCommandlineFlags []string
// QueryCommandlineFlags specifies flags to add to the `pulumi query` command line (e.g. "--color=raw")
QueryCommandlineFlags []string
// RunBuild indicates that the build step should be run (e.g. run `yarn build` for `nodejs` programs)
RunBuild bool
// RunUpdateTest will ensure that updates to the package version can be tested for spurious diffs
RunUpdateTest bool
// DecryptSecretsInOutput will ensure that stack output is passed the `--show-secrets` parameter
// Used in conjunction with ExtraRuntimeValidation
DecryptSecretsInOutput bool
// CloudURL is an optional URL to override the default Pulumi Service API (https://api.pulumi-staging.io). The
// PULUMI_ACCESS_TOKEN environment variable must also be set to a valid access token for the target cloud.
CloudURL string
// StackName allows the stack name to be explicitly provided instead of computed from the
// environment during tests.
StackName string
// If non-empty, specifies the value of the `--tracing` flag to pass
// to Pulumi CLI, which may be a Zipkin endpoint or a
// `file:./local.trace` style url for AppDash tracing.
//
// Template `{command}` syntax will be expanded to the current
// command name such as `pulumi-stack-rm`. This is useful for
// file-based tracing since `ProgramTest` performs multiple
// CLI invocations that can inadvertently overwrite the trace
// file.
Tracing string
// NoParallel will opt the test out of being run in parallel.
NoParallel bool
// PrePulumiCommand specifies a callback that will be executed before each `pulumi` invocation. This callback may
// optionally return another callback to be invoked after the `pulumi` invocation completes.
PrePulumiCommand func(verb string) (func(err error) error, error)
// ReportStats optionally specifies how to report results from the test for external collection.
ReportStats TestStatsReporter
// Stdout is the writer to use for all stdout messages.
Stdout io.Writer
// Stderr is the writer to use for all stderr messages.
Stderr io.Writer
// Verbose may be set to true to print messages as they occur, rather than buffering and showing upon failure.
Verbose bool
// DebugLogLevel may be set to anything >0 to enable excessively verbose debug logging from `pulumi`. This is
// equivalent to `--logtostderr -v=N`, where N is the value of DebugLogLevel. This may also be enabled by setting
// the environment variable PULUMI_TEST_DEBUG_LOG_LEVEL.
DebugLogLevel int
// DebugUpdates may be set to true to enable debug logging from `pulumi preview`, `pulumi up`, and
// `pulumi destroy`. This may also be enabled by setting the environment variable PULUMI_TEST_DEBUG_UPDATES.
DebugUpdates bool
// Bin is a location of a `pulumi` executable to be run. Taken from the $PATH if missing.
Bin string
// YarnBin is a location of a `yarn` executable to be run. Taken from the $PATH if missing.
YarnBin string
// GoBin is a location of a `go` executable to be run. Taken from the $PATH if missing.
GoBin string
// PythonBin is a location of a `python` executable to be run. Taken from the $PATH if missing.
PythonBin string
// PipenvBin is a location of a `pipenv` executable to run. Taken from the $PATH if missing.
PipenvBin string
// DotNetBin is a location of a `dotnet` executable to be run. Taken from the $PATH if missing.
DotNetBin string
// Additional environment variables to pass for each command we run.
Env []string
// Automatically create and use a virtual environment, rather than using the Pipenv tool. This is now the default
// behavior, so this option no longer has any effect. To go back to the old behavior use the `UsePipenv` option.
UseAutomaticVirtualEnv bool
// Use the Pipenv tool to manage the virtual environment.
UsePipenv bool
// If set, this hook is called after the `pulumi preview` command has completed.
PreviewCompletedHook func(dir string) error
}
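// Illustrative sketch, not part of the original source: a minimal use of these
// options with the package's ProgramTest entry point (signature assumed here).
// The directory and dependency names are hypothetical.
//
//	func TestMinimalExample(t *testing.T) {
//		ProgramTest(t, &ProgramTestOptions{
//			Dir:          "minimal",
//			Dependencies: []string{"@pulumi/pulumi"},
//			Quick:        true,
//		})
//	}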
func (opts *ProgramTestOptions) GetDebugLogLevel() int {
if opts.DebugLogLevel > 0 {
return opts.DebugLogLevel
}
if du := os.Getenv("PULUMI_TEST_DEBUG_LOG_LEVEL"); du != "" {
if n, e := strconv.Atoi(du); e != nil {
panic(e)
} else if n > 0 {
return n
}
}
return 0
}
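// Illustrative note, not part of the original source: the same verbosity can
// be enabled for a whole run via the PULUMI_TEST_DEBUG_LOG_LEVEL environment
// variable (e.g. setting it to "9") instead of setting DebugLogLevel in code.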
func (opts *ProgramTestOptions) GetDebugUpdates() bool {
return opts.DebugUpdates || os.Getenv("PULUMI_TEST_DEBUG_UPDATES") != ""
}
// GetStackName returns a stack name to use for this test.
func (opts *ProgramTestOptions) GetStackName() tokens.QName {
if opts.StackName == "" {
// Fetch the host and test dir names, cleaned so as to contain just [a-zA-Z0-9-_] chars.
hostname, err := os.Hostname()
contract.AssertNoErrorf(err, "failure to fetch hostname for stack prefix")
var host string
for _, c := range hostname {
if len(host) >= 10 {
break
}
if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9') || c == '-' || c == '_' {
host += string(c)
}
}
var test string
for _, c := range filepath.Base(opts.Dir) {
if len(test) >= 10 {
break
}
if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9') || c == '-' || c == '_' {
test += string(c)
}
}
b := make([]byte, 4)
_, err = cryptorand.Read(b)
contract.AssertNoError(err)
opts.StackName = strings.ToLower("p-it-" + host + "-" + test + "-" + hex.EncodeToString(b))
}
return tokens.QName(opts.StackName)
}
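// Illustrative note, not part of the original source: with hostname
// "buildbox42" and Dir "examples/minimal", the generated stack name looks like
// "p-it-buildbox42-minimal-1a2b3c4d" (the trailing 8 hex chars are random).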
// GetStackNameWithOwner gets the name of the stack prepended with an owner, if PULUMI_TEST_OWNER is set.
// We use this in CI to create test stacks in an organization that all developers have access to, for debugging.
func (opts *ProgramTestOptions) GetStackNameWithOwner() tokens.QName {
if owner := os.Getenv("PULUMI_TEST_OWNER"); owner != "" {
return tokens.QName(fmt.Sprintf("%s/%s", owner, opts.GetStackName()))
}
return opts.GetStackName()
}
// With combines a source set of options with a set of overrides.
func (opts ProgramTestOptions) With(overrides ProgramTestOptions) ProgramTestOptions {
if overrides.Dir != "" {
opts.Dir = overrides.Dir
}
if overrides.Dependencies != nil {
opts.Dependencies = overrides.Dependencies
}
if overrides.Overrides != nil {
opts.Overrides = overrides.Overrides
}
for k, v := range overrides.Config {
if opts.Config == nil {
opts.Config = make(map[string]string)
}
opts.Config[k] = v
}
for k, v := range overrides.Secrets {
if opts.Secrets == nil {
opts.Secrets = make(map[string]string)
}
opts.Secrets[k] = v
}
if overrides.SecretsProvider != "" {
opts.SecretsProvider = overrides.SecretsProvider
}
if overrides.EditDirs != nil {
opts.EditDirs = overrides.EditDirs
}
if overrides.ExtraRuntimeValidation != nil {
opts.ExtraRuntimeValidation = overrides.ExtraRuntimeValidation
}
if overrides.RelativeWorkDir != "" {
opts.RelativeWorkDir = overrides.RelativeWorkDir
}
if overrides.AllowEmptyPreviewChanges {
opts.AllowEmptyPreviewChanges = overrides.AllowEmptyPreviewChanges
}
if overrides.AllowEmptyUpdateChanges {
opts.AllowEmptyUpdateChanges = overrides.AllowEmptyUpdateChanges
}
if overrides.ExpectFailure {
opts.ExpectFailure = overrides.ExpectFailure
}
if overrides.ExpectRefreshChanges {
opts.ExpectRefreshChanges = overrides.ExpectRefreshChanges
}
if overrides.RetryFailedSteps {
opts.RetryFailedSteps = overrides.RetryFailedSteps
}
if overrides.SkipRefresh {
opts.SkipRefresh = overrides.SkipRefresh
}
if overrides.SkipPreview {
opts.SkipPreview = overrides.SkipPreview
}
if overrides.SkipUpdate {
opts.SkipUpdate = overrides.SkipUpdate
}
if overrides.SkipExportImport {
opts.SkipExportImport = overrides.SkipExportImport
}
if overrides.SkipEmptyPreviewUpdate {
opts.SkipEmptyPreviewUpdate = overrides.SkipEmptyPreviewUpdate
}
if overrides.SkipStackRemoval {
opts.SkipStackRemoval = overrides.SkipStackRemoval
}
if overrides.Quick {
opts.Quick = overrides.Quick
}
if overrides.PreviewCommandlineFlags != nil {
opts.PreviewCommandlineFlags = append(opts.PreviewCommandlineFlags, overrides.PreviewCommandlineFlags...)
}
if overrides.UpdateCommandlineFlags != nil {
opts.UpdateCommandlineFlags = append(opts.UpdateCommandlineFlags, overrides.UpdateCommandlineFlags...)
}
if overrides.QueryCommandlineFlags != nil {
opts.QueryCommandlineFlags = append(opts.QueryCommandlineFlags, overrides.QueryCommandlineFlags...)
}
if overrides.RunBuild {
opts.RunBuild = overrides.RunBuild
}
if overrides.RunUpdateTest {
opts.RunUpdateTest = overrides.RunUpdateTest
}
if overrides.DecryptSecretsInOutput {
opts.DecryptSecretsInOutput = overrides.DecryptSecretsInOutput
}
if overrides.CloudURL != "" {
opts.CloudURL = overrides.CloudURL
}
if overrides.StackName != "" {
opts.StackName = overrides.StackName
}
if overrides.Tracing != "" {
opts.Tracing = overrides.Tracing
}
if overrides.NoParallel {
opts.NoParallel = overrides.NoParallel
}
if overrides.PrePulumiCommand != nil {
opts.PrePulumiCommand = overrides.PrePulumiCommand
}
if overrides.ReportStats != nil {
opts.ReportStats = overrides.ReportStats
}
if overrides.Stdout != nil {
opts.Stdout = overrides.Stdout
}
if overrides.Stderr != nil {
opts.Stderr = overrides.Stderr
}
if overrides.Verbose {
opts.Verbose = overrides.Verbose
}
if overrides.DebugLogLevel != 0 {
opts.DebugLogLevel = overrides.DebugLogLevel
}
if overrides.DebugUpdates {
opts.DebugUpdates = overrides.DebugUpdates
}
if overrides.Bin != "" {
opts.Bin = overrides.Bin
}
if overrides.YarnBin != "" {
opts.YarnBin = overrides.YarnBin
}
if overrides.GoBin != "" {
opts.GoBin = overrides.GoBin
}
if overrides.PipenvBin != "" {
opts.PipenvBin = overrides.PipenvBin
}
if overrides.Env != nil {
opts.Env = append(opts.Env, overrides.Env...)
}
if overrides.UsePipenv {
opts.UsePipenv = overrides.UsePipenv
}
return opts
}
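// The sketch below is illustrative only (it is not used by the framework): it shows how a base set of
// ProgramTestOptions might be layered with overrides via With. The directory, dependency, and config
// values are hypothetical.
func exampleWithOverrides() ProgramTestOptions {
	base := ProgramTestOptions{
		Dir:          filepath.Join("examples", "base"),
		Dependencies: []string{"@pulumi/pulumi"},
		Config:       map[string]string{"aws:region": "us-west-2"},
	}
	// Scalar fields in overrides win; Config/Secrets maps and command-line flag slices are merged.
	return base.With(ProgramTestOptions{
		Quick:       true,
		SkipRefresh: true,
	})
}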
type regexFlag struct {
re *regexp.Regexp
}
func (rf *regexFlag) String() string {
if rf.re == nil {
return ""
}
return rf.re.String()
}
func (rf *regexFlag) Set(v string) error {
r, err := regexp.Compile(v)
if err != nil {
return err
}
rf.re = r
return nil
}
var directoryMatcher regexFlag
var listDirs bool
var pipMutex *fsutil.FileMutex
func init() {
flag.Var(&directoryMatcher, "dirs", "optional list of regexes to use to select integration tests to run")
flag.BoolVar(&listDirs, "list-dirs", false, "list available integration tests without running them")
mutexPath := filepath.Join(os.TempDir(), "pip-mutex.lock")
pipMutex = fsutil.NewFileMutex(mutexPath)
}
// GetLogs retrieves the logs for a given stack in a particular region, using the query provided.
//
// [provider] should be one of "aws" or "azure"
func GetLogs(
t *testing.T,
provider, region string,
stackInfo RuntimeValidationStackInfo,
query operations.LogQuery) *[]operations.LogEntry {
snap, err := stack.DeserializeDeploymentV3(*stackInfo.Deployment, stack.DefaultSecretsProvider)
assert.NoError(t, err)
tree := operations.NewResourceTree(snap.Resources)
if !assert.NotNil(t, tree) {
return nil
}
cfg := map[config.Key]string{
config.MustMakeKey(provider, "region"): region,
}
ops := tree.OperationsProvider(cfg)
// Validate logs from example
logs, err := ops.GetLogs(query)
if !assert.NoError(t, err) {
return nil
}
return logs
}
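// Illustrative sketch (the provider, region, and query here are hypothetical placeholders) of calling
// GetLogs from an ExtraRuntimeValidation callback.
func exampleGetLogs(t *testing.T, stackInfo RuntimeValidationStackInfo) {
	logs := GetLogs(t, "aws", "us-west-2", stackInfo, operations.LogQuery{})
	if logs != nil {
		t.Logf("retrieved %d log entries", len(*logs))
	}
}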
func prepareProgram(t *testing.T, opts *ProgramTestOptions) {
// If we're just listing tests, simply print this test's directory.
if listDirs {
fmt.Printf("%s\n", opts.Dir)
}
// If we have a matcher, ensure that this test matches its pattern.
if directoryMatcher.re != nil && !directoryMatcher.re.Match([]byte(opts.Dir)) {
t.Skip(fmt.Sprintf("Skipping: '%v' does not match '%v'", opts.Dir, directoryMatcher.re))
}
// Disable stack backups for tests to avoid filling up ~/.pulumi/backups with unnecessary
// backups of test stacks.
if err := os.Setenv(filestate.DisableCheckpointBackupsEnvVar, "1"); err != nil {
t.Errorf("error setting env var '%s': %v", filestate.DisableCheckpointBackupsEnvVar, err)
}
// We want tests to default to being run in parallel, hence the odd double negative.
if !opts.NoParallel {
t.Parallel()
}
if ciutil.IsCI() && os.Getenv("PULUMI_ACCESS_TOKEN") == "" {
t.Skip("Skipping: PULUMI_ACCESS_TOKEN is not set")
}
// If the test panics, recover and log instead of letting the panic escape the test. Even though *this* test will
// have run deferred functions and cleaned up, if the panic reaches toplevel it will kill the process and prevent
// other tests running in parallel from cleaning up.
defer func() {
if failure := recover(); failure != nil {
t.Errorf("panic testing %v: %v", opts.Dir, failure)
}
}()
// Set up some default values for sending test reports and tracing data. We use environment variables to
// control these globally and set reasonable values for our own use in CI.
if opts.ReportStats == nil {
if v := os.Getenv("PULUMI_TEST_REPORT_CONFIG"); v != "" {
splits := strings.Split(v, ":")
if len(splits) != 3 {
t.Errorf("report config should be set to a value of the form: <aws-region>:<bucket-name>:<keyPrefix>")
}
opts.ReportStats = NewS3Reporter(splits[0], splits[1], splits[2])
}
}
if opts.Tracing == "" {
opts.Tracing = os.Getenv("PULUMI_TEST_TRACE_ENDPOINT")
}
}
// ProgramTest runs a lifecycle of Pulumi commands in a program working directory, using the `pulumi` and `yarn`
// binaries available on PATH. It essentially executes the following workflow:
//
// yarn install
// yarn link <each opts.Dependencies>
// (+) yarn run build
// pulumi init
// (*) pulumi login
// pulumi stack init integrationtesting
// pulumi config set <each opts.Config>
// pulumi config set --secret <each opts.Secrets>
// pulumi preview
// pulumi up
// pulumi stack export --file stack.json
// pulumi stack import --file stack.json
// pulumi preview (expected to be empty)
// pulumi up (expected to be empty)
// pulumi destroy --yes
// pulumi stack rm --yes integrationtesting
//
// (*) Only if PULUMI_ACCESS_TOKEN is set.
// (+) Only if `opts.RunBuild` is true.
//
// All commands must return success return codes for the test to succeed, unless ExpectFailure is true.
func ProgramTest(t *testing.T, opts *ProgramTestOptions) {
prepareProgram(t, opts)
pt := newProgramTester(t, opts)
err := pt.TestLifeCycleInitAndDestroy()
assert.NoError(t, err)
}
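// Illustrative sketch of a typical caller of ProgramTest; the directory, dependency, and config values
// are hypothetical placeholders.
func exampleProgramTest(t *testing.T) {
	ProgramTest(t, &ProgramTestOptions{
		Dir:          filepath.Join("examples", "minimal"),
		Dependencies: []string{"@pulumi/pulumi"},
		Config:       map[string]string{"aws:region": "us-west-2"},
		Quick:        true,
	})
}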
// ProgramTestManualLifeCycle returns a ProgramTester that must be manually controlled in terms of its lifecycle
func ProgramTestManualLifeCycle(t *testing.T, opts *ProgramTestOptions) *ProgramTester {
prepareProgram(t, opts)
pt := newProgramTester(t, opts)
return pt
}
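// Illustrative sketch (error handling abbreviated) of how the manual lifecycle methods are meant to be
// sequenced when a test needs to interleave its own steps between phases.
func exampleManualLifeCycle(t *testing.T, opts *ProgramTestOptions) {
	pt := ProgramTestManualLifeCycle(t, opts)
	if err := pt.TestLifeCyclePrepare(); err != nil {
		t.Fatal(err)
	}
	defer pt.TestCleanUp()
	if err := pt.TestLifeCycleInitialize(); err != nil {
		t.Fatal(err)
	}
	defer func() {
		assert.NoError(t, pt.TestLifeCycleDestroy())
	}()
	if err := pt.TestPreviewUpdateAndEdits(); err != nil {
		t.Fatal(err)
	}
	pt.TestFinished = true
}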
// ProgramTester contains state associated with running a single test pass.
type ProgramTester struct {
t *testing.T // the Go tester for this run.
opts *ProgramTestOptions // options that control this test run.
bin string // the `pulumi` binary we are using.
yarnBin string // the `yarn` binary we are using.
goBin string // the `go` binary we are using.
pythonBin string // the `python` binary we are using.
pipenvBin string // The `pipenv` binary we are using.
dotNetBin string // the `dotnet` binary we are using.
eventLog string // The path to the event log for this test.
maxStepTries int // The maximum number of times to retry a failed pulumi step.
tmpdir string // the temporary directory we use for our test environment
projdir string // the project directory we use for this run
TestFinished bool // whether or not the test is finished
}
func newProgramTester(t *testing.T, opts *ProgramTestOptions) *ProgramTester {
stackName := opts.GetStackName()
maxStepTries := 1
if opts.RetryFailedSteps {
maxStepTries = 3
}
if opts.Quick {
opts.SkipPreview = true
opts.SkipExportImport = true
opts.SkipEmptyPreviewUpdate = true
}
return &ProgramTester{
t: t,
opts: opts,
eventLog: filepath.Join(os.TempDir(), string(stackName)+"-events.json"),
maxStepTries: maxStepTries,
}
}
func (pt *ProgramTester) getBin() (string, error) {
return getCmdBin(&pt.bin, "pulumi", pt.opts.Bin)
}
func (pt *ProgramTester) getYarnBin() (string, error) {
return getCmdBin(&pt.yarnBin, "yarn", pt.opts.YarnBin)
}
func (pt *ProgramTester) getGoBin() (string, error) {
return getCmdBin(&pt.goBin, "go", pt.opts.GoBin)
}
// getPythonBin returns a path to the currently-installed `python` binary, or an error if it could not be found.
func (pt *ProgramTester) getPythonBin() (string, error) {
if pt.pythonBin == "" {
pt.pythonBin = pt.opts.PythonBin
if pt.opts.PythonBin == "" {
var err error
// Look for `python3` by default, but fall back to `python` if not found, except on Windows
// where we look for these in the reverse order because the default python.org Windows
// installation does not include a `python3` binary, and the existence of a `python3.exe`
// symlink to `python.exe` on some systems does not work correctly with the Python `venv`
// module.
pythonCmds := []string{"python3", "python"}
if runtime.GOOS == windowsOS {
pythonCmds = []string{"python", "python3"}
}
for _, bin := range pythonCmds {
pt.pythonBin, err = exec.LookPath(bin)
// Break on the first cmd we find on the path (if any).
if err == nil {
break
}
}
if err != nil {
return "", errors.Wrapf(err, "Expected to find one of %q on $PATH", pythonCmds)
}
}
}
return pt.pythonBin, nil
}
// getPipenvBin returns a path to the currently-installed Pipenv tool, or an error if the tool could not be found.
func (pt *ProgramTester) getPipenvBin() (string, error) {
return getCmdBin(&pt.pipenvBin, "pipenv", pt.opts.PipenvBin)
}
func (pt *ProgramTester) getDotNetBin() (string, error) {
return getCmdBin(&pt.dotNetBin, "dotnet", pt.opts.DotNetBin)
}
func (pt *ProgramTester) pulumiCmd(name string, args []string) ([]string, error) {
bin, err := pt.getBin()
if err != nil {
return nil, err
}
cmd := []string{bin}
if du := pt.opts.GetDebugLogLevel(); du > 0 {
cmd = append(cmd, "--logtostderr", "-v="+strconv.Itoa(du))
}
cmd = append(cmd, args...)
if tracing := pt.opts.Tracing; tracing != "" {
cmd = append(cmd, "--tracing", strings.ReplaceAll(tracing, "{command}", name))
}
return cmd, nil
}
func (pt *ProgramTester) yarnCmd(args []string) ([]string, error) {
bin, err := pt.getYarnBin()
if err != nil {
return nil, err
}
result := []string{bin}
result = append(result, args...)
return withOptionalYarnFlags(result), nil
}
func (pt *ProgramTester) pythonCmd(args []string) ([]string, error) {
bin, err := pt.getPythonBin()
if err != nil {
return nil, err
}
cmd := []string{bin}
return append(cmd, args...), nil
}
func (pt *ProgramTester) pipenvCmd(args []string) ([]string, error) {
bin, err := pt.getPipenvBin()
if err != nil {
return nil, err
}
cmd := []string{bin}
return append(cmd, args...), nil
}
func (pt *ProgramTester) runCommand(name string, args []string, wd string) error {
return RunCommand(pt.t, name, args, wd, pt.opts)
}
func (pt *ProgramTester) runPulumiCommand(name string, args []string, wd string, expectFailure bool) error {
cmd, err := pt.pulumiCmd(name, args)
if err != nil {
return err
}
var postFn func(error) error
if pt.opts.PrePulumiCommand != nil {
postFn, err = pt.opts.PrePulumiCommand(args[0])
if err != nil {
return err
}
}
isUpdate := args[0] == "preview" || args[0] == "up" || args[0] == "destroy" || args[0] == "refresh"
// If we're doing a preview or an update and this project is a Python project, we need to run
// the command in the context of the virtual environment that Pipenv created in order to pick up
// the correct version of Python. We also need to do this for destroy and refresh so that
// dynamic providers are run in the right virtual environment.
// This is only necessary when not using automatic virtual environment support.
if pt.opts.UsePipenv && isUpdate {
projinfo, err := pt.getProjinfo(wd)
if err != nil {
return err
}
if projinfo.Proj.Runtime.Name() == "python" {
pipenvBin, err := pt.getPipenvBin()
if err != nil {
return err
}
// "pipenv run" activates the current virtual environment and runs the remainder of the arguments as if it
// were a command.
cmd = append([]string{pipenvBin, "run"}, cmd...)
}
}
_, _, err = retry.Until(context.Background(), retry.Acceptor{
Accept: func(try int, nextRetryTime time.Duration) (bool, interface{}, error) {
runerr := pt.runCommand(name, cmd, wd)
if runerr == nil {
return true, nil, nil
} else if _, ok := runerr.(*exec.ExitError); ok && isUpdate && !expectFailure {
// the update command failed, let's try again, assuming we haven't failed a few times.
if try+1 >= pt.maxStepTries {
return false, nil, errors.Errorf("%v did not succeed after %v tries", cmd, try+1)
}
pt.t.Logf("%v failed: %v; retrying...", cmd, runerr)
return false, nil, nil
}
// some other error, fail
return false, nil, runerr
},
})
if postFn != nil {
if postErr := postFn(err); postErr != nil {
return multierror.Append(err, postErr)
}
}
return err
}
func (pt *ProgramTester) runYarnCommand(name string, args []string, wd string) error {
cmd, err := pt.yarnCmd(args)
if err != nil {
return err
}
_, _, err = retry.Until(context.Background(), retry.Acceptor{
Accept: func(try int, nextRetryTime time.Duration) (bool, interface{}, error) {
runerr := pt.runCommand(name, cmd, wd)
if runerr == nil {
return true, nil, nil
} else if _, ok := runerr.(*exec.ExitError); ok {
// yarn failed, let's try again, assuming we haven't failed a few times.
if try+1 >= 3 {
return false, nil, errors.Errorf("%v did not complete after %v tries", cmd, try+1)
}
return false, nil, nil
}
// some other error, fail
return false, nil, runerr
},
})
return err
}
func (pt *ProgramTester) runPythonCommand(name string, args []string, wd string) error {
cmd, err := pt.pythonCmd(args)
if err != nil {
return err
}
return pt.runCommand(name, cmd, wd)
}
func (pt *ProgramTester) runVirtualEnvCommand(name string, args []string, wd string) error {
// When installing with `pip install -e`, a PKG-INFO file is created. If two packages are being installed
// this way simultaneously (which happens often, when running tests), both installations will be writing the
// same file simultaneously. If one process catches "PKG-INFO" in a half-written state, the one process that
// observed the torn write will fail to install the package.
//
// To avoid this problem, we use pipMutex to explicitly serialize installation operations. Doing so avoids
// the problem of multiple processes stomping on the same files in the source tree. Note that pipMutex is a
// file mutex, so this strategy works even if the go test runner chooses to split up test execution across
// multiple processes. (Furthermore, each test gets an instance of ProgramTester and thus the mutex, so we'd
// need to be sharing the mutex globally in each test process if we weren't using the file system to lock.)
if name == "virtualenv-pip-install-package" {
if err := pipMutex.Lock(); err != nil {
panic(err)
}
if pt.opts.Verbose {
pt.t.Log("acquired pip install lock")
defer pt.t.Log("released pip install lock")
}
defer func() {
if err := pipMutex.Unlock(); err != nil {
panic(err)
}
}()
}
virtualenvBinPath, err := getVirtualenvBinPath(wd, args[0])
if err != nil {
return err
}
cmd := append([]string{virtualenvBinPath}, args[1:]...)
return pt.runCommand(name, cmd, wd)
}
func (pt *ProgramTester) runPipenvCommand(name string, args []string, wd string) error {
// Pipenv uses setuptools to install and uninstall packages. Setuptools has an installation mode called "develop"
// that we use to install the package being tested, since it is 1) lightweight and 2) not doing so has its own set
// of annoying problems.
//
// Setuptools develop does three things:
// 1. It invokes the "egg_info" command in the target package,
// 2. It creates a special `.egg-link` sentinel file in the current site-packages folder, pointing to the on-disk
//    path of the package being installed.
// 3. It updates easy-install.pth in site-packages so that pip understands that this package has been installed.
//
// Steps 2 and 3 operate entirely within the context of a virtualenv. The state that they mutate is fully contained
// within the current virtualenv. However, step 1 operates in the context of the package's source tree. Egg info
// is responsible for producing a minimal "egg" for a particular package, and its largest responsibility is creating
// a PKG-INFO file for a package. PKG-INFO contains, among other things, the version of the package being installed.
//
// If two packages are being installed in "develop" mode simultaneously (which happens often, when running tests),
// both installations will run "egg_info" on the source tree and both processes will be writing the same files
// simultaneously. If one process catches "PKG-INFO" in a half-written state, the one process that observed the
// torn write will fail to install the package (setuptools crashes).
//
// To avoid this problem, we use pipMutex to explicitly serialize installation operations. Doing so avoids the
// problem of multiple processes stomping on the same files in the source tree. Note that pipMutex is a file
// mutex, so this strategy works even if the go test runner chooses to split up test execution across multiple
// processes. (Furthermore, each test gets an instance of ProgramTester and thus the mutex, so we'd need to be
// sharing the mutex globally in each test process if we weren't using the file system to lock.)
if name == "pipenv-install-package" {
if err := pipMutex.Lock(); err != nil {
panic(err)
}
if pt.opts.Verbose {
pt.t.Log("acquired pip install lock")
defer pt.t.Log("released pip install lock")
}
defer func() {
if err := pipMutex.Unlock(); err != nil {
panic(err)
}
}()
}
cmd, err := pt.pipenvCmd(args)
if err != nil {
return err
}
return pt.runCommand(name, cmd, wd)
}
// TestLifeCyclePrepare prepares a test by creating a temporary directory
func (pt *ProgramTester) TestLifeCyclePrepare() error {
tmpdir, projdir, err := pt.copyTestToTemporaryDirectory()
pt.tmpdir = tmpdir
pt.projdir = projdir
return err
}
// TestCleanUp cleans up the temporary directory that a test used
func (pt *ProgramTester) TestCleanUp() {
testFinished := pt.TestFinished
if pt.tmpdir != "" {
if !testFinished || pt.t.Failed() {
// Test aborted or failed. Maybe copy to "failed tests" directory.
failedTestsDir := os.Getenv("PULUMI_FAILED_TESTS_DIR")
if failedTestsDir != "" {
dest := filepath.Join(failedTestsDir, pt.t.Name()+uniqueSuffix())
contract.IgnoreError(fsutil.CopyFile(dest, pt.tmpdir, nil))
}
} else {
contract.IgnoreError(os.RemoveAll(pt.tmpdir))
}
} else {
// When tmpdir is empty, we ran "in tree", which means we wrote output
// to the "command-output" folder in the projdir, and we should clean
// it up if the test passed
if testFinished && !pt.t.Failed() {
contract.IgnoreError(os.RemoveAll(filepath.Join(pt.projdir, commandOutputFolderName)))
}
}
}
// TestLifeCycleInitAndDestroy executes the test and cleans up
func (pt *ProgramTester) TestLifeCycleInitAndDestroy() error {
err := pt.TestLifeCyclePrepare()
if err != nil {
return errors.Wrapf(err, "copying test to temp dir %s", pt.tmpdir)
}
pt.TestFinished = false
defer pt.TestCleanUp()
err = pt.TestLifeCycleInitialize()
if err != nil {
return errors.Wrap(err, "initializing test project")
}
// Ensure that before we exit, we attempt to destroy and remove the stack.
defer func() {
destroyErr := pt.TestLifeCycleDestroy()
assert.NoError(pt.t, destroyErr)
}()
if err = pt.TestPreviewUpdateAndEdits(); err != nil {
return errors.Wrap(err, "running test preview, update, and edits")
}
if pt.opts.RunUpdateTest {
err = upgradeProjectDeps(pt.projdir, pt)
if err != nil {
return errors.Wrap(err, "upgrading project dependencies")
}
if err = pt.TestPreviewUpdateAndEdits(); err != nil {
return errors.Wrap(err, "running test preview, update, and edits")
}
}
pt.TestFinished = true
return nil
}
func upgradeProjectDeps(projectDir string, pt *ProgramTester) error {
projInfo, err := pt.getProjinfo(projectDir)
if err != nil {
return errors.Wrap(err, "getting project info")
}
switch rt := projInfo.Proj.Runtime.Name(); rt {
case NodeJSRuntime:
if err = pt.yarnLinkPackageDeps(projectDir); err != nil {
return err
}
case PythonRuntime:
if err = pt.installPipPackageDeps(projectDir); err != nil {
return err
}
default:
return errors.Errorf("unrecognized project runtime: %s", rt)
}
return nil
}
// TestLifeCycleInitialize initializes the project directory and stack along with any configuration
func (pt *ProgramTester) TestLifeCycleInitialize() error {
dir := pt.projdir
stackName := pt.opts.GetStackName()
// If RelativeWorkDir is specified, apply that relative to the temp folder for use as working directory during tests.
if pt.opts.RelativeWorkDir != "" {
dir = filepath.Join(dir, pt.opts.RelativeWorkDir)
}
// Set the default target Pulumi API if not overridden in options.
if pt.opts.CloudURL == "" {
pulumiAPI := os.Getenv("PULUMI_API")
if pulumiAPI != "" {
pt.opts.CloudURL = pulumiAPI
}
}
// Ensure all links are present, the stack is created, and all configs are applied.
pt.t.Logf("Initializing project (dir %s; stack %s)", dir, stackName)
// Login as needed.
stackInitName := string(pt.opts.GetStackNameWithOwner())
if os.Getenv("PULUMI_ACCESS_TOKEN") == "" && pt.opts.CloudURL == "" {
fmt.Printf("Using existing logged in user for tests. Set PULUMI_ACCESS_TOKEN and/or PULUMI_API to override.\n")
} else {
// Set PulumiCredentialsPathEnvVar to our CWD, so we use credentials specific to just this
// test.
pt.opts.Env = append(pt.opts.Env, fmt.Sprintf("%s=%s", workspace.PulumiCredentialsPathEnvVar, dir))
loginArgs := []string{"login"}
loginArgs = addFlagIfNonNil(loginArgs, "--cloud-url", pt.opts.CloudURL)
// If this is a local OR cloud login, then don't attach the owner to the stack-name.
if pt.opts.CloudURL != "" {
stackInitName = string(pt.opts.GetStackName())
}
if err := pt.runPulumiCommand("pulumi-login", loginArgs, dir, false); err != nil {
return err
}
}
// Stack init
stackInitArgs := []string{"stack", "init", stackInitName}
if pt.opts.SecretsProvider != "" {
stackInitArgs = append(stackInitArgs, "--secrets-provider", pt.opts.SecretsProvider)
}
if err := pt.runPulumiCommand("pulumi-stack-init", stackInitArgs, dir, false); err != nil {
return err
}
for key, value := range pt.opts.Config {
if err := pt.runPulumiCommand("pulumi-config",
[]string{"config", "set", key, value}, dir, false); err != nil {
return err
}
}
for key, value := range pt.opts.Secrets {
if err := pt.runPulumiCommand("pulumi-config",
[]string{"config", "set", "--secret", key, value}, dir, false); err != nil {
return err
}
}
for _, cv := range pt.opts.OrderedConfig {
configArgs := []string{"config", "set", cv.Key, cv.Value}
if cv.Secret {
configArgs = append(configArgs, "--secret")
}
if cv.Path {
configArgs = append(configArgs, "--path")
}
if err := pt.runPulumiCommand("pulumi-config", configArgs, dir, false); err != nil {
return err
}
}
return nil
}
// TestLifeCycleDestroy destroys a stack and removes it
func (pt *ProgramTester) TestLifeCycleDestroy() error {
if pt.projdir != "" {
// Destroy and remove the stack.
pt.t.Log("Destroying stack")
destroy := []string{"destroy", "--non-interactive", "--yes", "--skip-preview"}
if pt.opts.GetDebugUpdates() {
destroy = append(destroy, "-d")
}
if err := pt.runPulumiCommand("pulumi-destroy", destroy, pt.projdir, false); err != nil {
return err
}
if pt.t.Failed() {
pt.t.Logf("Test failed, retaining stack '%s'", pt.opts.GetStackNameWithOwner())
return nil
}
if !pt.opts.SkipStackRemoval {
return pt.runPulumiCommand("pulumi-stack-rm", []string{"stack", "rm", "--yes"}, pt.projdir, false)
}
}
return nil
}
// TestPreviewUpdateAndEdits runs the preview, update, and any relevant edits
func (pt *ProgramTester) TestPreviewUpdateAndEdits() error {
dir := pt.projdir
// Now preview and update the real changes.
pt.t.Log("Performing primary preview and update")
initErr := pt.PreviewAndUpdate(dir, "initial", pt.opts.ExpectFailure, false, false)
// If the initial preview/update failed, just exit without trying the rest (but make sure to destroy).
if initErr != nil {
return initErr
}
// Perform an empty preview and update; nothing is expected to happen here.
if !pt.opts.SkipExportImport {
pt.t.Log("Roundtripping checkpoint via stack export and stack import")
if err := pt.exportImport(dir); err != nil {
return err
}
}
if !pt.opts.SkipEmptyPreviewUpdate {
msg := ""
if !pt.opts.AllowEmptyUpdateChanges {
msg = "(no changes expected)"
}
pt.t.Logf("Performing empty preview and update%s", msg)
if err := pt.PreviewAndUpdate(
dir, "empty", false, !pt.opts.AllowEmptyPreviewChanges, !pt.opts.AllowEmptyUpdateChanges); err != nil {
return err
}
}
// Run additional validation provided by the test options, passing in the checkpoint info.
if err := pt.performExtraRuntimeValidation(pt.opts.ExtraRuntimeValidation, dir); err != nil {
return err
}
if !pt.opts.SkipRefresh {
// Perform a refresh and ensure it doesn't yield changes.
refresh := []string{"refresh", "--non-interactive", "--yes", "--skip-preview"}
if pt.opts.GetDebugUpdates() {
refresh = append(refresh, "-d")
}
if !pt.opts.ExpectRefreshChanges {
refresh = append(refresh, "--expect-no-changes")
}
if err := pt.runPulumiCommand("pulumi-refresh", refresh, dir, false); err != nil {
return err
}
}
// If there are any edits, apply them and run a preview and update for each one.
return pt.testEdits(dir)
}
func (pt *ProgramTester) exportImport(dir string) error {
exportCmd := []string{"stack", "export", "--file", "stack.json"}
importCmd := []string{"stack", "import", "--file", "stack.json"}
defer func() {
contract.IgnoreError(os.Remove(filepath.Join(dir, "stack.json")))
}()
if err := pt.runPulumiCommand("pulumi-stack-export", exportCmd, dir, false); err != nil {
return err
}
return pt.runPulumiCommand("pulumi-stack-import", importCmd, dir, false)
}
// PreviewAndUpdate runs pulumi preview followed by pulumi up
func (pt *ProgramTester) PreviewAndUpdate(dir string, name string, shouldFail, expectNopPreview,
expectNopUpdate bool) error {
preview := []string{"preview", "--non-interactive"}
update := []string{"up", "--non-interactive", "--yes", "--skip-preview", "--event-log", pt.eventLog}
if pt.opts.GetDebugUpdates() {
preview = append(preview, "-d")
update = append(update, "-d")
}
if expectNopPreview {
preview = append(preview, "--expect-no-changes")
}
if expectNopUpdate {
update = append(update, "--expect-no-changes")
}
if pt.opts.PreviewCommandlineFlags != nil {
preview = append(preview, pt.opts.PreviewCommandlineFlags...)
}
if pt.opts.UpdateCommandlineFlags != nil {
update = append(update, pt.opts.UpdateCommandlineFlags...)
}
// If not in quick mode, run an explicit preview.
if !pt.opts.SkipPreview {
if err := pt.runPulumiCommand("pulumi-preview-"+name, preview, dir, shouldFail); err != nil {
if shouldFail {
pt.t.Log("Permitting failure (ExpectFailure=true for this preview)")
return nil
}
return err
}
if pt.opts.PreviewCompletedHook != nil {
if err := pt.opts.PreviewCompletedHook(dir); err != nil {
return err
}
}
}
// Now run an update.
if !pt.opts.SkipUpdate {
if err := pt.runPulumiCommand("pulumi-update-"+name, update, dir, shouldFail); err != nil {
if shouldFail {
pt.t.Log("Permitting failure (ExpectFailure=true for this update)")
return nil
}
return err
}
}
// If we expected a failure, but none occurred, return an error.
if shouldFail {
return errors.New("expected this step to fail, but it succeeded")
}
return nil
}
func (pt *ProgramTester) query(dir string, name string, shouldFail bool) error {
query := []string{"query", "--non-interactive"}
if pt.opts.GetDebugUpdates() {
query = append(query, "-d")
}
if pt.opts.QueryCommandlineFlags != nil {
query = append(query, pt.opts.QueryCommandlineFlags...)
}
// Now run a query.
if err := pt.runPulumiCommand("pulumi-query-"+name, query, dir, shouldFail); err != nil {
if shouldFail {
pt.t.Log("Permitting failure (ExpectFailure=true for this update)")
return nil
}
return err
}
// If we expected a failure, but none occurred, return an error.
if shouldFail {
return errors.New("expected this step to fail, but it succeeded")
}
return nil
}
func (pt *ProgramTester) testEdits(dir string) error {
for i, edit := range pt.opts.EditDirs {
var err error
if err = pt.testEdit(dir, i, edit); err != nil {
return err
}
}
return nil
}
func (pt *ProgramTester) testEdit(dir string, i int, edit EditDir) error {
pt.t.Logf("Applying edit '%v' and rerunning preview and update", edit.Dir)
if edit.Additive {
// Just copy new files into dir
if err := fsutil.CopyFile(dir, edit.Dir, nil); err != nil {
return errors.Wrapf(err, "Couldn't copy %v into %v", edit.Dir, dir)
}
} else {
// Create a new temporary directory
newDir, err := ioutil.TempDir("", pt.opts.StackName+"-")
if err != nil {
return errors.Wrapf(err, "Couldn't create new temporary directory")
}
// Delete whichever copy of the test is unused when we return
dirToDelete := newDir
defer func() {
contract.IgnoreError(os.RemoveAll(dirToDelete))
}()
// Copy everything except Pulumi.yaml, Pulumi.<stack-name>.yaml, and .pulumi from source into new directory
exclusions := make(map[string]bool)
projectYaml := workspace.ProjectFile + ".yaml"
configYaml := workspace.ProjectFile + "." + pt.opts.StackName + ".yaml"
exclusions[workspace.BookkeepingDir] = true
exclusions[projectYaml] = true
exclusions[configYaml] = true
if err := fsutil.CopyFile(newDir, edit.Dir, exclusions); err != nil {
return errors.Wrapf(err, "Couldn't copy %v into %v", edit.Dir, newDir)
}
// Copy Pulumi.yaml, Pulumi.<stack-name>.yaml, and .pulumi from old directory to new directory
oldProjectYaml := filepath.Join(dir, projectYaml)
newProjectYaml := filepath.Join(newDir, projectYaml)
oldConfigYaml := filepath.Join(dir, configYaml)
newConfigYaml := filepath.Join(newDir, configYaml)
oldProjectDir := filepath.Join(dir, workspace.BookkeepingDir)
newProjectDir := filepath.Join(newDir, workspace.BookkeepingDir)
if err := fsutil.CopyFile(newProjectYaml, oldProjectYaml, nil); err != nil {
return errors.Wrap(err, "Couldn't copy Pulumi.yaml")
}
if err := fsutil.CopyFile(newConfigYaml, oldConfigYaml, nil); err != nil {
return errors.Wrapf(err, "Couldn't copy Pulumi.%s.yaml", pt.opts.StackName)
}
if err := fsutil.CopyFile(newProjectDir, oldProjectDir, nil); err != nil {
return errors.Wrap(err, "Couldn't copy .pulumi")
}
// Finally, replace our current temp directory with the new one.
dirOld := dir + ".old"
if err := os.Rename(dir, dirOld); err != nil {
return errors.Wrapf(err, "Couldn't rename %v to %v", dir, dirOld)
}
// There's a brief window here where the old temp dir name could be taken from us.
if err := os.Rename(newDir, dir); err != nil {
return errors.Wrapf(err, "Couldn't rename %v to %v", newDir, dir)
}
// Keep dir, delete dirOld
dirToDelete = dirOld
}
err := pt.prepareProjectDir(dir)
if err != nil {
return errors.Wrapf(err, "Couldn't prepare project in %v", dir)
}
oldStdOut := pt.opts.Stdout
oldStderr := pt.opts.Stderr
oldVerbose := pt.opts.Verbose
if edit.Stdout != nil {
pt.opts.Stdout = edit.Stdout
}
if edit.Stderr != nil {
pt.opts.Stderr = edit.Stderr
}
if edit.Verbose {
pt.opts.Verbose = true
}
defer func() {
pt.opts.Stdout = oldStdOut
pt.opts.Stderr = oldStderr
pt.opts.Verbose = oldVerbose
}()
if !edit.QueryMode {
if err = pt.PreviewAndUpdate(dir, fmt.Sprintf("edit-%d", i),
edit.ExpectFailure, edit.ExpectNoChanges, edit.ExpectNoChanges); err != nil {
return err
}
} else {
if err = pt.query(dir, fmt.Sprintf("query-%d", i), edit.ExpectFailure); err != nil {
return err
}
}
return pt.performExtraRuntimeValidation(edit.ExtraRuntimeValidation, dir)
}
func (pt *ProgramTester) performExtraRuntimeValidation(
extraRuntimeValidation func(t *testing.T, stack RuntimeValidationStackInfo), dir string) error {
if extraRuntimeValidation == nil {
return nil
}
stackName := pt.opts.GetStackName()
// Create a temporary file name for the stack export
tempDir, err := ioutil.TempDir("", string(stackName))
if err != nil {
return err
}
fileName := filepath.Join(tempDir, "stack.json")
// Invoke `pulumi stack export`
// There are situations where we want to get access to the secrets in the validation
// this will allow us to get access to them as part of running ExtraRuntimeValidation
var pulumiCommand []string
if pt.opts.DecryptSecretsInOutput {
pulumiCommand = append(pulumiCommand, "stack", "export", "--show-secrets", "--file", fileName)
} else {
pulumiCommand = append(pulumiCommand, "stack", "export", "--file", fileName)
}
if err = pt.runPulumiCommand("pulumi-export",
pulumiCommand, dir, false); err != nil {
return errors.Wrapf(err, "expected to export stack to file: %s", fileName)
}
// Open the exported JSON file
f, err := os.Open(fileName)
if err != nil {
return errors.Wrapf(err, "expected to be able to open file with stack exports: %s", fileName)
}
defer func() {
contract.IgnoreClose(f)
contract.IgnoreError(os.RemoveAll(tempDir))
}()
// Unmarshal the Deployment
var untypedDeployment apitype.UntypedDeployment
if err = json.NewDecoder(f).Decode(&untypedDeployment); err != nil {
return err
}
var deployment apitype.DeploymentV3
if err = json.Unmarshal(untypedDeployment.Deployment, &deployment); err != nil {
return err
}
// Get the root resource and outputs from the deployment
var rootResource apitype.ResourceV3
var outputs map[string]interface{}
for _, res := range deployment.Resources {
if res.Type == resource.RootStackType {
rootResource = res
outputs = res.Outputs
}
}
// Read the event log.
eventsFile, err := os.Open(pt.eventLog)
if err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "expected to be able to open event log file %s", pt.eventLog)
}
defer contract.IgnoreClose(eventsFile)
decoder, events := json.NewDecoder(eventsFile), []apitype.EngineEvent{}
for {
var event apitype.EngineEvent
if err = decoder.Decode(&event); err != nil {
if err == io.EOF {
break
}
return errors.Wrapf(err, "decoding engine event")
}
events = append(events, event)
}
// Populate stack info object with all of this data to pass to the validation function
stackInfo := RuntimeValidationStackInfo{
StackName: pt.opts.GetStackName(),
Deployment: &deployment,
RootResource: rootResource,
Outputs: outputs,
Events: events,
}
pt.t.Log("Performing extra runtime validation.")
extraRuntimeValidation(pt.t, stackInfo)
pt.t.Log("Extra runtime validation complete.")
return nil
}
// copyTestToTemporaryDirectory creates a temporary directory to run the test in and copies the test to it.
func (pt *ProgramTester) copyTestToTemporaryDirectory() (string, string, error) {
// Get the source dir and project info.
sourceDir := pt.opts.Dir
projinfo, err := pt.getProjinfo(sourceDir)
if err != nil {
return "", "", err
}
if pt.opts.Stdout == nil {
pt.opts.Stdout = os.Stdout
}
if pt.opts.Stderr == nil {
pt.opts.Stderr = os.Stderr
}
pt.t.Logf("sample: %v", sourceDir)
bin, err := pt.getBin()
if err != nil {
return "", "", err
}
pt.t.Logf("pulumi: %v\n", bin)
stackName := string(pt.opts.GetStackName())
// For most projects, we will copy to a temporary directory. For Go projects, however, we must create
// a folder structure that adheres to GOPATH requirements
var tmpdir, projdir string
if projinfo.Proj.Runtime.Name() == "go" {
targetDir, err := tools.CreateTemporaryGoFolder(stackName)
if err != nil {
return "", "", errors.Wrap(err, "Couldn't create temporary directory")
}
tmpdir = targetDir
projdir = targetDir
} else {
targetDir, tempErr := ioutil.TempDir("", stackName+"-")
if tempErr != nil {
return "", "", errors.Wrap(tempErr, "Couldn't create temporary directory")
}
tmpdir = targetDir
projdir = targetDir
}
// Copy the source project.
if copyErr := fsutil.CopyFile(tmpdir, sourceDir, nil); copyErr != nil {
return "", "", copyErr
}
projinfo.Root = projdir
err = pt.prepareProject(projinfo)
if err != nil {
return "", "", errors.Wrapf(err, "Failed to prepare %v", projdir)
}
// TODO[pulumi/pulumi#5455]: Dynamic providers fail to load when used from multi-lang components.
// Until that's been fixed, this environment variable can be set by a test, which results in
// a package.json being emitted in the project directory and `yarn install && yarn link @pulumi/pulumi`
// being run.
// When the underlying issue has been fixed, the use of this environment variable should be removed.
var yarnLinkPulumi bool
for _, env := range pt.opts.Env {
if env == "PULUMI_TEST_YARN_LINK_PULUMI=true" {
yarnLinkPulumi = true
break
}
}
if yarnLinkPulumi {
const packageJSON = `{
"name": "test",
"peerDependencies": {
"@pulumi/pulumi": "latest"
}
}`
if err := ioutil.WriteFile(filepath.Join(projdir, "package.json"), []byte(packageJSON), 0600); err != nil {
return "", "", err
}
if err = pt.runYarnCommand("yarn-install", []string{"install"}, projdir); err != nil {
return "", "", err
}
if err := pt.runYarnCommand("yarn-link", []string{"link", "@pulumi/pulumi"}, projdir); err != nil {
return "", "", err
}
}
pt.t.Logf("projdir: %v", projdir)
return tmpdir, projdir, nil
}
func (pt *ProgramTester) getProjinfo(projectDir string) (*engine.Projinfo, error) {
// Load up the package so we know things like what language the project is.
projfile := filepath.Join(projectDir, workspace.ProjectFile+".yaml")
proj, err := workspace.LoadProject(projfile)
if err != nil {
return nil, err
}
return &engine.Projinfo{Proj: proj, Root: projectDir}, nil
}
// prepareProject runs setup necessary to get the project ready for `pulumi` commands.
func (pt *ProgramTester) prepareProject(projinfo *engine.Projinfo) error {
// Based on the language, invoke the right routine to prepare the target directory.
switch rt := projinfo.Proj.Runtime.Name(); rt {
case NodeJSRuntime:
return pt.prepareNodeJSProject(projinfo)
case PythonRuntime:
return pt.preparePythonProject(projinfo)
case GoRuntime:
return pt.prepareGoProject(projinfo)
case DotNetRuntime:
return pt.prepareDotNetProject(projinfo)
default:
return errors.Errorf("unrecognized project runtime: %s", rt)
}
}
// prepareProjectDir runs setup necessary to get the project ready for `pulumi` commands.
func (pt *ProgramTester) prepareProjectDir(projectDir string) error {
projinfo, err := pt.getProjinfo(projectDir)
if err != nil {
return err
}
return pt.prepareProject(projinfo)
}
// prepareNodeJSProject runs setup necessary to get a Node.js project ready for `pulumi` commands.
func (pt *ProgramTester) prepareNodeJSProject(projinfo *engine.Projinfo) error {
if err := pulumi_testing.WriteYarnRCForTest(projinfo.Root); err != nil {
return err
}
// Get the correct pwd to run Yarn in.
cwd, _, err := projinfo.GetPwdMain()
if err != nil {
return err
}
// If the test requested some packages to be overridden, we do two things. First, if the package is listed as a
// direct dependency of the project, we change the version constraint in the package.json. For transitive
// dependencies, we use yarn's "resolutions" feature to force them to a specific version.
if len(pt.opts.Overrides) > 0 {
packageJSON, err := readPackageJSON(cwd)
if err != nil {
return err
}
resolutions := make(map[string]interface{})
for packageName, packageVersion := range pt.opts.Overrides {
for _, section := range []string{"dependencies", "devDependencies"} {
if _, has := packageJSON[section]; has {
entry := packageJSON[section].(map[string]interface{})
if _, has := entry[packageName]; has {
entry[packageName] = packageVersion
}
}
}
pt.t.Logf("adding resolution for %s to version %s", packageName, packageVersion)
resolutions["**/"+packageName] = packageVersion
}
// Replace any existing resolutions section with our newly computed one.
packageJSON["resolutions"] = resolutions
if err := writePackageJSON(cwd, packageJSON); err != nil {
return err
}
}
// Now ensure dependencies are present.
if err = pt.runYarnCommand("yarn-install", []string{"install"}, cwd); err != nil {
return err
}
if !pt.opts.RunUpdateTest {
if err = pt.yarnLinkPackageDeps(cwd); err != nil {
return err
}
}
if pt.opts.RunBuild {
// And finally compile it using whatever build steps are in the package.json file.
if err = pt.runYarnCommand("yarn-build", []string{"run", "build"}, cwd); err != nil {
return err
}
}
return nil
}
// readPackageJSON unmarshals the package.json file located in pathToPackage.
func readPackageJSON(pathToPackage string) (map[string]interface{}, error) {
f, err := os.Open(filepath.Join(pathToPackage, "package.json"))
if err != nil {
return nil, errors.Wrap(err, "opening package.json")
}
defer contract.IgnoreClose(f)
var ret map[string]interface{}
if err := json.NewDecoder(f).Decode(&ret); err != nil {
return nil, errors.Wrap(err, "decoding package.json")
}
return ret, nil
}
func writePackageJSON(pathToPackage string, metadata map[string]interface{}) error {
// os.Create truncates the already existing file.
f, err := os.Create(filepath.Join(pathToPackage, "package.json"))
if err != nil {
return errors.Wrap(err, "opening package.json")
}
defer contract.IgnoreClose(f)
encoder := json.NewEncoder(f)
encoder.SetIndent("", " ")
return errors.Wrap(encoder.Encode(metadata), "writing package.json")
}
// preparePythonProject runs setup necessary to get a Python project ready for `pulumi` commands.
func (pt *ProgramTester) preparePythonProject(projinfo *engine.Projinfo) error {
cwd, _, err := projinfo.GetPwdMain()
if err != nil {
return err
}
if pt.opts.UsePipenv {
if err = pt.preparePythonProjectWithPipenv(cwd); err != nil {
return err
}
} else {
if err = pt.runPythonCommand("python-venv", []string{"-m", "venv", "venv"}, cwd); err != nil {
return err
}
projinfo.Proj.Runtime.SetOption("virtualenv", "venv")
projfile := filepath.Join(projinfo.Root, workspace.ProjectFile+".yaml")
if err = projinfo.Proj.Save(projfile); err != nil {
return errors.Wrap(err, "saving project")
}
if err := pt.runVirtualEnvCommand("virtualenv-pip-install",
[]string{"python", "-m", "pip", "install", "-r", "requirements.txt"}, cwd); err != nil {
return err
}
}
if !pt.opts.RunUpdateTest {
if err = pt.installPipPackageDeps(cwd); err != nil {
return err
}
}
return nil
}
func (pt *ProgramTester) preparePythonProjectWithPipenv(cwd string) error {
// Allow env-var-based override of the desired Python version for
// the Pipenv environment. This is useful in CI scenarios that
// need to pin a specific version such as 3.9.x vs 3.10.x.
pythonVersion := os.Getenv("PYTHON_VERSION")
if pythonVersion == "" {
pythonVersion = "3"
}
// Create a new Pipenv environment. This bootstraps a new virtual environment containing the version of Python that
// we requested. Note that this version of Python is sourced from the machine, so you must first install the version
// of Python that you are requesting on the host machine before building a virtualenv for it.
if err := pt.runPipenvCommand("pipenv-new", []string{"--python", pythonVersion}, cwd); err != nil {
return err
}
// Install the package's dependencies. We do this by running `pip` inside the virtualenv that `pipenv` has created.
// We don't use `pipenv install` because we don't want a lock file and prefer the simpler `pip install` model,
// which matches what our customers do
err := pt.runPipenvCommand("pipenv-install", []string{"run", "pip", "install", "-r", "requirements.txt"}, cwd)
if err != nil {
return err
}
return nil
}
// yarnLinkPackageDeps brings in package dependencies via yarn
func (pt *ProgramTester) yarnLinkPackageDeps(cwd string) error {
for _, dependency := range pt.opts.Dependencies {
if err := pt.runYarnCommand("yarn-link", []string{"link", dependency}, cwd); err != nil {
return err
}
}
return nil
}
// installPipPackageDeps brings in package dependencies via pip install
func (pt *ProgramTester) installPipPackageDeps(cwd string) error {
var err error
for _, dep := range pt.opts.Dependencies {
// If the given filepath isn't absolute, make it absolute. We're about to pass it to pipenv and pipenv is
// operating inside of a random folder in /tmp.
if !filepath.IsAbs(dep) {
dep, err = filepath.Abs(dep)
if err != nil {
return err
}
}
if pt.opts.UsePipenv {
if err := pt.runPipenvCommand("pipenv-install-package",
[]string{"run", "pip", "install", "-e", dep}, cwd); err != nil {
return err
}
} else {
if err := pt.runVirtualEnvCommand("virtualenv-pip-install-package",
[]string{"python", "-m", "pip", "install", "-e", dep}, cwd); err != nil {
return err
}
}
}
return nil
}
func getVirtualenvBinPath(cwd, bin string) (string, error) {
virtualenvBinPath := filepath.Join(cwd, "venv", "bin", bin)
if runtime.GOOS == windowsOS {
virtualenvBinPath = filepath.Join(cwd, "venv", "Scripts", fmt.Sprintf("%s.exe", bin))
}
if info, err := os.Stat(virtualenvBinPath); err != nil || info.IsDir() {
return "", errors.Errorf("Expected %s to exist in virtual environment at %q", bin, virtualenvBinPath)
}
return virtualenvBinPath, nil
}
// getSanitizedModulePath strips the version suffix from a go dependency path
// Note: most of the pulumi modules don't use major version subdirectories for modules
func getSanitizedModulePath(pkg string) string {
re := regexp.MustCompile(`v\d`)
v := re.FindString(pkg)
if v != "" {
return strings.TrimSuffix(strings.Replace(pkg, v, "", -1), "/")
}
return pkg
}
func getRewritePath(pkg string, gopath string, depRoot string) string {
var depParts []string
sanitizedPkg := getSanitizedModulePath(pkg)
splitPkg := strings.Split(sanitizedPkg, "/")
if depRoot != "" {
// Get the package name
// This is the value after "github.com/foo/bar"
repoName := splitPkg[2]
basePath := splitPkg[len(splitPkg)-1]
if basePath == repoName {
depParts = append([]string{depRoot, repoName})
} else {
depParts = append([]string{depRoot, repoName, basePath})
}
return filepath.Join(depParts...)
}
depParts = append([]string{gopath, "src"}, splitPkg...)
return filepath.Join(depParts...)
}
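// exampleRewritePath is an illustrative sketch with hypothetical paths showing how module paths are
// rewritten for local replacement: with no depRoot the package lands under GOPATH/src, and with a
// depRoot it lands under <depRoot>/<repo>/<pkg>.
func exampleRewritePath() (string, string) {
	underGopath := getRewritePath("github.com/pulumi/pulumi/sdk/v2", "/home/me/go", "")
	// underGopath == "/home/me/go/src/github.com/pulumi/pulumi/sdk"
	underDepRoot := getRewritePath("github.com/pulumi/pulumi/sdk/v2", "/home/me/go", "/work")
	// underDepRoot == "/work/pulumi/sdk"
	return underGopath, underDepRoot
}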
// prepareGoProject runs setup necessary to get a Go project ready for `pulumi` commands.
func (pt *ProgramTester) prepareGoProject(projinfo *engine.Projinfo) error {
// Go programs are compiled, so we will compile the project first.
goBin, err := pt.getGoBin()
if err != nil {
return errors.Wrap(err, "locating `go` binary")
}
// Ensure GOPATH is known.
gopath := os.Getenv("GOPATH")
if gopath == "" {
usr, userErr := user.Current()
if userErr != nil {
return userErr
}
gopath = filepath.Join(usr.HomeDir, "go")
}
depRoot := os.Getenv("PULUMI_GO_DEP_ROOT")
cwd, _, err := projinfo.GetPwdMain()
if err != nil {
return err
}
// initialize a go.mod for dependency resolution if one doesn't exist
_, err = os.Stat(filepath.Join(cwd, "go.mod"))
if err != nil {
err = pt.runCommand("go-mod-init", []string{goBin, "mod", "init"}, cwd)
if err != nil {
return err
}
}
// link local dependencies
for _, pkg := range pt.opts.Dependencies {
dep := getRewritePath(pkg, gopath, depRoot)
editStr := fmt.Sprintf("%s=%s", pkg, dep)
err = pt.runCommand("go-mod-edit", []string{goBin, "mod", "edit", "-replace", editStr}, cwd)
if err != nil {
return err
}
}
// tidy to resolve all transitive dependencies, including those from the local dependencies linked above
err = pt.runCommand("go-mod-tidy", []string{goBin, "mod", "tidy"}, cwd)
if err != nil {
return err
}
if pt.opts.RunBuild {
outBin := filepath.Join(gopath, "bin", string(projinfo.Proj.Name))
if runtime.GOOS == windowsOS {
outBin = fmt.Sprintf("%s.exe", outBin)
}
err = pt.runCommand("go-build", []string{goBin, "build", "-o", outBin, "."}, cwd)
if err != nil {
return err
}
_, err = os.Stat(outBin)
if err != nil {
return fmt.Errorf("error finding built application artifact: %w", err)
}
}
return nil
}
// prepareDotNetProject runs setup necessary to get a .NET project ready for `pulumi` commands.
func (pt *ProgramTester) prepareDotNetProject(projinfo *engine.Projinfo) error {
dotNetBin, err := pt.getDotNetBin()
if err != nil {
return errors.Wrap(err, "locating `dotnet` binary")
}
cwd, _, err := projinfo.GetPwdMain()
if err != nil {
return err
}
localNuget := os.Getenv("PULUMI_LOCAL_NUGET")
if localNuget == "" {
localNuget = "/opt/pulumi/nuget"
}
for _, dep := range pt.opts.Dependencies {
// dotnet add package requires a specific version in case of a pre-release, so we have to look it up.
matches, err := filepath.Glob(filepath.Join(localNuget, dep+".?.*.nupkg"))
if err != nil {
return errors.Wrap(err, "failed to find a local Pulumi NuGet package")
}
if len(matches) != 1 {
return errors.Errorf("attempting to find a local Pulumi NuGet package yielded %v results", matches)
}
file := filepath.Base(matches[0])
r := strings.NewReplacer(dep+".", "", ".nupkg", "")
version := r.Replace(file)
err = pt.runCommand("dotnet-add-package",
[]string{dotNetBin, "add", "package", dep, "-v", version}, cwd)
if err != nil {
return errors.Wrapf(err, "failed to add dependency on %s", dep)
}
}
return nil
}
|
[
"\"PULUMI_TEST_DEBUG_LOG_LEVEL\"",
"\"PULUMI_TEST_DEBUG_UPDATES\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_ACCESS_TOKEN\"",
"\"PULUMI_TEST_REPORT_CONFIG\"",
"\"PULUMI_TEST_TRACE_ENDPOINT\"",
"\"PULUMI_FAILED_TESTS_DIR\"",
"\"PULUMI_API\"",
"\"PULUMI_ACCESS_TOKEN\"",
"\"PYTHON_VERSION\"",
"\"GOPATH\"",
"\"PULUMI_GO_DEP_ROOT\"",
"\"PULUMI_LOCAL_NUGET\""
] |
[] |
[
"PULUMI_ACCESS_TOKEN",
"PULUMI_FAILED_TESTS_DIR",
"PULUMI_TEST_DEBUG_UPDATES",
"PULUMI_API",
"PULUMI_GO_DEP_ROOT",
"PULUMI_LOCAL_NUGET",
"PULUMI_TEST_OWNER",
"GOPATH",
"PYTHON_VERSION",
"PULUMI_TEST_DEBUG_LOG_LEVEL",
"PULUMI_TEST_REPORT_CONFIG",
"PULUMI_TEST_TRACE_ENDPOINT"
] |
[]
|
["PULUMI_ACCESS_TOKEN", "PULUMI_FAILED_TESTS_DIR", "PULUMI_TEST_DEBUG_UPDATES", "PULUMI_API", "PULUMI_GO_DEP_ROOT", "PULUMI_LOCAL_NUGET", "PULUMI_TEST_OWNER", "GOPATH", "PYTHON_VERSION", "PULUMI_TEST_DEBUG_LOG_LEVEL", "PULUMI_TEST_REPORT_CONFIG", "PULUMI_TEST_TRACE_ENDPOINT"]
|
go
| 12 | 0 | |
commands/config.go
|
package commands
import (
"fmt"
"net"
"os"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/network"
)
func getDefaultConfigFile() string {
return filepath.Join(getDefaultConfigDirectory(), "anka-config.toml")
}
func getDefaultCertificateDirectory() string {
return filepath.Join(getDefaultConfigDirectory(), "certs")
}
type configOptions struct {
config *common.Config
ConfigFile string `short:"c" long:"config" env:"CONFIG_FILE" description:"Config file"`
}
func (c *configOptions) saveConfig() error {
return c.config.SaveConfig(c.ConfigFile)
}
func (c *configOptions) loadConfig() error {
config := common.NewConfig()
err := config.LoadConfig(c.ConfigFile)
if err != nil {
return err
}
c.config = config
return nil
}
func (c *configOptions) RunnerByName(name string) (*common.RunnerConfig, error) {
if c.config == nil {
return nil, fmt.Errorf("config has not been loaded")
}
for _, runner := range c.config.Runners {
if runner.Name == name {
return runner, nil
}
}
return nil, fmt.Errorf("could not find a runner with the name '%s'", name)
}
//nolint:lll
type configOptionsWithListenAddress struct {
configOptions
ListenAddress string `long:"listen-address" env:"LISTEN_ADDRESS" description:"Metrics / pprof server listening address"`
}
func (c *configOptionsWithListenAddress) listenAddress() (string, error) {
address := c.config.ListenAddress
if c.ListenAddress != "" {
address = c.ListenAddress
}
if address == "" {
return "", nil
}
_, port, err := net.SplitHostPort(address)
if err != nil && !strings.Contains(err.Error(), "missing port in address") {
return "", err
}
if port == "" {
return fmt.Sprintf("%s:%d", address, common.DefaultMetricsServerPort), nil
}
return address, nil
}
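// exampleListenAddress is an illustrative sketch (hypothetical addresses) of how listenAddress
// normalizes its input: an address without a port gets the default metrics server port appended, a
// fully-specified "host:port" is returned unchanged, and an empty address disables the listener.
func exampleListenAddress(c *configOptionsWithListenAddress) (string, error) {
	if err := c.loadConfig(); err != nil {
		return "", err
	}
	c.ListenAddress = "localhost" // resolves to "localhost:<DefaultMetricsServerPort>"
	return c.listenAddress()
}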
func init() {
configFile := os.Getenv("CONFIG_FILE")
if configFile == "" {
err := os.Setenv("CONFIG_FILE", getDefaultConfigFile())
if err != nil {
logrus.WithError(err).Fatal("Couldn't set CONFIG_FILE environment variable")
}
}
network.CertificateDirectory = getDefaultCertificateDirectory()
}
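// exampleLoadRunnerByName is an illustrative sketch (the runner name "my-runner" is a hypothetical
// placeholder) of the typical configOptions flow: load the TOML config file, then look up a runner
// by name.
func exampleLoadRunnerByName() (*common.RunnerConfig, error) {
	c := configOptions{ConfigFile: getDefaultConfigFile()}
	if err := c.loadConfig(); err != nil {
		return nil, err
	}
	return c.RunnerByName("my-runner")
}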
|
[
"\"CONFIG_FILE\""
] |
[] |
[
"CONFIG_FILE"
] |
[]
|
["CONFIG_FILE"]
|
go
| 1 | 0 | |
python/oneflow/compatible/single_client/test/xrt/test_layer_norm_param_grad.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
config = flow.function_config()
def make_job(shape, gamma_shape, params_axis, dtype=flow.float32):
config.use_xla_jit(False)
config.use_tensorrt(False)
@flow.global_function(config)
def layer_norm_param_grad_job(
dy=flow.FixedTensorDef(shape, dtype=dtype),
norm=flow.FixedTensorDef(shape, dtype=dtype),
gamma=flow.FixedTensorDef(gamma_shape, dtype=dtype),
):
return flow.layers.layer_norm_param_grad(
dy, norm, gamma, begin_params_axis=params_axis
)
return layer_norm_param_grad_job
def make_xla_job(shape, gamma_shape, params_axis, dtype=flow.float32):
config.use_xla_jit(True)
config.use_tensorrt(False)
@flow.global_function(config)
def xla_layer_norm_param_grad_job(
dy=flow.FixedTensorDef(shape, dtype=dtype),
norm=flow.FixedTensorDef(shape, dtype=dtype),
gamma=flow.FixedTensorDef(gamma_shape, dtype=dtype),
):
return flow.layers.layer_norm_param_grad(
dy, norm, gamma, begin_params_axis=params_axis
)
return xla_layer_norm_param_grad_job
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestLayerNormParamGrad(unittest.TestCase):
def _test_body(self, dy, norm, gamma, params_axis, dtype=np.float32):
f1 = make_job(dy.shape, gamma.shape, params_axis, dtype=flow.float32)
f2 = make_xla_job(dy.shape, gamma.shape, params_axis, dtype=flow.float32)
(d_norm1, d_beta1, d_gamma1) = f1(dy, norm, gamma).get()
(d_norm2, d_beta2, d_gamma2) = f2(dy, norm, gamma).get()
print("normalize diff:")
print(" without xla: ", d_norm1)
print(" with xla: ", d_norm2)
print("beta diff:")
print(" without xla: ", d_beta1)
print(" with xla: ", d_beta2)
print("gamma diff:")
print(" without xla: ", d_gamma1)
print(" with xla: ", d_gamma2)
self.assertEqual(d_norm1.shape, d_norm2.shape)
self.assertEqual(d_beta1.shape, d_beta2.shape)
self.assertEqual(d_gamma1.shape, d_gamma2.shape)
self.assertTrue(
np.allclose(d_norm1.numpy(), d_norm2.numpy(), rtol=0.001, atol=1e-05)
)
self.assertTrue(
np.allclose(d_beta1.numpy(), d_beta2.numpy(), rtol=0.001, atol=1e-05)
)
self.assertTrue(
np.allclose(d_gamma1.numpy(), d_gamma2.numpy(), rtol=0.001, atol=1e-05)
)
flow.clear_default_session()
def _test_ones_body(self, shape, params_axis=-1, dtype=np.float32):
dy = np.ones(shape, dtype=dtype)
norm = np.ones(shape, dtype=dtype)
if params_axis < 0:
params_axis += len(shape)
gamma_shape = shape[params_axis:]
if len(gamma_shape) == 0:
gamma_shape = [1]
gamma = np.ones(gamma_shape, dtype=dtype)
self._test_body(dy, norm, gamma, params_axis, dtype=dtype)
def _test_random_body(self, shape, params_axis=-1, dtype=np.float32):
dy = np.random.random(shape).astype(dtype)
norm = np.random.random(shape).astype(dtype)
if params_axis < 0:
params_axis += len(shape)
gamma_shape = shape[params_axis:]
if len(gamma_shape) == 0:
gamma_shape = [1]
gamma = np.random.random(gamma_shape).astype(dtype)
self._test_body(dy, norm, gamma, params_axis, dtype=dtype)
def test_ones_input(self):
self._test_ones_body((1, 10))
self._test_ones_body((2, 10, 2))
self._test_ones_body((2, 5, 2, 2))
def test_random_input(self):
self._test_random_body((1, 10))
self._test_random_body((2, 10, 2))
self._test_random_body((2, 5, 2, 2))
if __name__ == "__main__":
unittest.main()
|
[] |
[] |
[
"ONEFLOW_TEST_CPU_ONLY"
] |
[]
|
["ONEFLOW_TEST_CPU_ONLY"]
|
python
| 1 | 0 | |
function-controller/cmd/function-controller.go
|
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"strings"
riffcs "github.com/projectriff/riff/kubernetes-crds/pkg/client/clientset/versioned"
informers "github.com/projectriff/riff/kubernetes-crds/pkg/client/informers/externalversions"
"k8s.io/client-go/tools/clientcmd"
"flag"
"log"
"os"
"os/signal"
"syscall"
"github.com/bsm/sarama-cluster"
"github.com/projectriff/riff/function-controller/pkg/controller"
"github.com/projectriff/riff/function-controller/pkg/controller/autoscaler"
riffInformersV1 "github.com/projectriff/riff/kubernetes-crds/pkg/client/informers/externalversions/projectriff/v1alpha1"
"github.com/projectriff/riff/message-transport/pkg/transport/kafka"
"github.com/projectriff/riff/message-transport/pkg/transport/metrics/kafka_over_kafka"
k8sInformers "k8s.io/client-go/informers"
"k8s.io/client-go/informers/extensions/v1beta1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
func main() {
kubeconfig := flag.String("kubeconf", "", "Path to a kube config. Only required if out-of-cluster.")
masterURL := flag.String("master-url", "", "Path to master URL. Useful e.g. when using a proxy")
brokers := strings.Split(os.Getenv("KAFKA_BROKERS"), ",")
flag.Parse()
config, err := clientcmd.BuildConfigFromFlags(*masterURL, *kubeconfig)
if err != nil {
log.Fatalf("Error getting client config: %s", err.Error())
}
topicsInformer, functionsInformer, linksInformer, deploymentInformer := makeInformers(config)
streamGatewayFeatureFlag := os.Getenv("stream-gateway") == "enabled"
log.Printf("Feature flag stream-gateway=enabled: %t", streamGatewayFeatureFlag)
deployer, err := controller.NewDeployer(config, brokers, streamGatewayFeatureFlag)
if err != nil {
panic(err)
}
metricsReceiver, err := kafka_over_kafka.NewMetricsReceiver(brokers, "autoscaler", makeConsumerConfig())
if err != nil {
panic(err)
}
transportInspector, err := kafka.NewInspector(brokers)
if err != nil {
panic(err)
}
autoScaler := autoscaler.NewAutoScaler(metricsReceiver, transportInspector)
ctrl := controller.New(topicsInformer, functionsInformer, linksInformer, deploymentInformer, deployer, autoScaler, 8080)
stopCh := make(chan struct{})
go ctrl.Run(stopCh)
// Trap signals to trigger a proper shutdown.
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt, syscall.SIGTERM)
// Wait for shutdown
<-signals
log.Println("Shutting Down...")
stopCh <- struct{}{}
}
func makeInformers(config *rest.Config) (riffInformersV1.TopicInformer, riffInformersV1.FunctionInformer, riffInformersV1.LinkInformer, v1beta1.DeploymentInformer) {
riffClient, err := riffcs.NewForConfig(config)
if err != nil {
log.Fatalf("Error building riff clientset: %s", err.Error())
}
riffInformerFactory := informers.NewSharedInformerFactory(riffClient, 0)
topicsInformer := riffInformerFactory.Projectriff().V1alpha1().Topics()
functionsInformer := riffInformerFactory.Projectriff().V1alpha1().Functions()
linksInformer := riffInformerFactory.Projectriff().V1alpha1().Links()
k8sClient, err := kubernetes.NewForConfig(config)
if err != nil {
log.Fatalf("Error building kubernetes clientset: %s", err.Error())
}
deploymentInformer := k8sInformers.NewSharedInformerFactory(k8sClient, 0).Extensions().V1beta1().Deployments()
return topicsInformer, functionsInformer, linksInformer, deploymentInformer
}
func makeConsumerConfig() *cluster.Config {
consumerConfig := cluster.NewConfig()
consumerConfig.Consumer.Return.Errors = true
consumerConfig.Group.Return.Notifications = true
return consumerConfig
}
| ["\"KAFKA_BROKERS\"", "\"stream-gateway\""] | [] | ["stream-gateway", "KAFKA_BROKERS"] | [] | ["stream-gateway", "KAFKA_BROKERS"] | go | 2 | 0 | |
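As a hedged illustration of the configuration handling in the Go main() above, here is a Python analogue (not code from the riff repository): KAFKA_BROKERS is read as a comma-separated broker list, and the stream-gateway feature flag is enabled only when that variable equals "enabled". Unlike Go's strings.Split, this sketch drops empty entries.
import os

def read_controller_config(environ=os.environ):
    # Comma-separated broker list, mirroring strings.Split(os.Getenv("KAFKA_BROKERS"), ",")
    # (empty entries are dropped here, unlike strings.Split).
    brokers = [b for b in environ.get("KAFKA_BROKERS", "").split(",") if b]
    # Feature flag is on only for the exact value "enabled".
    stream_gateway = environ.get("stream-gateway") == "enabled"
    return brokers, stream_gateway

brokers, flag = read_controller_config({"KAFKA_BROKERS": "kafka-0:9092,kafka-1:9092"})
assert brokers == ["kafka-0:9092", "kafka-1:9092"]
assert flag is False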
cirno/__main__.py
|
#!/usr/bin/env python3
import os
from discord.ext import commands
import aiosqlite
from aioosuapi import aioosuapi
from cirno.modules import first_run
from cirno.manifest import VERSION
from cirno.manifest import CONTRIBUTORS
from cirno.modules.storage_management import database_file
from cirno.modules.connections import bot_token
from cirno.modules.connections import osu_api_key
if os.environ.get('CIRNO_PREFIX'):
command_prefix = os.environ.get('CIRNO_PREFIX')
else:
command_prefix = ","
first_run.ensure_tables()
initial_extensions = [
"cirno.cogs.BotManagement",
"cirno.cogs.ScoreTracking",
]
class Cirno(commands.Bot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.background_tasks = []
self.app_version = VERSION
self.project_contributors = CONTRIBUTORS
self.description = f"Cirno {self.app_version}"
self.database_file = database_file
self.osu = aioosuapi(osu_api_key)
for extension in initial_extensions:
try:
self.load_extension(extension)
except Exception as e:
print(f"Failed to load extension {extension}: {e}")
async def start(self, *args, **kwargs):
self.db = await aiosqlite.connect(self.database_file)
await super().start(*args, **kwargs)
async def close(self):
# Cancel all Task objects generated by cogs.
# This prevents tasks with long sleep intervals from lingering after shutdown.
for task in self.background_tasks:
task.cancel()
# Close connection to the database
await self.db.close()
# Run actual discord.py close.
await super().close()
async def on_ready(self):
print("Logged in as")
print(self.user.name)
print(self.user.id)
print("------")
await first_run.add_admins(self)
client = Cirno(command_prefix=command_prefix)
client.run(bot_token)
| [] | [] | ["CIRNO_PREFIX"] | [] | ["CIRNO_PREFIX"] | python | 1 | 0 | |
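A minimal sketch (hypothetical helper, not part of cirno) of the prefix resolution at the top of the file above: CIRNO_PREFIX wins when it is set and non-empty, otherwise the bot falls back to ",".
import os

def resolve_prefix(environ=os.environ):
    # environ.get returns None when unset; an empty value also falls back to ",".
    return environ.get("CIRNO_PREFIX") or ","

assert resolve_prefix({}) == ","
assert resolve_prefix({"CIRNO_PREFIX": "!"}) == "!"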
delta/data/task/text_cls_task_test.py
|
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=missing-docstring
import os
from pathlib import Path
from absl import logging
import numpy as np
import tensorflow as tf
# delta
from delta import utils
from delta.data.task.text_cls_task import TextClsTask
from delta.utils.register import import_all_modules_for_register
class TextClsTaskTest(tf.test.TestCase):
def setUp(self):
import_all_modules_for_register()
main_root = os.environ['MAIN_ROOT']
main_root = Path(main_root)
self.config_file = main_root.joinpath(
'egs/mock_text_cls_data/text_cls/v1/config/han-cls.yml')
def tearDown(self):
''' tear down '''
def test_english(self):
config = utils.load_config(self.config_file)
class_num = config["data"]["task"]["classes"]["num_classes"]
task_config = config["data"]["task"]
task_config["language"] = "english"
task_config["split_by_space"] = True
data_config = config["data"]
data_config["train"]["paths"] = [
"egs/mock_text_cls_data/text_cls/v1/data/train.english.txt"
]
data_config["eval"]["paths"] = [
"egs/mock_text_cls_data/text_cls/v1/data/eval.english.txt"
]
data_config["infer"]["paths"] = [
"egs/mock_text_cls_data/text_cls/v1/data/test.english.txt"
]
task_config[
"text_vocab"] = "egs/mock_text_cls_data/text_cls/v1/data/text_vocab.english.txt"
task_config["need_shuffle"] = False
config["model"]["split_token"] = ""
task_config["preparer"]["reuse"] = False
task = TextClsTask(config, utils.TRAIN)
# test offline data
data = task.dataset()
self.assertTrue("input_x_dict" in data and
"input_x" in data["input_x_dict"])
self.assertTrue("input_y_dict" in data and
"input_y" in data["input_y_dict"])
with self.session() as sess:
sess.run(data["iterator"].initializer, feed_dict=data["init_feed_dict"])
res = sess.run(
[data["input_x_dict"]["input_x"], data["input_y_dict"]["input_y"]])
logging.debug(res[0][0][:5])
logging.debug(res[1][0][:5])
self.assertAllEqual(res[0][0][:5], [3, 4, 5, 0, 0])
self.assertEqual(np.shape(res[1]), (32, class_num))
# test online data
export_inputs = task.export_inputs()
self.assertTrue("export_inputs" in export_inputs and
"input_sentence" in export_inputs["export_inputs"])
input_sentence = export_inputs["export_inputs"]["input_sentence"]
input_x = export_inputs["model_inputs"]["input_x"]
with self.session() as sess:
res = sess.run(input_x, feed_dict={input_sentence: ["All is well."]})
logging.debug(res[0][:5])
self.assertAllEqual(res[0][:5], [3, 4, 5, 0, 0])
# # comment it for no dense data now
# def test_english_dense(self):
# config = utils.load_config(self.config_file)
# max_len = config["model"]["net"]["structure"]["max_len"]
# class_num = config["data"]["task"]["classes"]["num_classes"]
# data_config = config["data"]
# task_config = data_config["task"]
# task_config["language"] = "chinese"
# task_config["split_by_space"] = True
# task_config["use_dense"] = True
# task_config["dense_input_dim"] = 31
# data_config["train"][
# "dense_npy"] = "./delta/config/data/text_cls/english/dense_data/ds_train_scale.npy"
# data_config["eval"][
# "dense_npy"] = "./delta/config/data/text_cls/english/dense_data/ds_eval_scale.npy"
# data_config["infer"][
# "dense_npy"] = "./delta/config/data/text_cls/english/dense_data/ds_test_scale.npy"
#
# task = TextClsTask(config, utils.TRAIN)
#
# # test offline data
# # task.do_pre_process()
# data = task.dataset()
# self.assertTrue("input_x_dict" in data and
# "input_x" in data["input_x_dict"])
# self.assertTrue("input_x_dict" in data and
# "input_dense" in data["input_x_dict"])
# self.assertTrue("input_y_dict" in data and
# "input_y" in data["input_y_dict"])
# with self.session() as sess:
# sess.run(data["iterator"].initializer, feed_dict=data["init_feed_dict"])
# res = sess.run([
# data["input_x_dict"]["input_x"], data["input_x_dict"]["input_dense"],
# data["input_y_dict"]["input_y"]
# ])
# logging.debug(res[0][0])
# logging.debug(res[1][0])
# logging.debug(res[2][0])
# self.assertEqual(np.shape(res[0]), (32, max_len))
# self.assertEqual(np.shape(res[1]), (32, task_config["dense_input_dim"]))
# self.assertEqual(np.shape(res[2]), (32, class_num))
#
# # test online data
# export_inputs = task.export_inputs()
# self.assertTrue("export_inputs" in export_inputs and
# "input_sentence" in export_inputs["export_inputs"])
# input_sentence = export_inputs["export_inputs"]["input_sentence"]
# input_x = export_inputs["model_inputs"]["input_x"]
# with self.session() as sess:
# res = sess.run(input_x, feed_dict={input_sentence: ["All is well."]})
# logging.debug(res[0])
# self.assertEqual(np.shape(res[0]), (max_len,))
def test_chinese_split_by_space(self):
config = utils.load_config(self.config_file)
class_num = config["data"]["task"]["classes"]["num_classes"]
data_config = config["data"]
task_config = data_config["task"]
task_config["language"] = "chinese"
task_config["split_by_space"] = True
task_config["use_word"] = False
data_config = config["data"]
data_config["train"]["paths"] = [
"egs/mock_text_cls_data/text_cls/v1/data/train.split_by_space.txt"
]
data_config["eval"]["paths"] = [
"egs/mock_text_cls_data/text_cls/v1/data/eval.split_by_space.txt"
]
data_config["infer"]["paths"] = [
"egs/mock_text_cls_data/text_cls/v1/data/test.split_by_space.txt"
]
task_config[
"text_vocab"] = "egs/mock_text_cls_data/text_cls/v1/data/text_vocab.split_by_space.txt"
task_config["need_shuffle"] = False
config["model"]["split_token"] = ""
task_config["preparer"]["reuse"] = False
task = TextClsTask(config, utils.TRAIN)
# test offline data
data = task.dataset()
self.assertTrue("input_x_dict" in data and
"input_x" in data["input_x_dict"])
self.assertTrue("input_y_dict" in data and
"input_y" in data["input_y_dict"])
with self.session() as sess:
sess.run(data["iterator"].initializer, feed_dict=data["init_feed_dict"])
res = sess.run(
[data["input_x_dict"]["input_x"], data["input_y_dict"]["input_y"]])
logging.debug(res[0][0])
logging.debug(res[1][0])
self.assertAllEqual(res[0][0][:5], [2, 3, 0, 0, 0])
self.assertEqual(np.shape(res[1]), (32, class_num))
# test online data
export_inputs = task.export_inputs()
self.assertTrue("export_inputs" in export_inputs and
"input_sentence" in export_inputs["export_inputs"])
input_sentence = export_inputs["export_inputs"]["input_sentence"]
input_x = export_inputs["model_inputs"]["input_x"]
with self.session() as sess:
res = sess.run(input_x, feed_dict={input_sentence: ["都 挺好"]})
logging.debug(res[0][:5])
logging.debug(np.shape(res[0]))
self.assertAllEqual(res[0][:5], [2, 3, 0, 0, 0])
def test_chinese_word(self):
config = utils.load_config(self.config_file)
class_num = config["data"]["task"]["classes"]["num_classes"]
data_config = config["data"]
task_config = data_config["task"]
task_config["language"] = "chinese"
task_config["split_by_space"] = False
task_config["use_word"] = True
data_config = config["data"]
data_config["train"]["paths"] = \
["egs/mock_text_cls_data/text_cls/v1/data/train.split_by_space.txt"]
data_config["eval"]["paths"] = \
["egs/mock_text_cls_data/text_cls/v1/data/eval.split_by_space.txt"]
data_config["infer"]["paths"] = \
["egs/mock_text_cls_data/text_cls/v1/data/test.split_by_space.txt"]
task_config[
"text_vocab"] = "egs/mock_text_cls_data/text_cls/v1/data/text_vocab.split_by_space.txt"
task_config["need_shuffle"] = False
config["model"]["split_token"] = ""
task_config["preparer"]["reuse"] = False
task = TextClsTask(config, utils.TRAIN)
# test offline data
data = task.dataset()
self.assertTrue("input_x_dict" in data and
"input_x" in data["input_x_dict"])
self.assertTrue("input_y_dict" in data and
"input_y" in data["input_y_dict"])
with self.session() as sess:
sess.run(data["iterator"].initializer, feed_dict=data["init_feed_dict"])
res = sess.run(
[data["input_x_dict"]["input_x"], data["input_y_dict"]["input_y"]])
logging.debug(res[0][0])
logging.debug(res[1][0])
self.assertAllEqual(res[0][0][:5], [2, 0, 0, 0, 0])
self.assertEqual(np.shape(res[1]), (32, class_num))
# test online data
export_inputs = task.export_inputs()
self.assertTrue("export_inputs" in export_inputs and
"input_sentence" in export_inputs["export_inputs"])
input_sentence = export_inputs["export_inputs"]["input_sentence"]
input_x = export_inputs["model_inputs"]["input_x"]
with self.session() as sess:
res = sess.run(input_x, feed_dict={input_sentence: ["我很愤怒"]})
logging.debug(res[0])
logging.debug(np.shape(res[0]))
self.assertAllEqual(res[0][:5], [4, 5, 0, 0, 0])
def test_chinese_char(self):
config = utils.load_config(self.config_file)
max_len = config["model"]["net"]["structure"]["max_len"]
class_num = config["data"]["task"]["classes"]["num_classes"]
data_config = config["data"]
task_config = data_config["task"]
task_config["language"] = "chinese"
task_config["split_by_space"] = False
task_config["use_word"] = False
data_config = config["data"]
data_config["train"]["paths"] = [
"egs/mock_text_cls_data/text_cls/v1/data/train.split_by_char.txt"
]
data_config["eval"]["paths"] = [
"egs/mock_text_cls_data/text_cls/v1/data/eval.split_by_char.txt"
]
data_config["infer"]["paths"] = [
"egs/mock_text_cls_data/text_cls/v1/data/test.split_by_char.txt"
]
task_config[
"text_vocab"] = "egs/mock_text_cls_data/text_cls/v1/data/text_vocab.split_by_char.txt"
task_config["need_shuffle"] = False
config["model"]["split_token"] = ""
task_config["preparer"]["reuse"] = False
task = TextClsTask(config, utils.TRAIN)
# test offline data
data = task.dataset()
self.assertTrue("input_x_dict" in data and
"input_x" in data["input_x_dict"])
self.assertTrue("input_y_dict" in data and
"input_y" in data["input_y_dict"])
with self.session() as sess:
sess.run(data["iterator"].initializer, feed_dict=data["init_feed_dict"])
res = sess.run([
data["input_x_dict"]["input_x"], data["input_y_dict"]["input_y"],
data["input_x_len"]
])
logging.debug(res[0][0])
logging.debug(res[1][0])
self.assertAllEqual(res[0][0][:5], [2, 3, 4, 0, 0])
self.assertEqual(np.shape(res[0]), (32, max_len))
self.assertEqual(np.shape(res[1]), (32, class_num))
self.assertEqual(np.shape(res[2]), (32,))
# test online data
export_inputs = task.export_inputs()
self.assertTrue("export_inputs" in export_inputs and
"input_sentence" in export_inputs["export_inputs"])
input_sentence = export_inputs["export_inputs"]["input_sentence"]
input_x = export_inputs["model_inputs"]["input_x"]
with self.session() as sess:
res = sess.run(input_x, feed_dict={input_sentence: ["都挺好"]})
logging.debug(res[0][:5])
logging.debug(np.shape(res[0]))
self.assertEqual(np.shape(res[0]), (max_len,))
self.assertAllEqual(res[0][:5], [2, 3, 4, 0, 0])
def test_chinese_with_split_token(self):
config = utils.load_config(self.config_file)
max_len = config["model"]["net"]["structure"]["max_len"]
class_num = config["data"]["task"]["classes"]["num_classes"]
data_config = config["data"]
task_config = data_config["task"]
task_config["language"] = "chinese"
task_config["split_by_space"] = False
task_config["use_word"] = True
data_config = config["data"]
data_config["train"]["paths"] = \
["egs/mock_text_cls_data/text_cls/v1/data/train.split_by_line_mark.txt"]
data_config["eval"]["paths"] = \
["egs/mock_text_cls_data/text_cls/v1/data/eval.split_by_line_mark.txt"]
data_config["infer"]["paths"] = \
["egs/mock_text_cls_data/text_cls/v1/data/test.split_by_line_mark.txt"]
task_config[
"text_vocab"] = "egs/mock_text_cls_data/text_cls/v1/data/text_vocab.split_by_line_mark.txt"
task_config["need_shuffle"] = False
task_config["preparer"]["reuse"] = False
task = TextClsTask(config, utils.TRAIN)
# test offline data
data = task.dataset()
self.assertTrue("input_x_dict" in data and
"input_x" in data["input_x_dict"])
self.assertTrue("input_y_dict" in data and
"input_y" in data["input_y_dict"])
with self.session() as sess:
sess.run(data["iterator"].initializer, feed_dict=data["init_feed_dict"])
res = sess.run([
data["input_x_dict"]["input_x"], data["input_y_dict"]["input_y"],
data["input_x_len"]
])
logging.debug(res[0][0][:10])
logging.debug(res[1][0])
self.assertAllEqual(
res[0][0][:10],
[2, 0, 0, 0, 6, 2, 0, 0, 8, 0]) #[2,3,0,0,6,2,0,0,8,0]
self.assertEqual(np.shape(res[0]), (32, max_len))
self.assertEqual(np.shape(res[1]), (32, class_num))
self.assertEqual(np.shape(res[2]), (32,))
# test online data
export_inputs = task.export_inputs()
self.assertTrue("export_inputs" in export_inputs and
"input_sentence" in export_inputs["export_inputs"])
input_sentence = export_inputs["export_inputs"]["input_sentence"]
input_x = export_inputs["model_inputs"]["input_x"]
with self.session() as sess:
res = sess.run(input_x, feed_dict={input_sentence: ["我很愤怒。|超级生气!"]})
logging.debug(res[0][:10])
logging.debug(np.shape(res[0]))
self.assertEqual(np.shape(res[0]), (max_len,))
self.assertAllEqual(res[0][:10], [4, 5, 0, 0, 6, 9, 10, 0, 0, 0])
if __name__ == "__main__":
logging.set_verbosity(logging.DEBUG)
tf.test.main()
| [] | [] | ["MAIN_ROOT"] | [] | ["MAIN_ROOT"] | python | 1 | 0 |
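For context, a small sketch (with a hypothetical root path) of how setUp above locates its config: MAIN_ROOT is read from the environment as the repository root and the han-cls.yml path is joined onto it.
import os
from pathlib import Path

def resolve_config_file(environ=os.environ):
    main_root = Path(environ["MAIN_ROOT"])  # raises KeyError if MAIN_ROOT is unset
    return main_root / "egs/mock_text_cls_data/text_cls/v1/config/han-cls.yml"

# Example with a hypothetical root:
assert str(resolve_config_file({"MAIN_ROOT": "/opt/delta"})).startswith("/opt/delta/egs/")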