filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---|
bsp/stm32/stm32l053-st-nucleo/rtconfig.py
|
import os
# toolchain options
ARCH='arm'
CPU='cortex-m0'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# CROSS_TOOL selects the cross compiler
# EXEC_PATH is the path to the compiler executables, e.g. CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m0plus -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M0 '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M0'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M0'
AFLAGS += ' --fpu None'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
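# Illustrative note (not part of the original file): RT-Thread's SCons build
# scripts read this rtconfig.py, so the toolchain can be selected through the
# environment variables checked above, e.g.
#   RTT_CC=gcc RTT_EXEC_PATH=/opt/gcc-arm-none-eabi/bin scons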
|
[] |
[] |
[
"RTT_CC",
"RTT_ROOT",
"RTT_EXEC_PATH"
] |
[]
|
["RTT_CC", "RTT_ROOT", "RTT_EXEC_PATH"]
|
python
| 3 | 0 | |
en2wubi/en2wubi/__init__.py
|
# -*- encoding: utf-8 -*-
"""Initialization file for en2wubi package.
Author: Yuhuang Hu
Email : [email protected]
"""
import os
from os.path import join
E2W_PATH = os.environ["EN2WUBI_PATH"]
E2W_DATA_PATH = join(E2W_PATH, "data")
E2W_EN_DATA_PATH = join(E2W_DATA_PATH, "en") # English data
E2W_CN_DATA_PATH = join(E2W_DATA_PATH, "cn") # Chinese character data
E2W_PY_DATA_PATH = join(E2W_DATA_PATH, "wb") # Chinese character encodings
E2W_PACKAGE_PATH = os.path.dirname(os.path.abspath(__file__))
E2W_PACKAGE_DATA_PATH = join(E2W_PACKAGE_PATH, "e2w_data")
E2W_PACKAGE_DICT_PATH = join(E2W_PACKAGE_DATA_PATH, "dict")
E2W_CH_WUBI_PATH = join(E2W_PACKAGE_DICT_PATH, "chinese_wubi")
E2W_WUBI_CH_PATH = join(E2W_PACKAGE_DICT_PATH, "wubi_chinese")
# Create necessary folder structure
if not os.path.isdir(E2W_PATH):
os.makedirs(E2W_PATH)
if not os.path.isdir(E2W_DATA_PATH):
os.makedirs(E2W_DATA_PATH)
if not os.path.isdir(E2W_CN_DATA_PATH):
os.makedirs(E2W_CN_DATA_PATH)
if not os.path.isdir(E2W_EN_DATA_PATH):
os.makedirs(E2W_EN_DATA_PATH)
if not os.path.isdir(E2W_PY_DATA_PATH):
os.makedirs(E2W_PY_DATA_PATH)
# Mapping from Chinese punctuation characters to their English equivalents
CH2EN_PUNC = {ord(f): ord(t)
for f, t in zip(
u',。!?【】()%#@&1234567890;:',
u',.!?[]()%#@&1234567890;:')}
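# Reverse mapping: English punctuation back to the Chinese equivalents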
EN2CH_PUNC = {ord(f): ord(t)
for f, t in zip(
u',.!?[]()%#@&1234567890;:',
u',。!?【】()%#@&1234567890;:')}
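# Illustrative usage of the two tables with str.translate (not part of the
# original module):
#   u'，。！？'.translate(CH2EN_PUNC)  # -> u',.!?'
#   u',.!?'.translate(EN2CH_PUNC)    # -> u'，。！？'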
|
[] |
[] |
[
"EN2WUBI_PATH"
] |
[]
|
["EN2WUBI_PATH"]
|
python
| 1 | 0 | |
escrutiniosocial/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "escrutiniosocial.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
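# Typical invocations (illustrative, standard Django usage):
#   python manage.py migrate
#   python manage.py runserver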
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
userbot/__init__.py
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot initialization. """
import os
from sys import version_info
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from dotenv import load_dotenv
from requests import get
from telethon import TelegramClient
from telethon.sessions import StringSession
load_dotenv("config.env")
# Bot Logs setup:
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
if version_info < (3, 8):
    LOGS.info("You MUST have a Python version of at least 3.8. "
              "Multiple features depend on this. Bot quitting.")
quit(1)
# Check if the config was edited by using the already used variable.
# Basically, it's the 'virginity check' for the config file ;)
CONFIG_CHECK = os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________", None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
# Telegram App KEY and HASH
API_KEY = os.environ.get("API_KEY", None)
API_HASH = os.environ.get("API_HASH", None)
# Userbot Session String
STRING_SESSION = os.environ.get("STRING_SESSION", None)
# Logging channel/group ID configuration.
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID") or 0)  # 0 means not configured
# Userbot logging feature switch.
BOTLOG = sb(os.environ.get("BOTLOG", "False"))
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER", "False"))
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "False"))
# Heroku Credentials for updater.
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", None)
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", None)
# Github Credentials for updater and Gitupload.
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME", None)
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
# Custom (forked) repo URL for updater.
UPSTREAM_REPO_URL = os.environ.get(
"UPSTREAM_REPO_URL",
"https://github.com/alfiananda84/ProjectBish.git")
# UPSTREAM_REPO_URL branch, the default is master
UPSTREAM_REPO_BRANCH = os.environ.get(
"UPSTREAM_REPO_BRANCH", "master")
# Console verbose logging
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL", None)
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
# Telegraph
TELEGRAPH_SHORT_NAME = os.environ.get("TELEGRAPH_SHORT_NAME", None)
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", None)
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None)
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None)
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY", None)
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False"))
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False"))
# Youtube API key
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", None)
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY", ""))
TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1))
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True"))
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX", None)
DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None)
LASTFM_API = os.environ.get("LASTFM_API", None)
LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None)
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None)
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None)
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN) if LASTFM_PASSWORD_PLAIN else None
if LASTFM_API and LASTFM_SECRET and LASTFM_USERNAME and LASTFM_PASS:
lastfm = LastFMNetwork(api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS)
else:
lastfm = None
# Google Drive Module
G_DRIVE_DATA = os.environ.get("G_DRIVE_DATA", None)
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None)
G_DRIVE_FOLDER_ID = os.environ.get("G_DRIVE_FOLDER_ID", None)
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY",
"./downloads")
# Terminal Alias
TERM_ALIAS = os.environ.get("TERM_ALIAS", None)
# Genius Lyrics API
GENIUS = os.environ.get("GENIUS_ACCESS_TOKEN", None)
# Quote
QUOTES_API_TOKEN = os.environ.get("QUOTES_API_TOKEN", None)
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists('bin'):
os.mkdir('bin')
binaries = {
"https://raw.githubusercontent.com/adekmaulana/megadown/master/megadown":
"bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py":
"bin/cmrudl"
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
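# 0o755 (rwxr-xr-x) marks the downloaded megadown/cmrudl helpers as executable.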
# 'bot' variable
if STRING_SESSION:
# pylint: disable=invalid-name
bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH)
else:
# pylint: disable=invalid-name
bot = TelegramClient("userbot", API_KEY, API_HASH)
async def check_botlog_chatid():
if not BOTLOG_CHATID and LOGSPAMMER:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the private error log storage to work."
)
quit(1)
elif not BOTLOG_CHATID and BOTLOG:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the userbot logging feature to work."
)
quit(1)
elif not BOTLOG or not LOGSPAMMER:
return
entity = await bot.get_entity(BOTLOG_CHATID)
if entity.default_banned_rights.send_messages:
LOGS.info(
"Your account doesn't have rights to send messages to BOTLOG_CHATID "
"group. Check if you typed the Chat ID correctly.")
quit(1)
with bot:
try:
bot.loop.run_until_complete(check_botlog_chatid())
except Exception:
LOGS.info(
"BOTLOG_CHATID environment variable isn't a "
"valid entity. Check your environment variables/config.env file.")
quit(1)
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
CMD_HELP = {}
ISAFK = False
AFKREASON = None
ZALG_LIST = {}
|
[] |
[] |
[
"GOOGLE_CHROME_BIN",
"G_DRIVE_CLIENT_SECRET",
"COUNTRY",
"LASTFM_API",
"ANTI_SPAMBOT_SHOUT",
"UPSTREAM_REPO_URL",
"OCR_SPACE_API_KEY",
"BIO_PREFIX",
"LOGSPAMMER",
"TZ_NUMBER",
"G_DRIVE_FOLDER_ID",
"LASTFM_PASSWORD",
"TELEGRAPH_SHORT_NAME",
"DATABASE_URL",
"HEROKU_APP_NAME",
"___________PLOX_______REMOVE_____THIS_____LINE__________",
"GIT_REPO_NAME",
"HEROKU_API_KEY",
"CHROME_DRIVER",
"YOUTUBE_API_KEY",
"LASTFM_USERNAME",
"G_DRIVE_CLIENT_ID",
"API_KEY",
"PM_AUTO_BAN",
"DEFAULT_BIO",
"ANTI_SPAMBOT",
"OPEN_WEATHER_MAP_APPID",
"LASTFM_SECRET",
"G_DRIVE_AUTH_TOKEN_DATA",
"UPSTREAM_REPO_BRANCH",
"WEATHER_DEFCITY",
"STRING_SESSION",
"QUOTES_API_TOKEN",
"CONSOLE_LOGGER_VERBOSE",
"GITHUB_ACCESS_TOKEN",
"ALIVE_NAME",
"BOTLOG_CHATID",
"TMP_DOWNLOAD_DIRECTORY",
"CLEAN_WELCOME",
"GENIUS_ACCESS_TOKEN",
"G_DRIVE_DATA",
"REM_BG_API_KEY",
"BOTLOG",
"API_HASH",
"TERM_ALIAS"
] |
[]
|
["GOOGLE_CHROME_BIN", "G_DRIVE_CLIENT_SECRET", "COUNTRY", "LASTFM_API", "ANTI_SPAMBOT_SHOUT", "UPSTREAM_REPO_URL", "OCR_SPACE_API_KEY", "BIO_PREFIX", "LOGSPAMMER", "TZ_NUMBER", "G_DRIVE_FOLDER_ID", "LASTFM_PASSWORD", "TELEGRAPH_SHORT_NAME", "DATABASE_URL", "HEROKU_APP_NAME", "___________PLOX_______REMOVE_____THIS_____LINE__________", "GIT_REPO_NAME", "HEROKU_API_KEY", "CHROME_DRIVER", "YOUTUBE_API_KEY", "LASTFM_USERNAME", "G_DRIVE_CLIENT_ID", "API_KEY", "PM_AUTO_BAN", "DEFAULT_BIO", "ANTI_SPAMBOT", "OPEN_WEATHER_MAP_APPID", "LASTFM_SECRET", "G_DRIVE_AUTH_TOKEN_DATA", "UPSTREAM_REPO_BRANCH", "WEATHER_DEFCITY", "STRING_SESSION", "QUOTES_API_TOKEN", "CONSOLE_LOGGER_VERBOSE", "GITHUB_ACCESS_TOKEN", "ALIVE_NAME", "BOTLOG_CHATID", "TMP_DOWNLOAD_DIRECTORY", "CLEAN_WELCOME", "GENIUS_ACCESS_TOKEN", "G_DRIVE_DATA", "REM_BG_API_KEY", "BOTLOG", "API_HASH", "TERM_ALIAS"]
|
python
| 45 | 0 | |
k3s_test.go
|
package dtest
import (
"context"
"os"
"runtime"
"testing"
"github.com/stretchr/testify/assert"
"github.com/datawire/dlib/dlog"
)
func TestMain(m *testing.M) {
// Skip the tests when running in CI on a non-Linux OS, because the container can only run on Linux.
isCi := os.Getenv("CI")
if isCi == "true" && runtime.GOOS != "linux" {
return
}
// Take the machine lock to make sure we are the only thing running,
// because the NAT tests interfere with Docker functionality.
WithMachineLock(context.TODO(), func(ctx context.Context) {
os.Exit(m.Run())
})
}
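// TestContainer exercises the dtest Docker helpers: it starts an nginx
// container, checks that it is listed as running, kills it, and checks
// that it is no longer listed.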
func TestContainer(t *testing.T) {
ctx := dlog.NewTestContext(t, false)
id := dockerUp(ctx, "dtest-test-tag", "nginx")
running := dockerPs(ctx)
assert.Contains(t, running, id)
dockerKill(ctx, id)
running = dockerPs(ctx)
assert.NotContains(t, running, id)
}
|
[
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
tests/test_marathon_lb.py
|
import copy
import json
import unittest
import os
import string
import random
import marathon_lb
class TestMarathonUpdateHaproxy(unittest.TestCase):
def setUp(self):
if 'HAPROXY_GLOBAL_DEFAULT_OPTIONS' in os.environ:
del os.environ['HAPROXY_GLOBAL_DEFAULT_OPTIONS']
self.base_config = '''global
log /dev/log local0
log /dev/log local1 notice
spread-checks 5
max-spread-checks 15000
maxconn 50000
tune.ssl.default-dh-param 2048
ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:\
ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:\
ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:\
DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:\
ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:DHE-RSA-AES128-SHA256:\
DHE-RSA-AES256-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:\
AES256-SHA256:!aNULL:!MD5:!DSS
ssl-default-bind-options no-sslv3 no-tlsv10 no-tls-tickets
ssl-default-server-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:\
ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:\
ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:\
DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:\
ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:DHE-RSA-AES128-SHA256:\
DHE-RSA-AES256-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:\
AES256-SHA256:!aNULL:!MD5:!DSS
ssl-default-server-options no-sslv3 no-tlsv10 no-tls-tickets
stats socket /var/run/haproxy/socket expose-fd listeners
server-state-file global
server-state-base /var/state/haproxy/
lua-load /marathon-lb/getpids.lua
lua-load /marathon-lb/getconfig.lua
lua-load /marathon-lb/getmaps.lua
lua-load /marathon-lb/signalmlb.lua
defaults
load-server-state-from-file global
log global
retries 3
backlog 10000
maxconn 10000
timeout connect 3s
timeout client 30s
timeout server 30s
timeout tunnel 3600s
timeout http-keep-alive 1s
timeout http-request 15s
timeout queue 30s
timeout tarpit 60s
option dontlognull
option http-server-close
option redispatch
listen stats
bind 0.0.0.0:9090
balance
mode http
stats enable
http-request use-service prometheus-exporter if { path /metrics }
monitor-uri /_haproxy_health_check
acl getpid path /_haproxy_getpids
http-request use-service lua.getpids if getpid
acl getvhostmap path /_haproxy_getvhostmap
http-request use-service lua.getvhostmap if getvhostmap
acl getappmap path /_haproxy_getappmap
http-request use-service lua.getappmap if getappmap
acl getconfig path /_haproxy_getconfig
http-request use-service lua.getconfig if getconfig
acl signalmlbhup path /_mlb_signal/hup
http-request use-service lua.signalmlbhup if signalmlbhup
acl signalmlbusr1 path /_mlb_signal/usr1
http-request use-service lua.signalmlbusr1 if signalmlbusr1
'''
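# base_config above is the static global/defaults/stats preamble; every
# expected HAProxy configuration in the tests below is built by appending
# app-specific frontend/backend sections to it.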
def test_config_no_apps(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
'''
print("actual config:\n")
print(config)
self.assertMultiLineEqual(config, expected)
def test_config_env_template(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
os.environ["HAPROXY_HTTP_FRONTEND_HEAD"] = '''
frontend changed_frontend
bind *:80
mode http
'''
templater = marathon_lb.ConfigTemplater()
del os.environ["HAPROXY_HTTP_FRONTEND_HEAD"]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend changed_frontend
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
'''
print("actual config:\n")
print(config)
self.assertMultiLineEqual(config, expected)
def test_config_with_ssl_no_apps(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = "/etc/haproxy/mysite.com.pem"
templater = marathon_lb.ConfigTemplater()
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
frontend marathon_https_in
bind *:443 ssl crt /etc/haproxy/mysite.com.pem
mode http
'''
self.assertMultiLineEqual(config, expected)
def test_config_with_multissl_no_apps(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = "/etc/haproxy/mysite1.com.pem,/etc/haproxy/mysite2.com.pem"
templater = marathon_lb.ConfigTemplater()
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
frontend marathon_https_in
'''
expected += " bind *:443 ssl crt /etc/haproxy/mysite1.com.pem " \
"crt /etc/haproxy/mysite2.com.pem"
expected += "\n mode http\n"
self.assertMultiLineEqual(config, expected)
def test_config_simple_app(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.groups = ['external']
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_healthcheck_command(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "COMMAND",
"command": {
"value": "curl -f -X GET http://$HOST:$PORT0/health"
},
# no portIndex
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.groups = ['external']
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10000
bind *:10000
mode tcp
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode tcp
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_vhost(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com"
app.groups = ['external']
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl host_test_example_com_nginx hdr(host) -i test.example.com
use_backend nginx_10000 if host_test_example_com_nginx
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
use_backend nginx_10000 if { ssl_fc_sni test.example.com }
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_multiple_vhost(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com,test"
app.groups = ['external']
app.add_backend("agent1", "192.0.2.1", 1234, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl host_test_example_com_nginx hdr(host) -i test.example.com
acl host_test_example_com_nginx hdr(host) -i test
use_backend nginx_10000 if host_test_example_com_nginx
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
use_backend nginx_10000 if { ssl_fc_sni test.example.com }
use_backend nginx_10000 if { ssl_fc_sni test }
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1234 192.0.2.1:1234 id 15582 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_vhost_and_redirect(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com"
app.groups = ['external']
app.add_backend("agent1", "1.1.1.1", 1024, False)
app.redirectHttpToHttps = True
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl host_test_example_com_nginx hdr(host) -i test.example.com
use_backend nginx_10000 if host_test_example_com_nginx
redirect scheme https code 301 if !{ ssl_fc } host_test_example_com_nginx
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
use_backend nginx_10000 if { ssl_fc_sni test.example.com }
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_multiple_vhost_and_redirect(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com,test"
app.groups = ['external']
app.redirectHttpToHttps = True
app.add_backend("agent1", "192.0.2.1", 1234, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl host_test_example_com_nginx hdr(host) -i test.example.com
acl host_test_example_com_nginx hdr(host) -i test
redirect scheme https code 301 if !{ ssl_fc } host_test_example_com_nginx
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
use_backend nginx_10000 if { ssl_fc_sni test.example.com }
use_backend nginx_10000 if { ssl_fc_sni test }
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1234 192.0.2.1:1234 id 15582 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_vhost_with_auth(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com"
app.groups = ['external']
app.authRealm = "realm"
app.authUser = "testuser"
app.authPasswd = "testpasswd"
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
userlist user_nginx_10000
user testuser password testpasswd
frontend marathon_http_in
bind *:80
mode http
acl host_test_example_com_nginx hdr(host) -i test.example.com
acl auth_test_example_com_nginx http_auth(user_nginx_10000)
http-request auth realm "realm" if host_test_example_com_nginx \
!auth_test_example_com_nginx
use_backend nginx_10000 if host_test_example_com_nginx
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl auth_test_example_com_nginx http_auth(user_nginx_10000)
http-request auth realm "realm" if { ssl_fc_sni test.example.com } \
!auth_test_example_com_nginx
use_backend nginx_10000 if { ssl_fc_sni test.example.com }
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_multiple_vhost_and_auth(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com,test"
app.authRealm = "realm"
app.authUser = "testuser"
app.authPasswd = "testpasswd"
app.groups = ['external']
app.add_backend("agent1", "192.0.2.1", 1234, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
userlist user_nginx_10000
user testuser password testpasswd
frontend marathon_http_in
bind *:80
mode http
acl host_test_example_com_nginx hdr(host) -i test.example.com
acl host_test_example_com_nginx hdr(host) -i test
acl auth_test_example_com_nginx http_auth(user_nginx_10000)
http-request auth realm "realm" if host_test_example_com_nginx \
!auth_test_example_com_nginx
use_backend nginx_10000 if host_test_example_com_nginx
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl auth_test_example_com_nginx http_auth(user_nginx_10000)
http-request auth realm "realm" if { ssl_fc_sni test.example.com } \
!auth_test_example_com_nginx
use_backend nginx_10000 if { ssl_fc_sni test.example.com }
acl auth_test_example_com_nginx http_auth(user_nginx_10000)
http-request auth realm "realm" if { ssl_fc_sni test } \
!auth_test_example_com_nginx
use_backend nginx_10000 if { ssl_fc_sni test }
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1234 192.0.2.1:1234 id 15582 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_vhost_with_path_and_auth(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com"
app.path = '/some/path'
app.groups = ['external']
app.authRealm = "realm"
app.authUser = "testuser"
app.authPasswd = "testpasswd"
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
userlist user_nginx_10000
user testuser password testpasswd
frontend marathon_http_in
bind *:80
mode http
acl host_test_example_com_nginx hdr(host) -i test.example.com
acl auth_test_example_com_nginx http_auth(user_nginx_10000)
acl path_nginx_10000 path_beg /some/path
http-request auth realm "realm" if host_test_example_com_nginx \
path_nginx_10000 !auth_test_example_com_nginx
use_backend nginx_10000 if host_test_example_com_nginx path_nginx_10000
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl path_nginx_10000 path_beg /some/path
acl auth_test_example_com_nginx http_auth(user_nginx_10000)
http-request auth realm "realm" if { ssl_fc_sni test.example.com } \
path_nginx_10000 !auth_test_example_com_nginx
use_backend nginx_10000 if { ssl_fc_sni test.example.com } path_nginx_10000
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_multiple_vhost_with_path_and_auth(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com,test"
app.path = '/some/path'
app.groups = ['external']
app.authRealm = "realm"
app.authUser = "testuser"
app.authPasswd = "testpasswd"
app.add_backend("agent1", "192.0.2.1", 1234, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
userlist user_nginx_10000
user testuser password testpasswd
frontend marathon_http_in
bind *:80
mode http
acl path_nginx_10000 path_beg /some/path
acl auth_test_example_com_nginx http_auth(user_nginx_10000)
acl host_test_example_com_nginx hdr(host) -i test.example.com
acl host_test_example_com_nginx hdr(host) -i test
http-request auth realm "realm" if host_test_example_com_nginx \
path_nginx_10000 !auth_test_example_com_nginx
use_backend nginx_10000 if host_test_example_com_nginx path_nginx_10000
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl path_nginx_10000 path_beg /some/path
acl auth_test_example_com_nginx http_auth(user_nginx_10000)
http-request auth realm "realm" if { ssl_fc_sni test.example.com } \
path_nginx_10000 !auth_test_example_com_nginx
use_backend nginx_10000 if { ssl_fc_sni test.example.com } ''' + \
'''path_nginx_10000
acl auth_test_example_com_nginx http_auth(user_nginx_10000)
http-request auth realm "realm" if { ssl_fc_sni test } \
path_nginx_10000 !auth_test_example_com_nginx
use_backend nginx_10000 if { ssl_fc_sni test } path_nginx_10000
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1234 192.0.2.1:1234 id 15582 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_vhost_with_path(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com"
app.path = '/some/path'
app.groups = ['external']
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl host_test_example_com_nginx hdr(host) -i test.example.com
acl path_nginx_10000 path_beg /some/path
use_backend nginx_10000 if host_test_example_com_nginx path_nginx_10000
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl path_nginx_10000 path_beg /some/path
use_backend nginx_10000 if { ssl_fc_sni test.example.com } path_nginx_10000
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_multiple_vhost_with_path(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com,test"
app.path = '/some/path'
app.groups = ['external']
app.add_backend("agent1", "192.0.2.1", 1234, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl path_nginx_10000 path_beg /some/path
acl host_test_example_com_nginx hdr(host) -i test.example.com
acl host_test_example_com_nginx hdr(host) -i test
use_backend nginx_10000 if host_test_example_com_nginx path_nginx_10000
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl path_nginx_10000 path_beg /some/path
use_backend nginx_10000 if { ssl_fc_sni test.example.com } ''' + \
'''path_nginx_10000
use_backend nginx_10000 if { ssl_fc_sni test } path_nginx_10000
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1234 192.0.2.1:1234 id 15582 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_vhost_with_path_and_redirect(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com"
app.path = '/some/path'
app.groups = ['external']
app.add_backend("agent1", "1.1.1.1", 1024, False)
app.redirectHttpToHttps = True
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl host_test_example_com_nginx hdr(host) -i test.example.com
acl path_nginx_10000 path_beg /some/path
redirect scheme https code 301 if !{ ssl_fc } host_test_example_com_nginx\
path_nginx_10000
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl path_nginx_10000 path_beg /some/path
use_backend nginx_10000 if { ssl_fc_sni test.example.com } path_nginx_10000
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_multiple_vhost_with_path_and_redirect(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com,test"
app.path = '/some/path'
app.groups = ['external']
app.redirectHttpToHttps = True
app.add_backend("agent1", "192.0.2.1", 1234, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl path_nginx_10000 path_beg /some/path
acl host_test_example_com_nginx hdr(host) -i test.example.com
acl host_test_example_com_nginx hdr(host) -i test
redirect scheme https code 301 if !{ ssl_fc } host_test_example_com_nginx\
path_nginx_10000
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl path_nginx_10000 path_beg /some/path
use_backend nginx_10000 if { ssl_fc_sni test.example.com } ''' + \
'''path_nginx_10000
use_backend nginx_10000 if { ssl_fc_sni test } path_nginx_10000
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1234 192.0.2.1:1234 id 15582 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_multiple_vhost_path_redirect_hsts(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com,test"
app.path = '/some/path'
app.groups = ['external']
app.redirectHttpToHttps = True
app.useHsts = True
app.add_backend("agent1", "192.0.2.1", 1234, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl path_nginx_10000 path_beg /some/path
acl host_test_example_com_nginx hdr(host) -i test.example.com
acl host_test_example_com_nginx hdr(host) -i test
redirect scheme https code 301 if !{ ssl_fc } host_test_example_com_nginx\
path_nginx_10000
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl path_nginx_10000 path_beg /some/path
use_backend nginx_10000 if { ssl_fc_sni test.example.com } ''' + \
r'''path_nginx_10000
use_backend nginx_10000 if { ssl_fc_sni test } path_nginx_10000
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
rspadd Strict-Transport-Security:\ max-age=15768000
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1234 192.0.2.1:1234 id 15582 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_balance(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.balance = "leastconn"
app.groups = ['external']
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance leastconn
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_bridge_app_marathon15(self):
with open('tests/marathon15_apps.json') as data_file:
apps = json.load(data_file)
class Marathon:
def __init__(self, data):
self.data = data
def list(self):
return self.data
def health_check(self):
return True
def strict_mode(self):
return False
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
apps = marathon_lb.get_apps(Marathon(apps['apps']))
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl host_myvhost_com_pywebserver hdr(host) -i myvhost.com
use_backend pywebserver_10101 if host_myvhost_com_pywebserver
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__pywebserver hdr(x-marathon-app-id) -i /pywebserver
use_backend pywebserver_10101 if app__pywebserver
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
use_backend pywebserver_10101 if { ssl_fc_sni myvhost.com }
frontend pywebserver_10101
bind *:10101
mode http
use_backend pywebserver_10101
backend pywebserver_10101
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
server 10_0_2_148_1565 10.0.2.148:1565 id 16827
'''
self.assertMultiLineEqual(config, expected)
def test_zdd_app(self):
with open('tests/zdd_apps.json') as data_file:
zdd_apps = json.load(data_file)
class Marathon:
def __init__(self, data):
self.data = data
def list(self):
return self.data
def health_check(self):
return True
def strict_mode(self):
return False
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
apps = marathon_lb.get_apps(Marathon(zdd_apps['apps']))
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server 10_0_1_147_25724 10.0.1.147:25724 id 9975 check inter 3s fall 11
server 10_0_6_25_16916 10.0.6.25:16916 id 14685 check inter 3s fall 11
server 10_0_6_25_23336 10.0.6.25:23336 id 14676 check inter 3s fall 11
server 10_0_6_25_31184 10.0.6.25:31184 id 27565 check inter 3s fall 11 disabled
''' # noqa: E501
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_healthcheck_port(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"port": 1024,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.groups = ['external']
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11 port 1024
''' # noqa: E501
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_healthcheck_port_using_another_portindex(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.groups = ['external']
app.add_backend("agent1", "192.0.2.1", 1024, False)
app.healthcheck_port_index = 1
admin_app = marathon_lb.MarathonService('/nginx', 10001, healthCheck,
strictMode)
admin_app.groups = ['external']
admin_app.add_backend("agent1", "192.0.2.1", 1025, False)
apps = [app, admin_app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
frontend nginx_10001
bind *:10001
mode http
use_backend nginx_10001
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1024 192.0.2.1:1024 id 18199 check inter 2s fall 11 port 1025
backend nginx_10001
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1025 192.0.2.1:1025 id 22260 check inter 2s fall 11
''' # noqa: E501
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_healthcheck_port_diff_portindex_and_group(self):
apps = dict()
groups = ['external', 'internal']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.groups = ['external']
app.add_backend("agent1", "192.0.2.1", 1024, False)
app.healthcheck_port_index = 1
admin_app = marathon_lb.MarathonService('/nginx', 10001, healthCheck,
strictMode)
admin_app.groups = ['internal']
admin_app.add_backend("agent1", "192.0.2.1", 1025, False)
apps = [app, admin_app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
frontend nginx_10001
bind *:10001
mode http
use_backend nginx_10001
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1024 192.0.2.1:1024 id 18199 check inter 2s fall 11 port 1025
backend nginx_10001
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1025 192.0.2.1:1025 id 22260 check inter 2s fall 11
''' # noqa: E501
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_healthcheck_port_portindex_out_of_range(self):
"""
see marathon_lb.get_backend_port(apps, app, idx) for the implementation.
If app.healthcheck_port_index has an out-of-bounds value,
then the app's idx-th backend is returned instead.
:return:
"""
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.groups = ['external']
app.add_backend("agent1", "192.0.2.1", 1024, False)
app.healthcheck_port_index = 3
admin_app = marathon_lb.MarathonService('/nginx', 10001, healthCheck,
strictMode)
admin_app.groups = ['external']
admin_app.add_backend("agent1", "192.0.2.1", 1025, False)
apps = [app, admin_app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
frontend nginx_10001
bind *:10001
mode http
use_backend nginx_10001
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1024 192.0.2.1:1024 id 18199 check inter 2s fall 11 port 1024
backend nginx_10001
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1025 192.0.2.1:1025 id 22260 check inter 2s fall 11
''' # noqa: E501
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_tcp_healthcheck(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"protocol": "TCP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.groups = ['external']
app.add_backend("agent1", "1.1.1.1", 1024, False)
app.hostname = "test.example.com"
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl host_test_example_com_nginx hdr(host) -i test.example.com
use_backend nginx_10000 if host_test_example_com_nginx
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
use_backend nginx_10000 if { ssl_fc_sni test.example.com }
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_haproxy_group_fallback(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {}
app1 = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app1.groups = ['external', 'internal']
app1.add_backend("agent1", "1.1.1.1", 1024, False)
app2 = marathon_lb.MarathonService('/nginx', 10001, healthCheck,
strictMode)
app2.groups = ['external', 'internal']
app2.add_backend("agent1", "1.1.1.1", 1025, False)
apps = [app1, app2]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10000
bind *:10000
mode tcp
use_backend nginx_10000
frontend nginx_10001
bind *:10001
mode tcp
use_backend nginx_10001
backend nginx_10000
balance roundrobin
mode tcp
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363
backend nginx_10001
balance roundrobin
mode tcp
server agent1_1_1_1_1_1025 1.1.1.1:1025 id 19971
'''
self.assertMultiLineEqual(config, expected)
def test_config_haproxy_group_per_service(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {}
app1 = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app1.haproxy_groups = ['external']
app1.add_backend("agent1", "1.1.1.1", 1024, False)
app2 = marathon_lb.MarathonService('/nginx', 10001, healthCheck,
strictMode)
app2.haproxy_groups = ['internal']
app2.add_backend("agent1", "1.1.1.1", 1025, False)
apps = [app1, app2]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10000
bind *:10000
mode tcp
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode tcp
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363
'''
self.assertMultiLineEqual(config, expected)
def test_config_haproxy_group_hybrid(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {}
app1 = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app1.haproxy_groups = ['internal']
app1.add_backend("agent1", "1.1.1.1", 1024, False)
app2 = marathon_lb.MarathonService('/nginx', 10001, healthCheck,
strictMode)
app2.groups = ['external']
app2.add_backend("agent1", "1.1.1.1", 1025, False)
apps = [app1, app2]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10001
bind *:10001
mode tcp
use_backend nginx_10001
backend nginx_10001
balance roundrobin
mode tcp
server agent1_1_1_1_1_1025 1.1.1.1:1025 id 19971
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_proxypass(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.groups = ['external']
app.hostname = 'test.example.com'
app.proxypath = '/test/'
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + r'''
frontend marathon_http_in
bind *:80
mode http
acl host_test_example_com_nginx hdr(host) -i test.example.com
use_backend nginx_10000 if host_test_example_com_nginx
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
use_backend nginx_10000 if { ssl_fc_sni test.example.com }
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header Host test.example.com
reqirep "^([^ :]*)\ /test//?(.*)" "\1\ /\2"
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_revproxy(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.groups = ['external']
app.hostname = 'test.example.com'
app.revproxypath = '/test'
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl host_test_example_com_nginx hdr(host) -i test.example.com
use_backend nginx_10000 if host_test_example_com_nginx
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
use_backend nginx_10000 if { ssl_fc_sni test.example.com }
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
acl hdr_location res.hdr(Location) -m found
rspirep "^Location: (https?://test.example.com(:[0-9]+)?)?(/.*)" "Location: \
/test" if hdr_location
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_redirect(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.groups = ['external']
app.hostname = 'test.example.com'
app.redirpath = '/test'
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl host_test_example_com_nginx hdr(host) -i test.example.com
use_backend nginx_10000 if host_test_example_com_nginx
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
use_backend nginx_10000 if { ssl_fc_sni test.example.com }
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
acl is_root path -i /
acl is_domain hdr(host) -i test.example.com
redirect code 301 location /test if is_domain is_root
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_sticky(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.groups = ['external']
app.add_backend("agent1", "1.1.1.1", 1024, False)
app.sticky = True
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10000
bind *:10000
mode tcp
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode tcp
cookie mesosphere_server_id insert indirect nocache
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check cookie d6ad48c81f
'''
self.assertMultiLineEqual(config, expected)
def test_config_multi_app_multiple_vhost_with_path(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.hostname = "test.example.com,test"
app.path = '/some/path'
app.groups = ['external']
app1 = copy.deepcopy(app)
app2 = copy.deepcopy(app)
app3 = copy.deepcopy(app)
app.add_backend("agent1", "192.0.2.1", 1234, False)
app1.backend_weight = 1
app1.appId += '1'
app1.add_backend("agent1", "192.0.2.1", 2234, False)
app2.backend_weight = 2
app2.appId += '2'
app2.add_backend("agent1", "192.0.2.1", 3234, False)
app3.backend_weight = 3
app3.appId += '3'
app3.add_backend("agent1", "192.0.2.1", 4234, False)
apps = [app, app1, app2, app3]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl path_nginx3_10000 path_beg /some/path
acl host_test_example_com_nginx3 hdr(host) -i test.example.com
acl host_test_example_com_nginx3 hdr(host) -i test
use_backend nginx3_10000 if host_test_example_com_nginx3 path_nginx3_10000
acl path_nginx2_10000 path_beg /some/path
acl host_test_example_com_nginx2 hdr(host) -i test.example.com
acl host_test_example_com_nginx2 hdr(host) -i test
use_backend nginx2_10000 if host_test_example_com_nginx2 path_nginx2_10000
acl path_nginx1_10000 path_beg /some/path
acl host_test_example_com_nginx1 hdr(host) -i test.example.com
acl host_test_example_com_nginx1 hdr(host) -i test
use_backend nginx1_10000 if host_test_example_com_nginx1 path_nginx1_10000
acl path_nginx_10000 path_beg /some/path
acl host_test_example_com_nginx hdr(host) -i test.example.com
acl host_test_example_com_nginx hdr(host) -i test
use_backend nginx_10000 if host_test_example_com_nginx path_nginx_10000
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
acl app__nginx1 hdr(x-marathon-app-id) -i /nginx1
use_backend nginx1_10000 if app__nginx1
acl app__nginx2 hdr(x-marathon-app-id) -i /nginx2
use_backend nginx2_10000 if app__nginx2
acl app__nginx3 hdr(x-marathon-app-id) -i /nginx3
use_backend nginx3_10000 if app__nginx3
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl path_nginx3_10000 path_beg /some/path
use_backend nginx3_10000 if { ssl_fc_sni test.example.com } path_nginx3_10000
use_backend nginx3_10000 if { ssl_fc_sni test } path_nginx3_10000
acl path_nginx2_10000 path_beg /some/path
use_backend nginx2_10000 if { ssl_fc_sni test.example.com } path_nginx2_10000
use_backend nginx2_10000 if { ssl_fc_sni test } path_nginx2_10000
acl path_nginx1_10000 path_beg /some/path
use_backend nginx1_10000 if { ssl_fc_sni test.example.com } path_nginx1_10000
use_backend nginx1_10000 if { ssl_fc_sni test } path_nginx1_10000
acl path_nginx_10000 path_beg /some/path
use_backend nginx_10000 if { ssl_fc_sni test.example.com } path_nginx_10000
use_backend nginx_10000 if { ssl_fc_sni test } path_nginx_10000
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
frontend nginx1_10000
bind *:10000
mode http
use_backend nginx1_10000
frontend nginx2_10000
bind *:10000
mode http
use_backend nginx2_10000
frontend nginx3_10000
bind *:10000
mode http
use_backend nginx3_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_1234 192.0.2.1:1234 id 15582 check inter 2s fall 11
backend nginx1_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_2234 192.0.2.1:2234 id 20338 check inter 2s fall 11
backend nginx2_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_3234 192.0.2.1:3234 id 3933 check inter 2s fall 11
backend nginx3_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 10s
server agent1_192_0_2_1_4234 192.0.2.1:4234 id 31229 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_haproxy_map(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 15,
"intervalSeconds": 3,
"timeoutSeconds": 15,
"maxConsecutiveFailures": 10
}
app1 = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app1.hostname = "server.nginx.net,server.nginx1.net"
app1.haproxy_groups = ['external']
app1.add_backend("agent1", "1.1.1.1", 1024, False)
app2 = marathon_lb.MarathonService('/apache', 10001, healthCheck,
strictMode)
app2.hostname = "server.apache.net"
app2.haproxy_groups = ['external']
app2.add_backend("agent2", "2.2.2.2", 1025, False)
apps = [app1, app2]
haproxy_map = True
domain_map_array = []
app_map_array = []
config_file = "/etc/haproxy/haproxy.cfg"
config = marathon_lb.config(apps, groups, bind_http_https, ssl_certs,
templater, haproxy_map, domain_map_array,
app_map_array, config_file)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
use_backend %[req.hdr(host),lower,regsub(:.*$,,),\
map(/etc/haproxy/domain2backend.map)]
frontend marathon_http_appid_in
bind *:9091
mode http
use_backend %[req.hdr(x-marathon-app-id),lower,\
map(/etc/haproxy/app2backend.map)]
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
use_backend %[ssl_fc_sni,lower,map(/etc/haproxy/domain2backend.map)]
frontend apache_10001
bind *:10001
mode http
use_backend apache_10001
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend apache_10001
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent2_2_2_2_2_1025 2.2.2.2:1025 id 5918 check inter 3s fall 11
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 3s fall 11
'''
self.assertMultiLineEqual(config, expected)
# Check the domain map
domain_config_map = {}
for element in domain_map_array:
for key, value in list(element.items()):
domain_config_map[key] = value
expected_domain_map = {}
expected_domain_map["server.nginx.net"] = "nginx_10000"
expected_domain_map["server.nginx1.net"] = "nginx_10000"
expected_domain_map["server.apache.net"] = "apache_10001"
self.assertEqual(domain_config_map, expected_domain_map)
# Check the app map
app_config_map = {}
for element in app_map_array:
for key, value in list(element.items()):
app_config_map[key] = value
expected_app_map = {}
expected_app_map["/apache"] = "apache_10001"
expected_app_map["/nginx"] = "nginx_10000"
self.assertEqual(app_config_map, expected_app_map)
def test_config_haproxy_map_hybrid(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 15,
"intervalSeconds": 3,
"timeoutSeconds": 15,
"maxConsecutiveFailures": 10
}
app1 = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app1.hostname = "server.nginx.net,server.nginx1.net"
app1.haproxy_groups = ['external']
app1.add_backend("agent1", "1.1.1.1", 1024, False)
app2 = marathon_lb.MarathonService('/apache', 10001, healthCheck,
strictMode)
app2.hostname = "server.apache.net"
app2.path = "/apache"
app2.haproxy_groups = ['external']
app2.add_backend("agent2", "2.2.2.2", 1025, False)
apps = [app1, app2]
haproxy_map = True
domain_map_array = []
app_map_array = []
config_file = "/etc/haproxy/haproxy.cfg"
config = marathon_lb.config(apps, groups, bind_http_https, ssl_certs,
templater, haproxy_map, domain_map_array,
app_map_array, config_file)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl host_server_apache_net_apache hdr(host) -i server.apache.net
acl path_apache_10001 path_beg /apache
use_backend apache_10001 if host_server_apache_net_apache path_apache_10001
use_backend %[req.hdr(host),lower,regsub(:.*$,,),\
map(/etc/haproxy/domain2backend.map)]
frontend marathon_http_appid_in
bind *:9091
mode http
use_backend %[req.hdr(x-marathon-app-id),lower,\
map(/etc/haproxy/app2backend.map)]
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl path_apache_10001 path_beg /apache
use_backend apache_10001 if { ssl_fc_sni server.apache.net } \
path_apache_10001
use_backend %[ssl_fc_sni,lower,map(/etc/haproxy/domain2backend.map)]
frontend apache_10001
bind *:10001
mode http
use_backend apache_10001
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend apache_10001
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent2_2_2_2_2_1025 2.2.2.2:1025 id 5918 check inter 3s fall 11
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 3s fall 11
'''
self.assertMultiLineEqual(config, expected)
# Check the domain map
domain_config_map = {}
for element in domain_map_array:
for key, value in list(element.items()):
domain_config_map[key] = value
expected_domain_map = {}
expected_domain_map["server.nginx.net"] = "nginx_10000"
expected_domain_map["server.nginx1.net"] = "nginx_10000"
self.assertEqual(domain_config_map, expected_domain_map)
# Check the app map
app_config_map = {}
for element in app_map_array:
for key, value in list(element.items()):
app_config_map[key] = value
expected_app_map = {}
expected_app_map["/apache"] = "apache_10001"
expected_app_map["/nginx"] = "nginx_10000"
self.assertEqual(app_config_map, expected_app_map)
# Tests a scenario in which two applications are deployed,
# one with authentication and the other without. The app id
# of the one without authentication comes before the other
# one when sorted alphabetically. In this scenario we expect
# the 'domain2backend.map' use_backend definition to still be defined last.
def test_config_haproxy_map_auth_noauth(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 15,
"intervalSeconds": 3,
"timeoutSeconds": 15,
"maxConsecutiveFailures": 10
}
app1 = marathon_lb.MarathonService('/nginx1', 10000, healthCheck,
strictMode)
app1.hostname = "server.nginx.net"
app1.haproxy_groups = ['external']
app1.add_backend("agent1", "1.1.1.1", 1024, False)
app2 = marathon_lb.MarathonService('/nginx2', 10001, healthCheck,
strictMode)
app2.hostname = "server.nginx.net"
app2.authRealm = "realm"
app2.authUser = "testuser"
app2.authPasswd = "testpasswd"
app2.haproxy_groups = ['external']
app2.add_backend("agent2", "2.2.2.2", 1025, False)
apps = [app1, app2]
haproxy_map = True
domain_map_array = []
app_map_array = []
config_file = "/etc/haproxy/haproxy.cfg"
config = marathon_lb.config(apps, groups, bind_http_https, ssl_certs,
templater, haproxy_map, domain_map_array,
app_map_array, config_file)
expected = self.base_config + '''
userlist user_nginx2_10001
user testuser password testpasswd
frontend marathon_http_in
bind *:80
mode http
acl host_server_nginx_net_nginx2 hdr(host) -i server.nginx.net
acl auth_server_nginx_net_nginx2 http_auth(user_nginx2_10001)
http-request auth realm "realm" if host_server_nginx_net_nginx2 \
!auth_server_nginx_net_nginx2
use_backend nginx2_10001 if host_server_nginx_net_nginx2
use_backend %[req.hdr(host),lower,regsub(:.*$,,),\
map(/etc/haproxy/domain2backend.map)]
frontend marathon_http_appid_in
bind *:9091
mode http
use_backend %[req.hdr(x-marathon-app-id),lower,\
map(/etc/haproxy/app2backend.map)]
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl auth_server_nginx_net_nginx2 http_auth(user_nginx2_10001)
http-request auth realm "realm" if { ssl_fc_sni server.nginx.net } \
!auth_server_nginx_net_nginx2
use_backend nginx2_10001 if { ssl_fc_sni server.nginx.net }
use_backend %[ssl_fc_sni,lower,map(/etc/haproxy/domain2backend.map)]
frontend nginx1_10000
bind *:10000
mode http
use_backend nginx1_10000
frontend nginx2_10001
bind *:10001
mode http
use_backend nginx2_10001
backend nginx1_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 3s fall 11
backend nginx2_10001
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent2_2_2_2_2_1025 2.2.2.2:1025 id 5918 check inter 3s fall 11
'''
self.assertMultiLineEqual(config, expected)
# Check the domain map
domain_config_map = {}
for element in domain_map_array:
for key, value in list(element.items()):
domain_config_map[key] = value
expected_domain_map = {}
expected_domain_map["server.nginx.net"] = "nginx1_10000"
self.assertEqual(domain_config_map, expected_domain_map)
# Check the app map
app_config_map = {}
for element in app_map_array:
for key, value in list(element.items()):
app_config_map[key] = value
expected_app_map = {}
expected_app_map["/nginx2"] = "nginx2_10001"
expected_app_map["/nginx1"] = "nginx1_10000"
self.assertEqual(app_config_map, expected_app_map)
def test_config_haproxy_map_hybrid_with_vhost_path(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 15,
"intervalSeconds": 3,
"timeoutSeconds": 15,
"maxConsecutiveFailures": 10
}
app1 = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app1.hostname = "server.nginx.net,server.nginx1.net"
app1.haproxy_groups = ['external']
app1.add_backend("agent1", "1.1.1.1", 1024, False)
app2 = marathon_lb.MarathonService('/apache', 10001, healthCheck,
strictMode)
app2.hostname = "server.apache.net,server.apache1.net"
app2.path = "/apache"
app2.haproxy_groups = ['external']
app2.add_backend("agent2", "2.2.2.2", 1025, False)
apps = [app1, app2]
haproxy_map = True
domain_map_array = []
app_map_array = []
config_file = "/etc/haproxy/haproxy.cfg"
config = marathon_lb.config(apps, groups, bind_http_https, ssl_certs,
templater, haproxy_map, domain_map_array,
app_map_array, config_file)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl path_apache_10001 path_beg /apache
acl host_server_apache_net_apache hdr(host) -i server.apache.net
acl host_server_apache_net_apache hdr(host) -i server.apache1.net
use_backend apache_10001 if host_server_apache_net_apache path_apache_10001
use_backend %[req.hdr(host),lower,regsub(:.*$,,),\
map(/etc/haproxy/domain2backend.map)]
frontend marathon_http_appid_in
bind *:9091
mode http
use_backend %[req.hdr(x-marathon-app-id),lower,\
map(/etc/haproxy/app2backend.map)]
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl path_apache_10001 path_beg /apache
use_backend apache_10001 if { ssl_fc_sni server.apache.net } \
path_apache_10001
use_backend apache_10001 if { ssl_fc_sni server.apache1.net } \
path_apache_10001
use_backend %[ssl_fc_sni,lower,map(/etc/haproxy/domain2backend.map)]
frontend apache_10001
bind *:10001
mode http
use_backend apache_10001
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend apache_10001
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent2_2_2_2_2_1025 2.2.2.2:1025 id 5918 check inter 3s fall 11
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 3s fall 11
'''
self.assertMultiLineEqual(config, expected)
# Check the domain map
domain_config_map = {}
for element in domain_map_array:
for key, value in list(element.items()):
domain_config_map[key] = value
expected_domain_map = {}
expected_domain_map["server.nginx.net"] = "nginx_10000"
expected_domain_map["server.nginx1.net"] = "nginx_10000"
self.assertEqual(domain_config_map, expected_domain_map)
# Check the app map
app_config_map = {}
for element in app_map_array:
for key, value in list(element.items()):
app_config_map[key] = value
expected_app_map = {}
expected_app_map["/apache"] = "apache_10001"
expected_app_map["/nginx"] = "nginx_10000"
self.assertEqual(app_config_map, expected_app_map)
def test_config_haproxy_map_hybrid_httptohttps_redirect(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 15,
"intervalSeconds": 3,
"timeoutSeconds": 15,
"maxConsecutiveFailures": 10
}
app1 = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app1.hostname = "server.nginx.net,server.nginx1.net"
app1.haproxy_groups = ['external']
app1.add_backend("agent1", "1.1.1.1", 1024, False)
app2 = marathon_lb.MarathonService('/apache', 10001, healthCheck,
strictMode)
app2.hostname = "server.apache.net,server.apache1.net"
app2.haproxy_groups = ['external']
app2.add_backend("agent2", "2.2.2.2", 1025, False)
app2.redirectHttpToHttps = True
apps = [app1, app2]
haproxy_map = True
domain_map_array = []
app_map_array = []
config_file = "/etc/haproxy/haproxy.cfg"
config = marathon_lb.config(apps, groups, bind_http_https, ssl_certs,
templater, haproxy_map, domain_map_array,
app_map_array, config_file)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl host_server_apache_net_apache hdr(host) -i server.apache.net
acl host_server_apache_net_apache hdr(host) -i server.apache1.net
redirect scheme https code 301 if !{ ssl_fc } host_server_apache_net_apache
use_backend %[req.hdr(host),lower,regsub(:.*$,,),\
map(/etc/haproxy/domain2backend.map)]
frontend marathon_http_appid_in
bind *:9091
mode http
use_backend %[req.hdr(x-marathon-app-id),lower,\
map(/etc/haproxy/app2backend.map)]
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
use_backend %[ssl_fc_sni,lower,map(/etc/haproxy/domain2backend.map)]
frontend apache_10001
bind *:10001
mode http
use_backend apache_10001
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend apache_10001
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent2_2_2_2_2_1025 2.2.2.2:1025 id 5918 check inter 3s fall 11
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 3s fall 11
'''
self.assertMultiLineEqual(config, expected)
# Check the domain map
domain_config_map = {}
for element in domain_map_array:
for key, value in list(element.items()):
domain_config_map[key] = value
expected_domain_map = {}
expected_domain_map["server.nginx.net"] = "nginx_10000"
expected_domain_map["server.nginx1.net"] = "nginx_10000"
expected_domain_map["server.apache.net"] = "apache_10001"
expected_domain_map["server.apache1.net"] = "apache_10001"
self.assertEqual(domain_config_map, expected_domain_map)
# Check the app map
app_config_map = {}
for element in app_map_array:
for key, value in list(element.items()):
app_config_map[key] = value
expected_app_map = {}
expected_app_map["/apache"] = "apache_10001"
expected_app_map["/nginx"] = "nginx_10000"
self.assertEqual(app_config_map, expected_app_map)
def test_config_simple_app_long_backend_proxypass(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.proxypath = "/testing1234_1234-1234-test123_testing"
app.path = "/testing1234_1234-1234-test123_testing"
app.hostname = "testhost.com"
app.groups = ['external']
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl host_testhost_com_nginx hdr(host) -i testhost.com
acl path_nginx_10000 path_beg ''' + app.path + '''
use_backend nginx_10000 if host_testhost_com_nginx path_nginx_10000
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
acl path_nginx_10000 path_beg ''' + app.path + r'''
use_backend nginx_10000 if { ssl_fc_sni testhost.com } path_nginx_10000
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header Host testhost.com
reqirep "^([^ :]*)\ ''' + app.proxypath + r'''/?(.*)" "\1\ /\2"
option httpchk GET /
timeout check 10s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_proxypass_health_check(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.proxypath = "/proxy/path"
app.groups = ['external']
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + r'''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header Host None
reqirep "^([^ :]*)\ /proxy/path/?(.*)" "\1\ /\2"
option httpchk GET /
timeout check 10s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_config_simple_app_revproxy_health_check(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 10
}
app = marathon_lb.MarathonService('/nginx', 10000, healthCheck,
strictMode)
app.revproxypath = "/proxy/path"
app.groups = ['external']
app.add_backend("agent1", "1.1.1.1", 1024, False)
apps = [app]
config = marathon_lb.config(apps, groups, bind_http_https,
ssl_certs, templater)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
frontend marathon_http_appid_in
bind *:9091
mode http
acl app__nginx hdr(x-marathon-app-id) -i /nginx
use_backend nginx_10000 if app__nginx
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
frontend nginx_10000
bind *:10000
mode http
use_backend nginx_10000
backend nginx_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
acl hdr_location res.hdr(Location) -m found
rspirep "^Location: (https?://None(:[0-9]+)?)?(/.*)" \
"Location: /proxy/path" if hdr_location
option httpchk GET /
timeout check 10s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 2s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_strict_mode_on_and_off(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 15,
"intervalSeconds": 3,
"timeoutSeconds": 15,
"maxConsecutiveFailures": 10
}
app1 = marathon_lb.MarathonService('/nginx1', 10000, healthCheck,
True)
app1.hostname = "server.nginx.net"
app1.haproxy_groups = ['external']
app1.add_backend("agent1", "1.1.1.1", 1024, False)
app2 = marathon_lb.MarathonService('/nginx2', 10001, healthCheck,
False)
app2.hostname = "server.nginx.net"
app2.haproxy_groups = ['external']
app2.add_backend("agent2", "2.2.2.2", 1025, False)
apps = [app1, app2]
haproxy_map = True
domain_map_array = []
app_map_array = []
config_file = "/etc/haproxy/haproxy.cfg"
config = marathon_lb.config(apps, groups, bind_http_https, ssl_certs,
templater, haproxy_map, domain_map_array,
app_map_array, config_file)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
use_backend %[req.hdr(host),lower,regsub(:.*$,,),\
map(/etc/haproxy/domain2backend.map)]
frontend marathon_http_appid_in
bind *:9091
mode http
use_backend %[req.hdr(x-marathon-app-id),lower,\
map(/etc/haproxy/app2backend.map)]
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
use_backend %[ssl_fc_sni,lower,map(/etc/haproxy/domain2backend.map)]
frontend nginx2_10001
bind *:10001
mode http
use_backend nginx2_10001
backend nginx2_10001
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent2_2_2_2_2_1025 2.2.2.2:1025 id 5918 check inter 3s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_backend_disabled_and_enabled(self):
apps = dict()
groups = ['external']
bind_http_https = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 15,
"intervalSeconds": 3,
"timeoutSeconds": 15,
"maxConsecutiveFailures": 10
}
app1 = marathon_lb.MarathonService('/nginx1', 10000, healthCheck,
strictMode)
app1.hostname = "server.nginx.net"
app1.haproxy_groups = ['external']
app1.enabled = False
app1.add_backend("agent1", "1.1.1.1", 1024, False)
app2 = marathon_lb.MarathonService('/nginx2', 10001, healthCheck,
strictMode)
app2.hostname = "server.nginx.net"
app2.haproxy_groups = ['external']
app2.enabled = True
app2.add_backend("agent2", "2.2.2.2", 1025, False)
apps = [app1, app2]
haproxy_map = True
domain_map_array = []
app_map_array = []
config_file = "/etc/haproxy/haproxy.cfg"
config = marathon_lb.config(apps, groups, bind_http_https, ssl_certs,
templater, haproxy_map, domain_map_array,
app_map_array, config_file)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
use_backend %[req.hdr(host),lower,regsub(:.*$,,),\
map(/etc/haproxy/domain2backend.map)]
frontend marathon_http_appid_in
bind *:9091
mode http
use_backend %[req.hdr(x-marathon-app-id),lower,\
map(/etc/haproxy/app2backend.map)]
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/cert.pem
mode http
use_backend %[ssl_fc_sni,lower,map(/etc/haproxy/domain2backend.map)]
frontend nginx2_10001
bind *:10001
mode http
use_backend nginx2_10001
backend nginx2_10001
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent2_2_2_2_2_1025 2.2.2.2:1025 id 5918 check inter 3s fall 11
'''
self.assertMultiLineEqual(config, expected)
def test_group_https_by_vhost(self):
groups = ['external']
bind_http_https = True
group_https_by_vhost = True
ssl_certs = ""
templater = marathon_lb.ConfigTemplater()
strictMode = False
healthCheck = {
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 15,
"intervalSeconds": 3,
"timeoutSeconds": 15,
"maxConsecutiveFailures": 10
}
app1 = marathon_lb.MarathonService('/nginx1', 10000, healthCheck,
strictMode)
app1.hostname = "server.nginx.net,server3.nginx.net"
app1.haproxy_groups = ['external']
app1.enabled = True
app1.add_backend("agent1", "1.1.1.1", 1024, False)
app2 = marathon_lb.MarathonService('/nginx2', 10001, healthCheck,
strictMode)
app2.hostname = "server2.nginx.net"
app2.haproxy_groups = ['external']
app2.sslCert = "/etc/cert"
app2.bindOptions = "verify required"
app2.path = "/test"
app2.enabled = True
app2.add_backend("agent2", "2.2.2.2", 1025, False)
app3 = marathon_lb.MarathonService('/nginx3', 10002, healthCheck,
strictMode)
app3.hostname = "server2.nginx.net"
app3.path = "/test2"
app3.haproxy_groups = ['external']
app3.enabled = True
app3.backend_weight = 99
app3.add_backend("agent3", "3.3.3.3", 1026, False)
app4 = marathon_lb.MarathonService('/letsencrypt', 10002, healthCheck,
strictMode)
app4.hostname = "nginx.net,server2.nginx.net,server.nginx.net"
app4.path = "/.well-known/acme-challenge"
app4.haproxy_groups = ['external']
app4.enabled = True
app4.backend_weight = 99
app4.add_backend("agent4", "4.4.4.4", 1026, False)
apps = [app1, app2, app3, app4]
haproxy_map = True
domain_map_array = []
app_map_array = []
config_file = "/etc/haproxy/haproxy.cfg"
config = marathon_lb.config(apps, groups, bind_http_https, ssl_certs,
templater, haproxy_map, domain_map_array,
app_map_array, config_file,
group_https_by_vhost)
expected = self.base_config + '''
frontend marathon_http_in
bind *:80
mode http
acl path_letsencrypt_10002 path_beg /.well-known/acme-challenge
acl host_nginx_net_letsencrypt hdr(host) -i nginx.net
acl host_nginx_net_letsencrypt hdr(host) -i server2.nginx.net
acl host_nginx_net_letsencrypt hdr(host) -i server.nginx.net
use_backend letsencrypt_10002 \
if host_nginx_net_letsencrypt path_letsencrypt_10002
acl host_server2_nginx_net_nginx3 hdr(host) -i server2.nginx.net
acl path_nginx3_10002 path_beg /test2
use_backend nginx3_10002 \
if host_server2_nginx_net_nginx3 path_nginx3_10002
acl host_server2_nginx_net_nginx2 hdr(host) -i server2.nginx.net
acl path_nginx2_10001 path_beg /test
use_backend nginx2_10001 if host_server2_nginx_net_nginx2 path_nginx2_10001
use_backend %[req.hdr(host),\
lower,regsub(:.*$,,),map(/etc/haproxy/domain2backend.map)]
frontend marathon_http_appid_in
bind *:9091
mode http
use_backend %[req.hdr(x-marathon-app-id),\
lower,map(/etc/haproxy/app2backend.map)]
frontend marathon_https_in
bind *:443
mode tcp
tcp-request inspect-delay 5s
tcp-request content accept if { req_ssl_hello_type 1 }
use_backend nginx_net if { req_ssl_sni -i nginx.net }
use_backend server_nginx_net if { req_ssl_sni -i server.nginx.net }
use_backend server2_nginx_net if { req_ssl_sni -i server2.nginx.net }
use_backend server3_nginx_net if { req_ssl_sni -i server3.nginx.net }
backend nginx_net
server loopback-for-tls abns@nginx_net send-proxy-v2
frontend nginx_net
mode http
bind abns@nginx_net accept-proxy ssl crt /etc/ssl/cert.pem
acl path_letsencrypt_10002 path_beg /.well-known/acme-challenge
use_backend letsencrypt_10002 \
if { ssl_fc_sni nginx.net } path_letsencrypt_10002
backend server_nginx_net
server loopback-for-tls abns@server_nginx_net send-proxy-v2
frontend server_nginx_net
mode http
bind abns@server_nginx_net accept-proxy ssl crt /etc/ssl/cert.pem
acl path_letsencrypt_10002 path_beg /.well-known/acme-challenge
use_backend letsencrypt_10002 \
if { ssl_fc_sni server.nginx.net } path_letsencrypt_10002
use_backend %[ssl_fc_sni,lower,map(/etc/haproxy/domain2backend.map)]
backend server2_nginx_net
server loopback-for-tls abns@server2_nginx_net send-proxy-v2
frontend server2_nginx_net
mode http
bind abns@server2_nginx_net accept-proxy ssl crt /etc/cert verify required
acl path_letsencrypt_10002 path_beg /.well-known/acme-challenge
use_backend letsencrypt_10002 \
if { ssl_fc_sni server2.nginx.net } path_letsencrypt_10002
acl path_nginx3_10002 path_beg /test2
use_backend nginx3_10002 \
if { ssl_fc_sni server2.nginx.net } path_nginx3_10002
acl path_nginx2_10001 path_beg /test
use_backend nginx2_10001 \
if { ssl_fc_sni server2.nginx.net } path_nginx2_10001
backend server3_nginx_net
server loopback-for-tls abns@server3_nginx_net send-proxy-v2
frontend server3_nginx_net
mode http
bind abns@server3_nginx_net accept-proxy ssl crt /etc/ssl/cert.pem
frontend letsencrypt_10002
bind *:10002
mode http
use_backend letsencrypt_10002
frontend nginx1_10000
bind *:10000
mode http
use_backend nginx1_10000
frontend nginx2_10001
bind *:10001 ssl crt /etc/cert verify required
mode http
use_backend nginx2_10001
frontend nginx3_10002
bind *:10002
mode http
use_backend nginx3_10002
backend letsencrypt_10002
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent4_4_4_4_4_1026 4.4.4.4:1026 id 18496 check inter 3s fall 11
backend nginx1_10000
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent1_1_1_1_1_1024 1.1.1.1:1024 id 28363 check inter 3s fall 11
backend nginx2_10001
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent2_2_2_2_2_1025 2.2.2.2:1025 id 5918 check inter 3s fall 11
backend nginx3_10002
balance roundrobin
mode http
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk GET /
timeout check 15s
server agent3_3_3_3_3_1026 3.3.3.3:1026 id 638 check inter 3s fall 11
'''
self.assertMultiLineEqual(config, expected)
class TestFunctions(unittest.TestCase):
def test_json_number(self):
json_value = '1'
data = marathon_lb.load_json(json_value)
expected = 1
self.assertEqual(data, expected)
def test_json_string(self):
json_value = '"1"'
data = marathon_lb.load_json(json_value)
expected = "1"
self.assertEqual(data, expected)
def test_json_nested_null_dict_remain(self):
json_value = '{"key":null,"key2":"y","key3":{"key4":null,"key5":"x"}}'
data = marathon_lb.load_json(json_value)
expected = {'key3': {'key5': 'x'}, 'key2': 'y'}
self.assertEqual(data, expected)
def test_json_nested_null_dict(self):
json_value = '{"key":null,"key2":"y","key3":{"key4":null}}'
data = marathon_lb.load_json(json_value)
expected = {'key3': {}, 'key2': 'y'}
self.assertEqual(data, expected)
def test_json_simple_list_dict(self):
json_value = '["k1",{"k2":null,"k3":"v3"},"k4"]'
data = marathon_lb.load_json(json_value)
expected = ['k1', {'k3': 'v3'}, 'k4']
self.assertEqual(data, expected)
def test_json_nested_null_dict_list(self):
json_value = '["k1",{"k2":null,"k3":["k4",{"k5":null}]},"k6"]'
data = marathon_lb.load_json(json_value)
expected = ['k1', {'k3': ['k4', {}]}, 'k6']
self.assertEqual(data, expected)
class TestServerIdGeneration(unittest.TestCase):
@staticmethod
def _randomword(length):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
def test_if_server_id_is_generated(self):
test_cases = [
('0', 17068),
('1', 25733),
('a', 25929),
('10_0_6_25_23336', 14676),
]
taken_server_ids = set()
for i in range(0, len(test_cases)):
self.assertEqual(
marathon_lb.calculate_server_id(
test_cases[i][0], taken_server_ids),
test_cases[i][1])
self.assertEqual(set([x[1] for x in test_cases]), taken_server_ids)
def test_if_server_id_collisions_are_handled_synthetic(self):
# All the test cases here generate the same server id in the first
# iteration. The idea is that if collisions are handled correctly, the
# calculate_server_id() function will still return a different server
# id for each of them.
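# For illustration, the collision handling can be pictured roughly as
# (an assumed sketch, not the actual implementation):
#
#   server_id = hash(name) % 32768 or 1
#   while server_id in taken_server_ids:
#       server_id = server_id % 32767 + 1  # probe the next id in 1..32767
#   taken_server_ids.add(server_id)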
test_cases = [
('yftjqzplpu', 28876),
('ttccbfrdhi', 7893),
('ilvparharq', 22002),
('gpagkxfzou', 21805),
('dcsfcvfolh', 20892),
('tsqkugaath', 25675),
]
taken_server_ids = set()
for i in range(0, len(test_cases)):
self.assertEqual(
marathon_lb.calculate_server_id(
test_cases[i][0], taken_server_ids),
test_cases[i][1])
self.assertEqual(set([x[1] for x in test_cases]), taken_server_ids)
def test_if_server_id_collisions_are_handled_accurate(self):
num_server_names = 30000
# The approach of this test is more real-life like: we generate
# num_server_names unique server names. If num_server_names is close to
# the limit (i.e. 32767), collisions need to be handled gracefully in
# order to generate all the server ids.
# HAProxy is most probably incapable of handling this many backend
# servers, but passing this test should still prove that we have
# enough headroom to handle all real-life scenarios.
taken_server_ids = set()
for i in range(0, num_server_names):
marathon_lb.calculate_server_id(
self._randomword(20), taken_server_ids)
self.assertEqual(len(taken_server_ids), num_server_names)
def test_if_server_id_is_always_nonzero(self):
# This test assumes some knowledge of the internal implementation of
# the tested function, as it uses pre-calculated strings which normally
# would result in server_id==0. Unfortunately, there is no easy way to
# test it more reliably, i.e. without making assumptions about the
# input.
test_cases = [
('uudnntiqtd', 26825),
('rghtavdepy', 5030),
('ofdsehlvjo', 26512),
('adwquoyjfl', 24165),
('oebmwvpofe', 11608),
]
for i in range(0, len(test_cases)):
self.assertEqual(
marathon_lb.calculate_server_id(test_cases[i][0], set()),
test_cases[i][1])
def test_service_name_sequence_to_service_id_sequence_stability(self):
num_server_names = 1000
# This test checks whether, given the same sequence of service_name
# strings, the resulting sequence of service_ids will always be
# the same.
server_ids = dict()
for i in range(0, num_server_names):
sn = self._randomword(20)
server_ids[sn] = list()
for i in range(0, 3):
tmp_set = set()
for sn in server_ids:
server_ids[sn].append(
marathon_lb.calculate_server_id(sn, tmp_set))
for sn in server_ids:
# Compare first and the second server_id for the given server
# name:
self.assertEqual(server_ids[sn][0], server_ids[sn][1])
# Compare second and the third server_id for the given server
# name:
self.assertEqual(server_ids[sn][1], server_ids[sn][2])
def test_if_server_name_cant_be_empty_string(self):
with self.assertRaises(ValueError):
marathon_lb.calculate_server_id('', set())
def test_if_server_name_cant_be_none(self):
with self.assertRaises(ValueError):
marathon_lb.calculate_server_id(None, set())
|
[] |
[] |
[
"HAPROXY_GLOBAL_DEFAULT_OPTIONS",
"HAPROXY_HTTP_FRONTEND_HEAD"
] |
[]
|
["HAPROXY_GLOBAL_DEFAULT_OPTIONS", "HAPROXY_HTTP_FRONTEND_HEAD"]
|
python
| 2 | 0 | |
cmd/server.go
|
package cmd
import (
"context"
"errors"
"fmt"
"log"
"net"
"net/http"
"net/url"
"os"
"time"
"github.com/gorilla/websocket"
"github.com/rickbau5/ws-example/internal/handlers"
"golang.org/x/sync/errgroup"
)
var logger = log.New(os.Stdout, "[ws-server] ", log.LstdFlags)
func Server(ctx context.Context) error {
grp, grpCtx := errgroup.WithContext(context.Background())
host, err := os.Hostname()
if err != nil {
return err
}
mux := http.NewServeMux()
mux.Handle("/healthcheck", &handlers.Health{Start: time.Now()})
mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
})
mux.Handle("/ws", &handlers.WebSocketHandler{
ID: host,
Upgrader: websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
},
Logger: logger,
})
server := http.Server{
Addr: os.Getenv("HTTP_SERVER_ADDR"),
Handler: mux,
ConnState: func(conn net.Conn, state http.ConnState) {
// RemoteAddr is a bare "host:port" string, so split it with net.SplitHostPort
// instead of url.Parse, which rejects a host:port without a scheme.
host, _, err := net.SplitHostPort(conn.RemoteAddr().String())
if err != nil {
return
}
// ignore connections from localhost
if host == "127.0.0.1" || host == "::1" {
return
}
logger.Printf("connection state change: %s -> %s\n", conn.RemoteAddr(), state)
},
BaseContext: func(_ net.Listener) context.Context { return grpCtx },
}
grp.Go(server.ListenAndServe)
// wait for shutdown / error
grp.Go(func() error {
select {
case <-ctx.Done(): // parent cancelled
case <-grpCtx.Done(): // server cancelled
}
shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := server.Shutdown(shutdownCtx); err != nil {
return fmt.Errorf("error in server shutdown: %w", err)
}
return errors.New("shutting down")
})
// all grp goroutines will return an error in all cases which ensures this will close
return grp.Wait()
}
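// Illustrative usage only (not part of the original file): Server is typically
// driven by a context cancelled on SIGINT/SIGTERM, for example via os/signal:
//
//   ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
//   defer stop()
//   if err := cmd.Server(ctx); err != nil {
//       log.Println(err)
//   }
//
// HTTP_SERVER_ADDR selects the listen address; when it is empty, net/http
// defaults to ":http" (port 80).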
|
[
"\"HTTP_SERVER_ADDR\""
] |
[] |
[
"HTTP_SERVER_ADDR"
] |
[]
|
["HTTP_SERVER_ADDR"]
|
go
| 1 | 0 | |
helpers/construct_world.go
|
package helpers
import (
"fmt"
"os"
"path/filepath"
"github.com/cloudfoundry-incubator/consuladapter/consulrunner"
"github.com/cloudfoundry-incubator/diego-ssh/keys"
"github.com/cloudfoundry-incubator/inigo/world"
"github.com/onsi/ginkgo/config"
. "github.com/onsi/gomega"
)
var PreloadedStacks = []string{"red-stack", "blue-stack"}
var DefaultStack = PreloadedStacks[0]
var addresses world.ComponentAddresses
const assetsPath = "../fixtures/certs/"
func MakeComponentMaker(builtArtifacts world.BuiltArtifacts, localIP string) world.ComponentMaker {
gardenBinPath := os.Getenv("GARDEN_BINPATH")
gardenRootFSPath := os.Getenv("GARDEN_ROOTFS")
gardenGraphPath := os.Getenv("GARDEN_GRAPH_PATH")
externalAddress := os.Getenv("EXTERNAL_ADDRESS")
if gardenGraphPath == "" {
gardenGraphPath = os.TempDir()
}
Expect(gardenBinPath).NotTo(BeEmpty(), "must provide $GARDEN_BINPATH")
Expect(gardenRootFSPath).NotTo(BeEmpty(), "must provide $GARDEN_ROOTFS")
Expect(externalAddress).NotTo(BeEmpty(), "must provide $EXTERNAL_ADDRESS")
stackPathMap := map[string]string{}
for _, stack := range PreloadedStacks {
stackPathMap[stack] = gardenRootFSPath
}
addresses = world.ComponentAddresses{
GardenLinux: fmt.Sprintf("127.0.0.1:%d", 10000+config.GinkgoConfig.ParallelNode),
NATS: fmt.Sprintf("127.0.0.1:%d", 11000+config.GinkgoConfig.ParallelNode),
Etcd: fmt.Sprintf("127.0.0.1:%d", 12000+config.GinkgoConfig.ParallelNode),
EtcdPeer: fmt.Sprintf("127.0.0.1:%d", 12500+config.GinkgoConfig.ParallelNode),
Consul: fmt.Sprintf("127.0.0.1:%d", 12750+config.GinkgoConfig.ParallelNode*consulrunner.PortOffsetLength),
Rep: fmt.Sprintf("0.0.0.0:%d", 14000+config.GinkgoConfig.ParallelNode),
FileServer: fmt.Sprintf("%s:%d", localIP, 17000+config.GinkgoConfig.ParallelNode),
Router: fmt.Sprintf("127.0.0.1:%d", 18000+config.GinkgoConfig.ParallelNode),
TPSListener: fmt.Sprintf("127.0.0.1:%d", 19000+config.GinkgoConfig.ParallelNode),
FakeCC: fmt.Sprintf("127.0.0.1:%d", 20000+config.GinkgoConfig.ParallelNode),
BBS: fmt.Sprintf("127.0.0.1:%d", 20500+config.GinkgoConfig.ParallelNode),
Receptor: fmt.Sprintf("127.0.0.1:%d", 21000+config.GinkgoConfig.ParallelNode),
ReceptorTaskHandler: fmt.Sprintf("127.0.0.1:%d", 21500+config.GinkgoConfig.ParallelNode),
Stager: fmt.Sprintf("127.0.0.1:%d", 22000+config.GinkgoConfig.ParallelNode),
NsyncListener: fmt.Sprintf("127.0.0.1:%d", 22500+config.GinkgoConfig.ParallelNode),
Auctioneer: fmt.Sprintf("0.0.0.0:%d", 23000+config.GinkgoConfig.ParallelNode),
SSHProxy: fmt.Sprintf("127.0.0.1:%d", 23500+config.GinkgoConfig.ParallelNode),
}
hostKeyPair, err := keys.RSAKeyPairFactory.NewKeyPair(1024)
Expect(err).NotTo(HaveOccurred())
userKeyPair, err := keys.RSAKeyPairFactory.NewKeyPair(1024)
Expect(err).NotTo(HaveOccurred())
sshKeys := world.SSHKeys{
HostKey: hostKeyPair.PrivateKey(),
HostKeyPem: hostKeyPair.PEMEncodedPrivateKey(),
PrivateKeyPem: userKeyPair.PEMEncodedPrivateKey(),
AuthorizedKey: userKeyPair.AuthorizedKey(),
}
serverCert, err := filepath.Abs(assetsPath + "server.crt")
Expect(err).NotTo(HaveOccurred())
serverKey, err := filepath.Abs(assetsPath + "server.key")
Expect(err).NotTo(HaveOccurred())
clientCrt, err := filepath.Abs(assetsPath + "client.crt")
Expect(err).NotTo(HaveOccurred())
clientKey, err := filepath.Abs(assetsPath + "client.key")
Expect(err).NotTo(HaveOccurred())
caCert, err := filepath.Abs(assetsPath + "ca.crt")
Expect(err).NotTo(HaveOccurred())
sslConfig := world.SSLConfig{
ServerCert: serverCert,
ServerKey: serverKey,
ClientCert: clientCrt,
ClientKey: clientKey,
CACert: caCert,
}
return world.ComponentMaker{
Artifacts: builtArtifacts,
Addresses: addresses,
PreloadedStackPathMap: stackPathMap,
ExternalAddress: externalAddress,
GardenBinPath: gardenBinPath,
GardenGraphPath: gardenGraphPath,
SSHConfig: sshKeys,
SSL: sslConfig,
}
}
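// Example environment for running these tests (illustrative values only; the
// Expect() calls above enforce which variables are mandatory):
//   export GARDEN_BINPATH=/opt/garden/bin
//   export GARDEN_ROOTFS=/opt/warden/rootfs.tar
//   export EXTERNAL_ADDRESS=10.244.0.1
//   export GARDEN_GRAPH_PATH=/tmp/garden-graph   # optional, defaults to os.TempDir()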
|
[
"\"GARDEN_BINPATH\"",
"\"GARDEN_ROOTFS\"",
"\"GARDEN_GRAPH_PATH\"",
"\"EXTERNAL_ADDRESS\""
] |
[] |
[
"GARDEN_ROOTFS",
"EXTERNAL_ADDRESS",
"GARDEN_BINPATH",
"GARDEN_GRAPH_PATH"
] |
[]
|
["GARDEN_ROOTFS", "EXTERNAL_ADDRESS", "GARDEN_BINPATH", "GARDEN_GRAPH_PATH"]
|
go
| 4 | 0 | |
rest_api/simple_supply_rest_api/main.py
|
import argparse
import asyncio
import logging
import sys
import datetime
import time
import os
import bcrypt
from Crypto.Cipher import AES
from zmq.asyncio import ZMQEventLoop
from sawtooth_sdk.processor.log import init_console_logging
from aiohttp import web
from simple_supply_rest_api.route_handler import RouteHandler
from simple_supply_rest_api.database import Database
from simple_supply_rest_api.messaging import Messenger
from os.path import join, dirname
from dotenv import load_dotenv
LOGGER = logging.getLogger(__name__)
def parse_args(args):
parser = argparse.ArgumentParser(
description='Starts the Simple Supply REST API')
parser.add_argument(
'-B', '--bind',
help='identify host and port for api to run on',
default='localhost:8000')
parser.add_argument(
'-C', '--connect',
help='specify URL to connect to a running validator',
default='tcp://localhost:4004')
parser.add_argument(
'-t', '--timeout',
help='set time (in seconds) to wait for a validator response',
default=500)
parser.add_argument(
'--db-name',
help='The name of the database',
default='bev')
parser.add_argument(
'--db-host',
help='The host of the database',
default='localhost')
parser.add_argument(
'--db-port',
help='The port of the database',
default='5432')
parser.add_argument(
'--db-user',
help='The authorized user of the database',
default='sawtooth')
parser.add_argument(
'--db-password',
help="The authorized user's password for database access",
default='sawtooth')
parser.add_argument(
'-v', '--verbose',
action='count',
default=0,
help='enable more verbose output to stderr')
return parser.parse_args(args)
def start_rest_api(host, port, messenger, database):
loop = asyncio.get_event_loop()
loop.run_until_complete(database.connect())
app = web.Application(loop=loop)
app['aes_key'] = os.environ.get("AES_KEY")
app['secret_key'] = os.environ.get("SECRET_KEY")
messenger.open_validator_connection()
handler = RouteHandler(loop, messenger, database)
app.router.add_post('/elections', handler.create_election)
app.router.add_get('/elections/public/current', handler.list_public_elections)
app.router.add_get('/elections/public/past', handler.list_public_past_elections)
app.router.add_get('/elections/{electionId}', handler.get_election)
app.router.add_get('/elections/{electionId}/number_of_votes', handler.get_election_votes)
app.router.add_get('/elections/{electionId}/poll_book', handler.get_poll_registrations)
app.router.add_get('/elections/{electionId}/poll_book/count', handler.count_poll_registrations)
app.router.add_get('/elections/{electionId}/voting_options', handler.list_voting_options_election)
app.router.add_put('/elections/{electionId}', handler.update_election)
app.router.add_patch('/elections/{electionId}/poll_registration/{voterId}/status', handler.update_poll_book_status)
app.router.add_get('/voting_options/{votingOptionId}', handler.get_voting_option)
app.router.add_patch('/voting_options/{votingOptionId}/status', handler.update_voting_option_status)
app.router.add_post('/voters', handler.create_voter)
app.router.add_patch('/voters/{voterId}/type', handler.update_voter_type)
app.router.add_get('/voters/admins', handler.list_admins)
app.router.add_get('/voters/{voterId}', handler.get_voters)
app.router.add_get('/voters/admins/{voterId}/elections', handler.list_admin_elections)
app.router.add_get('/voters/{voterId}/elections/current', handler.list_elections_current)
app.router.add_get('/voters/{voterId}/elections/past', handler.list_elections_past)
app.router.add_get('/voters/{voterId}/election/{electionId}/vote', handler.get_vote_election)
app.router.add_get('/votes/{voteId}', handler.get_vote)
app.router.add_post('/votes/{votingOptionId}', handler.create_vote)
app.router.add_put('/votes/{voteId}', handler.update_vote)
app.router.add_post('/authentication', handler.authenticate)
app.router.add_post('/logout', handler.logout)
LOGGER.info('Starting BEV REST API on %s:%s', host, port)
loop.run_until_complete(create_superadmins(messenger, database))
web.run_app(
app,
host=host,
port=port,
access_log=LOGGER,
access_log_format='%r: %s status, %b size, in %Tf s')
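# Assumed .env layout for the variables read here and in create_superadmins below
# (SUPERADMINS and SUPERADMINS_PASSWORDS are whitespace-separated lists of equal
# length, per the .split() calls):
#   SUPERADMINS="alice bob"
#   SUPERADMINS_PASSWORDS="password1 password2"
#   AES_KEY=<hex-encoded AES key>
#   SECRET_KEY=<session signing secret>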
async def create_superadmins(messenger, database):
if await database.is_superadmin_created() is not None:
return
superadmins = os.environ.get("SUPERADMINS").split()
superadmins_passwords = os.environ.get("SUPERADMINS_PASSWORDS").split()
superadmins_size = len(superadmins)
i = 0
if superadmins_size != len(superadmins_passwords):
LOGGER.exception("Superadmin passwords must be the same number as superadmin IDs")
sys.exit(1)
while i < superadmins_size:
public_key, private_key = messenger.get_new_key_pair()
await messenger.send_create_voter_transaction(
private_key=private_key,
voter_id=superadmins[i],
public_key=public_key,
name=superadmins[i],
created_at=get_time(),
type='SUPERADMIN')
encrypted_private_key = encrypt_private_key(os.environ.get("AES_KEY"), public_key, private_key)
hashed_password = hash_password(superadmins_passwords[i])
await database.create_auth_entry(public_key, encrypted_private_key, hashed_password)
i += 1
LOGGER.info('Created Super Admin accounts')
def encrypt_private_key(aes_key, public_key, private_key):
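# The first 32 hex characters (16 bytes) of the signer's public key double as the AES-CBC IV.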
init_vector = bytes.fromhex(public_key[:32])
cipher = AES.new(bytes.fromhex(aes_key), AES.MODE_CBC, init_vector)
return cipher.encrypt(private_key)
def hash_password(password):
return bcrypt.hashpw(bytes(password, 'utf-8'), bcrypt.gensalt())
def get_time():
dts = datetime.datetime.utcnow()
return round(time.mktime(dts.timetuple()) + dts.microsecond / 1e6)
def main():
loop = ZMQEventLoop()
asyncio.set_event_loop(loop)
try:
opts = parse_args(sys.argv[1:])
init_console_logging(verbose_level=opts.verbose)
validator_url = opts.connect
if "tcp://" not in validator_url:
validator_url = "tcp://" + validator_url
database = Database(
opts.db_host,
opts.db_port,
opts.db_name,
opts.db_user,
opts.db_password,
loop)
messenger = Messenger(validator_url, database)
try:
host, port = opts.bind.split(":")
port = int(port)
except ValueError:
print("Unable to parse binding {}: Must be in the format"
" host:port".format(opts.bind))
sys.exit(1)
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
start_rest_api(host, port, messenger, database)
except Exception as err: # pylint: disable=broad-except
LOGGER.exception(err)
sys.exit(1)
finally:
database.disconnect()
messenger.close_validator_connection()
|
[] |
[] |
[
"SUPERADMINS",
"AES_KEY",
"SECRET_KEY",
"SUPERADMINS_PASSWORDS"
] |
[]
|
["SUPERADMINS", "AES_KEY", "SECRET_KEY", "SUPERADMINS_PASSWORDS"]
|
python
| 4 | 0 | |
vendor/github.com/mattn/go-shellwords/shellwords.go
|
package shellwords
import (
"errors"
"os"
"regexp"
"strings"
)
var (
ParseEnv bool = false
ParseBacktick bool = false
)
var envRe = regexp.MustCompile(`\$({[a-zA-Z0-9_]+}|[a-zA-Z0-9_]+)`)
func isSpace(r rune) bool {
switch r {
case ' ', '\t', '\r', '\n':
return true
}
return false
}
func replaceEnv(getenv func(string) string, s string) string {
if getenv == nil {
getenv = os.Getenv
}
return envRe.ReplaceAllStringFunc(s, func(s string) string {
s = s[1:]
if s[0] == '{' {
s = s[1 : len(s)-1]
}
return getenv(s)
})
}
type Parser struct {
ParseEnv bool
ParseBacktick bool
Position int
Dir string
// If ParseEnv is true, use this for getenv.
// If nil, use os.Getenv.
Getenv func(string) string
}
func NewParser() *Parser {
return &Parser{
ParseEnv: ParseEnv,
ParseBacktick: ParseBacktick,
Position: 0,
Dir: "",
}
}
func (p *Parser) Parse(line string) ([]string, error) {
args := []string{}
buf := ""
var escaped, doubleQuoted, singleQuoted, backQuote, dollarQuote bool
backtick := ""
pos := -1
got := false
loop:
for i, r := range line {
if escaped {
buf += string(r)
escaped = false
continue
}
if r == '\\' {
if singleQuoted {
buf += string(r)
} else {
escaped = true
}
continue
}
if isSpace(r) {
if singleQuoted || doubleQuoted || backQuote || dollarQuote {
buf += string(r)
backtick += string(r)
} else if got {
if p.ParseEnv {
buf = replaceEnv(p.Getenv, buf)
}
args = append(args, buf)
buf = ""
got = false
}
continue
}
switch r {
case '`':
if !singleQuoted && !doubleQuoted && !dollarQuote {
if p.ParseBacktick {
if backQuote {
out, err := shellRun(backtick, p.Dir)
if err != nil {
return nil, err
}
buf = buf[:len(buf)-len(backtick)] + out
}
backtick = ""
backQuote = !backQuote
continue
}
backtick = ""
backQuote = !backQuote
}
case ')':
if !singleQuoted && !doubleQuoted && !backQuote {
if p.ParseBacktick {
if dollarQuote {
out, err := shellRun(backtick, p.Dir)
if err != nil {
return nil, err
}
buf = buf[:len(buf)-len(backtick)-2] + out
}
backtick = ""
dollarQuote = !dollarQuote
continue
}
backtick = ""
dollarQuote = !dollarQuote
}
case '(':
if !singleQuoted && !doubleQuoted && !backQuote {
if !dollarQuote && strings.HasSuffix(buf, "$") {
dollarQuote = true
buf += "("
continue
} else {
return nil, errors.New("invalid command line string")
}
}
case '"':
if !singleQuoted && !dollarQuote {
if doubleQuoted && buf == "" {
got = true
}
doubleQuoted = !doubleQuoted
continue
}
case '\'':
if !doubleQuoted && !dollarQuote {
if singleQuoted && buf == "" {
got = true
}
singleQuoted = !singleQuoted
continue
}
case ';', '&', '|', '<', '>':
if !(escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote) {
if r == '>' && len(buf) > 0 {
if c := buf[0]; '0' <= c && c <= '9' {
i -= 1
got = false
}
}
pos = i
break loop
}
}
got = true
buf += string(r)
if backQuote || dollarQuote {
backtick += string(r)
}
}
if got {
if p.ParseEnv {
buf = replaceEnv(p.Getenv, buf)
}
args = append(args, buf)
}
if escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote {
return nil, errors.New("invalid command line string")
}
p.Position = pos
return args, nil
}
func Parse(line string) ([]string, error) {
return NewParser().Parse(line)
}
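// Usage sketch (illustrative, not part of the vendored file): with ParseEnv set,
// $VAR and ${VAR} tokens are expanded via os.Getenv (or the Parser's Getenv hook):
//
//	p := shellwords.NewParser()
//	p.ParseEnv = true
//	args, _ := p.Parse(`cp "$SRC_DIR/a file.txt" /tmp`)
//	// args -> ["cp", "<value of SRC_DIR>/a file.txt", "/tmp"]  (SRC_DIR is a hypothetical variable)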
|
[] |
[] |
[] |
[]
|
[]
|
go
| 0 | 0 | |
test/functional/test_framework/util.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2019 Bitcoin Association
# Distributed under the Open BSV software license, see the accompanying file LICENSE.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import glob
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = tx_size * fee_per_kB / 1000
if fee < target_fee:
raise AssertionError(
"Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError(
"Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg)
for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError(
"Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError(
"Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError(
"Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError(
"Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError(
"Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError(
"Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" %
(str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, check_interval=0.05, label="wait_until"):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(check_interval)
# Print the cause of the timeout
assert attempts > attempt, f"{label} : max attempts exceeded (attempts={attempt})"
assert timeout >= time.time(), f"{label} : timeout exceeded {timeout}"
raise RuntimeError('Unreachable')
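# Illustrative usage (node is a hypothetical RPC proxy, not defined in this module):
#   wait_until(lambda: node.getblockcount() >= 101, timeout=30, label="initial blocks")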
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
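# Illustrative: with PortSeed.n = 1 and MAX_NODES = 8, p2p_port(0) == 11008 and
# rpc_port(0) == 16008; each test process gets a different offset so parallel runs
# are unlikely to collide.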
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node" + str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("listenonion=0\n")
f.write("shrinkdebugfile=0\n")
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "bitcoin.conf")):
with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
try:
with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
except: pass # any failures while reading the cookie file are treated as if the file was not there
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node" + str(n_node), "regtest", logname)
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
# Disconnects only the outbound connection "from_connection -> node_num"
# If nodes were connected with connect_nodes_bi (default setup_network) use disconnect_nodes_bi to completely split the nodes if needed
def disconnect_nodes(from_connection, node_num):
subver = "testnode%d" % node_num
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if subver in peer['subver'] and not peer['inbound']]:
from_connection.disconnectnode(nodeid=peer_id)
for _ in range(50):
if [peer['id'] for peer in from_connection.getpeerinfo() if subver in peer['subver'] and not peer['inbound']] == []:
break
time.sleep(0.1)
else:
raise AssertionError("timed out waiting for disconnect")
# Disconnects both outbound and inbound connections between nodes[node_a_index] and nodes[node_b_index]
# Inbound connection on one node is implicitly closed as a result of closing the outbound connection on the other node
def disconnect_nodes_bi(nodes, node_a_index, node_b_index):
disconnect_nodes(nodes[node_a_index], node_b_index)
disconnect_nodes(nodes[node_b_index], node_a_index)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def connect_nodes_mesh(nodes, bi=False):
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
if bi:
connect_nodes_bi(nodes, i, j)
else:
connect_nodes(nodes[i], j)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000))
for r in rpc_connections]
if all(t["height"] == maxheight for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
non_final_pool = set(rpc_connections[0].getrawnonfinalmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
pool_match = set(rpc_connections[i].getrawmempool()) == pool
non_final_pool_match = set(rpc_connections[i].getrawnonfinalmempool()) == non_final_pool
if pool_match and non_final_pool_match:
num_match = num_match + 1
if num_match == len(rpc_connections):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
def check_mempool_equals(rpc, should_be_in_mempool, timeout=20):
wait_until(lambda: set(rpc.getrawmempool()) == {t.hash for t in should_be_in_mempool}, timeout=timeout)
# The function checks if transaction/block was rejected
# The actual reject reason is checked if specified
def wait_for_reject_message(conn, reject_reason=None, timeout=5):
wait_until(lambda: ('reject' in list(conn.cb.last_message.keys()) and (
reject_reason == None or conn.cb.last_message['reject'].reason == reject_reason)), timeout=timeout)
if conn.cb.last_message['reject'].message == b'tx':
conn.rpc.log.info('Transaction rejected with ' + (conn.cb.last_message['reject'].reason).decode('utf8') + ' -- OK')
else:
conn.rpc.log.info('Block rejected with ' + (conn.cb.last_message['reject'].reason).decode('utf8') + ' -- OK')
conn.cb.last_message.pop('reject', None)
# The function checks that transaction/block was not rejected
def ensure_no_rejection(conn):
# wait 2 seconds for transaction/block before checking for reject message
time.sleep(2)
wait_until(lambda: not ('reject' in list(conn.cb.last_message.keys())) or conn.cb.last_message[
'reject'].reason == None, timeout=5)
conn.rpc.log.info('Not rejected -- OK')
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" %
(txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append(
{"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" %
(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(
change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount + fee * 2)
outputs = make_change(from_node, total_in, amount + fee, fee)
outputs[self_address] = float(amount + fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount + fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [{"txid": self_txid, "vout": vout}]
outputs = {to_node.getnewaddress(): float(amount)}
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count, age=101):
to_generate = int(0.5 * count) + age
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before
# the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{"txid": coinbase, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE|FORKID")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# each generated transaction is roughly 66kB,
# so 14 of them come close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 200 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def get_srcdir(calling_script=None):
"""
Try to find out the base folder containing the 'src' folder.
If SRCDIR is set it does a sanity check and returns that.
Otherwise it goes on a search and rescue mission.
Returns None if it cannot find a suitable folder.
TODO: This is only used for cdefs, consider moving that there.
"""
def contains_src(path_to_check):
if not path_to_check:
return False
else:
cand_path = os.path.join(path_to_check, 'src')
return os.path.exists(cand_path) and os.path.isdir(cand_path)
srcdir = os.environ.get('SRCDIR', '')
if contains_src(srcdir):
return srcdir
# If we have a caller, try to guess from its location where the
# top level might be.
if calling_script:
caller_basedir = os.path.dirname(
os.path.dirname(os.path.dirname(calling_script)))
if caller_basedir != '' and contains_src(os.path.abspath(caller_basedir)):
return os.path.abspath(caller_basedir)
# Try to work it out based on the main module.
# We might expect the caller to be rpc-tests.py or a test script
# itself.
import sys
mainmod = sys.modules['__main__']
mainmod_path = getattr(mainmod, '__file__', '')
if mainmod_path and mainmod_path.endswith('.py'):
maybe_top = os.path.dirname(
os.path.dirname(os.path.dirname(mainmod_path)))
if contains_src(os.path.abspath(maybe_top)):
return os.path.abspath(maybe_top)
# No luck, give up.
return None
def loghash(inhash=None):
if inhash:
if len(inhash) > 12:
return "{" + inhash[:6] + "...." + inhash[-6:] + "}"
else:
return inhash
else:
return inhash
def check_for_log_msg(rpc, log_msg, node_dir):
for line in open(glob.glob(rpc.options.tmpdir + node_dir + "/regtest/bitcoind.log")[0]):
if log_msg in line:
rpc.log.info("Found line: %s", line)
return True
return False
def hashToHex(hash):
return format(hash, '064x')
|
[] |
[] |
[
"SRCDIR"
] |
[]
|
["SRCDIR"]
|
python
| 1 | 0 | |
roles/keychain/tests/test_keychain.py
|
import unittest
import os
import pytest
import testinfra.utils.ansible_runner
class TestIntermediateSetup(unittest.TestCase):
def setUp(self):
self.host = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_host('instance')
@pytest.mark.parametrize('file, content', [
("/Users/vagrant/Library/LaunchAgents/bitrise.io.tools.keychain-unlocker.plist", "bitrise.io.tools.keychain-unlocker"),
("/opt/bitrise/unlock_keychain.sh", "#!/bin/bash"),
])
def test_keychain_unlocker_files(self, file, content):
file = self.host.file(file)
assert file.exists
assert file.contains(content)
@pytest.mark.parametrize('file, content', [
("/Users/vagrant/logs/bitrise.io.tools.keychain-unlocker.log", "Successfully unlocked login.keychain"),
])
def test_keychain_unlocker_has_run(self, file, content):
file = self.host.file(file)
assert file.exists
assert file.contains(content)
|
[] |
[] |
[
"MOLECULE_INVENTORY_FILE"
] |
[]
|
["MOLECULE_INVENTORY_FILE"]
|
python
| 1 | 0 | |
vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go
|
package grpcclient
import (
"context"
"encoding/json"
"io"
"net"
"os"
"strings"
"time"
"github.com/gogo/googleapis/google/rpc"
gogotypes "github.com/gogo/protobuf/types"
"github.com/golang/protobuf/ptypes/any"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/gateway/client"
pb "github.com/moby/buildkit/frontend/gateway/pb"
opspb "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/grpcerrors"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
fstypes "github.com/tonistiigi/fsutil/types"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
)
const frontendPrefix = "BUILDKIT_FRONTEND_OPT_"
type GrpcClient interface {
Run(context.Context, client.BuildFunc) error
}
func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) (GrpcClient, error) {
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
resp, err := c.Ping(ctx, &pb.PingRequest{})
if err != nil {
return nil, err
}
if resp.FrontendAPICaps == nil {
resp.FrontendAPICaps = defaultCaps()
}
if resp.LLBCaps == nil {
resp.LLBCaps = defaultLLBCaps()
}
return &grpcClient{
client: c,
opts: opts,
sessionID: session,
workers: w,
product: product,
caps: pb.Caps.CapSet(resp.FrontendAPICaps),
llbCaps: opspb.Caps.CapSet(resp.LLBCaps),
requests: map[string]*pb.SolveRequest{},
}, nil
}
func current() (GrpcClient, error) {
if ep := product(); ep != "" {
apicaps.ExportedProduct = ep
}
ctx, conn, err := grpcClientConn(context.Background())
if err != nil {
return nil, err
}
return New(ctx, opts(), sessionID(), product(), pb.NewLLBBridgeClient(conn), workers())
}
func convertRef(ref client.Reference) (*pb.Ref, error) {
if ref == nil {
return &pb.Ref{}, nil
}
r, ok := ref.(*reference)
if !ok {
return nil, errors.Errorf("invalid return reference type %T", ref)
}
return &pb.Ref{Id: r.id, Def: r.def}, nil
}
func RunFromEnvironment(ctx context.Context, f client.BuildFunc) error {
client, err := current()
if err != nil {
return errors.Wrapf(err, "failed to initialize client from environment")
}
return client.Run(ctx, f)
}
func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError error) {
export := c.caps.Supports(pb.CapReturnResult) == nil
var (
res *client.Result
err error
)
if export {
defer func() {
req := &pb.ReturnRequest{}
if retError == nil {
if res == nil {
res = &client.Result{}
}
pbRes := &pb.Result{
Metadata: res.Metadata,
}
if res.Refs != nil {
if c.caps.Supports(pb.CapProtoRefArray) == nil {
m := map[string]*pb.Ref{}
for k, r := range res.Refs {
pbRef, err := convertRef(r)
if err != nil {
retError = err
continue
}
m[k] = pbRef
}
pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: m}}
} else {
// Server doesn't support the new wire format for refs, so we construct
// a deprecated result ref map.
m := map[string]string{}
for k, r := range res.Refs {
pbRef, err := convertRef(r)
if err != nil {
retError = err
continue
}
m[k] = pbRef.Id
}
pbRes.Result = &pb.Result_RefsDeprecated{RefsDeprecated: &pb.RefMapDeprecated{Refs: m}}
}
} else {
pbRef, err := convertRef(res.Ref)
if err != nil {
retError = err
} else {
if c.caps.Supports(pb.CapProtoRefArray) == nil {
pbRes.Result = &pb.Result_Ref{Ref: pbRef}
} else {
// Server doesn't support the new wire format for refs, so we construct
// a deprecated result ref.
pbRes.Result = &pb.Result_RefDeprecated{RefDeprecated: pbRef.Id}
}
}
}
if retError == nil {
req.Result = pbRes
}
}
if retError != nil {
st, _ := status.FromError(grpcerrors.ToGRPC(retError))
stp := st.Proto()
req.Error = &rpc.Status{
Code: stp.Code,
Message: stp.Message,
Details: convertToGogoAny(stp.Details),
}
}
if _, err := c.client.Return(ctx, req); err != nil && retError == nil {
retError = err
}
}()
}
if res, err = f(ctx, c); err != nil {
return err
}
if err := c.caps.Supports(pb.CapReturnMap); len(res.Refs) > 1 && err != nil {
return err
}
if !export {
exportedAttrBytes, err := json.Marshal(res.Metadata)
if err != nil {
return errors.Wrapf(err, "failed to marshal return metadata")
}
req, err := c.requestForRef(res.Ref)
if err != nil {
return errors.Wrapf(err, "failed to find return ref")
}
req.Final = true
req.ExporterAttr = exportedAttrBytes
if _, err := c.client.Solve(ctx, req); err != nil {
return errors.Wrapf(err, "failed to solve")
}
}
return nil
}
// defaultCaps returns the capabilities that were implemented when capabilities
// support was added. This list is frozen and should never be changed.
func defaultCaps() []apicaps.PBCap {
return []apicaps.PBCap{
{ID: string(pb.CapSolveBase), Enabled: true},
{ID: string(pb.CapSolveInlineReturn), Enabled: true},
{ID: string(pb.CapResolveImage), Enabled: true},
{ID: string(pb.CapReadFile), Enabled: true},
}
}
// defaultLLBCaps returns the LLB capabilities that were implemented when capabilities
// support was added. This list is frozen and should never be changed.
func defaultLLBCaps() []apicaps.PBCap {
return []apicaps.PBCap{
{ID: string(opspb.CapSourceImage), Enabled: true},
{ID: string(opspb.CapSourceLocal), Enabled: true},
{ID: string(opspb.CapSourceLocalUnique), Enabled: true},
{ID: string(opspb.CapSourceLocalSessionID), Enabled: true},
{ID: string(opspb.CapSourceLocalIncludePatterns), Enabled: true},
{ID: string(opspb.CapSourceLocalFollowPaths), Enabled: true},
{ID: string(opspb.CapSourceLocalExcludePatterns), Enabled: true},
{ID: string(opspb.CapSourceLocalSharedKeyHint), Enabled: true},
{ID: string(opspb.CapSourceGit), Enabled: true},
{ID: string(opspb.CapSourceGitKeepDir), Enabled: true},
{ID: string(opspb.CapSourceGitFullURL), Enabled: true},
{ID: string(opspb.CapSourceHTTP), Enabled: true},
{ID: string(opspb.CapSourceHTTPChecksum), Enabled: true},
{ID: string(opspb.CapSourceHTTPPerm), Enabled: true},
{ID: string(opspb.CapSourceHTTPUIDGID), Enabled: true},
{ID: string(opspb.CapBuildOpLLBFileName), Enabled: true},
{ID: string(opspb.CapExecMetaBase), Enabled: true},
{ID: string(opspb.CapExecMetaProxy), Enabled: true},
{ID: string(opspb.CapExecMountBind), Enabled: true},
{ID: string(opspb.CapExecMountCache), Enabled: true},
{ID: string(opspb.CapExecMountCacheSharing), Enabled: true},
{ID: string(opspb.CapExecMountSelector), Enabled: true},
{ID: string(opspb.CapExecMountTmpfs), Enabled: true},
{ID: string(opspb.CapExecMountSecret), Enabled: true},
{ID: string(opspb.CapConstraints), Enabled: true},
{ID: string(opspb.CapPlatform), Enabled: true},
{ID: string(opspb.CapMetaIgnoreCache), Enabled: true},
{ID: string(opspb.CapMetaDescription), Enabled: true},
{ID: string(opspb.CapMetaExportCache), Enabled: true},
}
}
type grpcClient struct {
client pb.LLBBridgeClient
opts map[string]string
sessionID string
product string
workers []client.WorkerInfo
caps apicaps.CapSet
llbCaps apicaps.CapSet
requests map[string]*pb.SolveRequest
}
func (c *grpcClient) requestForRef(ref client.Reference) (*pb.SolveRequest, error) {
emptyReq := &pb.SolveRequest{
Definition: &opspb.Definition{},
}
if ref == nil {
return emptyReq, nil
}
r, ok := ref.(*reference)
if !ok {
return nil, errors.Errorf("return reference has invalid type %T", ref)
}
if r.id == "" {
return emptyReq, nil
}
req, ok := c.requests[r.id]
if !ok {
return nil, errors.Errorf("did not find request for return reference %s", r.id)
}
return req, nil
}
func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (*client.Result, error) {
if creq.Definition != nil {
for _, md := range creq.Definition.Metadata {
for cap := range md.Caps {
if err := c.llbCaps.Supports(cap); err != nil {
return nil, err
}
}
}
}
var (
// old API
legacyRegistryCacheImports []string
// new API (CapImportCaches)
cacheImports []*pb.CacheOptionsEntry
)
supportCapImportCaches := c.caps.Supports(pb.CapImportCaches) == nil
for _, im := range creq.CacheImports {
if !supportCapImportCaches && im.Type == "registry" {
legacyRegistryCacheImports = append(legacyRegistryCacheImports, im.Attrs["ref"])
} else {
cacheImports = append(cacheImports, &pb.CacheOptionsEntry{
Type: im.Type,
Attrs: im.Attrs,
})
}
}
req := &pb.SolveRequest{
Definition: creq.Definition,
Frontend: creq.Frontend,
FrontendOpt: creq.FrontendOpt,
FrontendInputs: creq.FrontendInputs,
AllowResultReturn: true,
AllowResultArrayRef: true,
// old API
ImportCacheRefsDeprecated: legacyRegistryCacheImports,
// new API
CacheImports: cacheImports,
}
// backwards compatibility with inline return
if c.caps.Supports(pb.CapReturnResult) != nil {
req.ExporterAttr = []byte("{}")
}
resp, err := c.client.Solve(ctx, req)
if err != nil {
return nil, err
}
res := &client.Result{}
if resp.Result == nil {
if id := resp.Ref; id != "" {
c.requests[id] = req
}
res.SetRef(&reference{id: resp.Ref, c: c})
} else {
res.Metadata = resp.Result.Metadata
switch pbRes := resp.Result.Result.(type) {
case *pb.Result_RefDeprecated:
if id := pbRes.RefDeprecated; id != "" {
res.SetRef(&reference{id: id, c: c})
}
case *pb.Result_RefsDeprecated:
for k, v := range pbRes.RefsDeprecated.Refs {
ref := &reference{id: v, c: c}
if v == "" {
ref = nil
}
res.AddRef(k, ref)
}
case *pb.Result_Ref:
if pbRes.Ref.Id != "" {
ref, err := newReference(c, pbRes.Ref)
if err != nil {
return nil, err
}
res.SetRef(ref)
}
case *pb.Result_Refs:
for k, v := range pbRes.Refs.Refs {
var ref *reference
if v.Id != "" {
ref, err = newReference(c, v)
if err != nil {
return nil, err
}
}
res.AddRef(k, ref)
}
}
}
return res, nil
}
func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
var p *opspb.Platform
if platform := opt.Platform; platform != nil {
p = &opspb.Platform{
OS: platform.OS,
Architecture: platform.Architecture,
Variant: platform.Variant,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
}
}
resp, err := c.client.ResolveImageConfig(ctx, &pb.ResolveImageConfigRequest{Ref: ref, Platform: p, ResolveMode: opt.ResolveMode, LogName: opt.LogName})
if err != nil {
return "", nil, err
}
return resp.Digest, resp.Config, nil
}
func (c *grpcClient) BuildOpts() client.BuildOpts {
return client.BuildOpts{
Opts: c.opts,
SessionID: c.sessionID,
Workers: c.workers,
Product: c.product,
LLBCaps: c.llbCaps,
Caps: c.caps,
}
}
func (c *grpcClient) Inputs(ctx context.Context) (map[string]llb.State, error) {
err := c.caps.Supports(pb.CapFrontendInputs)
if err != nil {
return nil, err
}
resp, err := c.client.Inputs(ctx, &pb.InputsRequest{})
if err != nil {
return nil, err
}
inputs := make(map[string]llb.State)
for key, def := range resp.Definitions {
op, err := llb.NewDefinitionOp(def)
if err != nil {
return nil, err
}
inputs[key] = llb.NewState(op)
}
return inputs, nil
}
type reference struct {
c *grpcClient
id string
def *opspb.Definition
output llb.Output
}
func newReference(c *grpcClient, ref *pb.Ref) (*reference, error) {
return &reference{c: c, id: ref.Id, def: ref.Def}, nil
}
func (r *reference) ToState() (st llb.State, err error) {
err = r.c.caps.Supports(pb.CapReferenceOutput)
if err != nil {
return st, err
}
if r.def == nil {
return st, errors.Errorf("gateway did not return reference with definition")
}
defop, err := llb.NewDefinitionOp(r.def)
if err != nil {
return st, err
}
return llb.NewState(defop), nil
}
func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
rfr := &pb.ReadFileRequest{FilePath: req.Filename, Ref: r.id}
if r := req.Range; r != nil {
rfr.Range = &pb.FileRange{
Offset: int64(r.Offset),
Length: int64(r.Length),
}
}
resp, err := r.c.client.ReadFile(ctx, rfr)
if err != nil {
return nil, err
}
return resp.Data, nil
}
func (r *reference) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstypes.Stat, error) {
if err := r.c.caps.Supports(pb.CapReadDir); err != nil {
return nil, err
}
rdr := &pb.ReadDirRequest{
DirPath: req.Path,
IncludePattern: req.IncludePattern,
Ref: r.id,
}
resp, err := r.c.client.ReadDir(ctx, rdr)
if err != nil {
return nil, err
}
return resp.Entries, nil
}
func (r *reference) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.Stat, error) {
if err := r.c.caps.Supports(pb.CapStatFile); err != nil {
return nil, err
}
rdr := &pb.StatFileRequest{
Path: req.Path,
Ref: r.id,
}
resp, err := r.c.client.StatFile(ctx, rdr)
if err != nil {
return nil, err
}
return resp.Stat, nil
}
func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, error) {
dialOpt := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) {
return stdioConn(), nil
})
cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure(), grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor), grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor))
if err != nil {
return nil, nil, errors.Wrap(err, "failed to create grpc client")
}
ctx, cancel := context.WithCancel(ctx)
_ = cancel
// go monitorHealth(ctx, cc, cancel)
return ctx, cc, nil
}
func stdioConn() net.Conn {
return &conn{os.Stdin, os.Stdout, os.Stdout}
}
type conn struct {
io.Reader
io.Writer
io.Closer
}
func (s *conn) LocalAddr() net.Addr {
return dummyAddr{}
}
func (s *conn) RemoteAddr() net.Addr {
return dummyAddr{}
}
func (s *conn) SetDeadline(t time.Time) error {
return nil
}
func (s *conn) SetReadDeadline(t time.Time) error {
return nil
}
func (s *conn) SetWriteDeadline(t time.Time) error {
return nil
}
type dummyAddr struct {
}
func (d dummyAddr) Network() string {
return "pipe"
}
func (d dummyAddr) String() string {
return "localhost"
}
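// opts collects frontend options passed by the daemon as environment variables of the
// (assumed) form BUILDKIT_FRONTEND_OPT_<n>=<key>=<value>, e.g.
// BUILDKIT_FRONTEND_OPT_0=filename=Dockerfile; only the prefix is actually checked below.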
func opts() map[string]string {
opts := map[string]string{}
for _, env := range os.Environ() {
parts := strings.SplitN(env, "=", 2)
k := parts[0]
v := ""
if len(parts) == 2 {
v = parts[1]
}
if !strings.HasPrefix(k, frontendPrefix) {
continue
}
parts = strings.SplitN(v, "=", 2)
v = ""
if len(parts) == 2 {
v = parts[1]
}
opts[parts[0]] = v
}
return opts
}
func sessionID() string {
return os.Getenv("BUILDKIT_SESSION_ID")
}
func workers() []client.WorkerInfo {
var c []client.WorkerInfo
if err := json.Unmarshal([]byte(os.Getenv("BUILDKIT_WORKERS")), &c); err != nil {
return nil
}
return c
}
func product() string {
return os.Getenv("BUILDKIT_EXPORTEDPRODUCT")
}
func convertToGogoAny(in []*any.Any) []*gogotypes.Any {
out := make([]*gogotypes.Any, len(in))
for i := range in {
out[i] = &gogotypes.Any{TypeUrl: in[i].TypeUrl, Value: in[i].Value}
}
return out
}
|
[
"\"BUILDKIT_SESSION_ID\"",
"\"BUILDKIT_WORKERS\"",
"\"BUILDKIT_EXPORTEDPRODUCT\""
] |
[] |
[
"BUILDKIT_WORKERS",
"BUILDKIT_SESSION_ID",
"BUILDKIT_EXPORTEDPRODUCT"
] |
[]
|
["BUILDKIT_WORKERS", "BUILDKIT_SESSION_ID", "BUILDKIT_EXPORTEDPRODUCT"]
|
go
| 3 | 0 | |
rtmp_callback.go
|
// RTMP callback
package main
import (
"os"
"time"
"net/http"
"github.com/golang-jwt/jwt"
)
const JWT_EXPIRATION_TIME_SECONDS = 120
var JWT_SECRET = os.Getenv("JWT_SECRET")
var CALLBACK_URL = os.Getenv("CALLBACK_URL")
func (s *RTMPSession) SendStartCallback() bool {
if CALLBACK_URL == "" {
return true // No callback
}
LogDebugSession(s.id, s.ip, "POST "+CALLBACK_URL+" | Event: START | Channel: "+s.channel)
var subject = os.Getenv("CUSTOM_JWT_SUBJECT")
if subject == "" {
subject = "rtmp_event"
}
exp := time.Now().Unix() + JWT_EXPIRATION_TIME_SECONDS
token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
"sub": "rtmp_event",
"event": "start",
"channel": s.channel,
"key": s.key,
"client_ip": s.ip,
"rtmp_port": s.server.port,
"exp": exp,
})
tokenb64, e := token.SignedString([]byte(JWT_SECRET))
if e != nil {
LogError(e)
return false
}
client := &http.Client{}
req, e := http.NewRequest("POST", CALLBACK_URL, nil)
if e != nil {
LogError(e)
return false
}
req.Header.Set("rtmp-event", tokenb64)
res, e := client.Do(req)
if e != nil {
LogError(e)
return false
}
if res.StatusCode != 200 {
return false
}
s.stream_id = res.Header.Get("stream-id")
LogDebugSession(s.id, s.ip, "Stream ID: "+s.stream_id)
return true
}
func (s *RTMPSession) SendStopCallback() bool {
if CALLBACK_URL == "" {
return true // No callback
}
LogDebugSession(s.id, s.ip, "POST "+CALLBACK_URL+" | Event: STOP | Channel: "+s.channel)
exp := time.Now().Unix() + JWT_EXPIRATION_TIME_SECONDS
token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
"sub": "rtmp_event",
"event": "stop",
"channel": s.channel,
"key": s.key,
"stream_id": s.stream_id,
"client_ip": s.ip,
"exp": exp,
})
tokenb64, e := token.SignedString([]byte(JWT_SECRET))
if e != nil {
LogError(e)
return false
}
client := &http.Client{}
req, e := http.NewRequest("POST", CALLBACK_URL, nil)
if e != nil {
LogError(e)
return false
}
req.Header.Set("rtmp-event", tokenb64)
res, e := client.Do(req)
if e != nil {
LogError(e)
return false
}
if res.StatusCode != 200 {
return false
}
return true
}
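// Callback contract implied by the code above (a sketch, not a formal spec): the
// receiver at CALLBACK_URL gets a POST with an "rtmp-event" header carrying an
// HS256 JWT signed with JWT_SECRET; claims include sub, event ("start"/"stop"),
// channel, key, client_ip and exp. A 200 response to a "start" event may return
// the assigned "stream-id" response header.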
|
[
"\"JWT_SECRET\"",
"\"CALLBACK_URL\"",
"\"CUSTOM_JWT_SUBJECT\""
] |
[] |
[
"JWT_SECRET",
"CALLBACK_URL",
"CUSTOM_JWT_SUBJECT"
] |
[]
|
["JWT_SECRET", "CALLBACK_URL", "CUSTOM_JWT_SUBJECT"]
|
go
| 3 | 0 | |
python/examples/rllib/rllib_multi_agent.py
|
import os
import sys
import ray
from ray import tune
from ray.rllib.agents.impala import ImpalaTrainer
from ray.rllib.models import ModelCatalog
from ray.tune.registry import register_env
from griddly import gd
from griddly.util.rllib.callbacks import VideoCallbacks
from griddly.util.rllib.environment.core import RLlibMultiAgentWrapper, RLlibEnv
from griddly.util.rllib.torch.agents.conv_agent import SimpleConvAgent
if __name__ == '__main__':
sep = os.pathsep
os.environ['PYTHONPATH'] = sep.join(sys.path)
ray.init(num_gpus=1)
env_name = 'ray-ma-env'
# Create the gridnet environment and wrap it in a multi-agent wrapper for self-play
def _create_env(env_config):
env = RLlibEnv(env_config)
return RLlibMultiAgentWrapper(env, env_config)
register_env(env_name, _create_env)
ModelCatalog.register_custom_model('SimpleConv', SimpleConvAgent)
max_training_steps = 50000000
config = {
'framework': 'torch',
'num_workers': 3,
'num_envs_per_worker': 1,
'callbacks': VideoCallbacks,
'model': {
'custom_model': 'SimpleConv',
'custom_model_config': {}
},
'env': env_name,
'env_config': {
# in the griddly environment we set a variable to let the training environment
# know if that player is no longer active
# 'player_done_variable': 'player_done',
# 'record_video_config': {
# 'frequency': 20000, # number of rollouts
# 'directory': 'videos'
# },
'random_level_on_reset': False,
'yaml_file': 'Multi-Agent/robot_tag_12.yaml',
'global_observer_type': gd.ObserverType.VECTOR,
'player_observer_type': gd.ObserverType.VECTOR,
# 'max_steps': 500,
},
'entropy_coeff_schedule': [
[0, 0.01],
[max_training_steps, 0.0]
],
'lr_schedule': [
[0, 0.0005],
[max_training_steps, 0.0]
]
}
stop = {
'timesteps_total': max_training_steps,
}
result = tune.run(ImpalaTrainer, config=config, stop=stop)
|
[] |
[] |
[
"PYTHONPATH"
] |
[]
|
["PYTHONPATH"]
|
python
| 1 | 0 | |
common/aggregatelog/constants.go
|
package aggregatelog
import "os"
const (
SENTRY_LOG_SERVICENAME = "sentry"
ELASTIC_LOG_SERVICENAME = "elastic"
INFO_LEVEL = "info"
ERROR_LEVEL = "error"
WARNING_LEVEL = "warning"
DEBUG_LEVEL = "debug"
FATAL_LEVEL = "fatal"
)
var SENTRY_DSN = os.Getenv("SENTRY_DSN")
var ELASTIC_URL = os.Getenv("ELASTIC_URL")
|
[
"\"SENTRY_DSN\"",
"\"ELASTIC_URL\""
] |
[] |
[
"SENTRY_DSN",
"ELASTIC_URL"
] |
[]
|
["SENTRY_DSN", "ELASTIC_URL"]
|
go
| 2 | 0 | |
config/asgi.py
|
"""
ASGI config for Graphql Project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/asgi/
"""
import os
import sys
from pathlib import Path
from django.core.asgi import get_asgi_application
# This allows easy placement of apps within the interior
# graphql_project directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "graphql_project"))
# If DJANGO_SETTINGS_MODULE is unset, default to the local settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
# This application object is used by any ASGI server configured to use this file.
django_application = get_asgi_application()
# Apply ASGI middleware here.
# from helloworld.asgi import HelloWorldApplication
# application = HelloWorldApplication(application)
# Import websocket application here, so apps from django_application are loaded first
from config.websocket import websocket_application # noqa isort:skip
async def application(scope, receive, send):
if scope["type"] == "http":
await django_application(scope, receive, send)
elif scope["type"] == "websocket":
await websocket_application(scope, receive, send)
else:
raise NotImplementedError(f"Unknown scope type {scope['type']}")
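# Illustrative only (an assumption, not taken from this project): a minimal
# config/websocket.py compatible with the dispatch above could follow the
# ASGI websocket protocol like this:
#
#     async def websocket_application(scope, receive, send):
#         while True:
#             event = await receive()
#             if event["type"] == "websocket.connect":
#                 await send({"type": "websocket.accept"})
#             elif event["type"] == "websocket.receive":
#                 # Echo text frames back to the client.
#                 await send({"type": "websocket.send", "text": event.get("text", "")})
#             elif event["type"] == "websocket.disconnect":
#                 break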
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
main.py
|
import os
from bot import DuccLife
os.environ.setdefault("JISHAKU_HIDE", "1")
os.environ.setdefault("JISHAKU_NO_UNDERSCORE", "1")
bot = DuccLife()
if __name__ == "__main__":
bot.run(bot.config.bot_token)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/client/pkg/config/config.go
|
package config
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/satori/go.uuid"
)
const configEnvVar = "PACH_CONFIG"
var defaultConfigDir = filepath.Join(os.Getenv("HOME"), ".pachyderm")
var defaultConfigPath = filepath.Join(defaultConfigDir, "config.json")
func configPath() string {
if env, ok := os.LookupEnv(configEnvVar); ok {
return env
}
return defaultConfigPath
}
// Read loads the Pachyderm config on this machine.
// If an existing configuration cannot be found, it sets up the defaults. Read
// returns a nil Config if and only if it returns a non-nil error.
func Read() (*Config, error) {
var c *Config
// Read json file
p := configPath()
if raw, err := ioutil.ReadFile(p); err == nil {
err = json.Unmarshal(raw, &c)
if err != nil {
return nil, err
}
} else {
// File doesn't exist, so create a new config
fmt.Println("No config detected. Generating new config...")
c = &Config{}
}
if c.UserID == "" {
fmt.Printf("No UserID present in config. Generating new UserID and "+
"updating config at %s\n", p)
uuid, err := uuid.NewV4()
if err != nil {
return nil, err
}
c.UserID = uuid.String()
if err := c.Write(); err != nil {
return nil, err
}
}
return c, nil
}
// Write writes the configuration in 'c' to this machine's Pachyderm config
// file.
func (c *Config) Write() error {
rawConfig, err := json.MarshalIndent(c, "", " ")
if err != nil {
return err
}
// If we're not using a custom config path, create the default config path
p := configPath()
if _, ok := os.LookupEnv(configEnvVar); ok {
// using overridden config path -- just make sure the parent dir exists
d := filepath.Dir(p)
if _, err := os.Stat(d); err != nil {
return fmt.Errorf("cannot use config at %s: could not stat parent directory (%v)", p, err)
}
} else {
// using the default config path, create the config directory
err = os.MkdirAll(defaultConfigDir, 0755)
if err != nil {
return err
}
}
return ioutil.WriteFile(p, rawConfig, 0644)
}
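// Usage sketch (illustrative, not part of the original file): a caller could
// load the config, inspect the generated UserID, and persist any changes;
// setting PACH_CONFIG overrides the default ~/.pachyderm/config.json path.
//
//	cfg, err := config.Read()
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("user id:", cfg.UserID)
//	if err := cfg.Write(); err != nil {
//		log.Fatal(err)
//	}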
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
dm/dm/ctl/common/config.go
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import (
"encoding/json"
"fmt"
"net"
"os"
"strings"
"time"
"github.com/pingcap/ticdc/dm/dm/config"
"github.com/pingcap/ticdc/dm/pkg/utils"
"github.com/BurntSushi/toml"
"github.com/pingcap/errors"
"github.com/spf13/pflag"
)
const (
defaultRPCTimeout = "10m"
// EncryptCmdName is special command.
EncryptCmdName = "encrypt"
// DecryptCmdName is special command.
DecryptCmdName = "decrypt"
// Master specifies member master type.
Master = "master"
// Worker specifies member worker type.
Worker = "worker"
dialTimeout = 3 * time.Second
keepaliveTimeout = 3 * time.Second
keepaliveTime = 3 * time.Second
syncMasterEndpointsTime = 3 * time.Second
// DefaultErrorCnt represents default count of errors to display for check-task.
DefaultErrorCnt = 10
// DefaultWarnCnt represents count of warns to display for check-task.
DefaultWarnCnt = 10
)
var argsNeedAdjust = [...]string{"-version", "-config", "-master-addr", "-rpc-timeout", "-ssl-ca", "-ssl-cert", "-ssl-key", "-" + EncryptCmdName, "-" + DecryptCmdName}
// NewConfig creates a new base config for dmctl.
func NewConfig(fs *pflag.FlagSet) *Config {
cfg := &Config{}
cfg.FlagSet = fs
return cfg
}
// DefineConfigFlagSet defines flag definitions for configs.
func DefineConfigFlagSet(fs *pflag.FlagSet) {
fs.BoolP("version", "V", false, "Prints version and exit.")
fs.String("config", "", "Path to config file.")
fs.String("master-addr", "", "Master API server address, this parameter is required when interacting with the dm-master")
fs.String("rpc-timeout", defaultRPCTimeout, fmt.Sprintf("RPC timeout, default is %s.", defaultRPCTimeout))
fs.String("ssl-ca", "", "Path of file that contains list of trusted SSL CAs for connection.")
fs.String("ssl-cert", "", "Path of file that contains X509 certificate in PEM format for connection.")
fs.String("ssl-key", "", "Path of file that contains X509 key in PEM format for connection.")
fs.String(EncryptCmdName, "", "Encrypts plaintext to ciphertext.")
fs.String(DecryptCmdName, "", "Decrypts ciphertext to plaintext.")
_ = fs.MarkHidden(EncryptCmdName)
_ = fs.MarkHidden(DecryptCmdName)
}
// AdjustArgumentsForPflags adjust flag format args to pflags format.
func AdjustArgumentsForPflags(args []string) []string {
for i, arg := range args {
arg = strings.TrimSpace(arg)
for _, adjustArg := range argsNeedAdjust {
// -master-addr 127.0.0.1:8261 and -master-addr=127.0.0.1:8261
if arg == adjustArg || strings.HasPrefix(arg, adjustArg+"=") {
args[i] = "-" + arg
}
}
}
return args
}
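// For example (illustrative, not part of the original file): given the flags
// registered above, AdjustArgumentsForPflags turns
//
//	[]string{"-master-addr=127.0.0.1:8261", "-version"}
//
// into
//
//	[]string{"--master-addr=127.0.0.1:8261", "--version"}
//
// while arguments that are not in argsNeedAdjust are left untouched.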
func (c *Config) getConfigFromFlagSet() error {
var err error
fs := c.FlagSet
c.ConfigFile, err = fs.GetString("config")
if err != nil {
return err
}
c.MasterAddr, err = fs.GetString("master-addr")
if err != nil {
return err
}
c.RPCTimeoutStr, err = fs.GetString("rpc-timeout")
if err != nil {
return err
}
c.SSLCA, err = fs.GetString("ssl-ca")
if err != nil {
return err
}
c.SSLCert, err = fs.GetString("ssl-cert")
if err != nil {
return err
}
c.SSLKey, err = fs.GetString("ssl-key")
return err
}
// Config is the configuration.
type Config struct {
*pflag.FlagSet `json:"-"`
MasterAddr string `toml:"master-addr" json:"master-addr"`
RPCTimeoutStr string `toml:"rpc-timeout" json:"rpc-timeout"`
RPCTimeout time.Duration `json:"-"`
ConfigFile string `json:"config-file"`
config.Security
}
func (c *Config) String() string {
//nolint:staticcheck
cfg, err := json.Marshal(c)
if err != nil {
fmt.Printf("marshal config to json error %v", err)
}
return string(cfg)
}
// Adjust parses flag definitions from the argument list.
func (c *Config) Adjust() error {
err := c.getConfigFromFlagSet()
if err != nil {
return errors.Trace(err)
}
// Load config file if specified.
if c.ConfigFile != "" {
err = c.configFromFile(c.ConfigFile)
if err != nil {
return errors.Annotatef(err, "the current command parameter: --config is invalid or used incorrectly")
}
}
// Parse again to replace with command line options.
err = c.getConfigFromFlagSet()
if err != nil {
return errors.Trace(err)
}
// try to get the master addr from env "DM_MASTER_ADDR" if this flag is empty.
if c.MasterAddr == "" {
c.MasterAddr = os.Getenv("DM_MASTER_ADDR")
}
if c.MasterAddr == "" {
return errors.Errorf("--master-addr not provided, this parameter is required when interacting with the dm-master, you can also use environment variable 'DM_MASTER_ADDR' to specify the value. Use `dmctl --help` to see more help messages")
}
return errors.Trace(c.adjust())
}
// Validate check config is ready to execute command.
func (c *Config) Validate() error {
if c.MasterAddr == "" {
return errors.New("--master-addr not provided")
}
if err := validateAddr(c.MasterAddr); err != nil {
return errors.Annotatef(err, "specify master addr %s", c.MasterAddr)
}
return nil
}
// configFromFile loads config from file.
func (c *Config) configFromFile(path string) error {
_, err := toml.DecodeFile(path, c)
return errors.Trace(err)
}
// adjust adjusts configs.
func (c *Config) adjust() error {
if c.RPCTimeoutStr == "" {
c.RPCTimeoutStr = defaultRPCTimeout
}
timeout, err := time.ParseDuration(c.RPCTimeoutStr)
if err != nil {
return errors.Trace(err)
}
if timeout <= time.Duration(0) {
return errors.Errorf("invalid time duration: %s", c.RPCTimeoutStr)
}
c.RPCTimeout = timeout
return nil
}
// validate host:port format address.
func validateAddr(addr string) error {
endpoints := strings.Split(addr, ",")
for _, endpoint := range endpoints {
if _, _, err := net.SplitHostPort(utils.UnwrapScheme(endpoint)); err != nil {
return errors.Trace(err)
}
}
return nil
}
|
[
"\"DM_MASTER_ADDR\""
] |
[] |
[
"DM_MASTER_ADDR"
] |
[]
|
["DM_MASTER_ADDR"]
|
go
| 1 | 0 | |
format/format.go
|
// Copyright (c) 2019, Daniel Martí <[email protected]>
// See LICENSE for licensing information
// Package format exposes gofumpt's formatting in an API similar to go/format.
// In general, the APIs are only guaranteed to work well when the input source
// is in canonical gofmt format.
package format
import (
"bytes"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/token"
"os"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"github.com/google/go-cmp/cmp"
"golang.org/x/mod/semver"
"golang.org/x/tools/go/ast/astutil"
"mvdan.cc/gofumpt/internal/version"
)
type Options struct {
// LangVersion corresponds to the Go language version a piece of code is
// written in. The version is used to decide whether to apply formatting
// rules which require new language features. When inside a Go module,
// LangVersion should generally be specified as the result of:
//
// go list -m -f {{.GoVersion}}
//
// LangVersion is treated as a semantic version, which might start with
// a "v" prefix. Like Go versions, it might also be incomplete; "1.14"
// is equivalent to "1.14.0". When empty, it is equivalent to "v1", to
// not use language features which could break programs.
LangVersion string
ExtraRules bool
}
// Source formats src in gofumpt's format, assuming that src holds a valid Go
// source file.
func Source(src []byte, opts Options) ([]byte, error) {
fset := token.NewFileSet()
// Ensure our parsed files never start with base 1,
// to ensure that using token.NoPos+1 will panic.
fset.AddFile("gofumpt_base.go", 1, 10)
file, err := parser.ParseFile(fset, "", src, parser.ParseComments)
if err != nil {
return nil, err
}
File(fset, file, opts)
var buf bytes.Buffer
if err := format.Node(&buf, fset, file); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
var rxCodeGenerated = regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`)
// File modifies a file and fset in place to follow gofumpt's format. The
// changes might include adding or removing newlines in fset,
// modifying the position of nodes, or modifying literal values.
func File(fset *token.FileSet, file *ast.File, opts Options) {
simplify(file)
for _, cg := range file.Comments {
if cg.Pos() > file.Package {
break
}
for _, line := range cg.List {
if rxCodeGenerated.MatchString(line.Text) {
return
}
}
}
if opts.LangVersion == "" {
opts.LangVersion = "v1"
} else if opts.LangVersion[0] != 'v' {
opts.LangVersion = "v" + opts.LangVersion
}
if !semver.IsValid(opts.LangVersion) {
panic(fmt.Sprintf("invalid semver string: %q", opts.LangVersion))
}
f := &fumpter{
File: fset.File(file.Pos()),
fset: fset,
astFile: file,
Options: opts,
minSplitFactor: 0.4,
}
var topFuncType *ast.FuncType
pre := func(c *astutil.Cursor) bool {
f.applyPre(c)
switch node := c.Node().(type) {
case *ast.FuncDecl:
topFuncType = node.Type
case *ast.FieldList:
ft, _ := c.Parent().(*ast.FuncType)
if ft == nil || ft != topFuncType {
break
}
// For top-level function declaration parameters,
// require the line split to be longer.
// This avoids func lines which are a bit too short,
// and allows func lines which are a bit longer.
//
// We don't just increase longLineLimit,
// as we still want splits at around the same place.
if ft.Params == node {
f.minSplitFactor = 0.6
}
// Don't split result parameters into multiple lines,
// as that can be easily confused for input parameters.
// TODO: consider the same for single-line func calls in
// if statements.
// TODO: perhaps just use a higher factor, like 0.8.
if ft.Results == node {
f.minSplitFactor = 1000
}
case *ast.BlockStmt:
f.blockLevel++
}
return true
}
post := func(c *astutil.Cursor) bool {
f.applyPost(c)
// Reset minSplitFactor and blockLevel.
switch node := c.Node().(type) {
case *ast.FuncType:
if node == topFuncType {
f.minSplitFactor = 0.4
}
case *ast.BlockStmt:
f.blockLevel--
}
return true
}
astutil.Apply(file, pre, post)
}
// Multiline nodes which could easily fit on a single line under this many bytes
// may be collapsed onto a single line.
const shortLineLimit = 60
// Single-line nodes which take over this many bytes, and could easily be split
// into two lines of at least its minSplitFactor factor, may be split.
const longLineLimit = 100
var rxOctalInteger = regexp.MustCompile(`\A0[0-7_]+\z`)
type fumpter struct {
Options
*token.File
fset *token.FileSet
astFile *ast.File
// blockLevel is the number of indentation blocks we're currently under.
// It is used to approximate the levels of indentation a line will end
// up with.
blockLevel int
minSplitFactor float64
}
func (f *fumpter) commentsBetween(p1, p2 token.Pos) []*ast.CommentGroup {
comments := f.astFile.Comments
i1 := sort.Search(len(comments), func(i int) bool {
return comments[i].Pos() >= p1
})
comments = comments[i1:]
i2 := sort.Search(len(comments), func(i int) bool {
return comments[i].Pos() >= p2
})
comments = comments[:i2]
return comments
}
func (f *fumpter) inlineComment(pos token.Pos) *ast.Comment {
comments := f.astFile.Comments
i := sort.Search(len(comments), func(i int) bool {
return comments[i].Pos() >= pos
})
if i >= len(comments) {
return nil
}
line := f.Line(pos)
for _, comment := range comments[i].List {
if f.Line(comment.Pos()) == line {
return comment
}
}
return nil
}
// addNewline is a hack to let us force a newline at a certain position.
func (f *fumpter) addNewline(at token.Pos) {
offset := f.Offset(at)
field := reflect.ValueOf(f.File).Elem().FieldByName("lines")
n := field.Len()
lines := make([]int, 0, n+1)
for i := 0; i < n; i++ {
cur := int(field.Index(i).Int())
if offset == cur {
// This newline already exists; do nothing. Duplicate
// newlines can't exist.
return
}
if offset >= 0 && offset < cur {
lines = append(lines, offset)
offset = -1
}
lines = append(lines, cur)
}
if offset >= 0 {
lines = append(lines, offset)
}
if !f.SetLines(lines) {
panic(fmt.Sprintf("could not set lines to %v", lines))
}
}
// removeLines removes all newlines between two positions, so that they end
// up on the same line.
func (f *fumpter) removeLines(fromLine, toLine int) {
for fromLine < toLine {
f.MergeLine(fromLine)
toLine--
}
}
// removeLinesBetween is like removeLines, but it leaves one newline between the
// two positions.
func (f *fumpter) removeLinesBetween(from, to token.Pos) {
f.removeLines(f.Line(from)+1, f.Line(to))
}
type byteCounter int
func (b *byteCounter) Write(p []byte) (n int, err error) {
*b += byteCounter(len(p))
return len(p), nil
}
func (f *fumpter) printLength(node ast.Node) int {
var count byteCounter
if err := format.Node(&count, f.fset, node); err != nil {
panic(fmt.Sprintf("unexpected print error: %v", err))
}
// Add the space taken by an inline comment.
if c := f.inlineComment(node.End()); c != nil {
fmt.Fprintf(&count, " %s", c.Text)
}
// Add an approximation of the indentation level. We can't know the
// number of tabs go/printer will add ahead of time. Trying to print the
// entire top-level declaration would tell us that, but then it's near
// impossible to reliably find our node again.
return int(count) + (f.blockLevel * 8)
}
func (f *fumpter) tabbedColumn(p token.Pos) int {
col := f.Position(p).Column
// Like in printLength, add an approximation of the indentation level.
// Since any existing tabs were already counted as one column, multiply
// the level by 7.
return col + (f.blockLevel * 7)
}
func (f *fumpter) lineEnd(line int) token.Pos {
if line < 1 {
panic("illegal line number")
}
total := f.LineCount()
if line > total {
panic("illegal line number")
}
if line == total {
return f.astFile.End()
}
return f.LineStart(line+1) - 1
}
// rxCommentDirective covers all common Go comment directives:
//
// //go: | standard Go directives, like go:noinline
// //some-words: | similar to the syntax above, like lint:ignore or go-sumtype:decl
// //line | inserted line information for cmd/compile
// //export | to mark cgo funcs for exporting
// //extern | C function declarations for gccgo
// //sys(nb)? | syscall function wrapper prototypes
// //nolint | nolint directive for golangci
//
// Note that the "some-words:" matching expects a letter afterward, such as
// "go:generate", to prevent matching false positives like "https://site".
var rxCommentDirective = regexp.MustCompile(`^([a-z-]+:[a-z]+|line\b|export\b|extern\b|sys(nb)?\b|nolint\b)`)
func (f *fumpter) applyPre(c *astutil.Cursor) {
f.splitLongLine(c)
switch node := c.Node().(type) {
case *ast.File:
// Join contiguous lone var/const/import lines.
// Abort if there are empty lines or comments in between,
// including a leading comment, which could be a directive.
newDecls := make([]ast.Decl, 0, len(node.Decls))
for i := 0; i < len(node.Decls); {
newDecls = append(newDecls, node.Decls[i])
start, ok := node.Decls[i].(*ast.GenDecl)
if !ok || isCgoImport(start) || start.Doc != nil {
i++
continue
}
lastPos := start.Pos()
for i++; i < len(node.Decls); {
cont, ok := node.Decls[i].(*ast.GenDecl)
if !ok || cont.Tok != start.Tok || cont.Lparen != token.NoPos ||
f.Line(lastPos) < f.Line(cont.Pos())-1 || isCgoImport(cont) {
break
}
start.Specs = append(start.Specs, cont.Specs...)
if c := f.inlineComment(cont.End()); c != nil {
// don't move an inline comment outside
start.Rparen = c.End()
} else {
// so the code below treats the joined
// decl group as multi-line
start.Rparen = cont.End()
}
lastPos = cont.Pos()
i++
}
}
node.Decls = newDecls
// Multiline top-level declarations should be separated by an
// empty line.
// Do this after the joining of lone declarations above,
// as joining single-line declarations makes them multi-line.
var lastMulti bool
var lastEnd token.Pos
for _, decl := range node.Decls {
pos := decl.Pos()
comments := f.commentsBetween(lastEnd, pos)
if len(comments) > 0 {
pos = comments[0].Pos()
}
multi := f.Line(pos) < f.Line(decl.End())
if multi && lastMulti && f.Line(lastEnd)+1 == f.Line(pos) {
f.addNewline(lastEnd)
}
lastMulti = multi
lastEnd = decl.End()
}
// Comments aren't nodes, so they're not walked by default.
groupLoop:
for _, group := range node.Comments {
for _, comment := range group.List {
if comment.Text == "//gofumpt:diagnose" || strings.HasPrefix(comment.Text, "//gofumpt:diagnose ") {
slc := []string{
"//gofumpt:diagnose",
version.String(),
"-lang=" + f.LangVersion,
}
if f.ExtraRules {
slc = append(slc, "-extra")
}
comment.Text = strings.Join(slc, " ")
}
body := strings.TrimPrefix(comment.Text, "//")
if body == comment.Text {
// /*-style comment
continue groupLoop
}
if rxCommentDirective.MatchString(body) {
// this line is a directive
continue groupLoop
}
r, _ := utf8.DecodeRuneInString(body)
if !unicode.IsLetter(r) && !unicode.IsNumber(r) && !unicode.IsSpace(r) {
// this line could be code like "//{"
continue groupLoop
}
}
// If none of the comment group's lines look like a
// directive or code, add spaces, if needed.
for _, comment := range group.List {
body := strings.TrimPrefix(comment.Text, "//")
r, _ := utf8.DecodeRuneInString(body)
if !unicode.IsSpace(r) {
comment.Text = "// " + body
}
}
}
case *ast.DeclStmt:
decl, ok := node.Decl.(*ast.GenDecl)
if !ok || decl.Tok != token.VAR || len(decl.Specs) != 1 {
break // e.g. const name = "value"
}
spec := decl.Specs[0].(*ast.ValueSpec)
if spec.Type != nil {
break // e.g. var name Type
}
tok := token.ASSIGN
names := make([]ast.Expr, len(spec.Names))
for i, name := range spec.Names {
names[i] = name
if name.Name != "_" {
tok = token.DEFINE
}
}
c.Replace(&ast.AssignStmt{
Lhs: names,
Tok: tok,
Rhs: spec.Values,
})
case *ast.GenDecl:
if node.Tok == token.IMPORT && node.Lparen.IsValid() {
f.joinStdImports(node)
}
// Single var declarations shouldn't use parentheses, unless
// there's a comment on the grouped declaration.
if node.Tok == token.VAR && len(node.Specs) == 1 &&
node.Lparen.IsValid() && node.Doc == nil {
specPos := node.Specs[0].Pos()
specEnd := node.Specs[0].End()
if len(f.commentsBetween(node.TokPos, specPos)) > 0 {
// If the single spec has any comment, it must
// go before the entire declaration now.
node.TokPos = specPos
} else {
f.removeLines(f.Line(node.TokPos), f.Line(specPos))
}
f.removeLines(f.Line(specEnd), f.Line(node.Rparen))
// Remove the parentheses. go/printer will automatically
// get rid of the newlines.
node.Lparen = token.NoPos
node.Rparen = token.NoPos
}
case *ast.InterfaceType:
var prev *ast.Field
for _, method := range node.Methods.List {
switch {
case prev == nil:
removeToPos := method.Pos()
if comments := f.commentsBetween(node.Interface, method.Pos()); len(comments) > 0 {
// only remove leading lines up to the first comment
removeToPos = comments[0].Pos()
}
// remove leading lines if they exist
f.removeLines(f.Line(node.Interface)+1, f.Line(removeToPos))
case len(f.commentsBetween(prev.End(), method.Pos())) > 0:
// comments in between; leave newlines alone
case len(prev.Names) != len(method.Names):
// don't group type unions with methods
case len(prev.Names) == 1 && token.IsExported(prev.Names[0].Name) != token.IsExported(method.Names[0].Name):
// don't group exported and unexported methods together
default:
f.removeLinesBetween(prev.End(), method.Pos())
}
prev = method
}
case *ast.BlockStmt:
f.stmts(node.List)
comments := f.commentsBetween(node.Lbrace, node.Rbrace)
if len(node.List) == 0 && len(comments) == 0 {
f.removeLinesBetween(node.Lbrace, node.Rbrace)
break
}
var sign *ast.FuncType
var cond ast.Expr
switch parent := c.Parent().(type) {
case *ast.FuncDecl:
sign = parent.Type
case *ast.FuncLit:
sign = parent.Type
case *ast.IfStmt:
cond = parent.Cond
case *ast.ForStmt:
cond = parent.Cond
}
if len(node.List) > 1 && sign == nil {
// only if we have a single statement, or if
// it's a func body.
break
}
var bodyPos, bodyEnd token.Pos
if len(node.List) > 0 {
bodyPos = node.List[0].Pos()
bodyEnd = node.List[len(node.List)-1].End()
}
if len(comments) > 0 {
if pos := comments[0].Pos(); !bodyPos.IsValid() || pos < bodyPos {
bodyPos = pos
}
if pos := comments[len(comments)-1].End(); !bodyPos.IsValid() || pos > bodyEnd {
bodyEnd = pos
}
}
f.removeLinesBetween(bodyEnd, node.Rbrace)
if cond != nil && f.Line(cond.Pos()) != f.Line(cond.End()) {
// The body is preceded by a multi-line condition, so an
// empty line can help readability.
return
}
if sign != nil {
endLine := f.Line(sign.End())
paramClosingIsFirstCharOnEndLine := sign.Params != nil &&
f.Position(sign.Params.Closing).Column == 1 &&
f.Line(sign.Params.Closing) == endLine
resultClosingIsFirstCharOnEndLine := sign.Results != nil &&
f.Position(sign.Results.Closing).Column == 1 &&
f.Line(sign.Results.Closing) == endLine
endLineIsIndented := !(paramClosingIsFirstCharOnEndLine || resultClosingIsFirstCharOnEndLine)
if f.Line(sign.Pos()) != endLine && endLineIsIndented {
// is there an empty line?
isThereAnEmptyLine := endLine+1 != f.Line(bodyPos)
// The body is preceded by a multi-line function
// signature, we move the `) {` to avoid the empty line.
switch {
case isThereAnEmptyLine && sign.Results != nil &&
!resultClosingIsFirstCharOnEndLine &&
sign.Results.Closing.IsValid(): // there may be no ")"
sign.Results.Closing += 1
f.addNewline(sign.Results.Closing)
case isThereAnEmptyLine && sign.Params != nil &&
!paramClosingIsFirstCharOnEndLine:
sign.Params.Closing += 1
f.addNewline(sign.Params.Closing)
}
}
}
f.removeLinesBetween(node.Lbrace, bodyPos)
case *ast.CaseClause:
f.stmts(node.Body)
openLine := f.Line(node.Case)
closeLine := f.Line(node.Colon)
if openLine == closeLine {
// nothing to do
break
}
if len(f.commentsBetween(node.Case, node.Colon)) > 0 {
// don't move comments
break
}
if f.printLength(node) > shortLineLimit {
// too long to collapse
break
}
f.removeLines(openLine, closeLine)
case *ast.CommClause:
f.stmts(node.Body)
case *ast.FieldList:
if node.NumFields() == 0 && len(f.commentsBetween(node.Pos(), node.End())) == 0 {
// Empty field lists should not contain a newline.
// Do not join the two lines if the first has an inline
// comment, as that can result in broken formatting.
openLine := f.Line(node.Pos())
closeLine := f.Line(node.End())
f.removeLines(openLine, closeLine)
}
// Merging adjacent fields (e.g. parameters) is disabled by default.
if !f.ExtraRules {
break
}
switch c.Parent().(type) {
case *ast.FuncDecl, *ast.FuncType, *ast.InterfaceType:
node.List = f.mergeAdjacentFields(node.List)
c.Replace(node)
case *ast.StructType:
// Do not merge adjacent fields in structs.
}
case *ast.BasicLit:
// Octal number literals were introduced in 1.13.
if semver.Compare(f.LangVersion, "v1.13") >= 0 {
if node.Kind == token.INT && rxOctalInteger.MatchString(node.Value) {
node.Value = "0o" + node.Value[1:]
c.Replace(node)
}
}
case *ast.AssignStmt:
// Only remove lines between the assignment token and the first right-hand side expression
f.removeLines(f.Line(node.TokPos), f.Line(node.Rhs[0].Pos()))
}
}
func (f *fumpter) applyPost(c *astutil.Cursor) {
switch node := c.Node().(type) {
// Adding newlines to composite literals happens as a "post" step, so
// that we can take into account whether "pre" steps added any newlines
// that would affect us here.
case *ast.CompositeLit:
if len(node.Elts) == 0 {
// doesn't have elements
break
}
openLine := f.Line(node.Lbrace)
closeLine := f.Line(node.Rbrace)
if openLine == closeLine {
// all in a single line
break
}
newlineAroundElems := false
newlineBetweenElems := false
lastEnd := node.Lbrace
lastLine := openLine
for i, elem := range node.Elts {
pos := elem.Pos()
comments := f.commentsBetween(lastEnd, pos)
if len(comments) > 0 {
pos = comments[0].Pos()
}
if curLine := f.Line(pos); curLine > lastLine {
if i == 0 {
newlineAroundElems = true
// remove leading lines if they exist
f.removeLines(openLine+1, curLine)
} else {
newlineBetweenElems = true
}
}
lastEnd = elem.End()
lastLine = f.Line(lastEnd)
}
if closeLine > lastLine {
newlineAroundElems = true
}
if newlineBetweenElems || newlineAroundElems {
first := node.Elts[0]
if openLine == f.Line(first.Pos()) {
// We want the newline right after the brace.
f.addNewline(node.Lbrace + 1)
closeLine = f.Line(node.Rbrace)
}
last := node.Elts[len(node.Elts)-1]
if closeLine == f.Line(last.End()) {
// We want the newline right before the brace.
f.addNewline(node.Rbrace)
}
}
// If there's a newline between any consecutive elements, there
// must be a newline between all composite literal elements.
if !newlineBetweenElems {
break
}
for i1, elem1 := range node.Elts {
i2 := i1 + 1
if i2 >= len(node.Elts) {
break
}
elem2 := node.Elts[i2]
// TODO: do we care about &{}?
_, ok1 := elem1.(*ast.CompositeLit)
_, ok2 := elem2.(*ast.CompositeLit)
if !ok1 && !ok2 {
continue
}
if f.Line(elem1.End()) == f.Line(elem2.Pos()) {
f.addNewline(elem1.End())
}
}
}
}
func (f *fumpter) splitLongLine(c *astutil.Cursor) {
if os.Getenv("GOFUMPT_SPLIT_LONG_LINES") != "on" {
// By default, this feature is turned off.
// Turn it on by setting GOFUMPT_SPLIT_LONG_LINES=on.
return
}
node := c.Node()
if node == nil {
return
}
newlinePos := node.Pos()
start := f.Position(node.Pos())
end := f.Position(node.End())
// If the node is already split in multiple lines, there's nothing to do.
if start.Line != end.Line {
return
}
// Only split at the start of the current node if it's part of a list.
if _, ok := c.Parent().(*ast.BinaryExpr); ok {
// Chains of binary expressions are considered lists, too.
} else if c.Index() >= 0 {
// For the rest of the nodes, we're in a list if c.Index() >= 0.
} else {
return
}
// Like in printLength, add an approximation of the indentation level.
// Since any existing tabs were already counted as one column, multiply
// the level by 7.
startCol := start.Column + f.blockLevel*7
endCol := end.Column + f.blockLevel*7
// If this is a composite literal,
// and we were going to insert a newline before the entire literal,
// insert the newline before the first element instead.
// Since we'll add a newline after the last element too,
// this format is generally going to be nicer.
if comp := isComposite(node); comp != nil && len(comp.Elts) > 0 {
newlinePos = comp.Elts[0].Pos()
}
// If this is a function call,
// and we were to add a newline before the first argument,
// prefer adding the newline before the entire call.
// End-of-line parentheses aren't very nice, as we don't put their
// counterparts at the start of a line too.
// We do this by using the average of the two starting positions.
if call, _ := node.(*ast.CallExpr); call != nil && len(call.Args) > 0 {
first := f.Position(call.Args[0].Pos())
startCol += (first.Column - start.Column) / 2
}
// If the start position is too short, we definitely won't split the line.
if startCol <= shortLineLimit {
return
}
lineEnd := f.Position(f.lineEnd(start.Line))
// firstLength and secondLength are the split line lengths, excluding
// indentation.
firstLength := start.Column - f.blockLevel
if firstLength < 0 {
panic("negative length")
}
secondLength := lineEnd.Column - start.Column
if secondLength < 0 {
panic("negative length")
}
// If the line ends past the long line limit,
// and both splits are estimated to take at least minSplitFactor of the limit,
// then split the line.
minSplitLength := int(f.minSplitFactor * longLineLimit)
if endCol > longLineLimit &&
firstLength >= minSplitLength && secondLength >= minSplitLength {
f.addNewline(newlinePos)
}
}
func isComposite(node ast.Node) *ast.CompositeLit {
switch node := node.(type) {
case *ast.CompositeLit:
return node
case *ast.UnaryExpr:
return isComposite(node.X) // e.g. &T{}
default:
return nil
}
}
func (f *fumpter) stmts(list []ast.Stmt) {
for i, stmt := range list {
ifs, ok := stmt.(*ast.IfStmt)
if !ok || i < 1 {
continue // not an if following another statement
}
as, ok := list[i-1].(*ast.AssignStmt)
if !ok || as.Tok != token.DEFINE ||
!identEqual(as.Lhs[len(as.Lhs)-1], "err") {
continue // not "..., err := ..."
}
be, ok := ifs.Cond.(*ast.BinaryExpr)
if !ok || ifs.Init != nil || ifs.Else != nil {
continue // complex if
}
if be.Op != token.NEQ || !identEqual(be.X, "err") ||
!identEqual(be.Y, "nil") {
continue // not "err != nil"
}
f.removeLinesBetween(as.End(), ifs.Pos())
}
}
func identEqual(expr ast.Expr, name string) bool {
id, ok := expr.(*ast.Ident)
return ok && id.Name == name
}
// isCgoImport returns true if the declaration is simply:
//
// import "C"
//
// or the equivalent:
//
// import `C`
//
// Note that parentheses do not affect the result.
func isCgoImport(decl *ast.GenDecl) bool {
if decl.Tok != token.IMPORT || len(decl.Specs) != 1 {
return false
}
spec := decl.Specs[0].(*ast.ImportSpec)
v, err := strconv.Unquote(spec.Path.Value)
if err != nil {
panic(err) // should never error
}
return v == "C"
}
// joinStdImports ensures that all standard library imports are together and at
// the top of the imports list.
func (f *fumpter) joinStdImports(d *ast.GenDecl) {
var std, other []ast.Spec
firstGroup := true
lastEnd := d.Pos()
needsSort := false
for i, spec := range d.Specs {
spec := spec.(*ast.ImportSpec)
if coms := f.commentsBetween(lastEnd, spec.Pos()); len(coms) > 0 {
lastEnd = coms[len(coms)-1].End()
}
if i > 0 && firstGroup && f.Line(spec.Pos()) > f.Line(lastEnd)+1 {
firstGroup = false
} else {
// We're still in the first group, update lastEnd.
lastEnd = spec.End()
}
path, _ := strconv.Unquote(spec.Path.Value)
switch {
// Imports with a period are definitely third party.
case strings.Contains(path, "."):
fallthrough
// "test" and "example" are reserved as per golang.org/issue/37641.
// "internal" is unreachable.
case strings.HasPrefix(path, "test/") ||
strings.HasPrefix(path, "example/") ||
strings.HasPrefix(path, "internal/"):
fallthrough
// To be conservative, if an import has a name or an inline
// comment, and isn't part of the top group, treat it as non-std.
case !firstGroup && (spec.Name != nil || spec.Comment != nil):
other = append(other, spec)
continue
}
// If we're moving this std import further up, reset its
// position, to avoid breaking comments.
if !firstGroup || len(other) > 0 {
setPos(reflect.ValueOf(spec), d.Pos())
needsSort = true
}
std = append(std, spec)
}
// Ensure there is an empty line between std imports and other imports.
if len(std) > 0 && len(other) > 0 && f.Line(std[len(std)-1].End())+1 >= f.Line(other[0].Pos()) {
// We add two newlines, as that's necessary in some edge cases.
// For example, if the std and non-std imports were together and
// without indentation, adding one newline isn't enough. Two
// empty lines will be printed as one by go/printer, anyway.
f.addNewline(other[0].Pos() - 1)
f.addNewline(other[0].Pos())
}
// Finally, join the imports, keeping std at the top.
d.Specs = append(std, other...)
// If we moved any std imports to the first group, we need to sort them
// again.
if needsSort {
ast.SortImports(f.fset, f.astFile)
}
}
// mergeAdjacentFields returns fields with adjacent fields merged if possible.
func (f *fumpter) mergeAdjacentFields(fields []*ast.Field) []*ast.Field {
// If there are less than two fields then there is nothing to merge.
if len(fields) < 2 {
return fields
}
// Otherwise, iterate over adjacent pairs of fields, merging if possible,
// and mutating fields. Elements of fields may be mutated (if merged with
// following fields), discarded (if merged with a preceding field), or left
// unchanged.
i := 0
for j := 1; j < len(fields); j++ {
if f.shouldMergeAdjacentFields(fields[i], fields[j]) {
fields[i].Names = append(fields[i].Names, fields[j].Names...)
} else {
i++
fields[i] = fields[j]
}
}
return fields[:i+1]
}
func (f *fumpter) shouldMergeAdjacentFields(f1, f2 *ast.Field) bool {
if len(f1.Names) == 0 || len(f2.Names) == 0 {
// Both must have names for the merge to work.
return false
}
if f.Line(f1.Pos()) != f.Line(f2.Pos()) {
// Trust the user if they used separate lines.
return false
}
// Only merge if the types are equal.
opt := cmp.Comparer(func(x, y token.Pos) bool { return true })
return cmp.Equal(f1.Type, f2.Type, opt)
}
var posType = reflect.TypeOf(token.NoPos)
// setPos recursively sets all position fields in the node v to pos.
func setPos(v reflect.Value, pos token.Pos) {
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if !v.IsValid() {
return
}
if v.Type() == posType {
v.Set(reflect.ValueOf(pos))
}
if v.Kind() == reflect.Struct {
for i := 0; i < v.NumField(); i++ {
setPos(v.Field(i), pos)
}
}
}
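// Usage sketch (illustrative, not part of the original file): from another
// package, a file could be reformatted with gofumpt's rules via Source,
// passing the module's Go version through Options; the file name here is
// hypothetical.
//
//	src, err := os.ReadFile("main.go")
//	if err != nil {
//		log.Fatal(err)
//	}
//	out, err := format.Source(src, format.Options{LangVersion: "1.16"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Print(string(out))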
|
[
"\"GOFUMPT_SPLIT_LONG_LINES\""
] |
[] |
[
"GOFUMPT_SPLIT_LONG_LINES"
] |
[]
|
["GOFUMPT_SPLIT_LONG_LINES"]
|
go
| 1 | 0 | |
commands.go
|
package main
import (
"log"
"os"
"github.com/codegangsta/cli"
)
// Commands ...
var Commands = []cli.Command{
commandToggle,
}
var commandToggle = cli.Command{
Name: "toggl",
Usage: "Shows your activity with Toggl",
Description: `Shows your activity with Toggl for today`,
Action: doToggle,
}
func debug(v ...interface{}) {
if os.Getenv("DEBUG") != "" {
log.Println(v...)
}
}
func assert(err error) {
if err != nil {
log.Fatal(err)
}
}
func doToggle(c *cli.Context) {
Toggl()
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
lib/spack/spack/package.py
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This is where most of the action happens in Spack.
The spack package class structure is based strongly on Homebrew
(http://brew.sh/), mainly because Homebrew makes it very easy to create
packages.
"""
import base64
import contextlib
import copy
import functools
import hashlib
import inspect
import os
import re
import shutil
import sys
import textwrap
import time
from six import StringIO
from six import string_types
from six import with_metaclass
from ordereddict_backport import OrderedDict
import llnl.util.tty as tty
import spack.config
import spack.paths
import spack.store
import spack.compilers
import spack.directives
import spack.dependency
import spack.directory_layout
import spack.error
import spack.fetch_strategy as fs
import spack.hooks
import spack.mirror
import spack.mixins
import spack.multimethod
import spack.repo
import spack.url
import spack.util.environment
import spack.util.web
import spack.multimethod
from llnl.util.filesystem import mkdirp, touch, working_dir
from llnl.util.lang import memoized
from llnl.util.link_tree import LinkTree
from spack.filesystem_view import YamlFilesystemView
from spack.installer import \
install_args_docstring, PackageInstaller, InstallError
from spack.stage import stage_prefix, Stage, ResourceStage, StageComposite
from spack.util.package_hash import package_hash
from spack.version import Version
"""Allowed URL schemes for spack packages."""
_ALLOWED_URL_SCHEMES = ["http", "https", "ftp", "file", "git"]
# Filename for the Spack build/install log.
_spack_build_logfile = 'spack-build-out.txt'
# Filename for the Spack build/install environment file.
_spack_build_envfile = 'spack-build-env.txt'
# Filename for the Spack configure args file.
_spack_configure_argsfile = 'spack-configure-args.txt'
class InstallPhase(object):
"""Manages a single phase of the installation.
This descriptor stores at creation time the name of the method it should
search for execution. The method is retrieved at __get__ time, so that
it can be overridden by subclasses of whatever class declared the phases.
It also provides hooks to execute arbitrary callbacks before and after
the phase.
"""
def __init__(self, name):
self.name = name
self.run_before = []
self.run_after = []
def __get__(self, instance, owner):
# The caller is a class that is trying to customize
# my behavior adding something
if instance is None:
return self
# If instance is there the caller wants to execute the
# install phase, thus return a properly set wrapper
phase = getattr(instance, self.name)
@functools.wraps(phase)
def phase_wrapper(spec, prefix):
# Check instance attributes at the beginning of a phase
self._on_phase_start(instance)
# Execute phase pre-conditions,
# and give them the chance to fail
for callback in self.run_before:
callback(instance)
phase(spec, prefix)
# Execute phase sanity_checks,
# and give them the chance to fail
for callback in self.run_after:
callback(instance)
# Check instance attributes at the end of a phase
self._on_phase_exit(instance)
return phase_wrapper
def _on_phase_start(self, instance):
pass
def _on_phase_exit(self, instance):
# If a phase has a matching last_phase attribute,
# stop the installation process raising a StopIteration
if getattr(instance, 'last_phase', None) == self.name:
raise StopIteration('Stopping at \'{0}\' phase'.format(self.name))
def copy(self):
try:
return copy.deepcopy(self)
except TypeError:
# This bug-fix was not back-ported in Python 2.6
# http://bugs.python.org/issue1515
other = InstallPhase(self.name)
other.run_before.extend(self.run_before)
other.run_after.extend(self.run_after)
return other
class PackageMeta(
spack.directives.DirectiveMeta,
spack.mixins.PackageMixinsMeta,
spack.multimethod.MultiMethodMeta
):
"""
Package metaclass for supporting directives (e.g., depends_on) and phases
"""
phase_fmt = '_InstallPhase_{0}'
_InstallPhase_run_before = {}
_InstallPhase_run_after = {}
def __new__(cls, name, bases, attr_dict):
"""
Instance creation is preceded by phase attribute transformations.
Conveniently transforms attributes to permit extensible phases by
iterating over the attribute 'phases' and creating / updating private
InstallPhase attributes in the class that will be initialized in
__init__.
"""
if 'phases' in attr_dict:
# Turn the strings in 'phases' into InstallPhase instances
# and add them as private attributes
_InstallPhase_phases = [PackageMeta.phase_fmt.format(x) for x in attr_dict['phases']] # NOQA: ignore=E501
for phase_name, callback_name in zip(_InstallPhase_phases, attr_dict['phases']): # NOQA: ignore=E501
attr_dict[phase_name] = InstallPhase(callback_name)
attr_dict['_InstallPhase_phases'] = _InstallPhase_phases
def _flush_callbacks(check_name):
# Name of the attribute I am going to check it exists
attr_name = PackageMeta.phase_fmt.format(check_name)
checks = getattr(cls, attr_name)
if checks:
for phase_name, funcs in checks.items():
try:
# Search for the phase in the attribute dictionary
phase = attr_dict[
PackageMeta.phase_fmt.format(phase_name)]
except KeyError:
# If it is not there it's in the bases
# and we added a check. We need to copy
# and extend
for base in bases:
phase = getattr(
base,
PackageMeta.phase_fmt.format(phase_name),
None
)
if phase is not None:
break
attr_dict[PackageMeta.phase_fmt.format(
phase_name)] = phase.copy()
phase = attr_dict[
PackageMeta.phase_fmt.format(phase_name)]
getattr(phase, check_name).extend(funcs)
# Clear the attribute for the next class
setattr(cls, attr_name, {})
_flush_callbacks('run_before')
_flush_callbacks('run_after')
return super(PackageMeta, cls).__new__(cls, name, bases, attr_dict)
@staticmethod
def register_callback(check_type, *phases):
def _decorator(func):
attr_name = PackageMeta.phase_fmt.format(check_type)
check_list = getattr(PackageMeta, attr_name)
for item in phases:
checks = check_list.setdefault(item, [])
checks.append(func)
setattr(PackageMeta, attr_name, check_list)
return func
return _decorator
@property
def package_dir(self):
"""Directory where the package.py file lives."""
return os.path.abspath(os.path.dirname(self.module.__file__))
@property
def module(self):
"""Module object (not just the name) that this package is defined in.
We use this to add variables to package modules. This makes
install() methods easier to write (e.g., can call configure())
"""
return __import__(self.__module__, fromlist=[self.__name__])
@property
def namespace(self):
"""Spack namespace for the package, which identifies its repo."""
namespace, dot, module = self.__module__.rpartition('.')
prefix = '%s.' % spack.repo.repo_namespace
if namespace.startswith(prefix):
namespace = namespace[len(prefix):]
return namespace
@property
def fullname(self):
"""Name of this package, including the namespace"""
return '%s.%s' % (self.namespace, self.name)
@property
def name(self):
"""The name of this package.
The name of a package is the name of its Python module, without
the containing module names.
"""
if not hasattr(self, '_name'):
self._name = self.module.__name__
if '.' in self._name:
self._name = self._name[self._name.rindex('.') + 1:]
return self._name
def run_before(*phases):
"""Registers a method of a package to be run before a given phase"""
return PackageMeta.register_callback('run_before', *phases)
def run_after(*phases):
"""Registers a method of a package to be run after a given phase"""
return PackageMeta.register_callback('run_after', *phases)
def on_package_attributes(**attr_dict):
"""Decorator: executes instance function only if object has attr valuses.
Executes the decorated method only if at the moment of calling the
instance has attributes that are equal to certain values.
Args:
attr_dict (dict): dictionary mapping attribute names to their
required values
"""
def _execute_under_condition(func):
@functools.wraps(func)
def _wrapper(instance, *args, **kwargs):
# If all the attributes have the value we require, then execute
has_all_attributes = all(
[hasattr(instance, key) for key in attr_dict]
)
if has_all_attributes:
has_the_right_values = all(
[getattr(instance, key) == value for key, value in attr_dict.items()] # NOQA: ignore=E501
)
if has_the_right_values:
func(instance, *args, **kwargs)
return _wrapper
return _execute_under_condition
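# Illustrative usage (an assumption, not taken from this file): inside a
# package class these decorators are commonly stacked to register a check
# that only runs when the user requested tests, e.g.:
#
#     @run_after('install')
#     @on_package_attributes(run_tests=True)
#     def check_install(self):
#         ...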
class PackageViewMixin(object):
"""This collects all functionality related to adding installed Spack
package to views. Packages can customize how they are added to views by
overriding these functions.
"""
def view_source(self):
"""The source root directory that will be added to the view: files are
added such that their path relative to the view destination matches
their path relative to the view source.
"""
return self.spec.prefix
def view_destination(self, view):
"""The target root directory: each file is added relative to this
directory.
"""
return view.get_projection_for_spec(self.spec)
def view_file_conflicts(self, view, merge_map):
"""Report any files which prevent adding this package to the view. The
default implementation looks for any files which already exist.
Alternative implementations may allow some of the files to exist in
the view (in this case they would be omitted from the results).
"""
return set(dst for dst in merge_map.values() if os.path.exists(dst))
def add_files_to_view(self, view, merge_map):
"""Given a map of package files to destination paths in the view, add
the files to the view. By default this adds all files. Alternative
implementations may skip some files, for example if other packages
linked into the view already include the file.
"""
for src, dst in merge_map.items():
if not os.path.exists(dst):
view.link(src, dst)
def remove_files_from_view(self, view, merge_map):
"""Given a map of package files to files currently linked in the view,
remove the files from the view. The default implementation removes all
files. Alternative implementations may not remove all files. For
example if two packages include the same file, it should only be
removed when both packages are removed.
"""
for src, dst in merge_map.items():
view.remove_file(src, dst)
class PackageBase(with_metaclass(PackageMeta, PackageViewMixin, object)):
"""This is the superclass for all spack packages.
***The Package class***
At its core, a package consists of a set of software to be installed.
A package may focus on a piece of software and its associated software
dependencies or it may simply be a set, or bundle, of software. The
former requires defining how to fetch, verify (via, e.g., sha256), build,
and install that software and the packages it depends on, so that
dependencies can be installed along with the package itself. The latter,
sometimes referred to as a ``no-source`` package, requires only defining
the packages to be built.
Packages are written in pure Python.
There are two main parts of a Spack package:
1. **The package class**. Classes contain ``directives``, which are
special functions that add metadata (versions, patches,
dependencies, and other information) to packages (see
``directives.py``). Directives provide the constraints that are
used as input to the concretizer.
2. **Package instances**. Once instantiated, a package is
essentially a software installer. Spack calls methods like
``do_install()`` on the ``Package`` object, and it uses those to
drive user-implemented methods like ``patch()``, ``install()``, and
other build steps. To install software, an instantiated package
needs a *concrete* spec, which guides the behavior of the various
install methods.
Packages are imported from repos (see ``repo.py``).
**Package DSL**
Look in ``lib/spack/docs`` or check https://spack.readthedocs.io for
the full documentation of the package domain-specific language. That
used to be partially documented here, but as it grew, the docs here
became increasingly out of date.
**Package Lifecycle**
A package's lifecycle over a run of Spack looks something like this:
.. code-block:: python
p = Package() # Done for you by spack
p.do_fetch() # downloads tarball from a URL (or VCS)
p.do_stage() # expands tarball in a temp directory
p.do_patch() # applies patches to expanded source
p.do_install() # calls package's install() function
p.do_uninstall() # removes install directory
although packages that do not have code have nothing to fetch so omit
``p.do_fetch()``.
There are also some other commands that clean the build area:
.. code-block:: python
p.do_clean() # removes the stage directory entirely
p.do_restage() # removes the build directory and
# re-expands the archive.
The convention used here is that a ``do_*`` function is intended to be
called internally by Spack commands (in ``spack.cmd``). These aren't for
package writers to override, and doing so may break the functionality
of the Package class.
Package creators have a lot of freedom, and they could technically
override anything in this class. That is not usually required.
For most use cases, package creators typically just add attributes
like ``homepage`` and, for a code-based package, ``url``, or functions
such as ``install()``.
There are many custom ``Package`` subclasses in the
``spack.build_systems`` package that make things even easier for
specific build systems.
"""
#
# These are default values for instance variables.
#
#: A list or set of build time test functions to be called when tests
#: are executed or 'None' if there are no such test functions.
build_time_test_callbacks = None
#: Most Spack packages are used to install source or binary code while
#: those that do not can be used to install a set of other Spack packages.
has_code = True
#: A list or set of install time test functions to be called when tests
#: are executed or 'None' if there are no such test functions.
install_time_test_callbacks = None
#: By default we build in parallel. Subclasses can override this.
parallel = True
#: By default do not run tests within package's install()
run_tests = False
# FIXME: this is a bad object-oriented design, should be moved to Clang.
#: By default do not setup mockup XCode on macOS with Clang
use_xcode = False
#: Most packages are NOT extendable. Set to True if you want extensions.
extendable = False
#: When True, add RPATHs for the entire DAG. When False, add RPATHs only
#: for immediate dependencies.
transitive_rpaths = True
#: List of prefix-relative file paths (or a single path). If these do
#: not exist after install, or if they exist but are not files,
#: sanity checks fail.
sanity_check_is_file = []
#: List of prefix-relative directory paths (or a single path). If
#: these do not exist after install, or if they exist but are not
#: directories, sanity checks will fail.
sanity_check_is_dir = []
#: List of glob expressions. Each expression must either be
#: absolute or relative to the package source path.
#: Matching artifacts found at the end of the build process will be
#: copied in the same directory tree as _spack_build_logfile and
#: _spack_build_envfile.
archive_files = []
#: Boolean. Set to ``True`` for packages that require a manual download.
#: This is currently only used by package sanity tests.
manual_download = False
#
# Set default licensing information
#
#: Boolean. If set to ``True``, this software requires a license.
#: If set to ``False``, all of the ``license_*`` attributes will
#: be ignored. Defaults to ``False``.
license_required = False
#: String. Contains the symbol used by the license manager to denote
#: a comment. Defaults to ``#``.
license_comment = '#'
#: List of strings. These are files that the software searches for when
#: looking for a license. All file paths must be relative to the
#: installation directory. More complex packages like Intel may require
#: multiple licenses for individual components. Defaults to the empty list.
license_files = []
#: List of strings. Environment variables that can be set to tell the
#: software where to look for a license if it is not in the usual location.
#: Defaults to the empty list.
license_vars = []
#: String. A URL pointing to license setup instructions for the software.
#: Defaults to the empty string.
license_url = ''
#: Verbosity level, preserved across installs.
_verbose = None
#: index of patches by sha256 sum, built lazily
_patches_by_hash = None
#: List of strings which contains GitHub usernames of package maintainers.
#: Do not include @ here in order not to unnecessarily ping the users.
maintainers = []
#: List of attributes to be excluded from a package's hash.
metadata_attrs = ['homepage', 'url', 'urls', 'list_url', 'extendable',
'parallel', 'make_jobs']
def __init__(self, spec):
# this determines how the package should be built.
self.spec = spec
# Allow custom staging paths for packages
self.path = None
# Keep track of whether or not this package was installed from
# a binary cache.
self.installed_from_binary_cache = False
# Ensure that only one of these two attributes are present
if getattr(self, 'url', None) and getattr(self, 'urls', None):
msg = "a package can have either a 'url' or a 'urls' attribute"
msg += " [package '{0.name}' defines both]"
raise ValueError(msg.format(self))
# Set a default list URL (place to find available versions)
if not hasattr(self, 'list_url'):
self.list_url = None
if not hasattr(self, 'list_depth'):
self.list_depth = 0
# init internal variables
self._stage = None
self._fetcher = None
# Set up timing variables
self._fetch_time = 0.0
self._total_time = 0.0
if self.is_extension:
spack.repo.get(self.extendee_spec)._check_extendable()
super(PackageBase, self).__init__()
@property
def installed_upstream(self):
if not hasattr(self, '_installed_upstream'):
upstream, record = spack.store.db.query_by_spec_hash(
self.spec.dag_hash())
self._installed_upstream = upstream
return self._installed_upstream
@classmethod
def possible_dependencies(
cls, transitive=True, expand_virtuals=True, deptype='all',
visited=None, missing=None):
"""Return dict of possible dependencies of this package.
Args:
transitive (bool, optional): return all transitive dependencies if
True, only direct dependencies if False (default True).
expand_virtuals (bool, optional): expand virtual dependencies into
all possible implementations (default True)
deptype (str or tuple, optional): dependency types to consider
visited (dict, optional): dict of names of dependencies visited so
far, mapped to their immediate dependencies' names.
missing (dict, optional): dict to populate with packages and their
*missing* dependencies.
Returns:
(dict): dictionary mapping dependency names to *their*
immediate dependencies
Each item in the returned dictionary maps a (potentially
transitive) dependency of this package to its possible
*immediate* dependencies. If ``expand_virtuals`` is ``False``,
virtual package names will be inserted as keys mapped to empty
sets of dependencies. Virtuals, if not expanded, are treated as
though they have no immediate dependencies.
Missing dependencies by default are ignored, but if a
missing dict is provided, it will be populated with package names
mapped to any dependencies they have that are in no
repositories. This is only populated if transitive is True.
Note: the returned dict *includes* the package itself.
"""
deptype = spack.dependency.canonical_deptype(deptype)
if visited is None:
visited = {cls.name: set()}
if missing is None:
missing = {cls.name: set()}
for name, conditions in cls.dependencies.items():
# check whether this dependency could be of the type asked for
types = [dep.type for cond, dep in conditions.items()]
types = set.union(*types)
if not any(d in types for d in deptype):
continue
# expand virtuals if enabled, otherwise just stop at virtuals
if spack.repo.path.is_virtual(name):
if expand_virtuals:
providers = spack.repo.path.providers_for(name)
dep_names = [spec.name for spec in providers]
else:
visited.setdefault(name, set())
continue
else:
dep_names = [name]
# add the dependency names to the visited dict
visited.setdefault(cls.name, set()).update(set(dep_names))
# recursively traverse dependencies
for dep_name in dep_names:
if dep_name in visited:
continue
visited.setdefault(dep_name, set())
# skip the rest if not transitive
if not transitive:
continue
try:
dep_cls = spack.repo.path.get_pkg_class(dep_name)
except spack.repo.UnknownPackageError:
# log unknown packages
missing.setdefault(cls.name, set()).add(dep_name)
continue
dep_cls.possible_dependencies(
transitive, expand_virtuals, deptype, visited, missing)
return visited
# package_dir and module are *class* properties (see PackageMeta),
# but to make them work on instances we need these defs as well.
@property
def package_dir(self):
"""Directory where the package.py file lives."""
return type(self).package_dir
@property
def module(self):
"""Module object that this package is defined in."""
return type(self).module
@property
def namespace(self):
"""Spack namespace for the package, which identifies its repo."""
return type(self).namespace
@property
def fullname(self):
"""Name of this package, including namespace: namespace.name."""
return type(self).fullname
@property
def name(self):
"""Name of this package (the module without parent modules)."""
return type(self).name
@property
def global_license_dir(self):
"""Returns the directory where global license files for all
packages are stored."""
return os.path.join(spack.paths.prefix, 'etc', 'spack', 'licenses')
@property
def global_license_file(self):
"""Returns the path where a global license file for this
particular package should be stored."""
if not self.license_files:
return
return os.path.join(self.global_license_dir, self.name,
os.path.basename(self.license_files[0]))
@property
def version(self):
if not self.spec.versions.concrete:
raise ValueError("Version requested for a package that"
" does not have a concrete version.")
return self.spec.versions[0]
@memoized
def version_urls(self):
"""OrderedDict of explicitly defined URLs for versions of this package.
Return:
An OrderedDict (version -> URL) of the different versions of this
package, sorted by version.
A version's URL only appears in the result if it has an
explicitly defined ``url`` argument. So, this list may be empty
if a package only defines ``url`` at the top level.
"""
version_urls = OrderedDict()
for v, args in sorted(self.versions.items()):
if 'url' in args:
version_urls[v] = args['url']
return version_urls
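# For reference, per-version URLs typically come from ``version()`` directives
# in a package recipe, e.g. (hypothetical package and URL, not from the
# original source):
#
#     version('1.2.3', sha256='...',
#             url='https://example.com/releases/foo-1.2.3.tar.gz')
#
# Only versions declared with an explicit ``url`` show up in version_urls().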
def nearest_url(self, version):
"""Finds the URL with the "closest" version to ``version``.
This uses the following precedence order:
1. Find the next lowest or equal version with a URL.
2. If no lower URL, return the next *higher* URL.
3. If no higher URL, return None.
"""
version_urls = self.version_urls()
if version in version_urls:
return version_urls[version]
last_url = None
for v, u in self.version_urls().items():
if v > version:
if last_url:
return last_url
last_url = u
return last_url
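# A small worked example of the precedence above (hypothetical versions and
# URLs): if only 1.0 and 2.0 define URLs, then nearest_url(1.5) and
# nearest_url(3.0) return the 1.0 and 2.0 URLs respectively (next lower or
# equal), while nearest_url(0.5) falls back to the 1.0 URL (the next higher
# one).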
def url_for_version(self, version):
"""Returns a URL from which the specified version of this package
may be downloaded.
Args:
    version (Version): the version for which a URL is sought
        (see ``Version`` in version.py).
"""
if not isinstance(version, Version):
version = Version(version)
# If we have a specific URL for this version, don't extrapolate.
version_urls = self.version_urls()
if version in version_urls:
return version_urls[version]
# If no specific URL, use the default, class-level URL
url = getattr(self, 'url', None)
urls = getattr(self, 'urls', [None])
default_url = url or urls[0]
# if no exact match AND no class-level default, use the nearest URL
if not default_url:
default_url = self.nearest_url(version)
# if there are NO URLs to go by, then we can't do anything
if not default_url:
raise NoURLError(self.__class__)
return spack.url.substitute_version(
default_url, self.url_version(version))
def _make_resource_stage(self, root_stage, fetcher, resource):
resource_stage_folder = self._resource_stage(resource)
mirror_paths = spack.mirror.mirror_archive_paths(
fetcher,
os.path.join(self.name, "%s-%s" % (resource.name, self.version)))
stage = ResourceStage(resource.fetcher,
root=root_stage,
resource=resource,
name=resource_stage_folder,
mirror_paths=mirror_paths,
path=self.path)
return stage
def _make_root_stage(self, fetcher):
# Construct a mirror path (TODO: get this out of package.py)
mirror_paths = spack.mirror.mirror_archive_paths(
fetcher,
os.path.join(self.name, "%s-%s" % (self.name, self.version)),
self.spec)
# Construct a path where the stage should build.
s = self.spec
stage_name = "{0}{1}-{2}-{3}".format(stage_prefix, s.name, s.version,
s.dag_hash())
def download_search():
dynamic_fetcher = fs.from_list_url(self)
return [dynamic_fetcher] if dynamic_fetcher else []
stage = Stage(fetcher, mirror_paths=mirror_paths, name=stage_name,
path=self.path, search_fn=download_search)
return stage
def _make_stage(self):
# Construct a composite stage on top of the composite FetchStrategy
composite_fetcher = self.fetcher
composite_stage = StageComposite()
resources = self._get_needed_resources()
for ii, fetcher in enumerate(composite_fetcher):
if ii == 0:
# Construct root stage first
stage = self._make_root_stage(fetcher)
else:
# Construct resource stage
resource = resources[ii - 1] # ii == 0 is root!
stage = self._make_resource_stage(composite_stage[0], fetcher,
resource)
# Append the item to the composite
composite_stage.append(stage)
return composite_stage
@property
def stage(self):
"""Get the build staging area for this package.
This automatically instantiates a ``Stage`` object if the package
doesn't have one yet, but it does not create the Stage directory
on the filesystem.
"""
if not self.spec.versions.concrete:
raise ValueError(
"Cannot retrieve stage for package without concrete version.")
if self._stage is None:
self._stage = self._make_stage()
return self._stage
@stage.setter
def stage(self, stage):
"""Allow a stage object to be set to override the default."""
self._stage = stage
@property
def env_path(self):
"""Return the build environment file path associated with staging."""
# Backward compatibility: Return the name of an existing env file;
# otherwise, return the current build env file path.
old_filename = os.path.join(self.stage.path, 'spack-build.env')
if os.path.exists(old_filename):
return old_filename
else:
return os.path.join(self.stage.path, _spack_build_envfile)
@property
def install_env_path(self):
"""
Return the build environment file path on successful installation.
"""
install_path = spack.store.layout.metadata_path(self.spec)
# Backward compatibility: Return the name of an existing env file;
# otherwise, return the current install env file path.
old_filename = os.path.join(install_path, 'build.env')
if os.path.exists(old_filename):
return old_filename
else:
return os.path.join(install_path, _spack_build_envfile)
@property
def log_path(self):
"""Return the build log file path associated with staging."""
# Backward compatibility: Return the name of an existing log path.
for filename in ['spack-build.out', 'spack-build.txt']:
old_log = os.path.join(self.stage.path, filename)
if os.path.exists(old_log):
return old_log
# Otherwise, return the current log path name.
return os.path.join(self.stage.path, _spack_build_logfile)
@property
def install_log_path(self):
"""Return the build log file path on successful installation."""
install_path = spack.store.layout.metadata_path(self.spec)
# Backward compatibility: Return the name of an existing install log.
for filename in ['build.out', 'build.txt']:
old_log = os.path.join(install_path, filename)
if os.path.exists(old_log):
return old_log
# Otherwise, return the current install log path name.
return os.path.join(install_path, _spack_build_logfile)
@property
def configure_args_path(self):
"""Return the configure args file path associated with staging."""
return os.path.join(self.stage.path, _spack_configure_argsfile)
@property
def install_configure_args_path(self):
"""Return the configure args file path on successful installation."""
install_path = spack.store.layout.metadata_path(self.spec)
return os.path.join(install_path, _spack_configure_argsfile)
def _make_fetcher(self):
# Construct a composite fetcher that always contains at least
# one element (the root package). In case there are resources
# associated with the package, append their fetcher to the
# composite.
root_fetcher = fs.for_package_version(self, self.version)
fetcher = fs.FetchStrategyComposite() # Composite fetcher
fetcher.append(root_fetcher) # Root fetcher is always present
resources = self._get_needed_resources()
for resource in resources:
fetcher.append(resource.fetcher)
return fetcher
@property
def fetcher(self):
if not self.spec.versions.concrete:
raise ValueError("Cannot retrieve fetcher for"
" package without concrete version.")
if not self._fetcher:
self._fetcher = self._make_fetcher()
return self._fetcher
@fetcher.setter
def fetcher(self, f):
self._fetcher = f
def dependencies_of_type(self, *deptypes):
"""Get dependencies that can possibly have these deptypes.
This analyzes the package and determines which dependencies *can*
be a certain kind of dependency. Note that they may not *always*
be this kind of dependency, since dependencies can be optional,
so something may be a build dependency in one configuration and a
run dependency in another.
"""
return dict(
(name, conds) for name, conds in self.dependencies.items()
if any(dt in self.dependencies[name][cond].type
for cond in conds for dt in deptypes))
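# Illustrative usage (assumed call site, not part of the original source):
#
#     build_deps = pkg.dependencies_of_type('build')
#     link_or_run = pkg.dependencies_of_type('link', 'run')
#
# Each call returns a dict mapping dependency names to the conditions under
# which they *can* carry one of the requested deptypes.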
@property
def extendee_spec(self):
"""
Spec of the extendee of this package, or None if it is not an extension
"""
if not self.extendees:
return None
# TODO: allow more than one extendee.
name = next(iter(self.extendees))
# If the extendee is in the spec's deps already, return that.
for dep in self.spec.traverse(deptypes=('link', 'run')):
if name == dep.name:
return dep
# if the spec is concrete already, then it extends something
# that is an *optional* dependency, and the dep isn't there.
if self.spec._concrete:
return None
else:
# If it's not concrete, then return the spec from the
# extends() directive since that is all we know so far.
spec, kwargs = self.extendees[name]
return spec
@property
def extendee_args(self):
"""
Arguments given to the ``extends()`` directive for this package's extendee, or None if it is not an extension
"""
if not self.extendees:
return None
# TODO: allow multiple extendees.
name = next(iter(self.extendees))
return self.extendees[name][1]
@property
def is_extension(self):
# if it is concrete, it's only an extension if it actually
depends on the extendee.
if self.spec._concrete:
return self.extendee_spec is not None
else:
# If not, then it's an extension if it *could* be an extension
return bool(self.extendees)
def extends(self, spec):
'''
Returns True if this package extends the given spec.
If ``self.spec`` is concrete, this returns whether this package extends
the given spec.
If ``self.spec`` is not concrete, this returns whether this package may
extend the given spec.
'''
if spec.name not in self.extendees:
return False
s = self.extendee_spec
return s and spec.satisfies(s)
def is_activated(self, view):
"""Return True if package is activated."""
if not self.is_extension:
raise ValueError(
"is_activated called on package that is not an extension.")
extensions_layout = view.extensions_layout
exts = extensions_layout.extension_map(self.extendee_spec)
return (self.name in exts) and (exts[self.name] == self.spec)
def provides(self, vpkg_name):
"""
True if this package provides a virtual package with the specified name
"""
return any(
any(self.spec.satisfies(c) for c in constraints)
for s, constraints in self.provided.items() if s.name == vpkg_name
)
@property
def installed(self):
"""Installation status of a package.
Returns:
True if the package has been installed, False otherwise.
"""
has_prefix = os.path.isdir(self.prefix)
try:
# If the spec is in the DB, check the installed
# attribute of the record
rec = spack.store.db.get_record(self.spec)
db_says_installed = rec.installed
except KeyError:
# If the spec is not in the DB, the method
# above raises a Key error
db_says_installed = False
return has_prefix and db_says_installed
@property
def prefix(self):
"""Get the prefix into which this package should be installed."""
return self.spec.prefix
@property
def architecture(self):
"""Get the spack.architecture.Arch object that represents the
environment in which this package will be built."""
if not self.spec.concrete:
raise ValueError("Can only get the arch for concrete package.")
return spack.architecture.arch_for_spec(self.spec.architecture)
@property
def compiler(self):
"""Get the spack.compiler.Compiler object used to build this package"""
if not self.spec.concrete:
raise ValueError("Can only get a compiler for a concrete package.")
return spack.compilers.compiler_for_spec(self.spec.compiler,
self.spec.architecture)
def url_version(self, version):
"""
Given a version, this returns a string that should be substituted
into the package's URL to download that version.
By default, this just returns the version string. Subclasses may need
to override this, e.g. for boost versions where you need to ensure that
there are _'s in the download URL.
"""
return str(version)
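# A package recipe can override this hook when release URLs do not use the
# plain dotted version. Hypothetical sketch (the replacement rule is only an
# example):
#
#     def url_version(self, version):
#         # e.g. boost-style URLs use 1_72_0 instead of 1.72.0
#         return str(version).replace('.', '_')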
def remove_prefix(self):
"""
Removes the prefix for a package along with any empty parent
directories
"""
spack.store.layout.remove_install_directory(self.spec)
def do_fetch(self, mirror_only=False):
"""
Creates a stage directory and downloads the tarball for this package.
Working directory will be set to the stage directory.
"""
if not self.spec.concrete:
raise ValueError("Can only fetch concrete packages.")
if not self.has_code:
tty.msg(
"No fetch required for %s: package has no code." % self.name
)
start_time = time.time()
checksum = spack.config.get('config:checksum')
if checksum and self.version not in self.versions:
tty.warn("There is no checksum on file to fetch %s safely." %
self.spec.cformat('{name}{@version}'))
# Ask the user whether to skip the checksum if we're
# interactive, but just fail if non-interactive.
ck_msg = "Add a checksum or use --no-checksum to skip this check."
ignore_checksum = False
if sys.stdout.isatty():
ignore_checksum = tty.get_yes_or_no(" Fetch anyway?",
default=False)
if ignore_checksum:
tty.msg("Fetching with no checksum.", ck_msg)
if not ignore_checksum:
raise FetchError("Will not fetch %s" %
self.spec.format('{name}{@version}'), ck_msg)
self.stage.create()
self.stage.fetch(mirror_only)
self._fetch_time = time.time() - start_time
if checksum and self.version in self.versions:
self.stage.check()
self.stage.cache_local()
for patch in self.spec.patches:
patch.fetch()
if patch.stage:
patch.stage.cache_local()
def do_stage(self, mirror_only=False):
"""Unpacks and expands the fetched tarball."""
if not self.spec.concrete:
raise ValueError("Can only stage concrete packages.")
# Always create the stage directory at this point. Why? A no-code
# package may want to use the installation process to install metadata.
self.stage.create()
# Fetch/expand any associated code.
if self.has_code:
self.do_fetch(mirror_only)
self.stage.expand_archive()
if not os.listdir(self.stage.path):
raise FetchError("Archive was empty for %s" % self.name)
else:
# Support for post-install hooks requires a stage.source_path
mkdirp(self.stage.source_path)
def do_patch(self):
"""Applies patches if they haven't been applied already."""
if not self.spec.concrete:
raise ValueError("Can only patch concrete packages.")
# Kick off the stage first. This creates the stage.
self.do_stage()
# Package can add its own patch function.
has_patch_fun = hasattr(self, 'patch') and callable(self.patch)
# Get the patches from the spec (this is a shortcut for the MV-variant)
patches = self.spec.patches
# If there are no patches, note it.
if not patches and not has_patch_fun:
tty.msg("No patches needed for %s" % self.name)
return
# Construct paths to special files in the archive dir used to
# keep track of whether patches were successfully applied.
archive_dir = self.stage.source_path
good_file = os.path.join(archive_dir, '.spack_patched')
no_patches_file = os.path.join(archive_dir, '.spack_no_patches')
bad_file = os.path.join(archive_dir, '.spack_patch_failed')
# If we encounter an archive that failed to patch, restage it
# so that we can apply all the patches again.
if os.path.isfile(bad_file):
tty.msg("Patching failed last time. Restaging.")
self.stage.restage()
# If this file exists, then we already applied all the patches.
if os.path.isfile(good_file):
tty.msg("Already patched %s" % self.name)
return
elif os.path.isfile(no_patches_file):
tty.msg("No patches needed for %s" % self.name)
return
# Apply all the patches for specs that match this one
patched = False
for patch in patches:
try:
with working_dir(self.stage.source_path):
patch.apply(self.stage)
tty.msg('Applied patch %s' % patch.path_or_url)
patched = True
except spack.error.SpackError as e:
tty.debug(e)
# Touch bad file if anything goes wrong.
tty.msg('Patch %s failed.' % patch.path_or_url)
touch(bad_file)
raise
if has_patch_fun:
try:
with working_dir(self.stage.source_path):
self.patch()
tty.msg("Ran patch() for %s" % self.name)
patched = True
except spack.multimethod.NoSuchMethodError:
# We are running a multimethod without a default case.
# If there's no default it means we don't need to patch.
if not patched:
# if we didn't apply a patch from a patch()
# directive, AND the patch function didn't apply, say
# no patches are needed. Otherwise, we already
# printed a message for each patch.
tty.msg("No patches needed for %s" % self.name)
except spack.error.SpackError as e:
tty.debug(e)
# Touch bad file if anything goes wrong.
tty.msg("patch() function failed for %s" % self.name)
touch(bad_file)
raise
# Get rid of any old failed file -- patches have either succeeded
# or are not needed. This is mostly defensive -- it's needed
# if the restage() method doesn't clean *everything* (e.g., for a repo)
if os.path.isfile(bad_file):
os.remove(bad_file)
# touch good or no patches file so that we skip next time.
if patched:
touch(good_file)
else:
touch(no_patches_file)
@classmethod
def all_patches(cls):
"""Retrieve all patches associated with the package.
Retrieves patches on the package itself as well as patches on the
dependencies of the package."""
patches = []
for _, patch_list in cls.patches.items():
for patch in patch_list:
patches.append(patch)
pkg_deps = cls.dependencies
for dep_name in pkg_deps:
for _, dependency in pkg_deps[dep_name].items():
for _, patch_list in dependency.patches.items():
for patch in patch_list:
patches.append(patch)
return patches
def content_hash(self, content=None):
"""Create a hash based on the sources and logic used to build the
package. This includes the contents of all applied patches and the
contents of applicable functions in the package subclass."""
if not self.spec.concrete:
err_msg = ("Cannot invoke content_hash on a package"
" if the associated spec is not concrete")
raise spack.error.SpackError(err_msg)
hash_content = list()
try:
source_id = fs.for_package_version(self, self.version).source_id()
except fs.ExtrapolationError:
source_id = None
if not source_id:
# TODO? in cases where a digest or source_id isn't available,
# should this attempt to download the source and set one? This
# probably only happens for source repositories which are
# referenced by branch name rather than tag or commit ID.
message = 'Missing a source id for {s.name}@{s.version}'
tty.warn(message.format(s=self))
hash_content.append(''.encode('utf-8'))
else:
hash_content.append(source_id.encode('utf-8'))
hash_content.extend(':'.join((p.sha256, str(p.level))).encode('utf-8')
for p in self.spec.patches)
hash_content.append(package_hash(self.spec, content))
return base64.b32encode(
hashlib.sha256(bytes().join(
sorted(hash_content))).digest()).lower()
def _has_make_target(self, target):
"""Checks to see if 'target' is a valid target in a Makefile.
Parameters:
target (str): the target to check for
Returns:
bool: True if 'target' is found, else False
"""
# Prevent altering LC_ALL for 'make' outside this function
make = copy.deepcopy(inspect.getmodule(self).make)
# Use English locale for missing target message comparison
make.add_default_env('LC_ALL', 'C')
# Check if we have a Makefile
for makefile in ['GNUmakefile', 'Makefile', 'makefile']:
if os.path.exists(makefile):
break
else:
tty.msg('No Makefile found in the build directory')
return False
# Check if 'target' is a valid target.
#
# `make -n target` performs a "dry run". It prints the commands that
# would be run but doesn't actually run them. If the target does not
# exist, you will see one of the following error messages:
#
# GNU Make:
# make: *** No rule to make target `test'. Stop.
# *** No rule to make target 'test'. Stop.
#
# BSD Make:
# make: don't know how to make test. Stop
missing_target_msgs = [
"No rule to make target `{0}'. Stop.",
"No rule to make target '{0}'. Stop.",
"don't know how to make {0}. Stop",
]
kwargs = {
'fail_on_error': False,
'output': os.devnull,
'error': str,
}
stderr = make('-n', target, **kwargs)
for missing_target_msg in missing_target_msgs:
if missing_target_msg.format(target) in stderr:
tty.msg("Target '" + target + "' not found in " + makefile)
return False
return True
def _if_make_target_execute(self, target, *args, **kwargs):
"""Runs ``make target`` if 'target' is a valid target in the Makefile.
Parameters:
target (str): the target to potentially execute
"""
if self._has_make_target(target):
# Execute target
inspect.getmodule(self).make(target, *args, **kwargs)
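# Typical (illustrative) use from a package's build or test phase, assuming a
# Makefile-based build; the target name is only an example:
#
#     self._if_make_target_execute('check')
#
# This runs ``make check`` only when the Makefile actually defines a 'check'
# target, and silently skips it otherwise.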
def _has_ninja_target(self, target):
"""Checks to see if 'target' is a valid target in a Ninja build script.
Parameters:
target (str): the target to check for
Returns:
bool: True if 'target' is found, else False
"""
ninja = inspect.getmodule(self).ninja
# Check if we have a Ninja build script
if not os.path.exists('build.ninja'):
tty.msg('No Ninja build script found in the build directory')
return False
# Get a list of all targets in the Ninja build script
# https://ninja-build.org/manual.html#_extra_tools
all_targets = ninja('-t', 'targets', 'all', output=str).split('\n')
# Check if 'target' is a valid target
matches = [line for line in all_targets
if line.startswith(target + ':')]
if not matches:
tty.msg("Target '" + target + "' not found in build.ninja")
return False
return True
def _if_ninja_target_execute(self, target, *args, **kwargs):
"""Runs ``ninja target`` if 'target' is a valid target in the Ninja
build script.
Parameters:
target (str): the target to potentially execute
"""
if self._has_ninja_target(target):
# Execute target
inspect.getmodule(self).ninja(target, *args, **kwargs)
def _get_needed_resources(self):
resources = []
# Select the resources that are needed for this build
if self.spec.concrete:
for when_spec, resource_list in self.resources.items():
if when_spec in self.spec:
resources.extend(resource_list)
else:
for when_spec, resource_list in self.resources.items():
# Note that variant checking is always strict for specs where
# the name is not specified. But with strict variant checking,
# only variants mentioned in 'other' are checked. Here we only
# want to make sure that no constraints in when_spec
# conflict with the spec, so we need to invoke
# when_spec.satisfies(self.spec) vs.
# self.spec.satisfies(when_spec)
if when_spec.satisfies(self.spec, strict=False):
resources.extend(resource_list)
# Sort the resources by the length of their destination string. Since
# any nested resource must contain another resource's name in its
# path, this processes parent resources before the resources nested
# inside them.
resources = sorted(resources, key=lambda res: len(res.destination))
return resources
def _resource_stage(self, resource):
pieces = ['resource', resource.name, self.spec.dag_hash()]
resource_stage_folder = '-'.join(pieces)
return resource_stage_folder
@contextlib.contextmanager
def _stage_and_write_lock(self):
"""Prefix lock nested in a stage."""
with self.stage:
with spack.store.db.prefix_write_lock(self.spec):
yield
def do_install(self, **kwargs):
"""Called by commands to install a package and or its dependencies.
Package implementations should override install() to describe
their build process.
Args:"""
builder = PackageInstaller(self)
builder.install(**kwargs)
do_install.__doc__ += install_args_docstring
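# Illustrative call site (an assumption, not from the original source):
# commands usually reach this through a concrete spec, e.g.
#
#     spec = spack.spec.Spec('zlib').concretized()
#     spec.package.do_install()
#
# Keyword arguments are forwarded to PackageInstaller.install(); see
# install_args_docstring for the accepted options.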
def unit_test_check(self):
"""Hook for unit tests to assert things about package internals.
Unit tests can override this function to perform checks after
``Package.install`` and all post-install hooks run, but before
the database is updated.
The overridden function may indicate that the install procedure
should terminate early (before updating the database) by
returning ``False`` (or any value such that ``bool(result)`` is
``False``).
Return:
(bool): ``True`` to continue, ``False`` to skip ``install()``
"""
return True
def sanity_check_prefix(self):
"""This function checks whether install succeeded."""
def check_paths(path_list, filetype, predicate):
if isinstance(path_list, string_types):
path_list = [path_list]
for path in path_list:
abs_path = os.path.join(self.prefix, path)
if not predicate(abs_path):
raise InstallError(
"Install failed for %s. No such %s in prefix: %s" %
(self.name, filetype, path))
check_paths(self.sanity_check_is_file, 'file', os.path.isfile)
check_paths(self.sanity_check_is_dir, 'directory', os.path.isdir)
installed = set(os.listdir(self.prefix))
installed.difference_update(
spack.store.layout.hidden_file_paths)
if not installed:
raise InstallError(
"Install failed for %s. Nothing was installed!" % self.name)
@property
def build_log_path(self):
"""
Return the expected (or current) build log file path. The path points
to the staging build file until the software is successfully installed,
when it points to the file in the installation directory.
"""
return self.install_log_path if self.installed else self.log_path
@classmethod
def inject_flags(cls, name, flags):
"""
flag_handler that injects all flags through the compiler wrapper.
"""
return (flags, None, None)
@classmethod
def env_flags(cls, name, flags):
"""
flag_handler that adds all flags to canonical environment variables.
"""
return (None, flags, None)
@classmethod
def build_system_flags(cls, name, flags):
"""
flag_handler that passes flags to the build system arguments. Any
package using `build_system_flags` must also implement
`flags_to_build_system_args`, or derive from a class that
implements it. Currently, AutotoolsPackage and CMakePackage
implement it.
"""
return (None, None, flags)
def _get_legacy_environment_method(self, method_name):
legacy_fn = getattr(self, method_name, None)
name_prefix = method_name.split('_environment')[0]
if legacy_fn:
msg = '[DEPRECATED METHOD]\n"{0}" ' \
'still defines the deprecated method "{1}" ' \
'[should be split into "{2}_build_environment" and ' \
'"{2}_run_environment"]'
tty.debug(msg.format(self.name, method_name, name_prefix))
return legacy_fn
def setup_build_environment(self, env):
"""Sets up the build environment for a package.
This method will be called before the current package prefix exists in
Spack's store.
Args:
env (EnvironmentModifications): environment modifications to be
applied when the package is built. Package authors can call
methods on it to alter the build environment.
"""
legacy_fn = self._get_legacy_environment_method('setup_environment')
if legacy_fn:
_ = spack.util.environment.EnvironmentModifications()
legacy_fn(env, _)
def setup_run_environment(self, env):
"""Sets up the run environment for a package.
Args:
env (EnvironmentModifications): environment modifications to be
applied when the package is run. Package authors can call
methods on it to alter the run environment.
"""
legacy_fn = self._get_legacy_environment_method('setup_environment')
if legacy_fn:
_ = spack.util.environment.EnvironmentModifications()
legacy_fn(_, env)
def setup_dependent_build_environment(self, env, dependent_spec):
"""Sets up the build environment of packages that depend on this one.
This is similar to ``setup_build_environment``, but it is used to
modify the build environments of packages that *depend* on this one.
This gives packages like Python and others that follow the extension
model a way to implement common environment or compile-time settings
for dependencies.
This method will be called before the dependent package prefix exists
in Spack's store.
Examples:
1. Installing python modules generally requires ``PYTHONPATH``
to point to the ``lib/pythonX.Y/site-packages`` directory in the
module's install prefix. This method could be used to set that
variable.
Args:
env (EnvironmentModifications): environment modifications to be
applied when the dependent package is built. Package authors
can call methods on it to alter the build environment.
dependent_spec (Spec): the spec of the dependent package
about to be built. This allows the extendee (self) to query
the dependent's state. Note that *this* package's spec is
available as ``self.spec``
"""
legacy_fn = self._get_legacy_environment_method(
'setup_dependent_environment'
)
if legacy_fn:
_ = spack.util.environment.EnvironmentModifications()
legacy_fn(env, _, dependent_spec)
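# Hypothetical override in an extendable package (sketch only; the path and
# Python version are placeholders, not from the original source):
#
#     def setup_dependent_build_environment(self, env, dependent_spec):
#         site_packages = os.path.join(
#             self.prefix.lib, 'python3.7', 'site-packages')
#         env.prepend_path('PYTHONPATH', site_packages)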
def setup_dependent_run_environment(self, env, dependent_spec):
"""Sets up the run environment of packages that depend on this one.
This is similar to ``setup_run_environment``, but it is used to
modify the run environments of packages that *depend* on this one.
This gives packages like Python and others that follow the extension
model a way to implement common environment or run-time settings
for dependencies.
Args:
env (EnvironmentModifications): environment modifications to be
applied when the dependent package is run. Package authors
can call methods on it to alter the build environment.
dependent_spec (Spec): The spec of the dependent package
about to be run. This allows the extendee (self) to query
the dependent's state. Note that *this* package's spec is
available as ``self.spec``
"""
legacy_fn = self._get_legacy_environment_method(
'setup_dependent_environment'
)
if legacy_fn:
_ = spack.util.environment.EnvironmentModifications()
legacy_fn(_, env, dependent_spec)
def setup_dependent_package(self, module, dependent_spec):
"""Set up Python module-scope variables for dependent packages.
Called before the install() method of dependents.
Default implementation does nothing, but this can be
overridden by an extendable package to set up the module of
its extensions. This is useful if there are some common steps
to installing all extensions for a certain package.
Examples:
1. Extensions often need to invoke the ``python`` interpreter
from the Python installation being extended. This routine
can put a ``python()`` Executable object in the module scope
for the extension package to simplify extension installs.
2. MPI compilers could set some variables in the dependent's
scope that point to ``mpicc``, ``mpicxx``, etc., allowing
them to be called by common name regardless of which MPI is used.
3. BLAS/LAPACK implementations can set some variables
indicating the path to their libraries, since these
paths differ by BLAS/LAPACK implementation.
Args:
module (spack.package.PackageBase.module): The Python ``module``
object of the dependent package. Packages can use this to set
module-scope variables for the dependent to use.
dependent_spec (Spec): The spec of the dependent package
about to be built. This allows the extendee (self) to
query the dependent's state. Note that *this*
package's spec is available as ``self.spec``.
"""
pass
flag_handler = inject_flags
# The flag handler method is called for each of the allowed compiler flags.
# It returns a triple of inject_flags, env_flags, build_system_flags.
# The flags returned as inject_flags are injected through the spack
# compiler wrappers.
# The flags returned as env_flags are passed to the build system through
# the environment variables of the same name.
# The flags returned as build_system_flags are passed to the build system
# package subclass to be turned into the appropriate part of the standard
# arguments. This is implemented for build system classes where
# appropriate and will otherwise raise a NotImplementedError.
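# Hedged examples of choosing a handler in a package recipe (names and flags
# are illustrative, not from the original source):
#
#     flag_handler = PackageBase.env_flags   # route all flags via env vars
#
# or a custom handler that injects an extra flag through the wrappers:
#
#     def flag_handler(self, name, flags):
#         if name == 'cflags':
#             flags.append('-fPIC')
#         return (flags, None, None)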
def flags_to_build_system_args(self, flags):
# Takes flags as a dict name: list of values
if any(v for v in flags.values()):
msg = 'The {0} build system'.format(self.__class__.__name__)
msg += ' cannot take command line arguments for compiler flags'
raise NotImplementedError(msg)
@staticmethod
def uninstall_by_spec(spec, force=False, deprecator=None):
if not os.path.isdir(spec.prefix):
# prefix may not exist, but DB may be inconsistent. Try to fix by
# removing, but omit hooks.
specs = spack.store.db.query(spec, installed=True)
if specs:
if deprecator:
spack.store.db.deprecate(specs[0], deprecator)
tty.msg("Deprecating stale DB entry for "
"%s" % spec.short_spec)
else:
spack.store.db.remove(specs[0])
tty.msg("Removed stale DB entry for %s" % spec.short_spec)
return
else:
raise InstallError(str(spec) + " is not installed.")
if not force:
dependents = spack.store.db.installed_relatives(
spec, 'parents', True)
if dependents:
raise PackageStillNeededError(spec, dependents)
# Try to get the package for the spec
try:
pkg = spec.package
except spack.repo.UnknownEntityError:
pkg = None
# Pre-uninstall hook runs first.
with spack.store.db.prefix_write_lock(spec):
if pkg is not None:
spack.hooks.pre_uninstall(spec)
# Uninstalling in Spack only requires removing the prefix.
if not spec.external:
msg = 'Deleting package prefix [{0}]'
tty.debug(msg.format(spec.short_spec))
# test if spec is already deprecated, not whether we want to
# deprecate it now
deprecated = bool(spack.store.db.deprecator(spec))
spack.store.layout.remove_install_directory(spec, deprecated)
# Delete DB entry
if deprecator:
msg = 'deprecating DB entry [{0}] in favor of [{1}]'
tty.debug(msg.format(spec.short_spec, deprecator.short_spec))
spack.store.db.deprecate(spec, deprecator)
else:
msg = 'Deleting DB entry [{0}]'
tty.debug(msg.format(spec.short_spec))
spack.store.db.remove(spec)
if pkg is not None:
spack.hooks.post_uninstall(spec)
tty.msg("Successfully uninstalled %s" % spec.short_spec)
def do_uninstall(self, force=False):
"""Uninstall this package by spec."""
# delegate to instance-less method.
Package.uninstall_by_spec(self.spec, force)
def do_deprecate(self, deprecator, link_fn):
"""Deprecate this package in favor of deprecator spec"""
spec = self.spec
# Check whether package to deprecate has active extensions
if self.extendable:
view = spack.filesystem_view.YamlFilesystemView(spec.prefix,
spack.store.layout)
active_exts = view.extensions_layout.extension_map(spec).values()
if active_exts:
short = spec.format('{name}/{hash:7}')
m = "Spec %s has active extensions\n" % short
for active in active_exts:
m += ' %s\n' % active.format('{name}/{hash:7}')
m += "Deactivate extensions before deprecating %s" % short
tty.die(m)
# Check whether package to deprecate is an active extension
if self.is_extension:
extendee = self.extendee_spec
view = spack.filesystem_view.YamlFilesystemView(extendee.prefix,
spack.store.layout)
if self.is_activated(view):
short = spec.format('{name}/{hash:7}')
short_ext = extendee.format('{name}/{hash:7}')
msg = "Spec %s is an active extension of %s\n" % (short,
short_ext)
msg += "Deactivate %s to be able to deprecate it" % short
tty.die(msg)
# Install deprecator if it isn't installed already
if not spack.store.db.query(deprecator):
deprecator.package.do_install()
old_deprecator = spack.store.db.deprecator(spec)
if old_deprecator:
# Find this specs yaml file from its old deprecation
self_yaml = spack.store.layout.deprecated_file_path(spec,
old_deprecator)
else:
self_yaml = spack.store.layout.spec_file_path(spec)
# copy spec metadata to "deprecated" dir of deprecator
depr_yaml = spack.store.layout.deprecated_file_path(spec,
deprecator)
fs.mkdirp(os.path.dirname(depr_yaml))
shutil.copy2(self_yaml, depr_yaml)
# Any specs deprecated in favor of this spec are re-deprecated in
# favor of its new deprecator
for deprecated in spack.store.db.specs_deprecated_by(spec):
deprecated.package.do_deprecate(deprecator, link_fn)
# Now that we've handled metadata, uninstall and replace with link
Package.uninstall_by_spec(spec, force=True, deprecator=deprecator)
link_fn(deprecator.prefix, spec.prefix)
def _check_extendable(self):
if not self.extendable:
raise ValueError("Package %s is not extendable!" % self.name)
def _sanity_check_extension(self):
if not self.is_extension:
raise ActivationError("This package is not an extension.")
extendee_package = self.extendee_spec.package
extendee_package._check_extendable()
if not extendee_package.installed:
raise ActivationError(
"Can only (de)activate extensions for installed packages.")
if not self.installed:
raise ActivationError("Extensions must first be installed.")
if self.extendee_spec.name not in self.extendees:
raise ActivationError("%s does not extend %s!" %
(self.name, self.extendee_spec.name))
def do_activate(self, view=None, with_dependencies=True, verbose=True):
"""Called on an extension to invoke the extendee's activate method.
Commands should call this routine, and should not call
activate() directly.
"""
if verbose:
tty.msg('Activating extension {0} for {1}'.format(
self.spec.cshort_spec, self.extendee_spec.cshort_spec))
self._sanity_check_extension()
if not view:
view = YamlFilesystemView(
self.extendee_spec.prefix, spack.store.layout)
extensions_layout = view.extensions_layout
extensions_layout.check_extension_conflict(
self.extendee_spec, self.spec)
# Activate any package dependencies that are also extensions.
if with_dependencies:
for spec in self.dependency_activations():
if not spec.package.is_activated(view):
spec.package.do_activate(
view, with_dependencies=with_dependencies,
verbose=verbose)
self.extendee_spec.package.activate(
self, view, **self.extendee_args)
extensions_layout.add_extension(self.extendee_spec, self.spec)
if verbose:
tty.debug('Activated extension {0} for {1}'.format(
self.spec.cshort_spec, self.extendee_spec.cshort_spec))
def dependency_activations(self):
return (spec for spec in self.spec.traverse(root=False, deptype='run')
if spec.package.extends(self.extendee_spec))
def activate(self, extension, view, **kwargs):
"""
Add the extension to the specified view.
Package authors can override this function to maintain some
centralized state related to the set of activated extensions
for a package.
Spack internals (commands, hooks, etc.) should call
do_activate() method so that proper checks are always executed.
"""
view.merge(extension.spec, ignore=kwargs.get('ignore', None))
def do_deactivate(self, view=None, **kwargs):
"""Remove this extension package from the specified view. Called
on the extension to invoke extendee's deactivate() method.
`remove_dependents=True` deactivates extensions depending on this
package instead of raising an error.
"""
self._sanity_check_extension()
force = kwargs.get('force', False)
verbose = kwargs.get('verbose', True)
remove_dependents = kwargs.get('remove_dependents', False)
if verbose:
tty.msg('Deactivating extension {0} for {1}'.format(
self.spec.cshort_spec, self.extendee_spec.cshort_spec))
if not view:
view = YamlFilesystemView(
self.extendee_spec.prefix, spack.store.layout)
extensions_layout = view.extensions_layout
# Allow a force deactivate to happen. This can unlink
# spurious files if something was corrupted.
if not force:
extensions_layout.check_activated(
self.extendee_spec, self.spec)
activated = extensions_layout.extension_map(
self.extendee_spec)
for name, aspec in activated.items():
if aspec == self.spec:
continue
for dep in aspec.traverse(deptype='run'):
if self.spec == dep:
if remove_dependents:
aspec.package.do_deactivate(**kwargs)
else:
msg = ('Cannot deactivate {0} because {1} is '
'activated and depends on it')
raise ActivationError(msg.format(
self.spec.cshort_spec, aspec.cshort_spec))
self.extendee_spec.package.deactivate(
self, view, **self.extendee_args)
# redundant activation check -- makes SURE the spec is not
# still activated even if something was wrong above.
if self.is_activated(view):
extensions_layout.remove_extension(
self.extendee_spec, self.spec)
if verbose:
tty.debug('Deactivated extension {0} for {1}'.format(
self.spec.cshort_spec, self.extendee_spec.cshort_spec))
def deactivate(self, extension, view, **kwargs):
"""
Remove all extension files from the specified view.
Package authors can override this method to support other
extension mechanisms. Spack internals (commands, hooks, etc.)
should call do_deactivate() method so that proper checks are
always executed.
"""
view.unmerge(extension.spec, ignore=kwargs.get('ignore', None))
def view(self):
"""Create a view with the prefix of this package as the root.
Extensions added to this view will modify the installation prefix of
this package.
"""
return YamlFilesystemView(self.prefix, spack.store.layout)
def do_restage(self):
"""Reverts expanded/checked out source to a pristine state."""
self.stage.restage()
def do_clean(self):
"""Removes the package's build stage and source tarball."""
for patch in self.spec.patches:
patch.clean()
self.stage.destroy()
def format_doc(self, **kwargs):
"""Wrap doc string at 72 characters and format nicely"""
indent = kwargs.get('indent', 0)
if not self.__doc__:
return ""
doc = re.sub(r'\s+', ' ', self.__doc__)
lines = textwrap.wrap(doc, 72)
results = StringIO()
for line in lines:
results.write((" " * indent) + line + "\n")
return results.getvalue()
@property
def all_urls(self):
"""A list of all URLs in a package.
Check both class-level and version-specific URLs.
Returns:
list: a list of URLs
"""
urls = []
if hasattr(self, 'url') and self.url:
urls.append(self.url)
for args in self.versions.values():
if 'url' in args:
urls.append(args['url'])
return urls
def fetch_remote_versions(self):
"""Find remote versions of this package.
Uses ``list_url`` and any other URLs listed in the package file.
Returns:
dict: a dictionary mapping versions to URLs
"""
if not self.all_urls:
return {}
try:
return spack.util.web.find_versions_of_archive(
self.all_urls, self.list_url, self.list_depth)
except spack.util.web.NoNetworkConnectionError as e:
tty.die("Package.fetch_versions couldn't connect to:", e.url,
e.message)
@property
def rpath(self):
"""Get the rpath this package links with, as a list of paths."""
rpaths = [self.prefix.lib, self.prefix.lib64]
deps = self.spec.dependencies(deptype='link')
rpaths.extend(d.prefix.lib for d in deps
if os.path.isdir(d.prefix.lib))
rpaths.extend(d.prefix.lib64 for d in deps
if os.path.isdir(d.prefix.lib64))
return rpaths
@property
def rpath_args(self):
"""
Get the rpath args as a string, with -Wl,-rpath, for each element
"""
return " ".join("-Wl,-rpath,%s" % p for p in self.rpath)
@on_package_attributes(run_tests=True)
def _run_default_build_time_test_callbacks(self):
"""Tries to call all the methods that are listed in the attribute
``build_time_test_callbacks`` if ``self.run_tests is True``.
If ``build_time_test_callbacks is None`` returns immediately.
"""
if self.build_time_test_callbacks is None:
return
for name in self.build_time_test_callbacks:
try:
fn = getattr(self, name)
tty.msg('RUN-TESTS: build-time tests [{0}]'.format(name))
fn()
except AttributeError:
msg = 'RUN-TESTS: method not implemented [{0}]'
tty.warn(msg.format(name))
@on_package_attributes(run_tests=True)
def _run_default_install_time_test_callbacks(self):
"""Tries to call all the methods that are listed in the attribute
``install_time_test_callbacks`` if ``self.run_tests is True``.
If ``install_time_test_callbacks is None`` returns immediately.
"""
if self.install_time_test_callbacks is None:
return
for name in self.install_time_test_callbacks:
try:
fn = getattr(self, name)
tty.msg('RUN-TESTS: install-time tests [{0}]'.format(name))
fn()
except AttributeError:
msg = 'RUN-TESTS: method not implemented [{0}]'
tty.warn(msg.format(name))
inject_flags = PackageBase.inject_flags
env_flags = PackageBase.env_flags
build_system_flags = PackageBase.build_system_flags
class BundlePackage(PackageBase):
"""General purpose bundle, or no-code, package class."""
#: There are no phases by default but the property is required to support
#: post-install hooks (e.g., for module generation).
phases = []
#: This attribute is used in UI queries that need to know which
#: build-system class we are using
build_system_class = 'BundlePackage'
#: Bundle packages do not have associated source or binary code.
has_code = False
class Package(PackageBase):
"""General purpose class with a single ``install``
phase that needs to be coded by packagers.
"""
#: The one and only phase
phases = ['install']
#: This attribute is used in UI queries that need to know which
#: build-system class we are using
build_system_class = 'Package'
# This will be used as a registration decorator in user
# packages, if need be
run_after('install')(PackageBase.sanity_check_prefix)
def install_dependency_symlinks(pkg, spec, prefix):
"""
Execute a dummy install and flatten dependencies.
This routine can be used in a ``package.py`` definition by setting
``install = install_dependency_symlinks``.
This feature comes in handy for creating a common location for
the installation of third-party libraries.
"""
flatten_dependencies(spec, prefix)
def use_cray_compiler_names():
"""Compiler names for builds that rely on cray compiler names."""
os.environ['CC'] = 'cc'
os.environ['CXX'] = 'CC'
os.environ['FC'] = 'ftn'
os.environ['F77'] = 'ftn'
def flatten_dependencies(spec, flat_dir):
"""Make each dependency of spec present in dir via symlink."""
for dep in spec.traverse(root=False):
name = dep.name
dep_path = spack.store.layout.path_for_spec(dep)
dep_files = LinkTree(dep_path)
os.mkdir(flat_dir + '/' + name)
conflict = dep_files.find_conflict(flat_dir + '/' + name)
if conflict:
raise DependencyConflictError(conflict)
dep_files.merge(flat_dir + '/' + name)
def possible_dependencies(*pkg_or_spec, **kwargs):
"""Get the possible dependencies of a number of packages.
See ``PackageBase.possible_dependencies`` for details.
"""
transitive = kwargs.get('transitive', True)
expand_virtuals = kwargs.get('expand_virtuals', True)
deptype = kwargs.get('deptype', 'all')
missing = kwargs.get('missing')
packages = []
for pos in pkg_or_spec:
if isinstance(pos, PackageMeta):
pkg = pos
elif isinstance(pos, spack.spec.Spec):
pkg = pos.package
else:
pkg = spack.spec.Spec(pos).package
packages.append(pkg)
visited = {}
for pkg in packages:
pkg.possible_dependencies(
transitive, expand_virtuals, deptype, visited, missing)
return visited
class FetchError(spack.error.SpackError):
"""Raised when something goes wrong during fetch."""
def __init__(self, message, long_msg=None):
super(FetchError, self).__init__(message, long_msg)
class PackageStillNeededError(InstallError):
"""Raised when package is still needed by another on uninstall."""
def __init__(self, spec, dependents):
super(PackageStillNeededError, self).__init__("Cannot uninstall %s" %
spec)
self.spec = spec
self.dependents = dependents
class PackageError(spack.error.SpackError):
"""Raised when something is wrong with a package definition."""
def __init__(self, message, long_msg=None):
super(PackageError, self).__init__(message, long_msg)
class PackageVersionError(PackageError):
"""Raised when a version URL cannot automatically be determined."""
def __init__(self, version):
super(PackageVersionError, self).__init__(
"Cannot determine a URL automatically for version %s" % version,
"Please provide a url for this version in the package.py file.")
class NoURLError(PackageError):
"""Raised when someone tries to build a URL for a package with no URLs."""
def __init__(self, cls):
super(NoURLError, self).__init__(
"Package %s has no version with a URL." % cls.__name__)
class InvalidPackageOpError(PackageError):
"""Raised when someone tries perform an invalid operation on a package."""
class ExtensionError(PackageError):
"""Superclass for all errors having to do with extension packages."""
class ActivationError(ExtensionError):
"""Raised when there are problems activating an extension."""
def __init__(self, msg, long_msg=None):
super(ActivationError, self).__init__(msg, long_msg)
class DependencyConflictError(spack.error.SpackError):
"""Raised when the dependencies cannot be flattened as asked for."""
def __init__(self, conflict):
super(DependencyConflictError, self).__init__(
"%s conflicts with another file in the flattened directory." % (
conflict))
|
[] |
[] |
[
"CXX",
"FC",
"F77",
"CC"
] |
[]
|
["CXX", "FC", "F77", "CC"]
|
python
| 4 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HouseMates.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
IoT/create_config.py
|
import argparse
import os
# -- fix for mac only
#import crypto
import sys
#sys.modules['Crypto'] = crypto
# --
import shutil
from Crypto.PublicKey import RSA
from datetime import datetime
if __name__ == "__main__":
config_file = "installer-config.txt"
destination = "files/root/home/pi/.ssh/"
KEY_DIR = os.getenv('iot_key_dir')
parser = argparse.ArgumentParser()
parser.add_argument('-hosts', action='store', dest='total_nodes',
help='total number of nodes for which the configuration needs to be generated')
parser.add_argument('-net', action='store', dest='network',
help='Network e.g. 192.168.1.0. Subnet is assumed to be /24')
parser.add_argument('-st', action='store', dest='start_from',
help='start the host naming from. The nodes will be numbered sequentially')
results = parser.parse_args()
all_hosts = list(range(int(results.start_from), int(results.start_from)+int(results.total_nodes)))
_all_ips = results.network.rstrip('0')+"{host}"
all_ips = list(map(lambda x: _all_ips.format(host=x),
sorted(all_hosts, reverse=True)))
for host in all_hosts:
file_path = os.path.join('{current_host}'.format(current_host=host),
destination)
installer_path = os.path.join('{current_host}'.format(current_host=host))
# parse the config file to a dictionary
conf = {}
with open(config_file) as fh:
for line in fh:
(key, value) = line.strip('\n').split('=')
conf[str(key)] = value
# Values to be changed
# ip address
if conf.get('ip_addr', None):
conf['ip_addr'] = all_ips.pop()
else:
print("ERROR: cannot find ip_addr field")
# ssh public/pvt key
# generate 2048bit key
key = RSA.generate(2048)
# write private key to the file
if not os.path.exists(file_path):
os.makedirs(file_path)
print(file_path)
with open(os.path.join(file_path, 'private.key'), 'wb') as pvtkey_file:
pvtkey_file.write(key.exportKey('PEM'))
os.chmod(os.path.join(file_path, 'private.key'), 0o400)
# change this in config file as well
if conf.get('user_ssh_pubkey', None):
conf['user_ssh_pubkey'] = key.publickey().exportKey('OpenSSH').decode('utf-8')
shutil.copytree(str(host), os.path.join(KEY_DIR, str(host)))
if conf.get('ip_addr', None):
conf['ip_addr'] = '.'.join(conf['ip_addr'].split('.')[:-1]) + '.{}'.format(host)
# write the configuration to a file
with open(os.path.join(installer_path, 'installer-config.txt'), 'w') as cfg:
for item in conf.items():
cfg.write("{key}={value}\n".format(key=item[0], value=item[1]))
# -- Thats It!
print("[{time}]: Done configuration for {host}".format(
time=datetime.now(),
host=host))
|
[] |
[] |
[
"iot_key_dir"
] |
[]
|
["iot_key_dir"]
|
python
| 1 | 0 | |
python/helpers/pydev/_pydev_runfiles/pydev_runfiles_pytest2.py
|
from _pydev_runfiles import pydev_runfiles_xml_rpc
import pickle
import zlib
import base64
import os
import py
from pydevd_file_utils import _NormFile
import pytest
import sys
import time
#=========================================================================
# Load filters with tests we should skip
#=========================================================================
py_test_accept_filter = None
def _load_filters():
global py_test_accept_filter
if py_test_accept_filter is None:
py_test_accept_filter = os.environ.get('PYDEV_PYTEST_SKIP')
if py_test_accept_filter:
py_test_accept_filter = pickle.loads(
zlib.decompress(base64.b64decode(py_test_accept_filter)))
else:
py_test_accept_filter = {}
def is_in_xdist_node():
main_pid = os.environ.get('PYDEV_MAIN_PID')
if main_pid and main_pid != str(os.getpid()):
return True
return False
connected = False
def connect_to_server_for_communication_to_xml_rpc_on_xdist():
global connected
if connected:
return
connected = True
if is_in_xdist_node():
port = os.environ.get('PYDEV_PYTEST_SERVER')
if not port:
sys.stderr.write(
'Error: no PYDEV_PYTEST_SERVER environment variable defined.\n')
else:
pydev_runfiles_xml_rpc.initialize_server(int(port), daemon=True)
PY2 = sys.version_info[0] <= 2
PY3 = not PY2
#=========================================================================
# Mocking to get clickable file representations
#=========================================================================
_mock_code = []
try:
from py._code import code # @UnresolvedImport
_mock_code.append(code)
except ImportError:
pass
try:
from _pytest._code import code # @UnresolvedImport
_mock_code.append(code)
except ImportError:
pass
def _MockFileRepresentation():
for code in _mock_code:
code.ReprFileLocation._original_toterminal = code.ReprFileLocation.toterminal
def toterminal(self, tw):
# filename and lineno output for each entry,
# using an output format that most editors understand
msg = self.message
i = msg.find("\n")
if i != -1:
msg = msg[:i]
path = os.path.abspath(self.path)
if PY2:
# Note: it usually is NOT unicode...
if not isinstance(path, unicode):
path = path.decode(sys.getfilesystemencoding(), 'replace')
# Note: it usually is unicode...
if not isinstance(msg, unicode):
msg = msg.decode('utf-8', 'replace')
unicode_line = unicode('File "%s", line %s\n%s') % (
path, self.lineno, msg)
tw.line(unicode_line)
else:
tw.line('File "%s", line %s\n%s' % (path, self.lineno, msg))
code.ReprFileLocation.toterminal = toterminal
def _UninstallMockFileRepresentation():
for code in _mock_code:
# @UndefinedVariable
code.ReprFileLocation.toterminal = code.ReprFileLocation._original_toterminal
#=========================================================================
# End mocking to get clickable file representations
#=========================================================================
class State:
start_time = time.time()
buf_err = None
buf_out = None
def start_redirect():
if State.buf_out is not None:
return
from _pydevd_bundle import pydevd_io
State.buf_err = pydevd_io.start_redirect(keep_original_redirection=True, std='stderr')
State.buf_out = pydevd_io.start_redirect(keep_original_redirection=True, std='stdout')
def get_curr_output():
return State.buf_out.getvalue(), State.buf_err.getvalue()
def pytest_configure():
_MockFileRepresentation()
def pytest_unconfigure():
_UninstallMockFileRepresentation()
if is_in_xdist_node():
return
# Only report that it finished when on the main node (we don't want to report
# the finish on each separate node).
pydev_runfiles_xml_rpc.notifyTestRunFinished(
'Finished in: %.2f secs.' % (time.time() - State.start_time,))
def pytest_collection_modifyitems(session, config, items):
# A note: in xdist, this is not called on the main process, only in the
# secondary nodes, so, we'll actually make the filter and report it multiple
# times.
connect_to_server_for_communication_to_xml_rpc_on_xdist()
_load_filters()
if not py_test_accept_filter:
pydev_runfiles_xml_rpc.notifyTestsCollected(len(items))
return # Keep on going (nothing to filter)
new_items = []
for item in items:
f = _NormFile(str(item.parent.fspath))
name = item.name
if f not in py_test_accept_filter:
# print('Skip file: %s' % (f,))
continue # Skip the file
accept_tests = py_test_accept_filter[f]
if item.cls is not None:
class_name = item.cls.__name__
else:
class_name = None
for test in accept_tests:
# This happens when parameterizing pytest tests.
i = name.find('[')
if i > 0:
name = name[:i]
if test == name:
# Direct match of the test (just go on with the default
# loading)
new_items.append(item)
break
if class_name is not None:
if test == class_name + '.' + name:
new_items.append(item)
break
if class_name == test:
new_items.append(item)
break
else:
pass
# print('Skip test: %s.%s. Accept: %s' % (class_name, name, accept_tests))
# Modify the original list
items[:] = new_items
pydev_runfiles_xml_rpc.notifyTestsCollected(len(items))
from py.io import TerminalWriter
def _get_error_contents_from_report(report):
if report.longrepr is not None:
tw = TerminalWriter(stringio=True)
tw.hasmarkup = False
report.toterminal(tw)
exc = tw.stringio.getvalue()
s = exc.strip()
if s:
return s
return ''
def pytest_collectreport(report):
error_contents = _get_error_contents_from_report(report)
if error_contents:
report_test('fail', '<collect errors>', '<collect errors>', '', error_contents, 0.0)
def append_strings(s1, s2):
if s1.__class__ == s2.__class__:
return s1 + s2
if sys.version_info[0] == 2:
if not isinstance(s1, basestring):
s1 = str(s1)
if not isinstance(s2, basestring):
s2 = str(s2)
# Prefer bytes
if isinstance(s1, unicode):
s1 = s1.encode('utf-8')
if isinstance(s2, unicode):
s2 = s2.encode('utf-8')
return s1 + s2
else:
# Prefer str
if isinstance(s1, bytes):
s1 = s1.decode('utf-8', 'replace')
if isinstance(s2, bytes):
s2 = s2.decode('utf-8', 'replace')
return s1 + s2
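# Illustration (editor's sketch, not part of the original plugin): append_strings
# normalizes mixed str/bytes values before concatenating, so on Python 3
# append_strings('out: ', b'\xc3\xa9') yields the text 'out: é', while on
# Python 2 unicode input is first encoded to UTF-8 bytes.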
def pytest_runtest_logreport(report):
if is_in_xdist_node():
# When running with xdist, we don't want the report to be called from the node, only
# from the main process.
return
report_duration = report.duration
report_when = report.when
report_outcome = report.outcome
if hasattr(report, 'wasxfail'):
if report_outcome != 'skipped':
report_outcome = 'passed'
if report_outcome == 'passed':
# passed on setup/teardown: no need to report if in setup or teardown
# (only on the actual test if it passed).
if report_when in ('setup', 'teardown'):
return
status = 'ok'
elif report_outcome == 'skipped':
status = 'skip'
else:
# pytest outcomes are only passed, skipped and failed (there is no error
# outcome), so treat a failure outside of the call phase as an error.
if report_when in ('setup', 'teardown'):
status = 'error'
else:
# any error in the call (not in setup or teardown) is considered a
# regular failure.
status = 'fail'
# This will work if pytest is not capturing output; if it is, nothing will
# come from here...
captured_output, error_contents = getattr(report, 'pydev_captured_output', ''), getattr(report, 'pydev_error_contents', '')
for type_section, value in report.sections:
if value:
if type_section in ('err', 'stderr', 'Captured stderr call'):
error_contents = append_strings(error_contents, value)
else:
captured_output = append_strings(captured_output, value)
filename = getattr(report, 'pydev_fspath_strpath', '<unable to get>')
test = report.location[2]
if report_outcome != 'skipped':
# On skipped, we'll have a traceback for the skip, which is not what we
# want.
exc = _get_error_contents_from_report(report)
if exc:
if error_contents:
error_contents = append_strings(error_contents, '----------------------------- Exceptions -----------------------------\n')
error_contents = append_strings(error_contents, exc)
report_test(status, filename, test, captured_output, error_contents, report_duration)
def report_test(status, filename, test, captured_output, error_contents, duration):
'''
@param filename: 'D:\\src\\mod1\\hello.py'
@param test: 'TestCase.testMet1'
@param status: fail, error, ok
'''
time_str = '%.2f' % (duration,)
pydev_runfiles_xml_rpc.notifyTest(
status, captured_output, error_contents, filename, test, time_str)
if not hasattr(pytest, 'hookimpl'):
raise AssertionError('Please upgrade pytest (the current version of pytest: %s is unsupported)' % (pytest.__version__,))
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
report.pydev_fspath_strpath = item.fspath.strpath
report.pydev_captured_output, report.pydev_error_contents = get_curr_output()
@pytest.mark.tryfirst
def pytest_runtest_setup(item):
'''
Note: with xdist will be on a secondary process.
'''
# We have our own redirection: if xdist does its redirection, we'll have
# nothing in our contents (which is OK), but if it does, we'll get nothing
# from pytest but will get our own here.
start_redirect()
filename = item.fspath.strpath
test = item.location[2]
pydev_runfiles_xml_rpc.notifyStartTest(filename, test)
|
[] |
[] |
[
"PYDEV_PYTEST_SERVER",
"PYDEV_MAIN_PID",
"PYDEV_PYTEST_SKIP"
] |
[]
|
["PYDEV_PYTEST_SERVER", "PYDEV_MAIN_PID", "PYDEV_PYTEST_SKIP"]
|
python
| 3 | 0 | |
mongodump/mongodump_test.go
|
// Copyright (C) MongoDB, Inc. 2014-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package mongodump
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"github.com/mongodb/mongo-tools-common/bsonutil"
"github.com/mongodb/mongo-tools-common/db"
"github.com/mongodb/mongo-tools-common/json"
"github.com/mongodb/mongo-tools-common/log"
"github.com/mongodb/mongo-tools-common/options"
"github.com/mongodb/mongo-tools-common/testtype"
"github.com/mongodb/mongo-tools-common/testutil"
"github.com/mongodb/mongo-tools-common/util"
. "github.com/smartystreets/goconvey/convey"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
)
var (
// database with test data
testDB = "mongodump_test_db"
// temp database used for restoring a DB
testRestoreDB = "temp_mongodump_restore_test_db"
testCollectionNames = []string{"coll1", "coll2", "coll/three"}
)
const (
KerberosDumpDirectory = "dump-kerberos"
)
func simpleMongoDumpInstance() *MongoDump {
var toolOptions *options.ToolOptions
// get ToolOptions from URI or defaults
if uri := os.Getenv("MONGOD"); uri != "" {
fakeArgs := []string{"--uri=" + uri}
toolOptions = options.New("mongodump", "", "", "", options.EnabledOptions{URI: true})
toolOptions.URI.AddKnownURIParameters(options.KnownURIOptionsReadPreference)
_, err := toolOptions.ParseArgs(fakeArgs)
if err != nil {
panic("Could not parse MONGOD environment variable")
}
} else {
ssl := testutil.GetSSLOptions()
auth := testutil.GetAuthOptions()
connection := &options.Connection{
Host: "localhost",
Port: db.DefaultTestPort,
}
toolOptions = &options.ToolOptions{
SSL: &ssl,
Connection: connection,
Auth: &auth,
Verbosity: &options.Verbosity{},
URI: &options.URI{},
}
}
// Limit ToolOptions to test database
toolOptions.Namespace = &options.Namespace{DB: testDB}
outputOptions := &OutputOptions{
NumParallelCollections: 1,
}
inputOptions := &InputOptions{}
log.SetVerbosity(toolOptions.Verbosity)
return &MongoDump{
ToolOptions: toolOptions,
InputOptions: inputOptions,
OutputOptions: outputOptions,
}
}
// returns the number of .bson files in a directory
// excluding system.indexes.bson
func countNonIndexBSONFiles(dir string) (int, error) {
files, err := listNonIndexBSONFiles(dir)
if err != nil {
return 0, err
}
return len(files), nil
}
func listNonIndexBSONFiles(dir string) ([]string, error) {
var files []string
matchingFiles, err := getMatchingFiles(dir, ".*\\.bson")
if err != nil {
return nil, err
}
for _, fileName := range matchingFiles {
if fileName != "system.indexes.bson" {
files = append(files, fileName)
}
}
return files, nil
}
// returns count of metadata files
func countMetaDataFiles(dir string) (int, error) {
matchingFiles, err := getMatchingFiles(dir, ".*\\.metadata\\.json")
if err != nil {
return 0, err
}
return len(matchingFiles), nil
}
// returns count of oplog entries with 'ui' field
func countOplogUI(iter *db.DecodedBSONSource) int {
var count int
var doc bson.M
for iter.Next(&doc) {
count += countOpsWithUI(doc)
}
return count
}
func countOpsWithUI(doc bson.M) int {
var count int
switch doc["op"] {
case "i", "u", "d":
if _, ok := doc["ui"]; ok {
count++
}
case "c":
if _, ok := doc["ui"]; ok {
count++
} else if v, ok := doc["o"]; ok {
opts, _ := v.(bson.M)
if applyOps, ok := opts["applyOps"]; ok {
list := applyOps.([]bson.M)
for _, v := range list {
count += countOpsWithUI(v)
}
}
}
}
return count
}
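// Example (editor's sketch, hypothetical documents): an insert entry such as
// {"op": "i", "ui": <collection UUID>, ...} counts once, while a command entry
// {"op": "c", "o": {"applyOps": [...]}} is counted by recursing into each
// applyOps element via countOpsWithUI.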
// returns filenames that match the given pattern
func getMatchingFiles(dir, pattern string) ([]string, error) {
fileInfos, err := ioutil.ReadDir(dir)
if err != nil {
return nil, err
}
matchingFiles := []string{}
var matched bool
for _, fileInfo := range fileInfos {
fileName := fileInfo.Name()
if matched, err = regexp.MatchString(pattern, fileName); matched {
matchingFiles = append(matchingFiles, fileName)
}
if err != nil {
return nil, err
}
}
return matchingFiles, nil
}
// readBSONIntoDatabase reads all the BSON documents from dir and inserts them
// into another database, ignoring indexes for now.
func readBSONIntoDatabase(dir, restoreDBName string) error {
if ok := fileDirExists(dir); !ok {
return fmt.Errorf("error finding '%v' on local FS", dir)
}
session, err := testutil.GetBareSession()
if err != nil {
return err
}
fileInfos, err := ioutil.ReadDir(dir)
if err != nil {
return err
}
for _, fileInfo := range fileInfos {
fileName := fileInfo.Name()
if !strings.HasSuffix(fileName, ".bson") || fileName == "system.indexes.bson" {
continue
}
collectionName, err := util.UnescapeCollectionName(fileName[:strings.LastIndex(fileName, ".bson")])
if err != nil {
return err
}
collection := session.Database(restoreDBName).Collection(collectionName)
file, err := os.Open(fmt.Sprintf("%s/%s", dir, fileName))
if err != nil {
return err
}
defer file.Close()
bsonSource := db.NewDecodedBSONSource(db.NewBSONSource(file))
defer bsonSource.Close()
var result bson.D
for bsonSource.Next(&result) {
_, err = collection.InsertOne(nil, result)
if err != nil {
return err
}
}
if err = bsonSource.Err(); err != nil {
return err
}
}
return nil
}
func setUpMongoDumpTestData() error {
session, err := testutil.GetBareSession()
if err != nil {
return err
}
for i, collectionName := range testCollectionNames {
coll := session.Database(testDB).Collection(collectionName)
for j := 0; j < 10*(i+1); j++ {
_, err = coll.InsertOne(nil, bson.M{"collectionName": collectionName, "age": j, "coords": bson.D{{"x", i}, {"y", j}}})
if err != nil {
return err
}
}
}
return nil
}
// backgroundInsert inserts into random collections until provided done
// channel is closed. The function closes the ready channel to signal that
// background insertion has started. When the done channel is closed, the
// function returns. Any errors are passed back on the errs channel.
func backgroundInsert(ready, done chan struct{}, errs chan error) {
defer close(errs)
session, err := testutil.GetBareSession()
if err != nil {
errs <- err
close(ready)
return
}
colls := make([]*mongo.Collection, len(testCollectionNames))
for i, v := range testCollectionNames {
colls[i] = session.Database(testDB).Collection(v)
}
var n int
// Insert a doc to ensure the DB is actually ready for inserts
// and not pausing while a dropDatabase is processing.
_, err = colls[0].InsertOne(nil, bson.M{"n": n})
if err != nil {
errs <- err
close(ready)
return
}
close(ready)
n++
for {
select {
case <-done:
return
default:
coll := colls[rand.Intn(len(colls))]
_, err := coll.InsertOne(nil, bson.M{"n": n})
if err != nil {
errs <- err
return
}
n++
}
}
}
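// Usage sketch (mirrors TestMongoDumpOplog below; not additional test code):
//	ready, done := make(chan struct{}), make(chan struct{})
//	errs := make(chan error, 1)
//	go backgroundInsert(ready, done, errs)
//	<-ready              // background inserts have started
//	// ... run the dump under test ...
//	close(done)          // stop inserting
//	if err := <-errs; err != nil { /* a background insert failed */ }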
func tearDownMongoDumpTestData() error {
session, err := testutil.GetBareSession()
if err != nil {
return err
}
err = session.Database(testDB).Drop(nil)
if err != nil {
return err
}
return nil
}
func fileDirExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
func testQuery(md *MongoDump, session *mongo.Client) string {
origDB := session.Database(testDB)
restoredDB := session.Database(testRestoreDB)
// query to test --query* flags
bsonQuery := bson.M{"age": bson.M{"$lt": 10}}
// mongodump can only apply a query when dumping a single collection, so dump each collection separately
for _, testCollName := range testCollectionNames {
md.ToolOptions.Namespace.Collection = testCollName
err := md.Init()
So(err, ShouldBeNil)
err = md.Dump()
So(err, ShouldBeNil)
}
path, err := os.Getwd()
So(err, ShouldBeNil)
dumpDir := util.ToUniversalPath(filepath.Join(path, "dump"))
dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, testDB))
So(fileDirExists(dumpDir), ShouldBeTrue)
So(fileDirExists(dumpDBDir), ShouldBeTrue)
So(restoredDB.Drop(nil), ShouldBeNil)
err = readBSONIntoDatabase(dumpDBDir, testRestoreDB)
So(err, ShouldBeNil)
for _, testCollName := range testCollectionNames {
// count filtered docs
origDocCount, err := origDB.Collection(testCollName).CountDocuments(nil, bsonQuery)
So(err, ShouldBeNil)
// count number of all restored documents
restDocCount, err := restoredDB.Collection(testCollName).CountDocuments(nil, bson.D{})
So(err, ShouldBeNil)
So(restDocCount, ShouldEqual, origDocCount)
}
return dumpDir
}
func testDumpOneCollection(md *MongoDump, dumpDir string) {
path, err := os.Getwd()
So(err, ShouldBeNil)
absDumpDir := util.ToUniversalPath(filepath.Join(path, dumpDir))
So(os.RemoveAll(absDumpDir), ShouldBeNil)
So(fileDirExists(absDumpDir), ShouldBeFalse)
dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, testDB))
So(fileDirExists(dumpDBDir), ShouldBeFalse)
md.OutputOptions.Out = dumpDir
err = md.Dump()
So(err, ShouldBeNil)
So(fileDirExists(dumpDBDir), ShouldBeTrue)
session, err := testutil.GetBareSession()
So(err, ShouldBeNil)
countColls, err := countNonIndexBSONFiles(dumpDBDir)
So(err, ShouldBeNil)
So(countColls, ShouldEqual, 1)
collOriginal := session.Database(testDB).Collection(md.ToolOptions.Namespace.Collection)
So(session.Database(testRestoreDB).Drop(nil), ShouldBeNil)
collRestore := session.Database(testRestoreDB).Collection(md.ToolOptions.Namespace.Collection)
err = readBSONIntoDatabase(dumpDBDir, testRestoreDB)
So(err, ShouldBeNil)
Convey("with the correct number of documents", func() {
numDocsOrig, err := collOriginal.CountDocuments(nil, bson.D{})
So(err, ShouldBeNil)
numDocsRestore, err := collRestore.CountDocuments(nil, bson.D{})
So(err, ShouldBeNil)
So(numDocsRestore, ShouldEqual, numDocsOrig)
})
Convey("that are the same as the documents in the test database", func() {
iter, err := collOriginal.Find(nil, bson.D{})
So(err, ShouldBeNil)
var result bson.D
for iter.Next(nil) {
iter.Decode(&result)
restoredCount, err := collRestore.CountDocuments(nil, result)
So(err, ShouldBeNil)
So(restoredCount, ShouldNotEqual, 0)
}
So(iter.Err(), ShouldBeNil)
So(iter.Close(context.Background()), ShouldBeNil)
})
}
func TestMongoDumpValidateOptions(t *testing.T) {
testtype.SkipUnlessTestType(t, testtype.UnitTestType)
Convey("With a MongoDump instance", t, func() {
md := simpleMongoDumpInstance()
Convey("we cannot dump a collection when a database specified", func() {
md.ToolOptions.Namespace.Collection = "some_collection"
md.ToolOptions.Namespace.DB = ""
err := md.Init()
So(err, ShouldNotBeNil)
So(err.Error(), ShouldContainSubstring, "cannot dump a collection without a specified database")
})
Convey("we have to specify a collection name if using a query", func() {
md.ToolOptions.Namespace.Collection = ""
md.OutputOptions.Out = ""
md.InputOptions.Query = "{_id:\"\"}"
err := md.Init()
So(err, ShouldNotBeNil)
So(err.Error(), ShouldContainSubstring, "cannot dump using a query without a specified collection")
})
})
}
func TestMongoDumpKerberos(t *testing.T) {
testtype.SkipUnlessTestType(t, testtype.KerberosTestType)
Convey("Should be able to run mongodump with Kerberos auth", t, func() {
opts, err := testutil.GetKerberosOptions()
So(err, ShouldBeNil)
mongoDump := MongoDump{
ToolOptions: opts,
InputOptions: &InputOptions{},
OutputOptions: &OutputOptions{
NumParallelCollections: 1,
},
}
mongoDump.OutputOptions.Out = KerberosDumpDirectory
err = mongoDump.Init()
So(err, ShouldBeNil)
err = mongoDump.Dump()
So(err, ShouldBeNil)
path, err := os.Getwd()
So(err, ShouldBeNil)
dumpDir := util.ToUniversalPath(filepath.Join(path, KerberosDumpDirectory))
dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, opts.Namespace.DB))
So(fileDirExists(dumpDir), ShouldBeTrue)
So(fileDirExists(dumpDBDir), ShouldBeTrue)
dumpCollectionFile := util.ToUniversalPath(filepath.Join(dumpDBDir, opts.Namespace.Collection+".bson"))
So(fileDirExists(dumpCollectionFile), ShouldBeTrue)
countColls, err := countNonIndexBSONFiles(dumpDBDir)
So(err, ShouldBeNil)
So(countColls, ShouldEqual, 1)
})
}
func TestMongoDumpBSON(t *testing.T) {
testtype.SkipUnlessTestType(t, testtype.IntegrationTestType)
log.SetWriter(ioutil.Discard)
Convey("With a MongoDump instance", t, func() {
err := setUpMongoDumpTestData()
So(err, ShouldBeNil)
Convey("testing that using MongoDump WITHOUT giving a query dumps everything in the database and/or collection", func() {
md := simpleMongoDumpInstance()
md.InputOptions.Query = ""
Convey("and that for a particular collection", func() {
md.ToolOptions.Namespace.Collection = testCollectionNames[0]
err = md.Init()
So(err, ShouldBeNil)
Convey("it dumps to the default output directory", func() {
testDumpOneCollection(md, "dump")
})
Convey("it dumps to a user-specified output directory", func() {
testDumpOneCollection(md, "dump_user")
})
Convey("it dumps to standard output", func() {
md.OutputOptions.Out = "-"
stdoutBuf := &bytes.Buffer{}
md.OutputWriter = stdoutBuf
err = md.Dump()
So(err, ShouldBeNil)
var count int
bsonSource := db.NewDecodedBSONSource(db.NewBSONSource(ioutil.NopCloser(stdoutBuf)))
defer bsonSource.Close()
var result bson.Raw
for bsonSource.Next(&result) {
count++
}
So(bsonSource.Err(), ShouldBeNil)
So(count, ShouldEqual, 10) //The 0th collection has 10 documents
Reset(func() {
})
})
})
Convey("and that it dumps a collection with a slash in its name", func() {
md.ToolOptions.Namespace.Collection = testCollectionNames[2]
Convey("to the filesystem", func() {
err = md.Init()
So(err, ShouldBeNil)
testDumpOneCollection(md, "dump_slash")
})
Convey("to an archive", func() {
md.OutputOptions.Archive = "dump_slash.archive"
err = md.Init()
So(err, ShouldBeNil)
})
})
Convey("for an entire database", func() {
md.ToolOptions.Namespace.Collection = ""
err = md.Init()
So(err, ShouldBeNil)
Convey("that exists. The dumped directory should contain the necessary bson files", func() {
md.OutputOptions.Out = "dump"
err = md.Dump()
So(err, ShouldBeNil)
path, err := os.Getwd()
So(err, ShouldBeNil)
dumpDir := util.ToUniversalPath(filepath.Join(path, "dump"))
dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, testDB))
So(fileDirExists(dumpDir), ShouldBeTrue)
So(fileDirExists(dumpDBDir), ShouldBeTrue)
countColls, err := countNonIndexBSONFiles(dumpDBDir)
So(err, ShouldBeNil)
So(countColls, ShouldEqual, len(testCollectionNames))
Reset(func() {
So(os.RemoveAll(dumpDir), ShouldBeNil)
})
})
Convey("that does not exist. The dumped directory shouldn't be created", func() {
md.OutputOptions.Out = "dump"
md.ToolOptions.Namespace.DB = "nottestdb"
err = md.Dump()
So(err, ShouldBeNil)
path, err := os.Getwd()
So(err, ShouldBeNil)
dumpDir := util.ToUniversalPath(filepath.Join(path, "dump"))
dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, "nottestdb"))
So(fileDirExists(dumpDir), ShouldBeFalse)
So(fileDirExists(dumpDBDir), ShouldBeFalse)
})
})
})
Convey("testing that using MongoDump WITH a query dumps a subset of documents in a database and/or collection", func() {
session, err := testutil.GetBareSession()
So(err, ShouldBeNil)
md := simpleMongoDumpInstance()
// expect 10 documents per collection
bsonQuery := bson.M{"age": bson.M{"$lt": 10}}
jsonQuery, err := bsonutil.ConvertBSONValueToLegacyExtJSON(bsonQuery)
So(err, ShouldBeNil)
jsonQueryBytes, err := json.Marshal(jsonQuery)
So(err, ShouldBeNil)
Convey("using --query for all the collections in the database", func() {
md.InputOptions.Query = string(jsonQueryBytes)
md.ToolOptions.Namespace.DB = testDB
md.OutputOptions.Out = "dump"
dumpDir := testQuery(md, session)
Reset(func() {
So(session.Database(testRestoreDB).Drop(nil), ShouldBeNil)
So(os.RemoveAll(dumpDir), ShouldBeNil)
})
})
Convey("using --queryFile for all the collections in the database", func() {
ioutil.WriteFile("example.json", jsonQueryBytes, 0777)
md.InputOptions.QueryFile = "example.json"
md.ToolOptions.Namespace.DB = testDB
md.OutputOptions.Out = "dump"
dumpDir := testQuery(md, session)
Reset(func() {
So(session.Database(testRestoreDB).Drop(nil), ShouldBeNil)
So(os.RemoveAll(dumpDir), ShouldBeNil)
So(os.Remove("example.json"), ShouldBeNil)
})
})
})
Convey("using MongoDump against a collection that doesn't exist succeeds", func() {
md := simpleMongoDumpInstance()
md.ToolOptions.Namespace.DB = "nonExistentDB"
md.ToolOptions.Namespace.Collection = "nonExistentColl"
err := md.Init()
So(err, ShouldBeNil)
err = md.Dump()
So(err, ShouldBeNil)
})
Reset(func() {
So(tearDownMongoDumpTestData(), ShouldBeNil)
})
})
}
func TestMongoDumpMetaData(t *testing.T) {
testtype.SkipUnlessTestType(t, testtype.IntegrationTestType)
log.SetWriter(ioutil.Discard)
Convey("With a MongoDump instance", t, func() {
session, err := testutil.GetBareSession()
So(session, ShouldNotBeNil)
So(err, ShouldBeNil)
err = setUpMongoDumpTestData()
So(err, ShouldBeNil)
Convey("testing that the dumped directory contains information about indexes", func() {
md := simpleMongoDumpInstance()
md.OutputOptions.Out = "dump"
err = md.Init()
So(err, ShouldBeNil)
err = md.Dump()
So(err, ShouldBeNil)
path, err := os.Getwd()
So(err, ShouldBeNil)
dumpDir := util.ToUniversalPath(filepath.Join(path, "dump"))
dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, testDB))
So(fileDirExists(dumpDir), ShouldBeTrue)
So(fileDirExists(dumpDBDir), ShouldBeTrue)
Convey("having one metadata file per collection", func() {
c1, err := countNonIndexBSONFiles(dumpDBDir)
So(err, ShouldBeNil)
c2, err := countMetaDataFiles(dumpDBDir)
So(err, ShouldBeNil)
So(c1, ShouldEqual, c2)
Convey("and that the JSON in a metadata file is valid", func() {
metaFiles, err := getMatchingFiles(dumpDBDir, ".*\\.metadata\\.json")
So(err, ShouldBeNil)
So(len(metaFiles), ShouldBeGreaterThan, 0)
oneMetaFile, err := os.Open(util.ToUniversalPath(filepath.Join(dumpDBDir, metaFiles[0])))
defer oneMetaFile.Close()
So(err, ShouldBeNil)
contents, err := ioutil.ReadAll(oneMetaFile)
var jsonResult map[string]interface{}
err = json.Unmarshal(contents, &jsonResult)
So(err, ShouldBeNil)
Convey("and contains an 'indexes' key", func() {
_, ok := jsonResult["indexes"]
So(ok, ShouldBeTrue)
})
fcv := testutil.GetFCV(session)
cmp, err := testutil.CompareFCV(fcv, "3.6")
So(err, ShouldBeNil)
if cmp >= 0 {
Convey("and on FCV 3.6+, contains a 'uuid' key", func() {
uuid, ok := jsonResult["uuid"]
So(ok, ShouldBeTrue)
checkUUID := regexp.MustCompile(`(?i)^[a-z0-9]{32}$`)
So(checkUUID.MatchString(uuid.(string)), ShouldBeTrue)
// XXX useless -- xdg, 2018-09-21
So(err, ShouldBeNil)
})
}
})
})
Reset(func() {
So(os.RemoveAll(dumpDir), ShouldBeNil)
})
})
Reset(func() {
So(tearDownMongoDumpTestData(), ShouldBeNil)
})
})
}
func TestMongoDumpOplog(t *testing.T) {
testtype.SkipUnlessTestType(t, testtype.IntegrationTestType)
sessionProvider, _, err := testutil.GetBareSessionProvider()
if err != nil {
t.Fatalf("No cluster available: %v", err)
}
session, err := sessionProvider.GetSession()
if err != nil {
t.Fatalf("No client available: %v", err)
}
if ok, _ := sessionProvider.IsReplicaSet(); !ok {
t.SkipNow()
}
log.SetWriter(ioutil.Discard)
Convey("With a MongoDump instance", t, func() {
Convey("testing that the dumped directory contains an oplog", func() {
// Start with clean filesystem
path, err := os.Getwd()
So(err, ShouldBeNil)
dumpDir := util.ToUniversalPath(filepath.Join(path, "dump"))
dumpOplogFile := util.ToUniversalPath(filepath.Join(dumpDir, "oplog.bson"))
err = os.RemoveAll(dumpDir)
So(err, ShouldBeNil)
So(fileDirExists(dumpDir), ShouldBeFalse)
// Start with clean database
So(tearDownMongoDumpTestData(), ShouldBeNil)
// Prepare mongodump with options
md := simpleMongoDumpInstance()
md.OutputOptions.Oplog = true
md.ToolOptions.Namespace = &options.Namespace{}
err = md.Init()
So(err, ShouldBeNil)
// Start inserting docs in the background so the oplog has data
ready := make(chan struct{})
done := make(chan struct{})
errs := make(chan error, 1)
go backgroundInsert(ready, done, errs)
<-ready
// Run mongodump
err = md.Dump()
So(err, ShouldBeNil)
// Stop background insertion
close(done)
err = <-errs
So(err, ShouldBeNil)
// Check for and read the oplog file
So(fileDirExists(dumpDir), ShouldBeTrue)
So(fileDirExists(dumpOplogFile), ShouldBeTrue)
oplogFile, err := os.Open(dumpOplogFile)
defer oplogFile.Close()
So(err, ShouldBeNil)
rdr := db.NewBSONSource(oplogFile)
iter := db.NewDecodedBSONSource(rdr)
fcv := testutil.GetFCV(session)
cmp, err := testutil.CompareFCV(fcv, "3.6")
So(err, ShouldBeNil)
withUI := countOplogUI(iter)
So(iter.Err(), ShouldBeNil)
if cmp >= 0 {
// for FCV 3.6+, should have 'ui' field in oplog entries
So(withUI, ShouldBeGreaterThan, 0)
} else {
// for FCV <3.6, should not have 'ui' field in oplog entries
So(withUI, ShouldEqual, 0)
}
// Cleanup
So(os.RemoveAll(dumpDir), ShouldBeNil)
So(tearDownMongoDumpTestData(), ShouldBeNil)
})
})
}
// Test dumping a collection with autoIndexId:false. As of MongoDB 4.0,
// this is only allowed on the 'local' database.
func TestMongoDumpTOOLS2174(t *testing.T) {
testtype.SkipUnlessTestType(t, testtype.IntegrationTestType)
log.SetWriter(ioutil.Discard)
sessionProvider, _, err := testutil.GetBareSessionProvider()
if err != nil {
t.Fatalf("No cluster available: %v", err)
}
collName := "tools-2174"
dbName := "local"
var r1 bson.M
sessionProvider.Run(bson.D{{"drop", collName}}, &r1, dbName)
createCmd := bson.D{
{"create", collName},
{"autoIndexId", false},
}
var r2 bson.M
err = sessionProvider.Run(createCmd, &r2, dbName)
if err != nil {
t.Fatalf("Error creating capped, no-autoIndexId collection: %v", err)
}
Convey("testing dumping a capped, autoIndexId:false collection", t, func() {
md := simpleMongoDumpInstance()
md.ToolOptions.Namespace.Collection = collName
md.ToolOptions.Namespace.DB = dbName
md.OutputOptions.Out = "dump"
err = md.Init()
So(err, ShouldBeNil)
err = md.Dump()
So(err, ShouldBeNil)
})
}
func TestMongoDumpOrderedQuery(t *testing.T) {
testtype.SkipUnlessTestType(t, testtype.IntegrationTestType)
log.SetWriter(ioutil.Discard)
Convey("With a MongoDump instance", t, func() {
err := setUpMongoDumpTestData()
So(err, ShouldBeNil)
path, err := os.Getwd()
So(err, ShouldBeNil)
dumpDir := util.ToUniversalPath(filepath.Join(path, "dump"))
Convey("testing that --query is order-preserving", func() {
// If order is not preserved, probabilistically, some of these
// loops will fail.
for i := 0; i < 100; i++ {
So(os.RemoveAll(dumpDir), ShouldBeNil)
md := simpleMongoDumpInstance()
md.InputOptions.Query = `{"coords":{"x":0,"y":1}}`
md.ToolOptions.Namespace.Collection = testCollectionNames[0]
md.ToolOptions.Namespace.DB = testDB
md.OutputOptions.Out = "dump"
err = md.Init()
So(err, ShouldBeNil)
err = md.Dump()
So(err, ShouldBeNil)
dumpBSON := util.ToUniversalPath(filepath.Join(dumpDir, testDB, testCollectionNames[0]+".bson"))
file, err := os.Open(dumpBSON)
So(err, ShouldBeNil)
bsonSource := db.NewDecodedBSONSource(db.NewBSONSource(file))
var count int
var result bson.M
for bsonSource.Next(&result) {
count++
}
So(bsonSource.Err(), ShouldBeNil)
So(count, ShouldEqual, 1)
bsonSource.Close()
file.Close()
}
})
Reset(func() {
So(os.RemoveAll(dumpDir), ShouldBeNil)
So(tearDownMongoDumpTestData(), ShouldBeNil)
})
})
}
|
[
"\"MONGOD\""
] |
[] |
[
"MONGOD"
] |
[]
|
["MONGOD"]
|
go
| 1 | 0 | |
vendor/github.com/kubernetes-incubator/service-catalog/vendor/cloud.google.com/go/datastore/datastore.go
|
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"errors"
"fmt"
"log"
"os"
"reflect"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
const (
prodAddr = "datastore.googleapis.com:443"
userAgent = "gcloud-golang-datastore/20160401"
)
// ScopeDatastore grants permissions to view and/or manage datastore entities
const ScopeDatastore = "https://www.googleapis.com/auth/datastore"
// protoClient is an interface for *transport.ProtoClient to support injecting
// fake clients in tests.
type protoClient interface {
Call(context.Context, string, proto.Message, proto.Message) error
}
// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC
// metadata to be sent in each request for server-side traffic management.
type datastoreClient struct {
c pb.DatastoreClient
md metadata.MD
}
func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient {
return &datastoreClient{
c: pb.NewDatastoreClient(conn),
md: metadata.Pairs(resourcePrefixHeader, "projects/"+projectID),
}
}
func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) {
return dc.c.Lookup(metadata.NewContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) {
return dc.c.RunQuery(metadata.NewContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) {
return dc.c.BeginTransaction(metadata.NewContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) {
return dc.c.Commit(metadata.NewContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) {
return dc.c.Rollback(metadata.NewContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) {
return dc.c.AllocateIds(metadata.NewContext(ctx, dc.md), in, opts...)
}
// Client is a client for reading and writing data in a datastore dataset.
type Client struct {
conn *grpc.ClientConn
client pb.DatastoreClient
endpoint string
dataset string // Called dataset by the datastore API, synonym for project ID.
}
// NewClient creates a new Client for a given dataset.
// If the project ID is empty, it is derived from the DATASTORE_PROJECT_ID environment variable.
// If the DATASTORE_EMULATOR_HOST environment variable is set, the client will use its value
// to connect to a locally-running datastore emulator.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
var o []option.ClientOption
// Environment variables for gcd emulator:
// https://cloud.google.com/datastore/docs/tools/datastore-emulator
// If the emulator is available, dial it directly (and don't pass any credentials).
if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr != "" {
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
return nil, fmt.Errorf("grpc.Dial: %v", err)
}
o = []option.ClientOption{option.WithGRPCConn(conn)}
} else {
o = []option.ClientOption{
option.WithEndpoint(prodAddr),
option.WithScopes(ScopeDatastore),
option.WithUserAgent(userAgent),
}
}
// Warn if we see the legacy emulator environment variables.
if os.Getenv("DATASTORE_HOST") != "" && os.Getenv("DATASTORE_EMULATOR_HOST") == "" {
log.Print("WARNING: legacy environment variable DATASTORE_HOST is ignored. Use DATASTORE_EMULATOR_HOST instead.")
}
if os.Getenv("DATASTORE_DATASET") != "" && os.Getenv("DATASTORE_PROJECT_ID") == "" {
log.Print("WARNING: legacy environment variable DATASTORE_DATASET is ignored. Use DATASTORE_PROJECT_ID instead.")
}
if projectID == "" {
projectID = os.Getenv("DATASTORE_PROJECT_ID")
}
if projectID == "" {
return nil, errors.New("datastore: missing project/dataset id")
}
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
return &Client{
conn: conn,
client: newDatastoreClient(conn, projectID),
dataset: projectID,
}, nil
}
var (
// ErrInvalidEntityType is returned when functions like Get or Next are
// passed a dst or src argument of invalid type.
ErrInvalidEntityType = errors.New("datastore: invalid entity type")
// ErrInvalidKey is returned when an invalid key is presented.
ErrInvalidKey = errors.New("datastore: invalid key")
// ErrNoSuchEntity is returned when no entity was found for a given key.
ErrNoSuchEntity = errors.New("datastore: no such entity")
)
type multiArgType int
const (
multiArgTypeInvalid multiArgType = iota
multiArgTypePropertyLoadSaver
multiArgTypeStruct
multiArgTypeStructPtr
multiArgTypeInterface
)
// nsKey is the type of the context.Context key to store the datastore
// namespace.
type nsKey struct{}
// WithNamespace returns a new context that limits the scope of its parent
// context with a Datastore namespace.
func WithNamespace(parent context.Context, namespace string) context.Context {
return context.WithValue(parent, nsKey{}, namespace)
}
// ctxNamespace returns the active namespace for a context.
// It defaults to "" if no namespace was specified.
func ctxNamespace(ctx context.Context) string {
v, _ := ctx.Value(nsKey{}).(string)
return v
}
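// Example (editor's sketch): the namespace travels with the context:
//	ctx := WithNamespace(context.Background(), "ns1")
//	_ = ctxNamespace(ctx)                  // "ns1"
//	_ = ctxNamespace(context.Background()) // "" (no namespace set)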
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct.
// StructType is the type of the struct pointed to by the destination argument
// passed to Get or to Iterator.Next.
type ErrFieldMismatch struct {
StructType reflect.Type
FieldName string
Reason string
}
func (e *ErrFieldMismatch) Error() string {
return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
e.FieldName, e.StructType, e.Reason)
}
// GeoPoint represents a location as latitude/longitude in degrees.
type GeoPoint struct {
Lat, Lng float64
}
// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
func (g GeoPoint) Valid() bool {
return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
}
func keyToProto(k *Key) *pb.Key {
if k == nil {
return nil
}
// TODO(jbd): Eliminate unrequired allocations.
var path []*pb.Key_PathElement
for {
el := &pb.Key_PathElement{Kind: k.kind}
if k.id != 0 {
el.IdType = &pb.Key_PathElement_Id{k.id}
} else if k.name != "" {
el.IdType = &pb.Key_PathElement_Name{k.name}
}
path = append([]*pb.Key_PathElement{el}, path...)
if k.parent == nil {
break
}
k = k.parent
}
key := &pb.Key{Path: path}
if k.namespace != "" {
key.PartitionId = &pb.PartitionId{
NamespaceId: k.namespace,
}
}
return key
}
// protoToKey decodes a protocol buffer representation of a key into an
// equivalent *Key object. If the key is invalid, protoToKey will return the
// invalid key along with ErrInvalidKey.
func protoToKey(p *pb.Key) (*Key, error) {
var key *Key
var namespace string
if partition := p.PartitionId; partition != nil {
namespace = partition.NamespaceId
}
for _, el := range p.Path {
key = &Key{
namespace: namespace,
kind: el.Kind,
id: el.GetId(),
name: el.GetName(),
parent: key,
}
}
if !key.valid() { // Also detects key == nil.
return key, ErrInvalidKey
}
return key, nil
}
// multiKeyToProto is a batch version of keyToProto.
func multiKeyToProto(keys []*Key) []*pb.Key {
ret := make([]*pb.Key, len(keys))
for i, k := range keys {
ret[i] = keyToProto(k)
}
return ret
}
// multiProtoToKey is a batch version of protoToKey.
func multiProtoToKey(keys []*pb.Key) ([]*Key, error) {
hasErr := false
ret := make([]*Key, len(keys))
err := make(MultiError, len(keys))
for i, k := range keys {
ret[i], err[i] = protoToKey(k)
if err[i] != nil {
hasErr = true
}
}
if hasErr {
return nil, err
}
return ret, nil
}
// multiValid is a batch version of Key.valid. It returns an error, not a
// []bool.
func multiValid(key []*Key) error {
invalid := false
for _, k := range key {
if !k.valid() {
invalid = true
break
}
}
if !invalid {
return nil
}
err := make(MultiError, len(key))
for i, k := range key {
if !k.valid() {
err[i] = ErrInvalidKey
}
}
return err
}
// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
// type S, for some interface type I, or some non-interface non-pointer type P
// such that P or *P implements PropertyLoadSaver.
//
// It returns what category the slice's elements are, and the reflect.Type
// that represents S, I or P.
//
// As a special case, PropertyList is an invalid type for v.
//
// TODO(djd): multiArg is very confusing. Fold this logic into the
// relevant Put/Get methods to make the logic less opaque.
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
if v.Kind() != reflect.Slice {
return multiArgTypeInvalid, nil
}
if v.Type() == typeOfPropertyList {
return multiArgTypeInvalid, nil
}
elemType = v.Type().Elem()
if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
return multiArgTypePropertyLoadSaver, elemType
}
switch elemType.Kind() {
case reflect.Struct:
return multiArgTypeStruct, elemType
case reflect.Interface:
return multiArgTypeInterface, elemType
case reflect.Ptr:
elemType = elemType.Elem()
if elemType.Kind() == reflect.Struct {
return multiArgTypeStructPtr, elemType
}
}
return multiArgTypeInvalid, nil
}
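// Examples (editor's sketch): for some struct type S, []S, []*S and
// []interface{} are accepted (struct, struct pointer, interface kinds), as is a
// []P whose *P implements PropertyLoadSaver; []PropertyList is rejected even
// though PropertyList is itself a slice of structs.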
// Close closes the Client.
func (c *Client) Close() {
c.conn.Close()
}
// Get loads the entity stored for key into dst, which must be a struct pointer
// or implement PropertyLoadSaver. If there is no such entity for the key, Get
// returns ErrNoSuchEntity.
//
// The values of dst's unmatched struct fields are not modified, and matching
// slice-typed fields are not reset before appending to them. In particular, it
// is recommended to pass a pointer to a zero valued struct on each Get call.
//
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct. ErrFieldMismatch is only returned if
// dst is a struct pointer.
func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error {
if dst == nil { // get catches nil interfaces; we need to catch nil ptr here
return ErrInvalidEntityType
}
err := c.get(ctx, []*Key{key}, []interface{}{dst}, nil)
if me, ok := err.(MultiError); ok {
return me[0]
}
return err
}
// GetMulti is a batch version of Get.
//
// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
// type I, or some non-interface non-pointer type P such that P or *P
// implements PropertyLoadSaver. If an []I, each element must be a valid dst
// for Get: it must be a struct pointer or implement PropertyLoadSaver.
//
// As a special case, PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when []PropertyList was intended.
func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) error {
return c.get(ctx, keys, dst, nil)
}
func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error {
v := reflect.ValueOf(dst)
multiArgType, _ := checkMultiArg(v)
// Sanity checks
if multiArgType == multiArgTypeInvalid {
return errors.New("datastore: dst has invalid type")
}
if len(keys) != v.Len() {
return errors.New("datastore: keys and dst slices have different length")
}
if len(keys) == 0 {
return nil
}
// Go through keys, validate them, serialize them, and create a map from each key to its index
multiErr, any := make(MultiError, len(keys)), false
keyMap := make(map[string]int)
pbKeys := make([]*pb.Key, len(keys))
for i, k := range keys {
if !k.valid() {
multiErr[i] = ErrInvalidKey
any = true
} else {
keyMap[k.String()] = i
pbKeys[i] = keyToProto(k)
}
}
if any {
return multiErr
}
req := &pb.LookupRequest{
ProjectId: c.dataset,
Keys: pbKeys,
ReadOptions: opts,
}
resp, err := c.client.Lookup(ctx, req)
if err != nil {
return err
}
if len(resp.Deferred) > 0 {
// TODO(jbd): Assess whether we should retry the deferred keys.
return errors.New("datastore: some entities temporarily unavailable")
}
if len(keys) != len(resp.Found)+len(resp.Missing) {
return errors.New("datastore: internal error: server returned the wrong number of entities")
}
for _, e := range resp.Found {
k, err := protoToKey(e.Entity.Key)
if err != nil {
return errors.New("datastore: internal error: server returned an invalid key")
}
index := keyMap[k.String()]
elem := v.Index(index)
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
elem.Set(reflect.New(elem.Type().Elem()))
}
if err := loadEntity(elem.Interface(), e.Entity); err != nil {
multiErr[index] = err
any = true
}
}
for _, e := range resp.Missing {
k, err := protoToKey(e.Entity.Key)
if err != nil {
return errors.New("datastore: internal error: server returned an invalid key")
}
multiErr[keyMap[k.String()]] = ErrNoSuchEntity
any = true
}
if any {
return multiErr
}
return nil
}
// Put saves the entity src into the datastore with key k. src must be a struct
// pointer or implement PropertyLoadSaver; if a struct pointer then any
// unexported fields of that struct will be skipped. If k is an incomplete key,
// the returned key will be a unique key generated by the datastore.
func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) {
k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src})
if err != nil {
if me, ok := err.(MultiError); ok {
return nil, me[0]
}
return nil, err
}
return k[0], nil
}
// PutMulti is a batch version of Put.
//
// src must satisfy the same conditions as the dst argument to GetMulti.
func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) {
mutations, err := putMutations(keys, src)
if err != nil {
return nil, err
}
// Make the request.
req := &pb.CommitRequest{
ProjectId: c.dataset,
Mutations: mutations,
Mode: pb.CommitRequest_NON_TRANSACTIONAL,
}
resp, err := c.client.Commit(ctx, req)
if err != nil {
return nil, err
}
// Copy any newly minted keys into the returned keys.
ret := make([]*Key, len(keys))
for i, key := range keys {
if key.Incomplete() {
// This key is in the mutation results.
ret[i], err = protoToKey(resp.MutationResults[i].Key)
if err != nil {
return nil, errors.New("datastore: internal error: server returned an invalid key")
}
} else {
ret[i] = key
}
}
return ret, nil
}
func putMutations(keys []*Key, src interface{}) ([]*pb.Mutation, error) {
v := reflect.ValueOf(src)
multiArgType, _ := checkMultiArg(v)
if multiArgType == multiArgTypeInvalid {
return nil, errors.New("datastore: src has invalid type")
}
if len(keys) != v.Len() {
return nil, errors.New("datastore: key and src slices have different length")
}
if len(keys) == 0 {
return nil, nil
}
if err := multiValid(keys); err != nil {
return nil, err
}
mutations := make([]*pb.Mutation, 0, len(keys))
for i, k := range keys {
elem := v.Index(i)
// Two cases where we need to take the address:
// 1) multiArgTypePropertyLoadSaver => &elem implements PLS
// 2) multiArgTypeStruct => saveEntity needs *struct
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
p, err := saveEntity(k, elem.Interface())
if err != nil {
return nil, fmt.Errorf("datastore: Error while saving %v: %v", k.String(), err)
}
var mut *pb.Mutation
if k.Incomplete() {
mut = &pb.Mutation{Operation: &pb.Mutation_Insert{p}}
} else {
mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{p}}
}
mutations = append(mutations, mut)
}
return mutations, nil
}
// Delete deletes the entity for the given key.
func (c *Client) Delete(ctx context.Context, key *Key) error {
err := c.DeleteMulti(ctx, []*Key{key})
if me, ok := err.(MultiError); ok {
return me[0]
}
return err
}
// DeleteMulti is a batch version of Delete.
func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error {
mutations, err := deleteMutations(keys)
if err != nil {
return err
}
req := &pb.CommitRequest{
ProjectId: c.dataset,
Mutations: mutations,
Mode: pb.CommitRequest_NON_TRANSACTIONAL,
}
_, err = c.client.Commit(ctx, req)
return err
}
func deleteMutations(keys []*Key) ([]*pb.Mutation, error) {
mutations := make([]*pb.Mutation, 0, len(keys))
for _, k := range keys {
if k.Incomplete() {
return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k)
}
mutations = append(mutations, &pb.Mutation{
Operation: &pb.Mutation_Delete{keyToProto(k)},
})
}
return mutations, nil
}
|
[
"\"DATASTORE_EMULATOR_HOST\"",
"\"DATASTORE_HOST\"",
"\"DATASTORE_EMULATOR_HOST\"",
"\"DATASTORE_DATASET\"",
"\"DATASTORE_PROJECT_ID\"",
"\"DATASTORE_PROJECT_ID\""
] |
[] |
[
"DATASTORE_DATASET",
"DATASTORE_EMULATOR_HOST",
"DATASTORE_PROJECT_ID",
"DATASTORE_HOST"
] |
[]
|
["DATASTORE_DATASET", "DATASTORE_EMULATOR_HOST", "DATASTORE_PROJECT_ID", "DATASTORE_HOST"]
|
go
| 4 | 0 | |
nipype/workflows/dmri/fsl/tests/test_epi.py
|
# -*- coding: utf-8 -*-
import os
import pytest
import nipype.workflows.fmri.fsl as fsl_wf
import nipype.interfaces.fsl as fsl
import nipype.interfaces.utility as util
from nipype.interfaces.fsl import no_fsl, no_fsl_course_data
import nipype.pipeline.engine as pe
import warnings
import tempfile
import shutil
from nipype.workflows.dmri.fsl.epi import create_eddy_correct_pipeline
@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
@pytest.mark.skipif(no_fsl_course_data(), reason="fsl data not available")
def test_create_eddy_correct_pipeline():
fsl_course_dir = os.path.abspath(os.environ['FSL_COURSE_DATA'])
dwi_file = os.path.join(fsl_course_dir, "fdt1/subj1/data.nii.gz")
trim_dwi = pe.Node(fsl.ExtractROI(t_min=0,
t_size=2), name="trim_dwi")
trim_dwi.inputs.in_file = dwi_file
nipype_eddycorrect = create_eddy_correct_pipeline("nipype_eddycorrect")
nipype_eddycorrect.inputs.inputnode.ref_num = 0
with warnings.catch_warnings():
warnings.simplefilter("ignore")
original_eddycorrect = pe.Node(interface=fsl.EddyCorrect(), name="original_eddycorrect")
original_eddycorrect.inputs.ref_num = 0
test = pe.Node(util.AssertEqual(), name="eddy_corrected_dwi_test")
pipeline = pe.Workflow(name="test_eddycorrect")
pipeline.base_dir = tempfile.mkdtemp(prefix="nipype_test_eddycorrect_")
pipeline.connect([(trim_dwi, original_eddycorrect, [("roi_file", "in_file")]),
(trim_dwi, nipype_eddycorrect, [("roi_file", "inputnode.in_file")]),
(nipype_eddycorrect, test, [("outputnode.eddy_corrected", "volume1")]),
(original_eddycorrect, test, [("eddy_corrected", "volume2")]),
])
pipeline.run(plugin='Linear')
shutil.rmtree(pipeline.base_dir)
|
[] |
[] |
[
"FSL_COURSE_DATA"
] |
[]
|
["FSL_COURSE_DATA"]
|
python
| 1 | 0 | |
salesforce/salesforce_clientutil.go
|
package salesforce
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"regexp"
"strings"
"github.com/grokify/go-salesforce/sobjects"
"github.com/grokify/goauth/credentials"
"github.com/grokify/mogo/net/httputilmore"
"github.com/grokify/mogo/net/urlutil"
)
type SalesforceClient struct {
ClientMore httputilmore.ClientMore
URLBuilder URLBuilder
}
func NewSalesforceClient(client *http.Client, instanceName string) SalesforceClient {
return SalesforceClient{
ClientMore: httputilmore.ClientMore{Client: client},
URLBuilder: NewURLBuilder(instanceName),
}
}
func NewSalesforceClientEnv() (SalesforceClient, error) {
sc := SalesforceClient{
URLBuilder: NewURLBuilder(os.Getenv("SALESFORCE_INSTANCE_NAME")),
}
client, err := NewClientPasswordSalesforceEnv()
if err != nil {
return sc, err
}
sc.ClientMore = httputilmore.ClientMore{Client: client}
return sc, nil
}
type OAuth2Credentials struct {
credentials.CredentialsOAuth2
InstanceName string
}
func NewSalesforceClientPassword(soc OAuth2Credentials) (SalesforceClient, error) {
httpClient, err := NewClientPassword(soc.CredentialsOAuth2)
if err != nil {
return SalesforceClient{}, err
}
return NewSalesforceClient(httpClient, soc.InstanceName), nil
}
func (sc *SalesforceClient) GetServicesData() (*http.Response, error) {
apiURL := sc.URLBuilder.Build("services/data")
return sc.ClientMore.Client.Get(apiURL.String())
}
func (sc *SalesforceClient) CreateContact(contact interface{}) (*http.Response, error) {
//apiURL := sc.URLBuilder.Build("/services/data/v40.0/sobjects/Contact/")
apiURL := sc.URLBuilder.BuildSobjectURL("Contact")
return sc.ClientMore.PostToJSON(apiURL.String(), contact)
}
func (sc *SalesforceClient) CreateSobject(sobjectName string, sobject interface{}) (*http.Response, error) {
apiURL := sc.URLBuilder.BuildSobjectURL(sobjectName)
return sc.ClientMore.PostToJSON(apiURL.String(), sobject)
}
func (sc *SalesforceClient) ExecSOQL(soql string) (*http.Response, error) {
//curl https://yourInstance.salesforce.com/services/data/v20.0/query/?q=SELECT+name+from+Account -H "Authorization: Bearer token"
apiURL := sc.URLBuilder.Build("/services/data/v40.0/query/")
soql = regexp.MustCompile(`\s+`).ReplaceAllString(strings.TrimSpace(soql), "+")
qryMap := map[string][]string{"q": {soql}}
apiURLString := urlutil.URLAddQuery(&apiURL, qryMap).String()
apiURLString = regexp.MustCompile(`\%2B`).ReplaceAllString(strings.TrimSpace(apiURLString), "+")
return sc.ClientMore.Client.Get(apiURLString)
}
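// Example (editor's sketch): ExecSOQL("SELECT id, name FROM account") requests
// .../services/data/v40.0/query/?q=SELECT+id,+name+FROM+account; whitespace is
// collapsed to '+' and the URL-encoded '%2B' is rewritten back to '+'.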
func (sc *SalesforceClient) GetAccountsAll() (sobjects.AccountSet, error) {
resp, err := sc.ExecSOQL("SELECT id, name FROM account")
if err != nil {
return sobjects.AccountSet{}, err
}
err = httputilmore.PrintResponse(resp, true)
if err != nil {
return sobjects.AccountSet{}, err
}
return sobjects.NewAccountSetFromJSONResponse(resp)
}
func (sc *SalesforceClient) DeleteAccountsAll() error {
set, err := sc.GetAccountsAll()
if err != nil {
return err
}
for _, account := range set.Records {
resp, err := sc.DeleteAccount(account.ID)
if err != nil {
continue
}
if resp.StatusCode > 299 {
err := httputilmore.PrintResponse(resp, true)
if err != nil {
return err
}
fmt.Printf("%v\n", resp.StatusCode)
continue
}
}
return nil
}
func (sc *SalesforceClient) DeleteAccount(id string) (*http.Response, error) {
//apiURLString := fmt.Sprintf("/services/data/v40.0/sobjects/%v/%v", "Account", id)
//apiURL := sc.URLBuilder.Build(apiURLString)
apiURL := sc.URLBuilder.BuildSobjectURL("Account", id)
req, err := http.NewRequest("DELETE", apiURL.String(), nil)
if err != nil {
return &http.Response{}, err
}
return sc.ClientMore.Client.Do(req)
}
func (sc *SalesforceClient) GetContactsAll() (sobjects.ContactSet, error) {
resp, err := sc.ExecSOQL("SELECT id, name, email FROM contact")
if err != nil {
return sobjects.ContactSet{}, err
}
return sobjects.NewContactSetFromJSONResponse(resp)
}
func (sc *SalesforceClient) DeleteContactsAll() error {
set, err := sc.GetContactsAll()
if err != nil {
return err
}
for _, contact := range set.Records {
resp, err := sc.DeleteContact(contact.ID)
if err != nil {
return err
}
if resp.StatusCode > 299 {
err := httputilmore.PrintResponse(resp, true)
if err != nil {
return err
}
fmt.Printf("%v\n", resp.StatusCode)
continue
}
}
return nil
}
func (sc *SalesforceClient) DeleteContact(id string) (*http.Response, error) {
apiURL := sc.URLBuilder.BuildSobjectURL("Contact", id)
req, err := http.NewRequest("DELETE", apiURL.String(), nil)
if err != nil {
return &http.Response{}, err
}
return sc.ClientMore.Client.Do(req)
}
func (sc *SalesforceClient) UserInfo() (User, error) {
apiURL := "https://login.salesforce.com/services/oauth2/userinfo"
user := User{}
req, err := http.NewRequest("GETs", apiURL, nil)
if err != nil {
return user, err
}
resp, err := sc.ClientMore.Client.Do(req)
if err != nil {
return user, err
}
bytes, err := io.ReadAll(resp.Body)
if err != nil {
return user, err
}
err = json.Unmarshal(bytes, &user)
return user, err
}
type User struct {
UserID string `json:"user_id,omitempty"`
OrganizationID string `json:"organization_id,omitempty"`
}
type URLBuilder struct {
BaseURL url.URL
Version string
}
func NewURLBuilder(instanceName string) URLBuilder {
return URLBuilder{
BaseURL: url.URL{
Scheme: "https",
Host: fmt.Sprintf(HostFormat, instanceName),
},
Version: "v40.0",
}
}
func (b *URLBuilder) Build(path string) url.URL {
u := b.BaseURL
u.Path = path
return u
}
func (b *URLBuilder) BuildSobjectURL(parts ...string) url.URL {
partsString := path.Join(parts...)
apiURLString := fmt.Sprintf("/services/data/%v/sobjects/%v", b.Version, partsString)
return b.Build(apiURLString)
}
|
[
"\"SALESFORCE_INSTANCE_NAME\""
] |
[] |
[
"SALESFORCE_INSTANCE_NAME"
] |
[]
|
["SALESFORCE_INSTANCE_NAME"]
|
go
| 1 | 0 | |
strata/cmd/mongo/lreplica_drivers/lrminiodriver/lrminiodriver.go
|
package lrminiodriver
import (
"errors"
"os"
"strconv"
"github.com/facebookgo/rocks-strata/strata"
"github.com/facebookgo/rocks-strata/strata/cmd/mongo/lreplica_drivers/lrs3driver"
"github.com/facebookgo/rocks-strata/strata/miniostorage"
"github.com/facebookgo/rocks-strata/strata/mongo/lreplica"
)
// MinioOptions are common to all commands
type MinioOptions struct {
Region string `short:"R" long:"region" description:"Minio region name, such as \"us-east-1\"" default:"us-east-1"`
BucketName string `short:"b" long:"bucket" description:"Name of Minio bucket used to store the backups" required:"true"`
BucketPrefix string `short:"p" long:"bucket-prefix" description:"Prefix used when storing and retrieving files. Optional" optional:"true"`
}
// Options define the common options needed by this strata command
type Options struct {
Minio MinioOptions `group:"Minio Options"`
Replica lrs3driver.ReplicaOptions `group:"Replica Options"`
}
// DriverFactory implements strata.DriverFactory
type DriverFactory struct {
Ops *Options
}
// GetOptions returns the factory's Options
func (factory DriverFactory) GetOptions() interface{} {
return factory.Ops
}
// Driver uses the DriverFactory's Options to construct a strata.Driver
func (factory DriverFactory) Driver() (*strata.Driver, error) {
options := factory.GetOptions().(*Options)
endPoint := os.Getenv("MINIO_ENDPOINT")
secure := os.Getenv("MINIO_SECURE")
accessKey := os.Getenv("MINIO_ACCESS_KEY_ID")
secretKey := os.Getenv("MINIO_SECRET_ACCESS_KEY")
if endPoint == "" || accessKey == "" || secretKey == "" {
return nil, errors.New("Environment variables MINIO_ENDPOINT, MINIO_ACCESS_KEY_ID and MINIO_SECRET_ACCESS_KEY must be set")
}
if secure == "" {
secure = "true"
}
secureBool, err := strconv.ParseBool(secure)
if err != nil {
return nil, errors.New("Valid values for environment variable MINIO_SECURE are 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False")
}
minio, err := miniostorage.NewMinioStorage(
endPoint,
accessKey, secretKey,
options.Minio.BucketName,
options.Minio.BucketPrefix,
options.Minio.Region,
secureBool)
if err != nil {
return nil, err
}
replica, err := lreplica.NewLocalReplica(
options.Replica.MaxBackgroundCopies,
strconv.Itoa(options.Replica.Port),
options.Replica.Username,
options.Replica.Password,
)
if err != nil {
return nil, err
}
manager, err := strata.NewSnapshotManager(replica, minio)
if err != nil {
return nil, err
}
return &strata.Driver{Manager: manager}, err
}
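// Configuration sketch (editor's note; values are placeholders): the driver is
// configured through the environment, e.g.
//	MINIO_ENDPOINT=minio.example.com:9000
//	MINIO_ACCESS_KEY_ID=AKIAEXAMPLE
//	MINIO_SECRET_ACCESS_KEY=secret
//	MINIO_SECURE=false   (optional; defaults to true when unset)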
|
[
"\"MINIO_ENDPOINT\"",
"\"MINIO_SECURE\"",
"\"MINIO_ACCESS_KEY_ID\"",
"\"MINIO_SECRET_ACCESS_KEY\""
] |
[] |
[
"MINIO_SECURE",
"MINIO_ENDPOINT",
"MINIO_ACCESS_KEY_ID",
"MINIO_SECRET_ACCESS_KEY"
] |
[]
|
["MINIO_SECURE", "MINIO_ENDPOINT", "MINIO_ACCESS_KEY_ID", "MINIO_SECRET_ACCESS_KEY"]
|
go
| 4 | 0 | |
client/main.go
|
/*
gRPC Client
*/
package main
import (
"context"
"io"
"log"
"net/url"
"os"
nso "github.com/nleiva/slack-nso/nso"
pb "github.com/nleiva/slack-nso/proto"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
const (
address = "grpc.nleiva.com:50051"
)
// GetCmd subscribes to a stream of commands, returns a channel.
func GetCmd(client pb.CommSvcClient) chan []string {
// 's' is the string channel where the data received will be sent.
s := make(chan []string)
stream, err := client.GetCmd(context.Background(), &pb.Id{})
if err != nil {
log.Fatalf("Server says: %v", err)
}
go func() {
for {
res, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
log.Fatalf("Server says: %v", err)
}
s <- []string{res.GetCmd(), res.GetArg1(), res.GetArg2()}
}
close(s)
}()
return s
}
func main() {
api := new(url.URL)
api.Scheme = "http"
api.Host = os.Getenv("NSO_SERVER")
api.User = url.UserPassword(os.Getenv("NSO_USER"), os.Getenv("NSO_PASSWORD"))
device := os.Getenv("NSO_DEVICE")
s := new(nso.Server)
s.Addr = api
// Security options
creds, err := credentials.NewClientTLSFromFile("cert.pem", "")
if err != nil {
log.Fatalf("could not process the credentials: %v", err)
}
opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
// Set up a secure connection to the server.
conn, err := grpc.Dial(address, opts...)
//conn, err := grpc.Dial(address, grpc.WithInsecure())
if err != nil {
log.Fatalf("did not connect: %v", err)
}
defer conn.Close()
client := pb.NewCommSvcClient(conn)
ch := GetCmd(client)
for msg := range ch {
s.StaticRoute(msg, device)
}
}
|
[
"\"NSO_SERVER\"",
"\"NSO_USER\"",
"\"NSO_PASSWORD\"",
"\"NSO_DEVICE\""
] |
[] |
[
"NSO_USER",
"NSO_PASSWORD",
"NSO_DEVICE",
"NSO_SERVER"
] |
[]
|
["NSO_USER", "NSO_PASSWORD", "NSO_DEVICE", "NSO_SERVER"]
|
go
| 4 | 0 | |
main_test.go
|
package gorm_test
import (
"database/sql"
"database/sql/driver"
"fmt"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"testing"
"time"
"github.com/erikstmartin/go-testdb"
"github.com/hidevopsio/gorm"
_ "github.com/hidevopsio/gorm/dialects/mssql"
_ "github.com/hidevopsio/gorm/dialects/mysql"
"github.com/hidevopsio/gorm/dialects/postgres"
_ "github.com/hidevopsio/gorm/dialects/sqlite"
"github.com/jinzhu/now"
)
var (
DB gorm.Repository
t1, t2, t3, t4, t5 time.Time
)
func init() {
var err error
if DB, err = OpenTestConnection(); err != nil {
panic(fmt.Sprintf("No error should happen when connecting to test database, but got err=%+v", err))
}
runMigration()
}
func OpenTestConnection() (db gorm.Repository, err error) {
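// Illustrative invocation (assumed command line, not part of the original file):
//	GORM_DIALECT=postgres GORM_DSN="user=gorm password=gorm DB.name=gorm port=9920 sslmode=disable" DEBUG=true go test
// With GORM_DIALECT unset, the default branch below falls back to a throwaway sqlite3 database in os.TempDir().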
dbDSN := os.Getenv("GORM_DSN")
switch os.Getenv("GORM_DIALECT") {
case "mysql":
fmt.Println("testing mysql...")
if dbDSN == "" {
dbDSN = "gorm:gorm@tcp(localhost:9910)/gorm?charset=utf8&parseTime=True"
}
db, err = gorm.Open("mysql", dbDSN)
case "postgres":
fmt.Println("testing postgres...")
if dbDSN == "" {
dbDSN = "user=gorm password=gorm DB.name=gorm port=9920 sslmode=disable"
}
db, err = gorm.Open("postgres", dbDSN)
case "mssql":
// CREATE LOGIN gorm WITH PASSWORD = 'LoremIpsum86';
// CREATE DATABASE gorm;
// USE gorm;
// CREATE USER gorm FROM LOGIN gorm;
// sp_changedbowner 'gorm';
fmt.Println("testing mssql...")
if dbDSN == "" {
dbDSN = "sqlserver://gorm:LoremIpsum86@localhost:9930?database=gorm"
}
db, err = gorm.Open("mssql", dbDSN)
default:
fmt.Println("testing sqlite3...")
db, err = gorm.Open("sqlite3", filepath.Join(os.TempDir(), "gorm.db"))
}
// db.SetLogger(Logger{log.New(os.Stdout, "\r\n", 0)})
// db.SetLogger(log.New(os.Stdout, "\r\n", 0))
if debug := os.Getenv("DEBUG"); debug == "true" {
db.LogMode(true)
} else if debug == "false" {
db.LogMode(false)
}
db.SqlDB().SetMaxIdleConns(10)
return
}
func TestOpen_ReturnsError_WithBadArgs(t *testing.T) {
stringRef := "foo"
testCases := []interface{}{42, time.Now(), &stringRef}
for _, tc := range testCases {
t.Run(fmt.Sprintf("%v", tc), func(t *testing.T) {
_, err := gorm.Open("postgresql", tc)
if err == nil {
t.Error("Should got error with invalid database source")
}
if !strings.HasPrefix(err.Error(), "invalid database source:") {
t.Errorf("Should got error starting with \"invalid database source:\", but got %q", err.Error())
}
})
}
}
func TestStringPrimaryKey(t *testing.T) {
type UUIDStruct struct {
ID string `gorm:"primary_key"`
Name string
}
DB.DropTable(&UUIDStruct{})
DB.AutoMigrate(&UUIDStruct{})
data := UUIDStruct{ID: "uuid", Name: "hello"}
if err := DB.Save(&data).Error(); err != nil || data.ID != "uuid" || data.Name != "hello" {
t.Errorf("string primary key should not be populated")
}
data = UUIDStruct{ID: "uuid", Name: "hello world"}
if err := DB.Save(&data).Error(); err != nil || data.ID != "uuid" || data.Name != "hello world" {
t.Errorf("string primary key should not be populated")
}
}
func TestExceptionsWithInvalidSql(t *testing.T) {
var columns []string
if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error() == nil {
t.Errorf("Should got error with invalid SQL")
}
if DB.Model(&User{}).Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error() == nil {
t.Errorf("Should got error with invalid SQL")
}
if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Find(&User{}).Error() == nil {
t.Errorf("Should got error with invalid SQL")
}
var count1, count2 int64
DB.Model(&User{}).Count(&count1)
if count1 <= 0 {
t.Errorf("Should find some users")
}
if DB.Where("name = ?", "jinzhu; delete * from users").First(&User{}).Error() == nil {
t.Errorf("Should got error with invalid SQL")
}
DB.Model(&User{}).Count(&count2)
if count1 != count2 {
t.Errorf("No user should not be deleted by invalid SQL")
}
}
func TestSetTable(t *testing.T) {
DB.Create(getPreparedUser("pluck_user1", "pluck_user"))
DB.Create(getPreparedUser("pluck_user2", "pluck_user"))
DB.Create(getPreparedUser("pluck_user3", "pluck_user"))
if err := DB.Table("users").Where("role = ?", "pluck_user").Pluck("age", &[]int{}).Error(); err != nil {
t.Error("No errors should happen if set table for pluck", err)
}
var users []User
if DB.Table("users").Find(&[]User{}).Error() != nil {
t.Errorf("No errors should happen if set table for find")
}
if DB.Table("invalid_table").Find(&users).Error() == nil {
t.Errorf("Should got error when table is set to an invalid table")
}
DB.Exec("drop table deleted_users;")
if DB.Table("deleted_users").CreateTable(&User{}).Error() != nil {
t.Errorf("Create table with specified table")
}
DB.Table("deleted_users").Save(&User{Name: "DeletedUser"})
var deletedUsers []User
DB.Table("deleted_users").Find(&deletedUsers)
if len(deletedUsers) != 1 {
t.Errorf("Query from specified table")
}
DB.Save(getPreparedUser("normal_user", "reset_table"))
DB.Table("deleted_users").Save(getPreparedUser("deleted_user", "reset_table"))
var user1, user2, user3 User
DB.Where("role = ?", "reset_table").First(&user1).Table("deleted_users").First(&user2).Table("").First(&user3)
if (user1.Name != "normal_user") || (user2.Name != "deleted_user") || (user3.Name != "normal_user") {
t.Errorf("unset specified table with blank string")
}
}
type Order struct {
}
type Cart struct {
}
func (c Cart) TableName() string {
return "shopping_cart"
}
func TestHasTable(t *testing.T) {
type Foo struct {
Id int
Stuff string
}
DB.DropTable(&Foo{})
// Table should not exist at this point, HasTable should return false
if ok := DB.HasTable("foos"); ok {
t.Errorf("Table should not exist, but does")
}
if ok := DB.HasTable(&Foo{}); ok {
t.Errorf("Table should not exist, but does")
}
// We create the table
if err := DB.CreateTable(&Foo{}).Error(); err != nil {
t.Errorf("Table should be created")
}
// And now it should exist, and HasTable should return true
if ok := DB.HasTable("foos"); !ok {
t.Errorf("Table should exist, but HasTable informs it does not")
}
if ok := DB.HasTable(&Foo{}); !ok {
t.Errorf("Table should exist, but HasTable informs it does not")
}
}
func TestTableName(t *testing.T) {
DB := DB.Model("")
if DB.NewScope(Order{}).TableName() != "orders" {
t.Errorf("Order's table name should be orders")
}
if DB.NewScope(&Order{}).TableName() != "orders" {
t.Errorf("&Order's table name should be orders")
}
if DB.NewScope([]Order{}).TableName() != "orders" {
t.Errorf("[]Order's table name should be orders")
}
if DB.NewScope(&[]Order{}).TableName() != "orders" {
t.Errorf("&[]Order's table name should be orders")
}
DB.SingularTable(true)
if DB.NewScope(Order{}).TableName() != "order" {
t.Errorf("Order's singular table name should be order")
}
if DB.NewScope(&Order{}).TableName() != "order" {
t.Errorf("&Order's singular table name should be order")
}
if DB.NewScope([]Order{}).TableName() != "order" {
t.Errorf("[]Order's singular table name should be order")
}
if DB.NewScope(&[]Order{}).TableName() != "order" {
t.Errorf("&[]Order's singular table name should be order")
}
if DB.NewScope(&Cart{}).TableName() != "shopping_cart" {
t.Errorf("&Cart's singular table name should be shopping_cart")
}
if DB.NewScope(Cart{}).TableName() != "shopping_cart" {
t.Errorf("Cart's singular table name should be shopping_cart")
}
if DB.NewScope(&[]Cart{}).TableName() != "shopping_cart" {
t.Errorf("&[]Cart's singular table name should be shopping_cart")
}
if DB.NewScope([]Cart{}).TableName() != "shopping_cart" {
t.Errorf("[]Cart's singular table name should be shopping_cart")
}
DB.SingularTable(false)
}
func TestNullValues(t *testing.T) {
DB.DropTable(&NullValue{})
DB.AutoMigrate(&NullValue{})
if err := DB.Save(&NullValue{
Name: sql.NullString{String: "hello", Valid: true},
Gender: &sql.NullString{String: "M", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: true},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: true},
}).Error(); err != nil {
t.Errorf("Not error should raise when test null value")
}
var nv NullValue
DB.First(&nv, "name = ?", "hello")
if nv.Name.String != "hello" || nv.Gender.String != "M" || nv.Age.Int64 != 18 || nv.Male.Bool != true || nv.Height.Float64 != 100.11 || nv.AddedAt.Valid != true {
t.Errorf("Should be able to fetch null value")
}
if err := DB.Save(&NullValue{
Name: sql.NullString{String: "hello-2", Valid: true},
Gender: &sql.NullString{String: "F", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: false},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: false},
}).Error(); err != nil {
t.Errorf("Not error should raise when test null value")
}
var nv2 NullValue
DB.First(&nv2, "name = ?", "hello-2")
if nv2.Name.String != "hello-2" || nv2.Gender.String != "F" || nv2.Age.Int64 != 0 || nv2.Male.Bool != true || nv2.Height.Float64 != 100.11 || nv2.AddedAt.Valid != false {
t.Errorf("Should be able to fetch null value")
}
if err := DB.Save(&NullValue{
Name: sql.NullString{String: "hello-3", Valid: false},
Gender: &sql.NullString{String: "M", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: false},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: false},
}).Error(); err == nil {
t.Errorf("Can't save because of name can't be null")
}
}
func TestNullValuesWithFirstOrCreate(t *testing.T) {
var nv1 = NullValue{
Name: sql.NullString{String: "first_or_create", Valid: true},
Gender: &sql.NullString{String: "M", Valid: true},
}
var nv2 NullValue
result := DB.Where(nv1).FirstOrCreate(&nv2)
if result.RowsAffected() != 1 {
t.Errorf("RowsAffected should be 1 after create some record")
}
if result.Error() != nil {
t.Errorf("Should not raise any error, but got %v", result.Error())
}
if nv2.Name.String != "first_or_create" || nv2.Gender.String != "M" {
t.Errorf("first or create with nullvalues")
}
if err := DB.Where(nv1).Assign(NullValue{Age: sql.NullInt64{Int64: 18, Valid: true}}).FirstOrCreate(&nv2).Error(); err != nil {
t.Errorf("Should not raise any error, but got %v", err)
}
if nv2.Age.Int64 != 18 {
t.Errorf("should update age to 18")
}
}
func TestTransaction(t *testing.T) {
tx := DB.Begin()
u := User{Name: "transcation"}
if err := tx.Save(&u).Error(); err != nil {
t.Errorf("No error should raise")
}
if err := tx.First(&User{}, "name = ?", "transcation").Error(); err != nil {
t.Errorf("Should find saved record")
}
if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil {
t.Errorf("Should return the underlying sql.Tx")
}
tx.Rollback()
if err := tx.First(&User{}, "name = ?", "transcation").Error(); err == nil {
t.Errorf("Should not find record after rollback")
}
tx2 := DB.Begin()
u2 := User{Name: "transcation-2"}
if err := tx2.Save(&u2).Error(); err != nil {
t.Errorf("No error should raise")
}
if err := tx2.First(&User{}, "name = ?", "transcation-2").Error(); err != nil {
t.Errorf("Should find saved record")
}
tx2.Commit()
if err := DB.First(&User{}, "name = ?", "transcation-2").Error(); err != nil {
t.Errorf("Should be able to find committed record")
}
}
func TestRow(t *testing.T) {
user1 := User{Name: "RowUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "RowUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "RowUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
row := DB.Table("users").Where("name = ?", user2.Name).Select("age").Row()
var age int64
row.Scan(&age)
if age != 10 {
t.Errorf("Scan with Row")
}
}
func TestRows(t *testing.T) {
user1 := User{Name: "RowsUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "RowsUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "RowsUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
if err != nil {
t.Errorf("Not error should happen, got %v", err)
}
count := 0
for rows.Next() {
var name string
var age int64
rows.Scan(&name, &age)
count++
}
if count != 2 {
t.Errorf("Should found two records")
}
}
func TestScanRows(t *testing.T) {
user1 := User{Name: "ScanRowsUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "ScanRowsUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "ScanRowsUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
if err != nil {
t.Errorf("Not error should happen, got %v", err)
}
type Result struct {
Name string
Age int
}
var results []Result
for rows.Next() {
var result Result
if err := DB.ScanRows(rows, &result); err != nil {
t.Errorf("should get no error, but got %v", err)
}
results = append(results, result)
}
if !reflect.DeepEqual(results, []Result{{Name: "ScanRowsUser2", Age: 10}, {Name: "ScanRowsUser3", Age: 20}}) {
t.Errorf("Should find expected results")
}
}
func TestScan(t *testing.T) {
user1 := User{Name: "ScanUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "ScanUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "ScanUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
type result struct {
Name string
Age int
}
var res result
DB.Table("users").Select("name, age").Where("name = ?", user3.Name).Scan(&res)
if res.Name != user3.Name {
t.Errorf("Scan into struct should work")
}
var doubleAgeRes = &result{}
if err := DB.Table("users").Select("age + age as age").Where("name = ?", user3.Name).Scan(&doubleAgeRes).Error(); err != nil {
t.Errorf("Scan to pointer of pointer")
}
if doubleAgeRes.Age != res.Age*2 {
t.Errorf("Scan double age as age")
}
var ress []result
DB.Table("users").Select("name, age").Where("name in (?)", []string{user2.Name, user3.Name}).Scan(&ress)
if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
t.Errorf("Scan into struct map")
}
}
func TestRaw(t *testing.T) {
user1 := User{Name: "ExecRawSqlUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "ExecRawSqlUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "ExecRawSqlUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
type result struct {
Name string
Email string
}
var ress []result
DB.Raw("SELECT name, age FROM users WHERE name = ? or name = ?", user2.Name, user3.Name).Scan(&ress)
if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
t.Errorf("Raw with scan")
}
rows, _ := DB.Raw("select name, age from users where name = ?", user3.Name).Rows()
count := 0
for rows.Next() {
count++
}
if count != 1 {
t.Errorf("Raw with Rows should find one record with name 3")
}
DB.Exec("update users set name=? where name in (?)", "jinzhu", []string{user1.Name, user2.Name, user3.Name})
if DB.Where("name in (?)", []string{user1.Name, user2.Name, user3.Name}).First(&User{}).Error() != gorm.ErrRecordNotFound {
t.Error("Raw sql to update records")
}
}
func TestGroup(t *testing.T) {
rows, err := DB.Select("name").Table("users").Group("name").Rows()
if err == nil {
defer rows.Close()
for rows.Next() {
var name string
rows.Scan(&name)
}
} else {
t.Errorf("Should not raise any error")
}
}
func TestJoins(t *testing.T) {
var user = User{
Name: "joins",
CreditCard: CreditCard{Number: "411111111111"},
Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}},
}
DB.Save(&user)
var users1 []User
DB.Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins").Find(&users1)
if len(users1) != 2 {
t.Errorf("should find two users using left join")
}
var users2 []User
DB.Joins("left join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Where("name = ?", "joins").First(&users2)
if len(users2) != 1 {
t.Errorf("should find one users using left join with conditions")
}
var users3 []User
DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where("name = ?", "joins").First(&users3)
if len(users3) != 1 {
t.Errorf("should find one users using multiple left join conditions")
}
var users4 []User
DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "422222222222").Where("name = ?", "joins").First(&users4)
if len(users4) != 0 {
t.Errorf("should find no user when searching with unexisting credit card")
}
var users5 []User
db5 := DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where(User{Id: 1}).Where(Email{Id: 1}).Not(Email{Id: 10}).First(&users5)
if db5.Error() != nil {
t.Errorf("Should not raise error for join where identical fields in different tables. Error: %s", db5.Error().Error())
}
}
func TestJoinsWithSelect(t *testing.T) {
type result struct {
Name string
Email string
}
user := User{
Name: "joins_with_select",
Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}},
}
DB.Save(&user)
var results []result
DB.Table("users").Select("name, emails.email").Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins_with_select").Scan(&results)
if len(results) != 2 || results[0].Email != "[email protected]" || results[1].Email != "[email protected]" {
t.Errorf("Should find all two emails with Join select")
}
}
func TestHaving(t *testing.T) {
rows, err := DB.Select("name, count(*) as total").Table("users").Group("name").Having("name IN (?)", []string{"2", "3"}).Rows()
if err == nil {
defer rows.Close()
for rows.Next() {
var name string
var total int64
rows.Scan(&name, &total)
if name == "2" && total != 1 {
t.Errorf("Should have one user having name 2")
}
if name == "3" && total != 2 {
t.Errorf("Should have two users having name 3")
}
}
} else {
t.Errorf("Should not raise any error")
}
}
func TestQueryBuilderSubselectInWhere(t *testing.T) {
user := User{Name: "query_expr_select_ruser1", Email: "[email protected]", Age: 32}
DB.Save(&user)
user = User{Name: "query_expr_select_ruser2", Email: "[email protected]", Age: 16}
DB.Save(&user)
user = User{Name: "query_expr_select_ruser3", Email: "[email protected]", Age: 64}
DB.Save(&user)
user = User{Name: "query_expr_select_ruser4", Email: "[email protected]", Age: 128}
DB.Save(&user)
var users []User
DB.Select("*").Where("name IN (?)", DB.
Select("name").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users)
if len(users) != 4 {
t.Errorf("Four users should be found, instead found %d", len(users))
}
DB.Select("*").Where("name LIKE ?", "query_expr_select%").Where("age >= (?)", DB.
Select("AVG(age)").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users)
if len(users) != 2 {
t.Errorf("Two users should be found, instead found %d", len(users))
}
}
func TestQueryBuilderRawQueryWithSubquery(t *testing.T) {
user := User{Name: "subquery_test_user1", Age: 10}
DB.Save(&user)
user = User{Name: "subquery_test_user2", Age: 11}
DB.Save(&user)
user = User{Name: "subquery_test_user3", Age: 12}
DB.Save(&user)
var count int
err := DB.Raw("select count(*) from (?) tmp",
DB.Table("users").
Select("name").
Where("age >= ? and name in (?)", 10, []string{"subquery_test_user1", "subquery_test_user2"}).
Group("name").
QueryExpr(),
).Count(&count).Error()
if err != nil {
t.Errorf("Expected to get no errors, but got %v", err)
}
if count != 2 {
t.Errorf("Row count must be 2, instead got %d", count)
}
err = DB.Raw("select count(*) from (?) tmp",
DB.Table("users").
Select("name").
Where("name LIKE ?", "subquery_test%").
Not("age <= ?", 10).Not("name in (?)", []string{"subquery_test_user1", "subquery_test_user2"}).
Group("name").
QueryExpr(),
).Count(&count).Error()
if err != nil {
t.Errorf("Expected to get no errors, but got %v", err)
}
if count != 1 {
t.Errorf("Row count must be 1, instead got %d", count)
}
}
func TestQueryBuilderSubselectInHaving(t *testing.T) {
user := User{Name: "query_expr_having_ruser1", Email: "[email protected]", Age: 64}
DB.Save(&user)
user = User{Name: "query_expr_having_ruser2", Email: "[email protected]", Age: 128}
DB.Save(&user)
user = User{Name: "query_expr_having_ruser3", Email: "[email protected]", Age: 64}
DB.Save(&user)
user = User{Name: "query_expr_having_ruser4", Email: "[email protected]", Age: 128}
DB.Save(&user)
var users []User
DB.Select("AVG(age) as avgage").Where("name LIKE ?", "query_expr_having_%").Group("email").Having("AVG(age) > (?)", DB.
Select("AVG(age)").Where("name LIKE ?", "query_expr_having_%").Table("users").QueryExpr()).Find(&users)
if len(users) != 1 {
t.Errorf("Two user group should be found, instead found %d", len(users))
}
}
func DialectHasTzSupport() bool {
// NB: mssql and FoundationDB do not support time zones.
if dialect := os.Getenv("GORM_DIALECT"); dialect == "foundation" {
return false
}
return true
}
func TestTimeWithZone(t *testing.T) {
var format = "2006-01-02 15:04:05 -0700"
var times []time.Time
GMT8, _ := time.LoadLocation("Asia/Shanghai")
times = append(times, time.Date(2013, 02, 19, 1, 51, 49, 123456789, GMT8))
times = append(times, time.Date(2013, 02, 18, 17, 51, 49, 123456789, time.UTC))
for index, vtime := range times {
name := "time_with_zone_" + strconv.Itoa(index)
user := User{Name: name, Birthday: &vtime}
if !DialectHasTzSupport() {
// If our driver dialect doesn't support TZ's, just use UTC for everything here.
utcBirthday := user.Birthday.UTC()
user.Birthday = &utcBirthday
}
DB.Save(&user)
expectedBirthday := "2013-02-18 17:51:49 +0000"
foundBirthday := user.Birthday.UTC().Format(format)
if foundBirthday != expectedBirthday {
t.Errorf("User's birthday should not be changed after save for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday)
}
var findUser, findUser2, findUser3 User
DB.First(&findUser, "name = ?", name)
foundBirthday = findUser.Birthday.UTC().Format(format)
if foundBirthday != expectedBirthday {
t.Errorf("User's birthday should not be changed after find for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday)
}
if DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(-time.Minute)).First(&findUser2).RecordNotFound() {
t.Errorf("User should be found")
}
if !DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(time.Minute)).First(&findUser3).RecordNotFound() {
t.Errorf("User should not be found")
}
}
}
func TestHstore(t *testing.T) {
type Details struct {
Id int64
Bulk postgres.Hstore
}
if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" {
t.Skip()
}
if err := DB.Exec("CREATE EXTENSION IF NOT EXISTS hstore").Error(); err != nil {
fmt.Println("\033[31mHINT: Must be superuser to create hstore extension (ALTER USER gorm WITH SUPERUSER;)\033[0m")
panic(fmt.Sprintf("No error should happen when create hstore extension, but got %+v", err))
}
DB.Exec("drop table details")
if err := DB.CreateTable(&Details{}).Error(); err != nil {
panic(fmt.Sprintf("No error should happen when create table, but got %+v", err))
}
bankAccountId, phoneNumber, opinion := "123456", "14151321232", "sharkbait"
bulk := map[string]*string{
"bankAccountId": &bankAccountId,
"phoneNumber": &phoneNumber,
"opinion": &opinion,
}
d := Details{Bulk: bulk}
DB.Save(&d)
var d2 Details
if err := DB.First(&d2).Error(); err != nil {
t.Errorf("Got error when tried to fetch details: %+v", err)
}
for k := range bulk {
if r, ok := d2.Bulk[k]; ok {
if res, _ := bulk[k]; *res != *r {
t.Errorf("Details should be equal")
}
} else {
t.Errorf("Details should be existed")
}
}
}
func TestSetAndGet(t *testing.T) {
if value, ok := DB.Set("hello", "world").Get("hello"); !ok {
t.Errorf("Should be able to get setting after set")
} else {
if value.(string) != "world" {
t.Errorf("Setted value should not be changed")
}
}
if _, ok := DB.Get("non_existing"); ok {
t.Errorf("Get non existing key should return error")
}
}
func TestCompatibilityMode(t *testing.T) {
DB, _ := gorm.Open("testdb", "")
testdb.SetQueryFunc(func(query string) (driver.Rows, error) {
columns := []string{"id", "name", "age"}
result := `
1,Tim,20
2,Joe,25
3,Bob,30
`
return testdb.RowsFromCSVString(columns, result), nil
})
var users []User
DB.Find(&users)
if (users[0].Name != "Tim") || len(users) != 3 {
t.Errorf("Unexcepted result returned")
}
}
func TestOpenExistingDB(t *testing.T) {
DB.Save(&User{Name: "jnfeinstein"})
dialect := os.Getenv("GORM_DIALECT")
db, err := gorm.Open(dialect, DB.SqlDB())
if err != nil {
t.Errorf("Should have wrapped the existing DB connection")
}
var user User
if db.Where("name = ?", "jnfeinstein").First(&user).Error() == gorm.ErrRecordNotFound {
t.Errorf("Should have found existing record")
}
}
func TestDdlErrors(t *testing.T) {
var err error
if err = DB.Close(); err != nil {
t.Errorf("Closing DDL test db connection err=%s", err)
}
defer func() {
// Reopen DB connection.
if DB, err = OpenTestConnection(); err != nil {
t.Fatalf("Failed re-opening db connection: %s", err)
}
}()
if err := DB.Find(&User{}).Error(); err == nil {
t.Errorf("Expected operation on closed db to produce an error, but err was nil")
}
}
func TestOpenWithOneParameter(t *testing.T) {
db, err := gorm.Open("dialect")
if db != nil {
t.Error("Open with one parameter returned non nil for db")
}
if err == nil {
t.Error("Open with one parameter returned err as nil")
}
}
func TestBlockGlobalUpdate(t *testing.T) {
db := DB.New()
db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"})
err := db.Model(&Toy{}).Update("OwnerType", "Human").Error()
if err != nil {
t.Error("Unexpected error on global update")
}
err = db.Delete(&Toy{}).Error()
if err != nil {
t.Error("Unexpected error on global delete")
}
db.BlockGlobalUpdate(true)
db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"})
err = db.Model(&Toy{}).Update("OwnerType", "Human").Error()
if err == nil {
t.Error("Expected error on global update")
}
err = db.Model(&Toy{}).Where(&Toy{OwnerType: "Martian"}).Update("OwnerType", "Astronaut").Error()
if err != nil {
t.Error("Unxpected error on conditional update")
}
err = db.Delete(&Toy{}).Error()
if err == nil {
t.Error("Expected error on global delete")
}
err = db.Where(&Toy{OwnerType: "Martian"}).Delete(&Toy{}).Error()
if err != nil {
t.Error("Unexpected error on conditional delete")
}
}
func BenchmarkGorm(b *testing.B) {
b.N = 2000
for x := 0; x < b.N; x++ {
e := strconv.Itoa(x) + "[email protected]"
now := time.Now()
email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now}
// Insert
DB.Save(&email)
// Query
DB.First(&EmailWithIdx{}, "email = ?", e)
// Update
DB.Model(&email).UpdateColumn("email", "new-"+e)
// Delete
DB.Delete(&email)
}
}
func BenchmarkRawSql(b *testing.B) {
DB, _ := sql.Open("postgres", "user=gorm DB.ame=gorm sslmode=disable")
DB.SetMaxIdleConns(10)
insertSql := "INSERT INTO emails (user_id,email,user_agent,registered_at,created_at,updated_at) VALUES ($1,$2,$3,$4,$5,$6) RETURNING id"
querySql := "SELECT * FROM emails WHERE email = $1 ORDER BY id LIMIT 1"
updateSql := "UPDATE emails SET email = $1, updated_at = $2 WHERE id = $3"
deleteSql := "DELETE FROM orders WHERE id = $1"
b.N = 2000
for x := 0; x < b.N; x++ {
var id int64
e := strconv.Itoa(x) + "[email protected]"
now := time.Now()
email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now}
// Insert
DB.QueryRow(insertSql, email.UserId, email.Email, email.UserAgent, email.RegisteredAt, time.Now(), time.Now()).Scan(&id)
// Query
rows, _ := DB.Query(querySql, email.Email)
rows.Close()
// Update
DB.Exec(updateSql, "new-"+e, time.Now(), id)
// Delete
DB.Exec(deleteSql, id)
}
}
func parseTime(str string) *time.Time {
t := now.New(time.Now().UTC()).MustParse(str)
return &t
}
|
[
"\"GORM_DSN\"",
"\"GORM_DIALECT\"",
"\"DEBUG\"",
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\""
] |
[] |
[
"GORM_DIALECT",
"GORM_DSN",
"DEBUG"
] |
[]
|
["GORM_DIALECT", "GORM_DSN", "DEBUG"]
|
go
| 3 | 0 | |
wgengine/userspace.go
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package wgengine
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"log"
"net"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/tailscale/wireguard-go/device"
"github.com/tailscale/wireguard-go/tun"
"github.com/tailscale/wireguard-go/wgcfg"
"go4.org/mem"
"inet.af/netaddr"
"tailscale.com/control/controlclient"
"tailscale.com/internal/deepprint"
"tailscale.com/ipn/ipnstate"
"tailscale.com/net/interfaces"
"tailscale.com/net/packet"
"tailscale.com/net/tsaddr"
"tailscale.com/net/tshttpproxy"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/wgkey"
"tailscale.com/version"
"tailscale.com/version/distro"
"tailscale.com/wgengine/filter"
"tailscale.com/wgengine/magicsock"
"tailscale.com/wgengine/monitor"
"tailscale.com/wgengine/router"
"tailscale.com/wgengine/tsdns"
"tailscale.com/wgengine/tstun"
)
// minimalMTU is the MTU we set on tailscale's TUN
// interface. wireguard-go defaults to 1420 bytes, which only works if
// the "outer" MTU is 1500 bytes. This breaks on DSL connections
// (typically 1492 MTU) and on GCE (1460 MTU?!).
//
// 1280 is the smallest MTU allowed for IPv6, which is a sensible
// "probably works everywhere" setting until we develop proper PMTU
// discovery.
const minimalMTU = 1280
const magicDNSPort = 53
var magicDNSIP = netaddr.IPv4(100, 100, 100, 100)
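// Together these mean that UDP packets addressed to 100.100.100.100:53 are intercepted
// by handleDNS below and answered by the built-in tsdns resolver (MagicDNS).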
// Lazy wireguard-go configuration parameters.
const (
// lazyPeerIdleThreshold is the idle duration after
// which we remove a peer from the wireguard configuration.
// (This includes peers that have never been active, which
// effectively have infinite idleness)
lazyPeerIdleThreshold = 5 * time.Minute
// packetSendTimeUpdateFrequency controls how often we record
// the time that we wrote a packet to an IP address.
packetSendTimeUpdateFrequency = 10 * time.Second
// packetSendRecheckWireguardThreshold controls how long we can go
// between packet sends to an IP before checking to see
// whether this IP address needs to be added back to the
// Wireguard peer config.
packetSendRecheckWireguardThreshold = 1 * time.Minute
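// Taken together (an illustrative summary, not additional behavior): a peer idle for
// longer than lazyPeerIdleThreshold is dropped from the wireguard config, send times
// are recorded at most once per packetSendTimeUpdateFrequency, and after
// packetSendRecheckWireguardThreshold without a send we re-check whether the peer
// needs to be added back (see maybeReconfigWireguardLocked and updateActivityMapsLocked).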
)
type userspaceEngine struct {
logf logger.Logf
reqCh chan struct{}
waitCh chan struct{} // chan is closed when first Close call completes; contrast with closing bool
timeNow func() time.Time
tundev *tstun.TUN
wgdev *device.Device
router router.Router
resolver *tsdns.Resolver
magicConn *magicsock.Conn
linkMon *monitor.Mon
testMaybeReconfigHook func() // for tests; if non-nil, fires if maybeReconfigWireguardLocked called
// localAddrs is the set of IP addresses assigned to the local
// tunnel interface. It's used to reflect local packets
// incorrectly sent to us.
localAddrs atomic.Value // of map[netaddr.IP]bool
wgLock sync.Mutex // serializes all wgdev operations; see lock order comment below
lastCfgFull wgcfg.Config
lastRouterSig string // of router.Config
lastEngineSigFull string // of full wireguard config
lastEngineSigTrim string // of trimmed wireguard config
recvActivityAt map[tailcfg.DiscoKey]time.Time
trimmedDisco map[tailcfg.DiscoKey]bool // set of disco keys of peers currently excluded from wireguard config
sentActivityAt map[netaddr.IP]*int64 // value is atomic int64 of unixtime
destIPActivityFuncs map[netaddr.IP]func()
mu sync.Mutex // guards following; see lock order comment below
closing bool // Close was called (even if we're still closing)
statusCallback StatusCallback
linkChangeCallback func(major bool, newState *interfaces.State)
peerSequence []wgkey.Key
endpoints []string
pingers map[wgkey.Key]*pinger // legacy pingers for pre-discovery peers
linkState *interfaces.State
// Lock ordering: magicsock.Conn.mu, wgLock, then mu.
}
// RouterGen is the signature for a function that creates a
// router.Router.
type RouterGen func(logf logger.Logf, wgdev *device.Device, tundev tun.Device) (router.Router, error)
type EngineConfig struct {
// Logf is the logging function used by the engine.
Logf logger.Logf
// TUN is the tun device used by the engine.
TUN tun.Device
// RouterGen is the function used to instantiate the router.
RouterGen RouterGen
// ListenPort is the port on which the engine will listen.
ListenPort uint16
// Fake determines whether this engine is running in fake mode,
// which disables such features as DNS configuration and unrestricted ICMP Echo responses.
Fake bool
}
func NewFakeUserspaceEngine(logf logger.Logf, listenPort uint16) (Engine, error) {
logf("Starting userspace wireguard engine (with fake TUN device)")
conf := EngineConfig{
Logf: logf,
TUN: tstun.NewFakeTUN(),
RouterGen: router.NewFake,
ListenPort: listenPort,
Fake: true,
}
return NewUserspaceEngineAdvanced(conf)
}
// NewUserspaceEngine creates the named tun device and returns a
// Tailscale Engine running on it.
func NewUserspaceEngine(logf logger.Logf, tunname string, listenPort uint16) (Engine, error) {
if tunname == "" {
return nil, fmt.Errorf("--tun name must not be blank")
}
logf("Starting userspace wireguard engine with tun device %q", tunname)
tun, err := tun.CreateTUN(tunname, minimalMTU)
if err != nil {
diagnoseTUNFailure(logf)
logf("CreateTUN: %v", err)
return nil, err
}
logf("CreateTUN ok.")
conf := EngineConfig{
Logf: logf,
TUN: tun,
RouterGen: router.New,
ListenPort: listenPort,
}
e, err := NewUserspaceEngineAdvanced(conf)
if err != nil {
return nil, err
}
return e, err
}
// NewUserspaceEngineAdvanced is like NewUserspaceEngine
// but provides control over all config fields.
func NewUserspaceEngineAdvanced(conf EngineConfig) (Engine, error) {
return newUserspaceEngineAdvanced(conf)
}
func newUserspaceEngineAdvanced(conf EngineConfig) (_ Engine, reterr error) {
logf := conf.Logf
rconf := tsdns.ResolverConfig{
Logf: conf.Logf,
Forward: true,
}
e := &userspaceEngine{
timeNow: time.Now,
logf: logf,
reqCh: make(chan struct{}, 1),
waitCh: make(chan struct{}),
tundev: tstun.WrapTUN(logf, conf.TUN),
resolver: tsdns.NewResolver(rconf),
pingers: make(map[wgkey.Key]*pinger),
}
e.localAddrs.Store(map[netaddr.IP]bool{})
e.linkState, _ = getLinkState()
logf("link state: %+v", e.linkState)
// Respond to all pings only in fake mode.
if conf.Fake {
e.tundev.PostFilterIn = echoRespondToAll
}
e.tundev.PreFilterOut = e.handleLocalPackets
mon, err := monitor.New(logf, func() {
e.LinkChange(false)
tshttpproxy.InvalidateCache()
})
if err != nil {
e.tundev.Close()
return nil, err
}
e.linkMon = mon
endpointsFn := func(endpoints []string) {
e.mu.Lock()
e.endpoints = append(e.endpoints[:0], endpoints...)
e.mu.Unlock()
e.RequestStatus()
}
magicsockOpts := magicsock.Options{
Logf: logf,
Port: conf.ListenPort,
EndpointsFunc: endpointsFn,
DERPActiveFunc: e.RequestStatus,
IdleFunc: e.tundev.IdleDuration,
NoteRecvActivity: e.noteReceiveActivity,
}
e.magicConn, err = magicsock.NewConn(magicsockOpts)
if err != nil {
e.tundev.Close()
return nil, fmt.Errorf("wgengine: %v", err)
}
e.magicConn.SetNetworkUp(e.linkState.AnyInterfaceUp())
// flags==0 because logf is already nested in another logger.
// The outer one can display the preferred log prefixes, etc.
dlog := logger.StdLogger(logf)
logger := device.Logger{
Debug: dlog,
Info: dlog,
Error: dlog,
}
opts := &device.DeviceOptions{
Logger: &logger,
HandshakeDone: func(peerKey wgcfg.Key, peer *device.Peer, deviceAllowedIPs *device.AllowedIPs) {
// Send an unsolicited status event every time a
// handshake completes. This makes sure our UI can
// update quickly as soon as it connects to a peer.
//
// We use a goroutine here to avoid deadlocking
// wireguard, since RequestStatus() will call back
// into it, and wireguard is what called us to get
// here.
go e.RequestStatus()
if e.magicConn.PeerHasDiscoKey(tailcfg.NodeKey(peerKey)) {
e.logf("wireguard handshake complete for %v", peerKey.ShortString())
// This is a modern peer with discovery support. No need to send pings.
return
}
e.logf("wireguard handshake complete for %v; sending legacy pings", peerKey.ShortString())
// Ping every single-IP that peer routes.
// These synthetic packets are used to traverse NATs.
var ips []netaddr.IP
allowedIPs := deviceAllowedIPs.EntriesForPeer(peer)
for _, ipNet := range allowedIPs {
if ones, bits := ipNet.Mask.Size(); ones == bits && ones != 0 {
ip, ok := netaddr.FromStdIP(ipNet.IP)
if !ok {
continue
}
ips = append(ips, ip)
}
}
if len(ips) > 0 {
go e.pinger(wgkey.Key(peerKey), ips)
} else {
logf("[unexpected] peer %s has no single-IP routes: %v", peerKey.ShortString(), allowedIPs)
}
},
CreateBind: e.magicConn.CreateBind,
CreateEndpoint: e.magicConn.CreateEndpoint,
SkipBindUpdate: true,
}
// wgdev takes ownership of tundev, will close it when closed.
e.logf("Creating wireguard device...")
e.wgdev = device.NewDevice(e.tundev, opts)
defer func() {
if reterr != nil {
e.wgdev.Close()
}
}()
// Pass the underlying tun.(*NativeDevice) to the router:
// routers do not Read or Write, but do access native interfaces.
e.logf("Creating router...")
e.router, err = conf.RouterGen(logf, e.wgdev, e.tundev.Unwrap())
if err != nil {
e.magicConn.Close()
return nil, err
}
go func() {
up := false
for event := range e.tundev.Events() {
if event&tun.EventMTUUpdate != 0 {
mtu, err := e.tundev.MTU()
e.logf("external route MTU: %d (%v)", mtu, err)
}
if event&tun.EventUp != 0 && !up {
e.logf("external route: up")
e.RequestStatus()
up = true
}
if event&tun.EventDown != 0 && up {
e.logf("external route: down")
e.RequestStatus()
up = false
}
}
}()
e.logf("Bringing wireguard device up...")
e.wgdev.Up()
e.logf("Bringing router up...")
if err := e.router.Up(); err != nil {
e.magicConn.Close()
e.wgdev.Close()
return nil, err
}
// TODO(danderson): we should delete this. It's pointless to apply
// a no-op setting here.
// TODO(bradfitz): counter-point: it tests the router implementation early
// to see if any part of it might fail.
e.logf("Clearing router settings...")
if err := e.router.Set(nil); err != nil {
e.magicConn.Close()
e.wgdev.Close()
return nil, err
}
e.logf("Starting link monitor...")
e.linkMon.Start()
e.logf("Starting magicsock...")
e.magicConn.Start()
e.logf("Starting resolver...")
e.resolver.Start()
go e.pollResolver()
e.logf("Engine created.")
return e, nil
}
// echoRespondToAll is an inbound post-filter responding to all echo requests.
func echoRespondToAll(p *packet.Parsed, t *tstun.TUN) filter.Response {
if p.IsEchoRequest() {
header := p.ICMP4Header()
header.ToResponse()
outp := packet.Generate(&header, p.Payload())
t.InjectOutbound(outp)
// We already responded to it, but it's not an error.
// Proceed with regular delivery. (Since this code is only
// used in fake mode, regular delivery just means throwing
// it away. If this ever gets run in non-fake mode, you'll
// get double responses to pings, which is an indicator you
// shouldn't be doing that I guess.)
return filter.Accept
}
return filter.Accept
}
// handleLocalPackets inspects packets coming from the local network
// stack, and intercepts any packets that should be handled by
// tailscaled directly. Other packets are allowed to proceed into the
// main ACL filter.
func (e *userspaceEngine) handleLocalPackets(p *packet.Parsed, t *tstun.TUN) filter.Response {
if verdict := e.handleDNS(p, t); verdict == filter.Drop {
// local DNS handled the packet.
return filter.Drop
}
if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && e.isLocalAddr(p.Dst.IP) {
// macOS NetworkExtension directs packets destined to the
// tunnel's local IP address into the tunnel, instead of
// looping back within the kernel network stack. We have to
// notice that an outbound packet is actually destined for
// ourselves, and loop it back into macOS.
t.InjectInboundCopy(p.Buffer())
return filter.Drop
}
return filter.Accept
}
func (e *userspaceEngine) isLocalAddr(ip netaddr.IP) bool {
localAddrs, ok := e.localAddrs.Load().(map[netaddr.IP]bool)
if !ok {
e.logf("[unexpected] e.localAddrs was nil, can't check for loopback packet")
return false
}
return localAddrs[ip]
}
// handleDNS is an outbound pre-filter resolving Tailscale domains.
func (e *userspaceEngine) handleDNS(p *packet.Parsed, t *tstun.TUN) filter.Response {
if p.Dst.IP == magicDNSIP && p.Dst.Port == magicDNSPort && p.IPProto == packet.UDP {
request := tsdns.Packet{
Payload: append([]byte(nil), p.Payload()...),
Addr: netaddr.IPPort{IP: p.Src.IP, Port: p.Src.Port},
}
err := e.resolver.EnqueueRequest(request)
if err != nil {
e.logf("tsdns: enqueue: %v", err)
}
return filter.Drop
}
return filter.Accept
}
// pollResolver reads responses from the DNS resolver and injects them inbound.
func (e *userspaceEngine) pollResolver() {
for {
resp, err := e.resolver.NextResponse()
if err == tsdns.ErrClosed {
return
}
if err != nil {
e.logf("tsdns: error: %v", err)
continue
}
h := packet.UDP4Header{
IP4Header: packet.IP4Header{
Src: magicDNSIP,
Dst: resp.Addr.IP,
},
SrcPort: magicDNSPort,
DstPort: resp.Addr.Port,
}
hlen := h.Len()
// TODO(dmytro): avoid this allocation without importing tstun quirks into tsdns.
const offset = tstun.PacketStartOffset
buf := make([]byte, offset+hlen+len(resp.Payload))
copy(buf[offset+hlen:], resp.Payload)
h.Marshal(buf[offset:])
e.tundev.InjectInboundDirect(buf, offset)
}
}
// pinger sends ping packets for a few seconds.
//
// These generated packets are used to ensure we trigger the spray logic in
// the magicsock package for NAT traversal.
//
// These are only used with legacy peers (before 0.100.0) that don't
// have advertised discovery keys.
type pinger struct {
e *userspaceEngine
done chan struct{} // closed after shutdown (not the ctx.Done() chan)
cancel context.CancelFunc
}
// close cleans up pinger and removes it from the userspaceEngine.pingers map.
// It cannot be called while p.e.mu is held.
func (p *pinger) close() {
p.cancel()
<-p.done
}
func (p *pinger) run(ctx context.Context, peerKey wgkey.Key, ips []netaddr.IP, srcIP netaddr.IP) {
defer func() {
p.e.mu.Lock()
if p.e.pingers[peerKey] == p {
delete(p.e.pingers, peerKey)
}
p.e.mu.Unlock()
close(p.done)
}()
header := packet.ICMP4Header{
IP4Header: packet.IP4Header{
Src: srcIP,
},
Type: packet.ICMP4EchoRequest,
Code: packet.ICMP4NoCode,
}
// sendFreq is slightly longer than sprayFreq in magicsock to ensure
// that if these ping packets are the only source of early packets
// sent to the peer, that each one will be sprayed.
const sendFreq = 300 * time.Millisecond
const stopAfter = 3 * time.Second
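// With these values the pinger injects roughly stopAfter/sendFreq = 3s/300ms = 10
// echo requests per destination IP before stopping (illustrative arithmetic only).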
start := time.Now()
var dstIPs []netaddr.IP
for _, ip := range ips {
if ip.Is6() {
// This code is only used for legacy (pre-discovery)
// peers. They're not going to work right with IPv6 on the
// overlay anyway, so don't bother trying to make ping
// work.
continue
}
dstIPs = append(dstIPs, ip)
}
payload := []byte("magicsock_spray") // no meaning
header.IPID = 1
t := time.NewTicker(sendFreq)
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case <-t.C:
}
if time.Since(start) > stopAfter {
return
}
for _, dstIP := range dstIPs {
header.Dst = dstIP
// InjectOutbound takes ownership of the packet, so we allocate.
b := packet.Generate(&header, payload)
p.e.tundev.InjectOutbound(b)
}
header.IPID++
}
}
// pinger sends ping packets for a few seconds.
//
// These generated packets are used to ensure we trigger the spray logic in
// the magicsock package for NAT traversal.
//
// This is only used with legacy peers (before 0.100.0) that don't
// have advertised discovery keys.
func (e *userspaceEngine) pinger(peerKey wgkey.Key, ips []netaddr.IP) {
e.logf("[v1] generating initial ping traffic to %s (%v)", peerKey.ShortString(), ips)
var srcIP netaddr.IP
e.wgLock.Lock()
if len(e.lastCfgFull.Addresses) > 0 {
srcIP = e.lastCfgFull.Addresses[0].IP
}
e.wgLock.Unlock()
if srcIP.IsZero() {
e.logf("generating initial ping traffic: no source IP")
return
}
ctx, cancel := context.WithCancel(context.Background())
p := &pinger{
e: e,
done: make(chan struct{}),
cancel: cancel,
}
e.mu.Lock()
if e.closing {
e.mu.Unlock()
return
}
oldPinger := e.pingers[peerKey]
e.pingers[peerKey] = p
e.mu.Unlock()
if oldPinger != nil {
oldPinger.close()
}
p.run(ctx, peerKey, ips, srcIP)
}
var (
debugTrimWireguardEnv = os.Getenv("TS_DEBUG_TRIM_WIREGUARD")
debugTrimWireguard, _ = strconv.ParseBool(debugTrimWireguardEnv)
)
// forceFullWireguardConfig reports whether we should give wireguard
// our full network map, even for inactive peers
//
// TODO(bradfitz): remove this after our 1.0 launch; we don't want to
// enable wireguard config trimming quite yet because it just landed
// and we haven't got enough time testing it.
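//
// An illustrative reading of the TS_DEBUG_TRIM_WIREGUARD knob (derived from the env
// check below, not additional behavior): setting it to "false" forces the full config,
// while setting it to "true" allows trimming regardless of platform or peer count.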
func forceFullWireguardConfig(numPeers int) bool {
// Did the user explicitly enable trimming via the environment variable knob?
if debugTrimWireguardEnv != "" {
return !debugTrimWireguard
}
if opt := controlclient.TrimWGConfig(); opt != "" {
return !opt.EqualBool(true)
}
// On iOS with large networks, it's critical, so turn on trimming.
// Otherwise we run out of memory from wireguard-go goroutine stacks+buffers.
// This will be the default later for all platforms and network sizes.
if numPeers > 50 && version.OS() == "iOS" {
return false
}
return false
}
// isTrimmablePeer reports whether p is a peer that we can trim out of the
// network map.
//
// We can only trim peers that both a) support discovery (because we
// know who they are when we receive their data and don't need to rely
// on wireguard-go figuring it out) and b) for implementation
// simplicity, have only non-subnet AllowedIPs (an IPv4 /32 or IPv6
// /128), which is the common case for most peers. Subnet router nodes
// will just always be created in the wireguard-go config.
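//
// For illustration (assumed example values): a trimmable peer has exactly one endpoint
// whose Host looks like "<64-hex-digits>.disco.tailscale" and AllowedIPs such as
// 100.101.102.103/32, whereas a subnet router advertising 192.168.0.0/24 is always
// kept in the wireguard-go config.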
func isTrimmablePeer(p *wgcfg.Peer, numPeers int) bool {
if forceFullWireguardConfig(numPeers) {
return false
}
if len(p.Endpoints) != 1 {
return false
}
if !strings.HasSuffix(p.Endpoints[0].Host, ".disco.tailscale") {
return false
}
// AllowedIPs must all be single IPs, not subnets.
for _, aip := range p.AllowedIPs {
if !aip.IsSingleIP() {
return false
}
}
return true
}
// noteReceiveActivity is called by magicsock when a packet has been received
// by the peer using discovery key dk. Magicsock calls this no more than
// every 10 seconds for a given peer.
func (e *userspaceEngine) noteReceiveActivity(dk tailcfg.DiscoKey) {
e.wgLock.Lock()
defer e.wgLock.Unlock()
if _, ok := e.recvActivityAt[dk]; !ok {
// Not a trimmable peer we care about tracking. (See isTrimmablePeer)
if e.trimmedDisco[dk] {
e.logf("wgengine: [unexpected] noteReceiveActivity called on idle discokey %v that's not in recvActivityAt", dk.ShortString())
}
return
}
now := e.timeNow()
e.recvActivityAt[dk] = now
// If the last activity time jumped a bunch (say, at least
// half the idle timeout) then see if we need to reprogram
// Wireguard. This could probably be just
// lazyPeerIdleThreshold without the divide by 2, but
// maybeReconfigWireguardLocked is cheap enough to call every
// couple minutes (just not on every packet).
if e.trimmedDisco[dk] {
e.logf("wgengine: idle peer %v now active, reconfiguring wireguard", dk.ShortString())
e.maybeReconfigWireguardLocked(nil)
}
}
// isActiveSince reports whether the peer identified by (dk, ip) has
// had a packet sent to or received from it since t.
//
// e.wgLock must be held.
func (e *userspaceEngine) isActiveSince(dk tailcfg.DiscoKey, ip netaddr.IP, t time.Time) bool {
if e.recvActivityAt[dk].After(t) {
return true
}
timePtr, ok := e.sentActivityAt[ip]
if !ok {
return false
}
unixTime := atomic.LoadInt64(timePtr)
return unixTime >= t.Unix()
}
// discoKeyFromPeer returns the DiscoKey for a wireguard config's Peer.
//
// Invariant: isTrimmablePeer(p) == true, so it should have 1 endpoint with
// Host of form "<64-hex-digits>.disco.tailscale". If invariant is violated,
// we return the zero value.
func discoKeyFromPeer(p *wgcfg.Peer) tailcfg.DiscoKey {
host := p.Endpoints[0].Host
if len(host) < 64 {
return tailcfg.DiscoKey{}
}
k, err := key.NewPublicFromHexMem(mem.S(host[:64]))
if err != nil {
return tailcfg.DiscoKey{}
}
return tailcfg.DiscoKey(k)
}
// discoChanged are the set of peers whose disco keys have changed, implying they've restarted.
// If a peer is in this set and was previously in the live wireguard config,
// it needs to be first removed and then re-added to flush out its wireguard session key.
// If discoChanged is nil or empty, this extra removal step isn't done.
//
// e.wgLock must be held.
func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Public]bool) error {
if hook := e.testMaybeReconfigHook; hook != nil {
hook()
return nil
}
full := e.lastCfgFull
// Compute a minimal config to pass to wireguard-go
// based on the full config. Prune off all the peers
// and only add the active ones back.
min := full
min.Peers = nil
// We'll only keep a peer around if it's been active in
// the past 5 minutes. That's more than WireGuard's key
// rotation time anyway so it's no harm if we remove it
// later if it's been inactive.
activeCutoff := e.timeNow().Add(-lazyPeerIdleThreshold)
// Not all peers can be trimmed from the network map (see
// isTrimmablePeer). For those that are trimmable, keep track
// of their DiscoKey and Tailscale IPs. These are the ones
// we'll need to install tracking hooks for to watch their
// send/receive activity.
trackDisco := make([]tailcfg.DiscoKey, 0, len(full.Peers))
trackIPs := make([]netaddr.IP, 0, len(full.Peers))
trimmedDisco := map[tailcfg.DiscoKey]bool{} // TODO: don't re-alloc this map each time
needRemoveStep := false
for i := range full.Peers {
p := &full.Peers[i]
if !isTrimmablePeer(p, len(full.Peers)) {
min.Peers = append(min.Peers, *p)
if discoChanged[key.Public(p.PublicKey)] {
needRemoveStep = true
}
continue
}
tsIP := p.AllowedIPs[0].IP
dk := discoKeyFromPeer(p)
trackDisco = append(trackDisco, dk)
trackIPs = append(trackIPs, tsIP)
if e.isActiveSince(dk, tsIP, activeCutoff) {
min.Peers = append(min.Peers, *p)
if discoChanged[key.Public(p.PublicKey)] {
needRemoveStep = true
}
} else {
trimmedDisco[dk] = true
}
}
if !deepprint.UpdateHash(&e.lastEngineSigTrim, min, trimmedDisco, trackDisco, trackIPs) {
// No changes
return nil
}
e.trimmedDisco = trimmedDisco
e.updateActivityMapsLocked(trackDisco, trackIPs)
if needRemoveStep {
minner := min
minner.Peers = nil
numRemove := 0
for _, p := range min.Peers {
if discoChanged[key.Public(p.PublicKey)] {
numRemove++
continue
}
minner.Peers = append(minner.Peers, p)
}
if numRemove > 0 {
e.logf("wgengine: Reconfig: removing session keys for %d peers", numRemove)
if err := e.wgdev.Reconfig(&minner); err != nil {
e.logf("wgdev.Reconfig: %v", err)
return err
}
}
}
e.logf("wgengine: Reconfig: configuring userspace wireguard config (with %d/%d peers)", len(min.Peers), len(full.Peers))
if err := e.wgdev.Reconfig(&min); err != nil {
e.logf("wgdev.Reconfig: %v", err)
return err
}
return nil
}
// updateActivityMapsLocked updates the data structures used for tracking the activity
// of wireguard peers that we might add/remove dynamically from the real config
// as given to wireguard-go.
//
// e.wgLock must be held.
func (e *userspaceEngine) updateActivityMapsLocked(trackDisco []tailcfg.DiscoKey, trackIPs []netaddr.IP) {
// Generate the new map of which discokeys we want to track
// receive times for.
mr := map[tailcfg.DiscoKey]time.Time{} // TODO: only recreate this if set of keys changed
for _, dk := range trackDisco {
// Preserve old times in the new map, but also
// populate map entries for new trackDisco values with
// time.Time{} zero values. (Only entries in this map
// are tracked, so the Time zero values allow it to be
// tracked later)
mr[dk] = e.recvActivityAt[dk]
}
e.recvActivityAt = mr
oldTime := e.sentActivityAt
e.sentActivityAt = make(map[netaddr.IP]*int64, len(oldTime))
oldFunc := e.destIPActivityFuncs
e.destIPActivityFuncs = make(map[netaddr.IP]func(), len(oldFunc))
updateFn := func(timePtr *int64) func() {
return func() {
now := e.timeNow().Unix()
old := atomic.LoadInt64(timePtr)
// How long's it been since we last sent a packet?
// For our first packet, old is Unix epoch time 0 (1970).
elapsedSec := now - old
if elapsedSec >= int64(packetSendTimeUpdateFrequency/time.Second) {
atomic.StoreInt64(timePtr, now)
}
// On a big jump, assume we might no longer be in the wireguard
// config and go check.
if elapsedSec >= int64(packetSendRecheckWireguardThreshold/time.Second) {
e.wgLock.Lock()
defer e.wgLock.Unlock()
e.maybeReconfigWireguardLocked(nil)
}
}
}
for _, ip := range trackIPs {
timePtr := oldTime[ip]
if timePtr == nil {
timePtr = new(int64)
}
e.sentActivityAt[ip] = timePtr
fn := oldFunc[ip]
if fn == nil {
fn = updateFn(timePtr)
}
e.destIPActivityFuncs[ip] = fn
}
e.tundev.SetDestIPActivityFuncs(e.destIPActivityFuncs)
}
func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config) error {
if routerCfg == nil {
panic("routerCfg must not be nil")
}
localAddrs := map[netaddr.IP]bool{}
for _, addr := range routerCfg.LocalAddrs {
localAddrs[addr.IP] = true
}
e.localAddrs.Store(localAddrs)
e.wgLock.Lock()
defer e.wgLock.Unlock()
peerSet := make(map[key.Public]struct{}, len(cfg.Peers))
e.mu.Lock()
e.peerSequence = e.peerSequence[:0]
for _, p := range cfg.Peers {
e.peerSequence = append(e.peerSequence, wgkey.Key(p.PublicKey))
peerSet[key.Public(p.PublicKey)] = struct{}{}
}
e.mu.Unlock()
engineChanged := deepprint.UpdateHash(&e.lastEngineSigFull, cfg)
routerChanged := deepprint.UpdateHash(&e.lastRouterSig, routerCfg)
if !engineChanged && !routerChanged {
return ErrNoChanges
}
// See if any peers have changed disco keys, which means they've restarted.
// If so, we need to update the wireguard-go/device.Device in two phases:
// once without the node which has restarted, to clear its wireguard session key,
// and a second time with it.
discoChanged := make(map[key.Public]bool)
{
prevEP := make(map[key.Public]wgcfg.Endpoint)
for i := range e.lastCfgFull.Peers {
if p := &e.lastCfgFull.Peers[i]; len(p.Endpoints) == 1 {
prevEP[key.Public(p.PublicKey)] = p.Endpoints[0]
}
}
for i := range cfg.Peers {
p := &cfg.Peers[i]
if len(p.Endpoints) != 1 {
continue
}
pub := key.Public(p.PublicKey)
if old, ok := prevEP[pub]; ok && old != p.Endpoints[0] {
discoChanged[pub] = true
e.logf("wgengine: Reconfig: %s changed from %s to %s", pub.ShortString(), &old, &p.Endpoints[0])
}
}
}
e.lastCfgFull = cfg.Copy()
// Tell magicsock about the new (or initial) private key
// (which is needed by DERP) before wgdev gets it, as wgdev
// will start trying to handshake, which we want to be able to
// go over DERP.
if err := e.magicConn.SetPrivateKey(wgkey.Private(cfg.PrivateKey)); err != nil {
e.logf("wgengine: Reconfig: SetPrivateKey: %v", err)
}
e.magicConn.UpdatePeers(peerSet)
if err := e.maybeReconfigWireguardLocked(discoChanged); err != nil {
return err
}
if routerChanged {
if routerCfg.DNS.Proxied {
ips := routerCfg.DNS.Nameservers
upstreams := make([]net.Addr, len(ips))
for i, ip := range ips {
stdIP := ip.IPAddr()
upstreams[i] = &net.UDPAddr{
IP: stdIP.IP,
Port: 53,
Zone: stdIP.Zone,
}
}
e.resolver.SetUpstreams(upstreams)
routerCfg.DNS.Nameservers = []netaddr.IP{tsaddr.TailscaleServiceIP()}
}
e.logf("wgengine: Reconfig: configuring router")
if err := e.router.Set(routerCfg); err != nil {
return err
}
}
e.logf("[v1] wgengine: Reconfig done")
return nil
}
func (e *userspaceEngine) GetFilter() *filter.Filter {
return e.tundev.GetFilter()
}
func (e *userspaceEngine) SetFilter(filt *filter.Filter) {
e.tundev.SetFilter(filt)
}
func (e *userspaceEngine) SetDNSMap(dm *tsdns.Map) {
e.resolver.SetMap(dm)
}
func (e *userspaceEngine) SetStatusCallback(cb StatusCallback) {
e.mu.Lock()
defer e.mu.Unlock()
e.statusCallback = cb
}
func (e *userspaceEngine) getStatusCallback() StatusCallback {
e.mu.Lock()
defer e.mu.Unlock()
return e.statusCallback
}
// TODO: this function returns an error but it's always nil, and when
// there's actually a problem it just calls log.Fatal. Why?
func (e *userspaceEngine) getStatus() (*Status, error) {
// Grab derpConns before acquiring wgLock to not violate lock ordering;
// the DERPs method acquires magicsock.Conn.mu.
// (See comment in userspaceEngine's declaration.)
derpConns := e.magicConn.DERPs()
e.wgLock.Lock()
defer e.wgLock.Unlock()
e.mu.Lock()
closing := e.closing
e.mu.Unlock()
if closing {
return nil, errors.New("engine closing; no status")
}
if e.wgdev == nil {
// RequestStatus was invoked before the wgengine has
// finished initializing. This can happen when wgengine
// provides a callback to magicsock for endpoint
// updates that calls RequestStatus.
return nil, nil
}
// lineLen is the max UAPI line we expect. The longest I see is
// len("preshared_key=")+64 hex+"\n" == 79. Add some slop.
const lineLen = 100
pr, pw := io.Pipe()
errc := make(chan error, 1)
go func() {
defer pw.Close()
bw := bufio.NewWriterSize(pw, lineLen)
// TODO(apenwarr): get rid of silly uapi stuff for in-process comms
// FIXME: get notified of status changes instead of polling.
filter := device.IPCGetFilter{
// The allowed_ips are somewhat expensive to compute and they're
// unused below; request that they not be sent instead.
FilterAllowedIPs: true,
}
if err := e.wgdev.IpcGetOperationFiltered(bw, filter); err != nil {
errc <- fmt.Errorf("IpcGetOperation: %w", err)
return
}
errc <- bw.Flush()
}()
pp := make(map[wgkey.Key]*PeerStatus)
p := &PeerStatus{}
var hst1, hst2, n int64
var err error
bs := bufio.NewScanner(pr)
bs.Buffer(make([]byte, lineLen), lineLen)
for bs.Scan() {
line := bs.Bytes()
k := line
var v mem.RO
if i := bytes.IndexByte(line, '='); i != -1 {
k = line[:i]
v = mem.B(line[i+1:])
}
switch string(k) {
case "public_key":
pk, err := key.NewPublicFromHexMem(v)
if err != nil {
log.Fatalf("IpcGetOperation: invalid key %#v", v)
}
p = &PeerStatus{}
pp[wgkey.Key(pk)] = p
key := tailcfg.NodeKey(pk)
p.NodeKey = key
case "rx_bytes":
n, err = mem.ParseInt(v, 10, 64)
p.RxBytes = ByteCount(n)
if err != nil {
log.Fatalf("IpcGetOperation: rx_bytes invalid: %#v", line)
}
case "tx_bytes":
n, err = mem.ParseInt(v, 10, 64)
p.TxBytes = ByteCount(n)
if err != nil {
log.Fatalf("IpcGetOperation: tx_bytes invalid: %#v", line)
}
case "last_handshake_time_sec":
hst1, err = mem.ParseInt(v, 10, 64)
if err != nil {
log.Fatalf("IpcGetOperation: hst1 invalid: %#v", line)
}
case "last_handshake_time_nsec":
hst2, err = mem.ParseInt(v, 10, 64)
if err != nil {
log.Fatalf("IpcGetOperation: hst2 invalid: %#v", line)
}
if hst1 != 0 || hst2 != 0 {
p.LastHandshake = time.Unix(hst1, hst2)
} // else leave at time.IsZero()
}
}
if err := bs.Err(); err != nil {
log.Fatalf("reading IpcGetOperation output: %v", err)
}
if err := <-errc; err != nil {
log.Fatalf("IpcGetOperation: %v", err)
}
e.mu.Lock()
defer e.mu.Unlock()
var peers []PeerStatus
for _, pk := range e.peerSequence {
if p, ok := pp[pk]; ok { // ignore idle ones not in wireguard-go's config
peers = append(peers, *p)
}
}
return &Status{
LocalAddrs: append([]string(nil), e.endpoints...),
Peers: peers,
DERPs: derpConns,
}, nil
}
func (e *userspaceEngine) RequestStatus() {
// This is slightly tricky. e.getStatus() can theoretically get
// blocked inside wireguard for a while, and RequestStatus() is
// sometimes called from a goroutine, so we don't want a lot of
// them hanging around. On the other hand, requesting multiple
// status updates simultaneously is pointless anyway; they will
// all say the same thing.
// Enqueue at most one request. If one is in progress already, this
// adds one more to the queue. If one has been requested but not
// started, it is a no-op.
select {
case e.reqCh <- struct{}{}:
default:
}
// Dequeue at most one request. Another thread may have already
// dequeued the request we enqueued above, which is fine, since the
// information is guaranteed to be at least as recent as the current
// call to RequestStatus().
select {
case <-e.reqCh:
s, err := e.getStatus()
if s == nil && err == nil {
e.logf("RequestStatus: weird: both s and err are nil")
return
}
if cb := e.getStatusCallback(); cb != nil {
cb(s, err)
}
default:
}
}
func (e *userspaceEngine) Close() {
var pingers []*pinger
e.mu.Lock()
if e.closing {
e.mu.Unlock()
return
}
e.closing = true
for _, pinger := range e.pingers {
pingers = append(pingers, pinger)
}
e.mu.Unlock()
r := bufio.NewReader(strings.NewReader(""))
e.wgdev.IpcSetOperation(r)
e.resolver.Close()
e.magicConn.Close()
e.linkMon.Close()
e.router.Close()
e.wgdev.Close()
e.tundev.Close()
// Shut down pingers after tundev is closed (by e.wgdev.Close) so the
// synchronous close does not get stuck on InjectOutbound.
for _, pinger := range pingers {
pinger.close()
}
close(e.waitCh)
}
func (e *userspaceEngine) Wait() {
<-e.waitCh
}
func (e *userspaceEngine) setLinkState(st *interfaces.State) (changed bool, cb func(major bool, newState *interfaces.State)) {
if st == nil {
return false, nil
}
e.mu.Lock()
defer e.mu.Unlock()
changed = e.linkState == nil || !st.Equal(e.linkState)
e.linkState = st
return changed, e.linkChangeCallback
}
func (e *userspaceEngine) LinkChange(isExpensive bool) {
cur, err := getLinkState()
if err != nil {
e.logf("LinkChange: interfaces.GetState: %v", err)
return
}
cur.IsExpensive = isExpensive
needRebind, linkChangeCallback := e.setLinkState(cur)
up := cur.AnyInterfaceUp()
if !up {
e.logf("LinkChange: all links down; pausing: %v", cur)
} else if needRebind {
e.logf("LinkChange: major, rebinding. New state: %v", cur)
} else {
e.logf("[v1] LinkChange: minor")
}
e.magicConn.SetNetworkUp(up)
why := "link-change-minor"
if needRebind {
why = "link-change-major"
e.magicConn.Rebind()
}
e.magicConn.ReSTUN(why)
if linkChangeCallback != nil {
go linkChangeCallback(needRebind, cur)
}
}
func (e *userspaceEngine) SetLinkChangeCallback(cb func(major bool, newState *interfaces.State)) {
e.mu.Lock()
defer e.mu.Unlock()
e.linkChangeCallback = cb
if e.linkState != nil {
go cb(false, e.linkState)
}
}
func getLinkState() (*interfaces.State, error) {
s, err := interfaces.GetState()
if s != nil {
s.RemoveTailscaleInterfaces()
}
return s, err
}
func (e *userspaceEngine) SetNetInfoCallback(cb NetInfoCallback) {
e.magicConn.SetNetInfoCallback(cb)
}
func (e *userspaceEngine) SetDERPMap(dm *tailcfg.DERPMap) {
e.magicConn.SetDERPMap(dm)
}
func (e *userspaceEngine) SetNetworkMap(nm *controlclient.NetworkMap) {
e.magicConn.SetNetworkMap(nm)
}
func (e *userspaceEngine) DiscoPublicKey() tailcfg.DiscoKey {
return e.magicConn.DiscoPublicKey()
}
func (e *userspaceEngine) UpdateStatus(sb *ipnstate.StatusBuilder) {
st, err := e.getStatus()
if err != nil {
e.logf("wgengine: getStatus: %v", err)
return
}
for _, ps := range st.Peers {
sb.AddPeer(key.Public(ps.NodeKey), &ipnstate.PeerStatus{
RxBytes: int64(ps.RxBytes),
TxBytes: int64(ps.TxBytes),
LastHandshake: ps.LastHandshake,
InEngine: true,
})
}
e.magicConn.UpdateStatus(sb)
}
func (e *userspaceEngine) Ping(ip netaddr.IP, cb func(*ipnstate.PingResult)) {
e.magicConn.Ping(ip, cb)
}
// diagnoseTUNFailure is called if tun.CreateTUN fails, to poke around
// the system and log some diagnostic info that might help debug why
// TUN failed. Because TUN's already failed and the program's
// about to end, we might as well log a lot.
func diagnoseTUNFailure(logf logger.Logf) {
switch runtime.GOOS {
case "linux":
diagnoseLinuxTUNFailure(logf)
default:
logf("no TUN failure diagnostics for OS %q", runtime.GOOS)
}
}
func diagnoseLinuxTUNFailure(logf logger.Logf) {
kernel, err := exec.Command("uname", "-r").Output()
kernel = bytes.TrimSpace(kernel)
if err != nil {
logf("no TUN, and failed to look up kernel version: %v", err)
return
}
logf("Linux kernel version: %s", kernel)
modprobeOut, err := exec.Command("/sbin/modprobe", "tun").CombinedOutput()
if err == nil {
logf("'modprobe tun' successful")
// Either tun is currently loaded, or it's statically
// compiled into the kernel (which modprobe checks
// with /lib/modules/$(uname -r)/modules.builtin)
//
// So if there's a problem at this point, it's
// probably because /dev/net/tun doesn't exist.
const dev = "/dev/net/tun"
if fi, err := os.Stat(dev); err != nil {
logf("tun module loaded in kernel, but %s does not exist", dev)
} else {
logf("%s: %v", dev, fi.Mode())
}
// We failed to find why it failed. Just let our
// caller report the error it got from wireguard-go.
return
}
logf("is CONFIG_TUN enabled in your kernel? `modprobe tun` failed with: %s", modprobeOut)
switch distro.Get() {
case distro.Debian:
dpkgOut, err := exec.Command("dpkg", "-S", "kernel/drivers/net/tun.ko").CombinedOutput()
if len(bytes.TrimSpace(dpkgOut)) == 0 || err != nil {
logf("tun module not loaded nor found on disk")
return
}
if !bytes.Contains(dpkgOut, kernel) {
logf("kernel/drivers/net/tun.ko found on disk, but not for current kernel; are you in middle of a system update and haven't rebooted? found: %s", dpkgOut)
}
case distro.Arch:
findOut, err := exec.Command("find", "/lib/modules/", "-path", "*/net/tun.ko*").CombinedOutput()
if len(bytes.TrimSpace(findOut)) == 0 || err != nil {
logf("tun module not loaded nor found on disk")
return
}
if !bytes.Contains(findOut, kernel) {
logf("kernel/drivers/net/tun.ko found on disk, but not for current kernel; are you in middle of a system update and haven't rebooted? found: %s", findOut)
}
case distro.OpenWrt:
out, err := exec.Command("opkg", "list-installed").CombinedOutput()
if err != nil {
logf("error querying OpenWrt installed packages: %s", out)
return
}
for _, pkg := range []string{"kmod-tun", "ca-bundle"} {
if !bytes.Contains(out, []byte(pkg+" - ")) {
logf("Missing required package %s; run: opkg install %s", pkg, pkg)
}
}
}
}
|
[
"\"TS_DEBUG_TRIM_WIREGUARD\""
] |
[] |
[
"TS_DEBUG_TRIM_WIREGUARD"
] |
[]
|
["TS_DEBUG_TRIM_WIREGUARD"]
|
go
| 1 | 0 | |
gh/gh.go
|
package gh
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"sort"
"strings"
"time"
"github.com/google/go-github/v33/github"
"github.com/hairyhenderson/go-codeowners"
"github.com/k1LoW/ghdag/erro"
"github.com/k1LoW/ghdag/target"
"github.com/rs/zerolog/log"
"github.com/shurcooL/githubv4"
"golang.org/x/oauth2"
)
const limit = 100
type GhClient interface {
FetchTargets(ctx context.Context) (target.Targets, error)
FetchTarget(ctx context.Context, n int) (*target.Target, error)
SetLabels(ctx context.Context, n int, labels []string) error
SetAssignees(ctx context.Context, n int, assignees []string) error
SetReviewers(ctx context.Context, n int, reviewers []string) error
AddComment(ctx context.Context, n int, comment string) error
CloseIssue(ctx context.Context, n int) error
MergePullRequest(ctx context.Context, n int) error
ResolveUsers(ctx context.Context, in []string) ([]string, error)
}
type Client struct {
v3 *github.Client
v4 *githubv4.Client
owner string
repo string
}
// NewClient returns a new Client configured from the environment.
func NewClient() (*Client, error) {
ctx := context.Background()
// GITHUB_TOKEN
token := os.Getenv("GITHUB_TOKEN")
if token == "" {
return nil, fmt.Errorf("env %s is not set", "GITHUB_TOKEN")
}
// REST API Client
v3c := github.NewClient(httpClient(token))
if v3ep := os.Getenv("GITHUB_API_URL"); v3ep != "" {
baseEndpoint, err := url.Parse(v3ep)
if err != nil {
return nil, err
}
if !strings.HasSuffix(baseEndpoint.Path, "/") {
baseEndpoint.Path += "/"
}
v3c.BaseURL = baseEndpoint
}
// GraphQL API Client
src := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: token},
)
v4hc := oauth2.NewClient(ctx, src)
v4ep := os.Getenv("GITHUB_GRAPHQL_URL")
if v4ep == "" {
v4ep = "https://api.github.com/graphql"
}
v4c := githubv4.NewEnterpriseClient(v4ep, v4hc)
ownerrepo := os.Getenv("GITHUB_REPOSITORY")
if ownerrepo == "" {
return nil, fmt.Errorf("env %s is not set", "GITHUB_REPOSITORY")
}
splitted := strings.Split(ownerrepo, "/")
owner := splitted[0]
repo := splitted[1]
_, res, err := v3c.Repositories.Get(ctx, owner, repo)
if err != nil {
return nil, err
}
scopes := strings.Split(res.Header.Get("X-OAuth-Scopes"), ", ")
log.Debug().Msg(fmt.Sprintf("the scopes your token has authorized: '%s'", strings.Join(scopes, "', '")))
return &Client{
v3: v3c,
v4: v4c,
owner: owner,
repo: repo,
}, nil
}
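// A minimal usage sketch (hypothetical caller; assumes GITHUB_TOKEN and
// GITHUB_REPOSITORY are set, with GITHUB_API_URL / GITHUB_GRAPHQL_URL as
// optional GitHub Enterprise overrides):
//
//	c, err := gh.NewClient()
//	if err != nil {
//		return err
//	}
//	targets, err := c.FetchTargets(context.Background())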
type issueNode struct {
Author struct {
Login githubv4.String
}
Number githubv4.Int
State githubv4.String
Title githubv4.String
Body githubv4.String
URL githubv4.String
CreatedAt githubv4.DateTime
UpdatedAt githubv4.DateTime
Labels struct {
Nodes []struct {
Name githubv4.String
}
} `graphql:"labels(first: 100)"`
Assignees struct {
Nodes []struct {
Login githubv4.String
}
} `graphql:"assignees(first: 100)"`
Comments struct {
Nodes []struct {
Author struct {
Login githubv4.String
}
Body githubv4.String
CreatedAt githubv4.DateTime
}
PageInfo struct {
HasNextPage bool
}
} `graphql:"comments(first: $limit, orderBy: {direction: DESC, field: UPDATED_AT})"`
}
type pullRequestNode struct {
Author struct {
Login githubv4.String
}
HeadRefName githubv4.String
Number githubv4.Int
State githubv4.String
Title githubv4.String
Body githubv4.String
URL githubv4.String
IsDraft githubv4.Boolean
ChangedFiles githubv4.Int
Mergeable githubv4.MergeableState
ReviewDecision githubv4.PullRequestReviewDecision
ReviewRequests struct {
Nodes []struct {
AsCodeOwner githubv4.Boolean
RequestedReviewer struct {
User struct {
Login githubv4.String
} `graphql:"... on User"`
Team struct {
Organization struct {
Login githubv4.String
}
Slug githubv4.String
} `graphql:"... on Team"`
}
}
} `graphql:"reviewRequests(first: 100)"`
LatestReviews struct {
Nodes []struct {
Author struct {
Login githubv4.String
}
State githubv4.PullRequestReviewState
}
} `graphql:"latestReviews(first: 100)"`
CreatedAt githubv4.DateTime
UpdatedAt githubv4.DateTime
Labels struct {
Nodes []struct {
Name githubv4.String
}
} `graphql:"labels(first: 100)"`
Assignees struct {
Nodes []struct {
Login githubv4.String
}
} `graphql:"assignees(first: 100)"`
Comments struct {
Nodes []struct {
Author struct {
Login githubv4.String
}
Body githubv4.String
CreatedAt githubv4.DateTime
}
PageInfo struct {
HasNextPage bool
}
} `graphql:"comments(first: $limit, orderBy: {direction: DESC, field: UPDATED_AT})"`
}
type pullRequestFilesNode struct {
Files struct {
Nodes []struct {
Path githubv4.String
}
PageInfo struct {
HasNextPage bool
EndCursor githubv4.String
}
} `graphql:"files(first: $limit, after: $cursor)"`
}
func (c *Client) FetchTargets(ctx context.Context) (target.Targets, error) {
targets := target.Targets{}
var q struct {
Viewer struct {
Login githubv4.String
} `graphql:"viewer"`
Repogitory struct {
Issues struct {
Nodes []issueNode
PageInfo struct {
HasNextPage bool
}
} `graphql:"issues(first: $limit, states: OPEN, orderBy: {direction: DESC, field: CREATED_AT})"`
PullRequests struct {
Nodes []pullRequestNode
PageInfo struct {
HasNextPage bool
}
} `graphql:"pullRequests(first: $limit, states: OPEN, orderBy: {direction: DESC, field: CREATED_AT})"`
} `graphql:"repository(owner: $owner, name: $repo)"`
}
variables := map[string]interface{}{
"owner": githubv4.String(c.owner),
"repo": githubv4.String(c.repo),
"limit": githubv4.Int(limit),
}
if err := c.v4.Query(ctx, &q, variables); err != nil {
return nil, err
}
if q.Repogitory.Issues.PageInfo.HasNextPage {
return nil, fmt.Errorf("too many opened issues (limit: %d)", limit)
}
if q.Repogitory.PullRequests.PageInfo.HasNextPage {
return nil, fmt.Errorf("too many opened pull requests (limit: %d)", limit)
}
now := time.Now()
login := string(q.Viewer.Login)
for _, i := range q.Repogitory.Issues.Nodes {
t, err := buildTargetFromIssue(login, i, now)
if err != nil {
return nil, err
}
targets[t.Number] = t
}
for _, p := range q.Repogitory.PullRequests.Nodes {
if bool(p.IsDraft) {
// Skip draft pull request
continue
}
t, err := c.buildTargetFromPullRequest(ctx, login, p, now)
if err != nil {
return nil, err
}
targets[t.Number] = t
}
return targets, nil
}
func (c *Client) FetchTarget(ctx context.Context, n int) (*target.Target, error) {
var q struct {
Viewer struct {
Login githubv4.String
} `graphql:"viewer"`
Repogitory struct {
IssueOrPullRequest struct {
Issue issueNode `graphql:"... on Issue"`
PullRequest pullRequestNode `graphql:"... on PullRequest"`
} `graphql:"issueOrPullRequest(number: $number)"`
} `graphql:"repository(owner: $owner, name: $repo)"`
}
variables := map[string]interface{}{
"owner": githubv4.String(c.owner),
"repo": githubv4.String(c.repo),
"number": githubv4.Int(n),
"limit": githubv4.Int(limit),
}
if err := c.v4.Query(ctx, &q, variables); err != nil {
return nil, err
}
now := time.Now()
login := string(q.Viewer.Login)
if strings.Contains(string(q.Repogitory.IssueOrPullRequest.Issue.URL), "/issues/") {
// Issue
i := q.Repogitory.IssueOrPullRequest.Issue
state := strings.ToLower(string(i.State))
if state != "open" {
return nil, erro.NewNotOpenError(fmt.Errorf("issue #%d is %s", int(i.Number), state))
}
return buildTargetFromIssue(login, i, now)
} else {
// Pull request
p := q.Repogitory.IssueOrPullRequest.PullRequest
state := strings.ToLower(string(p.State))
if state != "open" {
return nil, erro.NewNotOpenError(fmt.Errorf("pull request #%d is %s", int(p.Number), state))
}
if bool(p.IsDraft) {
return nil, erro.NewNotOpenError(fmt.Errorf("pull request #%d is draft", int(p.Number)))
}
return c.buildTargetFromPullRequest(ctx, login, p, now)
}
}
func (c *Client) SetLabels(ctx context.Context, n int, labels []string) error {
_, _, err := c.v3.Issues.Edit(ctx, c.owner, c.repo, n, &github.IssueRequest{
Labels: &labels,
})
return err
}
func (c *Client) SetAssignees(ctx context.Context, n int, assignees []string) error {
if _, _, err := c.v3.Issues.Edit(ctx, c.owner, c.repo, n, &github.IssueRequest{
Assignees: &assignees,
}); err != nil {
return err
}
return nil
}
func (c *Client) SetReviewers(ctx context.Context, n int, reviewers []string) error {
ru := map[string]struct{}{}
rt := map[string]struct{}{}
for _, r := range reviewers {
trimed := strings.TrimPrefix(r, "@")
if strings.Contains(trimed, "/") {
splitted := strings.Split(trimed, "/")
rt[splitted[1]] = struct{}{}
continue
}
ru[trimed] = struct{}{}
}
current, _, err := c.v3.PullRequests.ListReviewers(ctx, c.owner, c.repo, n, &github.ListOptions{})
if err != nil {
return err
}
du := []string{}
dt := []string{}
for _, u := range current.Users {
if _, ok := ru[u.GetLogin()]; ok {
delete(ru, u.GetLogin())
continue
}
du = append(du, u.GetLogin())
}
for _, t := range current.Teams {
if _, ok := rt[t.GetSlug()]; ok {
delete(rt, t.GetSlug())
continue
}
dt = append(dt, t.GetSlug())
}
if len(du) > 0 || len(dt) > 0 {
if len(du) == 0 {
du = append(du, "ghdag-dummy")
}
if _, err := c.v3.PullRequests.RemoveReviewers(ctx, c.owner, c.repo, n, github.ReviewersRequest{
Reviewers: du,
TeamReviewers: dt,
}); err != nil {
return err
}
}
au := []string{}
at := []string{}
for k := range ru {
au = append(au, k)
}
for k := range rt {
at = append(at, k)
}
if _, _, err := c.v3.PullRequests.RequestReviewers(ctx, c.owner, c.repo, n, github.ReviewersRequest{
Reviewers: au,
TeamReviewers: at,
}); err != nil {
return err
}
return nil
}
func (c *Client) AddComment(ctx context.Context, n int, comment string) error {
_, _, err := c.v3.Issues.CreateComment(ctx, c.owner, c.repo, n, &github.IssueComment{
Body: &comment,
})
return err
}
func (c *Client) CloseIssue(ctx context.Context, n int) error {
state := "closed"
_, _, err := c.v3.Issues.Edit(ctx, c.owner, c.repo, n, &github.IssueRequest{
State: &state,
})
return err
}
func (c *Client) MergePullRequest(ctx context.Context, n int) error {
_, _, err := c.v3.PullRequests.Merge(ctx, c.owner, c.repo, n, "", &github.PullRequestOptions{})
return err
}
func (c *Client) ResolveUsers(ctx context.Context, in []string) ([]string, error) {
res := []string{}
for _, inu := range in {
trimed := strings.TrimPrefix(inu, "@")
if !strings.Contains(trimed, "/") {
res = append(res, trimed)
continue
}
splitted := strings.Split(trimed, "/")
org := splitted[0]
slug := splitted[1]
opts := &github.TeamListTeamMembersOptions{}
users, _, err := c.v3.Teams.ListTeamMembersBySlug(ctx, org, slug, opts)
if err != nil {
return nil, err
}
for _, u := range users {
res = append(res, *u.Login)
}
}
return unique(res), nil
}
type roundTripper struct {
transport *http.Transport
accessToken string
}
func (rt roundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
r.Header.Set("Authorization", fmt.Sprintf("token %s", rt.accessToken))
return rt.transport.RoundTrip(r)
}
func buildTargetFromIssue(login string, i issueNode, now time.Time) (*target.Target, error) {
n := int(i.Number)
if i.Comments.PageInfo.HasNextPage {
return nil, fmt.Errorf("too many issue comments (number: %d, limit: %d)", n, limit)
}
latestComment := struct {
Author struct {
Login githubv4.String
}
Body githubv4.String
CreatedAt githubv4.DateTime
}{}
sort.Slice(i.Comments.Nodes, func(a, b int) bool {
// CreatedAt DESC
return (i.Comments.Nodes[a].CreatedAt.Unix() > i.Comments.Nodes[b].CreatedAt.Unix())
})
if len(i.Comments.Nodes) > 0 {
latestComment = i.Comments.Nodes[0]
}
numComments := 0
for _, c := range i.Comments.Nodes {
if string(c.Author.Login) != login {
break
}
numComments++
}
labels := []string{}
for _, l := range i.Labels.Nodes {
labels = append(labels, string(l.Name))
}
assignees := []string{}
for _, a := range i.Assignees.Nodes {
assignees = append(assignees, string(a.Login))
}
return &target.Target{
Number: n,
State: strings.ToLower(string(i.State)),
Title: string(i.Title),
Body: string(i.Body),
URL: string(i.URL),
Author: string(i.Author.Login),
Labels: labels,
Assignees: assignees,
IsIssue: true,
IsPullRequest: false,
HoursElapsedSinceCreated: int(now.Sub(i.CreatedAt.Time).Hours()),
HoursElapsedSinceUpdated: int(now.Sub(i.UpdatedAt.Time).Hours()),
NumberOfComments: len(i.Comments.Nodes),
LatestCommentAuthor: string(latestComment.Author.Login),
LatestCommentBody: string(latestComment.Body),
NumberOfConsecutiveComments: numComments,
Login: login,
}, nil
}
func (c *Client) buildTargetFromPullRequest(ctx context.Context, login string, p pullRequestNode, now time.Time) (*target.Target, error) {
n := int(p.Number)
if p.Comments.PageInfo.HasNextPage {
return nil, fmt.Errorf("too many pull request comments (number: %d, limit: %d)", n, limit)
}
latestComment := struct {
Author struct {
Login githubv4.String
}
Body githubv4.String
CreatedAt githubv4.DateTime
}{}
sort.Slice(p.Comments.Nodes, func(a, b int) bool {
// CreatedAt DESC
return (p.Comments.Nodes[a].CreatedAt.Unix() > p.Comments.Nodes[b].CreatedAt.Unix())
})
if len(p.Comments.Nodes) > 0 {
latestComment = p.Comments.Nodes[0]
}
numComments := 0
for _, c := range p.Comments.Nodes {
if string(c.Author.Login) != login {
break
}
numComments++
}
isApproved := false
isReviewRequired := false
isChangeRequested := false
switch p.ReviewDecision {
case githubv4.PullRequestReviewDecisionApproved:
isApproved = true
case githubv4.PullRequestReviewDecisionReviewRequired:
isReviewRequired = true
case githubv4.PullRequestReviewDecisionChangesRequested:
isChangeRequested = true
}
mergeable := false
if p.Mergeable == githubv4.MergeableStateMergeable {
mergeable = true
}
labels := []string{}
for _, l := range p.Labels.Nodes {
labels = append(labels, string(l.Name))
}
assignees := []string{}
for _, a := range p.Assignees.Nodes {
assignees = append(assignees, string(a.Login))
}
reviewers := []string{}
codeOwners := []string{}
codeOwnersWhoApproved := []string{}
for _, r := range p.ReviewRequests.Nodes {
var k string
if r.RequestedReviewer.User.Login != "" {
k = string(r.RequestedReviewer.User.Login)
}
if r.RequestedReviewer.Team.Slug != "" {
k = fmt.Sprintf("%s/%s", string(r.RequestedReviewer.Team.Organization.Login), string(r.RequestedReviewer.Team.Slug))
}
reviewers = append(reviewers, k)
if bool(r.AsCodeOwner) {
codeOwners = append(codeOwners, k)
}
}
reviewersWhoApproved := []string{}
for _, r := range p.LatestReviews.Nodes {
u := string(r.Author.Login)
reviewers = append(reviewers, u)
if r.State != githubv4.PullRequestReviewStateApproved {
continue
}
reviewersWhoApproved = append(reviewersWhoApproved, u)
}
reviewers = unique(reviewers)
if len(reviewersWhoApproved) > 0 {
// re-calc code_owners*
codeOwners = []string{}
// calcedCodeOwners is derived from the CODEOWNERS file; it may include entries that do not correspond to existing users or that lack repository permissions.
calcedCodeOwners, err := c.getCodeOwners(ctx, p)
if err != nil {
return nil, err
}
for _, u := range reviewersWhoApproved {
if contains(calcedCodeOwners, u) {
codeOwnersWhoApproved = append(codeOwnersWhoApproved, u)
}
}
for _, u := range reviewers {
if contains(calcedCodeOwners, u) {
codeOwners = append(codeOwners, u)
}
}
}
return &target.Target{
Number: n,
State: string(p.State),
Title: string(p.Title),
Body: string(p.Body),
URL: string(p.URL),
Author: string(p.Author.Login),
Labels: labels,
Assignees: assignees,
Reviewers: reviewers,
CodeOwners: codeOwners,
ReviewersWhoApproved: reviewersWhoApproved,
CodeOwnersWhoApproved: codeOwnersWhoApproved,
IsIssue: false,
IsPullRequest: true,
IsApproved: isApproved,
IsReviewRequired: isReviewRequired,
IsChangeRequested: isChangeRequested,
Mergeable: mergeable,
ChangedFiles: int(p.ChangedFiles),
HoursElapsedSinceCreated: int(now.Sub(p.CreatedAt.Time).Hours()),
HoursElapsedSinceUpdated: int(now.Sub(p.UpdatedAt.Time).Hours()),
NumberOfComments: len(p.Comments.Nodes),
LatestCommentAuthor: string(latestComment.Author.Login),
LatestCommentBody: string(latestComment.Body),
NumberOfConsecutiveComments: numComments,
Login: login,
}, nil
}
func (c *Client) getCodeOwners(ctx context.Context, p pullRequestNode) ([]string, error) {
// Get CODEOWNERS file
var cc string
for _, path := range []string{".github/CODEOWNERS", "docs/CODEOWNERS"} {
f, _, _, err := c.v3.Repositories.GetContents(ctx, c.owner, c.repo, path, &github.RepositoryContentGetOptions{
Ref: string(p.HeadRefName),
})
if err != nil {
continue
}
switch *f.Encoding {
case "base64":
if f.Content == nil {
break
}
c, err := base64.StdEncoding.DecodeString(*f.Content)
if err != nil {
break
}
cc = string(c)
case "":
if f.Content == nil {
cc = ""
} else {
cc = *f.Content
}
default:
break
}
break
}
if cc == "" {
return []string{}, nil
}
d, err := codeowners.FromReader(strings.NewReader(cc), ".")
if err != nil {
return nil, err
}
var cursor string
co := []string{}
var q struct {
Repogitory struct {
PullRequest pullRequestFilesNode `graphql:"pullRequest(number: $number)"`
} `graphql:"repository(owner: $owner, name: $repo)"`
}
for {
variables := map[string]interface{}{
"owner": githubv4.String(c.owner),
"repo": githubv4.String(c.repo),
"number": p.Number,
"limit": githubv4.Int(limit),
"cursor": githubv4.String(cursor),
}
if err := c.v4.Query(ctx, &q, variables); err != nil {
return nil, err
}
for _, f := range q.Repogitory.PullRequest.Files.Nodes {
co = append(co, d.Owners(string(f.Path))...)
}
if !q.Repogitory.PullRequest.Files.PageInfo.HasNextPage {
break
}
cursor = string(q.Repogitory.PullRequest.Files.PageInfo.EndCursor)
}
codeOwners := []string{}
for _, o := range unique(co) {
codeOwners = append(codeOwners, strings.TrimPrefix(o, "@"))
}
return codeOwners, nil
}
type GitHubEvent struct {
Name string
Number int
State string
Payload interface{}
}
func DecodeGitHubEvent() (*GitHubEvent, error) {
i := &GitHubEvent{}
n := os.Getenv("GITHUB_EVENT_NAME")
if n == "" {
return i, fmt.Errorf("env %s is not set", "GITHUB_EVENT_NAME")
}
i.Name = n
p := os.Getenv("GITHUB_EVENT_PATH")
if p == "" {
return i, fmt.Errorf("env %s is not set", "GITHUB_EVENT_PATH")
}
b, err := ioutil.ReadFile(filepath.Clean(p))
if err != nil {
return i, err
}
s := struct {
PullRequest struct {
Number int `json:"number,omitempty"`
State string `json:"state,omitempty"`
} `json:"pull_request,omitempty"`
Issue struct {
Number int `json:"number,omitempty"`
State string `json:"state,omitempty"`
} `json:"issue,omitempty"`
}{}
if err := json.Unmarshal(b, &s); err != nil {
return i, err
}
switch {
case s.PullRequest.Number > 0:
i.Number = s.PullRequest.Number
i.State = s.PullRequest.State
case s.Issue.Number > 0:
i.Number = s.Issue.Number
i.State = s.Issue.State
}
var payload interface{}
if err := json.Unmarshal(b, &payload); err != nil {
return i, err
}
i.Payload = payload
return i, nil
}
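// DecodeGitHubEvent only needs the number and state fields; a hypothetical
// GITHUB_EVENT_PATH payload fragment it understands looks like:
//
//	{"issue": {"number": 42, "state": "open"}}
//
// or, for pull requests:
//
//	{"pull_request": {"number": 7, "state": "open"}}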
func httpClient(token string) *http.Client {
t := &http.Transport{
Dial: (&net.Dialer{
Timeout: 5 * time.Second,
}).Dial,
TLSHandshakeTimeout: 5 * time.Second,
}
rt := roundTripper{
transport: t,
accessToken: token,
}
return &http.Client{
Timeout: time.Second * 10,
Transport: rt,
}
}
func contains(s []string, e string) bool {
for _, v := range s {
if e == v {
return true
}
}
return false
}
func unique(in []string) []string {
u := []string{}
m := map[string]struct{}{}
for _, s := range in {
if _, ok := m[s]; ok {
continue
}
u = append(u, s)
m[s] = struct{}{}
}
return u
}
|
[
"\"GITHUB_TOKEN\"",
"\"GITHUB_API_URL\"",
"\"GITHUB_GRAPHQL_URL\"",
"\"GITHUB_REPOSITORY\"",
"\"GITHUB_EVENT_NAME\"",
"\"GITHUB_EVENT_PATH\""
] |
[] |
[
"GITHUB_REPOSITORY",
"GITHUB_GRAPHQL_URL",
"GITHUB_EVENT_PATH",
"GITHUB_EVENT_NAME",
"GITHUB_API_URL",
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_REPOSITORY", "GITHUB_GRAPHQL_URL", "GITHUB_EVENT_PATH", "GITHUB_EVENT_NAME", "GITHUB_API_URL", "GITHUB_TOKEN"]
|
go
| 6 | 0 | |
src/sentry/runner/commands/cleanup.py
|
"""
sentry.runner.commands.cleanup
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import os
from datetime import timedelta
from uuid import uuid4
import click
from django.utils import timezone
from sentry.runner.decorators import log_options
from six.moves import xrange
# allows services like tagstore to add their own (abstracted) models
# to cleanup
EXTRA_BULK_QUERY_DELETES = []
def get_project(value):
from sentry.models import Project
try:
if value.isdigit():
return int(value)
if '/' not in value:
return None
org, proj = value.split('/', 1)
return Project.objects.get_from_cache(
organization__slug=org,
slug=proj,
).id
except Project.DoesNotExist:
return None
# We need a unique value to indicate when to stop the multiprocessing queue;
# the identity of an object() isn't guaranteed to be preserved between the
# parent and child processes.
_STOP_WORKER = '91650ec271ae4b3e8a67cdc909d80f8c'
def multiprocess_worker(task_queue):
# Configure within each Process
import logging
from sentry.utils.imports import import_string
logger = logging.getLogger('sentry.cleanup')
configured = False
while True:
j = task_queue.get()
if j == _STOP_WORKER:
task_queue.task_done()
return
# On first task, configure Sentry environment
if not configured:
from sentry.runner import configure
configure()
from sentry import models
from sentry import deletions
from sentry import similarity
skip_models = [
# Handled by other parts of cleanup
models.Event,
models.EventMapping,
models.EventAttachment,
models.UserReport,
models.Group,
models.GroupEmailThread,
models.GroupRuleStatus,
# Handled by TTL
similarity.features,
] + [b[0] for b in EXTRA_BULK_QUERY_DELETES]
configured = True
model, chunk = j
model = import_string(model)
try:
task = deletions.get(
model=model,
query={'id__in': chunk},
skip_models=skip_models,
transaction_id=uuid4().hex,
)
while True:
if not task.chunk():
break
except Exception as e:
logger.exception(e)
finally:
task_queue.task_done()
@click.command()
@click.option('--days', default=30, show_default=True, help='Numbers of days to truncate on.')
@click.option('--project', help='Limit truncation to only entries from project.')
@click.option(
'--concurrency',
type=int,
default=1,
show_default=True,
help='The total number of concurrent worker processes to run.'
)
@click.option(
'--silent', '-q', default=False, is_flag=True, help='Run quietly. No output on success.'
)
@click.option('--model', '-m', multiple=True)
@click.option('--router', '-r', default=None, help='Database router')
@click.option(
'--timed',
'-t',
default=False,
is_flag=True,
help='Send the duration of this command to internal metrics.'
)
@log_options()
def cleanup(days, project, concurrency, silent, model, router, timed):
"""Delete a portion of trailing data based on creation date.
All data older than `--days` (default: 30 days) will be deleted. By
default all projects are truncated; to limit the cleanup to a single
project, pass the `--project` flag, which accepts a project ID or a
string of the form `org/project` where both parts are slugs.
"""
if concurrency < 1:
click.echo('Error: Minimum concurrency is 1', err=True)
raise click.Abort()
os.environ['_SENTRY_CLEANUP'] = '1'
# Make sure we fork off multiprocessing pool
# before we import or configure the app
from multiprocessing import Process, JoinableQueue as Queue
pool = []
task_queue = Queue(1000)
for _ in xrange(concurrency):
p = Process(target=multiprocess_worker, args=(task_queue,))
p.daemon = True
p.start()
pool.append(p)
from sentry.runner import configure
configure()
from django.db import router as db_router
from sentry.app import nodestore
from sentry.db.deletion import BulkDeleteQuery
from sentry import models
if timed:
import time
from sentry.utils import metrics
start_time = time.time()
# list of models which this query is restricted to
model_list = {m.lower() for m in model}
def is_filtered(model):
if router is not None and db_router.db_for_write(model) != router:
return True
if not model_list:
return False
return model.__name__.lower() not in model_list
# Deletions that use `BulkDeleteQuery` (and don't need to worry about child relations)
# (model, datetime_field, order_by)
BULK_QUERY_DELETES = [
(models.EventMapping, 'date_added', '-date_added'),
(models.EventAttachment, 'date_added', None),
(models.UserReport, 'date_added', None),
(models.GroupEmailThread, 'date', None),
(models.GroupRuleStatus, 'date_added', None),
] + EXTRA_BULK_QUERY_DELETES
# Deletions that use the `deletions` code path (which handles their child relations)
# (model, datetime_field, order_by)
DELETES = (
(models.Event, 'datetime', 'datetime'),
(models.Group, 'last_seen', 'last_seen'),
)
if not silent:
click.echo('Removing expired values for LostPasswordHash')
if is_filtered(models.LostPasswordHash):
if not silent:
click.echo('>> Skipping LostPasswordHash')
else:
models.LostPasswordHash.objects.filter(
date_added__lte=timezone.now() - timedelta(hours=48)
).delete()
if not silent:
click.echo('Removing expired values for OrganizationMember')
if is_filtered(models.OrganizationMember):
if not silent:
click.echo('>> Skipping OrganizationMember')
else:
expired_threshold = timezone.now() - timedelta(days=days)
models.OrganizationMember.delete_expired(expired_threshold)
for model in [models.ApiGrant, models.ApiToken]:
if not silent:
click.echo(u'Removing expired values for {}'.format(model.__name__))
if is_filtered(model):
if not silent:
click.echo(u'>> Skipping {}'.format(model.__name__))
else:
model.objects.filter(expires_at__lt=timezone.now()).delete()
project_id = None
if project:
click.echo(
"Bulk NodeStore deletion not available for project selection", err=True)
project_id = get_project(project)
if project_id is None:
click.echo('Error: Project not found', err=True)
raise click.Abort()
else:
if not silent:
click.echo("Removing old NodeStore values")
cutoff = timezone.now() - timedelta(days=days)
try:
nodestore.cleanup(cutoff)
except NotImplementedError:
click.echo(
"NodeStore backend does not support cleanup operation", err=True)
for bqd in BULK_QUERY_DELETES:
if len(bqd) == 4:
model, dtfield, order_by, chunk_size = bqd
else:
chunk_size = 10000
model, dtfield, order_by = bqd
if not silent:
click.echo(
u"Removing {model} for days={days} project={project}".format(
model=model.__name__,
days=days,
project=project or '*',
)
)
if is_filtered(model):
if not silent:
click.echo('>> Skipping %s' % model.__name__)
else:
BulkDeleteQuery(
model=model,
dtfield=dtfield,
days=days,
project_id=project_id,
order_by=order_by,
).execute(chunk_size=chunk_size)
for model, dtfield, order_by in DELETES:
if not silent:
click.echo(
u"Removing {model} for days={days} project={project}".format(
model=model.__name__,
days=days,
project=project or '*',
)
)
if is_filtered(model):
if not silent:
click.echo('>> Skipping %s' % model.__name__)
else:
imp = '.'.join((model.__module__, model.__name__))
q = BulkDeleteQuery(
model=model,
dtfield=dtfield,
days=days,
project_id=project_id,
order_by=order_by,
)
for chunk in q.iterator(chunk_size=100):
task_queue.put((imp, chunk))
task_queue.join()
# Clean up FileBlob instances which are no longer used and aren't super
# recent (as there could be a race between blob creation and reference)
if not silent:
click.echo("Cleaning up unused FileBlob references")
if is_filtered(models.FileBlob):
if not silent:
click.echo('>> Skipping FileBlob')
else:
cleanup_unused_files(silent)
# Shut down our pool
for _ in pool:
task_queue.put(_STOP_WORKER)
# And wait for it to drain
for p in pool:
p.join()
if timed:
duration = int(time.time() - start_time)
metrics.timing('cleanup.duration', duration, instance=router)
click.echo("Clean up took %s second(s)." % duration)
def cleanup_unused_files(quiet=False):
"""
Remove FileBlobs (and thus the actual files) if they are no longer
referenced by any File.
We set a minimum-age on the query to ensure that we don't try to remove
any blobs which are brand new and potentially in the process of being
referenced.
"""
from sentry.models import File, FileBlob, FileBlobIndex
if quiet:
from sentry.utils.query import RangeQuerySetWrapper
else:
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar as RangeQuerySetWrapper
cutoff = timezone.now() - timedelta(days=1)
queryset = FileBlob.objects.filter(
timestamp__lte=cutoff,
)
for blob in RangeQuerySetWrapper(queryset):
if FileBlobIndex.objects.filter(blob=blob).exists():
continue
if File.objects.filter(blob=blob).exists():
continue
blob.delete()
|
[] |
[] |
[
"_SENTRY_CLEANUP"
] |
[]
|
["_SENTRY_CLEANUP"]
|
python
| 1 | 0 | |
pytorch_lightning/trainer/trainer.py
|
"""
The trainer handles all the logic for running a val loop, training loop, distributing, etc.
"""
import os
import sys
import warnings
import logging
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import tqdm
from torch.optim.optimizer import Optimizer
from pytorch_lightning.trainer.auto_mix_precision import TrainerAMPMixin
from pytorch_lightning.trainer.callback_config import TrainerCallbackConfigMixin
from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin
from pytorch_lightning.trainer.distrib_data_parallel import TrainerDDPMixin
from pytorch_lightning.trainer.distrib_parts import (
TrainerDPMixin,
parse_gpu_ids,
determine_root_gpu_device
)
from pytorch_lightning.trainer.evaluation_loop import TrainerEvaluationLoopMixin
from pytorch_lightning.trainer.logging import TrainerLoggingMixin
from pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin
from pytorch_lightning.trainer.training_loop import TrainerTrainLoopMixin
from pytorch_lightning.trainer.trainer_io import TrainerIOMixin
from pytorch_lightning.trainer.training_tricks import TrainerTrainingTricksMixin
from pytorch_lightning.utilities.debugging import MisconfigurationException
try:
from apex import amp
APEX_AVAILABLE = True
except ImportError:
APEX_AVAILABLE = False
class Trainer(TrainerIOMixin,
TrainerDPMixin,
TrainerDDPMixin,
TrainerLoggingMixin,
TrainerModelHooksMixin,
TrainerTrainingTricksMixin,
TrainerDataLoadingMixin,
TrainerAMPMixin,
TrainerEvaluationLoopMixin,
TrainerTrainLoopMixin,
TrainerCallbackConfigMixin,
):
def __init__(
self,
logger=True,
checkpoint_callback=True,
early_stop_callback=True,
default_save_path=None,
gradient_clip_val=0,
gradient_clip=None, # backward compatible, todo: remove in v0.8.0
process_position=0,
nb_gpu_nodes=None, # backward compatible, todo: remove in v0.8.0
num_nodes=1,
gpus=None,
log_gpu_memory=None,
show_progress_bar=True,
overfit_pct=0.0,
track_grad_norm=-1,
check_val_every_n_epoch=1,
fast_dev_run=False,
accumulate_grad_batches=1,
max_nb_epochs=None, # backward compatible, todo: remove in v0.8.0
min_nb_epochs=None, # backward compatible, todo: remove in v0.8.0
max_num_epochs=1000,
min_num_epochs=1,
train_percent_check=1.0,
val_percent_check=1.0,
test_percent_check=1.0,
val_check_interval=1.0,
log_save_interval=100,
row_log_interval=10,
add_row_log_interval=None, # backward compatible, todo: remove in v0.8.0
distributed_backend=None,
use_amp=False,
print_nan_grads=False,
weights_summary='full',
weights_save_path=None,
amp_level='O1',
nb_sanity_val_steps=None, # backward compatible, todo: remove in v0.8.0
num_sanity_val_steps=5,
truncated_bptt_steps=None,
resume_from_checkpoint=None,
):
"""
:param logger: Logger for experiment tracking
:param checkpoint_callback: Callback for checkpointing
:param early_stop_callback: Callback for early stopping
:param str default_save_path: Default path for logs+weights if no logger/ckpt_callback passed
:param int gradient_clip_val: 0 means don't clip.
:param int gradient_clip: 0 means don't clip. Deprecated.
:param process_position: shown in the tqdm bar
:param int num_nodes: number of GPU nodes
:param list|str|int gpus: int. (ie: 2 gpus) OR list to specify which GPUs [0, 1] OR '0,1'
OR '-1' / -1 to use all available gpus
:param str log_gpu_memory: None, 'min_max', 'all'
:param bool show_progress_bar: If true shows tqdm bar
:param float overfit_pct: uses this much of all datasets
:param int track_grad_norm: -1 no tracking. Otherwise tracks that norm
:param int check_val_every_n_epoch: check val every n train epochs
:param bool fast_dev_run: runs full iteration over everything to find bugs
:param int accumulate_grad_batches: Accumulates grads every k batches
:param int max_num_epochs:
:param int min_num_epochs:
:param int train_percent_check: How much of train set to check
:param int val_percent_check: How much of val set to check
:param int test_percent_check: How much of test set to check
:param float|int val_check_interval: If float, % of tng epoch. If int, check every n batch
:param int log_save_interval: Writes logs to disk this often
:param int row_log_interval: How often to add logging rows
:param int add_row_log_interval: How often to add logging rows. Deprecated.
:param str distributed_backend: Options: 'dp', 'ddp', 'ddp2'.
:param bool use_amp: If true uses apex for 16bit precision
:param bool print_nan_grads: Prints nan gradients
:param str weights_summary: Options: 'full', 'top', None to not print.
:param bool weights_save_path: Where to save weights if on cluster
:param str amp_level: Check nvidia docs for level
:param int num_sanity_val_steps: How many val steps before a full train loop.
:param int truncated_bptt_steps: Enables multiple backward passes for each batch.
.. warning:: Following arguments become deprecated and they will be removed in v0.8.0:
- `gradient_clip`,
- `nb_gpu_nodes`,
- `max_nb_epochs`,
- `min_nb_epochs`,
- `add_row_log_interval`,
- `nb_sanity_val_steps`
"""
# Transfer params
if nb_gpu_nodes is not None: # Backward compatibility
warnings.warn("`nb_gpu_nodes` has renamed to `num_nodes` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not num_nodes: # in case you did not set the proper value
num_nodes = nb_gpu_nodes
self.num_gpu_nodes = num_nodes
self.log_gpu_memory = log_gpu_memory
if gradient_clip is not None: # Backward compatibility
warnings.warn("`gradient_clip` has renamed to `gradient_clip_val` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not gradient_clip_val: # in case you did not set the proper value
gradient_clip_val = gradient_clip
self.gradient_clip_val = gradient_clip_val
self.check_val_every_n_epoch = check_val_every_n_epoch
self.track_grad_norm = track_grad_norm
self.on_gpu = True if (gpus and torch.cuda.is_available()) else False
self.process_position = process_position
self.weights_summary = weights_summary
if max_nb_epochs is not None: # Backward compatibility
warnings.warn("`max_nb_epochs` has renamed to `max_num_epochs` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not max_num_epochs: # in case you did not set the proper value
max_num_epochs = max_nb_epochs
self.max_num_epochs = max_num_epochs
if min_nb_epochs is not None: # Backward compatibility
warnings.warn("`min_nb_epochs` has renamed to `min_num_epochs` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not min_num_epochs: # in case you did not set the proper value
min_num_epochs = min_nb_epochs
self.min_num_epochs = min_num_epochs
if nb_sanity_val_steps is not None: # Backward compatibility
warnings.warn("`nb_sanity_val_steps` has renamed to `num_sanity_val_steps` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not num_sanity_val_steps: # in case you did not set the proper value
num_sanity_val_steps = nb_sanity_val_steps
self.num_sanity_val_steps = num_sanity_val_steps
self.print_nan_grads = print_nan_grads
self.truncated_bptt_steps = truncated_bptt_steps
self.resume_from_checkpoint = resume_from_checkpoint
self.shown_warnings = set()
self.fast_dev_run = fast_dev_run
if self.fast_dev_run:
self.num_sanity_val_steps = 1
self.max_num_epochs = 1
m = '''
Running in fast_dev_run mode: will run a full train,
val loop using a single batch
'''
logging.info(m)
# set default save path if user didn't provide one
self.default_save_path = default_save_path
if self.default_save_path is None:
self.default_save_path = os.getcwd()
# training bookkeeping
self.total_batch_idx = 0
self.running_loss = []
self.avg_loss = 0
self.batch_idx = 0
self.tqdm_metrics = {}
self.callback_metrics = {}
self.num_val_batches = 0
self.num_training_batches = 0
self.num_test_batches = 0
self.get_train_dataloader = None
self.get_test_dataloaders = None
self.get_val_dataloaders = None
self.is_iterable_train_dataloader = False
# training state
self.model = None
self.testing = False
self.lr_schedulers = []
self.optimizers = None
self.global_step = 0
self.current_epoch = 0
self.total_batches = 0
# configure early stop callback
# creates a default one if none passed in
self.early_stop_callback = None
self.configure_early_stopping(early_stop_callback, logger)
self.reduce_lr_on_plateau_scheduler = None
# configure checkpoint callback
self.checkpoint_callback = checkpoint_callback
self.weights_save_path = weights_save_path
# accumulated grads
self.configure_accumulated_gradients(accumulate_grad_batches)
# allow int, string and gpu list
self.data_parallel_device_ids = parse_gpu_ids(gpus)
self.root_gpu = determine_root_gpu_device(self.data_parallel_device_ids)
# distributed backend choice
self.use_ddp = False
self.use_ddp2 = False
self.use_dp = False
self.single_gpu = False
self.distributed_backend = distributed_backend
self.set_distributed_mode(distributed_backend, num_nodes)
# init flags for SLURM+ddp to work
self.proc_rank = 0
self.world_size = 1
self.node_rank = 0
self.configure_slurm_ddp(num_nodes)
# nvidia setup
self.set_nvidia_flags(self.is_slurm_managing_tasks, self.data_parallel_device_ids)
# can't init progress bar here because starting a new process
# means the progress_bar won't survive pickling
self.show_progress_bar = show_progress_bar
# logging
self.log_save_interval = log_save_interval
self.val_check_interval = val_check_interval
if add_row_log_interval is not None:
# backward compatibility
warnings.warn("`add_row_log_interval` has renamed to `row_log_interval` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not row_log_interval: # in case you did not set the proper value
row_log_interval = add_row_log_interval
self.row_log_interval = row_log_interval
# how much of the data to use
self.determine_data_use_amount(train_percent_check, val_percent_check,
test_percent_check, overfit_pct)
# 16 bit mixed precision training using apex
self.amp_level = amp_level
self.init_amp(use_amp)
@property
def slurm_job_id(self):
try:
job_id = os.environ['SLURM_JOB_ID']
job_id = int(job_id)
except Exception:
job_id = None
return job_id
def __parse_gpu_ids(self, gpus):
"""Parse GPUs id.
:param list|str|int gpus: input GPU ids
:return list(int):
"""
# if gpus = -1 then use all available devices
# otherwise, split the string using commas
if gpus is not None:
if isinstance(gpus, list):
gpus = gpus
elif isinstance(gpus, str):
if gpus == '-1':
gpus = list(range(0, torch.cuda.device_count()))
else:
gpus = [int(x.strip()) for x in gpus.split(',')]
elif isinstance(gpus, int):
gpus = gpus
else:
raise ValueError('`gpus` has to be a string, int or list of ints')
return gpus
def __set_root_gpu(self, gpus):
if gpus is None:
return None
# set root gpu
root_gpu = 0
if type(gpus) is list:
root_gpu = gpus[0]
return root_gpu
@property
def num_gpus(self):
gpus = self.data_parallel_device_ids
if gpus is None:
return 0
else:
return len(gpus)
@property
def data_parallel(self):
return self.use_dp or self.use_ddp or self.use_ddp2
@property
def training_tqdm_dict(self):
"""Read-only for tqdm metrics.
:return:
"""
tqdm_dict = {
'loss': '{0:.3f}'.format(self.avg_loss),
'batch_idx': '{}'.format(self.batch_idx),
}
if self.truncated_bptt_steps is not None:
tqdm_dict['split_idx'] = self.split_idx
if self.logger is not None and self.logger.version is not None:
tqdm_dict['v_nb'] = self.logger.version
tqdm_dict.update(self.tqdm_metrics)
if self.on_gpu:
tqdm_dict['gpu'] = '{}'.format(torch.cuda.current_device())
return tqdm_dict
@property
def tng_tqdm_dic(self):
"""Read-only for tqdm metrics.
.. warning:: Deprecated in v0.5.0. use training_tqdm_dict instead.
:return:
"""
warnings.warn("`tng_tqdm_dic` has renamed to `training_tqdm_dict` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
return self.training_tqdm_dict
# -----------------------------
# MODEL TRAINING
# -----------------------------
def fit(self, model):
# when using multi-node or DDP within a node start each module in a separate process
if self.use_ddp2:
task = int(os.environ['SLURM_LOCALID'])
self.ddp_train(task, model)
elif self.use_ddp:
if self.is_slurm_managing_tasks:
task = int(os.environ['SLURM_LOCALID'])
self.ddp_train(task, model)
else:
mp.spawn(self.ddp_train, nprocs=self.num_gpus, args=(model,))
# 1 gpu or dp option triggers training using DP module
# easier to avoid NCCL issues
elif self.use_dp:
self.dp_train(model)
elif self.single_gpu:
self.single_gpu_train(model)
# ON CPU
else:
# run through amp wrapper
if self.use_amp:
raise MisconfigurationException('amp + cpu is not supported. Please use a GPU option')
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
self.run_pretrain_routine(model)
# return 1 when finished
# used for testing or when we need to know that training succeeded
return 1
def init_optimizers(self, optimizers):
# single optimizer
if isinstance(optimizers, Optimizer):
return [optimizers], []
# two lists
elif len(optimizers) == 2 and isinstance(optimizers[0], list):
optimizers, lr_schedulers = optimizers
lr_schedulers, self.reduce_lr_on_plateau_scheduler = self.configure_schedulers(lr_schedulers)
return optimizers, lr_schedulers
# single list or tuple
elif isinstance(optimizers, list) or isinstance(optimizers, tuple):
return optimizers, []
def configure_schedulers(self, schedulers):
for i, scheduler in enumerate(schedulers):
if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
reduce_lr_on_plateau_scheduler = schedulers.pop(i)
return schedulers, reduce_lr_on_plateau_scheduler
return schedulers, None
def run_pretrain_routine(self, model):
"""Sanity check a few things before starting actual training.
:param model:
"""
ref_model = model
if self.data_parallel:
ref_model = model.module
# give model convenience properties
ref_model.trainer = self
# set local properties on the model
self.copy_trainer_model_properties(ref_model)
# link up experiment object
if self.logger is not None:
ref_model.logger = self.logger
# save exp to get started
if hasattr(ref_model, "hparams"):
self.logger.log_hyperparams(ref_model.hparams)
self.logger.save()
if self.use_ddp or self.use_ddp2:
dist.barrier()
# set up checkpoint callback
self.configure_checkpoint_callback()
# register auto-resubmit when on SLURM
self.register_slurm_signal_handlers()
# transfer data loaders from model
self.get_dataloaders(ref_model)
# print model summary
if self.proc_rank == 0 and self.weights_summary is not None:
if self.weights_summary in ['full', 'top']:
ref_model.summarize(mode=self.weights_summary)
else:
m = "weights_summary can be None, 'full' or 'top'"
raise MisconfigurationException(m)
# track model now.
# if cluster resets state, the model will update with the saved weights
self.model = model
# restore training and model before hpc call
self.restore_weights(model)
# when testing requested only run test and return
if self.testing:
self.run_evaluation(test=True)
return
# run tiny validation (if validation defined)
# to make sure program won't crash during val
ref_model.on_sanity_check_start()
if self.get_val_dataloaders() is not None and self.num_sanity_val_steps > 0:
# init progress bars for validation sanity check
pbar = tqdm.tqdm(desc='Validation sanity check', total=self.num_sanity_val_steps,
leave=False, position=2 * self.process_position,
disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch')
self.main_progress_bar = pbar
# dummy validation progress bar
self.val_progress_bar = tqdm.tqdm(disable=True)
self.evaluate(model, self.get_val_dataloaders(), self.num_sanity_val_steps, self.testing)
# close progress bars
self.main_progress_bar.close()
self.val_progress_bar.close()
# init progress bar
pbar = tqdm.tqdm(leave=True, position=2 * self.process_position,
disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch',
file=sys.stdout)
self.main_progress_bar = pbar
# clear cache before training
if self.on_gpu:
torch.cuda.empty_cache()
# CORE TRAINING LOOP
self.train()
def test(self, model=None):
self.testing = True
if model is not None:
self.fit(model)
else:
self.run_evaluation(test=True)
|
[] |
[] |
[
"SLURM_JOB_ID",
"SLURM_LOCALID"
] |
[]
|
["SLURM_JOB_ID", "SLURM_LOCALID"]
|
python
| 2 | 0 | |
conanfile.py
|
import os
from conans import ConanFile, tools
from conans.errors import ConanException, ConanInvalidConfiguration
from conans.tools import Version
class BotanConan(ConanFile):
name = 'botan'
version = '2.12.1'
url = "https://github.com/bincrafters/conan-botan"
homepage = "https://github.com/randombit/botan"
license = "BSD 2-clause"
exports = ["LICENSE.md", "patches/*"]
description = "Botan is a cryptography library written in C++11."
settings = 'os', 'arch', 'compiler', 'build_type'
options = {
'amalgamation': [True, False],
'bzip2': [True, False],
'debug_info': [True, False],
'openssl': [True, False],
'quiet': [True, False],
'shared': [True, False],
'fPIC': [True, False],
'single_amalgamation': [True, False],
'sqlite3': [True, False],
'zlib': [True, False],
'boost': [True, False],
'enable_modules': "ANY",
'system_cert_bundle': "ANY"
}
default_options = {'amalgamation': True,
'bzip2': False,
'debug_info': False,
'openssl': False,
'quiet': True,
'shared': True,
'fPIC': True,
'single_amalgamation': False,
'sqlite3': False,
'zlib': False,
'boost': False,
'enable_modules': None,
'system_cert_bundle': None}
def configure(self):
msvc_too_old = self.settings.os == "Windows" and \
self.settings.compiler == "Visual Studio" and \
Version(self.settings.compiler.version.value) < "14"
if msvc_too_old:
raise ConanInvalidConfiguration("Botan doesn't support MSVC < 14")
if self.options.boost:
self.options["boost"].add("shared=False")
self.options["boost"].add("magic_autolink=False")
self.options["boost"].add("without_coroutine=False")
self.options["boost"].add("without_system=False")
def build_requirements(self):
if self.settings.os == "Windows":
self.build_requires("jom_installer/1.1.2@bincrafters/stable")
def requirements(self):
if self.options.bzip2:
self.requires('bzip2/1.0.6')
if self.options.openssl:
self.requires('openssl/1.0.2t')
if self.options.zlib:
self.requires('zlib/1.2.11')
if self.options.sqlite3:
self.requires('sqlite3/3.25.3@bincrafters/stable')
if self.options.boost:
self.requires("boost/1.69.0@conan/stable")
def config_options(self):
if self.settings.compiler != 'Visual Studio':
self.check_cxx_abi_settings()
if self.options.single_amalgamation:
self.options.amalgamation = True
if self.settings.os == "Windows":
del self.options.fPIC
def source(self):
tools.get("{0}/archive/{1}.tar.gz".format(self.homepage, self.version))
extracted_dir = "botan-" + self.version
os.rename(extracted_dir, "sources")
def build(self):
with tools.chdir('sources'):
self.run(self._configure_cmd)
self.run(self._make_cmd)
def package(self):
self.copy(pattern="license.txt", dst="licenses", src="sources")
with tools.chdir("sources"):
self.run(self._make_install_cmd)
def package_info(self):
if self.settings.compiler == 'Visual Studio':
self.cpp_info.libs.append('botan')
else:
self.cpp_info.libs.extend(['botan-2'])
if self.settings.os != 'Windows':
self.cpp_info.libs.append('dl')
if self.settings.os == 'Linux':
self.cpp_info.libs.append('rt')
if self.settings.os == 'Macos':
self.cpp_info.exelinkflags = ['-framework Security', '-framework CoreFoundation']
if not self.options.shared:
self.cpp_info.libs.append('pthread')
if self.settings.os == "Windows":
self.cpp_info.libs.extend(["ws2_32", "Crypt32"])
self.cpp_info.libdirs = ['lib']
self.cpp_info.bindirs = ['lib', 'bin']
self.cpp_info.includedirs = ['include/botan-2']
@property
def _is_mingw_windows(self):
return self.settings.os == "Windows" and self.settings.compiler == "gcc" and os.name == "nt"
@property
def _botan_os(self):
if self._is_mingw_windows:
return "mingw"
return {"Windows": "windows",
"Linux": "linux",
"Macos": "darwin",
"Android": "linux",
"iOS": "ios"}.get(str(self.settings.os))
def _dependency_build_flags(self, dependency):
# Since botan has a custom build system, we need to specifically inject
# these build parameters so that it picks up the correct dependencies.
dep_cpp_info = self.deps_cpp_info[dependency]
return \
['--with-external-includedir={}'.format(include_path) for include_path in dep_cpp_info.include_paths] + \
['--with-external-libdir={}'.format(lib_path) for lib_path in dep_cpp_info.lib_paths] + \
['--define-build-macro={}'.format(define) for define in dep_cpp_info.defines]
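# Assembles the full ./configure.py invocation from settings, options and dependency paths.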
@property
def _configure_cmd(self):
if self.settings.compiler in ('clang', 'apple-clang'):
botan_compiler = 'clang'
elif self.settings.compiler == 'gcc':
botan_compiler = 'gcc'
else:
botan_compiler = 'msvc'
botan_abi_flags = []
botan_extra_cxx_flags = []
build_flags = []
if self._is_linux_clang_libcxx:
botan_abi_flags.extend(["-stdlib=libc++", "-lc++abi"])
if botan_compiler in ['clang', 'apple-clang', 'gcc']:
if self.settings.arch == "x86":
botan_abi_flags.append('-m32')
elif self.settings.arch == "x86_64":
botan_abi_flags.append('-m64')
if self.settings.os != "Windows" and self.options.fPIC:
botan_extra_cxx_flags.append('-fPIC')
if self.settings.os == "Macos" and self.settings.os.version:
macos_min_version = tools.apple_deployment_target_flag(self.settings.os, self.settings.os.version)
macos_sdk_path = "-isysroot {}".format(tools.XCRun(self.settings).sdk_path)
botan_extra_cxx_flags.extend([macos_min_version, macos_sdk_path])
# This is to work around botan's configure script that *replaces* its
# standard (platform dependent) flags in presence of an environment
# variable ${CXXFLAGS}. Most notably, this would build botan with
# disabled compiler optimizations.
environment_cxxflags = tools.get_env("CXXFLAGS")
if environment_cxxflags:
del os.environ["CXXFLAGS"]
botan_extra_cxx_flags.append(environment_cxxflags)
if self.options.enable_modules:
build_flags.append('--minimized-build')
build_flags.append('--enable-modules={}'.format(self.options.enable_modules))
if self.options.amalgamation:
build_flags.append('--amalgamation')
if self.options.single_amalgamation:
build_flags.append('--single-amalgamation-file')
if self.options.system_cert_bundle:
build_flags.append('--system-cert-bundle={}'.format(self.options.system_cert_bundle))
if self.options.bzip2:
build_flags.append('--with-bzip2')
build_flags.extend(self._dependency_build_flags("bzip2"))
if self.options.openssl:
build_flags.append('--with-openssl')
build_flags.extend(self._dependency_build_flags("OpenSSL"))
if self.options.quiet:
build_flags.append('--quiet')
if self.options.sqlite3:
build_flags.append('--with-sqlite3')
build_flags.extend(self._dependency_build_flags("sqlite3"))
if self.options.zlib:
build_flags.append('--with-zlib')
build_flags.extend(self._dependency_build_flags("zlib"))
if self.options.boost:
build_flags.append('--with-boost')
build_flags.extend(self._dependency_build_flags("boost"))
# required boost libraries are listed in Botan's src/utils/boost/info.txt
# under the <libs></libs> tag...
# Note that boost_system is actually a header-only library as of
# boost 1.69. We are linking this for compatibility with older boost
# versions...
boost_system = [lib for lib in self.deps_cpp_info["boost"].libs if "boost_system" in lib]
if len(boost_system) != 1:
raise ConanException("did not find a comprehensive boost_system library name: " + str(boost_system))
boost_system_name = boost_system[0] + ".lib" if self.settings.os == "Windows" else boost_system[0]
build_flags.append('--boost-library-name={}'.format(boost_system_name))
if self.settings.build_type == 'RelWithDebInfo' or self.options.debug_info:
build_flags.append('--with-debug-info')
if str(self.settings.build_type).lower() == 'debug':
build_flags.append('--debug-mode')
build_targets = ["shared"] if self.options.shared else ["static"]
if self._is_mingw_windows:
build_flags.append('--without-stack-protector')
if self.settings.compiler == 'Visual Studio':
build_flags.append('--msvc-runtime=%s' % str(self.settings.compiler.runtime))
call_python = 'python' if self.settings.os == 'Windows' else ''
prefix = tools.unix_path(self.package_folder) if self._is_mingw_windows else self.package_folder
botan_abi = ' '.join(botan_abi_flags) if botan_abi_flags else ' '
botan_cxx_extras = ' '.join(botan_extra_cxx_flags) if botan_extra_cxx_flags else ' '
configure_cmd = ('{python_call} ./configure.py'
' --build-targets={targets}'
' --distribution-info="Conan"'
' --without-documentation'
' --cc-abi-flags="{abi}"'
' --extra-cxxflags="{cxxflags}"'
' --cc={compiler}'
' --cpu={cpu}'
' --prefix={prefix}'
' --os={os}'
' {build_flags}').format(
python_call=call_python,
targets=",".join(build_targets),
abi=botan_abi,
cxxflags=botan_cxx_extras,
compiler=botan_compiler,
cpu=self.settings.arch,
prefix=prefix,
os=self._botan_os,
build_flags=' '.join(build_flags))
return configure_cmd
@property
def _make_cmd(self):
return self._jom_cmd if self.settings.compiler == 'Visual Studio' else self._gnumake_cmd
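# Enforce a compatible C++ standard-library ABI: GCC > 5 must use libstdc++11,
# and clang must use either libstdc++11 or libc++.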
def check_cxx_abi_settings(self):
compiler = self.settings.compiler
version = float(self.settings.compiler.version.value)
libcxx = compiler.libcxx
if compiler == 'gcc' and version > 5 and libcxx != 'libstdc++11':
raise ConanException(
'Using Botan with GCC > 5 on Linux requires '
'"compiler.libcxx=libstdc++11"')
elif compiler == 'clang' and libcxx not in ['libstdc++11', 'libc++']:
raise ConanException(
'Using Botan with Clang on Linux requires either '
'"compiler.libcxx=libstdc++11" '
'or '
'"compiler.libcxx=libc++"')
@property
def _make_program(self):
return tools.get_env("CONAN_MAKE_PROGRAM", tools.which("make") or tools.which('mingw32-make'))
@property
def _gnumake_cmd(self):
make_ldflags = 'LDFLAGS=-lc++abi' if self._is_linux_clang_libcxx else ''
botan_quiet = '--quiet' if self.options.quiet else ''
make_cmd = ('{ldflags}'
' {make}'
' {quiet}'
' -j{cpucount}').format(
ldflags=make_ldflags,
make=self._make_program,
quiet=botan_quiet,
cpucount=tools.cpu_count())
return make_cmd
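# Builds with jom (parallel nmake) inside an MSVC vcvars environment.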
@property
def _jom_cmd(self):
vcvars = tools.vcvars_command(self.settings)
make_cmd = vcvars + ' && jom -j {}'.format(tools.cpu_count())
return make_cmd
@property
def _make_install_cmd(self):
if self.settings.compiler == 'Visual Studio':
vcvars = tools.vcvars_command(self.settings)
make_install_cmd = vcvars + ' && jom install'
else:
make_install_cmd = '{make} install'.format(make=self._make_program)
return make_install_cmd
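# True for clang + libc++ on Linux, which needs the -stdlib=libc++/-lc++abi flags above.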
@property
def _is_linux_clang_libcxx(self):
return (
self.settings.os == 'Linux' and
self.settings.compiler == 'clang' and
self.settings.compiler.libcxx == 'libc++'
)
|
[] | [] | ["CXXFLAGS"] | [] | ["CXXFLAGS"] | python | 1 | 0 | |
test/e2e/kubectl/kubectl.go
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// OWNER = sig/cli
package kubectl
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"log"
"net"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/elazarl/goproxy"
openapi_v2 "github.com/google/gnostic/openapiv2"
"sigs.k8s.io/yaml"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubectl/pkg/polymorphichelpers"
"k8s.io/kubernetes/pkg/controller"
commonutils "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
uexec "k8s.io/utils/exec"
"k8s.io/utils/pointer"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
updateDemoSelector = "name=update-demo"
guestbookStartupTimeout = 10 * time.Minute
guestbookResponseTimeout = 3 * time.Minute
simplePodSelector = "name=httpd"
simplePodName = "httpd"
simplePodResourceName = "pod/httpd"
httpdDefaultOutput = "It works!"
simplePodPort = 80
pausePodSelector = "name=pause"
pausePodName = "pause"
busyboxPodSelector = "app=busybox1"
busyboxPodName = "busybox1"
kubeCtlManifestPath = "test/e2e/testing-manifests/kubectl"
agnhostControllerFilename = "agnhost-primary-controller.json.in"
agnhostServiceFilename = "agnhost-primary-service.json"
httpdDeployment1Filename = "httpd-deployment1.yaml.in"
httpdDeployment2Filename = "httpd-deployment2.yaml.in"
httpdDeployment3Filename = "httpd-deployment3.yaml.in"
metaPattern = `"kind":"%s","apiVersion":"%s/%s","metadata":{"name":"%s"}`
)
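// unknownFieldMetadataJSON returns a metadata JSON fragment for the given GVK with an
// extra "unknownMeta" field injected, used to exercise strict field validation.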
func unknownFieldMetadataJSON(gvk schema.GroupVersionKind, name string) string {
return fmt.Sprintf(`"kind":"%s","apiVersion":"%s/%s","metadata":{"unknownMeta": "foo", "name":"%s"}`, gvk.Kind, gvk.Group, gvk.Version, name)
}
var (
nautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
httpdImage = imageutils.GetE2EImage(imageutils.Httpd)
busyboxImage = imageutils.GetE2EImage(imageutils.BusyBox)
agnhostImage = imageutils.GetE2EImage(imageutils.Agnhost)
// If this suite still flakes due to timeouts we should change this to framework.PodStartTimeout
podRunningTimeoutArg = fmt.Sprintf("--pod-running-timeout=%s", framework.PodStartShortTimeout.String())
)
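// proxyRegexp extracts the local port that "kubectl proxy" reports it is serving on.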
var proxyRegexp = regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
var schemaFoo = []byte(`description: Foo CRD for Testing
type: object
properties:
spec:
type: object
description: Specification of Foo
properties:
bars:
description: List of Bars and their specs.
type: array
items:
type: object
required:
- name
properties:
name:
description: Name of Bar.
type: string
age:
description: Age of Bar.
type: string
bazs:
description: List of Bazs.
items:
type: string
type: array
status:
description: Status of Foo
type: object
properties:
bars:
description: List of Bars and their statuses.
type: array
items:
type: object
properties:
name:
description: Name of Bar.
type: string
available:
description: Whether the Bar is installed.
type: boolean
quxType:
description: Indicates to external qux type.
pattern: in-tree|out-of-tree
type: string`)
var schemaFooEmbedded = []byte(`description: Foo CRD with an embedded resource
type: object
properties:
spec:
type: object
properties:
template:
type: object
x-kubernetes-embedded-resource: true
properties:
metadata:
type: object
properties:
name:
type: string
spec:
type: object
metadata:
type: object
properties:
name:
type: string`)
// cleanupKubectlInputs deletes everything defined in fileContents from namespace ns and checks
// that everything matching the given selectors in that namespace is correctly stopped.
func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) {
ginkgo.By("using delete to clean up resources")
// Accepts both file paths and raw JSON for backward compatibility, since file path
// dependencies are being removed from this test.
framework.RunKubectlOrDieInput(ns, fileContents, "delete", "--grace-period=0", "--force", "-f", "-")
assertCleanup(ns, selectors...)
}
// assertCleanup asserts that cleanup of a namespace wrt selectors occurred.
func assertCleanup(ns string, selectors ...string) {
var e error
verifyCleanupFunc := func() (bool, error) {
e = nil
for _, selector := range selectors {
resources := framework.RunKubectlOrDie(ns, "get", "rc,svc", "-l", selector, "--no-headers")
if resources != "" {
e = fmt.Errorf("Resources left running after stop:\n%s", resources)
return false, nil
}
pods := framework.RunKubectlOrDie(ns, "get", "pods", "-l", selector, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
if pods != "" {
e = fmt.Errorf("Pods left unterminated after stop:\n%s", pods)
return false, nil
}
}
return true, nil
}
err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, verifyCleanupFunc)
if err != nil {
framework.Failf(e.Error())
}
}
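// readTestFileOrDie reads a manifest from the kubectl testing-manifests directory and
// fails the test immediately on error.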
func readTestFileOrDie(file string) []byte {
data, err := e2etestfiles.Read(path.Join(kubeCtlManifestPath, file))
if err != nil {
framework.Fail(err.Error(), 1)
}
return data
}
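// runKubectlRetryOrDie retries a kubectl command up to five times on optimistic-lock
// ("Operation cannot be fulfilled") conflicts, then asserts that it succeeded.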
func runKubectlRetryOrDie(ns string, args ...string) string {
var err error
var output string
for i := 0; i < 5; i++ {
output, err = framework.RunKubectl(ns, args...)
if err == nil || (!strings.Contains(err.Error(), genericregistry.OptimisticLockErrorMsg) && !strings.Contains(err.Error(), "Operation cannot be fulfilled")) {
break
}
time.Sleep(time.Second)
}
// Expect no errors to be present after retries are finished
// Copied from framework #ExecOrDie
framework.Logf("stdout: %q", output)
framework.ExpectNoError(err)
return output
}
var _ = SIGDescribe("Kubectl client", func() {
defer ginkgo.GinkgoRecover()
f := framework.NewDefaultFramework("kubectl")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
// Reusable cluster state function. This won't be adversely affected by lazy initialization of framework.
clusterState := func() *framework.ClusterVerification {
return f.NewClusterVerification(
f.Namespace,
framework.PodStateVerification{
Selectors: map[string]string{"app": "agnhost"},
ValidPhases: []v1.PodPhase{v1.PodRunning /*v1.PodPending*/},
})
}
forEachPod := func(podFunc func(p v1.Pod)) {
clusterState().ForEach(podFunc)
}
var c clientset.Interface
var ns string
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
// Customized Wait / ForEach wrapper for this test. These demonstrate the
// idiomatic way to wrap the ClusterVerification structs for syntactic sugar in large
// test files.
// Print debug info if atLeast Pods are not found before the timeout
waitForOrFailWithDebug := func(atLeast int) {
pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout)
if err != nil || len(pods) < atLeast {
// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
framework.DumpAllNamespaceInfo(f.ClientSet, ns)
framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err)
}
}
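// debugDiscovery logs the contents of kubectl's discovery cache (~/.kube/cache/discovery)
// to help diagnose flakes caused by stale cached server resources.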
debugDiscovery := func() {
home := os.Getenv("HOME")
if len(home) == 0 {
framework.Logf("no $HOME envvar set")
return
}
cacheDir := filepath.Join(home, ".kube", "cache", "discovery")
err := filepath.Walk(cacheDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// only pay attention to $host_$port/v1/serverresources.json files
subpath := strings.TrimPrefix(path, cacheDir+string(filepath.Separator))
parts := strings.Split(subpath, string(filepath.Separator))
if len(parts) != 3 || parts[1] != "v1" || parts[2] != "serverresources.json" {
return nil
}
framework.Logf("%s modified at %s (current time: %s)", path, info.ModTime(), time.Now())
data, readError := os.ReadFile(path)
if readError != nil {
framework.Logf("%s error: %v", path, readError)
} else {
framework.Logf("%s content: %s", path, string(data))
}
return nil
})
framework.Logf("scanned %s for discovery docs: %v", home, err)
}
ginkgo.Describe("Update Demo", func() {
var nautilus string
ginkgo.BeforeEach(func() {
updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo"
data, err := e2etestfiles.Read(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"))
if err != nil {
framework.Fail(err.Error())
}
nautilus = commonutils.SubstituteImageName(string(data))
})
/*
Release: v1.9
Testname: Kubectl, replication controller
Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2.
*/
framework.ConformanceIt("should create and stop a replication controller ", func() {
defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
ginkgo.By("creating a replication controller")
framework.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})
/*
Release: v1.9
Testname: Kubectl, scale replication controller
Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. Update the replicaset to 1. Number of running instances of the Pod MUST be 1. Update the replicaset to 2. Number of running instances of the Pod MUST be 2.
*/
framework.ConformanceIt("should scale a replication controller ", func() {
defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
ginkgo.By("creating a replication controller")
framework.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
ginkgo.By("scaling down the replication controller")
debugDiscovery()
framework.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m")
validateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
ginkgo.By("scaling up the replication controller")
debugDiscovery()
framework.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m")
validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})
})
ginkgo.Describe("Guestbook application", func() {
forEachGBFile := func(run func(s string)) {
guestbookRoot := "test/e2e/testing-manifests/guestbook"
for _, gbAppFile := range []string{
"agnhost-replica-service.yaml",
"agnhost-primary-service.yaml",
"frontend-service.yaml",
"frontend-deployment.yaml.in",
"agnhost-primary-deployment.yaml.in",
"agnhost-replica-deployment.yaml.in",
} {
data, err := e2etestfiles.Read(filepath.Join(guestbookRoot, gbAppFile))
if err != nil {
framework.Fail(err.Error())
}
contents := commonutils.SubstituteImageName(string(data))
run(contents)
}
}
/*
Release: v1.9
Testname: Kubectl, guestbook application
Description: Create Guestbook application that contains an agnhost primary server, 2 agnhost replicas, frontend application, frontend service and agnhost primary service and agnhost replica service. Using frontend service, the test will write an entry into the guestbook application which will store the entry into the backend agnhost store. Application flow MUST work as expected and the data written MUST be available to read.
*/
framework.ConformanceIt("should create and stop a working application ", func() {
defer forEachGBFile(func(contents string) {
cleanupKubectlInputs(contents, ns)
})
ginkgo.By("creating all guestbook components")
forEachGBFile(func(contents string) {
framework.Logf("%s", contents)
framework.RunKubectlOrDieInput(ns, contents, "create", "-f", "-")
})
ginkgo.By("validating guestbook app")
validateGuestbookApp(c, ns)
})
})
ginkgo.Describe("Simple pod", func() {
var podYaml string
ginkgo.BeforeEach(func() {
ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml))
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in")))
framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout), true)
})
ginkgo.AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, simplePodSelector)
})
ginkgo.It("should support exec", func() {
ginkgo.By("executing a command in the container")
execOutput := framework.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container")
if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
ginkgo.By("executing a very long command in the container")
veryLongData := make([]rune, 20000)
for i := 0; i < len(veryLongData); i++ {
veryLongData[i] = 'a'
}
execOutput = framework.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", string(veryLongData))
framework.ExpectEqual(string(veryLongData), strings.TrimSpace(execOutput), "Unexpected kubectl exec output")
ginkgo.By("executing a command in the container with noninteractive stdin")
execOutput = framework.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "cat").
WithStdinData("abcd1234").
ExecOrDie(ns)
if e, a := "abcd1234", execOutput; e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
// pretend that we're a user in an interactive shell
r, closer, err := newBlockingReader("echo hi\nexit\n")
if err != nil {
framework.Failf("Error creating blocking reader: %v", err)
}
// NOTE this is solely for test cleanup!
defer closer.Close()
ginkgo.By("executing a command in the container with pseudo-interactive stdin")
execOutput = framework.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "sh").
WithStdinReader(r).
ExecOrDie(ns)
if e, a := "hi", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
})
ginkgo.It("should support exec using resource/name", func() {
ginkgo.By("executing a command in the container")
execOutput := framework.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodResourceName, "--", "echo", "running", "in", "container")
if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
})
ginkgo.It("should support exec through an HTTP proxy", func() {
// Fail if the variable isn't set
if framework.TestContext.Host == "" {
framework.Failf("--host variable must be set to the full URI to the api server on e2e run.")
}
ginkgo.By("Starting goproxy")
testSrv, proxyLogs := startLocalProxy()
defer testSrv.Close()
proxyAddr := testSrv.URL
for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} {
proxyLogs.Reset()
ginkgo.By("Running kubectl via an HTTP proxy using " + proxyVar)
output := framework.NewKubectlCommand(ns, "exec", podRunningTimeoutArg, "httpd", "--", "echo", "running", "in", "container").
WithEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))).
ExecOrDie(ns)
// Verify we got the normal output captured by the exec server
expectedExecOutput := "running in container\n"
if output != expectedExecOutput {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
}
// Verify the proxy server logs saw the connection
expectedProxyLog := fmt.Sprintf("Accepting CONNECT to %s", strings.TrimSuffix(strings.TrimPrefix(framework.TestContext.Host, "https://"), "/api"))
proxyLog := proxyLogs.String()
if !strings.Contains(proxyLog, expectedProxyLog) {
framework.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog)
}
}
})
ginkgo.It("should support exec through kubectl proxy", func() {
// Fail if the variable isn't set
if framework.TestContext.Host == "" {
framework.Failf("--host variable must be set to the full URI to the api server on e2e run.")
}
ginkgo.By("Starting kubectl proxy")
port, proxyCmd, err := startProxyServer(ns)
framework.ExpectNoError(err)
defer framework.TryKill(proxyCmd)
//proxyLogs.Reset()
host := fmt.Sprintf("--server=http://127.0.0.1:%d", port)
ginkgo.By("Running kubectl via kubectl proxy using " + host)
output := framework.NewKubectlCommand(
ns, host,
"exec", podRunningTimeoutArg, "httpd", "--", "echo", "running", "in", "container",
).ExecOrDie(ns)
// Verify we got the normal output captured by the exec server
expectedExecOutput := "running in container\n"
if output != expectedExecOutput {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
}
})
ginkgo.Context("should return command exit codes", func() {
ginkgo.It("execing into a container with a successful command", func() {
_, err := framework.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 0").Exec()
framework.ExpectNoError(err)
})
ginkgo.It("execing into a container with a failing command", func() {
_, err := framework.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 42").Exec()
ee, ok := err.(uexec.ExitError)
if !ok {
framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
}
framework.ExpectEqual(ee.ExitStatus(), 42)
})
ginkgo.It("running a successful command", func() {
_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "success", "--", "/bin/sh", "-c", "exit 0").Exec()
framework.ExpectNoError(err)
})
ginkgo.It("running a failing command", func() {
_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
ee, ok := err.(uexec.ExitError)
if !ok {
framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
}
framework.ExpectEqual(ee.ExitStatus(), 42)
})
ginkgo.It("[Slow] running a failing command without --restart=Never", func() {
_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
WithStdinData("abcd1234").
Exec()
ee, ok := err.(uexec.ExitError)
if !ok {
framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
}
if !strings.Contains(ee.String(), "timed out") {
framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
}
})
ginkgo.It("[Slow] running a failing command without --restart=Never, but with --rm", func() {
_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
WithStdinData("abcd1234").
Exec()
ee, ok := err.(uexec.ExitError)
if !ok {
framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
}
if !strings.Contains(ee.String(), "timed out") {
framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
}
e2epod.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)
})
ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func() {
_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
WithStdinData("abcd1234").
Exec()
framework.ExpectNoError(err)
})
})
ginkgo.It("should support inline execution and attach", func() {
waitForStdinContent := func(pod, content string) string {
var logOutput string
err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
logOutput = framework.RunKubectlOrDie(ns, "logs", pod)
return strings.Contains(logOutput, content), nil
})
gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("unexpected error waiting for '%v' output", content))
return logOutput
}
ginkgo.By("executing a command with run and attach with stdin")
// We wait for a non-empty line so we know kubectl has attached
framework.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
WithStdinData("value\nabcd1234").
ExecOrDie(ns)
runOutput := waitForStdinContent("run-test", "stdin closed")
gomega.Expect(runOutput).To(gomega.ContainSubstring("read:value"))
gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test", metav1.DeleteOptions{})).To(gomega.BeNil())
ginkgo.By("executing a command with run and attach without stdin")
// There is a race on this scenario described in #73099
// It fails if we are not able to attach before the container prints
// "stdin closed", but hasn't exited yet.
// We wait 10 seconds before printing to give time to kubectl to attach
// to the container, this does not solve the race though.
framework.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234").
ExecOrDie(ns)
runOutput = waitForStdinContent("run-test-2", "stdin closed")
gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234"))
gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-2", metav1.DeleteOptions{})).To(gomega.BeNil())
ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
framework.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234\n").
ExecOrDie(ns)
runOutput = waitForStdinContent("run-test-3", "abcd1234")
gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("stdin closed"))
g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
framework.ExpectNoError(err)
if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) {
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
}
gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil())
})
ginkgo.It("should contain last line of the log", func() {
podName := "run-log-test"
ginkgo.By("executing a command with run")
framework.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) {
framework.Failf("Pod for run-log-test was not ready")
}
logOutput := framework.RunKubectlOrDie(ns, "logs", "-f", "run-log-test")
gomega.Expect(logOutput).To(gomega.ContainSubstring("EOF"))
})
ginkgo.It("should support port-forward", func() {
ginkgo.By("forwarding the container port to a local port")
cmd := runPortForward(ns, simplePodName, simplePodPort)
defer cmd.Stop()
ginkgo.By("curling local port output")
localAddr := fmt.Sprintf("http://localhost:%d", cmd.port)
body, err := curl(localAddr)
framework.Logf("got: %s", body)
if err != nil {
framework.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err)
}
if !strings.Contains(body, httpdDefaultOutput) {
framework.Failf("Container port output missing expected value. Wanted:'%s', got: %s", httpdDefaultOutput, body)
}
})
ginkgo.It("should handle in-cluster config", func() {
// This test does not work for dynamically linked kubectl binaries; only statically linked ones. The
// problem happens when the kubectl binary is copied to a pod in the cluster. For dynamically linked
// binaries, the necessary libraries are not also copied. For this reason, the test can not be
// guaranteed to work with GKE, which sometimes run tests using a dynamically linked kubectl.
e2eskipper.SkipIfProviderIs("gke")
// TODO: Find a way to download and copy the appropriate kubectl binary, or maybe a multi-arch kubectl image
// for now this only works on amd64
e2eskipper.SkipUnlessNodeOSArchIs("amd64")
ginkgo.By("adding rbac permissions")
// grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace
err := e2eauth.BindClusterRole(f.ClientSet.RbacV1(), "view", f.Namespace.Name,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
framework.ExpectNoError(err)
err = e2eauth.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1(),
serviceaccount.MakeUsername(f.Namespace.Name, "default"),
f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true)
framework.ExpectNoError(err)
ginkgo.By("overriding icc with values provided by flags")
kubectlPath := framework.TestContext.KubectlPath
// we need the actual kubectl binary, not the script wrapper
kubectlPathNormalizer := exec.Command("which", kubectlPath)
if strings.HasSuffix(kubectlPath, "kubectl.sh") {
kubectlPathNormalizer = exec.Command(kubectlPath, "path")
}
kubectlPathNormalized, err := kubectlPathNormalizer.Output()
framework.ExpectNoError(err)
kubectlPath = strings.TrimSpace(string(kubectlPathNormalized))
inClusterHost := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_HOST"))
inClusterPort := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT"))
inClusterURL := net.JoinHostPort(inClusterHost, inClusterPort)
framework.Logf("copying %s to the %s pod", kubectlPath, simplePodName)
framework.RunKubectlOrDie(ns, "cp", kubectlPath, ns+"/"+simplePodName+":/tmp/")
// Build a kubeconfig file that will make use of the injected ca and token,
// but point at the DNS host and the default namespace
tmpDir, err := os.MkdirTemp("", "icc-override")
overrideKubeconfigName := "icc-override.kubeconfig"
framework.ExpectNoError(err)
defer func() { os.Remove(tmpDir) }()
framework.ExpectNoError(os.WriteFile(filepath.Join(tmpDir, overrideKubeconfigName), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
api-version: v1
server: https://kubernetes.default.svc:443
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
name: kubeconfig-cluster
contexts:
- context:
cluster: kubeconfig-cluster
namespace: default
user: kubeconfig-user
name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`), os.FileMode(0755)))
framework.Logf("copying override kubeconfig to the %s pod", simplePodName)
framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/")
framework.ExpectNoError(os.WriteFile(filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
name: "configmap with namespace and invalid name"
namespace: configmap-namespace
`), os.FileMode(0755)))
framework.ExpectNoError(os.WriteFile(filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
name: "configmap without namespace and invalid name"
`), os.FileMode(0755)))
framework.Logf("copying configmap manifests to the %s pod", simplePodName)
framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
ginkgo.By("getting pods with in-cluster configs")
execOutput := framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=6 2>&1")
gomega.Expect(execOutput).To(gomega.MatchRegexp("httpd +1/1 +Running"))
gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster namespace"))
gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration"))
ginkgo.By("creating an object containing a namespace with in-cluster config")
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=6 2>&1")
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterURL)))
ginkgo.By("creating an object not containing a namespace with in-cluster config")
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=6 2>&1")
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/%s/configmaps", inClusterURL, f.Namespace.Name)))
ginkgo.By("trying to use kubectl with invalid token")
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1")
framework.Logf("got err %v", err)
framework.ExpectError(err)
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
gomega.Expect(err).To(gomega.ContainSubstring("Response Status: 401 Unauthorized"))
ginkgo.By("trying to use kubectl with invalid server")
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1")
framework.Logf("got err %v", err)
framework.ExpectError(err)
gomega.Expect(err).To(gomega.ContainSubstring("Unable to connect to the server"))
gomega.Expect(err).To(gomega.ContainSubstring("GET http://invalid/api"))
ginkgo.By("trying to use kubectl with invalid namespace")
execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1")
gomega.Expect(execOutput).To(gomega.ContainSubstring("No resources found"))
gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration"))
gomega.Expect(execOutput).To(gomega.MatchRegexp(fmt.Sprintf("GET http[s]?://[\\[]?%s[\\]]?:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort)))
ginkgo.By("trying to use kubectl with kubeconfig")
execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1")
gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster configuration"))
gomega.Expect(execOutput).To(gomega.ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods"))
})
})
ginkgo.Describe("Kubectl api-versions", func() {
/*
Release: v1.9
Testname: Kubectl, check version v1
Description: Run kubectl to get api versions, output MUST contain returned versions with 'v1' listed.
*/
framework.ConformanceIt("should check if v1 is in available api versions ", func() {
ginkgo.By("validating api versions")
output := framework.RunKubectlOrDie(ns, "api-versions")
if !strings.Contains(output, "v1") {
framework.Failf("No v1 in kubectl api-versions")
}
})
})
ginkgo.Describe("Kubectl get componentstatuses", func() {
ginkgo.It("should get componentstatuses", func() {
ginkgo.By("getting list of componentstatuses")
output := framework.RunKubectlOrDie(ns, "get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}")
components := strings.Split(output, " ")
ginkgo.By("getting details of componentstatuses")
for _, component := range components {
ginkgo.By("getting status of " + component)
framework.RunKubectlOrDie(ns, "get", "componentstatuses", component)
}
})
})
ginkgo.Describe("Kubectl apply", func() {
ginkgo.It("should apply a new configuration to an existing RC", func() {
controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
ginkgo.By("creating Agnhost RC")
framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
ginkgo.By("applying a modified configuration")
stdin := modifyReplicationControllerConfiguration(controllerJSON)
framework.NewKubectlCommand(ns, "apply", "-f", "-").
WithStdinReader(stdin).
ExecOrDie(ns)
ginkgo.By("checking the result")
forEachReplicationController(c, ns, "app", "agnhost", validateReplicationControllerConfiguration)
})
ginkgo.It("should reuse port when apply to an existing SVC", func() {
serviceJSON := readTestFileOrDie(agnhostServiceFilename)
ginkgo.By("creating Agnhost SVC")
framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-")
ginkgo.By("getting the original port")
originalNodePort := framework.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")
ginkgo.By("applying the same configuration")
framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "apply", "-f", "-")
ginkgo.By("getting the port after applying configuration")
currentNodePort := framework.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")
ginkgo.By("checking the result")
if originalNodePort != currentNodePort {
framework.Failf("port should keep the same")
}
})
ginkgo.It("apply set/view last-applied", func() {
deployment1Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment1Filename)))
deployment2Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment2Filename)))
deployment3Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename)))
ginkgo.By("deployment replicas number is 2")
framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "-f", "-")
ginkgo.By("check the last-applied matches expectations annotations")
output := framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
requiredString := "\"replicas\": 2"
if !strings.Contains(output, requiredString) {
framework.Failf("Missing %s in kubectl view-last-applied", requiredString)
}
ginkgo.By("apply file doesn't have replicas")
framework.RunKubectlOrDieInput(ns, deployment2Yaml, "apply", "set-last-applied", "-f", "-")
ginkgo.By("check last-applied has been updated, annotations doesn't have replicas")
output = framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
requiredString = "\"replicas\": 2"
if strings.Contains(output, requiredString) {
framework.Failf("Presenting %s in kubectl view-last-applied", requiredString)
}
ginkgo.By("scale set replicas to 3")
httpdDeploy := "httpd-deployment"
debugDiscovery()
framework.RunKubectlOrDie(ns, "scale", "deployment", httpdDeploy, "--replicas=3")
ginkgo.By("apply file doesn't have replicas but image changed")
framework.RunKubectlOrDieInput(ns, deployment3Yaml, "apply", "-f", "-")
ginkgo.By("verify replicas still is 3 and image has been updated")
output = framework.RunKubectlOrDieInput(ns, deployment3Yaml, "get", "-f", "-", "-o", "json")
requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Httpd)}
for _, item := range requiredItems {
if !strings.Contains(output, item) {
framework.Failf("Missing %s in kubectl apply", item)
}
}
})
})
ginkgo.Describe("Kubectl diff", func() {
/*
Release: v1.19
Testname: Kubectl, diff Deployment
Description: Create a Deployment with httpd image. Declare the same Deployment with a different image, busybox. Diff of live Deployment with declared Deployment MUST include the difference between live and declared image.
*/
framework.ConformanceIt("should check if kubectl diff finds a difference for Deployments", func() {
ginkgo.By("create deployment with httpd image")
deployment := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename)))
framework.RunKubectlOrDieInput(ns, deployment, "create", "-f", "-")
ginkgo.By("verify diff finds difference between live and declared image")
deployment = strings.Replace(deployment, httpdImage, busyboxImage, 1)
if !strings.Contains(deployment, busyboxImage) {
framework.Failf("Failed replacing image from %s to %s in:\n%s\n", httpdImage, busyboxImage, deployment)
}
output, err := framework.RunKubectlInput(ns, deployment, "diff", "-f", "-")
if err, ok := err.(*exec.ExitError); ok && err.ExitCode() == 1 {
framework.Failf("Expected kubectl diff exit code of 1, but got %d: %v\n", err.ExitCode(), err)
}
requiredItems := []string{httpdImage, busyboxImage}
for _, item := range requiredItems {
if !strings.Contains(output, item) {
framework.Failf("Missing %s in kubectl diff output:\n%s\n%v\n", item, output, err)
}
}
framework.RunKubectlOrDieInput(ns, deployment, "delete", "-f", "-")
})
})
ginkgo.Describe("Kubectl server-side dry-run", func() {
/*
Release: v1.19
Testname: Kubectl, server-side dry-run Pod
Description: The command 'kubectl run' must create a pod with the specified image name. After, the command 'kubectl patch pod -p {...} --dry-run=server' should update the Pod with the new image name and server-side dry-run enabled. The image name must not change.
*/
framework.ConformanceIt("should check if kubectl can dry-run update Pods", func() {
ginkgo.By("running the image " + httpdImage)
podName := "e2e-test-httpd-pod"
framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
ginkgo.By("replace the image in the pod with server-side dry-run")
specImage := fmt.Sprintf(`{"spec":{"containers":[{"name": "%s","image": "%s"}]}}`, podName, busyboxImage)
framework.RunKubectlOrDie(ns, "patch", "pod", podName, "-p", specImage, "--dry-run=server")
ginkgo.By("verifying the pod " + podName + " has the right image " + httpdImage)
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting pod %s: %v", podName, err)
}
containers := pod.Spec.Containers
if checkContainersImage(containers, httpdImage) {
framework.Failf("Failed creating pod with expected image %s", httpdImage)
}
framework.RunKubectlOrDie(ns, "delete", "pods", podName)
})
})
// definitionMatchesGVK returns true if the specified GVK is listed as an x-kubernetes-group-version-kind extension
definitionMatchesGVK := func(extensions []*openapi_v2.NamedAny, desiredGVK schema.GroupVersionKind) bool {
for _, extension := range extensions {
if extension.GetValue().GetYaml() == "" ||
extension.GetName() != "x-kubernetes-group-version-kind" {
continue
}
var values []map[string]string
err := yaml.Unmarshal([]byte(extension.GetValue().GetYaml()), &values)
if err != nil {
framework.Logf("%v\n%s", err, string(extension.GetValue().GetYaml()))
continue
}
for _, value := range values {
if value["group"] != desiredGVK.Group {
continue
}
if value["version"] != desiredGVK.Version {
continue
}
if value["kind"] != desiredGVK.Kind {
continue
}
return true
}
}
return false
}
// schemaForGVK returns a schema (if defined) for the specified GVK
schemaForGVK := func(desiredGVK schema.GroupVersionKind) *openapi_v2.Schema {
d, err := f.ClientSet.Discovery().OpenAPISchema()
if err != nil {
framework.Failf("%v", err)
}
if d == nil || d.Definitions == nil {
return nil
}
for _, p := range d.Definitions.AdditionalProperties {
if p == nil || p.Value == nil {
continue
}
if !definitionMatchesGVK(p.Value.VendorExtension, desiredGVK) {
continue
}
return p.Value
}
return nil
}
ginkgo.Describe("Kubectl validation", func() {
ginkgo.It("should create/apply a CR with unknown fields for CRD with no validation schema", func() {
ginkgo.By("create CRD with no validation schema")
crd, err := crd.CreateTestCRD(f)
if err != nil {
framework.Failf("failed to create test CRD: %v", err)
}
defer crd.CleanUp()
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second)
meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
if err := createApplyCustomResource(randomCR, f.Namespace.Name, "test-cr", crd); err != nil {
framework.Failf("%v", err)
}
})
ginkgo.It("should create/apply a valid CR for CRD with validation schema", func() {
ginkgo.By("prepare CRD with validation schema")
crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
props := &apiextensionsv1.JSONSchemaProps{}
if err := yaml.Unmarshal(schemaFoo, props); err != nil {
framework.Failf("failed to unmarshal schema: %v", err)
}
for i := range crd.Spec.Versions {
crd.Spec.Versions[i].Schema = &apiextensionsv1.CustomResourceValidation{OpenAPIV3Schema: props}
}
})
if err != nil {
framework.Failf("failed to create test CRD: %v", err)
}
defer crd.CleanUp()
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second)
meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta)
if err := createApplyCustomResource(validCR, f.Namespace.Name, "test-cr", crd); err != nil {
framework.Failf("%v", err)
}
})
ginkgo.It("should create/apply a valid CR with arbitrary-extra properties for CRD with partially-specified validation schema", func() {
ginkgo.By("prepare CRD with partially-specified validation schema")
crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
props := &apiextensionsv1.JSONSchemaProps{}
if err := yaml.Unmarshal(schemaFoo, props); err != nil {
framework.Failf("failed to unmarshal schema: %v", err)
}
// Allow for arbitrary-extra properties.
props.XPreserveUnknownFields = pointer.BoolPtr(true)
for i := range crd.Spec.Versions {
crd.Spec.Versions[i].Schema = &apiextensionsv1.CustomResourceValidation{OpenAPIV3Schema: props}
}
})
if err != nil {
framework.Failf("failed to create test CRD: %v", err)
}
defer crd.CleanUp()
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second)
schema := schemaForGVK(schema.GroupVersionKind{Group: crd.Crd.Spec.Group, Version: crd.Crd.Spec.Versions[0].Name, Kind: crd.Crd.Spec.Names.Kind})
framework.ExpectNotEqual(schema, nil, "retrieving a schema for the crd")
meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
// unknown fields on the root are considered valid
validArbitraryCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]},"extraProperty":"arbitrary-value"}`, meta)
err = createApplyCustomResource(validArbitraryCR, f.Namespace.Name, "test-cr", crd)
framework.ExpectNoError(err, "creating custom resource")
})
ginkgo.It("should detect unknown metadata fields in both the root and embedded object of a CR", func() {
ginkgo.By("prepare CRD with x-kubernetes-embedded-resource: true")
opt := func(crd *apiextensionsv1.CustomResourceDefinition) {
props := &apiextensionsv1.JSONSchemaProps{}
if err := yaml.Unmarshal(schemaFooEmbedded, props); err != nil {
framework.Failf("failed to unmarshal schema: %v", err)
}
crd.Spec.Versions = []apiextensionsv1.CustomResourceDefinitionVersion{
{
Name: "v1",
Served: true,
Storage: true,
Schema: &apiextensionsv1.CustomResourceValidation{
OpenAPIV3Schema: props,
},
},
}
}
group := fmt.Sprintf("%s.example.com", f.BaseName)
testCRD, err := crd.CreateMultiVersionTestCRD(f, group, opt)
if err != nil {
framework.Failf("failed to create test CRD: %v", err)
}
defer testCRD.CleanUp()
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second)
ginkgo.By("attempting to create a CR with unknown metadata fields at the root level")
gvk := schema.GroupVersionKind{Group: testCRD.Crd.Spec.Group, Version: testCRD.Crd.Spec.Versions[0].Name, Kind: testCRD.Crd.Spec.Names.Kind}
schema := schemaForGVK(gvk)
framework.ExpectNotEqual(schema, nil, "retrieving a schema for the crd")
embeddedCRPattern := `
{%s,
"spec": {
"template": {
"apiVersion": "foo/v1",
"kind": "Sub",
"metadata": {
%s
"name": "subobject",
"namespace": "my-ns"
}
}
}
}`
meta := unknownFieldMetadataJSON(gvk, "test-cr")
unknownRootMetaCR := fmt.Sprintf(embeddedCRPattern, meta, "")
_, err = framework.RunKubectlInput(ns, unknownRootMetaCR, "create", "--validate=true", "-f", "-")
if err == nil {
framework.Failf("unexpected nil error when creating CR with unknown root metadata field")
}
if !(strings.Contains(err.Error(), `unknown field "unknownMeta"`) || strings.Contains(err.Error(), `unknown field "metadata.unknownMeta"`)) {
framework.Failf("error missing root unknown metadata field, got: %v", err)
}
if strings.Contains(err.Error(), `unknown field "namespace"`) || strings.Contains(err.Error(), `unknown field "metadata.namespace"`) {
framework.Failf("unexpected error, CR's root metadata namespace field unrecognized: %v", err)
}
ginkgo.By("attempting to create a CR with unknown metadata fields in the embedded object")
metaEmbedded := fmt.Sprintf(metaPattern, testCRD.Crd.Spec.Names.Kind, testCRD.Crd.Spec.Group, testCRD.Crd.Spec.Versions[0].Name, "test-cr-embedded")
unknownEmbeddedMetaCR := fmt.Sprintf(embeddedCRPattern, metaEmbedded, `"unknownMetaEmbedded": "bar",`)
_, err = framework.RunKubectlInput(ns, unknownEmbeddedMetaCR, "create", "--validate=true", "-f", "-")
if err == nil {
framework.Failf("unexpected nil error when creating CR with unknown embedded metadata field")
}
if !(strings.Contains(err.Error(), `unknown field "unknownMetaEmbedded"`) || strings.Contains(err.Error(), `unknown field "spec.template.metadata.unknownMetaEmbedded"`)) {
framework.Failf("error missing embedded unknown metadata field, got: %v", err)
}
if strings.Contains(err.Error(), `unknown field "namespace"`) || strings.Contains(err.Error(), `unknown field "spec.template.metadata.namespace"`) {
framework.Failf("unexpected error, CR's embedded metadata namespace field unrecognized: %v", err)
}
})
ginkgo.It("should detect unknown metadata fields of a typed object", func() {
ginkgo.By("calling kubectl create deployment")
invalidMetaDeployment := `
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name": "my-dep",
"unknownMeta": "foo",
"labels": {"app": "nginx"}
},
"spec": {
"selector": {
"matchLabels": {
"app": "nginx"
}
},
"template": {
"metadata": {
"labels": {
"app": "nginx"
}
},
"spec": {
"containers": [{
"name": "nginx",
"image": "nginx:latest"
}]
}
}
}
}
`
_, err := framework.RunKubectlInput(ns, invalidMetaDeployment, "create", "-f", "-")
if err == nil {
framework.Failf("unexpected nil error when creating deployment with unknown metadata field")
}
if !(strings.Contains(err.Error(), `unknown field "unknownMeta"`) || strings.Contains(err.Error(), `unknown field "metadata.unknownMeta"`)) {
framework.Failf("error missing unknown metadata field, got: %v", err)
}
if strings.Contains(err.Error(), `unknown field "namespace"`) || strings.Contains(err.Error(), `unknown field "metadata.namespace"`) {
framework.Failf("unexpected error, deployment's metadata namespace field unrecognized: %v", err)
}
})
})
ginkgo.Describe("Kubectl cluster-info", func() {
/*
Release: v1.9
Testname: Kubectl, cluster info
Description: Call kubectl to get cluster-info, output MUST contain cluster-info returned and Kubernetes control plane SHOULD be running.
*/
framework.ConformanceIt("should check if Kubernetes control plane services is included in cluster-info ", func() {
ginkgo.By("validating cluster-info")
output := framework.RunKubectlOrDie(ns, "cluster-info")
// Can't check exact strings due to terminal control commands (colors)
requiredItems := []string{"Kubernetes control plane", "is running at"}
for _, item := range requiredItems {
if !strings.Contains(output, item) {
framework.Failf("Missing %s in kubectl cluster-info", item)
}
}
})
})
ginkgo.Describe("Kubectl cluster-info dump", func() {
ginkgo.It("should check if cluster-info dump succeeds", func() {
ginkgo.By("running cluster-info dump")
framework.RunKubectlOrDie(ns, "cluster-info", "dump")
})
})
ginkgo.Describe("Kubectl describe", func() {
/*
Release: v1.9
Testname: Kubectl, describe pod or rc
Description: Deploy an agnhost controller and an agnhost service. Kubectl describe pods SHOULD return the name, namespace, labels, state and other information as expected. Kubectl describe on rc, service, node and namespace SHOULD also return proper information.
*/
framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods ", func() {
controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
serviceJSON := readTestFileOrDie(agnhostServiceFilename)
framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-")
ginkgo.By("Waiting for Agnhost primary to start.")
waitForOrFailWithDebug(1)
// Pod
forEachPod(func(pod v1.Pod) {
output := framework.RunKubectlOrDie(ns, "describe", "pod", pod.Name)
requiredStrings := [][]string{
{"Name:", "agnhost-primary-"},
{"Namespace:", ns},
{"Node:"},
{"Labels:", "app=agnhost"},
{"role=primary"},
{"Annotations:"},
{"Status:", "Running"},
{"IP:"},
{"Controlled By:", "ReplicationController/agnhost-primary"},
{"Image:", agnhostImage},
{"State:", "Running"},
{"QoS Class:", "BestEffort"},
}
checkOutput(output, requiredStrings)
})
// Rc
requiredStrings := [][]string{
{"Name:", "agnhost-primary"},
{"Namespace:", ns},
{"Selector:", "app=agnhost,role=primary"},
{"Labels:", "app=agnhost"},
{"role=primary"},
{"Annotations:"},
{"Replicas:", "1 current", "1 desired"},
{"Pods Status:", "1 Running", "0 Waiting", "0 Succeeded", "0 Failed"},
{"Pod Template:"},
{"Image:", agnhostImage},
{"Events:"}}
checkKubectlOutputWithRetry(ns, requiredStrings, "describe", "rc", "agnhost-primary")
// Service
output := framework.RunKubectlOrDie(ns, "describe", "service", "agnhost-primary")
requiredStrings = [][]string{
{"Name:", "agnhost-primary"},
{"Namespace:", ns},
{"Labels:", "app=agnhost"},
{"role=primary"},
{"Annotations:"},
{"Selector:", "app=agnhost", "role=primary"},
{"Type:", "ClusterIP"},
{"IP:"},
{"Port:", "<unset>", "6379/TCP"},
{"Endpoints:"},
{"Session Affinity:", "None"}}
checkOutput(output, requiredStrings)
// Node
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
node := nodes.Items[0]
output = framework.RunKubectlOrDie(ns, "describe", "node", node.Name)
requiredStrings = [][]string{
{"Name:", node.Name},
{"Labels:"},
{"Annotations:"},
{"CreationTimestamp:"},
{"Conditions:"},
{"Type", "Status", "LastHeartbeatTime", "LastTransitionTime", "Reason", "Message"},
{"Addresses:"},
{"Capacity:"},
{"Version:"},
{"Kernel Version:"},
{"OS Image:"},
{"Container Runtime Version:"},
{"Kubelet Version:"},
{"Kube-Proxy Version:"},
{"Pods:"}}
checkOutput(output, requiredStrings)
// Namespace
output = framework.RunKubectlOrDie(ns, "describe", "namespace", ns)
requiredStrings = [][]string{
{"Name:", ns},
{"Labels:"},
{"Annotations:"},
{"Status:", "Active"}}
checkOutput(output, requiredStrings)
// Quota and limitrange are skipped for now.
})
ginkgo.It("should check if kubectl describe prints relevant information for cronjob", func() {
ginkgo.By("creating a cronjob")
cronjobYaml := commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-cronjob.yaml.in")))
framework.RunKubectlOrDieInput(ns, cronjobYaml, "create", "-f", "-")
ginkgo.By("waiting for cronjob to start.")
err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
cj, err := c.BatchV1().CronJobs(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed getting CronJob %s: %v", ns, err)
}
return len(cj.Items) > 0, nil
})
framework.ExpectNoError(err)
ginkgo.By("verifying kubectl describe prints")
output := framework.RunKubectlOrDie(ns, "describe", "cronjob", "cronjob-test")
requiredStrings := [][]string{
{"Name:", "cronjob-test"},
{"Namespace:", ns},
{"Labels:"},
{"Annotations:"},
{"Schedule:", "*/1 * * * *"},
{"Concurrency Policy:", "Allow"},
{"Suspend:", "False"},
{"Successful Job History Limit:", "3"},
{"Failed Job History Limit:", "1"},
{"Starting Deadline Seconds:", "30s"},
{"Selector:"},
{"Parallelism:"},
{"Completions:"},
}
checkOutput(output, requiredStrings)
})
})
ginkgo.Describe("Kubectl expose", func() {
/*
Release: v1.9
Testname: Kubectl, create service, replication controller
Description: Create a Pod running agnhost listening on port 6379. Using kubectl, expose the agnhost primary replication controller at port 1234. Validate that the replication controller is listening on port 1234 and that the target port is set to 6379, the port agnhost primary is listening on. Using kubectl, expose the agnhost primary as a service at port 2345. The service MUST be listening on port 2345 with the target port set to 6379, the port agnhost primary is listening on.
*/
framework.ConformanceIt("should create services for rc ", func() {
controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
agnhostPort := 6379
ginkgo.By("creating Agnhost RC")
framework.Logf("namespace %v", ns)
framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
// It may take a while for the pods to get registered in some cases, wait to be sure.
ginkgo.By("Waiting for Agnhost primary to start.")
waitForOrFailWithDebug(1)
forEachPod(func(pod v1.Pod) {
framework.Logf("wait on agnhost-primary startup in %v ", ns)
framework.LookForStringInLog(ns, pod.Name, "agnhost-primary", "Paused", framework.PodStartTimeout)
})
validateService := func(name string, servicePort int, timeout time.Duration) {
err := wait.Poll(framework.Poll, timeout, func() (bool, error) {
ep, err := c.CoreV1().Endpoints(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
// log the real error
framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
// if the error is API not found or could not find default credentials or TLS handshake timeout, try again
if apierrors.IsNotFound(err) ||
apierrors.IsUnauthorized(err) ||
apierrors.IsServerTimeout(err) {
err = nil
}
return false, err
}
uidToPort := e2eendpoints.GetContainerPortsByPodUID(ep)
if len(uidToPort) == 0 {
framework.Logf("No endpoint found, retrying")
return false, nil
}
if len(uidToPort) > 1 {
framework.Failf("Too many endpoints found")
}
for _, port := range uidToPort {
if port[0] != agnhostPort {
framework.Failf("Wrong endpoint port: %d", port[0])
}
}
return true, nil
})
framework.ExpectNoError(err)
e2eservice, err := c.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err)
if len(e2eservice.Spec.Ports) != 1 {
framework.Failf("1 port is expected")
}
port := e2eservice.Spec.Ports[0]
if port.Port != int32(servicePort) {
framework.Failf("Wrong service port: %d", port.Port)
}
if port.TargetPort.IntValue() != agnhostPort {
framework.Failf("Wrong target port: %d", port.TargetPort.IntValue())
}
}
ginkgo.By("exposing RC")
framework.RunKubectlOrDie(ns, "expose", "rc", "agnhost-primary", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", agnhostPort))
e2enetwork.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout)
validateService("rm2", 1234, framework.ServiceStartTimeout)
ginkgo.By("exposing service")
framework.RunKubectlOrDie(ns, "expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", agnhostPort))
e2enetwork.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout)
validateService("rm3", 2345, framework.ServiceStartTimeout)
})
})
ginkgo.Describe("Kubectl label", func() {
var podYaml string
ginkgo.BeforeEach(func() {
ginkgo.By("creating the pod")
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in")))
framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout), true)
})
ginkgo.AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, pausePodSelector)
})
/*
Release: v1.9
Testname: Kubectl, label update
Description: When a Pod is running, update a Label using the 'kubectl label' command. The label MUST be created on the Pod. A 'kubectl get pod' with the -l option MUST verify that the label can be read back. Use 'kubectl label' with the trailing-dash syntax ('label-') to remove the label. 'kubectl get pod' with the -l option SHOULD NOT list the deleted label, as the label is removed.
*/
framework.ConformanceIt("should update the label on a resource ", func() {
labelName := "testing-label"
labelValue := "testing-label-value"
ginkgo.By("adding the label " + labelName + " with value " + labelValue + " to a pod")
framework.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"="+labelValue)
ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue)
output := framework.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
if !strings.Contains(output, labelValue) {
framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
}
ginkgo.By("removing the label " + labelName + " of a pod")
framework.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"-")
ginkgo.By("verifying the pod doesn't have the label " + labelName)
output = framework.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
if strings.Contains(output, labelValue) {
framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
}
})
})
ginkgo.Describe("Kubectl copy", func() {
var podYaml string
ginkgo.BeforeEach(func() {
ginkgo.By("creating the pod")
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml.in")))
framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout), true)
})
ginkgo.AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, busyboxPodSelector)
})
/*
Release: v1.12
Testname: Kubectl, copy
Description: When a Pod is running, copy a known file from it to a temporary local destination.
*/
ginkgo.It("should copy a file from a running Pod", func() {
remoteContents := "foobar\n"
podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName)
tempDestination, err := os.CreateTemp(os.TempDir(), "copy-foobar")
if err != nil {
framework.Failf("Failed creating temporary destination file: %v", err)
}
ginkgo.By("specifying a remote filepath " + podSource + " on the pod")
framework.RunKubectlOrDie(ns, "cp", podSource, tempDestination.Name())
ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name())
localData, err := io.ReadAll(tempDestination)
if err != nil {
framework.Failf("Failed reading temporary local file: %v", err)
}
if string(localData) != remoteContents {
framework.Failf("Failed copying remote file contents. Expected %s but got %s", remoteContents, string(localData))
}
})
})
ginkgo.Describe("Kubectl logs", func() {
podName := "logs-generator"
containerName := "logs-generator"
ginkgo.BeforeEach(func() {
ginkgo.By("creating an pod")
// Agnhost image generates logs for a total of 100 lines over 20s.
framework.RunKubectlOrDie(ns, "run", podName, "--image="+agnhostImage, "--restart=Never", podRunningTimeoutArg, "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s")
})
ginkgo.AfterEach(func() {
framework.RunKubectlOrDie(ns, "delete", "pod", podName)
})
/*
Release: v1.9
Testname: Kubectl, logs
Description: When a Pod is running then it MUST generate logs.
Starting a Pod should produce the expected log line. The log command options MUST also work as expected and described below.
'kubectl logs --tail=1' should generate an output of one line, the last line in the log.
'kubectl logs --limit-bytes=1' should generate a single-byte output.
'kubectl logs --tail=1 --timestamps' should generate one line prefixed with a timestamp in RFC3339 format.
'kubectl logs --since=1s' should only output logs that are at most 1 second old.
'kubectl logs --since=24h' should only output logs that are at most 1 day old.
*/
framework.ConformanceIt("should be able to retrieve and filter logs ", func() {
// Split("something\n", "\n") returns ["something", ""], so
// strip trailing newline first
lines := func(out string) []string {
return strings.Split(strings.TrimRight(out, "\n"), "\n")
}
ginkgo.By("Waiting for log generator to start.")
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) {
framework.Failf("Pod %s was not ready", podName)
}
ginkgo.By("checking for a matching strings")
_, err := framework.LookForStringInLog(ns, podName, containerName, "/api/v1/namespaces/kube-system", framework.PodStartTimeout)
framework.ExpectNoError(err)
ginkgo.By("limiting log lines")
out := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1")
framework.Logf("got output %q", out)
gomega.Expect(len(out)).NotTo(gomega.BeZero())
framework.ExpectEqual(len(lines(out)), 1)
ginkgo.By("limiting log bytes")
out = framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--limit-bytes=1")
framework.Logf("got output %q", out)
framework.ExpectEqual(len(lines(out)), 1)
framework.ExpectEqual(len(out), 1)
ginkgo.By("exposing timestamps")
out = framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1", "--timestamps")
framework.Logf("got output %q", out)
l := lines(out)
framework.ExpectEqual(len(l), 1)
words := strings.Split(l[0], " ")
gomega.Expect(len(words)).To(gomega.BeNumerically(">", 1))
if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil {
if _, err := time.Parse(time.RFC3339, words[0]); err != nil {
framework.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0])
}
}
ginkgo.By("restricting to a time range")
// Note: we must wait at least two seconds,
// because the granularity is only 1 second and
// it could end up rounding the wrong way.
time.Sleep(2500 * time.Millisecond) // ensure that startup logs on the node are seen as older than 1s
recentOut := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=1s")
recent := len(strings.Split(recentOut, "\n"))
olderOut := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=24h")
older := len(strings.Split(olderOut, "\n"))
gomega.Expect(recent).To(gomega.BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recentOut, olderOut)
})
})
ginkgo.Describe("Kubectl patch", func() {
/*
Release: v1.9
Testname: Kubectl, patch to annotate
Description: Start agnhost running in a replication controller. When the pods are running, add annotations using the 'kubectl patch' command. The annotation MUST be added to the running pods, and the added annotations SHOULD be readable from each of the Pods running under the replication controller.
*/
framework.ConformanceIt("should add annotations for pods in rc ", func() {
controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
ginkgo.By("creating Agnhost RC")
framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
ginkgo.By("Waiting for Agnhost primary to start.")
waitForOrFailWithDebug(1)
ginkgo.By("patching all pods")
forEachPod(func(pod v1.Pod) {
framework.RunKubectlOrDie(ns, "patch", "pod", pod.Name, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}")
})
ginkgo.By("checking annotations")
forEachPod(func(pod v1.Pod) {
found := false
for key, val := range pod.Annotations {
if key == "x" && val == "y" {
found = true
break
}
}
if !found {
framework.Failf("Added annotation not found")
}
})
})
})
ginkgo.Describe("Kubectl version", func() {
/*
Release: v1.9
Testname: Kubectl, version
Description: The command 'kubectl version' MUST return the major, minor versions, GitCommit, etc of the Client and the Server that the kubectl is configured to connect to.
*/
framework.ConformanceIt("should check is all data is printed ", func() {
versionString := framework.RunKubectlOrDie(ns, "version")
// we expect following values for: Major -> digit, Minor -> numeric followed by an optional '+', GitCommit -> alphanumeric
requiredItems := []string{"Client Version: ", "Server Version: "}
for _, item := range requiredItems {
if matched, _ := regexp.MatchString(item+`version.Info\{Major:"\d", Minor:"\d+\+?", GitVersion:"v\d\.\d+\.[\d\w\-\.\+]+", GitCommit:"[0-9a-f]+"`, versionString); !matched {
framework.Failf("Item %s value is not valid in %s\n", item, versionString)
}
}
})
})
ginkgo.Describe("Kubectl run pod", func() {
var podName string
ginkgo.BeforeEach(func() {
podName = "e2e-test-httpd-pod"
})
ginkgo.AfterEach(func() {
framework.RunKubectlOrDie(ns, "delete", "pods", podName)
})
/*
Release: v1.9
Testname: Kubectl, run pod
Description: Command 'kubectl run' MUST create a pod when an image name is specified in the run command. After the run command, a pod with one container running the specified image SHOULD exist.
*/
framework.ConformanceIt("should create a pod from an image when restart is Never ", func() {
ginkgo.By("running the image " + httpdImage)
framework.RunKubectlOrDie(ns, "run", podName, "--restart=Never", podRunningTimeoutArg, "--image="+httpdImage)
ginkgo.By("verifying the pod " + podName + " was created")
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting pod %s: %v", podName, err)
}
containers := pod.Spec.Containers
if checkContainersImage(containers, httpdImage) {
framework.Failf("Failed creating pod %s with expected image %s", podName, httpdImage)
}
if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
framework.Failf("Failed creating a pod with correct restart policy for --restart=Never")
}
})
})
ginkgo.Describe("Kubectl replace", func() {
var podName string
ginkgo.BeforeEach(func() {
podName = "e2e-test-httpd-pod"
})
ginkgo.AfterEach(func() {
framework.RunKubectlOrDie(ns, "delete", "pods", podName)
})
/*
Release: v1.9
Testname: Kubectl, replace
Description: Command 'kubectl replace' on an existing Pod with a new spec MUST update the image of the container running in the Pod. The -f option to 'kubectl replace' SHOULD force re-creation of the resource. The new Pod SHOULD have the container with the updated image.
*/
framework.ConformanceIt("should update a single-container pod's image ", func() {
ginkgo.By("running the image " + httpdImage)
framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
ginkgo.By("verifying the pod " + podName + " is running")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod %s: %v", podName, err)
}
ginkgo.By("verifying the pod " + podName + " was created")
podJSON := framework.RunKubectlOrDie(ns, "get", "pod", podName, "-o", "json")
if !strings.Contains(podJSON, podName) {
framework.Failf("Failed to find pod %s in [%s]", podName, podJSON)
}
ginkgo.By("replace the image in the pod")
podJSON = strings.Replace(podJSON, httpdImage, busyboxImage, 1)
framework.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-")
ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage)
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting deployment %s: %v", podName, err)
}
containers := pod.Spec.Containers
if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating pod with expected image %s", busyboxImage)
}
})
})
ginkgo.Describe("Proxy server", func() {
// TODO: test proxy options (static, prefix, etc)
/*
Release: v1.9
Testname: Kubectl, proxy port zero
Description: Start a proxy server on port zero by running 'kubectl proxy' with --port=0. Call the proxy server by requesting the api versions from localhost on the chosen port. The proxy server MUST provide at least one version string.
*/
framework.ConformanceIt("should support proxy with --port 0 ", func() {
ginkgo.By("starting the proxy server")
port, cmd, err := startProxyServer(ns)
if cmd != nil {
defer framework.TryKill(cmd)
}
if err != nil {
framework.Failf("Failed to start proxy server: %v", err)
}
ginkgo.By("curling proxy /api/ output")
localAddr := fmt.Sprintf("http://localhost:%d/api/", port)
apiVersions, err := getAPIVersions(localAddr)
if err != nil {
framework.Failf("Expected at least one supported apiversion, got error %v", err)
}
if len(apiVersions.Versions) < 1 {
framework.Failf("Expected at least one supported apiversion, got %v", apiVersions)
}
})
/*
Release: v1.9
Testname: Kubectl, proxy socket
Description: Start a proxy server by running 'kubectl proxy' with --unix-socket=<some path>. Call the proxy server by requesting api versions from http://localhost:0/api. The proxy server MUST provide at least one version string.
*/
framework.ConformanceIt("should support --unix-socket=/path ", func() {
ginkgo.By("Starting the proxy")
tmpdir, err := os.MkdirTemp("", "kubectl-proxy-unix")
if err != nil {
framework.Failf("Failed to create temporary directory: %v", err)
}
path := filepath.Join(tmpdir, "test")
defer os.Remove(path)
defer os.Remove(tmpdir)
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
cmd := tk.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path))
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil {
framework.Failf("Failed to start kubectl command: %v", err)
}
defer stdout.Close()
defer stderr.Close()
defer framework.TryKill(cmd)
buf := make([]byte, 128)
if _, err = stdout.Read(buf); err != nil {
framework.Failf("Expected output from kubectl proxy: %v", err)
}
ginkgo.By("retrieving proxy /api/ output")
_, err = curlUnix("http://unused/api", path)
if err != nil {
framework.Failf("Failed get of /api at %s: %v", path, err)
}
})
})
// This test must run [Serial] because it modifies the node so it doesn't allow pods to execute on
// it, which will affect anything else running in parallel.
ginkgo.Describe("Kubectl taint [Serial]", func() {
ginkgo.It("should update the taint on a node", func() {
testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
Effect: v1.TaintEffectNoSchedule,
}
nodeName := scheduling.GetNodeThatCanRunPod(f)
ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)
ginkgo.By("verifying the node has the taint " + testTaint.ToString())
output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
requiredStrings := [][]string{
{"Name:", nodeName},
{"Taints:"},
{testTaint.ToString()},
}
checkOutput(output, requiredStrings)
ginkgo.By("removing the taint " + testTaint.ToString() + " of a node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.Key+":"+string(testTaint.Effect)+"-")
ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key)
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) {
framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
}
})
ginkgo.It("should remove all the taints with the same key off a node", func() {
testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-002-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
Effect: v1.TaintEffectNoSchedule,
}
nodeName := scheduling.GetNodeThatCanRunPod(f)
ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)
ginkgo.By("verifying the node has the taint " + testTaint.ToString())
output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
requiredStrings := [][]string{
{"Name:", nodeName},
{"Taints:"},
{testTaint.ToString()},
}
checkOutput(output, requiredStrings)
newTestTaint := v1.Taint{
Key: testTaint.Key,
Value: "another-testing-taint-value",
Effect: v1.TaintEffectPreferNoSchedule,
}
ginkgo.By("adding another taint " + newTestTaint.ToString() + " to the node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, newTestTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, newTestTaint)
ginkgo.By("verifying the node has the taint " + newTestTaint.ToString())
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
requiredStrings = [][]string{
{"Name:", nodeName},
{"Taints:"},
{newTestTaint.ToString()},
}
checkOutput(output, requiredStrings)
noExecuteTaint := v1.Taint{
Key: testTaint.Key,
Value: "testing-taint-value-no-execute",
Effect: v1.TaintEffectNoExecute,
}
ginkgo.By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, noExecuteTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, noExecuteTaint)
ginkgo.By("verifying the node has the taint " + noExecuteTaint.ToString())
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
requiredStrings = [][]string{
{"Name:", nodeName},
{"Taints:"},
{noExecuteTaint.ToString()},
}
checkOutput(output, requiredStrings)
ginkgo.By("removing all taints that have the same key " + testTaint.Key + " of the node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.Key+"-")
ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key)
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) {
framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
}
})
})
ginkgo.Describe("Kubectl create quota", func() {
ginkgo.It("should create a quota without scopes", func() {
quotaName := "million"
ginkgo.By("calling kubectl quota")
framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000,services=1000000")
ginkgo.By("verifying that the quota was created")
quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting quota %s: %v", quotaName, err)
}
if len(quota.Spec.Scopes) != 0 {
framework.Failf("Expected empty scopes, got %v", quota.Spec.Scopes)
}
if len(quota.Spec.Hard) != 2 {
framework.Failf("Expected two resources, got %v", quota.Spec.Hard)
}
r, found := quota.Spec.Hard[v1.ResourcePods]
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
framework.Failf("Expected pods=1000000, got %v", r)
}
r, found = quota.Spec.Hard[v1.ResourceServices]
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
framework.Failf("Expected services=1000000, got %v", r)
}
})
ginkgo.It("should create a quota with scopes", func() {
quotaName := "scopes"
ginkgo.By("calling kubectl quota")
framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating")
ginkgo.By("verifying that the quota was created")
quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting quota %s: %v", quotaName, err)
}
if len(quota.Spec.Scopes) != 2 {
framework.Failf("Expected two scopes, got %v", quota.Spec.Scopes)
}
scopes := make(map[v1.ResourceQuotaScope]struct{})
for _, scope := range quota.Spec.Scopes {
scopes[scope] = struct{}{}
}
if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found {
framework.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes)
}
if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found {
framework.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes)
}
})
ginkgo.It("should reject quota with invalid scopes", func() {
quotaName := "scopes"
ginkgo.By("calling kubectl quota")
out, err := framework.RunKubectl(ns, "create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo")
if err == nil {
framework.Failf("Expected kubectl to fail, but it succeeded: %s", out)
}
})
})
ginkgo.Describe("kubectl wait", func() {
ginkgo.It("should ignore not found error with --for=delete", func() {
ginkgo.By("calling kubectl wait --for=delete")
framework.RunKubectlOrDie(ns, "wait", "--for=delete", "pod/doesnotexist")
framework.RunKubectlOrDie(ns, "wait", "--for=delete", "pod", "--selector=app.kubernetes.io/name=noexist")
})
})
})
// Checks whether the output split by line contains the required elements.
func checkOutputReturnError(output string, required [][]string) error {
outputLines := strings.Split(output, "\n")
currentLine := 0
for _, requirement := range required {
for currentLine < len(outputLines) && !strings.Contains(outputLines[currentLine], requirement[0]) {
currentLine++
}
if currentLine == len(outputLines) {
return fmt.Errorf("failed to find %s in %s", requirement[0], output)
}
for _, item := range requirement[1:] {
if !strings.Contains(outputLines[currentLine], item) {
return fmt.Errorf("failed to find %s in %s", item, outputLines[currentLine])
}
}
}
return nil
}
func checkOutput(output string, required [][]string) {
err := checkOutputReturnError(output, required)
if err != nil {
framework.Failf("%v", err)
}
}
func checkKubectlOutputWithRetry(namespace string, required [][]string, args ...string) {
var pollErr error
wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
output := framework.RunKubectlOrDie(namespace, args...)
err := checkOutputReturnError(output, required)
if err != nil {
pollErr = err
return false, nil
}
pollErr = nil
return true, nil
})
if pollErr != nil {
framework.Failf("%v", pollErr)
}
return
}
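// checkContainersImage reports a mismatch: it returns true when the pod does not
// have exactly one container running the expected image, which is why callers
// treat a true result as a failure.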
func checkContainersImage(containers []v1.Container, expectImage string) bool {
return containers == nil || len(containers) != 1 || containers[0].Image != expectImage
}
func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) {
body, err := curl(apiEndpoint)
if err != nil {
return nil, fmt.Errorf("Failed http.Get of %s: %v", apiEndpoint, err)
}
var apiVersions metav1.APIVersions
if err := json.Unmarshal([]byte(body), &apiVersions); err != nil {
return nil, fmt.Errorf("Failed to parse /api output %s: %v", body, err)
}
return &apiVersions, nil
}
func startProxyServer(ns string) (int, *exec.Cmd, error) {
// Specifying port 0 indicates we want the os to pick a random port.
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
cmd := tk.KubectlCmd("proxy", "-p", "0", "--disable-filter")
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil {
return -1, nil, err
}
defer stdout.Close()
defer stderr.Close()
buf := make([]byte, 128)
var n int
if n, err = stdout.Read(buf); err != nil {
return -1, cmd, fmt.Errorf("Failed to read from kubectl proxy stdout: %v", err)
}
output := string(buf[:n])
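// proxyRegexp is defined elsewhere in this file; the assumption here is that it
// captures the chosen port number from kubectl proxy's "Starting to serve on ..." line.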
match := proxyRegexp.FindStringSubmatch(output)
if len(match) == 2 {
if port, err := strconv.Atoi(match[1]); err == nil {
return port, cmd, nil
}
}
return -1, cmd, fmt.Errorf("Failed to parse port from proxy stdout: %s", output)
}
func curlUnix(url string, path string) (string, error) {
dial := func(ctx context.Context, proto, addr string) (net.Conn, error) {
var d net.Dialer
return d.DialContext(ctx, "unix", path)
}
transport := utilnet.SetTransportDefaults(&http.Transport{
DialContext: dial,
})
return curlTransport(url, transport)
}
func curlTransport(url string, transport *http.Transport) (string, error) {
client := &http.Client{Transport: transport}
resp, err := client.Get(url)
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(body[:]), nil
}
func curl(url string) (string, error) {
return curlTransport(url, utilnet.SetTransportDefaults(&http.Transport{}))
}
func validateGuestbookApp(c clientset.Interface, ns string) {
framework.Logf("Waiting for all frontend pods to be Running.")
label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
framework.ExpectNoError(err)
framework.Logf("Waiting for frontend to serve content.")
if !waitForGuestbookResponse(c, "get", "", `{"data":""}`, guestbookStartupTimeout, ns) {
framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
}
framework.Logf("Trying to add a new entry to the guestbook.")
if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message":"Updated"}`, guestbookResponseTimeout, ns) {
framework.Failf("Cannot added new entry in %v seconds.", guestbookResponseTimeout.Seconds())
}
framework.Logf("Verifying that added entry can be retrieved.")
if !waitForGuestbookResponse(c, "get", "", `{"data":"TestEntry"}`, guestbookResponseTimeout, ns) {
framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
}
}
// Returns whether received expected response from guestbook on time.
func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
res, err := makeRequestToGuestbook(c, cmd, arg, ns)
if err == nil && res == expectedResponse {
return true
}
framework.Logf("Failed to get response from guestbook. err: %v, response: %s", err, res)
}
return false
}
func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) {
proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
if errProxy != nil {
return "", errProxy
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
result, err := proxyRequest.Namespace(ns).
Name("frontend").
Suffix("/guestbook").
Param("cmd", cmd).
Param("key", "messages").
Param("value", value).
Do(ctx).
Raw()
return string(result), err
}
type updateDemoData struct {
Image string
}
const applyTestLabel = "kubectl.kubernetes.io/apply-test"
func readReplicationControllerFromString(contents string) *v1.ReplicationController {
rc := v1.ReplicationController{}
if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
framework.Failf(err.Error())
}
return &rc
}
func modifyReplicationControllerConfiguration(contents string) io.Reader {
rc := readReplicationControllerFromString(contents)
rc.Labels[applyTestLabel] = "ADDED"
rc.Spec.Selector[applyTestLabel] = "ADDED"
rc.Spec.Template.Labels[applyTestLabel] = "ADDED"
data, err := json.Marshal(rc)
if err != nil {
framework.Failf("json marshal failed: %s\n", err)
}
return bytes.NewReader(data)
}
func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) {
var rcs *v1.ReplicationControllerList
var err error
for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) {
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
options := metav1.ListOptions{LabelSelector: label.String()}
rcs, err = c.CoreV1().ReplicationControllers(ns).List(context.TODO(), options)
framework.ExpectNoError(err)
if len(rcs.Items) > 0 {
break
}
}
if rcs == nil || len(rcs.Items) == 0 {
framework.Failf("No replication controllers found")
}
for _, rc := range rcs.Items {
fn(rc)
}
}
func validateReplicationControllerConfiguration(rc v1.ReplicationController) {
if rc.Name == "agnhost-primary" {
if _, ok := rc.Annotations[v1.LastAppliedConfigAnnotation]; !ok {
framework.Failf("Annotation not found in modified configuration:\n%v\n", rc)
}
if value, ok := rc.Labels[applyTestLabel]; !ok || value != "ADDED" {
framework.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc)
}
}
}
// getUDData creates a validator function based on the input string (e.g. kitten.jpg).
// For example, if you send "kitten.jpg", this function verifies that the image
// reported in the container's data.json field is kitten.jpg.
func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) error {
// getUDData validates data.json in the update-demo (returns nil if data is ok).
return func(c clientset.Interface, podID string) error {
framework.Logf("validating pod %s", podID)
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
body, err := c.CoreV1().RESTClient().Get().
Namespace(ns).
Resource("pods").
SubResource("proxy").
Name(podID).
Suffix("data.json").
Do(context.TODO()).
Raw()
if err != nil {
if ctx.Err() != nil {
framework.Failf("Failed to retrieve data from container: %v", err)
}
return err
}
framework.Logf("got data: %s", body)
var data updateDemoData
if err := json.Unmarshal(body, &data); err != nil {
return err
}
framework.Logf("Unmarshalled json jpg/img => %s , expecting %s .", data, jpgExpected)
if strings.Contains(data.Image, jpgExpected) {
return nil
}
return fmt.Errorf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected)
}
}
// newBlockingReader returns a reader that allows reading the given string,
// then blocks until Close() is called on the returned closer.
//
// We're explicitly returning the reader and closer separately, because
// the closer needs to be the *os.File we get from os.Pipe(). This is required
// so the exec of kubectl can pass the underlying file descriptor to the exec
// syscall, instead of creating another os.Pipe and blocking on the io.Copy
// between the source (e.g. stdin) and the write half of the pipe.
func newBlockingReader(s string) (io.Reader, io.Closer, error) {
r, w, err := os.Pipe()
if err != nil {
return nil, nil, err
}
w.Write([]byte(s))
return r, w, nil
}
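// A hedged usage sketch (not part of the suite) of how a test can feed a manifest
// through the blocking reader so kubectl's stdin stays open until we close it.
// framework.NewKubectlCommand, WithStdinReader and ExecOrDie are assumed to be the
// kubectl builder helpers used elsewhere in this file.
func applyWithBlockingStdinSketch(ns, manifest string) {
	stdin, closer, err := newBlockingReader(manifest)
	framework.ExpectNoError(err)
	defer closer.Close() // closing the pipe unblocks kubectl's read of stdin
	framework.NewKubectlCommand(ns, "apply", "-f", "-").WithStdinReader(stdin).ExecOrDie(ns)
}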
func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) {
logs = &bytes.Buffer{}
p := goproxy.NewProxyHttpServer()
p.Verbose = true
p.Logger = log.New(logs, "", 0)
return httptest.NewServer(p), logs
}
// createApplyCustomResource asserts that given CustomResource be created and applied
// without being rejected by kubectl validation
func createApplyCustomResource(resource, namespace, name string, crd *crd.TestCrd) error {
ginkgo.By("successfully create CR")
if _, err := framework.RunKubectlInput(namespace, resource, "create", "--validate=true", "-f", "-"); err != nil {
return fmt.Errorf("failed to create CR %s in namespace %s: %v", resource, namespace, err)
}
if _, err := framework.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
return fmt.Errorf("failed to delete CR %s: %v", name, err)
}
ginkgo.By("successfully apply CR")
if _, err := framework.RunKubectlInput(namespace, resource, "apply", "--validate=true", "-f", "-"); err != nil {
return fmt.Errorf("failed to apply CR %s in namespace %s: %v", resource, namespace, err)
}
if _, err := framework.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
return fmt.Errorf("failed to delete CR %s: %v", name, err)
}
return nil
}
// trimDockerRegistry trims the docker.io/library prefix from the beginning of the image name.
// A community Docker installation does not prefix its image names with the registry, whereas
// other runtimes (or Docker installed via the RHEL extras repo) do, so trimming docker.io/library
// when present keeps image-name comparisons consistent.
func trimDockerRegistry(imagename string) string {
imagename = strings.Replace(imagename, "docker.io/", "", 1)
return strings.Replace(imagename, "library/", "", 1)
}
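// Minimal illustration (an assumption for clarity, not part of the suite) of how
// trimDockerRegistry normalizes image names before comparison.
func trimDockerRegistrySketch() {
	fmt.Println(trimDockerRegistry("docker.io/library/httpd:2.4.38-alpine")) // "httpd:2.4.38-alpine"
	fmt.Println(trimDockerRegistry("registry.k8s.io/pause:3.9"))             // unchanged
}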
// validatorFn is the function which individual tests will implement.
// We may want it to return more than just an error at some point.
type validatorFn func(c clientset.Interface, podID string) error
// validateController is a generic mechanism for testing RCs that are running.
// It takes a container name, a test name, and a validator function which is plugged in by a specific test.
// "containername": this is grepped for.
// "containerImage": the name of the image we expect to be launched. Not to be confused with the images (e.g. kitten.jpg) which are validated.
// "testname": gets bubbled up to the logging/failure messages if errors happen.
// "validator" function: given a podID and a client, it can perform test-specific validations that way.
func validateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
containerImage = trimDockerRegistry(containerImage)
getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)
getImageTemplate := fmt.Sprintf(`--template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)
ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
waitLoop:
for start := time.Now(); time.Since(start) < framework.PodStartTimeout; time.Sleep(5 * time.Second) {
getPodsOutput := framework.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname)
pods := strings.Fields(getPodsOutput)
if numPods := len(pods); numPods != replicas {
ginkgo.By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
continue
}
var runningPods []string
for _, podID := range pods {
running := framework.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getContainerStateTemplate)
if running != "true" {
framework.Logf("%s is created but not running", podID)
continue waitLoop
}
currentImage := framework.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getImageTemplate)
currentImage = trimDockerRegistry(currentImage)
if currentImage != containerImage {
framework.Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
continue waitLoop
}
// Call the generic validator function here.
// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
if err := validator(c, podID); err != nil {
framework.Logf("%s is running right image but validator function failed: %v", podID, err)
continue waitLoop
}
framework.Logf("%s is verified up and running", podID)
runningPods = append(runningPods, podID)
}
// If we reach here, then all our checks passed.
if len(runningPods) == replicas {
return
}
}
// Reaching here means that one or more checks failed multiple times. Assuming it's not a race condition, something is broken.
framework.Failf("Timed out after %v seconds waiting for %s pods to reach valid state", framework.PodStartTimeout.Seconds(), testname)
}
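// Hedged sketch of how the update-demo style tests are expected to call
// validateController; the selector, container name and image below are
// illustrative assumptions, not values taken from this section.
func validateControllerSketch(c clientset.Interface, ns, kittenImage string) {
	validateController(c, kittenImage, 2, "update-demo", "name=update-demo", getUDData("kitten.jpg", ns), ns)
}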
goyacc/strutil/strutil.go
// Copyright (c) 2014 The sortutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package strutil collects utils supplemental to the standard strings package.
package strutil // import "github.com/skycoin/cx/yacc/strutil"
import (
"bytes"
"encoding/base32"
"encoding/base64"
"fmt"
"io"
"os"
"path/filepath"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
)
// Base32ExtDecode decodes base32 extended (RFC 4648) text to binary data.
func Base32ExtDecode(text []byte) (data []byte, err error) {
n := base32.HexEncoding.DecodedLen(len(text))
data = make([]byte, n)
decoder := base32.NewDecoder(base32.HexEncoding, bytes.NewBuffer(text))
if n, err = decoder.Read(data); err != nil {
n = 0
}
data = data[:n]
return
}
// Base32ExtEncode encodes binary data to base32 extended (RFC 4648) encoded text.
func Base32ExtEncode(data []byte) (text []byte) {
n := base32.HexEncoding.EncodedLen(len(data))
buf := bytes.NewBuffer(make([]byte, 0, n))
encoder := base32.NewEncoder(base32.HexEncoding, buf)
encoder.Write(data)
encoder.Close()
if buf.Len() != n {
panic("internal error")
}
return buf.Bytes()
}
// Base64Decode decodes base64 text to binary data.
func Base64Decode(text []byte) (data []byte, err error) {
n := base64.StdEncoding.DecodedLen(len(text))
data = make([]byte, n)
decoder := base64.NewDecoder(base64.StdEncoding, bytes.NewBuffer(text))
if n, err = decoder.Read(data); err != nil {
n = 0
}
data = data[:n]
return
}
// Base64Encode encodes binary data to base64 encoded text.
func Base64Encode(data []byte) (text []byte) {
n := base64.StdEncoding.EncodedLen(len(data))
buf := bytes.NewBuffer(make([]byte, 0, n))
encoder := base64.NewEncoder(base64.StdEncoding, buf)
encoder.Write(data)
encoder.Close()
if buf.Len() != n {
panic("internal error")
}
return buf.Bytes()
}
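// Minimal round-trip sketch (illustrative only, not part of the original package)
// for the base64 helpers above.
func base64RoundTripSketch() {
	text := Base64Encode([]byte("hello, world"))
	data, err := Base64Decode(text)
	fmt.Printf("encoded=%q decoded=%q err=%v\n", text, data, err)
}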
// Formatter is an io.Writer extended by a fmt.Printf like function Format
type Formatter interface {
io.Writer
Format(format string, args ...interface{}) (n int, errno error)
}
type indentFormatter struct {
io.Writer
indent []byte
indentLevel int
state int
}
const (
st0 = iota
stBOL
stPERC
stBOLPERC
)
// IndentFormatter returns a new Formatter which interprets %i and %u in the
// Format() format string as indent and undent commands. The commands can
// nest. The Formatter writes to io.Writer 'w' and inserts one 'indent'
// string per current indent level value.
// Behaviour of commands reaching negative indent levels is undefined.
// IndentFormatter(os.Stdout, "\t").Format("abc%d%%e%i\nx\ny\n%uz\n", 3)
// output:
// abc3%e
// x
// y
// z
// The Go quoted string literal form of the above is:
// "abc3%e\n\tx\n\ty\nz\n"
// The commands can be scattered between separate invocations of Format(),
// i.e. the formatter keeps track of the indent level and knows if it is
// positioned on start of a line and should emit indentation(s).
// The same output as above can be produced by e.g.:
// f := IndentFormatter(os.Stdout, "\t")
// f.Format("abc%d%%e%i\nx\n", 3)
// f.Format("y\n%uz\n")
func IndentFormatter(w io.Writer, indent string) Formatter {
return &indentFormatter{w, []byte(indent), 0, stBOL}
}
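// Runnable form of the doc example above (a sketch; the expected output is the
// one shown in the IndentFormatter documentation).
func indentFormatterSketch() {
	f := IndentFormatter(os.Stdout, "\t")
	f.Format("abc%d%%e%i\nx\ny\n%uz\n", 3)
	// abc3%e
	// 	x
	// 	y
	// z
}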
func (f *indentFormatter) format(flat bool, format string, args ...interface{}) (n int, errno error) {
buf := []byte{}
for i := 0; i < len(format); i++ {
c := format[i]
switch f.state {
case st0:
switch c {
case '\n':
cc := c
if flat && f.indentLevel != 0 {
cc = ' '
}
buf = append(buf, cc)
f.state = stBOL
case '%':
f.state = stPERC
default:
buf = append(buf, c)
}
case stBOL:
switch c {
case '\n':
cc := c
if flat && f.indentLevel != 0 {
cc = ' '
}
buf = append(buf, cc)
case '%':
f.state = stBOLPERC
default:
if !flat {
for i := 0; i < f.indentLevel; i++ {
buf = append(buf, f.indent...)
}
}
buf = append(buf, c)
f.state = st0
}
case stBOLPERC:
switch c {
case 'i':
f.indentLevel++
f.state = stBOL
case 'u':
f.indentLevel--
f.state = stBOL
default:
if !flat {
for i := 0; i < f.indentLevel; i++ {
buf = append(buf, f.indent...)
}
}
buf = append(buf, '%', c)
f.state = st0
}
case stPERC:
switch c {
case 'i':
f.indentLevel++
f.state = st0
case 'u':
f.indentLevel--
f.state = st0
default:
buf = append(buf, '%', c)
f.state = st0
}
default:
panic("unexpected state")
}
}
switch f.state {
case stPERC, stBOLPERC:
buf = append(buf, '%')
}
return f.Write([]byte(fmt.Sprintf(string(buf), args...)))
}
func (f *indentFormatter) Format(format string, args ...interface{}) (n int, errno error) {
return f.format(false, format, args...)
}
type flatFormatter indentFormatter
// FlatFormatter returns a newly created Formatter with the same functionality as the one returned
// by IndentFormatter except it allows a newline in the 'format' string argument of Format
// to pass through iff indent level is currently zero.
//
// If indent level is non-zero then such new lines are changed to a space character.
// There is no indent string, the %i and %u format verbs are used solely to determine the indent level.
//
// The FlatFormatter is intended for flattening of normally nested structure textual representation to
// a one top level structure per line form.
// FlatFormatter(os.Stdout).Format("abc%d%%e%i\nx\ny\n%uz\n", 3)
// output in the form of a Go quoted string literal:
// "abc3%e x y z\n"
func FlatFormatter(w io.Writer) Formatter {
return (*flatFormatter)(IndentFormatter(w, "").(*indentFormatter))
}
func (f *flatFormatter) Format(format string, args ...interface{}) (n int, errno error) {
return (*indentFormatter)(f).format(true, format, args...)
}
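// Flat counterpart of the sketch above (illustrative only); newlines inside the
// indented region come out as spaces.
func flatFormatterSketch() {
	f := FlatFormatter(os.Stdout)
	f.Format("abc%d%%e%i\nx\ny\n%uz\n", 3)
	// abc3%e x y z
}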
// Pool handles aligning of strings having equal values to the same string instance.
// Intended use is to conserve some memory e.g. where a large number of identically valued strings
// with non-identical backing arrays may exist in several semantically distinct instances of some structs.
// Pool is *not* concurrent access safe. It doesn't handle common prefix/suffix aligning,
// e.g. having s1 == "abc" and s2 == "bc", s2 is not automatically aligned as s1[1:].
type Pool struct {
pool map[string]string
}
// NewPool returns a newly created Pool.
func NewPool() *Pool {
return &Pool{map[string]string{}}
}
// Align returns a string with the same value as its argument. It guarantees that
// all aligned strings share a single instance in memory.
func (p *Pool) Align(s string) string {
if a, ok := p.pool[s]; ok {
return a
}
s = StrPack(s)
p.pool[s] = s
return s
}
// Count returns the number of items in the pool.
func (p *Pool) Count() int {
return len(p.pool)
}
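// Usage sketch (illustrative only): deduplicating equal strings so they share a
// single packed backing array.
func poolSketch(lines []string) []string {
	p := NewPool()
	out := make([]string, len(lines))
	for i, l := range lines {
		out[i] = p.Align(l) // equal values now reference one instance
	}
	return out
}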
// GoPool is a concurrent access safe version of Pool.
type GoPool struct {
pool map[string]string
rwm *sync.RWMutex
}
// NewGoPool returns a newly created GoPool.
func NewGoPool() (p *GoPool) {
return &GoPool{map[string]string{}, &sync.RWMutex{}}
}
// Align returns a string with the same value as its argument. It guarantees that
// all aligned strings share a single instance in memory.
func (p *GoPool) Align(s string) (y string) {
if s != "" {
p.rwm.RLock() // R++
if a, ok := p.pool[s]; ok { // found
p.rwm.RUnlock() // R--
return a
}
p.rwm.RUnlock() // R--
// not found but with a race condition, retry within a write lock
p.rwm.Lock() // W++
defer p.rwm.Unlock() // W--
if a, ok := p.pool[s]; ok { // done in a race
return a
}
// we won
s = StrPack(s)
p.pool[s] = s
return s
}
return
}
// Count returns the number of items in the pool.
func (p *GoPool) Count() int {
return len(p.pool)
}
// Dict is a string <-> id bijection. Dict is *not* concurrent access safe for assigning new ids
// to strings not yet contained in the bijection.
// Id for an empty string is guaranteed to be 0,
// thus Id for any non empty string is guaranteed to be non zero.
type Dict struct {
si map[string]int
is []string
}
// NewDict returns a newly created Dict.
func NewDict() (d *Dict) {
d = &Dict{map[string]int{}, []string{}}
d.Id("")
return
}
// Count returns the number of items in the dict.
func (d *Dict) Count() int {
return len(d.is)
}
// Id maps string s to its numeric identifier.
func (d *Dict) Id(s string) (y int) {
if y, ok := d.si[s]; ok {
return y
}
s = StrPack(s)
y = len(d.is)
d.si[s] = y
d.is = append(d.is, s)
return
}
// S maps an id to its string value and ok == true. Id values not contained in the bijection
// return "", false.
func (d *Dict) S(id int) (s string, ok bool) {
if id >= len(d.is) {
return "", false
}
return d.is[id], true
}
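// Usage sketch (illustrative only): interning strings to dense integer ids and
// mapping them back.
func dictSketch() {
	d := NewDict()
	id := d.Id("foo") // non-zero; "" is always id 0
	if s, ok := d.S(id); ok {
		fmt.Println(id, s)
	}
}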
// GoDict is a concurrent access safe version of Dict.
type GoDict struct {
si map[string]int
is []string
rwm *sync.RWMutex
}
// NewGoDict returns a newly created GoDict.
func NewGoDict() (d *GoDict) {
d = &GoDict{map[string]int{}, []string{}, &sync.RWMutex{}}
d.Id("")
return
}
// Count returns the number of items in the dict.
func (d *GoDict) Count() int {
return len(d.is)
}
// Id maps string s to its numeric identifier. The implementation favors fast
// retrieval of an existing id at the cost of a slower path when assigning a new one.
func (d *GoDict) Id(s string) (y int) {
d.rwm.RLock() // R++
if y, ok := d.si[s]; ok { // found
d.rwm.RUnlock() // R--
return y
}
d.rwm.RUnlock() // R--
// not found but with a race condition
d.rwm.Lock() // W++ recheck with write lock
defer d.rwm.Unlock() // W--
if y, ok := d.si[s]; ok { // some other goroutine won already
return y
}
// a race free not found state => insert the string
s = StrPack(s)
y = len(d.is)
d.si[s] = y
d.is = append(d.is, s)
return
}
// S maps an id to its string value and ok == true. Id values not contained in the bijection
// return "", false.
func (d *GoDict) S(id int) (s string, ok bool) {
d.rwm.RLock() // R++
defer d.rwm.RUnlock() // R--
if id >= len(d.is) {
return "", false
}
return d.is[id], true
}
// StrPack returns a new instance of s which is tightly packed in memory.
// It is intended for avoiding the situation where keeping a live reference
// to a small slice of an otherwise unreferenced, much bigger underlying string keeps the bigger one
// in memory anyway - it can't be GCed.
func StrPack(s string) string {
return string([]byte(s)) // T(U(T)) intentional.
}
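// Sketch of the situation StrPack guards against (illustrative only): keeping a
// small slice of a large string would otherwise pin the whole backing array.
func strPackSketch(huge string) string {
	if len(huge) < 8 {
		return huge
	}
	head := huge[:8]     // shares huge's backing array
	return StrPack(head) // copies just these 8 bytes so the rest can be GCed
}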
// JoinFields returns strings in flds joined by sep. Flds may contain arbitrary
// bytes, including the sep as they are safely escaped. JoinFields panics if
// sep is the backslash character or if len(sep) != 1.
func JoinFields(flds []string, sep string) string {
if len(sep) != 1 || sep == "\\" {
panic("invalid separator")
}
a := make([]string, len(flds))
for i, v := range flds {
v = strings.Replace(v, "\\", "\\0", -1)
a[i] = strings.Replace(v, sep, "\\1", -1)
}
return strings.Join(a, sep)
}
// SplitFields splits s, which must be produced by JoinFields using the same
// sep, into flds. SplitFields panics if sep is the backslash character or if
// len(sep) != 1.
func SplitFields(s, sep string) (flds []string) {
if len(sep) != 1 || sep == "\\" {
panic("invalid separator")
}
a := strings.Split(s, sep)
r := make([]string, len(a))
for i, v := range a {
v = strings.Replace(v, "\\1", sep, -1)
r[i] = strings.Replace(v, "\\0", "\\", -1)
}
return r
}
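// Round-trip sketch (illustrative only): fields containing the separator or
// backslashes survive JoinFields/SplitFields unchanged thanks to the escaping.
func joinSplitSketch() {
	flds := []string{"a,b", `c\d`, ""}
	s := JoinFields(flds, ",")
	back := SplitFields(s, ",")
	fmt.Println(reflect.DeepEqual(flds, back)) // true
}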
// PrettyPrintHooks allow to customize the result of PrettyPrint for types
// listed in the map value.
type PrettyPrintHooks map[reflect.Type]func(f Formatter, v interface{}, prefix, suffix string)
// PrettyString returns the output of PrettyPrint as a string.
func PrettyString(v interface{}, prefix, suffix string, hooks PrettyPrintHooks) string {
var b bytes.Buffer
PrettyPrint(&b, v, prefix, suffix, hooks)
return b.String()
}
// PrettyPrint pretty prints v to w. Zero values and unexported struct fields
// are omitted.
//
// Force printing of zero values of struct fields by including in the field tag
// PrettyPrint:"zero".
//
// Enable using a String method, if any, of a struct field type by including in
// the field tag PrettyPrint:"stringer".
//
// The tags can be combined as in PrettyPrint:"zero,stringer". The order is not
// important, so PrettyPrint:stringer,zero has the same effect.
//
// A hook attached to the field type has priority over the struct field tag
// described above.
func PrettyPrint(w io.Writer, v interface{}, prefix, suffix string, hooks PrettyPrintHooks) {
if v == nil {
return
}
f := IndentFormatter(w, "· ")
defer func() {
if e := recover(); e != nil {
f.Format("\npanic: %v", e)
}
}()
prettyPrint(nil, f, prefix, suffix, v, hooks, false, false)
}
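// Usage sketch (illustrative only, not part of the package): the "zero" tag forces
// printing of an otherwise-omitted zero field, and unexported fields are skipped.
type prettySketch struct {
	Name  string
	Count int `PrettyPrint:"zero"`
	note  string // unexported: never printed
}

func prettySketchString() string {
	return PrettyString(prettySketch{Name: "x"}, "", "\n", nil)
}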
func prettyPrint(protect map[interface{}]struct{}, sf Formatter, prefix, suffix string, v interface{}, hooks PrettyPrintHooks, zero, stringer bool) {
if v == nil {
return
}
rt := reflect.TypeOf(v)
if handler := hooks[rt]; handler != nil {
handler(sf, v, prefix, suffix)
return
}
rv := reflect.ValueOf(v)
if stringer {
if _, ok := v.(fmt.Stringer); ok {
sf.Format("%s%s", prefix, v)
sf.Format(suffix)
return
}
}
switch rt.Kind() {
case reflect.Slice:
if rv.Len() == 0 && !zero {
return
}
sf.Format("%s[]%T{ // len %d%i\n", prefix, rv.Index(0).Interface(), rv.Len())
for i := 0; i < rv.Len(); i++ {
prettyPrint(protect, sf, fmt.Sprintf("%d: ", i), ",\n", rv.Index(i).Interface(), hooks, false, false)
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%u}" + suffix)
case reflect.Array:
if reflect.Zero(rt).Interface() == rv.Interface() && !zero {
return
}
sf.Format("%s[%d]%T{%i\n", prefix, rv.Len(), rv.Index(0).Interface())
for i := 0; i < rv.Len(); i++ {
prettyPrint(protect, sf, fmt.Sprintf("%d: ", i), ",\n", rv.Index(i).Interface(), hooks, false, false)
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%u}" + suffix)
case reflect.Struct:
if rt.NumField() == 0 {
return
}
if reflect.DeepEqual(reflect.Zero(rt).Interface(), rv.Interface()) && !zero {
return
}
sf.Format("%s%T{%i\n", prefix, v)
for i := 0; i < rt.NumField(); i++ {
f := rv.Field(i)
if !f.CanInterface() {
continue
}
var stringer, zero bool
ft := rt.Field(i)
if tag, ok := ft.Tag.Lookup("PrettyPrint"); ok {
a := strings.Split(tag, ",")
for _, v := range a {
switch strings.TrimSpace(v) {
case "stringer":
stringer = true
case "zero":
zero = true
}
}
}
prettyPrint(protect, sf, fmt.Sprintf("%s: ", rt.Field(i).Name), ",\n", f.Interface(), hooks, zero, stringer)
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%u}" + suffix)
case reflect.Ptr:
if rv.IsNil() && !zero {
return
}
rvi := rv.Interface()
if _, ok := protect[rvi]; ok {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s&%T{ /* recursive/repetitive pointee not shown */ }"+suffix, prefix, rv.Elem().Interface())
return
}
if protect == nil {
protect = map[interface{}]struct{}{}
}
protect[rvi] = struct{}{}
prettyPrint(protect, sf, prefix+"&", suffix, rv.Elem().Interface(), hooks, false, false)
case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
if v := rv.Int(); v != 0 || zero {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%v"+suffix, prefix, v)
}
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8:
if v := rv.Uint(); v != 0 || zero {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%v"+suffix, prefix, v)
}
case reflect.Float32, reflect.Float64:
if v := rv.Float(); v != 0 || zero {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%v"+suffix, prefix, v)
}
case reflect.Complex64, reflect.Complex128:
if v := rv.Complex(); v != 0 || zero {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%v"+suffix, prefix, v)
}
case reflect.Uintptr:
if v := rv.Uint(); v != 0 || zero {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%v"+suffix, prefix, v)
}
case reflect.UnsafePointer:
s := fmt.Sprintf("%p", rv.Interface())
if s == "0x0" && !zero {
return
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%s"+suffix, prefix, s)
case reflect.Bool:
if v := rv.Bool(); v || zero {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%v"+suffix, prefix, rv.Bool())
}
case reflect.String:
s := rv.Interface().(string)
if s == "" && !zero {
return
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%q"+suffix, prefix, s)
case reflect.Chan:
if reflect.Zero(rt).Interface() == rv.Interface() && !zero {
return
}
c := rv.Cap()
s := ""
if c != 0 {
s = fmt.Sprintf("// capacity: %d", c)
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%s %s%s"+suffix, prefix, rt.ChanDir(), rt.Elem().Name(), s)
case reflect.Func:
if rv.IsNil() && !zero {
return
}
var in, out []string
for i := 0; i < rt.NumIn(); i++ {
x := reflect.Zero(rt.In(i))
in = append(in, fmt.Sprintf("%T", x.Interface()))
}
if rt.IsVariadic() {
i := len(in) - 1
in[i] = "..." + in[i][2:]
}
for i := 0; i < rt.NumOut(); i++ {
out = append(out, rt.Out(i).Name())
}
s := "(" + strings.Join(in, ", ") + ")"
t := strings.Join(out, ", ")
if len(out) > 1 {
t = "(" + t + ")"
}
if t != "" {
t = " " + t
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%sfunc%s%s { ... }"+suffix, prefix, s, t)
case reflect.Map:
keys := rv.MapKeys()
if len(keys) == 0 && !zero {
return
}
var buf bytes.Buffer
nf := IndentFormatter(&buf, "· ")
var skeys []string
for i, k := range keys {
prettyPrint(protect, nf, "", "", k.Interface(), hooks, false, false)
skeys = append(skeys, fmt.Sprintf("%s%10d", buf.Bytes(), i))
buf.Reset()
}
sort.Strings(skeys)
sf.Format("%s%T{%i\n", prefix, v)
for _, k := range skeys {
si := strings.TrimSpace(k[len(k)-10:])
k = k[:len(k)-10]
n, _ := strconv.ParseUint(si, 10, 64)
mv := rv.MapIndex(keys[n])
prettyPrint(protect, sf, fmt.Sprintf("%s: ", k), ",\n", mv.Interface(), hooks, false, false)
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%u}" + suffix)
}
}
// Gopath returns the value of the $GOPATH environment variable or its default
// value if not set.
func Gopath() string {
if r := os.Getenv("GOPATH"); r != "" {
return r
}
// go1.8: https://github.com/golang/go/blob/74628a8b9f102bddd5078ee426efe0fd57033115/doc/code.html#L122
switch runtime.GOOS {
case "plan9":
return os.Getenv("home")
case "windows":
return filepath.Join(os.Getenv("USERPROFILE"), "go")
default:
return filepath.Join(os.Getenv("HOME"), "go")
}
}
// Homepath returns the user's home directory path.
func Homepath() string {
// go1.8: https://github.com/golang/go/blob/74628a8b9f102bddd5078ee426efe0fd57033115/doc/code.html#L122
switch runtime.GOOS {
case "plan9":
return os.Getenv("home")
case "windows":
return os.Getenv("USERPROFILE")
default:
return os.Getenv("HOME")
}
}
// ImportPath returns the import path of the caller or an error, if any.
func ImportPath() (string, error) {
_, file, _, ok := runtime.Caller(1)
if !ok {
return "", fmt.Errorf("runtime.Caller failed")
}
gopath := Gopath()
for _, v := range filepath.SplitList(gopath) {
gp := filepath.Join(v, "src")
path, err := filepath.Rel(gp, file)
if err != nil {
continue
}
return filepath.Dir(path), nil
}
return "", fmt.Errorf("cannot determine import path using GOPATH=%s", gopath)
}
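// Illustrative note (not from the original source): with GOPATH unset, Gopath()
// falls back to $HOME/go on Unix-like systems, %USERPROFILE%\go on Windows and
// $home on Plan 9; ImportPath() then reports the caller's path relative to
// <gopath>/src.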
|
[
"\"GOPATH\"",
"\"home\"",
"\"USERPROFILE\"",
"\"HOME\"",
"\"home\"",
"\"USERPROFILE\"",
"\"HOME\""
] |
[] |
[
"GOPATH",
"home",
"HOME",
"USERPROFILE"
] |
[]
|
["GOPATH", "home", "HOME", "USERPROFILE"]
|
go
| 4 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mongo_api.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
packages/simcore-sdk/src/simcore_sdk/node_ports/config.py
|
"""Takes care of the configurations.
"""
import os
from typing import Dict, Type
# required configurations
PROJECT_ID: str = os.environ.get("SIMCORE_PROJECT_ID", default="undefined")
NODE_UUID: str = os.environ.get("SIMCORE_NODE_UUID", default="undefined")
USER_ID: str = os.environ.get("SIMCORE_USER_ID", default="undefined")
STORAGE_ENDPOINT: str = os.environ.get("STORAGE_ENDPOINT", default="undefined")
STORAGE_VERSION: str = "v0"
POSTGRES_ENDPOINT: str = os.environ.get("POSTGRES_ENDPOINT", "postgres:5432")
POSTGRES_DB: str = os.environ.get("POSTGRES_DB", "simcoredb")
POSTGRES_PW: str = os.environ.get("POSTGRES_PASSWORD", "simcore")
POSTGRES_USER: str = os.environ.get("POSTGRES_USER", "simcore")
# overridable required configurations
STORE: str = os.environ.get("STORAGE_STORE_LOCATION_NAME", default="simcore.s3")
BUCKET: str = os.environ.get("S3_BUCKET_NAME", default="simcore")
# -------------------------------------------------------------------------
NODE_KEYS: Dict[str, bool] = {
"version": True,
"schema": True,
"inputs": True,
"outputs": True,
}
DATA_ITEM_KEYS: Dict[str, bool] = {"key": True, "value": True}
# True if required, defined by JSON schema
SCHEMA_ITEM_KEYS: Dict[str, bool] = {
"key": True,
"label": True,
"description": True,
"type": True,
"displayOrder": True,
"fileToKeyMap": False,
"defaultValue": False,
"widget": False,
}
# allowed types
TYPE_TO_PYTHON_TYPE_MAP: Dict[str, Dict[str, Type]] = {
"integer": {"type": int, "converter": int},
"number": {"type": float, "converter": float},
"boolean": {"type": bool, "converter": bool},
"string": {"type": str, "converter": str},
}
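# Illustrative usage (not part of the original module): a raw port value can be
# coerced according to its declared type, e.g.
#   converter = TYPE_TO_PYTHON_TYPE_MAP["integer"]["converter"]
#   converter("42")  # -> 42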
FILE_TYPE_PREFIX: str = "data:"
|
[] |
[] |
[
"STORAGE_STORE_LOCATION_NAME",
"STORAGE_ENDPOINT",
"POSTGRES_ENDPOINT",
"POSTGRES_USER",
"SIMCORE_USER_ID",
"S3_BUCKET_NAME",
"SIMCORE_PROJECT_ID",
"POSTGRES_DB",
"SIMCORE_NODE_UUID",
"POSTGRES_PASSWORD"
] |
[]
|
["STORAGE_STORE_LOCATION_NAME", "STORAGE_ENDPOINT", "POSTGRES_ENDPOINT", "POSTGRES_USER", "SIMCORE_USER_ID", "S3_BUCKET_NAME", "SIMCORE_PROJECT_ID", "POSTGRES_DB", "SIMCORE_NODE_UUID", "POSTGRES_PASSWORD"]
|
python
| 10 | 0 | |
lib/slack-notify-cmd/main.go
|
package main
import (
"bytes"
"flag"
"fmt"
"log"
"net/http"
"os"
"github.com/joho/godotenv"
)
var (
name = flag.String("n", "報告者", "Who is ?")
message = flag.String("m", "順調じゃよ!", "Message")
)
func init() {
err := godotenv.Load()
if err != nil {
log.Fatal("Error loading .env file")
}
}
func main() {
flag.Parse()
channel := os.Getenv("CHANNEL")
	jsonStr := `{"channel":"` + channel + `","username":"` + *name + `","text":"` + *message + `","icon_emoji":":ghost:"}`
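	// Illustrative alternative (assumption, not in the original): building the
	// payload with encoding/json avoids manual quoting/escaping, e.g.
	//   payload, _ := json.Marshal(map[string]string{
	//       "channel": channel, "username": *name, "text": *message, "icon_emoji": ":ghost:",
	//   })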
req, err := http.NewRequest(
"POST",
os.Getenv("WEBHOOK"),
bytes.NewBuffer([]byte(jsonStr)),
)
fmt.Println(jsonStr)
if err != nil {
fmt.Print(err)
}
req.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
fmt.Print(err)
}
fmt.Print(resp)
defer resp.Body.Close()
}
|
[
"\"CHANNEL\"",
"\"WEBHOOK\""
] |
[] |
[
"WEBHOOK",
"CHANNEL"
] |
[]
|
["WEBHOOK", "CHANNEL"]
|
go
| 2 | 0 | |
scripts/SV1/gatherSV_zinfo_alltiles_denali_inpar.py
|
'''
gather redshift info across all observations for a given target type
'''
#standard python
import sys
import os
import shutil
import unittest
from datetime import datetime
import json
import numpy as np
import fitsio
import glob
import argparse
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
#sys.path.append('../py')
#from this package
import LSS.zcomp.zinfo as zi
parser = argparse.ArgumentParser()
parser.add_argument("--type", help="tracer type to be selected")
parser.add_argument("--release", help="what spectro release to use, e.g. blanc or daily",default='denali') #eventually remove this and just gather everything
parser.add_argument("--basedir", help="base directory for output, default is CSCRATCH",default=os.environ['CSCRATCH'])
parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test')
parser.add_argument("--runmd", help="prod/test; test is for debugging result on first tile",default='prod')
parser.add_argument("--doh5", help="whether or not to do the N best redshift fits, which adds bit of time",default='n')
args = parser.parse_args()
print(args)
type = args.type
basedir = args.basedir
release = args.release
version = args.version
from desitarget.sv1 import sv1_targetmask
tarbit = -1
if type != 'All':
tarbit = int(np.log2(sv1_targetmask.desi_mask[type]))
print('gathering all tile data for type '+type +' in '+release)
tp = 'SV1_DESI_TARGET'
print('targeting bit, target program type; CHECK THEY ARE CORRECT!')
print(tarbit,tp)
#outputs
#basedir for official catalogs'/global/cfs/cdirs/desi/survey/catalogs
svdir = basedir+'/SV1/'
dirout = svdir+'redshift_comps/'+release+'/'+version+'/'+type
if not os.path.exists(svdir):
os.mkdir(svdir)
print('made '+svdir+' directory')
if not os.path.exists(svdir+'redshift_comps'):
os.mkdir(svdir+'redshift_comps')
print('made '+svdir+'redshift_comps directory')
if not os.path.exists(svdir+'redshift_comps/'+release):
os.mkdir(svdir+'redshift_comps/'+release)
print('made '+svdir+'redshift_comps/'+release+' directory')
if not os.path.exists(svdir+'redshift_comps/'+release+'/'+version):
os.mkdir(svdir+'redshift_comps/'+release+'/'+version)
print('made '+svdir+'redshift_comps/'+release+'/'+version+' directory')
if not os.path.exists(dirout):
os.mkdir(dirout)
print('made '+dirout)
if not os.path.exists(svdir+'/redshift_comps/logs'):
os.mkdir(svdir+'/redshift_comps/logs')
print('made '+svdir+'/redshift_comps/logs')
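#note (illustrative alternative, not in the original script): the mkdir chain
#above could be collapsed into os.makedirs(dirout, exist_ok=True) and
#os.makedirs(svdir+'/redshift_comps/logs', exist_ok=True)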
#set up log file
logfn = svdir+'/redshift_comps/logs/log'+datetime.now().isoformat()+'.txt'
logf = open(logfn,'w')
print('a log of what was run is going to '+logfn)
logf.write('running gatherSV_zinfo_alltiles_inpar.py from '+os.getcwd()+'\n\n')
logf.write('arguments were:\n')
logf.write(str(args)+'\n')
expf = '/global/cfs/cdirs/desi/survey/observations/SV1/sv1-exposures.fits'
exposures = fitsio.read(expf) #this will be used in depth calculations
gt = ['BGS+MWS', 'ELG', 'QSO+ELG', 'QSO+LRG','BACKUP','SSV','SCND']
#location of inputs
tiledir = '/global/cfs/cdirs/desi/spectro/redux/'+release+'/tiles/'
tiles = np.unique(exposures['TILEID'])
print('looking for data in these tiles:')
print(tiles)
mfn = svdir+'/redshift_comps/logs/missingexposures.txt'
fo = open(svdir+'/redshift_comps/logs/missingexposures.txt','w')
fo.close()
tilew = []
#for tile in tiles:
def get_tilezinfo(tile):
tt = np.unique(exposures['TARGETS'][exposures['TILEID']==tile])[0]
if np.isin(tt,gt): #that tile used cmx target bits
tile = str(tile)
print('going through tile '+tile)
outf = dirout +'/'+tile+'_'+type+'zinfo.fits'
if os.path.isfile(outf):
print(outf+' exists already')
#tilew.append(tile)
a = True
else:
a = zi.comb_subset_vert_denali(tarbit,tp,tile,exposures,outf,tt,mfn=mfn)
logf.write('compiled data for tile '+str(tile)+' written to '+outf+'\n')
if a:
if args.doh5 == 'y':
print('adding info from hd5 files')
outfall = dirout +'/'+tile+'_'+type+'zinfo_wh5.fits'
if os.path.isfile(outfall):
print(outfall+' exists already')
else:
dt = Table.read(outf)
cols = ['z','zwarn','chi2','deltachi2','spectype','subtype']
for i in range(1,5):
dt['z_'+str(i)]=np.zeros(len(dt))
dt['zwarn_'+str(i)]=np.zeros(len(dt))
dt['chi2_'+str(i)]=np.zeros(len(dt))
dt['deltachi2_'+str(i)]=np.zeros(len(dt))
dt['spectype_'+str(i)] = 'GALAXY'
dt['subtype_'+str(i)] = 'GALAXY'
for ii in range(0,len(dt)):
ln = dt[ii]
#if ln['RZR'] != 'N':
# zfitdir = '/global/cfs/cdirs/desi/users/rongpu/redux/cascades/'+ln['RZR']+'/'+str(ln['TILEID'])
#else:
#zfitdir = tiledir+str(ln['TILEID'])+'/'+ln['subset']+'/'
zfitdir = tiledir+ln['coadd_type']+'/'+str(ln['TILEID'])+'/'+ln['subset'][-8:]+'/'
fl = zfitdir+'/redrock-'+str(ln['PETAL_LOC'])+'-'+str(ln['TILEID'])+'-'+ln['subset']+'.h5'
zfits = zi.get_zfits(fl,ln['TARGETID'])
for jj in range(1,5):
for col in cols:
dt[col+'_'+str(jj)][ii] = zfits[jj][col]
dt.write(outfall,format='fits', overwrite=True)
print('wrote to '+outfall)
return a
else:
print('did not find data in '+release +' for tile '+tile)
return False
if __name__ == '__main__':
from multiprocessing import Pool
import sys
#N = int(sys.argv[2])
N = 32
p = Pool(N)
expf = '/global/cfs/cdirs/desi/survey/observations/SV1/sv1-exposures.fits'
exps = fitsio.read(expf)
tiles = np.unique(exps['TILEID'])
print('going through '+str(len(tiles))+' tiles')
if args.runmd == 'prod':
ntile = len(tiles)
if args.runmd == 'test':
ntile = 1
for j in range(0,ntile,N):
#get_tilezinfo(tiles[j])
inds = []
for i in range(j,j+N):
if i == len(tiles):
break
inds.append(tiles[i])
p.map(get_tilezinfo,inds)
#combine all the tiles
fapp = 'zinfo.fits'
if args.doh5 == 'y':
fapp = 'zinfo_wh5.fits'
dt = Table.read(dirout +'/'+str(tiles[0])+'_'+type+fapp)
dt['TILEID'] = int(tiles[0])
for i in range(1,len(tiles)):
tf = dirout +'/'+str(tiles[i])+'_'+type+fapp
if os.path.isfile(tf):
dtn = Table.read(dirout +'/'+str(tiles[i])+'_'+type+fapp)
dtn['TILEID'] = int(tiles[i])
dt = vstack([dt,dtn])
else:
print('did not find file for tile '+str(tiles[i]))
dt.sort('TARGETID')
col2remove = ['NUMEXP','NUMTILE','LAMBDA_REF','OBJTYPE','NUMTARGET','FIBERFLUX_IVAR_G','FIBERFLUX_IVAR_R','FIBERFLUX_IVAR_Z','DESI_TARGET','BGS_TARGET','MWS_TARGET','HPXPIXEL','NUM_TILEID','NUM_FIBER']
for col in col2remove:
try:
dt.remove_columns([col])
except:
            print('did not find column to remove '+col)
outfall = dirout +'/alltiles_'+type+fapp
dt.write(outfall,format='fits', overwrite=True)
print('wrote to '+outfall)
logf.write('combined all tiles, written to '+outfall)
|
[] |
[] |
[
"CSCRATCH"
] |
[]
|
["CSCRATCH"]
|
python
| 1 | 0 | |
pocket/settings/dev.py
|
from .base import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
SECRET_KEY = os.environ.get('SECRET_KEY', DEFAULT_SECRET_KEY)
|
[] |
[] |
[
"SECRET_KEY"
] |
[]
|
["SECRET_KEY"]
|
python
| 1 | 0 | |
reto-01/atareao/main2.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2022 Lorenzo Carbonell <a.k.a. atareao>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from pathlib import Path
def get_config_dir():
return os.environ['XDG_CONFIG_DIR'] if 'XDG_CONFIG_DIR' in os.environ \
else Path.home() / '.config'
def get_download_dir():
if 'XDG_DOWNLOAD_DIR' in os.environ:
return os.environ['XDG_DOWNLOAD_DIR']
config_dir = Path(get_config_dir())
user_dirs_file = config_dir / 'user-dirs.dirs'
with open(user_dirs_file, 'r') as fr:
for line in fr.readlines():
if line.startswith('XDG_DOWNLOAD_DIR'):
directory = line.split("=")[1].replace("\"",'')
download_dir = directory.replace("$HOME", str(Path.home()))[:-1]
return Path(download_dir)
return None
def main():
try:
download_dir = get_download_dir()
if download_dir is not None and isinstance(download_dir, Path) and \
download_dir.is_dir():
print(f"\nDirectorio: {download_dir}\n")
for afile in [x for x in download_dir.iterdir() if not x.is_dir()]:
print(afile.name)
except Exception as exception:
print(exception)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"XDG_DOWNLOAD_DIR",
"XDG_CONFIG_DIR"
] |
[]
|
["XDG_DOWNLOAD_DIR", "XDG_CONFIG_DIR"]
|
python
| 2 | 0 | |
PyInstaller/configure.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Configure PyInstaller for the current Python installation.
"""
import os
from PyInstaller import compat
from PyInstaller import log as logging
from PyInstaller.compat import is_darwin, is_win
logger = logging.getLogger(__name__)
def test_UPX(config, upx_dir):
logger.debug('Testing for UPX ...')
cmd = "upx"
if upx_dir:
cmd = os.path.normpath(os.path.join(upx_dir, cmd))
hasUPX = 0
try:
vers = compat.exec_command(cmd, '-V', raise_enoent=True).strip().splitlines()
if vers:
v = vers[0].split()[1]
try:
# v = "3.96-git-d7ba31cab8ce"
v = v.split("-")[0]
except Exception:
pass
hasUPX = tuple(map(int, v.split(".")))
if is_win and hasUPX < (1, 92):
logger.error('UPX is too old! Python 2.4 under Windows requires UPX 1.92+.')
hasUPX = 0
except Exception as e:
if isinstance(e, OSError) and e.errno == 2:
# No such file or directory
pass
else:
            logger.info('An exception occurred when testing for UPX:')
logger.info(' %r', e)
if hasUPX:
is_available = 'available'
else:
is_available = 'not available'
logger.info('UPX is %s.', is_available)
config['hasUPX'] = hasUPX
config['upx_dir'] = upx_dir
def _get_pyinst_cache_dir():
old_cache_dir = None
if compat.getenv('PYINSTALLER_CONFIG_DIR'):
cache_dir = compat.getenv('PYINSTALLER_CONFIG_DIR')
elif is_win:
cache_dir = compat.getenv('LOCALAPPDATA')
if not cache_dir:
cache_dir = os.path.expanduser('~\\Application Data')
elif is_darwin:
cache_dir = os.path.expanduser('~/Library/Application Support')
else:
# According to XDG specification: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
old_cache_dir = compat.getenv('XDG_DATA_HOME')
if not old_cache_dir:
old_cache_dir = os.path.expanduser('~/.local/share')
cache_dir = compat.getenv('XDG_CACHE_HOME')
if not cache_dir:
cache_dir = os.path.expanduser('~/.cache')
cache_dir = os.path.join(cache_dir, 'pyinstaller')
# Move old cache-dir, if any, to new location.
if old_cache_dir and not os.path.exists(cache_dir):
old_cache_dir = os.path.join(old_cache_dir, 'pyinstaller')
if os.path.exists(old_cache_dir):
parent_dir = os.path.dirname(cache_dir)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
os.rename(old_cache_dir, cache_dir)
return cache_dir
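# Illustrative result (not from the original source): with no overrides the cache
# dir resolves to ~/.cache/pyinstaller on Linux, ~/Library/Application Support/pyinstaller
# on macOS and %LOCALAPPDATA%\pyinstaller on Windows; PYINSTALLER_CONFIG_DIR wins everywhere.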
def get_config(upx_dir, **kw):
config = {}
test_UPX(config, upx_dir)
config['cachedir'] = _get_pyinst_cache_dir()
return config
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
mxnet/util.java
|
package deepwater.backends.mxnet;
import javax.imageio.ImageIO;
import java.awt.*;
import java.awt.image.BufferedImage;
import java.io.*;
import java.nio.ByteBuffer;
import java.nio.FloatBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.Map;
import java.util.Locale;
import java.nio.file.Paths;
//import java.util.UUID;
import static java.nio.file.FileVisitResult.CONTINUE;
import static java.nio.file.FileVisitResult.TERMINATE;
public final class util {
public static String path(String dirname, String ... more ){
return Paths.get(dirname, more).toString();
}
public static String libName(String name){
String OS = System.getProperty("os.name", "generic").toLowerCase(Locale.ENGLISH);
String lib_suffix = "";
if ((OS.indexOf("mac") >= 0) || (OS.indexOf("darwin") >= 0)) {
lib_suffix = "so";
} else if (OS.indexOf("nux") >= 0) {
lib_suffix = "so";
} else {
System.err.println("Operating system not supported: "+OS);
}
return "lib" + name + "." + lib_suffix;
}
public static void loadCudaLib() throws IOException {
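        // Illustrative prerequisite (not part of the original source): CUDA_HOME or
        // CUDA_PATH is expected to point at a CUDA install root such as /usr/local/cuda,
        // so that <root>/lib64/libcudart.so and friends can be loaded below.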
String cuda_path = System.getenv().get("CUDA_HOME");
if (cuda_path == null) {
cuda_path = System.getenv().get("CUDA_PATH");
}
checkNotNull(cuda_path,"CUDA_HOME or CUDA_PATH hasn't been set!");
System.load(path(cuda_path, "lib64", libName("cudart")));
System.load(path(cuda_path, "lib64", libName("cublas")));
System.load(path(cuda_path, "lib64", libName("curand")));
System.load(path(cuda_path, "lib64", libName("cudnn")));
}
// static String uuid = UUID.randomUUID().toString();
public static String extractLibrary(String resourceName) throws IOException {
String libname = libName(resourceName);
String tmpdir = System.getProperty("java.io.tmpdir");
if (tmpdir.isEmpty()){
tmpdir = Files.createTempDirectory(tmpdir).toString();
}
String target = path(tmpdir,libname);// + uuid;
String origin = path("/deepwater/backends/mxnet/",libname);
InputStream in = util.class.getResourceAsStream(origin);
checkNotNull(in, "No native lib " + origin + " found in jar. Please check installation!");
OutputStream out = new FileOutputStream(target);
checkNotNull(out, "could not create file");
copy(in, out);
assert Files.exists(Paths.get(target));
return target;
}
public static String loadNativeLib(String resourceName) throws IOException {
String f = extractLibrary(resourceName);
System.load(f);
return f;
}
public static <T> T checkNotNull(T reference, String msg) {
if (reference == null) {
throw new NullPointerException(msg);
}
return reference;
}
private static final int BUF_SIZE = 0x1000; // 4K
public static long copy(InputStream from, OutputStream to)
throws IOException {
byte[] buf = new byte[BUF_SIZE];
long total = 0;
while (true) {
int r = from.read(buf);
if (r == -1) {
break;
}
to.write(buf, 0, r);
total += r;
}
return total;
}
public static void deleteFileOrFolder(final Path path) throws IOException {
Files.walkFileTree(path, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs)
throws IOException {
Files.delete(file);
return CONTINUE;
}
@Override
public FileVisitResult visitFileFailed(final Path file, final IOException e) {
return handleException(e);
}
private FileVisitResult handleException(final IOException e) {
e.printStackTrace(); // replace with more robust error handling
return TERMINATE;
}
@Override
public FileVisitResult postVisitDirectory(final Path dir, final IOException e)
throws IOException {
if (e != null) return handleException(e);
Files.delete(dir);
return CONTINUE;
}
});
}
;
public static float[] readFC(String fname, int length) throws IOException {
float[] res = new float[length];
FileChannel inChannel = new RandomAccessFile(fname, "rw").getChannel();
ByteBuffer buffer = ByteBuffer.allocate(4 * res.length);
inChannel.read(buffer);
buffer.flip();
FloatBuffer buffer2 = buffer.asFloatBuffer();
for (int i = 0; i < res.length; i++) {
res[i] = buffer2.get(i);
}
inChannel.close();
return res;
}
public static void writeFC(String fname, float[] res) throws IOException {
new File(fname).createNewFile();
ByteBuffer bbuffer = ByteBuffer.allocate(4 * res.length);
FloatBuffer buffer = bbuffer.asFloatBuffer();
for (int i = 0; i < res.length; i++) buffer.put(res[i]);
buffer.flip();
FileChannel fc = new RandomAccessFile(fname, "rw").getChannel();
fc.write(bbuffer);
fc.close();
}
public static float[] img2pixels(String fname, int w, int h) throws IOException {
float[] pixels = new float[w * h * 3];
img2pixels(fname,w,h,3,pixels,0,null);
return pixels;
}
public static void img2pixels(String fname, int w, int h, int channels, float[] pixels, int start, float[] mean) throws IOException {
// resize the image
BufferedImage img = ImageIO.read(new File(fname.trim()));
BufferedImage scaledImg = new BufferedImage(w, h, img.getType());
Graphics2D g2d = scaledImg.createGraphics();
g2d.drawImage(img, 0, 0, w, h, null);
g2d.dispose();
int r_idx = start;
int g_idx = r_idx + w * h;
int b_idx = g_idx + w * h;
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
Color mycolor = new Color(scaledImg.getRGB(j, i));
int red = mycolor.getRed();
int green = mycolor.getGreen();
int blue = mycolor.getBlue();
if (channels==1) {
pixels[r_idx] = (red+green+blue)/3;
if (mean!=null) {
pixels[r_idx] -= mean[r_idx];
}
} else {
pixels[r_idx] = red;
pixels[g_idx] = green;
pixels[b_idx] = blue;
if (mean!=null) {
pixels[r_idx] -= mean[r_idx-start];
pixels[g_idx] -= mean[g_idx-start];
pixels[b_idx] -= mean[b_idx-start];
}
}
r_idx++;
g_idx++;
b_idx++;
}
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
cmd_get.go
|
package main
import (
"bufio"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"github.com/mattn/go-isatty"
"github.com/x-motemen/ghq/cmdutil"
"github.com/x-motemen/ghq/logger"
"github.com/urfave/cli/v2"
"golang.org/x/sync/errgroup"
)
func doGet(c *cli.Context) error {
var (
args = c.Args().Slice()
andLook = c.Bool("look")
parallel = c.Bool("parallel")
)
g := &getter{
update: c.Bool("update"),
shallow: c.Bool("shallow"),
ssh: c.Bool("p"),
vcs: c.String("vcs"),
silent: c.Bool("silent"),
branch: c.String("branch"),
recursive: !c.Bool("no-recursive"),
}
if parallel {
// force silent in parallel import
g.silent = true
}
var (
firstArg string
scr scanner
)
if len(args) > 0 {
scr = &sliceScanner{slice: args}
} else {
fd := os.Stdin.Fd()
if isatty.IsTerminal(fd) || isatty.IsCygwinTerminal(fd) {
return fmt.Errorf("no target args specified. see `ghq get -h` for more details")
}
scr = bufio.NewScanner(os.Stdin)
}
eg := &errgroup.Group{}
sem := make(chan struct{}, 6)
for scr.Scan() {
target := scr.Text()
if firstArg == "" {
firstArg = target
}
if parallel {
sem <- struct{}{}
eg.Go(func() error {
defer func() { <-sem }()
if err := g.get(target); err != nil {
					logger.Logf("error", "failed to get %q: %s", target, err)
}
return nil
})
} else {
if err := g.get(target); err != nil {
return fmt.Errorf("failed to get %q: %w", target, err)
}
}
}
if err := scr.Err(); err != nil {
return fmt.Errorf("error occurred while reading input: %w", err)
}
if err := eg.Wait(); err != nil {
return err
}
if andLook && firstArg != "" {
return look(firstArg)
}
return nil
}
type sliceScanner struct {
slice []string
index int
}
func (s *sliceScanner) Scan() bool {
s.index++
return s.index <= len(s.slice)
}
func (s *sliceScanner) Text() string {
return s.slice[s.index-1]
}
func (s *sliceScanner) Err() error {
return nil
}
type scanner interface {
Scan() bool
Text() string
Err() error
}
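// detectShell picks the shell used for the interactive `--look` session: $SHELL
// when set, otherwise %COMSPEC% on Windows, and /bin/sh as the final fallback.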
func detectShell() string {
shell := os.Getenv("SHELL")
if shell != "" {
return shell
}
if runtime.GOOS == "windows" {
return os.Getenv("COMSPEC")
}
return "/bin/sh"
}
func look(name string) error {
var (
reposFound []*LocalRepository
mu sync.Mutex
)
if err := walkAllLocalRepositories(func(repo *LocalRepository) {
if repo.Matches(name) {
mu.Lock()
reposFound = append(reposFound, repo)
mu.Unlock()
}
}); err != nil {
return err
}
if len(reposFound) == 0 {
if url, err := newURL(name, false, false); err == nil {
repo, err := LocalRepositoryFromURL(url)
if err != nil {
return err
}
_, err = os.Stat(repo.FullPath)
// if the directory exists
if err == nil {
reposFound = append(reposFound, repo)
}
}
}
switch len(reposFound) {
case 0:
return fmt.Errorf("No repository found")
case 1:
repo := reposFound[0]
cmd := exec.Command(detectShell())
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Dir = repo.FullPath
cmd.Env = append(os.Environ(), "GHQ_LOOK="+filepath.ToSlash(repo.RelPath))
return cmdutil.RunCommand(cmd, true)
default:
b := &strings.Builder{}
b.WriteString("More than one repositories are found; Try more precise name\n")
for _, repo := range reposFound {
b.WriteString(fmt.Sprintf(" - %s\n", strings.Join(repo.PathParts, "/")))
}
return errors.New(b.String())
}
}
|
[
"\"SHELL\"",
"\"COMSPEC\""
] |
[] |
[
"SHELL",
"COMSPEC"
] |
[]
|
["SHELL", "COMSPEC"]
|
go
| 2 | 0 | |
chronos/config.py
|
# TODO: Find another place for this line
import os
CHRONOS = os.getenv('CHRONOS_PATH')
|
[] |
[] |
[
"CHRONOS_PATH"
] |
[]
|
["CHRONOS_PATH"]
|
python
| 1 | 0 | |
src/main/java/io/hawt/dockerui/ProxyServlet.java
|
package io.hawt.dockerui;
import org.apache.http.*;
import org.apache.http.client.methods.AbortableHttpRequest;
import org.apache.http.client.utils.URIUtils;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.message.BasicHeader;
import org.apache.http.message.BasicHttpEntityEnclosingRequest;
import org.apache.http.message.BasicHttpRequest;
import org.apache.http.message.HeaderGroup;
import org.apache.http.util.EntityUtils;
import javax.servlet.ServletConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.Closeable;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.BitSet;
import java.util.Enumeration;
import java.util.Formatter;
import io.hawt.util.Strings;
/**
* Original implementation at https://github.com/mitre/HTTP-Proxy-Servlet, released under ASL 2.0.
*/
/**
* An HTTP reverse proxy/gateway servlet. It is designed to be extended for customization
* if desired. Most of the work is handled by
* <a href="http://hc.apache.org/httpcomponents-client-ga/">Apache HttpClient</a>.
* <p>
* There are alternatives to a servlet based proxy such as Apache mod_proxy if that is available to you. However
* this servlet is easily customizable by Java, secure-able by your web application's security (e.g. spring-security),
* portable across servlet engines, and is embeddable into another web application.
* </p>
* <p>
* Inspiration: http://httpd.apache.org/docs/2.0/mod/mod_proxy.html
* </p>
*
* @author David Smiley [email protected]
*/
public class ProxyServlet extends HttpServlet {
/* INIT PARAMETER NAME CONSTANTS */
/**
* A boolean parameter name to enable logging of input and target URLs to the servlet log.
*/
public static final String P_LOG = "log";
/**
* A boolean parameter name to enable forwarding of the client IP
*/
public static final String P_FORWARDEDFOR = "forwardip";
/**
* The parameter name for the target (destination) URI to proxy to.
*/
private static final String P_TARGET_URI = "targetUri";
/* MISC */
protected boolean doLog = false;
protected boolean doForwardIP = true;
/**
* User agents shouldn't send the url fragment but what if it does?
*/
protected boolean doSendUrlFragment = true;
protected CloseableHttpClient proxyClient;
private static final String STRING_HOST_HEADER_NAME = "Host";
public static final String DEFAULT_HOST_AND_PORT = "http://localhost:4243";
public static final String DEFAULT_SOCKET_PATH = "/var/run/docker.sock";
private ServletContext servletContext;
private String hostAndPort = DEFAULT_HOST_AND_PORT;
@Override
public String getServletInfo() {
return "A proxy servlet by David Smiley, [email protected]";
}
@Override
public void init(ServletConfig servletConfig) throws ServletException {
super.init(servletConfig);
String doForwardIPString = servletConfig.getInitParameter(P_FORWARDEDFOR);
if (doForwardIPString != null) {
this.doForwardIP = Boolean.parseBoolean(doForwardIPString);
}
String doLogStr = servletConfig.getInitParameter(P_LOG);
if (doLogStr != null) {
this.doLog = Boolean.parseBoolean(doLogStr);
}
proxyClient = HttpClients.createSystem();
this.servletContext = servletConfig.getServletContext();
/*
if (LOG.isDebugEnabled()) {
LOG.debug("Registered OpenShiftProtocolSocketFactory Protocol for http: " + Protocol.getProtocol("http").getSocketFactory());
}
*/
boolean useSocket = false;
String dockerHost = System.getenv("DOCKER_HOST");
if (Strings.isBlank(dockerHost)) {
dockerHost = DEFAULT_HOST_AND_PORT;
}
hostAndPort = dockerHost;
if (hostAndPort.startsWith("tcp:")) {
hostAndPort = "http:" + hostAndPort.substring(4);
}
String socketPath = DEFAULT_SOCKET_PATH;
if (useSocket) {
servletContext.log("Using docker socket : " + socketPath);
/*
UnixSocketFactory socketFactory = new UnixSocketFactory(socketPath);
Protocol http = new Protocol("http", socketFactory, 80);
Protocol.registerProtocol("http", http);
*/
} else {
servletContext.log("Using docker URL: " + hostAndPort);
}
}
@Override
public void destroy() {
try {
proxyClient.close();
} catch (IOException e) {
log("While destroying servlet, shutting down httpclient: " + e, e);
}
super.destroy();
}
@Override
protected void service(HttpServletRequest servletRequest, HttpServletResponse servletResponse)
throws ServletException, IOException {
// Make the Request
//note: we won't transfer the protocol version because I'm not sure it would truly be compatible
String method = servletRequest.getMethod();
String proxyRequestUri = rewriteUrlFromRequest(servletRequest);
URI targetUriObj = null;
try {
targetUriObj = new URI(proxyRequestUri);
} catch (URISyntaxException e) {
throw new ServletException(e);
}
HttpRequest proxyRequest;
//spec: RFC 2616, sec 4.3: either of these two headers signal that there is a message body.
if (servletRequest.getHeader(HttpHeaders.CONTENT_LENGTH) != null ||
servletRequest.getHeader(HttpHeaders.TRANSFER_ENCODING) != null) {
HttpEntityEnclosingRequest eProxyRequest = new BasicHttpEntityEnclosingRequest(method, proxyRequestUri);
// Add the input entity (streamed)
// note: we don't bother ensuring we close the servletInputStream since the container handles it
eProxyRequest.setEntity(new InputStreamEntity(servletRequest.getInputStream(), servletRequest.getContentLength()));
proxyRequest = eProxyRequest;
} else
proxyRequest = new BasicHttpRequest(method, proxyRequestUri);
copyRequestHeaders(servletRequest, proxyRequest, targetUriObj);
setXForwardedForHeader(servletRequest, proxyRequest);
HttpResponse proxyResponse = null;
try {
// Execute the request
if (doLog) {
log("proxy " + method + " uri: " + servletRequest.getRequestURI() + " -- " + proxyRequest.getRequestLine().getUri());
}
proxyResponse = proxyClient.execute(URIUtils.extractHost(targetUriObj), proxyRequest);
// Process the response
int statusCode = proxyResponse.getStatusLine().getStatusCode();
if (doResponseRedirectOrNotModifiedLogic(servletRequest, servletResponse, proxyResponse, statusCode, targetUriObj)) {
//the response is already "committed" now without any body to send
//TODO copy response headers?
return;
}
// Pass the response code. This method with the "reason phrase" is deprecated but it's the only way to pass the
// reason along too.
//noinspection deprecation
servletResponse.setStatus(statusCode, proxyResponse.getStatusLine().getReasonPhrase());
copyResponseHeaders(proxyResponse, servletResponse);
// Send the content to the client
copyResponseEntity(proxyResponse, servletResponse);
} catch (Exception e) {
//abort request, according to best practice with HttpClient
if (proxyRequest instanceof AbortableHttpRequest) {
AbortableHttpRequest abortableHttpRequest = (AbortableHttpRequest) proxyRequest;
abortableHttpRequest.abort();
}
if (e instanceof RuntimeException)
throw (RuntimeException) e;
if (e instanceof ServletException)
throw (ServletException) e;
//noinspection ConstantConditions
if (e instanceof IOException)
throw (IOException) e;
throw new RuntimeException(e);
} finally {
// make sure the entire entity was consumed, so the connection is released
if (proxyResponse != null)
EntityUtils.consumeQuietly(proxyResponse.getEntity());
//Note: Don't need to close servlet outputStream:
// http://stackoverflow.com/questions/1159168/should-one-call-close-on-httpservletresponse-getoutputstream-getwriter
}
}
protected boolean doResponseRedirectOrNotModifiedLogic(
HttpServletRequest servletRequest, HttpServletResponse servletResponse,
HttpResponse proxyResponse, int statusCode, URI targetUriObj)
throws ServletException, IOException {
// Check if the proxy response is a redirect
// The following code is adapted from org.tigris.noodle.filters.CheckForRedirect
if (statusCode >= HttpServletResponse.SC_MULTIPLE_CHOICES /* 300 */
&& statusCode < HttpServletResponse.SC_NOT_MODIFIED /* 304 */) {
Header locationHeader = proxyResponse.getLastHeader(HttpHeaders.LOCATION);
if (locationHeader == null) {
throw new ServletException("Received status code: " + statusCode
+ " but no " + HttpHeaders.LOCATION + " header was found in the response");
}
// Modify the redirect to go to this proxy servlet rather that the proxied host
String locStr = rewriteUrlFromResponse(servletRequest, locationHeader.getValue(), targetUriObj.toString());
servletResponse.sendRedirect(locStr);
return true;
}
// 304 needs special handling. See:
// http://www.ics.uci.edu/pub/ietf/http/rfc1945.html#Code304
// We get a 304 whenever passed an 'If-Modified-Since'
// header and the data on disk has not changed; server
// responds w/ a 304 saying I'm not going to send the
// body because the file has not changed.
if (statusCode == HttpServletResponse.SC_NOT_MODIFIED) {
servletResponse.setIntHeader(HttpHeaders.CONTENT_LENGTH, 0);
servletResponse.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
return true;
}
return false;
}
protected void closeQuietly(Closeable closeable) {
try {
closeable.close();
} catch (IOException e) {
log(e.getMessage(), e);
}
}
/**
* These are the "hop-by-hop" headers that should not be copied.
* http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
* I use an HttpClient HeaderGroup class instead of Set<String> because this
* approach does case insensitive lookup faster.
*/
protected static final HeaderGroup hopByHopHeaders;
static {
hopByHopHeaders = new HeaderGroup();
String[] headers = new String[]{
"Connection", "Keep-Alive", "Proxy-Authenticate", "Proxy-Authorization",
"TE", "Trailers", "Transfer-Encoding", "Upgrade"};
for (String header : headers) {
hopByHopHeaders.addHeader(new BasicHeader(header, null));
}
}
/**
* Copy request headers from the servlet client to the proxy request.
*/
protected void copyRequestHeaders(HttpServletRequest servletRequest, HttpRequest proxyRequest, URI targetUriObj) {
// Get an Enumeration of all of the header names sent by the client
Enumeration enumerationOfHeaderNames = servletRequest.getHeaderNames();
while (enumerationOfHeaderNames.hasMoreElements()) {
String headerName = (String) enumerationOfHeaderNames.nextElement();
//Instead the content-length is effectively set via InputStreamEntity
if (headerName.equalsIgnoreCase(HttpHeaders.CONTENT_LENGTH))
continue;
if (hopByHopHeaders.containsHeader(headerName))
continue;
Enumeration headers = servletRequest.getHeaders(headerName);
while (headers.hasMoreElements()) {//sometimes more than one value
String headerValue = (String) headers.nextElement();
// In case the proxy host is running multiple virtual servers,
// rewrite the Host header to ensure that we get content from
// the correct virtual server
if (headerName.equalsIgnoreCase(HttpHeaders.HOST)) {
HttpHost host = URIUtils.extractHost(targetUriObj);
headerValue = host.getHostName();
if (host.getPort() != -1)
headerValue += ":" + host.getPort();
}
proxyRequest.addHeader(headerName, headerValue);
}
}
}
private void setXForwardedForHeader(HttpServletRequest servletRequest,
HttpRequest proxyRequest) {
String headerName = "X-Forwarded-For";
if (doForwardIP) {
String newHeader = servletRequest.getRemoteAddr();
String existingHeader = servletRequest.getHeader(headerName);
if (existingHeader != null) {
newHeader = existingHeader + ", " + newHeader;
}
proxyRequest.setHeader(headerName, newHeader);
}
}
/**
* Copy proxied response headers back to the servlet client.
*/
protected void copyResponseHeaders(HttpResponse proxyResponse, HttpServletResponse servletResponse) {
for (Header header : proxyResponse.getAllHeaders()) {
if (hopByHopHeaders.containsHeader(header.getName()))
continue;
servletResponse.addHeader(header.getName(), header.getValue());
}
}
/**
* Copy response body data (the entity) from the proxy to the servlet client.
*/
protected void copyResponseEntity(HttpResponse proxyResponse, HttpServletResponse servletResponse) throws IOException {
HttpEntity entity = proxyResponse.getEntity();
if (entity != null) {
OutputStream servletOutputStream = servletResponse.getOutputStream();
entity.writeTo(servletOutputStream);
}
}
/**
* Reads the request URI from {@code servletRequest} and rewrites it. It's used to make the new request.
*/
protected String rewriteUrlFromRequest(HttpServletRequest servletRequest) {
ProxyDetails details = new ProxyDetails(hostAndPort, servletRequest, servletContext);
return details.getStringProxyURL();
}
/**
* For a redirect response from the target server, this translates {@code theUrl} to redirect to
* and translates it to one the original client can use.
*/
protected String rewriteUrlFromResponse(HttpServletRequest servletRequest, String theUrl, String targetUri) {
//TODO document example paths
if (theUrl.startsWith(targetUri)) {
String curUrl = servletRequest.getRequestURL().toString();//no query
String pathInfo = servletRequest.getPathInfo();
if (pathInfo != null) {
assert curUrl.endsWith(pathInfo);
curUrl = curUrl.substring(0, curUrl.length() - pathInfo.length());//take pathInfo off
}
theUrl = curUrl + theUrl.substring(targetUri.length());
}
return theUrl;
}
/**
* Encodes characters in the query or fragment part of the URI.
* <p/>
* <p>Unfortunately, an incoming URI sometimes has characters disallowed by the spec. HttpClient
* insists that the outgoing proxied request has a valid URI because it uses Java's {@link URI}.
* To be more forgiving, we must escape the problematic characters. See the URI class for the
* spec.
*
* @param in example: name=value&foo=bar#fragment
*/
protected static CharSequence encodeUriQuery(CharSequence in) {
//Note that I can't simply use URI.java to encode because it will escape pre-existing escaped things.
StringBuilder outBuf = null;
Formatter formatter = null;
for (int i = 0; i < in.length(); i++) {
char c = in.charAt(i);
boolean escape = true;
if (c < 128) {
if (asciiQueryChars.get((int) c)) {
escape = false;
}
} else if (!Character.isISOControl(c) && !Character.isSpaceChar(c)) {//not-ascii
escape = false;
}
if (!escape) {
if (outBuf != null)
outBuf.append(c);
} else {
//escape
if (outBuf == null) {
outBuf = new StringBuilder(in.length() + 5 * 3);
outBuf.append(in, 0, i);
formatter = new Formatter(outBuf);
}
//leading %, 0 padded, width 2, capital hex
formatter.format("%%%02X", (int) c);//TODO
}
}
return outBuf != null ? outBuf : in;
}
protected static final BitSet asciiQueryChars;
static {
char[] c_unreserved = "_-!.~'()*".toCharArray();//plus alphanum
char[] c_punct = ",;:$&+=".toCharArray();
char[] c_reserved = "?/[]@".toCharArray();//plus punct
asciiQueryChars = new BitSet(128);
for (char c = 'a'; c <= 'z'; c++) asciiQueryChars.set((int) c);
for (char c = 'A'; c <= 'Z'; c++) asciiQueryChars.set((int) c);
for (char c = '0'; c <= '9'; c++) asciiQueryChars.set((int) c);
for (char c : c_unreserved) asciiQueryChars.set((int) c);
for (char c : c_punct) asciiQueryChars.set((int) c);
for (char c : c_reserved) asciiQueryChars.set((int) c);
asciiQueryChars.set((int) '%');//leave existing percent escapes in place
}
}
|
[
"\"DOCKER_HOST\""
] |
[] |
[
"DOCKER_HOST"
] |
[]
|
["DOCKER_HOST"]
|
java
| 1 | 0 | |
cmd/abapAddonAssemblyKitCheckPV_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type abapAddonAssemblyKitCheckPVOptions struct {
AbapAddonAssemblyKitEndpoint string `json:"abapAddonAssemblyKitEndpoint,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
AddonDescriptorFileName string `json:"addonDescriptorFileName,omitempty"`
AddonDescriptor string `json:"addonDescriptor,omitempty"`
}
type abapAddonAssemblyKitCheckPVCommonPipelineEnvironment struct {
abap struct {
addonDescriptor string
}
}
func (p *abapAddonAssemblyKitCheckPVCommonPipelineEnvironment) persist(path, resourceName string) {
content := []struct {
category string
name string
value interface{}
}{
{category: "abap", name: "addonDescriptor", value: p.abap.addonDescriptor},
}
errCount := 0
for _, param := range content {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(param.category, param.name), param.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting piper environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Fatal("failed to persist Piper environment")
}
}
// AbapAddonAssemblyKitCheckPVCommand This step checks the validity of an Addon Product Version.
func AbapAddonAssemblyKitCheckPVCommand() *cobra.Command {
const STEP_NAME = "abapAddonAssemblyKitCheckPV"
metadata := abapAddonAssemblyKitCheckPVMetadata()
var stepConfig abapAddonAssemblyKitCheckPVOptions
var startTime time.Time
var commonPipelineEnvironment abapAddonAssemblyKitCheckPVCommonPipelineEnvironment
var logCollector *log.CollectorHook
var createAbapAddonAssemblyKitCheckPVCmd = &cobra.Command{
Use: STEP_NAME,
		Short:            "This step checks the validity of an Addon Product Version.",
Long: `This step checks by calling AAKaaS whether the Addon Product Version in the addonDescriptor configuration file specified via addonDescriptorFileName (e.g. addon.yml) does exist or is a valid successor of an existing Product Version.
It resolves the dotted version string into version, support package stack level and patch level and writes it to the addonDescriptor structure in the Piper commonPipelineEnvironment for usage of subsequent pipeline steps.
<br />
For Terminology refer to the [Scenario Description](https://www.project-piper.io/scenarios/abapEnvironmentAddons/).`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
commonPipelineEnvironment.persist(GeneralConfig.EnvRootPath, "commonPipelineEnvironment")
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Send(&telemetryData, logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
abapAddonAssemblyKitCheckPV(stepConfig, &telemetryData, &commonPipelineEnvironment)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addAbapAddonAssemblyKitCheckPVFlags(createAbapAddonAssemblyKitCheckPVCmd, &stepConfig)
return createAbapAddonAssemblyKitCheckPVCmd
}
func addAbapAddonAssemblyKitCheckPVFlags(cmd *cobra.Command, stepConfig *abapAddonAssemblyKitCheckPVOptions) {
cmd.Flags().StringVar(&stepConfig.AbapAddonAssemblyKitEndpoint, "abapAddonAssemblyKitEndpoint", `https://apps.support.sap.com`, "Base URL to the Addon Assembly Kit as a Service (AAKaaS) system")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User for the Addon Assembly Kit as a Service (AAKaaS) system")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for the Addon Assembly Kit as a Service (AAKaaS) system")
cmd.Flags().StringVar(&stepConfig.AddonDescriptorFileName, "addonDescriptorFileName", `addon.yml`, "File name of the YAML file which describes the Product Version and corresponding Software Component Versions")
cmd.Flags().StringVar(&stepConfig.AddonDescriptor, "addonDescriptor", os.Getenv("PIPER_addonDescriptor"), "Structure in the commonPipelineEnvironment containing information about the Product Version and corresponding Software Component Versions")
cmd.MarkFlagRequired("abapAddonAssemblyKitEndpoint")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("addonDescriptorFileName")
}
// retrieve step metadata
func abapAddonAssemblyKitCheckPVMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "abapAddonAssemblyKitCheckPV",
Aliases: []config.Alias{},
			Description: "This step checks the validity of an Addon Product Version.",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "abapAddonAssemblyKitCredentialsId", Description: "CredentialsId stored in Jenkins for the Addon Assembly Kit as a Service (AAKaaS) system", Type: "jenkins"},
},
Parameters: []config.StepParameters{
{
Name: "abapAddonAssemblyKitEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: `https://apps.support.sap.com`,
},
{
Name: "username",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_username"),
},
{
Name: "password",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_password"),
},
{
Name: "addonDescriptorFileName",
ResourceRef: []config.ResourceReference{},
Scope: []string{},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: `addon.yml`,
},
{
Name: "addonDescriptor",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "abap/addonDescriptor",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_addonDescriptor"),
},
},
},
Outputs: config.StepOutputs{
Resources: []config.StepResources{
{
Name: "commonPipelineEnvironment",
Type: "piperEnvironment",
Parameters: []map[string]interface{}{
{"Name": "abap/addonDescriptor"},
},
},
},
},
},
}
return theMetaData
}
|
[
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_addonDescriptor\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_addonDescriptor\""
] |
[] |
[
"PIPER_addonDescriptor",
"PIPER_password",
"PIPER_username"
] |
[]
|
["PIPER_addonDescriptor", "PIPER_password", "PIPER_username"]
|
go
| 3 | 0 | |
pkg/kube/clients.go
|
package kube
import (
"os"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/jenkins-x/jx-kube-client/v3/pkg/kubeclient"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
fakedyn "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/rest"
)
// LazyCreateDynamicClient lazily creates the dynamic client if its not defined
func LazyCreateDynamicClient(client dynamic.Interface) (dynamic.Interface, error) {
if client != nil {
return client, nil
}
if IsNoKubernetes() {
scheme := runtime.NewScheme()
return fakedyn.NewSimpleDynamicClient(scheme), nil
}
f := kubeclient.NewFactory()
cfg, err := f.CreateKubeConfig()
if err != nil {
return nil, errors.Wrap(err, "failed to get kubernetes config")
}
client, err = dynamic.NewForConfig(cfg)
if err != nil {
return nil, errors.Wrap(err, "error building dynamic clientset")
}
return client, nil
}
// LazyCreateKubeClient lazy creates the kube client if its not defined
func LazyCreateKubeClient(client kubernetes.Interface) (kubernetes.Interface, error) {
return LazyCreateKubeClientWithMandatory(client, false)
}
// LazyCreateKubeClientWithMandatory if mandatory is specified then the env vars are ignored to determine if we use
// kubernetes or not
func LazyCreateKubeClientWithMandatory(client kubernetes.Interface, mandatory bool) (kubernetes.Interface, error) {
if client != nil {
return client, nil
}
if !mandatory && IsNoKubernetes() {
return NewFakeKubernetesClient("default"), nil
}
f := kubeclient.NewFactory()
cfg, err := f.CreateKubeConfig()
if err != nil {
return client, errors.Wrap(err, "failed to get kubernetes config")
}
client, err = kubernetes.NewForConfig(cfg)
if err != nil {
return client, errors.Wrap(err, "error building kubernetes clientset")
}
return client, nil
}
// LazyCreateKubeClientAndNamespace lazy creates the kube client and/or the current namespace if not already defined
func LazyCreateKubeClientAndNamespace(client kubernetes.Interface, ns string) (kubernetes.Interface, string, error) {
if client != nil && ns != "" {
return client, ns, nil
}
if IsNoKubernetes() {
if ns == "" {
ns = "default"
}
if client == nil {
client = NewFakeKubernetesClient(ns)
}
return client, ns, nil
}
if client == nil {
f := kubeclient.NewFactory()
cfg, err := f.CreateKubeConfig()
if err != nil {
return client, ns, errors.Wrap(err, "failed to get kubernetes config")
}
client, err = kubernetes.NewForConfig(cfg)
if err != nil {
return client, ns, errors.Wrap(err, "error building kubernetes clientset")
}
}
if ns == "" {
var err error
ns, err = kubeclient.CurrentNamespace()
if err != nil {
return client, ns, errors.Wrap(err, "failed to get current kubernetes namespace")
}
}
return client, ns, nil
}
// IsInCluster tells if we are running incluster
func IsInCluster() bool {
_, err := rest.InClusterConfig()
return err == nil
}
// IsNoKubernetes returns true if we are inside a GitHub Action or not using kubernetes
func IsNoKubernetes() bool {
// disable k8s by default if inside a github action
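	// Illustrative behaviour (not from the original source):
	//   GITHUB_ACTIONS=true                     -> true
	//   GITHUB_ACTIONS=true, JX_KUBERNETES=true -> false
	//   JX_NO_KUBERNETES=true (outside Actions) -> true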
if strings.ToLower(os.Getenv("GITHUB_ACTIONS")) == "true" {
return strings.ToLower(os.Getenv("JX_KUBERNETES")) != "true"
}
return strings.ToLower(os.Getenv("JX_NO_KUBERNETES")) == "true"
}
// NewFakeKubernetesClient creates a fake k8s client if we have disabled kubernetes
func NewFakeKubernetesClient(ns string) *fake.Clientset {
return fake.NewSimpleClientset(&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: ns,
},
})
}
|
[
"\"GITHUB_ACTIONS\"",
"\"JX_KUBERNETES\"",
"\"JX_NO_KUBERNETES\""
] |
[] |
[
"GITHUB_ACTIONS",
"JX_NO_KUBERNETES",
"JX_KUBERNETES"
] |
[]
|
["GITHUB_ACTIONS", "JX_NO_KUBERNETES", "JX_KUBERNETES"]
|
go
| 3 | 0 | |
source1/dmx/dmx.py
|
import math
import sys
import os.path
from pathlib import Path
from typing import List
from ...utilities.valve_utils import GameInfoFile
from ...utilities import datamodel
def find_by_name_n_type(array, name, elem_type):
for elem in array:
if elem.name == name and elem.type == elem_type:
return elem
return None
class DmeChannel:
def __init__(self, transform: datamodel.Element):
self.__trans = transform
print(self.__trans)
def _quaternion_to_euler(x, y, z, w):
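# Standard quaternion (x, y, z, w) to Euler conversion; returns (X, Y, Z) angles in degrees.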
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
nx = math.degrees(math.atan2(t0, t1))
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
ny = math.degrees(math.asin(t2))
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
nz = math.degrees(math.atan2(t3, t4))
return nx, ny, nz
class Entity:
root = None # type: Session
@classmethod
def set_root(cls, root):
cls.root = root
def __init__(self, animset: datamodel.Element, channel_set: datamodel.Element):
self.animset: datamodel.Element = animset
self.channel_set: datamodel.Element = channel_set
@property
def __transform(self):
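# Some animation sets name the control 'transform', others 'rootTransform'; check both.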
return find_by_name_n_type(self.animset.controls, 'transform', 'DmeTransformControl') or \
find_by_name_n_type(self.animset.controls, 'rootTransform', 'DmeTransformControl')
@property
def position(self):
if self.__transform:
return self.__transform.positionChannel.fromElement.get(self.__transform.positionChannel.fromAttribute)
return 0, 0, 0
@property
def orientation_q(self):
if self.__transform:
return self.__transform.orientationChannel.fromElement.get(
self.__transform.orientationChannel.fromAttribute)
return 0, 0, 0
@property
def orientation(self):
if not math.isnan(self.orientation_q[0]):
return _quaternion_to_euler(*self.orientation_q)
def __repr__(self):
return '{}<name:{} at X:{:.2f} Y:{:.2f} Z:{:.2f} rot: X:{:.2f} Y:{:.2f} Z:{:.2f}>'.format(
self.__class__.__name__, self.animset.name,
*self.position,
*self.orientation)
class Camera(Entity):
pass
class Bone:
@property
def name(self):
return self._element.name
@property
def position(self):
return getattr(self._element.positionChannel.toElement, self._element.positionChannel.toAttribute)
@property
def rotation(self):
return _quaternion_to_euler(
*getattr(self._element.orientationChannel.toElement, self._element.orientationChannel.toAttribute))
@property
def rotation_q(self):
return getattr(self._element.orientationChannel.toElement, self._element.orientationChannel.toAttribute)
def __init__(self, bone_element: datamodel.Element):
self._element = bone_element
def __repr__(self):
return 'Bone<name:{} at X:{:.2f} Y:{:.2f} Z:{:.2f} rot: X:{:.2f} Y:{:.2f} Z:{:.2f} W:{:.2f}>'.format(self.name,
*self.position,
*self.rotation_q)
class Model(Entity):
def __init__(self, animset: datamodel.Element, channel_set: datamodel.Element):
super().__init__(animset, channel_set)
self.bones = []
self.flexes = {}
self.parse()
@property
def name(self):
return self.animset.name
def parse(self):
for bone_elem in self.animset.controls:
if bone_elem.type == 'DmeTransformControl':
bone = Bone(bone_elem)
# print(bone)
self.bones.append(bone)
self.flexes = {a: b for (a, b) in zip(self.animset.gameModel.flexnames, self.animset.gameModel.flexWeights)}
@property
def model_path(self):
return self.animset.gameModel.modelName
@property
def model_file(self):
return self.root.find_model(self.model_path)
@property
def __transform(self):
return find_by_name_n_type(self.animset.controls, 'rootTransform', 'DmeTransformControl')
@property
def root_transform(self):
return self.__transform
class Light(Entity):
@property
def color(self):
r = find_by_name_n_type(self.animset.controls, 'color_red', 'DmElement').value
g = find_by_name_n_type(self.animset.controls, 'color_green', 'DmElement').value
b = find_by_name_n_type(self.animset.controls, 'color_blue', 'DmElement').value
return r, g, b
def __repr__(self):
return '{}<name:{} at X:{:.2f} Y:{:.2f} Z:{:.2f} rot: X:{:.2f} Y:{:.2f} Z:{:.2f} ' \
'color: R:{:.2f} G:{:.2f} B:{:.2f}>'.format(self.__class__.__name__, self.animset.name,
*self.position,
*self.orientation,
*self.color)
class Session:
def _get_proj_root(self, path: Path):
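# Walk up the path until the 'game' directory is found; its parent is treated as the SFM root.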
if path.parts[-1] == 'game':
return path.parent
else:
return self._get_proj_root(path.parent)
def find_map(self, map_name):
for game in self.gameinfo:
maps_folder = os.path.join(self.sfm_path, 'game', game, 'maps')
all_maps = os.listdir(maps_folder)
if map_name in all_maps:
return os.path.join(maps_folder, map_name)
else:
sys.stderr.write('Can\'t find map {}'.format(map_name))
return ""
def find_model(self, model: str):
return self.gameinfo.find_file(model, extention='.mdl', use_recursive=True)
def __init__(self, filepath, game_dir=None):
Entity.set_root(self)
self.sfm_path = game_dir
self.dmx = datamodel.load(filepath)
self.filepath = Path(filepath)
if not self.sfm_path:
game_dir = self._get_proj_root(self.filepath)
self.sfm_path = game_dir
os.environ['VProject'] = str(game_dir)
self.gameinfo = None
gameinfo_path = Path(game_dir) / 'gameinfo.txt'
if gameinfo_path.is_file():
self.gameinfo = GameInfoFile(gameinfo_path)
self.entities = [] # type: List[Entity]
self.session = self.dmx.root
self.map = ''
def parse(self):
active_clip = self.session.activeClip
self.map = active_clip.mapname
sub_clip_track_group = active_clip.subClipTrackGroup
tracks = sub_clip_track_group.tracks[0]
film_clip = tracks.children[0]
animation_sets = film_clip.animationSets
for anim_set in animation_sets: # type: datamodel.Element
channel_set = \
list(filter(lambda a: a.type == 'DmeChannelsClip', self.dmx.find_elements(name=anim_set.name)))[0]
if anim_set.get('gameModel', False):
entity = Model(anim_set, channel_set)
elif anim_set.get('camera', False):
entity = Camera(anim_set, channel_set)
elif anim_set.get('light', False):
entity = Light(anim_set, channel_set)
else:
entity = Entity(anim_set, channel_set)
self.entities.append(entity)
# def load_map(self):
# try:
# import BSP_import
# except:
# return
# map = self.find_map(self.map)
# if map:
# BSP_import.mesh(map, False, os.path.join(self.sfm_path, 'game', 'usermod'), False)
@staticmethod
def get_root_transform(controls: List[datamodel.Element]):
for control in controls:
if control.name == 'rootTransform' and control.type == 'DmeTransformControl':
return control
@staticmethod
def get_element(controls: List[datamodel.Element], name, type):
for control in controls:
if control.name == name and control.type == type:
return control
@staticmethod
def lerp(value, lo, hi):
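# Linear interpolation: value 0.0 maps to lo, value 1.0 maps to hi.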
f = hi - lo
return lo + (f * value)
def load_scene(self):
for entity in self.entities:
if type(entity) is Model:
print('Loading model', entity.name)
self.load_model(entity)
def load_model(self, entity: Model):
from SourceIO.source1.mdl.mdl2model import Source2Blender
import bpy, mathutils
model = entity.model_file
# rot = mathutils.Quaternion(entity.orientation_q)
# rot = rot.to_euler('XZY')
# rot.x, rot.y, rot.z = rot.y, rot.x, rot.z
# rot.x = math.pi / 2 - rot.x
# rot.z = rot.z - math.pi / 2
bl_model = Source2Blender(model, False, self.sfm_path, custom_name=entity.name)
bl_model.load(False)
rot = mathutils.Quaternion(entity.orientation_q).to_euler('XYZ')
rot.y = rot.y - math.pi
# rot.z = rot.z - math.pi
rot = rot.to_quaternion()
bl_model.armature_obj.location = entity.position
bl_model.armature_obj.rotation_mode = "QUATERNION"
bl_model.armature_obj.rotation_quaternion = rot
ob = bl_model.armature_obj
bpy.ops.object.select_all(action="DESELECT")
ob.select_set(True)
bpy.context.view_layer.objects.active = ob
bpy.ops.object.mode_set(mode='POSE')
for bone_ in entity.bones: # type: Bone
print(bone_)
bone = ob.pose.bones.get(bone_.name)
if bone:
rot = mathutils.Quaternion()
rot.x, rot.y, rot.z, rot.w = bone_.rotation_q
# rot.x,rot.y,rot.z,rot.w = bone_.valueOrientation
rot = rot.to_euler('YXZ')
mat = mathutils.Matrix.Translation(bone_.position) @ rot.to_matrix().to_4x4()
bone.matrix_basis.identity()
bone.matrix = bone.parent.matrix @ mat if bone.parent else mat
else:
print("Missing", bone_.name, "bone")
bpy.ops.object.mode_set(mode='OBJECT')
#
# def load_lights(self):
# for aset, cset in self.lights: # type: datamodel.Element,datamodel.Element
# transform = self.get_element(aset.controls, 'transform', 'DmeTransformControl')
# pos = transform.valuePosition
# rot = mathutils.Quaternion()
# rot.x, rot.y, rot.z, rot.w = transform.valueOrientation
# verticalFOV = self.get_element(aset.controls, 'verticalFOV', 'DmElement').channel.toElement
# intensity = self.get_element(aset.controls, 'intensity', 'DmElement').channel.toElement
# r = self.get_element(aset.controls, 'color_red', 'DmElement').value
# g = self.get_element(aset.controls, 'color_green', 'DmElement').value
# b = self.get_element(aset.controls, 'color_blue', 'DmElement').value
# print(aset.items())
# print(transform.items())
# fov = self.lerp(verticalFOV.value, verticalFOV.lo, verticalFOV.hi)
# print(fov)
# rot = rot.to_euler('XYZ')
# rot.x, rot.y, rot.z = rot.y, rot.x, rot.z
# rot.x = math.pi / 2 - rot.x
# rot.z = rot.z - math.pi / 2
# intensity = self.lerp(intensity.value, intensity.lo, intensity.hi)
#
# light_data = bpy.data.lights.new(name="light_2.80", type='SPOT')
# light_data.energy = 30
#
# # create new object with our light datablock
# light_object = bpy.data.objects.new(name="light_2.80", object_data=light_data)
#
# # link light object
# bpy.context.collection.objects.link(light_object)
#
# # make it active
# bpy.context.view_layer.objects.active = light_object
#
# lamp = light_object
# lamp.rotation_euler = rot
# lamp.location = pos
#
# lamp.name = aset.name
# lamp_data = lamp.data
# lamp.scale = [100, 100, 100]
# lamp_data.spot_size = fov * (math.pi / 180)
# lamp_data.use_nodes = True
# lamp_nodes = lamp_data.node_tree.nodes['Emission']
# lamp_nodes.inputs['Strength'].default_value = intensity * 10
# lamp_nodes.inputs['Color'].default_value = (r, g, b, 1)
#
# def create_cameras(self):
# for aset, cset in self.cameras: # type: datamodel.Element,datamodel.Element
# name = aset.name
# cam = bpy.data.cameras.new(name)
# main_collection = bpy.data.collections.new("SFM OBJECTS")
# bpy.context.scene.collection.children.link(main_collection)
# cam_ob = bpy.data.objects.new(name, cam)
# main_collection.objects.link(cam_ob)
# cam_ob.data.lens_unit = 'MILLIMETERS'
# transform = self.get_element(aset.controls, 'transform', 'DmeTransformControl')
# print(transform.positionChannel.toElement.items())
# print(transform.orientationChannel.toElement.items())
# print(transform.items())
# pos = transform.valuePosition
# rot = mathutils.Quaternion()
# rot.x, rot.y, rot.z, rot.w = transform.valueOrientation
# rot = rot.to_euler('XYZ')
# # rot.y = rot.y - math.pi
# rot.x, rot.y, rot.z = rot.y, rot.x, rot.z
# rot.x = math.pi / 2 - rot.x
# rot.z = rot.z - math.pi / 2
# focalDistance = self.get_element(aset.controls, 'focalDistance', 'DmElement').channel.toElement
# focalDistance = self.lerp(focalDistance.value, focalDistance.lo, focalDistance.hi)
# cam_ob.data.lens = focalDistance
# cam_ob.location = pos
# cam_ob.rotation_euler = rot
# cam_ob.data.clip_end = 100 * 500
# cam_ob.scale = [100, 100, 100]
#
# # print(focalDistance)
|
[] |
[] |
[
"VProject"
] |
[]
|
["VProject"]
|
python
| 1 | 0 | |
recipe/gen_patch_json.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import copy
import json
import os
from os.path import join, isdir
import sys
import tqdm
import re
import requests
import pkg_resources
from get_license_family import get_license_family
CHANNEL_NAME = "conda-forge"
CHANNEL_ALIAS = "https://conda.anaconda.org"
SUBDIRS = (
"noarch",
"linux-64",
"linux-armv7l",
"linux-aarch64",
"linux-ppc64le",
"osx-64",
"osx-arm64",
"win-32",
"win-64",
)
REMOVALS = {
"noarch": (
"sendgrid-5.3.0-py_0.tar.bz2",
),
"linux-64": (
"airflow-with-gcp_api-1.9.0-1.tar.bz2",
"airflow-with-gcp_api-1.9.0-2.tar.bz2",
"airflow-with-gcp_api-1.9.0-3.tar.bz2",
"adios-1.13.1-py36hbecc8f4_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"gdk-pixbuf-2.36.9-0.tar.bz2",
"itk-4.12.0-py27_0.tar.bz2",
"itk-4.12.0-py35_0.tar.bz2",
"itk-4.12.0-py36_0.tar.bz2",
"itk-4.13.0-py27_0.tar.bz2",
"itk-4.13.0-py35_0.tar.bz2",
"itk-4.13.0-py36_0.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_1.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_1.tar.bz2",
"libtasn1-4.13-py36_0.tar.bz2",
"libgsasl-1.8.0-py36_1.tar.bz2",
"nipype-0.12.0-0.tar.bz2",
"nipype-0.12.0-py35_0.tar.bz2",
"postgis-2.4.3+9.6.8-0.tar.bz2",
"pyarrow-0.1.post-0.tar.bz2",
"pyarrow-0.1.post-1.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
"pytest-regressions-1.0.1-0.tar.bz2",
"rapidpy-2.5.2-py36_0.tar.bz2",
"smesh-8.3.0b0-1.tar.bz2",
"statuspage-0.3.3-0.tar.bz2",
"statuspage-0.4.0-0.tar.bz2",
"statuspage-0.4.1-0.tar.bz2",
"statuspage-0.5.0-0.tar.bz2",
"statuspage-0.5.1-0.tar.bz2",
"tokenize-rt-2.0.1-py27_0.tar.bz2",
"vaex-core-0.4.0-py27_0.tar.bz2",
),
"osx-64": (
"adios-1.13.1-py36hbecc8f4_0.tar.bz2",
"airflow-with-gcp_api-1.9.0-1.tar.bz2",
"airflow-with-gcp_api-1.9.0-2.tar.bz2",
"arpack-3.6.1-blas_openblash1f444ea_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_1.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_1.tar.bz2",
"flask-rest-orm-0.5.0-py35_0.tar.bz2",
"flask-rest-orm-0.5.0-py36_0.tar.bz2",
"itk-4.12.0-py27_0.tar.bz2",
"itk-4.12.0-py35_0.tar.bz2",
"itk-4.12.0-py36_0.tar.bz2",
"itk-4.13.0-py27_0.tar.bz2",
"itk-4.13.0-py35_0.tar.bz2",
"itk-4.13.0-py36_0.tar.bz2",
"lammps-2018.03.16-.tar.bz2",
"libtasn1-4.13-py36_0.tar.bz2",
"mpb-1.6.2-1.tar.bz2",
"nipype-0.12.0-0.tar.bz2",
"nipype-0.12.0-py35_0.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
"pytest-regressions-1.0.1-0.tar.bz2",
"reentry-1.1.0-py27_0.tar.bz2",
"resampy-0.2.0-py27_0.tar.bz2",
"statuspage-0.3.3-0.tar.bz2",
"statuspage-0.4.0-0.tar.bz2",
"statuspage-0.4.1-0.tar.bz2",
"statuspage-0.5.0-0.tar.bz2",
"statuspage-0.5.1-0.tar.bz2",
"sundials-3.1.0-blas_openblash0edd121_202.tar.bz2",
"vlfeat-0.9.20-h470a237_2.tar.bz2",
"xtensor-python-0.19.1-h3e44d54_0.tar.bz2",
),
"osx-arm64": (
),
"win-32": (
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"glpk-4.59-py27_vc9_0.tar.bz2",
"glpk-4.59-py34_vc10_0.tar.bz2",
"glpk-4.59-py35_vc14_0.tar.bz2",
"glpk-4.60-py27_vc9_0.tar.bz2",
"glpk-4.60-py34_vc10_0.tar.bz2",
"glpk-4.60-py35_vc14_0.tar.bz2",
"glpk-4.61-py27_vc9_0.tar.bz2",
"glpk-4.61-py35_vc14_0.tar.bz2",
"glpk-4.61-py36_0.tar.bz2",
"libspatialindex-1.8.5-py27_0.tar.bz2",
"liknorm-1.3.7-py27_1.tar.bz2",
"liknorm-1.3.7-py35_1.tar.bz2",
"liknorm-1.3.7-py36_1.tar.bz2",
"nlopt-2.4.2-0.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
),
"win-64": (
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"glpk-4.59-py27_vc9_0.tar.bz2",
"glpk-4.59-py34_vc10_0.tar.bz2",
"glpk-4.59-py35_vc14_0.tar.bz2",
"glpk-4.60-py27_vc9_0.tar.bz2",
"glpk-4.60-py34_vc10_0.tar.bz2",
"glpk-4.60-py35_vc14_0.tar.bz2",
"glpk-4.61-py27_vc9_0.tar.bz2",
"glpk-4.61-py35_vc14_0.tar.bz2",
"glpk-4.61-py36_0.tar.bz2",
"itk-4.13.0-py35_0.tar.bz2",
"libspatialindex-1.8.5-py27_0.tar.bz2",
"liknorm-1.3.7-py27_1.tar.bz2",
"liknorm-1.3.7-py35_1.tar.bz2",
"liknorm-1.3.7-py36_1.tar.bz2",
"nlopt-2.4.2-0.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
"pytest-regressions-1.0.1-0.tar.bz2",
),
}
OPERATORS = ["==", ">=", "<=", ">", "<", "!="]
OSX_SDK_FIXES = {
'nodejs-12.8.0-hec2bf70_1': '10.10',
'nodejs-12.1.0-h6de7cb9_1': '10.10',
'nodejs-12.3.1-h6de7cb9_0': '10.10',
'nodejs-12.9.0-hec2bf70_0': '10.10',
'nodejs-12.9.1-hec2bf70_0': '10.10',
'nodejs-12.7.0-hec2bf70_1': '10.10',
'nodejs-12.10.0-hec2bf70_0': '10.10',
'nodejs-12.4.0-h6de7cb9_0': '10.10',
'nodejs-12.11.1-hec2bf70_0': '10.10',
'nodejs-12.7.0-h6de7cb9_0': '10.10',
'nodejs-12.3.0-h6de7cb9_0': '10.10',
'nodejs-10.16.3-hec2bf70_0': '10.10',
'nodejs-12.12.0-hfddbe92_0': '10.10',
'nodejs-12.8.1-hec2bf70_0': '10.10',
'javafx-sdk-11.0.4-h6dcaf97_1': '10.11',
'javafx-sdk-12.0.2-h6dcaf97_1': '10.11',
'javafx-sdk-12.0.2-h6dcaf97_0': '10.11',
'javafx-sdk-11.0.4-h6dcaf97_0': '10.11',
'qt-5.12.1-h1b46049_0': '10.12',
'qt-5.9.7-h8cf7e54_3': '10.12',
'qt-5.9.7-h93ee506_0': '10.12',
'qt-5.9.7-h93ee506_1': '10.12',
'qt-5.12.5-h1b46049_0': '10.12',
'qt-5.9.7-h93ee506_2': '10.12',
'openmpi-mpicxx-4.0.1-h6052eea_2': '10.12',
'openmpi-mpicxx-4.0.1-h6052eea_1': '10.12',
'openmpi-mpicxx-4.0.1-h6052eea_0': '10.12',
'openmpi-mpicxx-4.0.1-hc9558a2_2': '10.12',
'openmpi-mpicxx-4.0.1-hc9558a2_0': '10.12',
'openmpi-mpicxx-4.0.1-hc9558a2_1': '10.12',
'freecad-0.18.3-py37h4764a83_2': '10.12',
'freecad-0.18.3-py37hc453731_1': '10.12',
'freecad-0.18.4-py37hab2b3aa_1': '10.12',
'freecad-0.18.4-py37hab2b3aa_0': '10.12',
'openmpi-mpicc-4.0.1-h24e1f75_1': '10.12',
'openmpi-mpicc-4.0.1-h24e1f75_2': '10.12',
'openmpi-mpicc-4.0.1-h24e1f75_0': '10.12',
'openmpi-mpicc-4.0.1-h516909a_0': '10.12',
'openmpi-mpicc-4.0.1-h516909a_1': '10.12',
'openmpi-mpicc-4.0.1-h516909a_2': '10.12',
'openmpi-mpifort-4.0.1-h939af09_0': '10.12',
'openmpi-mpifort-4.0.1-h6ad152f_2': '10.12',
'openmpi-mpifort-4.0.1-h939af09_2': '10.12',
'openmpi-mpifort-4.0.1-h939af09_1': '10.12',
'openmpi-mpifort-4.0.1-he991be0_0': '10.12',
'openmpi-mpifort-4.0.1-he991be0_1': '10.12',
'openmpi-mpifort-4.0.1-he991be0_2': '10.12',
'reaktoro-1.0.7-py37h99eb986_0': '10.12',
'reaktoro-1.0.7-py37h99eb986_1': '10.12',
'reaktoro-1.0.7-py36h99eb986_0': '10.12',
'reaktoro-1.0.7-py36h99eb986_1': '10.12',
'pyqt-5.12.3-py38he22c54c_1': '10.12',
'pyqt-5.9.2-py37h2a560b1_0': '10.12',
'pyqt-5.12.3-py36he22c54c_1': '10.12',
'pyqt-5.9.2-py27h2a560b1_4': '10.12',
'pyqt-5.9.2-py27h2a560b1_1': '10.12',
'pyqt-5.9.2-py37h2a560b1_4': '10.12',
'pyqt-5.9.2-py36h2a560b1_3': '10.12',
'pyqt-5.9.2-py27h2a560b1_2': '10.12',
'pyqt-5.9.2-py36h2a560b1_1': '10.12',
'pyqt-5.12.3-py27h2a560b1_0': '10.12',
'pyqt-5.12.3-py37h2a560b1_0': '10.12',
'pyqt-5.12.3-py27he22c54c_0': '10.12',
'pyqt-5.12.3-py27he22c54c_1': '10.12',
'pyqt-5.9.2-py37h2a560b1_2': '10.12',
'pyqt-5.9.2-py37h2a560b1_1': '10.12',
'pyqt-5.9.2-py36h2a560b1_0': '10.12',
'pyqt-5.9.2-py36h2a560b1_4': '10.12',
'pyqt-5.9.2-py27h2a560b1_0': '10.12',
'pyqt-5.9.2-py37h2a560b1_3': '10.12',
'pyqt-5.12.3-py38he22c54c_0': '10.12',
'pyqt-5.9.2-py27h2a560b1_3': '10.12',
'pyqt-5.9.2-py36h2a560b1_2': '10.12',
'pyqt-5.12.3-py37he22c54c_0': '10.12',
'pyqt-5.12.3-py36he22c54c_0': '10.12',
'pyqt-5.12.3-py37he22c54c_1': '10.12',
'pyqt-5.12.3-py36h2a560b1_0': '10.12',
'ldas-tools-al-2.6.3-hf543496_0': '10.12',
'ldas-tools-al-2.6.3-hf543496_1': '10.12',
'ldas-tools-al-2.6.4-h4f290e7_1': '10.12',
'ldas-tools-al-2.6.4-h4f290e7_0': '10.12',
'openmpi-4.0.1-ha90c164_2': '10.12',
'openmpi-4.0.1-ha90c164_0': '10.12',
'openmpi-4.0.1-hfcebdee_2': '10.12',
'openmpi-4.0.1-ha90c164_1': '10.12',
'openmpi-4.0.1-hc99cbb1_1': '10.12',
'openmpi-4.0.1-hc99cbb1_0': '10.12',
'openmpi-4.0.1-hc99cbb1_2': '10.12',
}
def _add_removals(instructions, subdir):
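# Merge the hard-coded REMOVALS for this subdir with every package published under the "broken" label.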
r = requests.get(
"https://conda.anaconda.org/conda-forge/"
"label/broken/%s/repodata.json" % subdir
)
if r.status_code != 200:
r.raise_for_status()
data = r.json()
currvals = list(REMOVALS.get(subdir, []))
for pkg_name in data["packages"]:
currvals.append(pkg_name)
instructions["remove"].extend(tuple(set(currvals)))
def _gen_patch_instructions(index, new_index, subdir):
instructions = {
"patch_instructions_version": 1,
"packages": defaultdict(dict),
"revoke": [],
"remove": [],
}
_add_removals(instructions, subdir)
# diff all items in the index and put any differences in the instructions
for fn in index:
assert fn in new_index
# replace any old keys
for key in index[fn]:
assert key in new_index[fn], (key, index[fn], new_index[fn])
if index[fn][key] != new_index[fn][key]:
instructions['packages'][fn][key] = new_index[fn][key]
# add any new keys
for key in new_index[fn]:
if key not in index[fn]:
instructions['packages'][fn][key] = new_index[fn][key]
return instructions
def has_dep(record, name):
return any(dep.split(' ')[0] == name for dep in record.get('depends', ()))
def get_python_abi(version, subdir, build=None):
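# Map a python version to its ABI tag, e.g. "3.7" -> "cp37m" and "2.7" on linux -> "cp27mu";
# a build string starting with "pyXY" overrides the version argument.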
if build is not None:
if re.match(".*py\d\d", build):
version = f"{build[2]}.{build[3]}"
if version.startswith("2.7"):
if subdir.startswith("linux"):
return "cp27mu"
return "cp27m"
elif version.startswith("2.6"):
if subdir.startswith("linux"):
return "cp26mu"
return "cp26m"
elif version.startswith("3.4"):
return "cp34m"
elif version.startswith("3.5"):
return "cp35m"
elif version.startswith("3.6"):
return "cp36m"
elif version.startswith("3.7"):
return "cp37m"
elif version.startswith("3.8"):
return "cp38"
elif version.startswith("3.9"):
return "cp39"
return None
# Workaround for https://github.com/conda/conda-build/pull/3868
def remove_python_abi(record):
if record['name'] in ['python', 'python_abi', 'pypy']:
return
if not has_dep(record, 'python_abi'):
return
depends = record.get('depends', [])
record['depends'] = [dep for dep in depends if dep.split(" ")[0] != "python_abi"]
changes = set([])
def add_python_abi(record, subdir):
record_name = record['name']
# Make existing python and python-dependent packages conflict with pypy
if record_name == "python" and not record['build'].endswith("pypy"):
version = record['version']
new_constrains = record.get('constrains', [])
python_abi = get_python_abi(version, subdir)
new_constrains.append(f"python_abi * *_{python_abi}")
record['constrains'] = new_constrains
return
if has_dep(record, 'python') and not has_dep(record, 'pypy') and not has_dep(record, 'python_abi'):
python_abi = None
new_constrains = record.get('constrains', [])
build = record["build"]
ver_strict_found = False
ver_relax_found = False
for dep in record.get('depends', []):
dep_split = dep.split(' ')
if dep_split[0] == 'python':
if len(dep_split) == 3:
continue
if len(dep_split) == 1:
continue
elif dep_split[1] == "<3":
python_abi = get_python_abi("2.7", subdir, build)
elif dep_split[1].startswith(">="):
m = cb_pin_regex.match(dep_split[1])
if m is None:
python_abi = get_python_abi("", subdir, build)
else:
lower = pad_list(m.group("lower").split("."), 2)[:2]
upper = pad_list(m.group("upper").split("."), 2)[:2]
if lower[0] == upper[0] and int(lower[1]) + 1 == int(upper[1]):
python_abi = get_python_abi(m.group("lower"), subdir, build)
else:
python_abi = get_python_abi("", subdir, build)
else:
python_abi = get_python_abi(dep_split[1], subdir, build)
if python_abi:
new_constrains.append(f"python_abi * *_{python_abi}")
changes.add((dep, f"python_abi * *_{python_abi}"))
ver_strict_found = True
else:
ver_relax_found = True
if not ver_strict_found and ver_relax_found:
new_constrains.append("pypy <0a0")
record['constrains'] = new_constrains
def _gen_new_index(repodata, subdir):
"""Make any changes to the index by adjusting the values directly.
This function returns the new index with the adjustments.
Finally, the new and old indices are then diff'ed to produce the repo
data patches.
"""
index = copy.deepcopy(repodata["packages"])
# deal with windows vc features
if subdir.startswith("win-"):
python_vc_deps = {
'2.6': 'vc 9.*',
'2.7': 'vc 9.*',
'3.3': 'vc 10.*',
'3.4': 'vc 10.*',
'3.5': 'vc 14.*',
'3.6': 'vc 14.*',
'3.7': 'vc 14.*',
}
for fn, record in index.items():
record_name = record['name']
if record_name == 'python':
# remove the track_features key
if 'track_features' in record:
record['track_features'] = None
# add a vc dependency
if not any(d.startswith('vc') for d in record['depends']):
depends = record['depends']
depends.append(python_vc_deps[record['version'][:3]])
record['depends'] = depends
elif 'vc' in record.get('features', ''):
# remove vc from the features key
vc_version = _extract_and_remove_vc_feature(record)
if vc_version:
# add a vc dependency
if not any(d.startswith('vc') for d in record['depends']):
depends = record['depends']
depends.append('vc %d.*' % vc_version)
record['depends'] = depends
proj4_fixes = {"cartopy", "cdo", "gdal", "libspatialite", "pynio", "qgis"}
for fn, record in index.items():
record_name = record["name"]
if record.get('timestamp', 0) < 1604417730000:
if subdir == 'noarch':
remove_python_abi(record)
else:
add_python_abi(record, subdir)
if "license" in record and "license_family" not in record and record["license"]:
family = get_license_family(record["license"])
if family:
record['license_family'] = family
# remove dependency from constrains for twisted
if record_name == "twisted":
new_constrains = [dep for dep in record.get('constrains', ())
if not dep.startswith("pyobjc-framework-cococa")]
if new_constrains != record.get('constrains', ()):
record['constrains'] = new_constrains
if record_name == "starlette-base":
if not any(dep.split(' ')[0] == "starlette" for dep in record.get('constrains', ())):
if 'constrains' in record:
record['constrains'].append(f"starlette {record['version']}")
else:
record['constrains'] = [f"starlette {record['version']}"]
if record_name == "arrow-cpp":
if not any(dep.split(' ')[0] == "arrow-cpp-proc" for dep in record.get('constrains', ())):
if 'constrains' in record:
record['constrains'].append("arrow-cpp-proc * cpu")
else:
record['constrains'] = ["arrow-cpp-proc * cpu"]
if "aws-sdk-cpp" in record['depends']:
i = record['depends'].index('aws-sdk-cpp')
record['depends'][i] = 'aws-sdk-cpp 1.7.164'
if record_name == "pyarrow":
if not any(dep.split(' ')[0] == "arrow-cpp-proc" for dep in record.get('constrains', ())):
if 'constrains' in record:
record['constrains'].append("arrow-cpp-proc * cpu")
else:
record['constrains'] = ["arrow-cpp-proc * cpu"]
if record_name == "kartothek":
if record["version"] in ["3.15.0", "3.15.1", "3.16.0"] \
and "pyarrow >=0.13.0,!=0.14.0,<2" in record["depends"]:
i = record["depends"].index("pyarrow >=0.13.0,!=0.14.0,<2")
record["depends"][i] = "pyarrow >=0.17.1,<2"
# distributed <2.11.0 does not work with msgpack-python >=1.0
# newer versions of distributed require at least msgpack-python >=0.6.0
# so we can fix cases where msgpack-python is unbounded
# https://github.com/conda-forge/distributed-feedstock/pull/114
if record_name == 'distributed':
if 'msgpack-python' in record['depends']:
i = record['depends'].index('msgpack-python')
record['depends'][i] = 'msgpack-python <1.0.0'
# python-language-server <=0.31.9 requires pyflakes <2.2.2
# included explicitly in 0.31.10+
# https://github.com/conda-forge/python-language-server-feedstock/pull/50
version = record['version']
if record_name == 'python-language-server':
pversion = pkg_resources.parse_version(version)
v0_31_9 = pkg_resources.parse_version('0.31.9')
if pversion <= v0_31_9 and 'pyflakes >=1.6.0' in record['depends']:
i = record['depends'].index('pyflakes >=1.6.0')
record['depends'][i] = 'pyflakes >=1.6.0,<2.2.0'
# aioftp >=0.17.0 requires python >=3.7
# aioftp 0.17.x was incorrectly built with 3.6 support
# https://github.com/conda-forge/aioftp-feedstock/pull/12
version = record['version']
if record_name == 'aioftp':
pversion = pkg_resources.parse_version(version)
base_version = pkg_resources.parse_version('0.17.0')
max_version = pkg_resources.parse_version('0.17.2')
if base_version <= pversion <= max_version and 'python >=3.6' in record['depends']:
i = record['depends'].index('python >=3.6')
record['depends'][i] = 'python >=3.7'
# numpydoc >=1.0.0 requires python >=3.5
# https://github.com/conda-forge/numpydoc-feedstock/pull/14
version = record['version']
if record_name == 'numpydoc':
pversion = pkg_resources.parse_version(version)
v1_0_0 = pkg_resources.parse_version('1.0.0')
v1_1_0 = pkg_resources.parse_version('1.1.0')
if v1_0_0 <= pversion <= v1_1_0 and 'python' in record['depends']:
i = record['depends'].index('python')
record['depends'][i] = 'python >=3.5'
# fix deps with wrong names
if record_name in proj4_fixes:
_rename_dependency(fn, record, "proj.4", "proj4")
if record_name == "airflow-with-async":
_rename_dependency(fn, record, "evenlet", "eventlet")
if record_name == "iris":
_rename_dependency(fn, record, "nc_time_axis", "nc-time-axis")
if (record_name == "r-base" and
not any(dep.startswith("_r-mutex ")
for dep in record["depends"])):
depends = record["depends"]
depends.append("_r-mutex 1.* anacondar_1")
record["depends"] = depends
if record_name == "gcc_impl_{}".format(subdir):
_relax_exact(fn, record, "binutils_impl_{}".format(subdir))
deps = record.get("depends", ())
if "ntl" in deps and record_name != "sage":
_rename_dependency(fn, record, "ntl", "ntl 10.3.0")
if "libiconv >=1.15,<1.16.0a0" in deps:
_pin_looser(fn, record, "libiconv", upper_bound="1.17.0")
if 're2' in deps and record.get('timestamp', 0) < 1588349339243:
_rename_dependency(fn, record, "re2", "re2 <2020.05.01")
if 'libffi' in deps and record.get('timestamp', 0) < 1605980936031:
_rename_dependency(fn, record, "libffi", "libffi <3.3.0.a0")
if 'libffi >=3.2.1,<4.0a0' in deps and record.get('timestamp', 0) < 1605980936031:
_pin_stricter(fn, record, "libffi", "x.x")
_relax_libssh2_1_x_pinning(fn, record)
if any(dep.startswith("gf2x") for dep in deps):
_pin_stricter(fn, record, "gf2x", "x.x")
if any(dep.startswith("libnetcdf >=4.7.3") for dep in deps):
_pin_stricter(fn, record, "libnetcdf", "x.x.x.x")
if any(dep.startswith("libarchive >=3.3") for dep in deps):
_pin_looser(fn, record, "libarchive", upper_bound="3.6.0")
if any(dep.startswith("libignition-") or dep == 'libsdformat' for dep in deps):
for dep_idx, _ in enumerate(deps):
dep = record['depends'][dep_idx]
if dep.startswith('libignition-'):
_pin_looser(fn, record, dep.split(" ")[0], max_pin="x")
if dep.startswith('libsdformat '):
_pin_looser(fn, record, dep.split(" ")[0], max_pin="x")
# this doesn't seem to match the _pin_looser or _pin_stricter patterns
# nor _replace_pin
if record_name == "jedi" and record.get("timestamp", 0) < 1592619891258:
for i, dep in enumerate(record["depends"]):
if dep.startswith("parso") and "<" not in dep:
_dep_parts = dep.split(" ")
_dep_parts[1] = _dep_parts[1] + ",<0.8.0"
record["depends"][i] = " ".join(_dep_parts)
# FIXME: disable patching-out blas_openblas feature
# because hotfixes are not applied to gcc7 label
# causing inconsistent behavior
# if (record_name == "blas" and
# record["track_features"] == "blas_openblas"):
# instructions["packages"][fn]["track_features"] = None
# if "features" in record:
# if "blas_openblas" in record["features"]:
# # remove blas_openblas feature
# instructions["packages"][fn]["features"] = _extract_feature(
# record, "blas_openblas")
# if not any(d.startswith("blas ") for d in record["depends"]):
# depends = record['depends']
# depends.append("blas 1.* openblas")
# instructions["packages"][fn]["depends"] = depends
if any(dep.startswith("zstd >=1.4") for dep in deps):
_pin_looser(fn, record, "zstd", max_pin="x.x")
_replace_pin('libunwind >=1.2.1,<1.3.0a0', 'libunwind >=1.2.1,<2.0.0a0', deps, record)
_replace_pin('snappy >=1.1.7,<1.1.8.0a0', 'snappy >=1.1.7,<2.0.0.0a0', deps, record)
_replace_pin('ncurses >=6.1,<6.2.0a0', 'ncurses >=6.1,<6.3.0a0', deps, record)
_replace_pin('abseil-cpp', 'abseil-cpp =20190808', deps, record)
if record_name not in ["blas", "libblas", "libcblas", "liblapack",
"liblapacke", "lapack", "blas-devel"]:
_replace_pin('liblapack >=3.8.0,<3.9.0a0', 'liblapack >=3.8.0,<4.0.0a0', deps, record)
_replace_pin('liblapacke >=3.8.0,<3.9.0a0', 'liblapacke >=3.8.0,<4.0.0a0', deps, record)
# Filter by timestamp as pythia8 also contains python bindings that shouldn't be pinned
if 'pythia8' in deps and record.get('timestamp', 0) < 1584264455759:
i = record['depends'].index('pythia8')
record['depends'][i] = 'pythia8 >=8.240,<8.300.0a0'
# remove features for openjdk and rb2
if ("track_features" in record and
record['track_features'] is not None):
for feat in record["track_features"].split():
if feat.startswith(("rb2", "openjdk")):
record["track_features"] = _extract_track_feature(
record, feat)
llvm_pkgs = ["libclang", "clang", "clang-tools", "llvm", "llvm-tools", "llvmdev"]
for llvm in ["libllvm8", "libllvm9"]:
if any(dep.startswith(llvm) for dep in deps):
if record_name not in llvm_pkgs:
_relax_exact(fn, record, llvm, max_pin="x.x")
else:
_relax_exact(fn, record, llvm, max_pin="x.x.x")
if record_name in llvm_pkgs:
new_constrains = record.get('constrains', [])
version = record["version"]
for pkg in llvm_pkgs:
if record_name == pkg:
continue
if pkg in new_constrains:
new_constrains.remove(pkg)
if any(constraint.startswith(f"{pkg} ") for constraint in new_constrains):
continue
new_constrains.append(f'{pkg} {version}.*')
record['constrains'] = new_constrains
# make sure the libgfortran version is bound from 3 to 4 for osx
if subdir == "osx-64":
_fix_libgfortran(fn, record)
_fix_libcxx(fn, record)
full_pkg_name = fn.replace('.tar.bz2', '')
if full_pkg_name in OSX_SDK_FIXES:
_set_osx_virt_min(fn, record, OSX_SDK_FIXES[full_pkg_name])
# make old binutils packages conflict with the new sysroot packages
# that have renamed the sysroot from conda_cos6 or conda_cos7 to just
# conda
if (
subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
and record_name in [
"binutils", "binutils_impl_" + subdir, "ld_impl_" + subdir]
and record.get('timestamp', 0) < 1589953178153 # 2020-05-20
):
new_constrains = record.get('constrains', [])
new_constrains.append("sysroot_" + subdir + " ==99999999999")
record["constrains"] = new_constrains
# make sure the old compilers conflict with the new sysroot packages
# and they only use libraries from the old compilers
if (
subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
and record_name in [
"gcc_impl_" + subdir, "gxx_impl_" + subdir, "gfortran_impl_" + subdir]
and record['version'] in ['5.4.0', '7.2.0', '7.3.0', '8.2.0']
):
new_constrains = record.get('constrains', [])
for pkg in ["libgcc-ng", "libstdcxx-ng", "libgfortran", "libgomp"]:
new_constrains.append("{} 5.4.*|7.2.*|7.3.*|8.2.*|9.1.*|9.2.*".format(pkg))
new_constrains.append("binutils_impl_" + subdir + " <2.34")
new_constrains.append("ld_impl_" + subdir + " <2.34")
new_constrains.append("sysroot_" + subdir + " ==99999999999")
record["constrains"] = new_constrains
# we pushed a few builds of the compilers past the list of versions
# above which do not use the sysroot packages - this block catches those
# it will also break some test builds of the new compilers but we should
# not be using those anyways and they are marked as broken.
if (
subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
and record_name in [
"gcc_impl_" + subdir, "gxx_impl_" + subdir, "gfortran_impl_" + subdir]
and record['version'] not in ['5.4.0', '7.2.0', '7.3.0', '8.2.0']
and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
):
new_constrains = record.get('constrains', [])
new_constrains.append("sysroot_" + subdir + " ==99999999999")
record["constrains"] = new_constrains
# all ctng activation packages that don't depend on the sysroot_*
# packages are not compatible with the new sysroot_*-based compilers
# root and cling must also be included as they have a builtin C++ interpreter
if (
subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
and record_name in [
"gcc_" + subdir, "gxx_" + subdir, "gfortran_" + subdir,
"binutils_" + subdir, "gcc_bootstrap_" + subdir, "root_base", "cling"]
and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
):
new_constrains = record.get('constrains', [])
new_constrains.append("sysroot_" + subdir + " ==99999999999")
record["constrains"] = new_constrains
# old CDTs with the conda_cos6 or conda_cos7 name in the sysroot need to
# conflict with the new CDT and compiler packages
# all of the new CDTs and compilers depend on the sysroot_{subdir} packages
# so we use a constraint on those
if (
subdir == "noarch"
and (
record_name.endswith("-cos6-x86_64") or
record_name.endswith("-cos7-x86_64") or
record_name.endswith("-cos7-aarch64") or
record_name.endswith("-cos7-ppc64le")
)
and not record_name.startswith("sysroot-")
and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
):
if record_name.endswith("x86_64"):
sys_subdir = "linux-64"
elif record_name.endswith("aarch64"):
sys_subdir = "linux-aarch64"
elif record_name.endswith("ppc64le"):
sys_subdir = "linux-ppc64le"
new_constrains = record.get('constrains', [])
if not any(__r.startswith("sysroot_") for __r in new_constrains):
new_constrains.append("sysroot_" + sys_subdir + " ==99999999999")
record["constrains"] = new_constrains
# make sure pybind11 and pybind11-global have run constraints on
# the abi metapackage
# see https://github.com/conda-forge/conda-forge-repodata-patches-feedstock/issues/104 # noqa
if (
record_name in ["pybind11", "pybind11-global"]
# this version has a constraint sometimes
and (
pkg_resources.parse_version(record["version"])
<= pkg_resources.parse_version("2.6.1")
)
and not any(
c.startswith("pybind11-abi ")
for c in record.get("constrains", [])
)
):
_add_pybind11_abi_constraint(fn, record)
return index
def _add_pybind11_abi_constraint(fn, record):
"""the pybind11-abi package uses the internals version
here are the ranges
v2.2.0 1
v2.2.1 1
v2.2.2 1
v2.2.3 1
v2.2.4 2
v2.3.0 3
v2.4.0 3
v2.4.1 3
v2.4.2 3
v2.4.3 3
v2.5.0 4
v2.6.0 4
v2.6.0b1 4
v2.6.0rc1 4
v2.6.0rc2 4
v2.6.0rc3 4
v2.6.1 4
prior to 2.2.0 we set it to 0
"""
ver = pkg_resources.parse_version(record["version"])
if ver < pkg_resources.parse_version("2.2.0"):
abi_ver = "0"
elif ver < pkg_resources.parse_version("2.2.4"):
abi_ver = "1"
elif ver < pkg_resources.parse_version("2.3.0"):
abi_ver = "2"
elif ver < pkg_resources.parse_version("2.5.0"):
abi_ver = "3"
elif ver <= pkg_resources.parse_version("2.6.1"):
abi_ver = "4"
else:
# past this we should have a constrains there already
raise RuntimeError(
"pybind11 version %s out of range for abi" % record["version"]
)
constrains = record.get("constrains", [])
found_idx = None
for idx in range(len(constrains)):
if constrains[idx].startswith("pybind11-abi "):
found_idx = idx
if found_idx is None:
constrains.append("pybind11-abi ==" + abi_ver)
else:
constrains[found_idx] = "pybind11-abi ==" + abi_ver
record["constrains"] = constrains
def _replace_pin(old_pin, new_pin, deps, record):
"""Replace an exact pin with a new one."""
if old_pin in deps:
i = record['depends'].index(old_pin)
record['depends'][i] = new_pin
def _rename_dependency(fn, record, old_name, new_name):
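# Rename a dependency while keeping any version spec, e.g. "proj.4 >=5.0" -> "proj4 >=5.0".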
depends = record["depends"]
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == old_name),
None
)
if dep_idx is not None:
parts = depends[dep_idx].split(" ")
remainder = (" " + " ".join(parts[1:])) if len(parts) > 1 else ""
depends[dep_idx] = new_name + remainder
record['depends'] = depends
def _fix_libgfortran(fn, record):
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == "libgfortran"),
None
)
if dep_idx is not None:
# make sure respect minimum versions still there
# 'libgfortran' -> >=3.0.1,<4.0.0.a0
# 'libgfortran ==3.0.1' -> ==3.0.1
# 'libgfortran >=3.0' -> >=3.0,<4.0.0.a0
# 'libgfortran >=3.0.1' -> >=3.0.1,<4.0.0.a0
if ("==" in depends[dep_idx]) or ("<" in depends[dep_idx]):
pass
elif depends[dep_idx] == "libgfortran":
depends[dep_idx] = "libgfortran >=3.0.1,<4.0.0.a0"
record['depends'] = depends
elif ">=3.0.1" in depends[dep_idx]:
depends[dep_idx] = "libgfortran >=3.0.1,<4.0.0.a0"
record['depends'] = depends
elif ">=3.0" in depends[dep_idx]:
depends[dep_idx] = "libgfortran >=3.0,<4.0.0.a0"
record['depends'] = depends
elif ">=4" in depends[dep_idx]:
# catches all of 4.*
depends[dep_idx] = "libgfortran >=4.0.0,<5.0.0.a0"
record['depends'] = depends
def _set_osx_virt_min(fn, record, min_vers):
rconst = record.get("constrains", ())
dep_idx = next(
(q for q, dep in enumerate(rconst)
if dep.split(' ')[0] == "__osx"),
None
)
run_constrained = list(rconst)
if dep_idx is None:
run_constrained.append("__osx >=%s" % min_vers)
if run_constrained:
record['constrains'] = run_constrained
def _fix_libcxx(fn, record):
record_name = record["name"]
if record_name not in ["cctools", "ld64", "llvm-lto-tapi"]:
return
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == "libcxx"),
None
)
if dep_idx is not None:
dep_parts = depends[dep_idx].split(" ")
if len(dep_parts) >= 2 and dep_parts[1] == "4.0.1":
# catches all of 4.*
depends[dep_idx] = "libcxx >=4.0.1"
record['depends'] = depends
def pad_list(l, num):
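# Pad a split version with "0" entries, e.g. pad_list(["3", "6"], 3) -> ["3", "6", "0"].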
if len(l) >= num:
return l
return l + ["0"]*(num - len(l))
def get_upper_bound(version, max_pin):
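# Next upper bound at the max_pin granularity, e.g. get_upper_bound("1.8.1", "x.x") -> "1.9.0".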
num_x = max_pin.count("x")
ver = pad_list(version.split("."), num_x)
ver[num_x:] = ["0"]*(len(ver)-num_x)
ver[num_x-1] = str(int(ver[num_x-1])+1)
return ".".join(ver)
def _relax_exact(fn, record, fix_dep, max_pin=None):
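# Relax an exact pin like "libllvm9 9.0.1 hXXX" to ">=9.0.1" (bounded above at max_pin granularity when given).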
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == fix_dep),
None
)
if dep_idx is not None:
dep_parts = depends[dep_idx].split(" ")
if (len(dep_parts) == 3 and \
not any(dep_parts[1].startswith(op) for op in OPERATORS)):
if max_pin is not None:
upper_bound = get_upper_bound(dep_parts[1], max_pin) + "a0"
depends[dep_idx] = "{} >={},<{}".format(*dep_parts[:2], upper_bound)
else:
depends[dep_idx] = "{} >={}".format(*dep_parts[:2])
record['depends'] = depends
def _match_strict_libssh2_1_x_pin(dep):
if dep.startswith("libssh2 >=1.8.0,<1.9.0a0"):
return True
if dep.startswith("libssh2 >=1.8.1,<1.9.0a0"):
return True
if dep.startswith("libssh2 >=1.8.2,<1.9.0a0"):
return True
if dep.startswith("libssh2 1.8.*"):
return True
return False
def _relax_libssh2_1_x_pinning(fn, record):
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if _match_strict_libssh2_1_x_pin(dep)),
None
)
if dep_idx is not None:
depends[dep_idx] = "libssh2 >=1.8.0,<2.0.0a0"
cb_pin_regex = re.compile(r"^>=(?P<lower>\d(\.\d+)*a?),<(?P<upper>\d(\.\d+)*)a0$")
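# cb_pin_regex matches conda-build style pins such as ">=1.2.3,<2.0a0", capturing the "lower" and "upper" bounds.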
def _pin_stricter(fn, record, fix_dep, max_pin):
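# Tighten an upper bound, e.g. "gf2x >=1.2,<2.0a0" with max_pin "x.x" becomes "gf2x >=1.2,<1.3.0a0".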
depends = record.get("depends", ())
dep_indices = [q for q, dep in enumerate(depends) if dep.split(' ')[0] == fix_dep]
for dep_idx in dep_indices:
dep_parts = depends[dep_idx].split(" ")
if len(dep_parts) not in [2, 3]:
continue
m = cb_pin_regex.match(dep_parts[1])
if m is None:
continue
lower = m.group("lower")
upper = m.group("upper").split(".")
new_upper = get_upper_bound(lower, max_pin).split(".")
upper = pad_list(upper, len(new_upper))
new_upper = pad_list(new_upper, len(upper))
if tuple(upper) > tuple(new_upper):
if str(new_upper[-1]) != "0":
new_upper += ["0"]
depends[dep_idx] = "{} >={},<{}a0".format(dep_parts[0], lower, ".".join(new_upper))
if len(dep_parts) == 3:
depends[dep_idx] = "{} {}".format(depends[dep_idx], dep_parts[2])
record['depends'] = depends
def _pin_looser(fn, record, fix_dep, max_pin=None, upper_bound=None):
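# Widen an upper bound, e.g. "libarchive >=3.3.3,<3.4.0a0" with upper_bound "3.6.0" becomes "libarchive >=3.3.3,<3.6.0a0".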
depends = record.get("depends", ())
dep_indices = [q for q, dep in enumerate(depends) if dep.split(' ')[0] == fix_dep]
for dep_idx in dep_indices:
dep_parts = depends[dep_idx].split(" ")
if len(dep_parts) not in [2, 3]:
continue
m = cb_pin_regex.match(dep_parts[1])
if m is None:
continue
lower = m.group("lower")
upper = m.group("upper").split(".")
if upper_bound is None:
new_upper = get_upper_bound(lower, max_pin).split(".")
else:
new_upper = upper_bound.split(".")
upper = pad_list(upper, len(new_upper))
new_upper = pad_list(new_upper, len(upper))
if tuple(upper) < tuple(new_upper):
if str(new_upper[-1]) != "0":
new_upper += ["0"]
depends[dep_idx] = "{} >={},<{}a0".format(dep_parts[0], lower, ".".join(new_upper))
if len(dep_parts) == 3:
depends[dep_idx] = "{} {}".format(depends[dep_idx], dep_parts[2])
record['depends'] = depends
def _extract_and_remove_vc_feature(record):
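# Strip the "vcNN" entry from the features string and return NN as an int (e.g. "vc14" -> 14), or None if absent.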
features = record.get('features', '').split()
vc_features = tuple(f for f in features if f.startswith('vc'))
if not vc_features:
return None
non_vc_features = tuple(f for f in features if f not in vc_features)
vc_version = int(vc_features[0][2:]) # throw away all but the first
if non_vc_features:
record['features'] = ' '.join(non_vc_features)
else:
record['features'] = None
return vc_version
def _extract_feature(record, feature_name):
features = record.get('features', '').split()
features.remove(feature_name)
return " ".join(features) or None
def _extract_track_feature(record, feature_name):
features = record.get('track_features', '').split()
features.remove(feature_name)
return " ".join(features) or None
def main():
# Step 1. Collect initial repodata for all subdirs.
repodatas = {}
if "CF_SUBDIR" in os.environ:
# For local debugging
subdirs = os.environ["CF_SUBDIR"].split(";")
else:
subdirs = SUBDIRS
for subdir in tqdm.tqdm(subdirs, desc="Downloading repodata"):
repodata_url = "/".join(
(CHANNEL_ALIAS, CHANNEL_NAME, subdir, "repodata_from_packages.json"))
response = requests.get(repodata_url)
response.raise_for_status()
repodatas[subdir] = response.json()
# Step 2. Create all patch instructions.
prefix_dir = os.getenv("PREFIX", "tmp")
for subdir in subdirs:
prefix_subdir = join(prefix_dir, subdir)
if not isdir(prefix_subdir):
os.makedirs(prefix_subdir)
# Step 2a. Generate a new index.
new_index = _gen_new_index(repodatas[subdir], subdir)
# Step 2b. Generate the instructions by diff'ing the indices.
instructions = _gen_patch_instructions(
repodatas[subdir]['packages'], new_index, subdir)
# Step 2c. Output this to $PREFIX so that we bundle the JSON files.
patch_instructions_path = join(
prefix_subdir, "patch_instructions.json")
with open(patch_instructions_path, 'w') as fh:
json.dump(
instructions, fh, indent=2,
sort_keys=True, separators=(',', ': '))
if __name__ == "__main__":
sys.exit(main())
|
[] |
[] |
[
"CF_SUBDIR",
"PREFIX"
] |
[]
|
["CF_SUBDIR", "PREFIX"]
|
python
| 2 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Read the Docs Template documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 26 14:19:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# If this is not inserted at the beginning of the list, our 'jira' ticket
# extension is hidden by the system 'jira' API, if it exists.
sys.path.insert(0, os.path.abspath('extensions'))
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'src', 'condor_tests'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'ticket',
'macro',
'macro-def',
'jira',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'HTCondor Manual'
copyright = u'1990-2020, Center for High Throughput Computing, Computer \
Sciences Department, University of Wisconsin-Madison, Madison, WI, US. \
Licensed under the Apache License, Version 2.0.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '9.3'
# The full version, including alpha/beta/rc tags.
release = '9.3.0'
rst_epilog = """
.. |release_date| replace:: Month Day, 2021
"""
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'extensions', 'utils']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'colorful'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('man-pages/bosco_cluster', 'bosco_cluster', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/bosco_findplatform', 'bosco_findplatform', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/bosco_install', 'bosco_install', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/bosco_ssh_start', 'bosco_ssh_start', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/bosco_start', 'bosco_start', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/bosco_stop', 'bosco_stop', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/bosco_uninstall', 'bosco_uninstall', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/classad_eval', 'classad_eval', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/classads', 'classads', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_adstash', 'condor_adstash', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_advertise', 'condor_advertise', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_annex', 'condor_annex', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_check_password', 'condor_check_password', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_check_userlogs', 'condor_check_userlogs', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_chirp', 'condor_chirp', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_configure', 'condor_configure', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_config_val', 'condor_config_val', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_continue', 'condor_continue', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_dagman', 'condor_dagman', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_drain', 'condor_drain', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_evicted_files', 'condor_evicted_files', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_fetchlog', 'condor_fetchlog', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_findhost', 'condor_findhost', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_gather_info', 'condor_gather_info', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_gpu_discovery', 'condor_gpu_discovery', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_history', 'condor_history', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_hold', 'condor_hold', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_install', 'condor_install', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_job_router_info', 'condor_job_router_info', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_master', 'condor_master', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_now', 'condor_now', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_off', 'condor_off', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_on', 'condor_on', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_ping', 'condor_ping', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_pool_job_report', 'condor_pool_job_report', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_power', 'condor_power', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_preen', 'condor_preen', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_prio', 'condor_prio', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_procd', 'condor_procd', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_q', 'condor_q', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_qedit', 'condor_qedit', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_qsub', 'condor_qsub', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_reconfig', 'condor_reconfig', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_release', 'condor_release', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_reschedule', 'condor_reschedule', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_restart', 'condor_restart', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_rm', 'condor_rm', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_rmdir', 'condor_rmdir', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_router_history', 'condor_router_history', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_router_q', 'condor_router_q', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_router_rm', 'condor_router_rm', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_run', 'condor_run', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_set_shutdown', 'condor_set_shutdown', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_sos', 'condor_sos', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_ssh_to_job', 'condor_ssh_to_job', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_stats', 'condor_stats', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_status', 'condor_status', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_store_cred', 'condor_store_cred', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_submit', 'condor_submit', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_submit_dag', 'condor_submit_dag', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_suspend', 'condor_suspend', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_tail', 'condor_tail', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_create', 'condor_token_create', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_fetch', 'condor_token_fetch', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_list', 'condor_token_list', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_request', 'condor_token_request', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_request_approve', 'condor_token_request_approve', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_request_auto_approve', 'condor_token_request_auto_approve', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_request_list', 'condor_token_request_list', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_top', 'condor_top', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_transfer_data', 'condor_transfer_data', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_transform_ads', 'condor_transform_ads', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_update_machine_ad', 'condor_update_machine_ad', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_updates_stats', 'condor_updates_stats', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_urlfetch', 'condor_urlfetch', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_userlog', 'condor_userlog', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_userprio', 'condor_userprio', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_vacate', 'condor_vacate', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_vacate_job', 'condor_vacate_job', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_version', 'condor_version', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_wait', 'condor_wait', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_watch_q', 'condor_watch_q', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_who', 'condor_who', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/get_htcondor', 'get_htcondor', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/gidd_alloc', 'gidd_alloc', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/htcondor', 'htcondor', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/procd_ctl', 'procd_ctl', u'HTCondor Manual', [u'HTCondor Team'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- conf.py contains common configuration and man pages configuration
# -- full_conf.py contains configuration for the whole manual
sys.path.append(os.path.dirname(__file__))
MANPAGES = os.environ.get('MANPAGES') == 'True'
if not MANPAGES:
from full_conf import *
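# Illustrative usage (assumption, the exact build commands are not part of this conf.py):
# the MANPAGES environment variable switches between the two build modes, e.g.
#   MANPAGES=True sphinx-build -b man . _build/man     # man pages only
#   sphinx-build -b html . _build/html                 # full manual via full_conf.py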
|
[] |
[] |
[
"MANPAGES"
] |
[]
|
["MANPAGES"]
|
python
| 1 | 0 | |
logger/logrus.go
|
package logger
import (
"os"
"github.com/sirupsen/logrus"
prefixed "github.com/x-cray/logrus-prefixed-formatter"
)
// Init configures the global logrus logger: prefixed text formatter, stdout output,
// and debug level when the DEBUG environment variable is set.
func Init() {
logrus.SetFormatter(&prefixed.TextFormatter{
TimestampFormat: "2006/01/02 15:04:05",
FullTimestamp: true,
})
logrus.SetOutput(os.Stdout)
if len(os.Getenv("DEBUG")) != 0 {
logrus.SetLevel(logrus.DebugLevel)
}
}
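// Illustrative usage (assumption, not part of this package): call Init once at startup,
// then log through logrus as usual:
//
//	func main() {
//		logger.Init()
//		logrus.Info("service starting")  // always printed
//		logrus.Debug("verbose details")  // printed only when DEBUG is set, e.g. DEBUG=1
//	}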
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
cmd/gitopsUpdateDeployment_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/validation"
"github.com/spf13/cobra"
)
type gitopsUpdateDeploymentOptions struct {
BranchName string `json:"branchName,omitempty"`
CommitMessage string `json:"commitMessage,omitempty"`
ServerURL string `json:"serverUrl,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
FilePath string `json:"filePath,omitempty"`
ContainerName string `json:"containerName,omitempty"`
ContainerRegistryURL string `json:"containerRegistryUrl,omitempty"`
ContainerImageNameTag string `json:"containerImageNameTag,omitempty"`
ChartPath string `json:"chartPath,omitempty"`
HelmValues []string `json:"helmValues,omitempty"`
DeploymentName string `json:"deploymentName,omitempty"`
Tool string `json:"tool,omitempty" validate:"possible-values=kubectl helm"`
}
// GitopsUpdateDeploymentCommand Updates Kubernetes Deployment Manifest in an Infrastructure Git Repository
func GitopsUpdateDeploymentCommand() *cobra.Command {
const STEP_NAME = "gitopsUpdateDeployment"
metadata := gitopsUpdateDeploymentMetadata()
var stepConfig gitopsUpdateDeploymentOptions
var startTime time.Time
var logCollector *log.CollectorHook
var splunkClient *splunk.Splunk
telemetryClient := &telemetry.Telemetry{}
var createGitopsUpdateDeploymentCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Updates Kubernetes Deployment Manifest in an Infrastructure Git Repository",
Long: `This step allows you to update the deployment manifest for Kubernetes in a git repository.
It can for example be used for GitOps scenarios where the update of the manifests triggers an update of the corresponding deployment in Kubernetes.
As of today, it supports the update of deployment yaml files via kubectl patch and update a whole helm template.
For kubectl the container inside the yaml must be described within the following hierarchy: ` + "`" + `{"spec":{"template":{"spec":{"containers":[{...}]}}}}` + "`" + `
For helm the whole template is generated into a file and uploaded into the repository.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient = &splunk.Splunk{}
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
validation, err := validation.New(validation.WithJSONNamesForStructFields(), validation.WithPredefinedErrorMessages())
if err != nil {
return err
}
if err = validation.ValidateStruct(stepConfig); err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
stepTelemetryData := telemetry.CustomData{}
stepTelemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
stepTelemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
stepTelemetryData.ErrorCategory = log.GetErrorCategory().String()
stepTelemetryData.PiperCommitHash = GitCommit
telemetryClient.SetData(&stepTelemetryData)
telemetryClient.Send()
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Send(telemetryClient.GetData(), logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetryClient.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
gitopsUpdateDeployment(stepConfig, &stepTelemetryData)
stepTelemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addGitopsUpdateDeploymentFlags(createGitopsUpdateDeploymentCmd, &stepConfig)
return createGitopsUpdateDeploymentCmd
}
func addGitopsUpdateDeploymentFlags(cmd *cobra.Command, stepConfig *gitopsUpdateDeploymentOptions) {
cmd.Flags().StringVar(&stepConfig.BranchName, "branchName", `master`, "The name of the branch where the changes should get pushed into.")
cmd.Flags().StringVar(&stepConfig.CommitMessage, "commitMessage", os.Getenv("PIPER_commitMessage"), "The commit message of the commit that will be done to do the changes.")
cmd.Flags().StringVar(&stepConfig.ServerURL, "serverUrl", `https://github.com`, "GitHub server url to the repository.")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User name for git authentication")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password/token for git authentication.")
cmd.Flags().StringVar(&stepConfig.FilePath, "filePath", os.Getenv("PIPER_filePath"), "Relative path in the git repository to the deployment descriptor file that shall be updated")
cmd.Flags().StringVar(&stepConfig.ContainerName, "containerName", os.Getenv("PIPER_containerName"), "The name of the container to update")
cmd.Flags().StringVar(&stepConfig.ContainerRegistryURL, "containerRegistryUrl", os.Getenv("PIPER_containerRegistryUrl"), "http(s) url of the Container registry where the image is located")
cmd.Flags().StringVar(&stepConfig.ContainerImageNameTag, "containerImageNameTag", os.Getenv("PIPER_containerImageNameTag"), "Container image name with version tag to annotate in the deployment configuration.")
cmd.Flags().StringVar(&stepConfig.ChartPath, "chartPath", os.Getenv("PIPER_chartPath"), "Defines the chart path for deployments using helm.")
cmd.Flags().StringSliceVar(&stepConfig.HelmValues, "helmValues", []string{}, "List of helm values as YAML file reference or URL (as per helm parameter description for `-f` / `--values`)")
cmd.Flags().StringVar(&stepConfig.DeploymentName, "deploymentName", os.Getenv("PIPER_deploymentName"), "Defines the name of the deployment.")
cmd.Flags().StringVar(&stepConfig.Tool, "tool", `kubectl`, "Defines the tool which should be used to update the deployment description.")
cmd.MarkFlagRequired("branchName")
cmd.MarkFlagRequired("serverUrl")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("filePath")
cmd.MarkFlagRequired("containerRegistryUrl")
cmd.MarkFlagRequired("containerImageNameTag")
cmd.MarkFlagRequired("tool")
}
// retrieve step metadata
func gitopsUpdateDeploymentMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "gitopsUpdateDeployment",
Aliases: []config.Alias{},
Description: "Updates Kubernetes Deployment Manifest in an Infrastructure Git Repository",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "gitHttpsCredentialsId", Description: "Jenkins 'Username with password' credentials ID containing username/password for http access to your git repository.", Type: "jenkins"},
},
Resources: []config.StepResources{
{Name: "deployDescriptor", Type: "stash"},
},
Parameters: []config.StepParameters{
{
Name: "branchName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: `master`,
},
{
Name: "commitMessage",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_commitMessage"),
},
{
Name: "serverUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "githubServerUrl"}},
Default: `https://github.com`,
},
{
Name: "username",
ResourceRef: []config.ResourceReference{
{
Name: "gitHttpsCredentialsId",
Param: "username",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_username"),
},
{
Name: "password",
ResourceRef: []config.ResourceReference{
{
Name: "gitHttpsCredentialsId",
Param: "password",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_password"),
},
{
Name: "filePath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_filePath"),
},
{
Name: "containerName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_containerName"),
},
{
Name: "containerRegistryUrl",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "container/registryUrl",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "dockerRegistryUrl"}},
Default: os.Getenv("PIPER_containerRegistryUrl"),
},
{
Name: "containerImageNameTag",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "container/imageNameTag",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "image", Deprecated: true}, {Name: "containerImage"}},
Default: os.Getenv("PIPER_containerImageNameTag"),
},
{
Name: "chartPath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "helmChartPath"}},
Default: os.Getenv("PIPER_chartPath"),
},
{
Name: "helmValues",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
Default: []string{},
},
{
Name: "deploymentName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "helmDeploymentName"}},
Default: os.Getenv("PIPER_deploymentName"),
},
{
Name: "tool",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: `kubectl`,
},
},
},
Containers: []config.Container{
{Image: "dtzar/helm-kubectl:3.3.4", WorkingDir: "/config", Options: []config.Option{{Name: "-u", Value: "0"}}, Conditions: []config.Condition{{ConditionRef: "strings-equal", Params: []config.Param{{Name: "tool", Value: "helm"}}}}},
{Image: "dtzar/helm-kubectl:2.17.0", WorkingDir: "/config", Options: []config.Option{{Name: "-u", Value: "0"}}, Conditions: []config.Condition{{ConditionRef: "strings-equal", Params: []config.Param{{Name: "tool", Value: "kubectl"}}}}},
},
},
}
return theMetaData
}
|
[
"\"PIPER_commitMessage\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_filePath\"",
"\"PIPER_containerName\"",
"\"PIPER_containerRegistryUrl\"",
"\"PIPER_containerImageNameTag\"",
"\"PIPER_chartPath\"",
"\"PIPER_deploymentName\"",
"\"PIPER_commitMessage\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_filePath\"",
"\"PIPER_containerName\"",
"\"PIPER_containerRegistryUrl\"",
"\"PIPER_containerImageNameTag\"",
"\"PIPER_chartPath\"",
"\"PIPER_deploymentName\""
] |
[] |
[
"PIPER_deploymentName",
"PIPER_commitMessage",
"PIPER_password",
"PIPER_username",
"PIPER_chartPath",
"PIPER_containerImageNameTag",
"PIPER_containerRegistryUrl",
"PIPER_containerName",
"PIPER_filePath"
] |
[]
|
["PIPER_deploymentName", "PIPER_commitMessage", "PIPER_password", "PIPER_username", "PIPER_chartPath", "PIPER_containerImageNameTag", "PIPER_containerRegistryUrl", "PIPER_containerName", "PIPER_filePath"]
|
go
| 9 | 0 | |
src/test/java/com/rockset/client/TestQuery.java
|
package com.rockset.client;
import com.rockset.client.model.QueryRequest;
import com.rockset.client.model.QueryRequestSql;
import com.rockset.client.model.QueryResponse;
import org.testng.Assert;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
public class TestQuery {
private RocksetClient client;
private String collectionName;
@BeforeSuite
public void setUp() throws Exception {
String apiKey = System.getenv("ROCKSET_APIKEY");
String apiServer = System.getenv("ROCKSET_APISERVER");
if (apiKey == null || apiServer == null) {
throw new Exception(
"To run unit tests, please set ROCKSET_APIKEY and ROCKSET_APISERVER " +
"environment variables.");
}
this.client = new RocksetClient(apiKey, apiServer);
}
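// Illustrative invocation (assumption: build tool and server host depend on your setup):
//   ROCKSET_APIKEY=<key> ROCKSET_APISERVER=<api server host> mvn test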
@Test
public void testQuery() throws Exception {
QueryRequest request = new QueryRequest()
.sql(new QueryRequestSql()
.query("select * from \"_events\" limit 1"));
QueryResponse response = client.query(request);
Assert.assertTrue(response.getResults().size() == 1
|| response.getResults().size() == 0);
}
}
|
[
"\"ROCKSET_APIKEY\"",
"\"ROCKSET_APISERVER\""
] |
[] |
[
"ROCKSET_APIKEY",
"ROCKSET_APISERVER"
] |
[]
|
["ROCKSET_APIKEY", "ROCKSET_APISERVER"]
|
java
| 2 | 0 | |
backend/tessend/tessend.go
|
package tessend
import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"os"
"strconv"
"sync"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/google/uuid"
dto "github.com/prometheus/client_model/go"
corev2 "github.com/sensu/sensu-go/api/core/v2"
"github.com/sensu/sensu-go/backend/eventd"
"github.com/sensu/sensu-go/backend/messaging"
"github.com/sensu/sensu-go/backend/ringv2"
"github.com/sensu/sensu-go/backend/store"
"github.com/sensu/sensu-go/backend/store/etcd"
"github.com/sensu/sensu-go/version"
"github.com/sirupsen/logrus"
)
const (
// componentName identifies Tessend as the component/daemon implemented in this
// package.
componentName = "tessend"
// tessenURL is the http endpoint for the tessen service.
tessenURL = "https://tessen.sensu.io/v2/data"
// tessenIntervalHeader is the name of the header that the tessen service
// will return to update the reporting interval of the tessen daemon.
tessenIntervalHeader = "tessen-reporting-interval"
// ringUpdateInterval is the interval, in seconds, that TessenD will
// update the ring with any added/removed cluster members.
ringUpdateInterval = 450 * time.Second
// ringBackendKeepalive is the length of time, in seconds, that the
// ring considers an entry alive.
ringBackendKeepalive = 900
// perResourceDuration is the duration of time, in seconds, that TessenD will
// wait in between resources when collecting its respective count.
perResourceDuration = 5 * time.Second
)
var (
// resourceMetrics maps the metric name to the etcd function
// responsible for retrieving the resources store path.
resourceMetrics = map[string]func(context.Context, string) string{
"asset_count": etcd.GetAssetsPath,
"check_count": etcd.GetCheckConfigsPath,
"cluster_role_count": etcd.GetClusterRolesPath,
"cluster_role_binding_count": etcd.GetClusterRoleBindingsPath,
"entity_count": etcd.GetEntitiesPath,
"event_count": etcd.GetEventsPath,
"filter_count": etcd.GetEventFiltersPath,
"handler_count": etcd.GetHandlersPath,
"hook_count": etcd.GetHookConfigsPath,
"mutator_count": etcd.GetMutatorsPath,
"namespace_count": etcd.GetNamespacesPath,
"role_count": etcd.GetRolesPath,
"role_binding_count": etcd.GetRoleBindingsPath,
"silenced_count": etcd.GetSilencedPath,
"user_count": etcd.GetUsersPath,
}
)
// Tessend is the tessen daemon.
type Tessend struct {
interval uint32
store store.Store
ctx context.Context
cancel context.CancelFunc
errChan chan error
ring *ringv2.Ring
interrupt chan *corev2.TessenConfig
client *clientv3.Client
url string
backendID string
bus messaging.MessageBus
messageChan chan interface{}
subscription []messaging.Subscription
duration time.Duration
AllowOptOut bool
config *corev2.TessenConfig
}
// Option is a functional option.
type Option func(*Tessend) error
// Config configures Tessend.
type Config struct {
Store store.Store
RingPool *ringv2.Pool
Client *clientv3.Client
Bus messaging.MessageBus
}
// New creates a new TessenD.
func New(ctx context.Context, c Config, opts ...Option) (*Tessend, error) {
t := &Tessend{
interval: corev2.DefaultTessenInterval,
store: c.Store,
client: c.Client,
errChan: make(chan error, 1),
url: tessenURL,
backendID: uuid.New().String(),
bus: c.Bus,
messageChan: make(chan interface{}, 1),
duration: perResourceDuration,
AllowOptOut: true,
}
t.ctx, t.cancel = context.WithCancel(ctx)
t.interrupt = make(chan *corev2.TessenConfig, 1)
key := ringv2.Path("global", "backends")
t.ring = c.RingPool.Get(key)
return t, nil
}
// Start the Tessen daemon.
func (t *Tessend) Start() error {
tessen, err := t.store.GetTessenConfig(t.ctx)
// create the default tessen config if one does not already exist
if err != nil || tessen == nil {
tessen = corev2.DefaultTessenConfig()
err = t.store.CreateOrUpdateTessenConfig(t.ctx, tessen)
if err != nil {
// log the error and continue with the default config
logger.WithError(err).Error("unable to update tessen store")
}
}
t.config = tessen
if err := t.ctx.Err(); err != nil {
return err
}
if err = t.subscribe(messaging.TopicTessen, messaging.TopicTessenMetric); err != nil {
return err
}
go t.startMessageHandler()
go t.startWatcher()
go t.startRingUpdates()
go t.startPromMetricsUpdates()
go t.start()
// Attempt to send data immediately if tessen is enabled
if t.enabled() {
go t.collectAndSend()
}
return nil
}
// Stop the Tessen daemon.
func (t *Tessend) Stop() error {
if err := t.ring.Remove(t.ctx, t.backendID); err != nil {
logger.WithField("key", t.backendID).WithError(err).Error("error removing key from the ring")
} else {
logger.WithField("key", t.backendID).Debug("removed a key from the ring")
}
for _, sub := range t.subscription {
if err := sub.Cancel(); err != nil {
logger.WithError(err).Error("unable to unsubscribe from message bus")
}
}
t.cancel()
close(t.messageChan)
return nil
}
// Err returns a channel on which to listen for terminal errors.
func (t *Tessend) Err() <-chan error {
return t.errChan
}
// Name returns the daemon name.
func (t *Tessend) Name() string {
return componentName
}
// Receiver returns the tessen receiver channel.
func (t *Tessend) Receiver() chan<- interface{} {
return t.messageChan
}
// subscribe subscribes to multiple message bus topics.
func (t *Tessend) subscribe(subscriptions ...string) error {
for _, s := range subscriptions {
sub, err := t.bus.Subscribe(s, componentName, t)
if err != nil {
return err
}
t.subscription = append(t.subscription, sub)
}
return nil
}
// startMessageHandler listens to the message channel and handles incoming messages.
func (t *Tessend) startMessageHandler() {
var hostname string
var err error
for {
msg, ok := <-t.messageChan
if !ok {
logger.Debug("tessen message channel closed")
return
}
tessen, ok := msg.(*corev2.TessenConfig)
if ok {
data := t.getDataPayload()
t.getTessenConfigMetrics(time.Now().Unix(), tessen, data)
logger.WithFields(logrus.Fields{
"url": t.url,
"id": data.Cluster.ID,
"opt-out": tessen.OptOut,
data.Metrics.Points[0].Name: data.Metrics.Points[0].Value,
}).Info("sending opt-out status event to tessen")
_ = t.send(data)
continue
}
metrics, ok := msg.([]corev2.MetricPoint)
if ok {
if t.enabled() {
data := t.getDataPayload()
now := time.Now().Unix()
for _, metric := range metrics {
if hostname, err = os.Hostname(); err != nil {
logger.WithError(err).Error("error getting hostname")
}
metric.Tags = append(metric.Tags, &corev2.MetricTag{Name: "hostname", Value: hostname})
metric.Timestamp = now
appendInternalTag(&metric)
logMetric(&metric)
data.Metrics.Points = append(data.Metrics.Points, &metric)
}
logger.WithFields(logrus.Fields{
"url": t.url,
"id": data.Cluster.ID,
"metric_points": len(data.Metrics.Points),
}).Info("sending web ui metrics to tessen")
_ = t.send(data)
}
continue
}
logger.WithField("msg", msg).Errorf("received invalid message on tessen subscription channel")
}
}
// startWatcher watches the TessenConfig store for changes to the opt-out configuration.
func (t *Tessend) startWatcher() {
watchChan := t.store.GetTessenConfigWatcher(t.ctx)
for {
select {
case watchEvent, ok := <-watchChan:
if !ok {
// The watchChan has closed. Restart the watcher.
watchChan = t.store.GetTessenConfigWatcher(t.ctx)
continue
}
t.handleWatchEvent(watchEvent)
case <-t.ctx.Done():
return
}
}
}
// handleWatchEvent issues an interrupt if a change to the stored TessenConfig has been detected.
func (t *Tessend) handleWatchEvent(watchEvent store.WatchEventTessenConfig) {
tessen := watchEvent.TessenConfig
switch watchEvent.Action {
case store.WatchCreate:
logger.WithField("opt-out", tessen.OptOut).Debug("tessen configuration created")
case store.WatchUpdate:
logger.WithField("opt-out", tessen.OptOut).Debug("tessen configuration updated")
case store.WatchDelete:
logger.WithField("opt-out", tessen.OptOut).Debug("tessen configuration deleted")
}
t.config = tessen
t.interrupt <- t.config
}
// startRingUpdates starts a loop to periodically update the ring.
func (t *Tessend) startRingUpdates() {
ticker := time.NewTicker(ringUpdateInterval)
defer ticker.Stop()
t.updateRing()
for {
select {
case <-t.ctx.Done():
return
case <-ticker.C:
t.updateRing()
}
}
}
// updateRing adds/updates the ring with a given key.
func (t *Tessend) updateRing() {
if err := t.ring.Add(t.ctx, t.backendID, ringBackendKeepalive); err != nil {
logger.WithField("key", t.backendID).WithError(err).Error("error adding key to the ring")
} else {
logger.WithField("key", t.backendID).Debug("added a key to the ring")
}
}
// watchRing watches the ring and handles ring events. It recreates watchers
// when they terminate due to error.
func (t *Tessend) watchRing(ctx context.Context, tessen *corev2.TessenConfig, wg *sync.WaitGroup) {
wc := t.ring.Watch(ctx, "tessen", 1, int(t.interval), "")
go func() {
t.handleEvents(tessen, wc)
defer wg.Done()
}()
}
// handleEvents logs different ring events and triggers tessen to run if applicable.
func (t *Tessend) handleEvents(tessen *corev2.TessenConfig, ch <-chan ringv2.Event) {
for event := range ch {
switch event.Type {
case ringv2.EventError:
logger.WithError(event.Err).Error("ring event error")
case ringv2.EventAdd:
logger.WithField("values", event.Values).Debug("added a backend to tessen")
case ringv2.EventRemove:
logger.WithField("values", event.Values).Debug("removed a backend from tessen")
case ringv2.EventTrigger:
logger.WithField("values", event.Values).Debug("tessen ring trigger")
// only trigger tessen if the next backend in the ring is this backend
if event.Values[0] == t.backendID {
if t.enabled() {
go t.collectAndSend()
}
}
case ringv2.EventClosing:
logger.Debug("tessen ring closing")
}
}
}
// startPromMetricsUpdates starts a loop to periodically send prometheus metrics
// from each backend to tessen.
func (t *Tessend) startPromMetricsUpdates() {
ticker := time.NewTicker(time.Duration(t.interval) * time.Second)
defer ticker.Stop()
for {
select {
case <-t.ctx.Done():
return
case <-ticker.C:
if t.enabled() {
t.sendPromMetrics()
}
}
}
}
// sendPromMetrics collects and sends prometheus metrics for event processing to tessen.
func (t *Tessend) sendPromMetrics() {
var hostname string
// collect data
data := t.getDataPayload()
now := time.Now().Unix()
c := eventd.EventsProcessed.WithLabelValues(eventd.EventsProcessedLabelSuccess)
pb := &dto.Metric{}
err := c.Write(pb)
if err != nil {
logger.WithError(err).Warn("failed to retrieve prometheus event counter")
return
}
// get the backend hostname to use as a metric tag
hostname, err = os.Hostname()
if err != nil {
logger.WithError(err).Error("error getting hostname")
}
// populate data payload
mp := &corev2.MetricPoint{
Name: eventd.EventsProcessedCounterVec,
Value: pb.GetCounter().GetValue(),
Timestamp: now,
Tags: []*corev2.MetricTag{
&corev2.MetricTag{
Name: "hostname",
Value: hostname,
},
},
}
appendInternalTag(mp)
logMetric(mp)
data.Metrics.Points = append(data.Metrics.Points, mp)
logger.WithFields(logrus.Fields{
"url": t.url,
"id": data.Cluster.ID,
}).Info("sending event processing metrics to tessen")
// send data
_ = t.send(data)
}
// start starts the tessen service.
func (t *Tessend) start() {
ctx, cancel := context.WithCancel(t.ctx)
wg := new(sync.WaitGroup)
wg.Add(1)
t.watchRing(ctx, t.config, wg)
for {
select {
case <-t.ctx.Done():
cancel()
return
case config := <-t.interrupt:
// Config change indicates the need to recreate the watcher
cancel()
wg.Wait()
ctx, cancel = context.WithCancel(t.ctx)
wg.Add(1)
t.watchRing(ctx, config, wg)
}
}
}
// enabled checks the tessen config for opt-out status, and verifies the existence of an enterprise license.
// It returns a boolean value indicating if tessen should be enabled or not.
func (t *Tessend) enabled() bool {
if !t.config.OptOut {
logger.WithField("opt-out", t.config.OptOut).Info("tessen is opted in, enabling tessen.. thank you so much for your support 💚")
return true
}
if t.AllowOptOut {
logger.WithField("opt-out", t.config.OptOut).Info("tessen is opted out, patiently waiting for you to opt back in")
return false
}
logger.WithField("opt-out", t.config.OptOut).Info("tessen is opted out but per the license agreement, we're enabling tessen.. thank you so much for your support 💚")
return true
}
// collectAndSend is a durable function to collect and send data to tessen.
// Errors are logged and tessen continues to the best of its ability.
func (t *Tessend) collectAndSend() {
// collect data
data := t.getDataPayload()
t.getPerResourceMetrics(time.Now().Unix(), data)
logger.WithFields(logrus.Fields{
"url": t.url,
"id": data.Cluster.ID,
"metric_points": len(data.Metrics.Points),
}).Info("sending resource counts to tessen")
// send data
respHeader := t.send(data)
if respHeader == "" {
logger.Debug("no tessen response header")
return
}
// parse the response header for an integer value
interval, err := strconv.ParseUint(respHeader, 10, 32)
if err != nil {
logger.Debugf("invalid tessen response header: %v", err)
return
}
// validate the returned interval is within the upper/lower bound limits
err = corev2.ValidateInterval(uint32(interval))
if err != nil {
logger.Debugf("invalid tessen response header: %v", err)
return
}
// update the tessen interval if the response header returns a new value
if t.interval != uint32(interval) {
t.interval = uint32(interval)
logger.WithField("interval", t.interval).Debug("tessen interval updated")
t.interrupt <- t.config
}
}
// getDataPayload retrieves cluster, version, and license information
// and returns the populated data payload.
func (t *Tessend) getDataPayload() *Data {
// collect cluster id
clusterID, err := t.store.GetClusterID(t.ctx)
if err != nil {
logger.WithError(err).Error("unable to retrieve cluster id")
}
// collect license information
wrapper := &Wrapper{}
err = etcd.Get(t.ctx, t.client, licenseStorePath, wrapper)
if err != nil {
logger.WithError(err).Debug("unable to retrieve license")
}
// populate data payload
data := &Data{
Cluster: Cluster{
ID: clusterID,
Version: version.Semver(),
License: wrapper.Value.License,
},
}
return data
}
// getPerResourceMetrics populates the data payload with the total number of each resource.
func (t *Tessend) getPerResourceMetrics(now int64, data *Data) {
var backendCount float64
// collect backend count
cluster, err := t.client.Cluster.MemberList(t.ctx)
if err != nil {
logger.WithError(err).Error("unable to retrieve backend count")
}
if cluster != nil {
backendCount = float64(len(cluster.Members))
}
// populate data payload
mp := &corev2.MetricPoint{
Name: "backend_count",
Value: backendCount,
Timestamp: now,
}
appendInternalTag(mp)
logMetric(mp)
data.Metrics.Points = append(data.Metrics.Points, mp)
// loop through the resource map and collect the count of each
// resource every 5 seconds to distribute the load on etcd
for metricName, metricFunc := range resourceMetrics {
time.Sleep(t.duration)
count, err := etcd.Count(t.ctx, t.client, metricFunc(t.ctx, ""))
if err != nil {
logger.WithError(err).Error("unable to retrieve resource count")
continue
}
mp = &corev2.MetricPoint{
Name: metricName,
Value: float64(count),
Timestamp: now,
}
appendInternalTag(mp)
logMetric(mp)
data.Metrics.Points = append(data.Metrics.Points, mp)
}
}
// getTessenConfigMetrics populates the data payload with an opt-out status event.
func (t *Tessend) getTessenConfigMetrics(now int64, tessen *corev2.TessenConfig, data *Data) {
mp := &corev2.MetricPoint{
Name: "tessen_config_update",
Value: 1,
Timestamp: now,
Tags: []*corev2.MetricTag{
&corev2.MetricTag{
Name: "opt_out",
Value: strconv.FormatBool(tessen.OptOut),
},
},
}
appendInternalTag(mp)
logMetric(mp)
data.Metrics.Points = append(data.Metrics.Points, mp)
}
// send sends the data payload to the tessen url and retrieves the interval response header.
func (t *Tessend) send(data *Data) string {
b, _ := json.Marshal(data)
resp, err := http.Post(t.url, "application/json", bytes.NewBuffer(b))
// TODO(nikki): special case logs on a per error basis
if err != nil {
logger.WithError(err).Error("tessen phone-home service failed")
return ""
}
defer resp.Body.Close()
if resp.StatusCode >= 400 {
body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 4096))
logger.Errorf("bad status: %d (%q)", resp.StatusCode, string(body))
return ""
}
return resp.Header.Get(tessenIntervalHeader)
}
// logMetric logs the metric name and value collected for transparency.
func logMetric(m *corev2.MetricPoint) {
logger.WithFields(logrus.Fields{
"metric_name": m.Name,
"metric_value": m.Value,
}).Debug("collected a metric for tessen")
}
// appendInternalTag tags the metric with an internal environment variable value
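// For example (illustrative values): with SENSU_INTERNAL_ENVIRONMENT=staging set on the
// backend, every reported metric point gains the tag
// {Name: "sensu_internal_environment", Value: "staging"}; when the variable is unset,
// no tag is added.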
func appendInternalTag(m *corev2.MetricPoint) {
if internalEnv := os.Getenv("SENSU_INTERNAL_ENVIRONMENT"); internalEnv != "" {
m.Tags = append(m.Tags, &corev2.MetricTag{
Name: "sensu_internal_environment",
Value: internalEnv,
})
}
}
|
[
"\"SENSU_INTERNAL_ENVIRONMENT\""
] |
[] |
[
"SENSU_INTERNAL_ENVIRONMENT"
] |
[]
|
["SENSU_INTERNAL_ENVIRONMENT"]
|
go
| 1 | 0 | |
faq-publish-api/tests/unit/test_topic_service_get_user.py
|
import unittest
import os
import uuid
import names
import random
from services.topic_service import TopicService
from services.user_service import UserService
from dotenv import load_dotenv
load_dotenv()
class TestGetUser(unittest.TestCase):
LOCAL_DB_FILE = '/data//datastores/local_test.sqlite3'
def setUp(self):
os.environ['USER_TOPICS_DATASTORE_CONNECTION_STRING'] = 'sqlite://' + self.LOCAL_DB_FILE + '?check_same_thread=False'
self.user_service = UserService()
self.topic_service = TopicService()
self.user_name = names.get_full_name()
self.user_id = self.user_service.create_user(self.user_name)
def tearDown(self):
self.user_service = None
self.topic_service = None
if os.path.exists('.' + self.LOCAL_DB_FILE):
os.remove('.' + self.LOCAL_DB_FILE)
def test_get_user_sunny_day(self):
user = self.topic_service.get_user(self.user_id)
self.assertIsNotNone(user)
def test_get_topics_nonexisting_user(self):
nonexisting_user_id = str(uuid.uuid4())
user = self.topic_service.get_user(nonexisting_user_id)
self.assertIsNone(user)
def test_get_topics_empty_user_id(self):
empty_user_id = ''
user = self.topic_service.get_user(empty_user_id)
self.assertIsNone(user)
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"USER_TOPICS_DATASTORE_CONNECTION_STRING"
] |
[]
|
["USER_TOPICS_DATASTORE_CONNECTION_STRING"]
|
python
| 1 | 0 | |
bot/main.py
|
import pathlib
import os
import dotenv
from aiogram import Bot, Dispatcher
from aiogram.contrib.fsm_storage.files import PickleStorage
from config.logger import logger_init
from .handlers import start_handler
# Load dotenv
dotenv.load_dotenv()
# Configure logging.
# 4 levels of logging: INFO, DEBUG, WARNING, ERROR
logger_init("INFO")
async def main():
"""Main function"""
# Initialize bot and dispatcher
bot = Bot(token=os.getenv("API_TOKEN"))
try:
storage = PickleStorage(pathlib.Path("db"))
dp = Dispatcher(bot, storage=storage)
# start_handler register
dp.register_message_handler(
start_handler, commands={"start"}, state="*")
# text_handler register
# dp.register_message_handler(
# text_handler, content_types="text")
await dp.start_polling()
finally:
await bot.close()
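# Illustrative entry point (assumption, not part of this module; the bot may be started
# elsewhere in the package):
#   import asyncio
#   from bot.main import main
#   asyncio.run(main())  # requires API_TOKEN in the environment or in .env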
|
[] |
[] |
[
"API_TOKEN"
] |
[]
|
["API_TOKEN"]
|
python
| 1 | 0 | |
qa/rpc-tests/maxuploadtarget.py
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import LUASCOINTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class MaxUploadTest(LUASCOINTestFramework):
def __init__(self):
self.utxo = []
self.txouts = gen_return_txouts()
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("LUASCOIND", "luascoind"),
help="luascoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them are close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
# Create a basic transaction that will send change back to ourselves after accounting for a fee,
# and then insert the 128 generated transaction outs in the middle. rawtx[92] is where the #
# of txouts is stored and is the only thing we overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
# test_nodes[0] will test what happens if we just keep requesting the
# same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 200*1024*1024
daily_buffer = 144 * MAX_BLOCK_SIZE
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 144MB will be reserved for relaying new blocks, so expect this to
# succeed for ~70 tries.
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print "Peer 1 able to repeatedly download new block"
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print "Peer 1 disconnected after trying to download old block"
print "Advancing system time on node to clear counters..."
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print "Peer 2 able to download old block"
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print "Restarting nodes with -whitelist=127.0.0.1"
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print "Peer 1 still connected after trying to download old block (whitelisted)"
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
MaxUploadTest().main()
|
[] |
[] |
[
"LUASCOIND"
] |
[]
|
["LUASCOIND"]
|
python
| 1 | 0 | |
hackerrank/TimeConversion.py
|
#!/bin/python3
import os
import sys
def timeConversion(s):
hour = int(s[:2])
if s.endswith("AM"):
if hour == 12:
hour = 0
else:
if hour != 12:
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = "0" + hour
return hour + s[2:-2]
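# Worked examples (illustrative):
#   timeConversion("07:05:45PM") -> "19:05:45"
#   timeConversion("12:00:00AM") -> "00:00:00"
#   timeConversion("12:40:22PM") -> "12:40:22"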
if __name__ == "__main__":
f = open(os.environ["OUTPUT_PATH"], "w")
s = input()
result = timeConversion(s)
f.write(result + "\n")
f.close()
|
[] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
python
| 1 | 0 | |
clab/config.go
|
// Copyright 2020 Nokia
// Licensed under the BSD 3-Clause License.
// SPDX-License-Identifier: BSD-3-Clause
package clab
import (
"bufio"
"context"
"fmt"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"syscall"
"github.com/mitchellh/go-homedir"
log "github.com/sirupsen/logrus"
"github.com/srl-labs/containerlab/nodes"
clabRuntimes "github.com/srl-labs/containerlab/runtime"
"github.com/srl-labs/containerlab/types"
"github.com/srl-labs/containerlab/utils"
"github.com/vishvananda/netlink"
)
const (
// defaultPrefix is used to distinguish containerlab-created files/dirs/containers
defaultPrefix = "clab"
// the name of the docker network that nodes' management interfaces connect to
dockerNetName = "clab"
dockerNetIPv4Addr = "172.20.20.0/24"
dockerNetIPv6Addr = "2001:172:20:20::/64"
// NSPath value assigned to host interfaces
hostNSPath = "__host"
// veth link mtu
DefaultVethLinkMTU = 9500
// containerlab's reserved OUI
ClabOUI = "aa:c1:ab"
// label names
ContainerlabLabel = "containerlab"
NodeNameLabel = "clab-node-name"
NodeKindLabel = "clab-node-kind"
NodeTypeLabel = "clab-node-type"
NodeGroupLabel = "clab-node-group"
NodeLabDirLabel = "clab-node-lab-dir"
TopoFileLabel = "clab-topo-file"
)
// supported kinds
var kinds = []string{
"srl",
"ceos",
"crpd",
"sonic-vs",
"vr-ftosv",
"vr-n9kv",
"vr-sros",
"vr-vmx",
"vr-vqfx",
"vr-xrv",
"vr-xrv9k",
"vr-veos",
"vr-pan",
"vr-csr",
"vr-ros",
"linux",
"bridge",
"ovs-bridge",
"mysocketio",
"host",
"cvx",
}
// Config defines lab configuration as it is provided in the YAML file
type Config struct {
Name string `json:"name,omitempty"`
Prefix *string `json:"prefix,omitempty"`
Mgmt *types.MgmtNet `json:"mgmt,omitempty"`
Topology *types.Topology `json:"topology,omitempty"`
ConfigPath string
}
// parseTopology parses the lab topology
func (c *CLab) parseTopology() error {
log.Infof("Parsing & checking topology file: %s", c.TopoFile.fullName)
if c.Config.ConfigPath == "" {
c.Config.ConfigPath, _ = filepath.Abs(os.Getenv("PWD"))
}
if c.Config.Prefix == nil {
c.Config.Prefix = new(string)
*c.Config.Prefix = defaultPrefix
}
c.Dir = &Directory{}
// labDir is always named clab-$labName, regardless of the prefix
labDir := strings.Join([]string{"clab", c.Config.Name}, "-")
c.Dir.Lab = filepath.Join(c.Config.ConfigPath, labDir)
c.Dir.LabCA = filepath.Join(c.Dir.Lab, "ca")
c.Dir.LabCARoot = filepath.Join(c.Dir.LabCA, "root")
c.Dir.LabGraph = filepath.Join(c.Dir.Lab, "graph")
// initialize Nodes and Links variable
c.Nodes = make(map[string]nodes.Node)
c.Links = make(map[int]*types.Link)
// initialize the Node information from the topology map
nodeNames := make([]string, 0, len(c.Config.Topology.Nodes))
for nodeName := range c.Config.Topology.Nodes {
nodeNames = append(nodeNames, nodeName)
}
sort.Strings(nodeNames)
// collect node runtimes in a map[NodeName] -> RuntimeName
var nodeRuntimes = make(map[string]string)
for nodeName, topologyNode := range c.Config.Topology.Nodes {
// this case is when runtime was overridden at the node level
if r := c.Config.Topology.GetNodeRuntime(nodeName); r != "" {
nodeRuntimes[nodeName] = r
continue
}
// this case is for non-default runtimes overriding the global default
if r, ok := nodes.NonDefaultRuntimes[topologyNode.GetKind()]; ok {
nodeRuntimes[nodeName] = r
continue
}
// saving the global default runtime
nodeRuntimes[nodeName] = c.globalRuntime
}
// initialize any extra runtimes
for _, r := range nodeRuntimes {
// this is the case for already init'ed runtimes
if _, ok := c.Runtimes[r]; ok {
continue
}
if rInit, ok := clabRuntimes.ContainerRuntimes[r]; ok {
newRuntime := rInit()
defaultConfig := c.Runtimes[c.globalRuntime].Config()
err := newRuntime.Init(
clabRuntimes.WithConfig(&defaultConfig),
)
if err != nil {
return fmt.Errorf("failed to init the container runtime: %s", err)
}
c.Runtimes[r] = newRuntime
}
}
var err error
for idx, nodeName := range nodeNames {
err = c.NewNode(nodeName, nodeRuntimes[nodeName], c.Config.Topology.Nodes[nodeName], idx)
if err != nil {
return err
}
}
for i, l := range c.Config.Topology.Links {
// i is the link index and l provides the link config struct
c.Links[i] = c.NewLink(l)
}
// set any containerlab defaults after we've parsed the input
c.setDefaults()
return nil
}
// NewNode initializes a new node object
func (c *CLab) NewNode(nodeName, nodeRuntime string, nodeDef *types.NodeDefinition, idx int) error {
nodeCfg, err := c.createNodeCfg(nodeName, nodeDef, idx)
if err != nil {
return err
}
// Init
nodeInitializer, ok := nodes.Nodes[nodeCfg.Kind]
if !ok {
return fmt.Errorf("node %q refers to a kind %q which is not supported. Supported kinds are %q", nodeCfg.ShortName, nodeCfg.Kind, kinds)
}
n := nodeInitializer()
// Init
err = n.Init(nodeCfg, nodes.WithRuntime(c.Runtimes[nodeRuntime]), nodes.WithMgmtNet(c.Config.Mgmt))
if err != nil {
log.Errorf("failed to initialize node %q: %v", nodeCfg.ShortName, err)
return fmt.Errorf("failed to initialize node %q: %v", nodeCfg.ShortName, err)
}
n.Config().Labels = utils.MergeStringMaps(n.Config().Labels, map[string]string{
ContainerlabLabel: c.Config.Name,
NodeNameLabel: n.Config().ShortName,
NodeKindLabel: n.Config().Kind,
NodeTypeLabel: n.Config().NodeType,
NodeGroupLabel: n.Config().Group,
NodeLabDirLabel: n.Config().LabDir,
TopoFileLabel: c.TopoFile.path,
})
c.Nodes[nodeName] = n
return nil
}
func (c *CLab) createNodeCfg(nodeName string, nodeDef *types.NodeDefinition, idx int) (*types.NodeConfig, error) {
// default longName follows $prefix-$lab-$nodeName pattern
longName := fmt.Sprintf("%s-%s-%s", *c.Config.Prefix, c.Config.Name, nodeName)
switch {
// when prefix is an empty string longName will match shortName/nodeName
case *c.Config.Prefix == "":
longName = nodeName
case *c.Config.Prefix == "__lab-name":
longName = fmt.Sprintf("%s-%s", c.Config.Name, nodeName)
}
nodeCfg := &types.NodeConfig{
ShortName: nodeName, // just the node name as seen in the topo file
LongName: longName, // by default clab-$labName-$nodeName
Fqdn: strings.Join([]string{nodeName, c.Config.Name, "io"}, "."),
LabDir: filepath.Join(c.Dir.Lab, nodeName),
Index: idx,
Group: c.Config.Topology.GetNodeGroup(nodeName),
Kind: strings.ToLower(c.Config.Topology.GetNodeKind(nodeName)),
NodeType: c.Config.Topology.GetNodeType(nodeName),
Position: c.Config.Topology.GetNodePosition(nodeName),
Image: c.Config.Topology.GetNodeImage(nodeName),
User: c.Config.Topology.GetNodeUser(nodeName),
Entrypoint: c.Config.Topology.GetNodeEntrypoint(nodeName),
Cmd: c.Config.Topology.GetNodeCmd(nodeName),
Exec: c.Config.Topology.GetNodeExec(nodeName),
Env: c.Config.Topology.GetNodeEnv(nodeName),
NetworkMode: strings.ToLower(c.Config.Topology.GetNodeNetworkMode(nodeName)),
MgmtIPv4Address: nodeDef.GetMgmtIPv4(),
MgmtIPv6Address: nodeDef.GetMgmtIPv6(),
Publish: c.Config.Topology.GetNodePublish(nodeName),
Sysctls: make(map[string]string),
Endpoints: make([]types.Endpoint, 0),
Sandbox: c.Config.Topology.GetNodeSandbox(nodeName),
Kernel: c.Config.Topology.GetNodeKernel(nodeName),
Runtime: c.Config.Topology.GetNodeRuntime(nodeName),
CPU: c.Config.Topology.GetNodeCPU(nodeName),
CPUSet: c.Config.Topology.GetNodeCPUSet(nodeName),
Memory: c.Config.Topology.GetNodeMemory(nodeName),
StartupDelay: c.Config.Topology.GetNodeStartupDelay(nodeName),
// Extras
Extras: c.Config.Topology.GetNodeExtras(nodeName),
}
log.Debugf("node config: %+v", nodeCfg)
var err error
// initialize config
nodeCfg.StartupConfig, err = c.Config.Topology.GetNodeStartupConfig(nodeCfg.ShortName)
if err != nil {
return nil, err
}
nodeCfg.EnforceStartupConfig = c.Config.Topology.GetNodeEnforceStartupConfig(nodeCfg.ShortName)
// initialize license field
nodeCfg.License, err = c.Config.Topology.GetNodeLicense(nodeCfg.ShortName)
if err != nil {
return nil, err
}
// initialize bind mounts
binds := c.Config.Topology.GetNodeBinds(nodeName)
err = resolveBindPaths(binds, nodeCfg.LabDir)
if err != nil {
return nil, err
}
nodeCfg.Binds = binds
nodeCfg.PortSet, nodeCfg.PortBindings, err = c.Config.Topology.GetNodePorts(nodeName)
if err != nil {
return nil, err
}
nodeCfg.Labels = c.Config.Topology.GetNodeLabels(nodeCfg.ShortName)
nodeCfg.Config = c.Config.Topology.GetNodeConfigDispatcher(nodeCfg.ShortName)
return nodeCfg, nil
}
// NewLink initializes a new link object
func (c *CLab) NewLink(l *types.LinkConfig) *types.Link {
if len(l.Endpoints) != 2 {
log.Fatalf("endpoint %q has wrong syntax, unexpected number of items", l.Endpoints) // skipcq: RVV-A0003
}
return &types.Link{
A: c.NewEndpoint(l.Endpoints[0]),
B: c.NewEndpoint(l.Endpoints[1]),
MTU: DefaultVethLinkMTU,
Labels: l.Labels,
Vars: l.Vars,
}
}
// NewEndpoint initializes a new endpoint object
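// An endpoint string is expected in "<node>:<interface>" form, for example "srl1:e1-1"
// (node and interface names here are illustrative); the node part may also be one of the
// special references "host" or "mgmt-net" handled below.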
func (c *CLab) NewEndpoint(e string) *types.Endpoint {
// initialize a new endpoint
endpoint := new(types.Endpoint)
// split the string to get node name and endpoint name
split := strings.Split(e, ":")
if len(split) != 2 {
log.Fatalf("endpoint %s has wrong syntax", e) // skipcq: GO-S0904, RVV-A0003
}
nName := split[0] // node name
// initialize the endpoint name based on the split function
endpoint.EndpointName = split[1] // endpoint name
if len(endpoint.EndpointName) > 15 {
log.Fatalf("interface '%s' name exceeds maximum length of 15 characters", endpoint.EndpointName) //skipcq: RVV-A0003
}
// generate unique MAC
endpoint.MAC = utils.GenMac(ClabOUI)
// search the node pointer for a node name referenced in endpoint section
switch nName {
// "host" is a special reference to host namespace
// for which we create an special Node with kind "host"
case "host":
endpoint.Node = &types.NodeConfig{
Kind: "host",
ShortName: "host",
NSPath: hostNSPath,
DeploymentStatus: "created",
}
// mgmt-net is a special reference to a bridge of the docker network
// that is used as the management network
case "mgmt-net":
endpoint.Node = &types.NodeConfig{
Kind: "bridge",
ShortName: "mgmt-net",
DeploymentStatus: "created",
}
default:
c.m.Lock()
if n, ok := c.Nodes[nName]; ok {
endpoint.Node = n.Config()
n.Config().Endpoints = append(n.Config().Endpoints, *endpoint)
}
c.m.Unlock()
}
// stop the deployment if the matching node element was not found
// "host" node name is an exception, it may exist without a matching node
if endpoint.Node == nil {
log.Fatalf("not all nodes are specified in the 'topology.nodes' section or the names don't match in the 'links.endpoints' section: %s", nName) // skipcq: GO-S0904, RVV-A0003
}
return endpoint
}
// CheckTopologyDefinition runs topology checks and returns any errors found
func (c *CLab) CheckTopologyDefinition(ctx context.Context) error {
var err error
if err = c.verifyBridgesExist(); err != nil {
return err
}
if err = c.verifyLinks(); err != nil {
return err
}
if err = c.verifyRootNetnsInterfaceUniqueness(); err != nil {
return err
}
if err = c.VerifyContainersUniqueness(ctx); err != nil {
return err
}
if err = c.verifyVirtSupport(); err != nil {
return err
}
if err = c.verifyHostIfaces(); err != nil {
return err
}
return c.VerifyImages(ctx)
}
// verifyBridgesExist verifies that every node of kind=bridge/ovs-bridge exists on the lab host
func (c *CLab) verifyBridgesExist() error {
for name, node := range c.Nodes {
if node.Config().Kind == nodes.NodeKindBridge || node.Config().Kind == nodes.NodeKindOVS {
if _, err := netlink.LinkByName(name); err != nil {
return fmt.Errorf("bridge %s is referenced in the endpoints section but was not found in the default network namespace", name)
}
}
}
return nil
}
func (c *CLab) verifyLinks() error {
endpoints := map[string]struct{}{}
// dups accumulates duplicate links
dups := []string{}
for _, lc := range c.Config.Topology.Links {
for _, e := range lc.Endpoints {
if err := checkEndpoint(e); err != nil {
return err
}
if _, ok := endpoints[e]; ok {
dups = append(dups, e)
}
endpoints[e] = struct{}{}
}
}
if len(dups) != 0 {
return fmt.Errorf("endpoints %q appeared more than once in the links section of the topology file", dups)
}
return nil
}
// VerifyImages checks that the images referred to in the node configs
// are either pullable or present in the local image store
func (c *CLab) VerifyImages(ctx context.Context) error {
images := make(map[string]string)
for _, node := range c.Nodes {
for imageKey, imageName := range node.GetImages() {
if imageName == "" {
return fmt.Errorf("missing required %q image for node %q", imageKey, node.Config().ShortName)
}
images[imageName] = node.GetRuntime().GetName()
}
}
for image, runtimeName := range images {
err := c.Runtimes[runtimeName].PullImageIfRequired(ctx, image)
if err != nil {
return err
}
}
return nil
}
// VerifyContainersUniqueness ensures that nodes defined in the topology do not clash with the names of existing containers.
// Additionally, it checks that the lab name is unique and no containers are currently running with the same lab name label.
func (c *CLab) VerifyContainersUniqueness(ctx context.Context) error {
nctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
containers, err := c.ListContainers(nctx, nil)
if err != nil {
return err
}
if len(containers) == 0 {
return nil
}
dups := []string{}
for _, n := range c.Nodes {
for _, cnt := range containers {
if "/"+n.Config().LongName == cnt.Names[0] {
dups = append(dups, n.Config().LongName)
}
}
}
if len(dups) != 0 {
return fmt.Errorf("containers %q already exist. Add '--reconfigure' flag to the deploy command to first remove the containers and then deploy the lab", dups)
}
// check that none of the existing containers has a label that matches
// the lab name of a currently deploying lab
// this ensures lab uniqueness
for _, cnt := range containers {
if cnt.Labels[ContainerlabLabel] == c.Config.Name {
return fmt.Errorf("the '%s' lab has already been deployed. Destroy the lab before deploying a lab with the same name", c.Config.Name)
}
}
return nil
}
// verifyHostIfaces ensures that host interfaces referenced in the topology
// do not already exist in the root namespace,
// and that nodes configured with host networking mode do not have any links defined
func (c *CLab) verifyHostIfaces() error {
for _, l := range c.Links {
if l.A.Node.ShortName == "host" {
if nl, _ := netlink.LinkByName(l.A.EndpointName); nl != nil {
return fmt.Errorf("host interface %s referenced in topology already exists", l.A.EndpointName)
}
}
if l.A.Node.NetworkMode == "host" {
return fmt.Errorf("node '%s' is defined with host network mode, it can't have any links. Remove '%s' node links from the topology definition",
l.A.Node.ShortName, l.A.Node.ShortName)
}
if l.B.Node.ShortName == "host" {
if nl, _ := netlink.LinkByName(l.B.EndpointName); nl != nil {
return fmt.Errorf("host interface %s referenced in topology already exists", l.B.EndpointName)
}
}
if l.B.Node.NetworkMode == "host" {
return fmt.Errorf("node '%s' is defined with host network mode, it can't have any links. Remove '%s' node links from the topology definition",
l.B.Node.ShortName, l.B.Node.ShortName)
}
}
return nil
}
// verifyRootNetnsInterfaceUniqueness ensures that interfaces that appear in the root ns (bridge, ovs-bridge and host)
// are uniquely defined in the topology file
func (c *CLab) verifyRootNetnsInterfaceUniqueness() error {
rootNsIfaces := map[string]struct{}{}
for _, l := range c.Links {
endpoints := [2]*types.Endpoint{l.A, l.B}
for _, e := range endpoints {
if e.Node.Kind == nodes.NodeKindBridge || e.Node.Kind == nodes.NodeKindOVS || e.Node.Kind == nodes.NodeKindHOST {
if _, ok := rootNsIfaces[e.EndpointName]; ok {
return fmt.Errorf(`interface %s defined for node %s has already been used in other bridges, ovs-bridges or host interfaces.
Make sure that nodes of these kinds use unique interface names`, e.EndpointName, e.Node.ShortName)
}
rootNsIfaces[e.EndpointName] = struct{}{}
}
}
}
return nil
}
// verifyVirtSupport checks whether the host CPU supports virtualization (vmx for Intel VT-x or svm for AMD-V) when vrnetlab (VM-based) nodes are used
func (c *CLab) verifyVirtSupport() error {
virtNeeded := false
for _, n := range c.Nodes {
if strings.HasPrefix(n.Config().Kind, "vr-") {
virtNeeded = true
break
}
}
if !virtNeeded {
return nil
}
f, err := os.Open("/proc/cpuinfo")
if err != nil {
return err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
if strings.Contains(scanner.Text(), "vmx") || strings.Contains(scanner.Text(), "svm") {
return nil
}
}
if err := scanner.Err(); err != nil {
return err
}
return fmt.Errorf("virtualization seems to be not supported and it is required for VM based nodes. Check if virtualization can be enabled")
}
// checkEndpoint runs checks on the endpoint syntax
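// A valid endpoint is a "<node>:<interface>" pair, e.g. "node1:eth1"; "eth0" is rejected
// since docker adds that interface automatically.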
func checkEndpoint(e string) error {
split := strings.Split(e, ":")
if len(split) != 2 {
return fmt.Errorf("malformed endpoint definition: %s", e)
}
if split[1] == "eth0" {
return fmt.Errorf("eth0 interface can't be used in the endpoint definition as it is added by docker automatically: '%s'", e)
}
return nil
}
// resolvePath resolves a string path by expanding `~` to the home dir or returning the absolute path for a relative path
func resolvePath(p string) (string, error) {
if p == "" {
return "", nil
}
var err error
switch {
// resolve ~/ path
case p[0] == '~':
p, err = homedir.Expand(p)
if err != nil {
return "", err
}
default:
p, err = filepath.Abs(p)
if err != nil {
return "", err
}
}
return p, nil
}
// resolveBindPaths resolves the host paths in a bind string, such as /hostpath:/remotepath(:options) string
// it allows host path to have `~` and returns absolute path for a relative path
// if the host path doesn't exist, the error will be returned
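// For example, a bind "$nodeDir/cfg.yml:/etc/cfg.yml:ro" gets "$nodeDir" replaced with the node's
// lab directory and the host part resolved to an absolute path before the bind is written back.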
func resolveBindPaths(binds []string, nodedir string) error {
for i := range binds {
// host path is a first element in a /hostpath:/remotepath(:options) string
elems := strings.Split(binds[i], ":")
r := strings.NewReplacer("$nodeDir", nodedir)
hp := r.Replace(elems[0])
hp, err := resolvePath(hp)
if err != nil {
return err
}
_, err = os.Stat(hp)
if err != nil {
// check if the hostpath mount has a reference to ansible-inventory.yml
// if that is the case, we do not emit an error on missing file, since this file
// will be created by containerlab upon lab deployment
labdir := filepath.Base(filepath.Dir(nodedir))
s := strings.Split(hp, string(os.PathSeparator))
// creating a path from last two elements of a resolved host path
h := filepath.Join(s[len(s)-2], s[len(s)-1])
if h != filepath.Join(labdir, "ansible-inventory.yml") {
return fmt.Errorf("failed to verify bind path: %v", err)
}
}
elems[0] = hp
binds[i] = strings.Join(elems, ":")
}
return nil
}
// CheckResources runs container host resources check
func (*CLab) CheckResources() error {
vcpu := runtime.NumCPU()
log.Debugf("Number of vcpu: %d", vcpu)
if vcpu < 2 {
log.Warn("Only 1 vcpu detected on this container host. Most containerlab nodes require at least 2 vcpu")
}
freeMemG := sysMemory("free") / 1024 / 1024 / 1024
if freeMemG < 1 {
log.Warnf("it appears that container host has low memory available: ~%dGi. This might lead to runtime errors. Consider freeing up more memory.", freeMemG)
}
return nil
}
// setDefaults sets defaults after the topology has been parsed
func (c *CLab) setDefaults() {
for _, n := range c.Nodes {
// Injecting the env var with expected number of links
numLinks := map[string]string{
types.CLAB_ENV_INTFS: fmt.Sprintf("%d", len(n.Config().Endpoints)),
}
n.Config().Env = utils.MergeStringMaps(n.Config().Env, numLinks)
}
}
// sysMemory reports on total installed or free memory (in bytes)
// used from https://github.com/pbnjay/memory
func sysMemory(v string) uint64 {
in := &syscall.Sysinfo_t{}
err := syscall.Sysinfo(in)
if err != nil {
return 0
}
var m uint64
// If this is a 32-bit system, then these fields are
// uint32 instead of uint64.
// So we always convert to uint64 to match signature.
switch v {
case "total":
m = uint64(in.Totalram) * uint64(in.Unit)
case "free":
m = uint64(in.Freeram) * uint64(in.Unit)
}
return m
}
// getShortName returns nodeCfg.ShortName based on the provided containerName and labName
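// e.g. containerName "clab-demo-node1" with labName "demo" yields "node1"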
func getShortName(labName, containerName string) (string, error) {
result := strings.Split(containerName, "-"+labName+"-")
if len(result) != 2 {
return "", fmt.Errorf("failed to parse container name %q", containerName)
}
return result[1], nil
}
|
[
"\"PWD\""
] |
[] |
[
"PWD"
] |
[]
|
["PWD"]
|
go
| 1 | 0 | |
src/scitokens/scitokens.py
|
"""
SciTokens reference library.
This library provides the primitives necessary for working with SciTokens
authorization tokens.
"""
import time
import os
import jwt
from . import urltools
import logging
from six import string_types
LOGGER = logging.getLogger("scitokens")
import uuid
import cryptography.hazmat.backends as backends
from .utils import keycache as KeyCache
from .utils import config
from .utils.errors import MissingIssuerException, InvalidTokenFormat, MissingKeyException, UnsupportedKeyException
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.primitives.asymmetric import rsa, ec
class SciToken(object):
"""
An object representing the contents of a SciToken.
"""
def __init__(self, key=None, algorithm=None, key_id=None, parent=None, claims=None):
"""
Construct a SciToken object.
:param key: Private key to sign the SciToken with. It should be the PEM contents.
:param algorithm: Private key algorithm to sign the SciToken with. Default: RS256
:param str key_id: A string representing the Key ID that is used at the issuer
:param parent: Parent SciToken that will be chained
"""
if claims is not None:
raise NotImplementedError()
self._key = key
derived_alg = None
if key:
derived_alg = self._derive_algorithm(key)
# Make sure we support the key algorithm
if key and not algorithm and not derived_alg:
# We don't know the key algorithm
raise UnsupportedKeyException("Key was given for SciToken, but algorithm was not "
"passed to SciToken creation and it cannot be derived "
"from the provided key")
elif derived_alg and not algorithm:
self._key_alg = derived_alg
elif derived_alg and algorithm and derived_alg != algorithm:
error_str = ("Key provided reports algorithm type: {0}, ".format(derived_alg) +
"while scitoken creation argument was {0}".format(algorithm))
raise UnsupportedKeyException(error_str)
elif key and algorithm:
self._key_alg = algorithm
else:
# If key is not specified, and neither is algorithm
self._key_alg = algorithm if algorithm is not None else config.get('default_alg')
if self._key_alg not in ["RS256", "ES256"]:
raise UnsupportedKeyException()
self._key_id = key_id
self._parent = parent
self._claims = {}
self._verified_claims = {}
self.insecure = False
self._serialized_token = None
@staticmethod
def _derive_algorithm(key):
"""
Derive the algorithm type from the PEM contents of the key
returns: Key algorithm if known, otherwise None
"""
if isinstance(key, rsa.RSAPrivateKey):
return "RS256"
elif isinstance(key, ec.EllipticCurvePrivateKey):
if key.curve.name == "secp256r1":
return "ES256"
# If it gets here, we don't know what type of key
return None
def claims(self):
"""
Return an iterator of (key, value) pairs of claims, starting
with the claims from the first token in the chain.
"""
if self._parent:
for claim, value in self._parent.claims():
yield claim, value
for claim, value in self._verified_claims.items():
yield claim, value
for claim, value in self._claims.items():
yield claim, value
def verify(self):
"""
Verify the claims of the in-memory token.
Automatically called by deserialize.
"""
raise NotImplementedError()
def serialize(self, include_key=False, issuer=None, lifetime=600):
"""
Serialize the existing SciToken.
:param bool include_key: When true, include the public key in the serialized token. Default=False
:param str issuer: A string indicating the issuer for the token. It should be an HTTPS address,
as specified in https://tools.ietf.org/html/draft-ietf-oauth-discovery-07
:param int lifetime: Number of seconds that the token should be valid
:return str: base64 encoded token
"""
if include_key is not False:
raise NotImplementedError()
if self._key is None:
raise MissingKeyException("Unable to serialize, missing private key")
# Issuer needs to be available, otherwise throw an error
if issuer is None and 'iss' not in self:
raise MissingIssuerException("Issuer not specific in claims or as argument")
if not issuer:
issuer = self['iss']
# Set the issue and expiration time of the token
issue_time = int(time.time())
exp_time = int(issue_time + lifetime)
# Add to validated and other claims
payload = dict(self._verified_claims)
payload.update(self._claims)
# Anything below will override what is in the claims
payload.update({
"iss": issuer,
"exp": exp_time,
"iat": issue_time,
"nbf": issue_time
})
if 'jti' not in payload:
# Create a jti from a uuid
payload['jti'] = str(uuid.uuid4())
self._claims['jti'] = payload['jti']
if self._key_id is not None:
encoded = jwt.encode(payload, self._key, algorithm = self._key_alg, headers={'kid': self._key_id})
else:
encoded = jwt.encode(payload, self._key, algorithm = self._key_alg)
self._serialized_token = encoded
# Move claims over to verified claims
self._verified_claims.update(self._claims)
self._claims = {}
global LOGGER
LOGGER.info("Signed Token: {0}".format(str(payload)))
# Encode the returned string for backwards compatibility.
# Previous versions of PyJWT returned bytes
if not isinstance(encoded, bytes):
encoded = str.encode(encoded)
return encoded
def update_claims(self, claims):
"""
Add new claims to the token.
:param claims: Dictionary of claims to add to the token
"""
self._claims.update(claims)
def __setitem__(self, claim, value):
"""
Assign a new claim to the token.
"""
self._claims[claim] = value
def __getitem__(self, claim):
"""
Access the value corresponding to a particular claim; will
return claims from both the verified and unverified claims.
If a claim is not present, then a KeyError is thrown.
"""
if claim in self._claims:
return self._claims[claim]
if claim in self._verified_claims:
return self._verified_claims[claim]
raise KeyError(claim)
def __contains__(self, claim):
"""
Check if the claim exists in the SciToken
"""
if claim in self._claims:
return True
if claim in self._verified_claims:
return True
return False
def __delitem__(self, claim):
"""
Delete the claim from the SciToken
"""
deleted = False
if claim in self._claims:
del self._claims[claim]
deleted = True
if claim in self._verified_claims:
del self._verified_claims[claim]
deleted = True
if deleted:
return
else:
raise KeyError(claim)
def get(self, claim, default=None, verified_only=False):
"""
Return the value associated with a claim, returning the
default if the claim is not present. If `verified_only` is
True, then a claim is returned only if it is in the verified claims
"""
if verified_only:
return self._verified_claims.get(claim, default)
return self._claims.get(claim, self._verified_claims.get(claim, default))
def clone_chain(self):
"""
Return a new, empty SciToken
"""
raise NotImplementedError()
def _deserialize_key(self, key_serialized, unverified_headers):
"""
Given a serialized key and a set of UNVERIFIED headers, return
a corresponding private key object.
"""
@staticmethod
def deserialize(serialized_token, audience=None, require_key=False, insecure=False, public_key=None):
"""
Given a serialized SciToken, load it into a SciTokens object.
Verifies the claims pass the current set of validation scripts.
:param str serialized_token: The serialized token.
:param str audience: The audience URI that this principal is claiming. Default: None
:param bool require_key: When True, require the key
:param bool insecure: When True, allow insecure methods to verify the issuer,
including allowing "localhost" issuer (useful in testing). Default=False
:param str public_key: A PEM formatted public key string to be used to validate the token
"""
if require_key is not False:
raise NotImplementedError()
if isinstance(serialized_token, bytes):
serialized_token = serialized_token.decode('utf8')
info = serialized_token.split(".")
if len(info) != 3 and len(info) != 4: # header, payload, signature[, key]
raise InvalidTokenFormat("Serialized token is not a readable format.")
if (len(info) != 4) and require_key:
raise MissingKeyException("No key present in serialized token")
serialized_jwt = info[0] + "." + info[1] + "." + info[2]
unverified_headers = jwt.get_unverified_header(serialized_jwt)
unverified_payload = jwt.decode(serialized_jwt, algorithms=['RS256', 'ES256'],
options={"verify_signature": False})
# Get the public key from the issuer
keycache = KeyCache.KeyCache().getinstance()
if public_key is None:
issuer_public_key = keycache.getkeyinfo(unverified_payload['iss'],
key_id=unverified_headers['kid'] if 'kid' in unverified_headers else None,
insecure=insecure)
else:
issuer_public_key = load_pem_public_key(public_key, backend=backends.default_backend())
if audience:
claims = jwt.decode(serialized_token, issuer_public_key, audience = audience, algorithms=['RS256', 'ES256'])
else:
claims = jwt.decode(serialized_token, issuer_public_key, algorithms=['RS256', 'ES256'])
to_return = SciToken()
to_return._verified_claims = claims
to_return._serialized_token = serialized_token
return to_return
@staticmethod
def discover(audience=None, require_key=False, insecure=False, public_key=None):
"""
Create a SciToken by looking for a token with WLCG Bearer Token Discovery protocol
https://github.com/WLCG-AuthZ-WG/bearer-token-discovery/blob/master/specification.md
The serialized token is read in and passed to the deserialize() method to load it
into a SciToken object. Raises IOError if a token cannot be found, or the errors
of SciToken.deserialize() if there is an error reading the discovered token.
:param str audience: The audience URI that this principal is claiming. Default: None
:param bool require_key: When True, require the key
:param bool insecure: When True, allow insecure methods to verify the issuer,
including allowing "localhost" issuer (useful in testing). Default=False
:param str public_key: A PEM formatted public key string to be used to validate the token
"""
if os.environ.get('BEARER_TOKEN'):
return SciToken.deserialize(os.environ['BEARER_TOKEN'].strip(),
audience, require_key, insecure, public_key)
if os.environ.get('BEARER_TOKEN_FILE') and os.path.isfile(os.environ.get('BEARER_TOKEN_FILE')):
with open(os.environ.get('BEARER_TOKEN_FILE')) as t:
token_data = t.read().strip()
return SciToken.deserialize(token_data,
audience, require_key, insecure, public_key)
bt_file = 'bt_u{}'.format(os.geteuid())
if os.environ.get('XDG_RUNTIME_DIR'):
bt_path = os.path.join(os.environ.get('XDG_RUNTIME_DIR'), bt_file)
else:
bt_path = os.path.join('/tmp', bt_file)
if os.path.isfile(bt_path):
with open(bt_path) as t:
token_data = t.read().strip()
return SciToken.deserialize(token_data,
audience, require_key, insecure, public_key)
raise IOError
class ValidationFailure(Exception):
"""
Validation of a token was attempted but failed for an unknown reason.
"""
class NoRegisteredValidator(ValidationFailure):
"""
The Validator object attempted validation of a token, but encountered a
claim with no registered validator.
"""
class ClaimInvalid(ValidationFailure):
"""
The Validator object attempted validation of a given claim, but one of the
callbacks marked the claim as invalid.
"""
class MissingClaims(ValidationFailure):
"""
Validation failed because one or more claims marked as critical are missing
from the token.
"""
class Validator(object):
"""
Validate the contents of a SciToken.
Given a SciToken, validate the contents of its claims. Unlike verification,
which checks that the token is correctly signed, validation provides an easy-to-use
interface that ensures the claims in the token are understood by the user.
"""
def __init__(self):
self._callbacks = {}
def add_validator(self, claim, validate_op):
"""
Add a validation callback for a given claim. When the given ``claim`` is
encountered in a token, the ``validate_op`` object will be called with the
following signature::
>>> validate_op(value)
where ``value`` is the value of the token's claim converted to a python
object.
The validator should return ``True`` if the value is acceptable and ``False``
otherwise.
"""
validator_list = self._callbacks.setdefault(claim, [])
validator_list.append(validate_op)
def validate(self, token, critical_claims=None):
"""
Validate the claims of a token.
This will iterate through all claims in the given :class:`SciToken`
and determine whether all claims are valid, given the current set of
validators.
If ``critical_claims`` is specified, then validation will fail if one
or more claims in this list are not present in the token.
This will throw an exception if the token is invalid and return ``True``
if the token is valid.
"""
if critical_claims:
critical_claims = set(critical_claims)
else:
critical_claims = set()
for claim, value in token.claims():
if claim in critical_claims:
critical_claims.remove(claim)
validator_list = self._callbacks.setdefault(claim, [])
if not validator_list:
if "ver" not in token or token["ver"] != "scitoken:2.0":
raise NoRegisteredValidator("No validator was registered to handle encountered claim '%s'" % claim)
for validator in validator_list:
if not validator(value):
raise ClaimInvalid("Validator rejected value of '%s' for claim '%s'" % (value, claim))
if len(critical_claims):
raise MissingClaims("Validation failed because the following claims are missing: %s" % \
", ".join(critical_claims))
return True
def __call__(self, token):
return self.validate(token)
class EnforcementError(Exception):
"""
A generic error during the enforcement of a SciToken.
"""
class InvalidPathError(EnforcementError):
"""
An invalid test path was provided to the Enforcer object.
Test paths must be absolute paths (start with '/')
"""
class InvalidAuthorizationResource(EnforcementError):
"""
A scope was encountered with an invalid authorization.
Examples include:
- Authorizations that require paths (read, write) but none
were included.
- Scopes that include relative paths (read:~/foo)
"""
class Enforcer(object):
"""
Enforce SciTokens-specific validation logic.
Allows one to test if a given token has a particular authorization.
This class is NOT thread safe; a separate object is needed for every thread.
"""
_authz_requiring_path = set(["read", "write"])
# An array of versions of scitokens that we understand and can enforce
_versions_understood = [ 1, "scitoken:2.0" ]
def __init__(self, issuer, audience=None):
self._issuer = issuer
self.last_failure = None
if not self._issuer:
raise EnforcementError("Issuer must be specified.")
self._audience = audience
self._test_access = False
self._test_authz = None
self._test_path = None
self._token_scopes = set()
self._now = 0
self._validator = Validator()
self._validator.add_validator("exp", self._validate_exp)
self._validator.add_validator("nbf", self._validate_nbf)
self._validator.add_validator("iss", self._validate_iss)
self._validator.add_validator("iat", self._validate_iat)
self._validator.add_validator("aud", self._validate_aud)
self._validator.add_validator("scp", self._validate_scp)
self._validator.add_validator("scope", self._validate_scope)
self._validator.add_validator("jti", self._validate_jti)
self._validator.add_validator("sub", self._validate_sub)
self._validator.add_validator("ver", self._validate_ver)
self._validator.add_validator("opt", self._validate_opt)
def _reset_state(self):
"""
Reset the internal state variables of the Enforcer object. Automatically
invoked each time the Enforcer is used to test or generate_acls
"""
self._test_authz = None
self._test_path = None
self._test_access = False
self._token_scopes = set()
self._now = time.time()
self.last_failure = None
def add_validator(self, claim, validator):
"""
Add a user-defined validator in addition to the default enforcer logic.
"""
self._validator.add_validator(claim, validator)
def test(self, token, authz, path=None):
"""
Test whether a given token has the requested permission within the
current enforcer context.
"""
self._reset_state()
self._test_access = True
critical_claims = set(["scope"])
# Check for the older "scp" attribute
if 'scope' not in token and 'scp' in token:
critical_claims = set(["scp"])
# In scitokens 2.0, some claims are required
if 'ver' in token and token['ver'] == "scitoken:2.0":
critical_claims.update(['aud', 'ver'])
if not path and (authz in self._authz_requiring_path):
raise InvalidPathError("Enforcer provided with an empty path.")
if path and not path.startswith("/"):
raise InvalidPathError("Enforcer was given an invalid relative path to test; absolute path required.")
self._test_path = path
self._test_authz = authz
self.last_failure = None
try:
self._validator.validate(token, critical_claims=critical_claims)
except ValidationFailure as validation_failure:
self.last_failure = str(validation_failure)
return False
return True
def generate_acls(self, token):
"""
Given a SciToken object and the expected issuer, return the valid ACLs.
"""
self._reset_state()
critical_claims = set(["scope"])
# Check for the older "scp" attribute
if 'scope' not in token and 'scp' in token:
critical_claims = set(["scp"])
try:
self._validator.validate(token, critical_claims=critical_claims)
except ValidationFailure as verify_fail:
self.last_failure = str(verify_fail)
raise
return list(self._token_scopes)
def _validate_exp(self, value):
exp = float(value)
return exp >= self._now
def _validate_nbf(self, value):
nbf = float(value)
return nbf < self._now
def _validate_iss(self, value):
return self._issuer == value
def _validate_iat(self, value):
return float(value) < self._now
def _validate_aud(self, value):
if not self._audience:
return False
elif self._audience == "ANY":
return False
elif value == "ANY":
return True
# Convert the value and self._audience both to sets
# Then perform set intersection
values = []
if not isinstance(value, list):
values = [value]
else:
values = value
set_value = set(values)
audiences = []
if not isinstance(self._audience, list):
audiences = [self._audience]
else:
audiences = self._audience
set_aud = set(audiences)
if len(set_value.intersection(set_aud)) > 0:
return True
else:
return False
def _validate_ver(self, value):
if value in self._versions_understood:
return True
else:
return False
@classmethod
def _validate_opt(cls, value):
"""
Opt is optional information. We don't know what's in there, so just
return true.
"""
del value
return True
@classmethod
def _validate_sub(cls, value):
"""
SUB, or subject, should always pass. It's mostly used for identifying
a token's origin.
"""
# Fix for unused argument
del value
return True
@classmethod
def _validate_jti(cls, value):
"""
JTI, or json token id, should always pass. It's mostly used for logging
and auditing.
"""
global LOGGER
LOGGER.info("Validating SciToken with jti: {0}".format(value))
return True
def _check_scope(self, scope):
"""
Given a scope, make sure it contains a resource
for scope types that require resources.
Returns a tuple of the (authz, path). If path is
not in the scope (and is not required to be explicitly inside
the scope), it will default to '/'.
"""
info = scope.split(":", 1)
authz = info[0]
if authz in self._authz_requiring_path and (len(info) == 1):
raise InvalidAuthorizationResource("Token contains an authorization requiring a resource"
"(path), but no path is present")
if len(info) == 2:
path = info[1]
if not path.startswith("/"):
raise InvalidAuthorizationResource("Token contains a relative path in scope")
norm_path = urltools.normalize_path(path)
else:
norm_path = '/'
return (authz, norm_path)
def _validate_scp(self, value):
if not isinstance(value, list):
value = [value]
if self._test_access:
if not self._test_path:
norm_requested_path = '/'
else:
norm_requested_path = urltools.normalize_path(self._test_path)
for scope in value:
authz, norm_path = self._check_scope(scope)
if (self._test_authz == authz) and norm_requested_path.startswith(norm_path):
return True
return False
else:
for scope in value:
authz, norm_path = self._check_scope(scope)
self._token_scopes.add((authz, norm_path))
return True
def _validate_scope(self, value):
if not isinstance(value, string_types):
raise InvalidAuthorizationResource("Scope is invalid. Must be a space separated string")
if self._test_access:
if not self._test_path:
norm_requested_path = '/'
else:
norm_requested_path = urltools.normalize_path(self._test_path)
# Split on spaces
for scope in value.split(" "):
authz, norm_path = self._check_scope(scope)
if (self._test_authz == authz) and norm_requested_path.startswith(norm_path):
return True
return False
else:
# Split on spaces
for scope in value.split(" "):
authz, norm_path = self._check_scope(scope)
self._token_scopes.add((authz, norm_path))
return True
|
[] |
[] |
[
"BEARER_TOKEN_FILE",
"XDG_RUNTIME_DIR",
"BEARER_TOKEN"
] |
[]
|
["BEARER_TOKEN_FILE", "XDG_RUNTIME_DIR", "BEARER_TOKEN"]
|
python
| 3 | 0 | |
kedro_neptune/__init__.py
|
#
# Copyright (c) 2021, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__all__ = [
'NeptuneRunDataSet',
'NeptuneFileDataSet',
'neptune_hooks',
'init'
]
import hashlib
import json
import os
import sys
import time
import urllib.parse
from typing import Any, Dict, Optional
import click
from kedro.extras.datasets.text import TextDataSet
from kedro.framework.hooks import hook_impl
from kedro.framework.project import settings
from kedro.framework.session import KedroSession
from kedro.framework.startup import ProjectMetadata
from kedro.io import DataCatalog, MemoryDataSet
from kedro.io.core import (
AbstractDataSet,
get_filepath_str,
Version,
)
from kedro.pipeline import Pipeline
from kedro.pipeline.node import Node
from ruamel.yaml import YAML
from kedro_neptune._version import get_versions
from kedro_neptune.config import get_neptune_config
try:
# neptune-client=0.9.0+ package structure
import neptune.new as neptune
from neptune.new.types import File
from neptune.new.internal.utils import verify_type
from neptune.new.internal.init_impl import RunMode
from neptune.new.internal.utils.paths import join_paths
except ImportError:
# neptune-client>=1.0.0 package structure
import neptune
from neptune.types import File
from neptune.internal.utils import verify_type
from neptune.internal.init_impl import RunMode
from neptune.internal.utils.paths import join_paths
INTEGRATION_VERSION_KEY = 'source_code/integrations/kedro-neptune'
__version__ = get_versions()['version']
@click.group(name='Neptune')
def commands():
"""Kedro plugin for logging with Neptune.ai"""
@commands.group(name='neptune')
def neptune_commands():
pass
INITIAL_NEPTUNE_CONFIG = """\
neptune:
#GLOBAL CONFIG
project: ''
base_namespace: 'kedro'
enabled: true
#LOGGING
upload_source_files:
# - '**/*.py'
- 'conf/base/*.yml'
"""
INITIAL_NEPTUNE_CREDENTIALS = """\
neptune:
api_token: $NEPTUNE_API_TOKEN
"""
INITIAL_NEPTUNE_CATALOG = """\
# You can log files to Neptune via NeptuneFileDataSet
#
# example_artifact:
# type: kedro_neptune.NeptuneFileDataSet
# filepath: data/06_models/clf_model.pkl
#
# If you want to log existing Kedro Dataset to Neptune add @neptune to the DataSet name
#
# example_iris_data@neptune:
# type: kedro_neptune.NeptuneFileDataSet
# filepath: data/01_raw/iris.csv
#
# You can use kedro_neptune.NeptuneFileDataSet in any catalog including conf/base/catalog.yml
#
"""
PROMPT_API_TOKEN = """Pass Neptune API Token or press enter if you want to \
use $NEPTUNE_API_TOKEN environment variable:""".replace('\n', '')
PROMPT_PROJECT_NAME = """Pass Neptune project name in a WORKSPACE/PROJECT format or press enter if you want to \
use $NEPTUNE_PROJECT environment variable:""".replace('\n', '')
@neptune_commands.command()
@click.option('--api-token', prompt=PROMPT_API_TOKEN, default='$NEPTUNE_API_TOKEN')
@click.option('--project', prompt=PROMPT_PROJECT_NAME, default='$NEPTUNE_PROJECT')
@click.option('--base-namespace', default='kedro')
@click.option('--config', default='base')
@click.pass_obj
def init(metadata: ProjectMetadata, api_token: str, project: str, base_namespace: str, config: str):
"""Command line interface (CLI) command for initializing Kedro-Neptune plugin.
Kedro-Neptune plugin lets you log metadata related to Kedro pipelines to `Neptune.ai ML metadata store`_
so that you can monitor, visualize, and compare your pipelines and node outputs in the Neptune UI.
After initializing it, whenever you run '$ kedro run', you will log:
* parameters
* pipeline execution configuration (run_params)
* metadata about Kedro DataSets
* hardware consumption and node execution times
* configuration files from the conf/base directory
* full Kedro run command
* any additional metadata like metrics, charts, or images that you logged from inside of your node functions.
See `example project in Neptune`_.
You may also want to check `Neptune-Kedro integration docs page`_.
Args:
api-token (string): Neptune API token or the environment variable name where it is stored.
Default is '$NEPTUNE_API_TOKEN'. See `How to find your Neptune API token`_.
project (string): Neptune project name or the environment variable name where it is stored.
Default is '$NEPTUNE_PROJECT'. See `How to find your Neptune project name`_.
base-namespace (string): Namespace in Neptune where all the Kedro-related metadata is logged.
Default is 'kedro'.
config (string): Name of the subdirectory inside the Kedro 'conf' directory for
configuration and catalog files. Default is 'base'.
Returns:
``None``. The command creates the 'credentials_neptune.yml', 'neptune.yml', and 'catalog_neptune.yml' configuration files (if they do not already exist) in the Kedro project.
Examples:
Pass required arguments directly
$ kedro neptune init --api-token $NEPTUNE_API_TOKEN --project common/kedro-integration
Use prompts to fill the required arguments
$ kedro neptune init
You may also want to check `Neptune-Kedro integration docs page`_.
.. _Neptune.ai ML metadata store:
https://neptune.ai/
.. _example project in Neptune:
https://app.neptune.ai/o/common/org/kedro-integration/e/KED-572
.. _Neptune-Kedro integration docs page:
https://docs.neptune.ai/integrations-and-supported-tools/pipeline-and-orchestration/kedro
.. _How to find your Neptune project name:
https://docs.neptune.ai/getting-started/installation#setting-the-project-name
.. _How to find your Neptune API token:
https://docs.neptune.ai/getting-started/installation#authentication-neptune-api-token
"""
session = KedroSession(metadata.package_name)
context = session.load_context()
yaml = YAML()
context.credentials_file = context.project_path / settings.CONF_ROOT / 'local' / 'credentials_neptune.yml'
if not context.credentials_file.exists():
with context.credentials_file.open('w') as credentials_file:
credentials_template = yaml.load(INITIAL_NEPTUNE_CREDENTIALS)
credentials_template['neptune']['api_token'] = api_token
yaml.dump(credentials_template, credentials_file)
click.echo(f'Created credentials_neptune.yml configuration file: {context.credentials_file}')
context.config_file = context.project_path / settings.CONF_ROOT / config / 'neptune.yml'
if not context.config_file.exists():
with context.config_file.open('w') as config_file:
config_template = yaml.load(INITIAL_NEPTUNE_CONFIG)
config_template['neptune']['project'] = project
config_template['neptune']['base_namespace'] = base_namespace
config_template['neptune']['upload_source_files'] = ['**/*.py', f'{settings.CONF_ROOT}/{config}/*.yml']
yaml.dump(config_template, config_file)
click.echo(f'Creating neptune.yml configuration file in: {context.config_file}')
context.catalog_file = context.project_path / settings.CONF_ROOT / config / 'catalog_neptune.yml'
if not context.catalog_file.exists():
with context.catalog_file.open('w') as catalog_file:
catalog_file.writelines(INITIAL_NEPTUNE_CATALOG)
click.echo(f'Creating catalog_neptune.yml configuration file: {context.catalog_file}')
def _connection_mode(enabled: bool) -> str:
return RunMode.ASYNC if enabled else RunMode.DEBUG
class NeptuneRunDataSet(AbstractDataSet):
def _save(self, data: Dict[str, Any]) -> None:
raise NotImplementedError()
def _describe(self) -> Dict[str, Any]:
return {}
def _exists(self) -> bool:
return True
def _load(self) -> neptune.run.Handler:
config = get_neptune_config()
run = neptune.init(api_token=config.api_token,
project=config.project,
mode=_connection_mode(config.enabled),
capture_stdout=False,
capture_stderr=False,
capture_hardware_metrics=False,
capture_traceback=False,
source_files=None)
return run[config.base_namespace]
class BinaryFileDataSet(TextDataSet):
def __init__(
self,
filepath: str,
version: Version = None,
credentials: Dict[str, Any] = None,
fs_args: Dict[str, Any] = None,
) -> None:
super().__init__(
filepath=filepath,
version=version,
credentials=credentials,
fs_args=fs_args
)
def _describe(self) -> Dict[str, Any]:
load_path = get_filepath_str(self._get_load_path(), self._protocol)
path = urllib.parse.urlparse(load_path).path
extension = os.path.splitext(path)[1][1:]
return dict(
extension=extension,
**super()._describe()
)
def _save(self, data: bytes) -> None:
path = get_filepath_str(self._get_save_path(), self._protocol)
with self._fs.open(path, mode='wb') as fs_file:
return fs_file.write(data)
def _load(self) -> bytes:
path = get_filepath_str(self._get_load_path(), self._protocol)
with self._fs.open(path, mode='rb') as fs_file:
return fs_file.read()
class NeptuneFileDataSet(BinaryFileDataSet):
"""NeptuneFileDataSet is a Kedro Data Set that lets you log files to Neptune.
It can be any file on the POSIX compatible filesystem.
To log it, you need to define the NeptuneFileDataSet in any Kedro catalog, including catalog.yml.
You may also want to check `Neptune-Kedro integration docs page`_.
Args:
filepath (string): Filepath in POSIX format to a text file prefixed with a protocol like s3://.
Same as for `Kedro TextDataSet`_.
credentials (dict, optional): Credentials required to get access to the underlying filesystem.
Same as for `Kedro TextDataSet`_.
fs_args (dict, optional): Extra arguments to pass into underlying filesystem class constructor.
Same as for `Kedro TextDataSet`_.
Examples:
Log a file to Neptune from any Kedro catalog YML file.
example_model_file:
type: kedro_neptune.NeptuneFileDataSet
filepath: data/06_models/clf.pkl
Log a file to Neptune that has already been defined as a Kedro DataSet in any catalog YML file.
example_iris_data:
type: pandas.CSVDataSet
filepath: data/01_raw/iris.csv
example_iris_data@neptune:
type: kedro_neptune.NeptuneFileDataSet
filepath: data/01_raw/iris.csv
You may also want to check `Neptune-Kedro integration docs page`_.
.. _Neptune-Kedro integration docs page:
https://docs.neptune.ai/integrations-and-supported-tools/pipeline-and-orchestration/kedro
.. _Kedro TextDataSet:
https://kedro.readthedocs.io/en/stable/kedro.extras.datasets.text.TextDataSet.html
"""
def __init__(
self,
filepath: str,
credentials: Dict[str, Any] = None,
fs_args: Dict[str, Any] = None,
):
super().__init__(
filepath=filepath,
version=None,
credentials=credentials,
fs_args=fs_args
)
def log_file_dataset(namespace: neptune.run.Handler, name: str, dataset: NeptuneFileDataSet):
# pylint: disable=protected-access
if not namespace._run.exists(f'{namespace._path}/{name}'):
data = dataset.load()
extension = dataset._describe().get('extension')
try:
file = File.create_from(data)
except TypeError:
file = File.from_content(
data,
extension=extension
)
namespace[name].upload(
file
)
def log_parameters(namespace: neptune.run.Handler, catalog: DataCatalog):
# pylint: disable=protected-access
namespace['parameters'] = catalog._data_sets['parameters'].load()
def log_dataset_metadata(namespace: neptune.run.Handler, name: str, dataset: AbstractDataSet):
additional_parameters = {}
try:
# pylint: disable=protected-access
additional_parameters = dataset._describe()
except AttributeError:
pass
namespace[name] = {
'type': type(dataset).__name__,
'name': name,
**additional_parameters
}
def log_data_catalog_metadata(namespace: neptune.run.Handler, catalog: DataCatalog):
# pylint: disable=protected-access
namespace = namespace['catalog']
for name, dataset in catalog._data_sets.items():
if dataset.exists() and not namespace._run.exists(join_paths(namespace._path, name)):
if not isinstance(dataset, MemoryDataSet) and not isinstance(dataset, NeptuneRunDataSet):
log_dataset_metadata(namespace=namespace['datasets'], name=name, dataset=dataset)
if isinstance(dataset, NeptuneFileDataSet):
log_file_dataset(namespace=namespace['files'], name=name, dataset=dataset)
log_parameters(namespace=namespace, catalog=catalog)
def log_pipeline_metadata(namespace: neptune.run.Handler, pipeline: Pipeline):
namespace['structure'].upload(File.from_content(
json.dumps(
json.loads(pipeline.to_json()),
indent=4,
sort_keys=True
),
'json'
))
def log_run_params(namespace: neptune.run.Handler, run_params: Dict[str, Any]):
namespace['run_params'] = run_params
def log_command(namespace: neptune.run.Handler):
namespace['kedro_command'] = ' '.join(['kedro'] + sys.argv[1:])
class NeptuneHooks:
def __init__(self):
self._run_id: Optional[str] = None
self._node_execution_timers: Dict[str, float] = {}
# pylint: disable=unused-argument
@hook_impl
def after_catalog_created(
self,
catalog: DataCatalog,
run_id: str,
) -> None:
self._run_id = hashlib.md5(run_id.encode()).hexdigest()
os.environ['NEPTUNE_CUSTOM_RUN_ID'] = self._run_id
catalog.add(
data_set_name='neptune_run',
data_set=NeptuneRunDataSet()
)
@hook_impl
def before_pipeline_run(
self,
run_params: Dict[str, Any],
pipeline: Pipeline,
catalog: DataCatalog
) -> None:
config = get_neptune_config()
run = neptune.init(api_token=config.api_token,
project=config.project,
mode=_connection_mode(config.enabled),
custom_run_id=self._run_id,
source_files=config.source_files or None)
run[INTEGRATION_VERSION_KEY] = __version__
current_namespace = run[config.base_namespace]
os.environ['NEPTUNE_API_TOKEN'] = config.api_token or ''
os.environ['NEPTUNE_PROJECT'] = config.project or ''
log_command(namespace=current_namespace)
log_run_params(namespace=current_namespace, run_params=run_params)
log_data_catalog_metadata(namespace=current_namespace, catalog=catalog)
log_pipeline_metadata(namespace=current_namespace, pipeline=pipeline)
@hook_impl
def before_node_run(
self,
node: Node,
inputs: Dict[str, Any],
catalog: DataCatalog
):
run = catalog.load('neptune_run')
current_namespace = run[f'nodes/{node.short_name}']
if inputs:
current_namespace['inputs'] = list(sorted(inputs.keys()))
for input_name, input_value in inputs.items():
if input_name.startswith('params:'):
current_namespace[f'parameters/{input_name[len("params:"):]}'] = input_value
self._node_execution_timers[node.short_name] = time.time()
@hook_impl
def after_node_run(
self,
node: Node,
catalog: DataCatalog,
outputs: Dict[str, Any]
) -> None:
# pylint: disable=protected-access
execution_time = float(time.time() - self._node_execution_timers[node.short_name])
run = catalog.load('neptune_run')
current_namespace = run[f'nodes/{node.short_name}']
current_namespace['execution_time'] = execution_time
if outputs:
current_namespace['outputs'] = list(sorted(outputs.keys()))
log_data_catalog_metadata(namespace=run, catalog=catalog)
run._run.sync()
@hook_impl
def after_pipeline_run(
self,
catalog: DataCatalog
) -> None:
# pylint: disable=protected-access
run = catalog.load('neptune_run')
log_data_catalog_metadata(namespace=run, catalog=catalog)
run._run.sync()
neptune_hooks = NeptuneHooks()
|
[] |
[] |
[
"NEPTUNE_CUSTOM_RUN_ID",
"NEPTUNE_PROJECT",
"NEPTUNE_API_TOKEN"
] |
[]
|
["NEPTUNE_CUSTOM_RUN_ID", "NEPTUNE_PROJECT", "NEPTUNE_API_TOKEN"]
|
python
| 3 | 0 | |
gorush/server_test.go
|
package gorush
import (
"context"
"crypto/tls"
"io/ioutil"
"log"
"net/http"
"os"
"runtime"
"testing"
"time"
"github.com/appleboy/gorush/config"
"github.com/appleboy/gofight/v2"
"github.com/buger/jsonparser"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/assert"
)
var goVersion = runtime.Version()
func initTest() {
PushConf, _ = config.LoadConf("")
PushConf.Core.Mode = "test"
}
// testRequest tests the given url to check that the server is running
func testRequest(t *testing.T, url string) {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{
Timeout: time.Second * 10,
Transport: tr,
}
resp, err := client.Get(url)
// check the request error before touching resp to avoid a nil pointer dereference
assert.NoError(t, err)
if err != nil {
return
}
defer func() {
if err := resp.Body.Close(); err != nil {
log.Println("close body err:", err)
}
}()
_, ioerr := ioutil.ReadAll(resp.Body)
assert.NoError(t, ioerr)
assert.Equal(t, "200 OK", resp.Status, "should get a 200")
}
func TestPrintGoRushVersion(t *testing.T) {
SetVersion("3.0.0")
ver := GetVersion()
PrintGoRushVersion()
assert.Equal(t, "3.0.0", ver)
}
func TestRunNormalServer(t *testing.T) {
initTest()
gin.SetMode(gin.TestMode)
go func() {
assert.NoError(t, RunHTTPServer(context.Background()))
}()
// have to wait for the goroutine to start and run the server
// otherwise the main thread will complete
time.Sleep(5 * time.Millisecond)
testRequest(t, "http://localhost:8088/api/stat/go")
}
func TestRunTLSServer(t *testing.T) {
initTest()
PushConf.Core.SSL = true
PushConf.Core.Port = "8087"
PushConf.Core.CertPath = "../certificate/localhost.cert"
PushConf.Core.KeyPath = "../certificate/localhost.key"
go func() {
assert.NoError(t, RunHTTPServer(context.Background()))
}()
// have to wait for the goroutine to start and run the server
// otherwise the main thread will complete
time.Sleep(5 * time.Millisecond)
testRequest(t, "https://localhost:8087/api/stat/go")
}
func TestRunTLSBase64Server(t *testing.T) {
var cert = `LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMrekNDQWVPZ0F3SUJBZ0lKQUxiWkVEdlVRckZLTUEwR0NTcUdTSWIzRFFFQkJRVUFNQlF4RWpBUUJnTlYKQkFNTUNXeHZZMkZzYUc5emREQWVGdzB4TmpBek1qZ3dNek13TkRGYUZ3MHlOakF6TWpZd016TXdOREZhTUJReApFakFRQmdOVkJBTU1DV3h2WTJGc2FHOXpkRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTWoxK3hnNGpWTHpWbkI1ajduMXVsMzBXRUU0QkN6Y05GeGc1QU9CNUg1cSt3amUwWVlpVkZnNlBReXYKR0NpcHFJUlhWUmRWUTFoSFNldW5ZR0tlOGxxM1NiMVg4UFVKMTJ2OXVSYnBTOURLMU93cWs4cnNQRHU2c1ZUTApxS0tnSDFaOHlhenphUzBBYlh1QTVlOWdPL1J6aWpibnBFUCtxdU00ZHVlaU1QVkVKeUxxK0VvSVFZK01NOE1QCjhkWnpMNFhabDd3TDRVc0NON3JQY082VzN0bG5UMGlPM2g5Yy9ZbTJoRmh6K0tOSjlLUlJDdnRQR1pFU2lndEsKYkhzWEgwOTlXRG84di9XcDUvZXZCdy8rSkQwb3B4bUNmSElCQUxIdDl2NTNSdnZzRFoxdDMzUnB1NUM4em5FWQpZMkF5N05neGhxanFvV0pxQTQ4bEplQTBjbHNDQXdFQUFhTlFNRTR3SFFZRFZSME9CQllFRkMwYlRVMVhvZmVoCk5LSWVsYXNoSXNxS2lkRFlNQjhHQTFVZEl3UVlNQmFBRkMwYlRVMVhvZmVoTktJZWxhc2hJc3FLaWREWU1Bd0cKQTFVZEV3UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUZCUUFEZ2dFQkFBaUpMOElNVHdOWDlYcVFXWURGZ2tHNApBbnJWd1FocmVBcUM5clN4RENqcXFuTUhQSEd6Y0NlRE1MQU1vaDBrT3kyMG5vd1VHTnRDWjB1QnZuWDJxMWJOCmcxanQrR0JjTEpEUjNMTDRDcE5PbG0zWWhPeWN1TmZXTXhUQTdCWGttblNyWkQvN0toQXJzQkVZOGF1bHh3S0oKSFJnTmxJd2Uxb0ZEMVlkWDFCUzVwcDR0MjVCNlZxNEEzRk1NVWtWb1dFNjg4bkUxNjhodlFnd2pySGtnSGh3ZQplTjhsR0UyRGhGcmFYbldtRE1kd2FIRDNIUkZHaHlwcElGTitmN0JxYldYOWdNK1QyWVJUZk9iSVhMV2JxSkxECjNNay9Oa3hxVmNnNGVZNTR3SjF1ZkNVR0FZQUlhWTZmUXFpTlV6OG5od0szdDQ1TkJWVDl5L3VKWHFuVEx5WT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=`
var key = `LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBeVBYN0dEaU5Vdk5XY0htUHVmVzZYZlJZUVRnRUxOdzBYR0RrQTRIa2ZtcjdDTjdSCmhpSlVXRG85REs4WUtLbW9oRmRWRjFWRFdFZEo2NmRnWXA3eVdyZEp2VmZ3OVFuWGEvMjVGdWxMME1yVTdDcVQKeXV3OE83cXhWTXVvb3FBZlZuekpyUE5wTFFCdGU0RGw3MkE3OUhPS051ZWtRLzZxNHpoMjU2SXc5VVFuSXVyNApTZ2hCajR3end3L3gxbk12aGRtWHZBdmhTd0kzdXM5dzdwYmUyV2RQU0k3ZUgxejlpYmFFV0hQNG8wbjBwRkVLCiswOFprUktLQzBwc2V4Y2ZUMzFZT2p5Lzlhbm45NjhIRC80a1BTaW5HWUo4Y2dFQXNlMzIvbmRHKyt3Tm5XM2YKZEdtN2tMek9jUmhqWURMczJER0dxT3FoWW1vRGp5VWw0RFJ5V3dJREFRQUJBb0lCQUdUS3FzTjlLYlNmQTQycQpDcUkwVXVMb3VKTU5hMXFzbno1dUFpNllLV2dXZEE0QTQ0bXBFakNtRlJTVmhVSnZ4V3VLK2N5WUlRelh4SVdECkQxNm5aZHFGNzJBZUNXWjlKeVNzdnZaMDBHZktNM3kzNWlSeTA4c0pXZ096bWNMbkdKQ2lTZXlLc1FlM0hUSkMKZGhEWGJYcXZzSFRWUFpnMDFMVGVEeFVpVGZmVThOTUtxUjJBZWNRMnNURHdYRWhBblR5QXRuemwvWGFCZ0Z6dQpVNkc3RnpHTTV5OWJ4a2ZRVmt2eStERUprSEdOT2p6d2NWZkJ5eVZsNjEwaXhtRzF2bXhWajlQYldtSVBzVVY4CnlTbWpodkRRYk9mb3hXMGg5dlRsVHFHdFFjQnc5NjJvc25ERE1XRkNkTTdsek8wVDdSUm5QVkdJUnBDSk9LaHEKa2VxSEt3RUNnWUVBOHd3SS9pWnVnaG9UWFRORzlMblFRL1dBdHNxTzgwRWpNVFVoZW81STFrT3ptVXowOXB5aAppQXNVRG9OMC8yNnRaNVdOamxueVp1N2R2VGMveDNkVFpwbU5ub284Z2NWYlFORUNEUnpxZnVROVBQWG0xU041CjZwZUJxQXZCdjc4aGpWMDVhWHpQRy9WQmJlaWc3bDI5OUVhckVBK2Evb0gzS3JnRG9xVnFFMEVDZ1lFQTA2dkEKWUptZ2c0ZlpSdWNBWW9hWXNMejlaOXJDRmpUZTFQQlRtVUprYk9SOHZGSUhIVFRFV2kvU3V4WEwwd0RTZW9FMgo3QlFtODZnQ0M3L0tnUmRyem9CcVo1cVM5TXYyZHNMZ1k2MzVWU2dqamZaa1ZMaUgxVlJScFNRT2JZbmZveXNnCmdhdGNIU0tNRXhkNFNMUUJ5QXVJbVhQK0w1YXlEQmNFSmZicVNwc0NnWUI3OElzMWIwdXpOTERqT2g3WTlWaHIKRDJxUHpFT1JjSW9Oc2RaY3RPb1h1WGFBbW1uZ3lJYm01UjlaTjFnV1djNDdvRndMVjNyeFdxWGdzNmZtZzhjWAo3djMwOXZGY0M5UTQvVnhhYTRCNUxOSzluM2dUQUlCUFRPdGxVbmwrMm15MXRmQnRCcVJtMFc2SUtiVEhXUzVnCnZ4akVtL0NpRUl5R1VFZ3FUTWdIQVFLQmdCS3VYZFFvdXRuZzYzUXVmd0l6RHRiS1Z6TUxRNFhpTktobWJYcGgKT2F2Q25wK2dQYkIrTDdZbDhsdEFtVFNPSmdWWjBoY1QwRHhBMzYxWngrMk11NThHQmw0T2JsbmNobXdFMXZqMQpLY1F5UHJFUXhkb1VUeWlzd0dmcXZyczhKOWltdmIrejkvVTZUMUtBQjhXaTNXVmlYelByNE1zaWFhUlhnNjQyCkZJZHhBb0dBWjcvNzM1ZGtoSmN5T2ZzK0xLc0xyNjhKU3N0b29yWE9ZdmRNdTErSkdhOWlMdWhuSEVjTVZXQzgKSXVpaHpQZmxvWnRNYkdZa1pKbjhsM0JlR2Q4aG1mRnRnVGdaR1BvVlJldGZ0MkxERkxuUHhwMnNFSDVPRkxzUQpSK0sva0FPdWw4ZVN0V3VNWE9GQTlwTXpHa0dFZ0lGSk1KT3lhSk9OM2tlZFFJOGRlQ009Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==`
initTest()
PushConf.Core.SSL = true
PushConf.Core.Port = "8089"
PushConf.Core.CertPath = ""
PushConf.Core.KeyPath = ""
PushConf.Core.CertBase64 = cert
PushConf.Core.KeyBase64 = key
go func() {
assert.NoError(t, RunHTTPServer(context.Background()))
}()
// have to wait for the goroutine to start and run the server
// otherwise the main thread will complete
time.Sleep(5 * time.Millisecond)
testRequest(t, "https://localhost:8089/api/stat/go")
}
func TestRunAutoTLSServer(t *testing.T) {
initTest()
PushConf.Core.AutoTLS.Enabled = true
go func() {
assert.NoError(t, RunHTTPServer(context.Background()))
}()
// have to wait for the goroutine to start and run the server
// otherwise the main thread will complete
time.Sleep(5 * time.Millisecond)
}
func TestLoadTLSCertError(t *testing.T) {
initTest()
PushConf.Core.SSL = true
PushConf.Core.Port = "8087"
PushConf.Core.CertPath = "../config/config.yml"
PushConf.Core.KeyPath = "../config/config.yml"
assert.Error(t, RunHTTPServer(context.Background()))
}
func TestMissingTLSCertConfig(t *testing.T) {
initTest()
PushConf.Core.SSL = true
PushConf.Core.Port = "8087"
PushConf.Core.CertPath = ""
PushConf.Core.KeyPath = ""
PushConf.Core.CertBase64 = ""
PushConf.Core.KeyBase64 = ""
err := RunHTTPServer(context.Background())
assert.Error(t, err)
assert.Equal(t, "missing https cert config", err.Error())
}
func TestRootHandler(t *testing.T) {
initTest()
r := gofight.New()
// log for json
PushConf.Log.Format = "json"
r.GET("/").
Run(routerEngine(), func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
data := r.Body.Bytes()
value, _ := jsonparser.GetString(data, "text")
assert.Equal(t, "Welcome to notification server.", value)
assert.Equal(t, http.StatusOK, r.Code)
assert.Equal(t, "application/json; charset=utf-8", r.HeaderMap.Get("Content-Type"))
})
}
func TestAPIStatusGoHandler(t *testing.T) {
initTest()
r := gofight.New()
r.GET("/api/stat/go").
Run(routerEngine(), func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
data := r.Body.Bytes()
value, _ := jsonparser.GetString(data, "go_version")
assert.Equal(t, goVersion, value)
assert.Equal(t, http.StatusOK, r.Code)
})
}
func TestAPIStatusAppHandler(t *testing.T) {
initTest()
r := gofight.New()
appVersion := "v1.0.0"
SetVersion(appVersion)
r.GET("/api/stat/app").
Run(routerEngine(), func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
data := r.Body.Bytes()
value, _ := jsonparser.GetString(data, "version")
assert.Equal(t, appVersion, value)
assert.Equal(t, http.StatusOK, r.Code)
})
}
func TestAPIConfigHandler(t *testing.T) {
initTest()
r := gofight.New()
r.GET("/api/config").
Run(routerEngine(), func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
assert.Equal(t, http.StatusCreated, r.Code)
})
}
func TestMissingNotificationsParameter(t *testing.T) {
initTest()
r := gofight.New()
// missing notifications parameter.
r.POST("/api/push").
Run(routerEngine(), func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
assert.Equal(t, http.StatusBadRequest, r.Code)
assert.Equal(t, "application/json; charset=utf-8", r.HeaderMap.Get("Content-Type"))
})
}
func TestEmptyNotifications(t *testing.T) {
initTest()
r := gofight.New()
// notifications is empty.
r.POST("/api/push").
SetJSON(gofight.D{
"notifications": []PushNotification{},
}).
Run(routerEngine(), func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
assert.Equal(t, http.StatusBadRequest, r.Code)
})
}
func TestMutableContent(t *testing.T) {
initTest()
r := gofight.New()
// notifications is empty.
r.POST("/api/push").
SetJSON(gofight.D{
"notifications": []gofight.D{
{
"tokens": []string{"aaaaa", "bbbbb"},
"platform": PlatFormAndroid,
"message": "Welcome",
"mutable_content": 1,
"topic": "test",
"badge": 1,
"alert": gofight.D{
"title": "title",
"body": "body",
},
},
},
}).
Run(routerEngine(), func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
// json: cannot unmarshal number into Go struct field PushNotification.mutable_content of type bool
assert.Equal(t, http.StatusBadRequest, r.Code)
})
}
func TestOutOfRangeMaxNotifications(t *testing.T) {
initTest()
PushConf.Core.MaxNotification = int64(1)
r := gofight.New()
// notifications is empty.
r.POST("/api/push").
SetJSON(gofight.D{
"notifications": []gofight.D{
{
"tokens": []string{"aaaaa", "bbbbb"},
"platform": PlatFormAndroid,
"message": "Welcome",
},
{
"tokens": []string{"aaaaa", "bbbbb"},
"platform": PlatFormAndroid,
"message": "Welcome",
},
},
}).
Run(routerEngine(), func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
assert.Equal(t, http.StatusBadRequest, r.Code)
})
}
func TestSuccessPushHandler(t *testing.T) {
t.Skip()
initTest()
PushConf.Android.Enabled = true
PushConf.Android.APIKey = os.Getenv("ANDROID_API_KEY")
androidToken := os.Getenv("ANDROID_TEST_TOKEN")
r := gofight.New()
r.POST("/api/push").
SetJSON(gofight.D{
"notifications": []gofight.D{
{
"tokens": []string{androidToken, "bbbbb"},
"platform": PlatFormAndroid,
"message": "Welcome",
},
},
}).
Run(routerEngine(), func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
assert.Equal(t, http.StatusOK, r.Code)
})
}
func TestSysStatsHandler(t *testing.T) {
initTest()
r := gofight.New()
r.GET("/sys/stats").
Run(routerEngine(), func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
assert.Equal(t, http.StatusOK, r.Code)
})
}
func TestMetricsHandler(t *testing.T) {
initTest()
r := gofight.New()
r.GET("/metrics").
Run(routerEngine(), func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
assert.Equal(t, http.StatusOK, r.Code)
})
}
func TestHeartbeatHandler(t *testing.T) {
initTest()
r := gofight.New()
r.GET("/healthz").
Run(routerEngine(), func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
assert.Equal(t, http.StatusOK, r.Code)
})
}
func TestVersionHandler(t *testing.T) {
SetVersion("3.0.0")
initTest()
r := gofight.New()
r.GET("/version").
Run(routerEngine(), func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
assert.Equal(t, http.StatusOK, r.Code)
data := r.Body.Bytes()
value, _ := jsonparser.GetString(data, "version")
assert.Equal(t, "3.0.0", value)
})
}
func TestDisabledHTTPServer(t *testing.T) {
initTest()
PushConf.Core.Enabled = false
err := RunHTTPServer(context.Background())
PushConf.Core.Enabled = true
assert.Nil(t, err)
}
|
[
"\"ANDROID_API_KEY\"",
"\"ANDROID_TEST_TOKEN\""
] |
[] |
[
"ANDROID_TEST_TOKEN",
"ANDROID_API_KEY"
] |
[]
|
["ANDROID_TEST_TOKEN", "ANDROID_API_KEY"]
|
go
| 2 | 0 | |
apis/auth/signin.controller.go
|
package auth
import (
"context"
"os"
"strconv"
"strings"
"github.com/gofiber/fiber/v2"
"go.mongodb.org/mongo-driver/bson"
"deepseen-backend/configuration"
DB "deepseen-backend/database"
Schemas "deepseen-backend/database/schemas"
"deepseen-backend/redis"
"deepseen-backend/utilities"
)
// Handle signing in
func signIn(ctx *fiber.Ctx) error {
// check data
var body SignInUserRequest
bodyParsingError := ctx.BodyParser(&body)
if bodyParsingError != nil {
return utilities.Response(utilities.ResponseParams{
Ctx: ctx,
Info: configuration.ResponseMessages.InternalServerError,
Status: fiber.StatusInternalServerError,
})
}
client := body.Client
email := body.Email
password := body.Password
if client == "" || email == "" || password == "" {
return utilities.Response(utilities.ResponseParams{
Ctx: ctx,
Info: configuration.ResponseMessages.MissingData,
Status: fiber.StatusBadRequest,
})
}
trimmedClient := strings.TrimSpace(client)
trimmedEmail := strings.TrimSpace(email)
trimmedPassword := strings.TrimSpace(password)
if trimmedClient == "" || trimmedEmail == "" || trimmedPassword == "" {
return utilities.Response(utilities.ResponseParams{
Ctx: ctx,
Info: configuration.ResponseMessages.MissingData,
Status: fiber.StatusBadRequest,
})
}
// make sure that the client is valid
clients := utilities.Values(configuration.Clients)
if !utilities.IncludesString(clients, trimmedClient) {
return utilities.Response(utilities.ResponseParams{
Ctx: ctx,
Info: configuration.ResponseMessages.InvalidData,
Status: fiber.StatusBadRequest,
})
}
// load User schema
UserCollection := DB.Instance.Database.Collection(DB.Collections.User)
// find a user
rawUserRecord := UserCollection.FindOne(
ctx.Context(),
bson.D{{Key: "email", Value: trimmedEmail}},
)
userRecord := &Schemas.User{}
rawUserRecord.Decode(userRecord)
if userRecord.ID == "" {
return utilities.Response(utilities.ResponseParams{
Ctx: ctx,
Info: configuration.ResponseMessages.AccessDenied,
Status: fiber.StatusUnauthorized,
})
}
// load Password schema
PasswordCollection := DB.Instance.Database.Collection(DB.Collections.Password)
// find a password
rawPasswordRecord := PasswordCollection.FindOne(
ctx.Context(),
bson.D{{Key: "userId", Value: userRecord.ID}},
)
passwordRecord := &Schemas.Password{}
rawPasswordRecord.Decode(passwordRecord)
if passwordRecord.ID == "" {
return utilities.Response(utilities.ResponseParams{
Ctx: ctx,
Info: configuration.ResponseMessages.AccessDenied,
Status: fiber.StatusUnauthorized,
})
}
// compare hashes
passwordIsValid := utilities.CompareHashes(trimmedPassword, passwordRecord.Hash)
if !passwordIsValid {
return utilities.Response(utilities.ResponseParams{
Ctx: ctx,
Info: configuration.ResponseMessages.AccessDenied,
Status: fiber.StatusUnauthorized,
})
}
// load Image schema
ImageCollection := DB.Instance.Database.Collection(DB.Collections.Image)
// find an image
rawImageRecord := ImageCollection.FindOne(
ctx.Context(),
bson.D{{Key: "userId", Value: userRecord.ID}},
)
imageRecord := &Schemas.Image{}
rawImageRecord.Decode(imageRecord)
if imageRecord.ID == "" {
return utilities.Response(utilities.ResponseParams{
Ctx: ctx,
Info: configuration.ResponseMessages.AccessDenied,
Status: fiber.StatusUnauthorized,
})
}
// generate a token
expiration, expirationError := strconv.Atoi(os.Getenv("TOKEN_EXPIRATION"))
if expirationError != nil {
expiration = 9999
}
token, tokenError := utilities.GenerateJWT(utilities.GenerateJWTParams{
Client: trimmedClient,
ExpiresIn: int64(expiration),
Image: imageRecord.Image,
UserId: userRecord.ID,
})
if tokenError != nil {
return utilities.Response(utilities.ResponseParams{
Ctx: ctx,
Info: configuration.ResponseMessages.InternalServerError,
Status: fiber.StatusInternalServerError,
})
}
// store user image in Redis
redisError := redis.Client.Set(
context.Background(),
utilities.KeyFormatter(
configuration.Redis.Prefixes.User,
userRecord.ID,
),
imageRecord.Image,
configuration.Redis.TTL,
).Err()
if redisError != nil {
return utilities.Response(utilities.ResponseParams{
Ctx: ctx,
Info: configuration.ResponseMessages.InternalServerError,
Status: fiber.StatusInternalServerError,
})
}
return utilities.Response(utilities.ResponseParams{
Ctx: ctx,
Data: fiber.Map{
"token": token,
"user": userRecord,
},
})
}
|
[
"\"TOKEN_EXPIRATION\""
] |
[] |
[
"TOKEN_EXPIRATION"
] |
[]
|
["TOKEN_EXPIRATION"]
|
go
| 1 | 0 | |
server.py
|
import os
from flask import Flask, request
from twilio.jwt.access_token import AccessToken
from twilio.jwt.access_token.grants import VoiceGrant
from twilio.rest import Client
from twilio.twiml.voice_response import VoiceResponse
ACCOUNT_SID = 'AC***'
API_KEY = 'SK***'
API_KEY_SECRET = '***'
PUSH_CREDENTIAL_SID = 'CR***'
APP_SID = 'AP***'
"""
Use a valid Twilio number by adding it to your account via https://www.twilio.com/console/phone-numbers/verified
"""
CALLER_NUMBER = '1234567890'
"""
The caller id used when a client is dialed.
"""
CALLER_ID = 'client:quick_start'
IDENTITY = 'alice'
app = Flask(__name__)
"""
Creates an access token with VoiceGrant using your Twilio credentials.
"""
@app.route('/accessToken', methods=['GET', 'POST'])
def token():
account_sid = os.environ.get("ACCOUNT_SID", ACCOUNT_SID)
api_key = os.environ.get("API_KEY", API_KEY)
api_key_secret = os.environ.get("API_KEY_SECRET", API_KEY_SECRET)
push_credential_sid = os.environ.get("PUSH_CREDENTIAL_SID", PUSH_CREDENTIAL_SID)
app_sid = os.environ.get("APP_SID", APP_SID)
grant = VoiceGrant(
push_credential_sid=push_credential_sid,
outgoing_application_sid=app_sid
)
identity = request.values["identity"] \
if request.values and request.values["identity"] else IDENTITY
token = AccessToken(account_sid, api_key, api_key_secret, identity=identity)
token.add_grant(grant)
return token.to_jwt()
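# Illustrative note (editor's addition, not part of the original sample): once the
# server is running, the access token is typically fetched like this; the host and
# port are assumptions taken from the app.run() call at the bottom of this file.
#
#   curl "http://localhost:5000/accessToken?identity=bob"
#
# The plain-text response body is the JWT that the Voice SDK client registers with.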
"""
Creates an endpoint that plays back a greeting.
"""
@app.route('/incoming', methods=['GET', 'POST'])
def incoming():
resp = VoiceResponse()
resp.say("Congratulations! You have received your first inbound call! Good bye.")
return str(resp)
"""
Makes a call to the specified client using the Twilio REST API.
"""
@app.route('/placeCall', methods=['GET', 'POST'])
def placeCall():
account_sid = os.environ.get("ACCOUNT_SID", ACCOUNT_SID)
api_key = os.environ.get("API_KEY", API_KEY)
api_key_secret = os.environ.get("API_KEY_SECRET", API_KEY_SECRET)
client = Client(api_key, api_key_secret, account_sid)
to = request.values.get("to")
call = None
if to is None or len(to) == 0:
call = client.calls.create(url=request.url_root + 'incoming', to='client:' + IDENTITY, from_=CALLER_ID)
elif to[0] in "+1234567890" and (len(to) == 1 or to[1:].isdigit()):
call = client.calls.create(url=request.url_root + 'incoming', to=to, from_=CALLER_NUMBER)
else:
call = client.calls.create(url=request.url_root + 'incoming', to='client:' + to, from_=CALLER_ID)
return str(call)
@app.route('/outgoing', methods=['GET', 'POST'])
def outgoing():
resp = VoiceResponse()
from_value = request.values.get('From')
caller = request.values.get('Caller')
caller_value = caller[7:] if caller else ''
to = request.values.get('To')
if not (from_value and to):
resp.say("Invalid request")
return str(resp)
if to.startswith("client:"):
# client -> client
resp.dial(callerId=from_value).client(to[7:])
elif to.startswith("number:"):
resp.dial(callerId=caller_value).number(to[7:])
else:
resp.say("Invalid request")
return str(resp)
"""
Creates an endpoint that can be used in your TwiML App as the Voice Request Url.
In order to make an outgoing call using Twilio Voice SDK, you need to provide a
TwiML App SID in the Access Token. You can run your server, make it publicly
accessible and use `/makeCall` endpoint as the Voice Request Url in your TwiML App.
"""
@app.route('/makeCall', methods=['GET', 'POST'])
def makeCall():
resp = VoiceResponse()
to = request.values.get("to")
if to is None or len(to) == 0:
resp.say("Congratulations! You have just made your first call! Good bye.")
elif to[0] in "+1234567890" and (len(to) == 1 or to[1:].isdigit()):
resp.dial(callerId=CALLER_NUMBER).number(to)
else:
resp.dial(callerId=CALLER_ID).client(to)
return str(resp)
@app.route('/', methods=['GET', 'POST'])
def welcome():
resp = VoiceResponse()
resp.say("Welcome to Twilio")
return str(resp)
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port, debug=True)
|
[] |
[] |
[
"PORT",
"ACCOUNT_SID",
"PUSH_CREDENTIAL_SID",
"API_KEY",
"API_KEY_SECRET",
"APP_SID"
] |
[]
|
["PORT", "ACCOUNT_SID", "PUSH_CREDENTIAL_SID", "API_KEY", "API_KEY_SECRET", "APP_SID"]
|
python
| 6 | 0 | |
google/appengine/tools/appcfg.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tool for deploying apps to an app server.
Currently, the application only uploads new appversions. To do this, it first
walks the directory tree rooted at the path the user specifies, adding all the
files it finds to a list. It then uploads the application configuration
(app.yaml) to the server using HTTP, followed by uploading each of the files.
It then commits the transaction with another request.
The bulk of this work is handled by the AppVersionUpload class, which exposes
methods to add to the list of files, fetch a list of modified files, upload
files, and commit or rollback the transaction.
"""
from __future__ import with_statement
import calendar
import contextlib
import copy
import datetime
import errno
import getpass
import hashlib
import logging
import mimetypes
import optparse
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
import urllib
import urllib2
import google
import yaml
from google.appengine.cron import groctimespecification
from google.appengine.api import appinfo
from google.appengine.api import appinfo_includes
from google.appengine.api import backendinfo
from google.appengine.api import croninfo
from google.appengine.api import dispatchinfo
from google.appengine.api import dosinfo
from google.appengine.api import queueinfo
from google.appengine.api import yaml_errors
from google.appengine.api import yaml_object
from google.appengine.datastore import datastore_index
from google.appengine.tools import appcfg_java
from google.appengine.tools import appengine_rpc
try:
from google.appengine.tools import appengine_rpc_httplib2
except ImportError:
appengine_rpc_httplib2 = None
from google.appengine.tools import bulkloader
from google.appengine.tools import sdk_update_checker
LIST_DELIMITER = '\n'
TUPLE_DELIMITER = '|'
BACKENDS_ACTION = 'backends'
BACKENDS_MESSAGE = ('Looks like you\'re using Backends. We suggest that you '
'start looking at App Engine Modules. See the Modules '
'documentation to learn more about converting: ')
_CONVERTING_URL = (
'https://developers.google.com/appengine/docs/%s/modules/converting')
MAX_LOG_LEVEL = 4
MAX_BATCH_SIZE = 3200000
MAX_BATCH_COUNT = 100
MAX_BATCH_FILE_SIZE = 200000
BATCH_OVERHEAD = 500
verbosity = 1
PREFIXED_BY_ADMIN_CONSOLE_RE = '^(?:admin-console)(.*)'
SDK_PRODUCT = 'appcfg_py'
DAY = 24*3600
SUNDAY = 6
SUPPORTED_RUNTIMES = ('go', 'php', 'python', 'python27', 'java', 'java7', 'vm')
MEGA = 1024 * 1024
MILLION = 1000 * 1000
DEFAULT_RESOURCE_LIMITS = {
'max_file_size': 32 * MILLION,
'max_blob_size': 32 * MILLION,
'max_files_to_clone': 100,
'max_total_file_size': 150 * MEGA,
'max_file_count': 10000,
}
# Client ID and secrets are managed in the Google API console.
APPCFG_CLIENT_ID = '550516889912.apps.googleusercontent.com'
APPCFG_CLIENT_NOTSOSECRET = 'ykPq-0UYfKNprLRjVx1hBBar'
APPCFG_SCOPES = ('https://www.googleapis.com/auth/appengine.admin',)
STATIC_FILE_PREFIX = '__static__'
METADATA_BASE = 'http://metadata.google.internal'
SERVICE_ACCOUNT_BASE = (
'computeMetadata/v1beta1/instance/service-accounts/default')
APP_YAML_FILENAME = 'app.yaml'
class Error(Exception):
pass
class OAuthNotAvailable(Error):
"""The appengine_rpc_httplib2 module could not be imported."""
pass
class CannotStartServingError(Error):
"""We could not start serving the version being uploaded."""
pass
def PrintUpdate(msg):
"""Print a message to stderr.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
timestamp = datetime.datetime.now()
print >>sys.stderr, '%s %s' % (timestamp.strftime('%I:%M %p'), msg)
def StatusUpdate(msg):
"""Print a status message to stderr."""
PrintUpdate(msg)
def BackendsStatusUpdate(runtime):
"""Print the Backends status message based on current runtime.
Args:
runtime: String name of current runtime.
"""
language = runtime
if language == 'python27':
language = 'python'
elif language == 'java7':
language = 'java'
if language == 'python' or language == 'java':
StatusUpdate(BACKENDS_MESSAGE + (_CONVERTING_URL % language))
def ErrorUpdate(msg):
"""Print an error message to stderr."""
PrintUpdate(msg)
def _PrintErrorAndExit(stream, msg, exit_code=2):
"""Prints the given error message and exists the program.
Args:
stream: The stream (e.g. StringIO or file) to write the message to.
msg: The error message to display as a string.
exit_code: The integer code to pass to sys.exit().
"""
stream.write(msg)
sys.exit(exit_code)
@contextlib.contextmanager
def TempChangeField(obj, field_name, new_value):
"""Context manager to change a field value on an object temporarily.
Args:
obj: The object to change the field on.
field_name: The field name to change.
new_value: The new value.
Yields:
The old value.
"""
old_value = getattr(obj, field_name)
setattr(obj, field_name, new_value)
yield old_value
setattr(obj, field_name, old_value)
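# Editor's illustrative sketch (hypothetical helper, not part of the original tool):
# typical use of TempChangeField, mirroring how the upload classes below blank out the
# 'application' field while a payload is serialized and restore it afterwards.
def _ExampleTempChangeFieldUsage(rpcserver, definitions):
  """Sends index definitions with 'application' temporarily cleared."""
  with TempChangeField(definitions, 'application', None) as app_id:
    # Inside the block definitions.application is None; app_id holds the old value.
    return rpcserver.Send('/api/datastore/index/add',
                          app_id=app_id,
                          payload=definitions.ToYAML())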
class FileClassification(object):
"""A class to hold a file's classification.
This class abstracts away the details of how we determine whether a file
is a regular, static or error file, and also acts as a container for
various metadata about the file.
"""
def __init__(self, config, filename):
"""Initializes a FileClassification instance.
Args:
config: The app.yaml object to check the filename against.
filename: The name of the file.
"""
self.__static_mime_type = self.__GetMimeTypeIfStaticFile(config, filename)
self.__static_app_readable = self.__GetAppReadableIfStaticFile(config,
filename)
self.__error_mime_type, self.__error_code = self.__LookupErrorBlob(config,
filename)
@staticmethod
def __GetMimeTypeIfStaticFile(config, filename):
"""Looks up the mime type for 'filename'.
Uses the handlers in 'config' to determine if the file should
be treated as a static file.
Args:
config: The app.yaml object to check the filename against.
filename: The name of the file.
Returns:
The mime type string. For example, 'text/plain' or 'image/gif'.
None if this is not a static file.
"""
if FileClassification.__FileNameImpliesStaticFile(filename):
return FileClassification.__MimeType(filename)
for handler in config.handlers:
handler_type = handler.GetHandlerType()
if handler_type in ('static_dir', 'static_files'):
if handler_type == 'static_dir':
regex = os.path.join(re.escape(handler.GetHandler()), '.*')
else:
regex = handler.upload
if re.match(regex, filename):
return handler.mime_type or FileClassification.__MimeType(filename)
return None
@staticmethod
def __FileNameImpliesStaticFile(filename):
"""True if the name of a file implies that it is a static resource.
For Java applications specified with web.xml and appengine-web.xml, we
create a staging directory that includes a __static__ hierarchy containing
links to all files that are implied static by the contents of those XML
files. So if a file has been copied into that directory then we can assume
it is static.
Args:
filename: The full path to the file.
Returns:
True if the file should be considered a static resource based on its name.
"""
static = '__static__' + os.sep
return static in filename
@staticmethod
def __GetAppReadableIfStaticFile(config, filename):
"""Looks up whether a static file is readable by the application.
Uses the handlers in 'config' to determine if the file should
be treated as a static file and if so, if the file should be readable by the
application.
Args:
config: The AppInfoExternal object to check the filename against.
filename: The name of the file.
Returns:
True if the file is static and marked as app readable, False otherwise.
"""
for handler in config.handlers:
handler_type = handler.GetHandlerType()
if handler_type in ('static_dir', 'static_files'):
if handler_type == 'static_dir':
regex = os.path.join(re.escape(handler.GetHandler()), '.*')
else:
regex = handler.upload
if re.match(regex, filename):
return handler.application_readable
return False
@staticmethod
def __LookupErrorBlob(config, filename):
"""Looks up the mime type and error_code for 'filename'.
Uses the error handlers in 'config' to determine if the file should
be treated as an error blob.
Args:
config: The app.yaml object to check the filename against.
filename: The name of the file.
Returns:
A tuple of (mime_type, error_code), or (None, None) if this is not an
error blob. For example, ('text/plain', 'default') or ('image/gif',
'timeout') or (None, None).
"""
if not config.error_handlers:
return (None, None)
for error_handler in config.error_handlers:
if error_handler.file == filename:
error_code = error_handler.error_code
error_code = error_code or 'default'
if error_handler.mime_type:
return (error_handler.mime_type, error_code)
else:
return (FileClassification.__MimeType(filename), error_code)
return (None, None)
@staticmethod
def __MimeType(filename, default='application/octet-stream'):
guess = mimetypes.guess_type(filename)[0]
if guess is None:
print >>sys.stderr, ('Could not guess mimetype for %s. Using %s.'
% (filename, default))
return default
return guess
def IsApplicationFile(self):
return bool((not self.IsStaticFile() or self.__static_app_readable) and
not self.IsErrorFile())
def IsStaticFile(self):
return bool(self.__static_mime_type)
def StaticMimeType(self):
return self.__static_mime_type
def IsErrorFile(self):
return bool(self.__error_mime_type)
def ErrorMimeType(self):
return self.__error_mime_type
def ErrorCode(self):
return self.__error_code
def BuildClonePostBody(file_tuples):
"""Build the post body for the /api/clone{files,blobs,errorblobs} urls.
Args:
file_tuples: A list of tuples. Each tuple should contain the entries
appropriate for the endpoint in question.
Returns:
A string containing the properly delimited tuples.
"""
file_list = []
for tup in file_tuples:
path = tup[1]
tup = tup[2:]
file_list.append(TUPLE_DELIMITER.join([path] + list(tup)))
return LIST_DELIMITER.join(file_list)
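# Editor's illustrative note: the shape of the post body built above, using placeholder
# hashes. Each tuple's second element is the upload path; the remaining elements are
# joined with TUPLE_DELIMITER and the per-file lines are joined with LIST_DELIMITER:
#   BuildClonePostBody([('local/a.py', 'a.py', 'hash1'),
#                       ('local/logo.png', '__static__/logo.png', 'hash2', 'image/png')])
#   == 'a.py|hash1\n__static__/logo.png|hash2|image/png'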
def GetRemoteResourceLimits(rpcserver, config):
"""Get the resource limit as reported by the admin console.
Get the resource limits by querying the admin_console/appserver. The
actual limits returned depends on the server we are talking to and
could be missing values we expect or include extra values.
Args:
rpcserver: The RPC server to use.
config: The appyaml configuration.
Returns:
A dictionary.
"""
try:
StatusUpdate('Getting current resource limits.')
yaml_data = rpcserver.Send('/api/appversion/getresourcelimits',
app_id=config.application,
version=config.version)
except urllib2.HTTPError, err:
if err.code != 404:
raise
return {}
return yaml.safe_load(yaml_data)
def GetResourceLimits(rpcserver, config):
"""Gets the resource limits.
Gets the resource limits that should be applied to apps. Any values
that the server does not know about will have their default value
reported (although it is also possible for the server to report
values we don't know about).
Args:
rpcserver: The RPC server to use.
config: The appyaml configuration.
Returns:
A dictionary.
"""
resource_limits = DEFAULT_RESOURCE_LIMITS.copy()
resource_limits.update(GetRemoteResourceLimits(rpcserver, config))
logging.debug('Using resource limits: %s', resource_limits)
return resource_limits
def RetryWithBackoff(callable_func, retry_notify_func,
initial_delay=1, backoff_factor=2,
max_delay=60, max_tries=20):
"""Calls a function multiple times, backing off more and more each time.
Args:
callable_func: A function that performs some operation that should be
retried a number of times upon failure. Signature: () -> (done, value)
If 'done' is True, we'll immediately return (True, value)
If 'done' is False, we'll delay a bit and try again, unless we've
hit the 'max_tries' limit, in which case we'll return (False, value).
retry_notify_func: This function will be called immediately before the
next retry delay. Signature: (value, delay) -> None
'value' is the value returned by the last call to 'callable_func'
'delay' is the retry delay, in seconds
initial_delay: Initial delay after first try, in seconds.
backoff_factor: Delay will be multiplied by this factor after each try.
max_delay: Maximum delay, in seconds.
max_tries: Maximum number of tries (the first one counts).
Returns:
What the last call to 'callable_func' returned, which is of the form
(done, value). If 'done' is True, you know 'callable_func' returned True
before we ran out of retries. If 'done' is False, you know 'callable_func'
kept returning False and we ran out of retries.
Raises:
Whatever the function raises--an exception will immediately stop retries.
"""
delay = initial_delay
num_tries = 0
while True:
done, opaque_value = callable_func()
num_tries += 1
if done:
return True, opaque_value
if num_tries >= max_tries:
return False, opaque_value
retry_notify_func(opaque_value, delay)
time.sleep(delay)
delay = min(delay * backoff_factor, max_delay)
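# Editor's illustrative sketch (hypothetical names and app id): how the deploy and
# download code below drives RetryWithBackoff. The callable reports (done, value) and
# the notify function is shown the last value and the upcoming delay.
def _ExampleRetryUsage(rpcserver):
  def TryOnce():
    try:
      return True, rpcserver.Send('/api/files/list', app_id='example-app')
    except urllib2.HTTPError, exc:
      if exc.code == 503:
        return False, exc
      raise
  def PrintRetryMessage(unused_value, delay):
    StatusUpdate('Server busy. Will try again in %d seconds.' % delay)
  return RetryWithBackoff(TryOnce, PrintRetryMessage,
                          initial_delay=1, backoff_factor=2,
                          max_delay=60, max_tries=5)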
def MigratePython27Notice():
"""Tells the user that Python 2.5 runtime is deprecated.
Encourages the user to migrate from Python 2.5 to Python 2.7.
Prints a message to sys.stdout. The caller should have tested that the user is
using Python 2.5, so as not to spuriously display this message.
"""
print (
'WARNING: This application is using the Python 2.5 runtime, which is '
'deprecated! It should be updated to the Python 2.7 runtime as soon as '
'possible, which offers performance improvements and many new features. '
'Learn how simple it is to migrate your application to Python 2.7 at '
'https://developers.google.com/appengine/docs/python/python25/migrate27.')
class IndexDefinitionUpload(object):
"""Provides facilities to upload index definitions to the hosting service."""
def __init__(self, rpcserver, definitions):
"""Creates a new DatastoreIndexUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
definitions: An IndexDefinitions object.
"""
self.rpcserver = rpcserver
self.definitions = definitions
def DoUpload(self):
"""Uploads the index definitions."""
StatusUpdate('Uploading index definitions.')
with TempChangeField(self.definitions, 'application', None) as app_id:
self.rpcserver.Send('/api/datastore/index/add',
app_id=app_id,
payload=self.definitions.ToYAML())
class CronEntryUpload(object):
"""Provides facilities to upload cron entries to the hosting service."""
def __init__(self, rpcserver, cron):
"""Creates a new CronEntryUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer
cron: The CronInfoExternal object loaded from the cron.yaml file.
"""
self.rpcserver = rpcserver
self.cron = cron
def DoUpload(self):
"""Uploads the cron entries."""
StatusUpdate('Uploading cron entries.')
with TempChangeField(self.cron, 'application', None) as app_id:
self.rpcserver.Send('/api/cron/update',
app_id=app_id,
payload=self.cron.ToYAML())
class QueueEntryUpload(object):
"""Provides facilities to upload task queue entries to the hosting service."""
def __init__(self, rpcserver, queue):
"""Creates a new QueueEntryUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer
queue: The QueueInfoExternal object loaded from the queue.yaml file.
"""
self.rpcserver = rpcserver
self.queue = queue
def DoUpload(self):
"""Uploads the task queue entries."""
StatusUpdate('Uploading task queue entries.')
with TempChangeField(self.queue, 'application', None) as app_id:
self.rpcserver.Send('/api/queue/update',
app_id=app_id,
payload=self.queue.ToYAML())
class DosEntryUpload(object):
"""Provides facilities to upload dos entries to the hosting service."""
def __init__(self, rpcserver, dos):
"""Creates a new DosEntryUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer.
dos: The DosInfoExternal object loaded from the dos.yaml file.
"""
self.rpcserver = rpcserver
self.dos = dos
def DoUpload(self):
"""Uploads the dos entries."""
StatusUpdate('Uploading DOS entries.')
with TempChangeField(self.dos, 'application', None) as app_id:
self.rpcserver.Send('/api/dos/update',
app_id=app_id,
payload=self.dos.ToYAML())
class PagespeedEntryUpload(object):
"""Provides facilities to upload pagespeed configs to the hosting service."""
def __init__(self, rpcserver, config, pagespeed):
"""Creates a new PagespeedEntryUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer.
config: The AppInfoExternal object derived from the app.yaml file.
pagespeed: The PagespeedEntry object from config.
"""
self.rpcserver = rpcserver
self.config = config
self.pagespeed = pagespeed
def DoUpload(self):
"""Uploads the pagespeed entries."""
pagespeed_yaml = ''
if self.pagespeed:
StatusUpdate('Uploading PageSpeed configuration.')
pagespeed_yaml = self.pagespeed.ToYAML()
try:
self.rpcserver.Send('/api/appversion/updatepagespeed',
app_id=self.config.application,
version=self.config.version,
payload=pagespeed_yaml)
except urllib2.HTTPError, err:
if err.code != 404 or self.pagespeed is not None:
raise
class DefaultVersionSet(object):
"""Provides facilities to set the default (serving) version."""
def __init__(self, rpcserver, app_id, module, version):
"""Creates a new DefaultVersionSet.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer.
app_id: The application to make the change to.
module: The module to set the default version of (if any).
version: The version to set as the default.
"""
self.rpcserver = rpcserver
self.app_id = app_id
self.module = module
self.version = version
def SetVersion(self):
"""Sets the default version."""
if self.module:
modules = self.module.split(',')
if len(modules) > 1:
StatusUpdate('Setting the default version of modules %s of application '
'%s to %s.' % (', '.join(modules),
self.app_id,
self.version))
params = [('app_id', self.app_id), ('version', self.version)]
params.extend(('module', module) for module in modules)
url = '/api/appversion/setdefault?' + urllib.urlencode(sorted(params))
self.rpcserver.Send(url)
return
else:
StatusUpdate('Setting default version of module %s of application %s '
'to %s.' % (self.module, self.app_id, self.version))
else:
StatusUpdate('Setting default version of application %s to %s.'
% (self.app_id, self.version))
self.rpcserver.Send('/api/appversion/setdefault',
app_id=self.app_id,
module=self.module,
version=self.version)
class TrafficMigrator(object):
"""Provides facilities to migrate traffic."""
def __init__(self, rpcserver, app_id, version):
"""Creates a new TrafficMigrator.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer.
app_id: The application to make the change to.
version: The version to set as the default.
"""
self.rpcserver = rpcserver
self.app_id = app_id
self.version = version
def MigrateTraffic(self):
"""Migrates traffic."""
StatusUpdate('Migrating traffic of application %s to %s.'
% (self.app_id, self.version))
self.rpcserver.Send('/api/appversion/migratetraffic',
app_id=self.app_id,
version=self.version)
class IndexOperation(object):
"""Provide facilities for writing Index operation commands."""
def __init__(self, rpcserver):
"""Creates a new IndexOperation.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
"""
self.rpcserver = rpcserver
def DoDiff(self, definitions):
"""Retrieve diff file from the server.
Args:
definitions: datastore_index.IndexDefinitions as loaded from the user's
index.yaml file.
Returns:
A pair of datastore_index.IndexDefinitions objects. The first record
is the set of indexes that are present in the index.yaml file but missing
from the server. The second record is the set of indexes that are
present on the server but missing from the index.yaml file (indicating
that these indexes should probably be vacuumed).
"""
StatusUpdate('Fetching index definitions diff.')
with TempChangeField(definitions, 'application', None) as app_id:
response = self.rpcserver.Send('/api/datastore/index/diff',
app_id=app_id,
payload=definitions.ToYAML())
return datastore_index.ParseMultipleIndexDefinitions(response)
def DoDelete(self, definitions, app_id):
"""Delete indexes from the server.
Args:
definitions: Index definitions to delete from datastore.
app_id: The application id.
Returns:
A single datastore_index.IndexDefinitions containing indexes that were
not deleted, probably because they were already removed. This may
be normal behavior as there is a potential race condition between fetching
the index-diff and sending deletion confirmation through.
"""
StatusUpdate('Deleting selected index definitions.')
response = self.rpcserver.Send('/api/datastore/index/delete',
app_id=app_id,
payload=definitions.ToYAML())
return datastore_index.ParseIndexDefinitions(response)
class VacuumIndexesOperation(IndexOperation):
"""Provide facilities to request the deletion of datastore indexes."""
def __init__(self, rpcserver, force, confirmation_fn=raw_input):
"""Creates a new VacuumIndexesOperation.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
force: True to force deletion of indexes, else False.
confirmation_fn: Function used for getting input from the user.
"""
super(VacuumIndexesOperation, self).__init__(rpcserver)
self.force = force
self.confirmation_fn = confirmation_fn
def GetConfirmation(self, index):
"""Get confirmation from user to delete an index.
This method will enter an input loop until the user provides a
response it is expecting. Valid input is one of three responses:
y: Confirm deletion of index.
n: Do not delete index.
a: Delete all indexes without asking for further confirmation.
If the user enters nothing at all, the default action is to skip
that index and not delete it.
If the user selects 'a', as a side effect, the 'force' flag is set.
Args:
index: Index to confirm.
Returns:
True if user enters 'y' or 'a'. False if user enter 'n'.
"""
while True:
print 'This index is no longer defined in your index.yaml file.'
print
print index.ToYAML()
print
confirmation = self.confirmation_fn(
'Are you sure you want to delete this index? (N/y/a): ')
confirmation = confirmation.strip().lower()
if confirmation == 'y':
return True
elif confirmation == 'n' or not confirmation:
return False
elif confirmation == 'a':
self.force = True
return True
else:
print 'Did not understand your response.'
def DoVacuum(self, definitions):
"""Vacuum indexes in datastore.
This method will query the server to determine which indexes are not
being used according to the user's local index.yaml file. Once it has
made this determination, it confirms with the user which unused indexes
should be deleted. Once confirmation for each index is received, it
deletes those indexes.
Because another user may in theory delete the same indexes at the same
time as the user, there is a potential race condition. In these rare cases,
some of the indexes previously confirmed for deletion will not be found.
The user is notified which indexes these were.
Args:
definitions: datastore_index.IndexDefinitions as loaded from the user's
index.yaml file.
"""
unused_new_indexes, notused_indexes = self.DoDiff(definitions)
deletions = datastore_index.IndexDefinitions(indexes=[])
if notused_indexes.indexes is not None:
for index in notused_indexes.indexes:
if self.force or self.GetConfirmation(index):
deletions.indexes.append(index)
if deletions.indexes:
not_deleted = self.DoDelete(deletions, definitions.application)
if not_deleted.indexes:
not_deleted_count = len(not_deleted.indexes)
if not_deleted_count == 1:
warning_message = ('An index was not deleted. Most likely this is '
'because it no longer exists.\n\n')
else:
warning_message = ('%d indexes were not deleted. Most likely this '
'is because they no longer exist.\n\n'
% not_deleted_count)
for index in not_deleted.indexes:
warning_message += index.ToYAML()
logging.warning(warning_message)
class LogsRequester(object):
"""Provide facilities to export request logs."""
def __init__(self,
rpcserver,
app_id,
module,
version_id,
output_file,
num_days,
append,
severity,
end,
vhost,
include_vhost,
include_all=None,
time_func=time.time):
"""Constructor.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
app_id: The application to fetch logs from.
module: The module of the app to fetch logs from, optional.
version_id: The version of the app to fetch logs for.
output_file: Output file name.
num_days: Number of days worth of logs to export; 0 for all available.
append: True if appending to an existing file.
severity: App log severity to request (0-4); None for no app logs.
end: date object representing last day of logs to return.
vhost: The virtual host of log messages to get. None for all hosts.
include_vhost: If true, the virtual host is included in log messages.
include_all: If true, we add to the log message everything we know
about the request.
time_func: A time.time() compatible function, which can be overridden for
testing.
"""
self.rpcserver = rpcserver
self.app_id = app_id
self.output_file = output_file
self.append = append
self.num_days = num_days
self.severity = severity
self.vhost = vhost
self.include_vhost = include_vhost
self.include_all = include_all
self.module = module
self.version_id = version_id
self.sentinel = None
self.write_mode = 'w'
if self.append:
self.sentinel = FindSentinel(self.output_file)
self.write_mode = 'a'
self.skip_until = False
now = PacificDate(time_func())
if end < now:
self.skip_until = end
else:
end = now
self.valid_dates = None
if self.num_days:
start = end - datetime.timedelta(self.num_days - 1)
self.valid_dates = (start, end)
def DownloadLogs(self):
"""Download the requested logs.
This will write the logs to the file designated by
self.output_file, or to stdout if the filename is '-'.
Multiple roundtrips to the server may be made.
"""
if self.module:
StatusUpdate('Downloading request logs for app %s module %s version %s.' %
(self.app_id, self.module, self.version_id))
else:
StatusUpdate('Downloading request logs for app %s version %s.' %
(self.app_id, self.version_id))
tf = tempfile.TemporaryFile()
last_offset = None
try:
while True:
try:
new_offset = self.RequestLogLines(tf, last_offset)
if not new_offset or new_offset == last_offset:
break
last_offset = new_offset
except KeyboardInterrupt:
StatusUpdate('Keyboard interrupt; saving data downloaded so far.')
break
StatusUpdate('Copying request logs to %r.' % self.output_file)
if self.output_file == '-':
of = sys.stdout
else:
try:
of = open(self.output_file, self.write_mode)
except IOError, err:
StatusUpdate('Can\'t write %r: %s.' % (self.output_file, err))
sys.exit(1)
try:
line_count = CopyReversedLines(tf, of)
finally:
of.flush()
if of is not sys.stdout:
of.close()
finally:
tf.close()
StatusUpdate('Copied %d records.' % line_count)
def RequestLogLines(self, tf, offset):
"""Make a single roundtrip to the server.
Args:
tf: Writable binary stream to which the log lines returned by
the server are written, stripped of headers, and excluding
lines skipped due to self.sentinel or self.valid_dates filtering.
offset: Offset string for a continued request; None for the first.
Returns:
The offset string to be used for the next request, if another
request should be issued; or None, if not.
"""
logging.info('Request with offset %r.', offset)
kwds = {'app_id': self.app_id,
'version': self.version_id,
'limit': 1000,
}
if self.module:
kwds['module'] = self.module
if offset:
kwds['offset'] = offset
if self.severity is not None:
kwds['severity'] = str(self.severity)
if self.vhost is not None:
kwds['vhost'] = str(self.vhost)
if self.include_vhost is not None:
kwds['include_vhost'] = str(self.include_vhost)
if self.include_all is not None:
kwds['include_all'] = str(self.include_all)
response = self.rpcserver.Send('/api/request_logs', payload=None, **kwds)
response = response.replace('\r', '\0')
lines = response.splitlines()
logging.info('Received %d bytes, %d records.', len(response), len(lines))
offset = None
if lines and lines[0].startswith('#'):
match = re.match(r'^#\s*next_offset=(\S+)\s*$', lines[0])
del lines[0]
if match:
offset = match.group(1)
if lines and lines[-1].startswith('#'):
del lines[-1]
valid_dates = self.valid_dates
sentinel = self.sentinel
skip_until = self.skip_until
len_sentinel = None
if sentinel:
len_sentinel = len(sentinel)
for line in lines:
if (sentinel and
line.startswith(sentinel) and
line[len_sentinel : len_sentinel+1] in ('', '\0')):
return None
linedate = DateOfLogLine(line)
if not linedate:
continue
if skip_until:
if linedate > skip_until:
continue
else:
self.skip_until = skip_until = False
if valid_dates and not valid_dates[0] <= linedate <= valid_dates[1]:
return None
tf.write(line + '\n')
if not lines:
return None
return offset
def DateOfLogLine(line):
"""Returns a date object representing the log line's timestamp.
Args:
line: a log line string.
Returns:
A date object representing the timestamp or None if parsing fails.
"""
m = re.compile(r'[^[]+\[(\d+/[A-Za-z]+/\d+):[^\d]*').match(line)
if not m:
return None
try:
return datetime.date(*time.strptime(m.group(1), '%d/%b/%Y')[:3])
except ValueError:
return None
def PacificDate(now):
"""For a UTC timestamp, return the date in the US/Pacific timezone.
Args:
now: A posix timestamp giving current UTC time.
Returns:
A date object representing what day it is in the US/Pacific timezone.
"""
return datetime.date(*time.gmtime(PacificTime(now))[:3])
def PacificTime(now):
"""Helper to return the number of seconds between UTC and Pacific time.
This is needed to compute today's date in Pacific time (more
specifically: Mountain View local time), which is how request logs
are reported. (Google servers always report times in Mountain View
local time, regardless of where they are physically located.)
This takes (post-2006) US DST into account. Pacific time is either
8 hours or 7 hours west of UTC, depending on whether DST is in
effect. Since 2007, US DST starts on the second Sunday in March
and ends on the first Sunday in November. (Reference:
http://aa.usno.navy.mil/faq/docs/daylight_time.php.)
Note that the server doesn't report its local time (the HTTP Date
header uses UTC), and the client's local time is irrelevant.
Args:
now: A posix timestamp giving current UTC time.
Returns:
A pseudo-posix timestamp giving current Pacific time. Passing
this through time.gmtime() will produce a tuple in Pacific local
time.
"""
now -= 8*3600
if IsPacificDST(now):
now += 3600
return now
def IsPacificDST(now):
"""Helper for PacificTime to decide whether now is Pacific DST (PDT).
Args:
now: A pseudo-posix timestamp giving current time in PST.
Returns:
True if now falls within the range of DST, False otherwise.
"""
pst = time.gmtime(now)
year = pst[0]
assert year >= 2007
begin = calendar.timegm((year, 3, 8, 2, 0, 0, 0, 0, 0))
while time.gmtime(begin).tm_wday != SUNDAY:
begin += DAY
end = calendar.timegm((year, 11, 1, 2, 0, 0, 0, 0, 0))
while time.gmtime(end).tm_wday != SUNDAY:
end += DAY
return begin <= now < end
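# Editor's illustrative note: taken together, PacificTime/IsPacificDST shift a UTC
# timestamp back 8 hours (PST) or 7 hours (PDT), and PacificDate truncates the result
# to a date. The timestamps below are placeholders:
#   PacificDate(calendar.timegm((2014, 1, 15, 3, 0, 0)))  # -> 2014-01-14 (PST, UTC-8)
#   PacificDate(calendar.timegm((2014, 7, 15, 3, 0, 0)))  # -> 2014-07-14 (PDT, UTC-7)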
def CopyReversedLines(instream, outstream, blocksize=2**16):
r"""Copy lines from input stream to output stream in reverse order.
As a special feature, null bytes in the input are turned into
newlines followed by tabs in the output, but these 'sub-lines'
separated by null bytes are not reversed. E.g. If the input is
'A\0B\nC\0D\n', the output is 'C\n\tD\nA\n\tB\n'.
Args:
instream: A seekable stream open for reading in binary mode.
outstream: A stream open for writing; doesn't have to be seekable or binary.
blocksize: Optional block size for buffering, for unit testing.
Returns:
The number of lines copied.
"""
line_count = 0
instream.seek(0, 2)
last_block = instream.tell() // blocksize
spillover = ''
for iblock in xrange(last_block + 1, -1, -1):
instream.seek(iblock * blocksize)
data = instream.read(blocksize)
lines = data.splitlines(True)
lines[-1:] = ''.join(lines[-1:] + [spillover]).splitlines(True)
if lines and not lines[-1].endswith('\n'):
lines[-1] += '\n'
lines.reverse()
if lines and iblock > 0:
spillover = lines.pop()
if lines:
line_count += len(lines)
data = ''.join(lines).replace('\0', '\n\t')
outstream.write(data)
return line_count
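# Editor's illustrative sketch (hypothetical helper): the null-byte behaviour described
# in the docstring above, with StringIO standing in for the temporary log file.
def _ExampleCopyReversedLines():
  import StringIO
  instream = StringIO.StringIO('A\0B\nC\0D\n')
  outstream = StringIO.StringIO()
  CopyReversedLines(instream, outstream)  # returns 2
  return outstream.getvalue()             # 'C\n\tD\nA\n\tB\n'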
def FindSentinel(filename, blocksize=2**16):
"""Return the sentinel line from the output file.
Args:
filename: The filename of the output file. (We'll read this file.)
blocksize: Optional block size for buffering, for unit testing.
Returns:
The contents of the last line in the file that doesn't start with
a tab, with its trailing newline stripped; or None if the file
couldn't be opened or no such line could be found by inspecting
the last 'blocksize' bytes of the file.
"""
if filename == '-':
StatusUpdate('Can\'t combine --append with output to stdout.')
sys.exit(2)
try:
fp = open(filename, 'rb')
except IOError, err:
StatusUpdate('Append mode disabled: can\'t read %r: %s.' % (filename, err))
return None
try:
fp.seek(0, 2)
fp.seek(max(0, fp.tell() - blocksize))
lines = fp.readlines()
del lines[:1]
sentinel = None
for line in lines:
if not line.startswith('\t'):
sentinel = line
if not sentinel:
StatusUpdate('Append mode disabled: can\'t find sentinel in %r.' %
filename)
return None
return sentinel.rstrip('\n')
finally:
fp.close()
class UploadBatcher(object):
"""Helper to batch file uploads."""
def __init__(self, what, rpcserver, params):
"""Constructor.
Args:
what: Either 'file' or 'blob' or 'errorblob' indicating what kind of
objects this batcher uploads. Used in messages and URLs.
rpcserver: The RPC server.
params: A dictionary object containing URL params to add to HTTP requests.
"""
assert what in ('file', 'blob', 'errorblob'), repr(what)
self.what = what
self.params = params
self.rpcserver = rpcserver
self.single_url = '/api/appversion/add' + what
self.batch_url = self.single_url + 's'
self.batching = True
self.batch = []
self.batch_size = 0
def SendBatch(self):
"""Send the current batch on its way.
If successful, resets self.batch and self.batch_size.
Raises:
HTTPError with code=404 if the server doesn't support batching.
"""
boundary = 'boundary'
parts = []
for path, payload, mime_type in self.batch:
while boundary in payload:
boundary += '%04x' % random.randint(0, 0xffff)
assert len(boundary) < 80, 'Unexpected error, please try again.'
part = '\n'.join(['',
'X-Appcfg-File: %s' % urllib.quote(path),
'X-Appcfg-Hash: %s' % _Hash(payload),
'Content-Type: %s' % mime_type,
'Content-Length: %d' % len(payload),
'Content-Transfer-Encoding: 8bit',
'',
payload,
])
parts.append(part)
parts.insert(0,
'MIME-Version: 1.0\n'
'Content-Type: multipart/mixed; boundary="%s"\n'
'\n'
'This is a message with multiple parts in MIME format.' %
boundary)
parts.append('--\n')
delimiter = '\n--%s' % boundary
payload = delimiter.join(parts)
logging.info('Uploading batch of %d %ss to %s with boundary="%s".',
len(self.batch), self.what, self.batch_url, boundary)
self.rpcserver.Send(self.batch_url,
payload=payload,
content_type='message/rfc822',
**self.params)
self.batch = []
self.batch_size = 0
def SendSingleFile(self, path, payload, mime_type):
"""Send a single file on its way."""
logging.info('Uploading %s %s (%s bytes, type=%s) to %s.',
self.what, path, len(payload), mime_type, self.single_url)
self.rpcserver.Send(self.single_url,
payload=payload,
content_type=mime_type,
path=path,
**self.params)
def Flush(self):
"""Flush the current batch.
This first attempts to send the batch as a single request; if that
fails because the server doesn't support batching, the files are
sent one by one, and self.batching is reset to False.
At the end, self.batch and self.batch_size are reset.
"""
if not self.batch:
return
try:
self.SendBatch()
except urllib2.HTTPError, err:
if err.code != 404:
raise
logging.info('Old server detected; turning off %s batching.', self.what)
self.batching = False
for path, payload, mime_type in self.batch:
self.SendSingleFile(path, payload, mime_type)
self.batch = []
self.batch_size = 0
def AddToBatch(self, path, payload, mime_type):
"""Batch a file, possibly flushing first, or perhaps upload it directly.
Args:
path: The name of the file.
payload: The contents of the file.
mime_type: The MIME Content-type of the file, or None.
If mime_type is None, application/octet-stream is substituted.
"""
if not mime_type:
mime_type = 'application/octet-stream'
size = len(payload)
if size <= MAX_BATCH_FILE_SIZE:
if (len(self.batch) >= MAX_BATCH_COUNT or
self.batch_size + size > MAX_BATCH_SIZE):
self.Flush()
if self.batching:
logging.info('Adding %s %s (%s bytes, type=%s) to batch.',
self.what, path, size, mime_type)
self.batch.append((path, payload, mime_type))
self.batch_size += size + BATCH_OVERHEAD
return
self.SendSingleFile(path, payload, mime_type)
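# Editor's illustrative sketch (hypothetical app id and file contents): how the
# appversion upload code drives an UploadBatcher; small files accumulate into a single
# multipart request and Flush() sends whatever is left over.
def _ExampleBatcherUsage(rpcserver):
  batcher = UploadBatcher('file', rpcserver, {'app_id': 'example-app'})
  batcher.AddToBatch('main.py', 'print "hello"\n', None)
  batcher.AddToBatch('app.yaml', 'runtime: python27\n', 'text/x-yaml')
  batcher.Flush()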
def _FormatHash(h):
"""Return a string representation of a hash.
The hash is a sha1 hash. It is computed both for files that need to be
pushed to App Engine and for data payloads of requests made to App Engine.
Args:
h: The hash
Returns:
The string representation of the hash.
"""
return '%s_%s_%s_%s_%s' % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])
def _Hash(content):
"""Compute the sha1 hash of the content.
Args:
content: The data to hash as a string.
Returns:
The string representation of the hash.
"""
h = hashlib.sha1(content).hexdigest()
return _FormatHash(h)
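# Editor's illustrative note: _Hash splits the 40-character sha1 hex digest into five
# 8-character groups. For the empty string, for example:
#   _Hash('') == 'da39a3ee_5e6b4b0d_3255bfef_95601890_afd80709'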
def _HashFromFileHandle(file_handle):
"""Compute the hash of the content of the file pointed to by file_handle.
Args:
file_handle: File-like object which provides seek, read and tell.
Returns:
The string representation of the hash.
"""
pos = file_handle.tell()
content_hash = _Hash(file_handle.read())
file_handle.seek(pos, 0)
return content_hash
def EnsureDir(path):
"""Makes sure that a directory exists at the given path.
If a directory already exists at that path, nothing is done.
Otherwise, try to create a directory at that path with os.makedirs.
If that fails, propagate the resulting OSError exception.
Args:
path: The path that you want to refer to a directory.
"""
try:
os.makedirs(path)
except OSError, exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
raise
def DoDownloadApp(rpcserver, out_dir, app_id, module, app_version):
"""Downloads the files associated with a particular app version.
Args:
rpcserver: The RPC server to use to download.
out_dir: The directory the files should be downloaded to.
app_id: The app ID of the app whose files we want to download.
module: The module we want to download from. Can be:
- None: We'll download from the default module.
- <module>: We'll download from the specified module.
app_version: The version number we want to download. Can be:
- None: We'll download the latest default version.
- <major>: We'll download the latest minor version.
- <major>/<minor>: We'll download that exact version.
"""
StatusUpdate('Fetching file list...')
url_args = {'app_id': app_id}
if module:
url_args['module'] = module
if app_version is not None:
url_args['version_match'] = app_version
result = rpcserver.Send('/api/files/list', **url_args)
StatusUpdate('Fetching files...')
lines = result.splitlines()
if len(lines) < 1:
logging.error('Invalid response from server: empty')
return
full_version = lines[0]
file_lines = lines[1:]
current_file_number = 0
num_files = len(file_lines)
num_errors = 0
for line in file_lines:
parts = line.split('|', 2)
if len(parts) != 3:
logging.error('Invalid response from server: expecting '
'"<id>|<size>|<path>", found: "%s"\n', line)
return
current_file_number += 1
file_id, size_str, path = parts
try:
size = int(size_str)
except ValueError:
logging.error('Invalid file list entry from server: invalid size: '
'"%s"', size_str)
return
StatusUpdate('[%d/%d] %s' % (current_file_number, num_files, path))
def TryGet():
"""A request to /api/files/get which works with the RetryWithBackoff."""
try:
contents = rpcserver.Send('/api/files/get', app_id=app_id,
version=full_version, id=file_id)
return True, contents
except urllib2.HTTPError, exc:
if exc.code == 503:
return False, exc
else:
raise
def PrintRetryMessage(_, delay):
StatusUpdate('Server busy. Will try again in %d seconds.' % delay)
success, contents = RetryWithBackoff(TryGet, PrintRetryMessage)
if not success:
logging.error('Unable to download file "%s".', path)
num_errors += 1
continue
if len(contents) != size:
logging.error('File "%s": server listed as %d bytes but served '
'%d bytes.', path, size, len(contents))
num_errors += 1
full_path = os.path.join(out_dir, path)
if os.path.exists(full_path):
logging.error('Unable to create file "%s": path conflicts with '
'an existing file or directory', path)
num_errors += 1
continue
full_dir = os.path.dirname(full_path)
try:
EnsureDir(full_dir)
except OSError, exc:
logging.error('Couldn\'t create directory "%s": %s', full_dir, exc)
num_errors += 1
continue
try:
out_file = open(full_path, 'wb')
except IOError, exc:
logging.error('Couldn\'t open file "%s": %s', full_path, exc)
num_errors += 1
continue
try:
try:
out_file.write(contents)
except IOError, exc:
logging.error('Couldn\'t write to file "%s": %s', full_path, exc)
num_errors += 1
continue
finally:
out_file.close()
if num_errors > 0:
logging.error('Number of errors: %d. See output for details.', num_errors)
class AppVersionUpload(object):
"""Provides facilities to upload a new appversion to the hosting service.
Attributes:
rpcserver: The AbstractRpcServer to use for the upload.
config: The AppInfoExternal object derived from the app.yaml file.
app_id: The application string from 'config'.
version: The version string from 'config'.
backend: The backend to update, if any.
files: A dictionary of files to upload to the rpcserver, mapping path to
hash of the file contents.
in_transaction: True iff a transaction with the server has started.
An AppVersionUpload can do only one transaction at a time.
deployed: True iff the Deploy method has been called.
started: True iff the StartServing method has been called.
"""
def __init__(self, rpcserver, config, module_yaml_path='app.yaml',
backend=None,
error_fh=None,
get_version=sdk_update_checker.GetVersionObject):
"""Creates a new AppVersionUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: An AppInfoExternal object that specifies the configuration for
this application.
module_yaml_path: The (string) path to the yaml file corresponding to
<config>, relative to the bundle directory.
backend: If specified, indicates the update applies to the given backend.
The backend name must match an entry in the backends: stanza.
error_fh: Unexpected HTTPErrors are printed to this file handle.
get_version: Method for determining the current SDK version. The override
is used for testing.
"""
self.rpcserver = rpcserver
self.config = config
self.app_id = self.config.application
self.module = self.config.module
self.backend = backend
self.error_fh = error_fh or sys.stderr
self.version = self.config.version
self.params = {}
if self.app_id:
self.params['app_id'] = self.app_id
if self.module:
self.params['module'] = self.module
if self.backend:
self.params['backend'] = self.backend
elif self.version:
self.params['version'] = self.version
self.files = {}
self.all_files = set()
self.in_transaction = False
self.deployed = False
self.started = False
self.batching = True
self.file_batcher = UploadBatcher('file', self.rpcserver, self.params)
self.blob_batcher = UploadBatcher('blob', self.rpcserver, self.params)
self.errorblob_batcher = UploadBatcher('errorblob', self.rpcserver,
self.params)
if not self.config.vm_settings:
self.config.vm_settings = appinfo.VmSettings()
self.config.vm_settings['module_yaml_path'] = module_yaml_path
if not self.config.vm_settings.get('image'):
sdk_version = get_version()
if sdk_version and sdk_version.get('release'):
self.config.vm_settings['image'] = sdk_version['release']
if not self.config.auto_id_policy:
self.config.auto_id_policy = appinfo.DATASTORE_ID_POLICY_DEFAULT
def Send(self, url, payload=''):
"""Sends a request to the server, with common params."""
logging.info('Send: %s, params=%s', url, self.params)
return self.rpcserver.Send(url, payload=payload, **self.params)
def AddFile(self, path, file_handle):
"""Adds the provided file to the list to be pushed to the server.
Args:
path: The path the file should be uploaded as.
file_handle: A stream containing data to upload.
"""
assert not self.in_transaction, 'Already in a transaction.'
assert file_handle is not None
reason = appinfo.ValidFilename(path)
if reason:
logging.error(reason)
return
content_hash = _HashFromFileHandle(file_handle)
self.files[path] = content_hash
self.all_files.add(path)
def Describe(self):
"""Returns a string describing the object being updated."""
result = 'app: %s' % self.app_id
if self.module is not None and self.module != appinfo.DEFAULT_MODULE:
result += ', module: %s' % self.module
if self.backend:
result += ', backend: %s' % self.backend
elif self.version:
result += ', version: %s' % self.version
return result
@staticmethod
def _ValidateBeginYaml(resp):
"""Validates the given /api/appversion/create response string."""
response_dict = yaml.safe_load(resp)
if not response_dict or 'warnings' not in response_dict:
return False
return response_dict
def Begin(self):
"""Begins the transaction, returning a list of files that need uploading.
All calls to AddFile must be made before calling Begin().
Returns:
A list of pathnames for files that should be uploaded using UploadFile()
before Commit() can be called.
"""
assert not self.in_transaction, 'Already in a transaction.'
config_copy = copy.deepcopy(self.config)
for url in config_copy.handlers:
handler_type = url.GetHandlerType()
if url.application_readable:
if handler_type == 'static_dir':
url.static_dir = '%s/%s' % (STATIC_FILE_PREFIX, url.static_dir)
elif handler_type == 'static_files':
url.static_files = '%s/%s' % (STATIC_FILE_PREFIX, url.static_files)
url.upload = '%s/%s' % (STATIC_FILE_PREFIX, url.upload)
response = self.Send(
'/api/appversion/create',
payload=config_copy.ToYAML())
result = self._ValidateBeginYaml(response)
if result:
warnings = result.get('warnings')
for warning in warnings:
StatusUpdate('WARNING: %s' % warning)
self.in_transaction = True
files_to_clone = []
blobs_to_clone = []
errorblobs = {}
for path, content_hash in self.files.iteritems():
file_classification = FileClassification(self.config, path)
if file_classification.IsStaticFile():
upload_path = path
if file_classification.IsApplicationFile():
upload_path = '%s/%s' % (STATIC_FILE_PREFIX, path)
blobs_to_clone.append((path, upload_path, content_hash,
file_classification.StaticMimeType()))
if file_classification.IsErrorFile():
errorblobs[path] = content_hash
if file_classification.IsApplicationFile():
files_to_clone.append((path, path, content_hash))
files_to_upload = {}
def CloneFiles(url, files, file_type):
"""Sends files to the given url.
Args:
url: the server URL to use.
files: a list of files
file_type: the type of the files
"""
if not files:
return
StatusUpdate('Cloning %d %s file%s.' %
(len(files), file_type, len(files) != 1 and 's' or ''))
max_files = self.resource_limits['max_files_to_clone']
for i in xrange(0, len(files), max_files):
if i > 0 and i % max_files == 0:
StatusUpdate('Cloned %d files.' % i)
chunk = files[i:min(len(files), i + max_files)]
result = self.Send(url, payload=BuildClonePostBody(chunk))
if result:
to_upload = {}
for f in result.split(LIST_DELIMITER):
for entry in files:
real_path, upload_path = entry[:2]
if f == upload_path:
to_upload[real_path] = self.files[real_path]
break
files_to_upload.update(to_upload)
CloneFiles('/api/appversion/cloneblobs', blobs_to_clone, 'static')
CloneFiles('/api/appversion/clonefiles', files_to_clone, 'application')
logging.debug('Files to upload: %s', files_to_upload)
for (path, content_hash) in errorblobs.iteritems():
files_to_upload[path] = content_hash
self.files = files_to_upload
return sorted(files_to_upload.iterkeys())
def UploadFile(self, path, file_handle):
"""Uploads a file to the hosting service.
Must only be called after Begin().
The path provided must be one of those that were returned by Begin().
Args:
path: The path the file is being uploaded as.
file_handle: A file-like object containing the data to upload.
Raises:
KeyError: The provided file is not amongst those to be uploaded.
"""
assert self.in_transaction, 'Begin() must be called before UploadFile().'
if path not in self.files:
raise KeyError('File \'%s\' is not in the list of files to be uploaded.'
% path)
del self.files[path]
file_classification = FileClassification(self.config, path)
payload = file_handle.read()
if file_classification.IsStaticFile():
upload_path = path
if file_classification.IsApplicationFile():
upload_path = '%s/%s' % (STATIC_FILE_PREFIX, path)
self.blob_batcher.AddToBatch(upload_path, payload,
file_classification.StaticMimeType())
if file_classification.IsErrorFile():
self.errorblob_batcher.AddToBatch(file_classification.ErrorCode(),
payload,
file_classification.ErrorMimeType())
if file_classification.IsApplicationFile():
self.file_batcher.AddToBatch(path, payload, None)
def Precompile(self):
"""Handle precompilation."""
StatusUpdate('Compilation starting.')
files = []
if self.config.runtime == 'go':
for f in self.all_files:
if f.endswith('.go') and not self.config.nobuild_files.match(f):
files.append(f)
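# Each precompile call returns the list of files still left to process; keep
# calling until the server reports that nothing remains.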
while True:
if files:
StatusUpdate('Compilation: %d files left.' % len(files))
files = self.PrecompileBatch(files)
if not files:
break
StatusUpdate('Compilation completed.')
def PrecompileBatch(self, files):
"""Precompile a batch of files.
Args:
files: Either an empty list (for the initial request) or a list
of files to be precompiled.
Returns:
Either an empty list (if no more files need to be precompiled)
or a list of files to be precompiled subsequently.
"""
payload = LIST_DELIMITER.join(files)
response = self.Send('/api/appversion/precompile', payload=payload)
if not response:
return []
return response.split(LIST_DELIMITER)
def Commit(self):
"""Commits the transaction, making the new app version available.
All the files returned by Begin() must have been uploaded with UploadFile()
before Commit() can be called.
Deploys the new version, waits for it to become ready, and starts serving it.
Returns:
An appinfo.AppInfoSummary if one was returned from the Deploy, None
otherwise.
Raises:
RuntimeError: Some required files were not uploaded.
CannotStartServingError: Another operation is in progress on this version.
"""
assert self.in_transaction, 'Begin() must be called before Commit().'
if self.files:
raise RuntimeError('Not all required files have been uploaded.')
def PrintRetryMessage(_, delay):
StatusUpdate('Will check again in %s seconds.' % delay)
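# Commit sequence: deploy the uploaded files, poll until the version is
# ready, then ask the server to start serving it. An empty StartServing
# response indicates an older server that does not support the serving
# check, so the transaction simply ends; a '0' response means another
# operation is already in progress on this version.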
app_summary = self.Deploy()
success, unused_contents = RetryWithBackoff(
lambda: (self.IsReady(), None), PrintRetryMessage, 1, 2, 60, 20)
if not success:
logging.warning('Version still not ready to serve, aborting.')
raise RuntimeError('Version not ready.')
result = self.StartServing()
if not result:
self.in_transaction = False
else:
if result == '0':
raise CannotStartServingError(
'Another operation on this version is in progress.')
success, response = RetryWithBackoff(
self.IsServing, PrintRetryMessage, 1, 2, 60, 20)
if not success:
logging.warning('Version still not serving, aborting.')
raise RuntimeError('Version not ready.')
check_config_updated = response.get('check_endpoints_config')
if check_config_updated:
success, unused_contents = RetryWithBackoff(
lambda: (self.IsEndpointsConfigUpdated(), None),
PrintRetryMessage, 1, 2, 60, 20)
if not success:
logging.warning('Failed to update Endpoints configuration. Try '
'updating again.')
raise RuntimeError('Endpoints config update failed.')
self.in_transaction = False
return app_summary
def Deploy(self):
"""Deploys the new app version but does not make it default.
All the files returned by Begin() must have been uploaded with UploadFile()
before Deploy() can be called.
Returns:
An appinfo.AppInfoSummary if one was returned from the Deploy, None
otherwise.
Raises:
RuntimeError: Some required files were not uploaded.
"""
assert self.in_transaction, 'Begin() must be called before Deploy().'
if self.files:
raise RuntimeError('Not all required files have been uploaded.')
StatusUpdate('Starting deployment.')
result = self.Send('/api/appversion/deploy')
self.deployed = True
if result:
return yaml_object.BuildSingleObject(appinfo.AppInfoSummary, result)
else:
return None
def IsReady(self):
"""Check if the new app version is ready to serve traffic.
Raises:
RuntimeError: Deploy has not yet been called.
Returns:
True if the server returned the app is ready to serve.
"""
assert self.deployed, 'Deploy() must be called before IsReady().'
StatusUpdate('Checking if deployment succeeded.')
result = self.Send('/api/appversion/isready')
return result == '1'
def StartServing(self):
"""Start serving with the newly created version.
Raises:
RuntimeError: Deploy has not yet been called.
Returns:
The response body, as a string.
"""
assert self.deployed, 'Deploy() must be called before StartServing().'
StatusUpdate('Deployment successful.')
self.params['willcheckserving'] = '1'
result = self.Send('/api/appversion/startserving')
del self.params['willcheckserving']
self.started = True
return result
@staticmethod
def _ValidateIsServingYaml(resp):
"""Validates the given /isserving YAML string.
Args:
resp: the response from an RPC to a URL such as /api/appversion/isserving.
Returns:
The resulting dictionary if the response is valid, or None otherwise.
"""
response_dict = yaml.safe_load(resp)
if not response_dict or 'serving' not in response_dict:
return None
return response_dict
def IsServing(self):
"""Check if the new app version is serving.
Raises:
RuntimeError: Deploy has not yet been called.
CannotStartServingError: A bad response was received from the isserving
API call.
Returns:
(serving, response) Where serving is True if the deployed app version is
serving, False otherwise. response is a dict containing the parsed
response from the server, or an empty dict if the server's response was
an old style 0/1 response.
"""
assert self.started, 'StartServing() must be called before IsServing().'
StatusUpdate('Checking if updated app version is serving.')
self.params['new_serving_resp'] = '1'
result = self.Send('/api/appversion/isserving')
del self.params['new_serving_resp']
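# Older servers answer with a bare '0'/'1'; newer ones return a YAML
# document that also carries an optional message, a fatal flag, and the
# check_endpoints_config hint used by Commit().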
if result in ['0', '1']:
return result == '1', {}
result = AppVersionUpload._ValidateIsServingYaml(result)
if not result:
raise CannotStartServingError(
'Internal error: Could not parse IsServing response.')
message = result.get('message')
fatal = result.get('fatal')
if message:
StatusUpdate(message)
if fatal:
raise CannotStartServingError(message or 'Unknown error.')
return result['serving'], result
@staticmethod
def _ValidateIsEndpointsConfigUpdatedYaml(resp):
"""Validates the YAML string response from an isconfigupdated request.
Args:
resp: A string containing the response from the server.
Returns:
The dictionary with the parsed response if the response is valid.
Otherwise returns None.
"""
response_dict = yaml.safe_load(resp)
if not response_dict or 'updated' not in response_dict:
return None
return response_dict
def IsEndpointsConfigUpdated(self):
"""Check if the Endpoints configuration for this app has been updated.
This should only be called if the app has a Google Cloud Endpoints
handler, or if it's removing one. The server performs the check to see
if Endpoints support is added/updated/removed, and the response to the
isserving call indicates whether IsEndpointsConfigUpdated should be called.
Raises:
AssertionError: Deploy has not yet been called.
CannotStartServingError: There was an unexpected error with the server
response.
Returns:
True if the configuration has been updated, False if not.
"""
assert self.started, ('StartServing() must be called before '
'IsEndpointsConfigUpdated().')
StatusUpdate('Checking if Endpoints configuration has been updated.')
result = self.Send('/api/isconfigupdated')
result = AppVersionUpload._ValidateIsEndpointsConfigUpdatedYaml(result)
if result is None:
raise CannotStartServingError(
'Internal error: Could not parse IsEndpointsConfigUpdated response.')
return result['updated']
def Rollback(self):
"""Rolls back the transaction if one is in progress."""
if not self.in_transaction:
return
StatusUpdate('Rolling back the update.')
self.Send('/api/appversion/rollback')
self.in_transaction = False
self.files = {}
def DoUpload(self, paths, openfunc):
"""Uploads a new appversion with the given config and files to the server.
Args:
paths: An iterator that yields the relative paths of the files to upload.
openfunc: A function that takes a path and returns a file-like object.
Returns:
An appinfo.AppInfoSummary if one was returned from the server, None
otherwise.
"""
logging.info('Reading app configuration.')
StatusUpdate('\nStarting update of %s' % self.Describe())
path = ''
try:
self.resource_limits = GetResourceLimits(self.rpcserver, self.config)
self._AddFilesThatAreSmallEnough(paths, openfunc)
except KeyboardInterrupt:
logging.info('User interrupted. Aborting.')
raise
except EnvironmentError, e:
logging.error('An error occurred processing file \'%s\': %s. Aborting.',
path, e)
raise
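# Overall flow: Begin() reports which files the server still needs,
# _UploadMissingFiles() sends them, Python apps may be precompiled, and
# Commit() makes the version live. Any failure triggers Rollback() so the
# server-side transaction is not left open.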
try:
missing_files = self.Begin()
self._UploadMissingFiles(missing_files, openfunc)
if (self.config.derived_file_type and
appinfo.PYTHON_PRECOMPILED in self.config.derived_file_type):
try:
self.Precompile()
except urllib2.HTTPError, e:
ErrorUpdate('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---' %
(e.code, e.read().rstrip('\n')))
if e.code == 422 or self.config.runtime == 'go':
raise
print >>self.error_fh, (
'Precompilation failed. Your app can still serve but may '
'have reduced startup performance. You can retry the update '
'later to retry the precompilation step.')
app_summary = self.Commit()
StatusUpdate('Completed update of %s' % self.Describe())
except BaseException, e:
self._LogDoUploadException(e)
self.Rollback()
raise
logging.info('Done!')
return app_summary
def _AddFilesThatAreSmallEnough(self, paths, openfunc):
"""Calls self.AddFile on files that are small enough.
By small enough, we mean that their size is within
self.resource_limits['max_file_size'] for application files, and
'max_blob_size' otherwise. Files that are too large are logged as errors,
and dropped rather than raising an exception.
Args:
paths: List of paths, relative to the app's base path.
openfunc: A function that takes a paths element, and returns a file-like
object.
"""
StatusUpdate('Scanning files on local disk.')
num_files = 0
for path in paths:
file_handle = openfunc(path)
try:
file_length = GetFileLength(file_handle)
file_classification = FileClassification(self.config, path)
if file_classification.IsApplicationFile():
max_size = self.resource_limits['max_file_size']
else:
max_size = self.resource_limits['max_blob_size']
if file_length > max_size:
logging.error('Ignoring file \'%s\': Too long '
'(max %d bytes, file is %d bytes)',
path, max_size, file_length)
else:
logging.info('Processing file \'%s\'', path)
self.AddFile(path, file_handle)
finally:
file_handle.close()
num_files += 1
if num_files % 500 == 0:
StatusUpdate('Scanned %d files.' % num_files)
def _UploadMissingFiles(self, missing_files, openfunc):
"""DoUpload helper to upload files that need to be uploaded.
Args:
missing_files: List of files that need to be uploaded. Begin returns such
a list. Design note: we don't call Begin here, because we want DoUpload
to call it directly so that Begin/Commit are more clearly paired.
openfunc: Function that takes a path relative to the app's base path, and
returns a file-like object.
"""
if not missing_files:
return
StatusUpdate('Uploading %d files and blobs.' % len(missing_files))
num_files = 0
for missing_file in missing_files:
file_handle = openfunc(missing_file)
try:
self.UploadFile(missing_file, file_handle)
finally:
file_handle.close()
num_files += 1
if num_files % 500 == 0:
StatusUpdate('Processed %d out of %s.' %
(num_files, len(missing_files)))
self.file_batcher.Flush()
self.blob_batcher.Flush()
self.errorblob_batcher.Flush()
StatusUpdate('Uploaded %d files and blobs' % num_files)
@staticmethod
def _LogDoUploadException(exception):
"""Helper that logs exceptions that occurred during DoUpload.
Args:
exception: An exception that was thrown during DoUpload.
"""
def InstanceOf(tipe):
return isinstance(exception, tipe)
if InstanceOf(KeyboardInterrupt):
logging.info('User interrupted. Aborting.')
elif InstanceOf(urllib2.HTTPError):
logging.info('HTTP Error (%s)', exception)
elif InstanceOf(CannotStartServingError):
logging.error(exception.message)
else:
logging.exception('An unexpected error occurred. Aborting.')
def FileIterator(base, skip_files, runtime, separator=os.path.sep):
"""Walks a directory tree, returning all the files. Follows symlinks.
Args:
base: The base path to search for files under.
skip_files: A regular expression object for files/directories to skip.
runtime: The name of the runtime e.g. "python". If "python27" then .pyc
files with matching .py files will be skipped.
separator: Path separator used by the running system's platform.
Yields:
Paths of files found, relative to base.
"""
dirs = ['']
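# Iterative directory walk: directories matching skip_files are pruned,
# path separators are normalized to '/' on Windows, and for python27 apps a
# .pyc file is dropped when the matching .py source is also present.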
while dirs:
current_dir = dirs.pop()
entries = set(os.listdir(os.path.join(base, current_dir)))
for entry in sorted(entries):
name = os.path.join(current_dir, entry)
fullname = os.path.join(base, name)
if separator == '\\':
name = name.replace('\\', '/')
if runtime == 'python27' and not skip_files.match(name):
root, extension = os.path.splitext(entry)
if extension == '.pyc' and (root + '.py') in entries:
logging.warning('Ignoring file \'%s\': Cannot upload both '
'<filename>.py and <filename>.pyc', name)
continue
if os.path.isfile(fullname):
if skip_files.match(name):
logging.info('Ignoring file \'%s\': File matches ignore regex.', name)
else:
yield name
elif os.path.isdir(fullname):
if skip_files.match(name):
logging.info(
'Ignoring directory \'%s\': Directory matches ignore regex.',
name)
else:
dirs.append(name)
def GetFileLength(fh):
"""Returns the length of the file represented by fh.
This function is capable of finding the length of any seekable stream,
unlike os.fstat, which only works on file streams.
Args:
fh: The stream to get the length of.
Returns:
The length of the stream.
"""
pos = fh.tell()
fh.seek(0, 2)
length = fh.tell()
fh.seek(pos, 0)
return length
def GetUserAgent(get_version=sdk_update_checker.GetVersionObject,
get_platform=appengine_rpc.GetPlatformToken,
sdk_product=SDK_PRODUCT):
"""Determines the value of the 'User-agent' header to use for HTTP requests.
If the 'APPCFG_SDK_NAME' environment variable is present, that will be
used as the first product token in the user-agent.
Args:
get_version: Used for testing.
get_platform: Used for testing.
sdk_product: Used as part of sdk/version product token.
Returns:
String containing the 'user-agent' header value, which includes the SDK
version, the platform information, and the version of Python;
e.g., 'appcfg_py/1.0.1 Darwin/9.2.0 Python/2.5.2'.
"""
product_tokens = []
sdk_name = os.environ.get('APPCFG_SDK_NAME')
if sdk_name:
product_tokens.append(sdk_name)
else:
version = get_version()
if version is None:
release = 'unknown'
else:
release = version['release']
product_tokens.append('%s/%s' % (sdk_product, release))
product_tokens.append(get_platform())
python_version = '.'.join(str(i) for i in sys.version_info)
product_tokens.append('Python/%s' % python_version)
return ' '.join(product_tokens)
def GetSourceName(get_version=sdk_update_checker.GetVersionObject):
"""Gets the name of this source version."""
version = get_version()
if version is None:
release = 'unknown'
else:
release = version['release']
return 'Google-appcfg-%s' % (release,)
def _ReadUrlContents(url):
"""Reads the contents of a URL into a string.
Args:
url: a string that is the URL to read.
Returns:
A string that is the contents read from the URL.
Raises:
urllib2.URLError: If the URL cannot be read.
"""
req = urllib2.Request(url)
return urllib2.urlopen(req).read()
class AppCfgApp(object):
"""Singleton class to wrap AppCfg tool functionality.
This class is responsible for parsing the command line and executing
the desired action on behalf of the user. Processing files and
communicating with the server is handled by other classes.
Attributes:
actions: A dictionary mapping action names to Action objects.
action: The Action specified on the command line.
parser: An instance of optparse.OptionParser.
options: The command line options parsed by 'parser'.
argv: The original command line as a list.
args: The positional command line args left over after parsing the options.
raw_input_fn: Function used for getting raw user input, like email.
password_input_fn: Function used for getting user password.
error_fh: Unexpected HTTPErrors are printed to this file handle.
Attributes for testing:
parser_class: The class to use for parsing the command line. Because
OptionParser will exit the program when there is a parse failure, it
is nice to subclass OptionParser and catch the error before exiting.
read_url_contents: A function to read the contents of a URL.
override_java_supported: If not None, forces the code to assume that Java
support is (True) or is not (False) present.
"""
def __init__(self, argv, parser_class=optparse.OptionParser,
rpc_server_class=None,
raw_input_fn=raw_input,
password_input_fn=getpass.getpass,
out_fh=sys.stdout,
error_fh=sys.stderr,
update_check_class=sdk_update_checker.SDKUpdateChecker,
throttle_class=None,
opener=open,
file_iterator=FileIterator,
time_func=time.time,
wrap_server_error_message=True,
oauth_client_id=APPCFG_CLIENT_ID,
oauth_client_secret=APPCFG_CLIENT_NOTSOSECRET,
oauth_scopes=APPCFG_SCOPES,
override_java_supported=None):
"""Initializer. Parses the cmdline and selects the Action to use.
Initializes all of the attributes described in the class docstring.
Prints help or error messages if there is an error parsing the cmdline.
Args:
argv: The list of arguments passed to this program.
parser_class: Options parser to use for this application.
rpc_server_class: RPC server class to use for this application.
raw_input_fn: Function used for getting user email.
password_input_fn: Function used for getting user password.
out_fh: All normal output is printed to this file handle.
error_fh: Unexpected HTTPErrors are printed to this file handle.
update_check_class: sdk_update_checker.SDKUpdateChecker class (can be
replaced for testing).
throttle_class: A class to use instead of ThrottledHttpRpcServer
(only used in the bulkloader).
opener: Function used for opening files.
file_iterator: Callable that takes (basepath, skip_files, file_separator)
and returns a generator that yields all filenames in the file tree
rooted at that path, skipping files that match the skip_files compiled
regular expression.
time_func: A time.time() compatible function, which can be overridden for
testing.
wrap_server_error_message: If true, the error messages from
urllib2.HTTPError exceptions in Run() are wrapped with
'--- begin server output ---' and '--- end server output ---',
otherwise the error message is printed as is.
oauth_client_id: The client ID of the project providing Auth. Defaults to
the SDK default project client ID, the constant APPCFG_CLIENT_ID.
oauth_client_secret: The client secret of the project providing Auth.
Defaults to the SDK default project client secret, the constant
APPCFG_CLIENT_NOTSOSECRET.
oauth_scopes: The scope or set of scopes to be accessed by the OAuth2
token retrieved. Defaults to APPCFG_SCOPES. Can be a string or
iterable of strings, representing the scope(s) to request.
override_java_supported: If not None, forces the code to assume that Java
support is (True) or is not (False) present.
"""
self.parser_class = parser_class
self.argv = argv
self.rpc_server_class = rpc_server_class
self.raw_input_fn = raw_input_fn
self.password_input_fn = password_input_fn
self.out_fh = out_fh
self.error_fh = error_fh
self.update_check_class = update_check_class
self.throttle_class = throttle_class
self.time_func = time_func
self.wrap_server_error_message = wrap_server_error_message
self.oauth_client_id = oauth_client_id
self.oauth_client_secret = oauth_client_secret
self.oauth_scopes = oauth_scopes
self.override_java_supported = override_java_supported
self.read_url_contents = _ReadUrlContents
self.parser = self._GetOptionParser()
for action in self.actions.itervalues():
action.options(self, self.parser)
self.options, self.args = self.parser.parse_args(argv[1:])
if len(self.args) < 1:
self._PrintHelpAndExit()
if not self.options.allow_any_runtime:
if self.options.runtime:
if self.options.runtime not in SUPPORTED_RUNTIMES:
_PrintErrorAndExit(self.error_fh,
'"%s" is not a supported runtime\n' %
self.options.runtime)
else:
appinfo.AppInfoExternal.ATTRIBUTES[appinfo.RUNTIME] = (
'|'.join(SUPPORTED_RUNTIMES))
action = self.args.pop(0)
def RaiseParseError(actionname, action):
self.parser, self.options = self._MakeSpecificParser(action)
error_desc = action.error_desc
if not error_desc:
error_desc = "Expected a <directory> argument after '%s'." % (
actionname.split(' ')[0])
self.parser.error(error_desc)
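# The 'backends' action is a two-word action: the sub-action may appear
# either immediately after 'backends' or after the directory argument, so
# both argument orderings are probed here.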
if action == BACKENDS_ACTION:
if len(self.args) < 1:
RaiseParseError(action, self.actions[BACKENDS_ACTION])
backend_action_first = BACKENDS_ACTION + ' ' + self.args[0]
if backend_action_first in self.actions:
self.args.pop(0)
action = backend_action_first
elif len(self.args) > 1:
backend_directory_first = BACKENDS_ACTION + ' ' + self.args[1]
if backend_directory_first in self.actions:
self.args.pop(1)
action = backend_directory_first
if len(self.args) < 1 or action == BACKENDS_ACTION:
RaiseParseError(action, self.actions[action])
if action not in self.actions:
self.parser.error("Unknown action: '%s'\n%s" %
(action, self.parser.get_description()))
self.action = self.actions[action]
if not self.action.uses_basepath or self.options.help:
self.basepath = None
else:
if not self.args:
RaiseParseError(action, self.action)
self.basepath = self.args.pop(0)
self.parser, self.options = self._MakeSpecificParser(self.action)
if self.options.help:
self._PrintHelpAndExit()
if self.options.verbose == 2:
logging.getLogger().setLevel(logging.INFO)
elif self.options.verbose == 3:
logging.getLogger().setLevel(logging.DEBUG)
global verbosity
verbosity = self.options.verbose
if any((self.options.oauth2_refresh_token, self.options.oauth2_access_token,
self.options.authenticate_service_account)):
self.options.oauth2 = True
if self.options.oauth2_client_id:
self.oauth_client_id = self.options.oauth2_client_id
if self.options.oauth2_client_secret:
self.oauth_client_secret = self.options.oauth2_client_secret
self.opener = opener
self.file_iterator = file_iterator
def Run(self):
"""Executes the requested action.
Catches any HTTPErrors raised by the action and prints them to stderr.
Returns:
1 on error, 0 if successful.
"""
try:
self.action(self)
except urllib2.HTTPError, e:
body = e.read()
if self.wrap_server_error_message:
error_format = ('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---')
else:
error_format = 'Error %d: %s'
print >>self.error_fh, (error_format % (e.code, body.rstrip('\n')))
return 1
except yaml_errors.EventListenerError, e:
print >>self.error_fh, ('Error parsing yaml file:\n%s' % e)
return 1
except CannotStartServingError:
print >>self.error_fh, 'Could not start serving the given version.'
return 1
return 0
def _JavaSupported(self):
"""True if this SDK supports uploading Java apps."""
if self.override_java_supported is not None:
return self.override_java_supported
tools_java_dir = os.path.join(os.path.dirname(appcfg_java.__file__), 'java')
return os.path.isdir(tools_java_dir)
def _GetActionDescriptions(self):
"""Returns a formatted string containing the short_descs for all actions."""
action_names = self.actions.keys()
action_names.sort()
desc = ''
for action_name in action_names:
if not self.actions[action_name].hidden:
desc += ' %s: %s\n' % (action_name,
self.actions[action_name].short_desc)
return desc
def _GetOptionParser(self):
"""Creates an OptionParser with generic usage and description strings.
Returns:
An OptionParser instance.
"""
class Formatter(optparse.IndentedHelpFormatter):
"""Custom help formatter that does not reformat the description."""
def format_description(self, description):
"""Very simple formatter."""
return description + '\n'
class AppCfgOption(optparse.Option):
"""Custom Option for AppCfg.
Adds an 'update' action for storing key-value pairs as a dict.
"""
_ACTION = 'update'
ACTIONS = optparse.Option.ACTIONS + (_ACTION,)
STORE_ACTIONS = optparse.Option.STORE_ACTIONS + (_ACTION,)
TYPED_ACTIONS = optparse.Option.TYPED_ACTIONS + (_ACTION,)
ALWAYS_TYPED_ACTIONS = optparse.Option.ALWAYS_TYPED_ACTIONS + (_ACTION,)
def take_action(self, action, dest, opt, value, values, parser):
if action != self._ACTION:
return optparse.Option.take_action(
self, action, dest, opt, value, values, parser)
try:
key, value = value.split(':', 1)
except ValueError:
raise optparse.OptionValueError(
'option %s: invalid value: %s (must match NAME:VALUE)' % (
opt, value))
values.ensure_value(dest, {})[key] = value
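# Illustrative use of the custom 'update' action (hypothetical command line):
#   appcfg.py update . -E FOO:bar -E BAZ:qux
# would leave options.env_variables == {'FOO': 'bar', 'BAZ': 'qux'}.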
desc = self._GetActionDescriptions()
desc = ('Action must be one of:\n%s'
'Use \'help <action>\' for a detailed description.') % desc
parser = self.parser_class(usage='%prog [options] <action>',
description=desc,
formatter=Formatter(),
conflict_handler='resolve',
option_class=AppCfgOption)
parser.add_option('-h', '--help', action='store_true',
dest='help', help='Show the help message and exit.')
parser.add_option('-q', '--quiet', action='store_const', const=0,
dest='verbose', help='Print errors only.')
parser.add_option('-v', '--verbose', action='store_const', const=2,
dest='verbose', default=1,
help='Print info level logs.')
parser.add_option('--noisy', action='store_const', const=3,
dest='verbose', help='Print all logs.')
parser.add_option('-s', '--server', action='store', dest='server',
default='appengine.google.com',
metavar='SERVER', help='The App Engine server.')
parser.add_option('--secure', action='store_true', dest='secure',
default=True, help=optparse.SUPPRESS_HELP)
parser.add_option('--ignore_bad_cert', action='store_true',
dest='ignore_certs', default=False,
help=optparse.SUPPRESS_HELP)
parser.add_option('--insecure', action='store_false', dest='secure',
help=optparse.SUPPRESS_HELP)
parser.add_option('-e', '--email', action='store', dest='email',
metavar='EMAIL', default=None,
help='The username to use. Will prompt if omitted.')
parser.add_option('-H', '--host', action='store', dest='host',
metavar='HOST', default=None,
help='Overrides the Host header sent with all RPCs.')
parser.add_option('--no_cookies', action='store_false',
dest='save_cookies', default=True,
help='Do not save authentication cookies to local disk.')
parser.add_option('--skip_sdk_update_check', action='store_true',
dest='skip_sdk_update_check', default=False,
help='Do not check for SDK updates.')
parser.add_option('--passin', action='store_true',
dest='passin', default=False,
help='Read the login password from stdin.')
parser.add_option('-A', '--application', action='store', dest='app_id',
help=('Set the application, overriding the application '
'value from app.yaml file.'))
parser.add_option('-M', '--module', action='store', dest='module',
help=optparse.SUPPRESS_HELP)
parser.add_option('-V', '--version', action='store', dest='version',
help=('Set the (major) version, overriding the version '
'value from app.yaml file.'))
parser.add_option('-r', '--runtime', action='store', dest='runtime',
help='Override runtime from app.yaml file.')
parser.add_option('-E', '--env_variable', action='update',
dest='env_variables', metavar='NAME:VALUE',
help=('Set an environment variable, potentially '
'overriding an env_variable value from app.yaml '
'file (flag may be repeated to set multiple '
'variables).'))
parser.add_option('-R', '--allow_any_runtime', action='store_true',
dest='allow_any_runtime', default=False,
help='Do not validate the runtime in app.yaml')
parser.add_option('--oauth2', action='store_true', dest='oauth2',
default=False,
help='Use OAuth2 instead of password auth.')
parser.add_option('--oauth2_refresh_token', action='store',
dest='oauth2_refresh_token', default=None,
help='An existing OAuth2 refresh token to use. Will '
'not attempt interactive OAuth approval.')
parser.add_option('--oauth2_access_token', action='store',
dest='oauth2_access_token', default=None,
help='An existing OAuth2 access token to use. Will '
'not attempt interactive OAuth approval.')
parser.add_option('--oauth2_client_id', action='store',
dest='oauth2_client_id', default=None,
help=optparse.SUPPRESS_HELP)
parser.add_option('--oauth2_client_secret', action='store',
dest='oauth2_client_secret', default=None,
help=optparse.SUPPRESS_HELP)
parser.add_option('--oauth2_credential_file', action='store',
dest='oauth2_credential_file', default=None,
help=optparse.SUPPRESS_HELP)
parser.add_option('--authenticate_service_account', action='store_true',
dest='authenticate_service_account', default=False,
help='Authenticate using the default service account '
'for the Google Compute Engine VM in which appcfg is '
'being called')
parser.add_option('--noauth_local_webserver', action='store_false',
dest='auth_local_webserver', default=True,
help='Do not run a local web server to handle redirects '
'during OAuth authorization.')
return parser
def _MakeSpecificParser(self, action):
"""Creates a new parser with documentation specific to 'action'.
Args:
action: An Action instance to be used when initializing the new parser.
Returns:
A tuple containing:
parser: An instance of OptionParser customized to 'action'.
options: The command line options after re-parsing.
"""
parser = self._GetOptionParser()
parser.set_usage(action.usage)
parser.set_description('%s\n%s' % (action.short_desc, action.long_desc))
action.options(self, parser)
options, unused_args = parser.parse_args(self.argv[1:])
return parser, options
def _PrintHelpAndExit(self, exit_code=2):
"""Prints the parser's help message and exits the program.
Args:
exit_code: The integer code to pass to sys.exit().
"""
self.parser.print_help()
sys.exit(exit_code)
def _GetRpcServer(self):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
Raises:
OAuthNotAvailable: OAuth is requested but the dependencies aren't imported.
RuntimeError: The user has requested non-interactive authentication but the
environment is not correct for that to work.
"""
def GetUserCredentials():
"""Prompts the user for a username and password."""
email = self.options.email
if email is None:
email = self.raw_input_fn('Email: ')
password_prompt = 'Password for %s: ' % email
if self.options.passin:
password = self.raw_input_fn(password_prompt)
else:
password = self.password_input_fn(password_prompt)
return (email, password)
StatusUpdate('Host: %s' % self.options.server)
source = GetSourceName()
dev_appserver = self.options.host == 'localhost'
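# Three authentication paths follow: OAuth2 via appengine_rpc_httplib2 when
# --oauth2 (or a related flag) is set, a fake always-authenticated server
# when targeting the local dev_appserver, and classic email/password
# prompting otherwise.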
if self.options.oauth2 and not dev_appserver:
if not appengine_rpc_httplib2:
raise OAuthNotAvailable()
if not self.rpc_server_class:
self.rpc_server_class = appengine_rpc_httplib2.HttpRpcServerOAuth2
get_user_credentials = (
appengine_rpc_httplib2.HttpRpcServerOAuth2.OAuth2Parameters(
access_token=self.options.oauth2_access_token,
client_id=self.oauth_client_id,
client_secret=self.oauth_client_secret,
scope=self.oauth_scopes,
refresh_token=self.options.oauth2_refresh_token,
credential_file=self.options.oauth2_credential_file,
token_uri=self._GetTokenUri()))
if hasattr(appengine_rpc_httplib2.tools, 'FLAGS'):
appengine_rpc_httplib2.tools.FLAGS.auth_local_webserver = (
self.options.auth_local_webserver)
else:
if not self.rpc_server_class:
self.rpc_server_class = appengine_rpc.HttpRpcServerWithOAuth2Suggestion
if hasattr(self, 'runtime'):
self.rpc_server_class.RUNTIME = self.runtime
get_user_credentials = GetUserCredentials
if dev_appserver:
email = self.options.email
if email is None:
email = '[email protected]'
logging.info('Using debug user %s. Override with --email', email)
rpcserver = self.rpc_server_class(
self.options.server,
lambda: (email, 'password'),
GetUserAgent(),
source,
host_override=self.options.host,
save_cookies=self.options.save_cookies,
secure=False)
rpcserver.authenticated = True
return rpcserver
if self.options.passin:
auth_tries = 1
else:
auth_tries = 3
return self.rpc_server_class(self.options.server, get_user_credentials,
GetUserAgent(), source,
host_override=self.options.host,
save_cookies=self.options.save_cookies,
auth_tries=auth_tries,
account_type='HOSTED_OR_GOOGLE',
secure=self.options.secure,
ignore_certs=self.options.ignore_certs)
def _GetTokenUri(self):
"""Returns the OAuth2 token_uri, or None to use the default URI.
Returns:
A string that is the token_uri, or None.
Raises:
RuntimeError: The user has requested authentication for a service account
but the environment is not correct for that to work.
"""
if self.options.authenticate_service_account:
url = '%s/%s/scopes' % (METADATA_BASE, SERVICE_ACCOUNT_BASE)
try:
vm_scopes_string = self.read_url_contents(url)
except urllib2.URLError, e:
raise RuntimeError('Could not obtain scope list from metadata service: '
'%s: %s. This may be because we are not running in '
'a Google Compute Engine VM.' % (url, e))
vm_scopes = vm_scopes_string.split()
missing = list(set(self.oauth_scopes).difference(vm_scopes))
if missing:
raise RuntimeError('Required scopes %s missing from %s. '
'This VM instance probably needs to be recreated '
'with the missing scopes.' % (missing, vm_scopes))
return '%s/%s/token' % (METADATA_BASE, SERVICE_ACCOUNT_BASE)
else:
return None
def _FindYaml(self, basepath, file_name):
"""Find yaml files in application directory.
Args:
basepath: Base application directory.
file_name: Relative file path from basepath, without extension, to search
for.
Returns:
Path to located yaml file if one exists, else None.
"""
if not os.path.isdir(basepath):
self.parser.error('Not a directory: %s' % basepath)
alt_basepath = os.path.join(basepath, 'WEB-INF', 'appengine-generated')
for yaml_basepath in (basepath, alt_basepath):
for yaml_file in (file_name + '.yaml', file_name + '.yml'):
yaml_path = os.path.join(yaml_basepath, yaml_file)
if os.path.isfile(yaml_path):
return yaml_path
return None
def _ParseAppInfoFromYaml(self, basepath, basename='app'):
"""Parses the app.yaml file.
Args:
basepath: The directory of the application.
basename: The relative file path, from basepath, to search for.
Returns:
An AppInfoExternal object.
"""
try:
appyaml = self._ParseYamlFile(basepath, basename, appinfo_includes.Parse)
except yaml_errors.EventListenerError, e:
self.parser.error('Error parsing %s.yaml: %s.' % (
os.path.join(basepath, basename), e))
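# If no app.yaml/app.yml was found, fall back to Java WAR layouts: when the
# directory contains WEB-INF with web.xml and appengine-web.xml, an
# equivalent app.yaml is generated in memory and parsed instead.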
if not appyaml:
if self._JavaSupported():
if appcfg_java.IsWarFileWithoutYaml(basepath):
java_app_update = appcfg_java.JavaAppUpdate(basepath, self.options)
appyaml_string = java_app_update.GenerateAppYamlString(basepath, [])
appyaml = appinfo.LoadSingleAppInfo(appyaml_string)
if not appyaml:
self.parser.error('Directory contains neither an %s.yaml '
'configuration file nor a WEB-INF subdirectory '
'with web.xml and appengine-web.xml.' % basename)
else:
self.parser.error('Directory does not contain an %s.yaml configuration '
'file' % basename)
orig_application = appyaml.application
orig_module = appyaml.module
orig_version = appyaml.version
if self.options.app_id:
appyaml.application = self.options.app_id
if self.options.module:
appyaml.module = self.options.module
if self.options.version:
appyaml.version = self.options.version
if self.options.runtime:
appyaml.runtime = self.options.runtime
if self.options.env_variables:
if appyaml.env_variables is None:
appyaml.env_variables = appinfo.EnvironmentVariables()
appyaml.env_variables.update(self.options.env_variables)
if not appyaml.application:
self.parser.error('Expected -A app_id when application property in file '
'%s.yaml is not set.' % basename)
msg = 'Application: %s' % appyaml.application
if appyaml.application != orig_application:
msg += ' (was: %s)' % orig_application
if self.action.function == 'Update':
if (appyaml.module is not None and
appyaml.module != appinfo.DEFAULT_MODULE):
msg += '; module: %s' % appyaml.module
if appyaml.module != orig_module:
msg += ' (was: %s)' % orig_module
msg += '; version: %s' % appyaml.version
if appyaml.version != orig_version:
msg += ' (was: %s)' % orig_version
StatusUpdate(msg)
return appyaml
def _ParseYamlFile(self, basepath, basename, parser):
"""Parses a yaml file.
Args:
basepath: The base directory of the application.
basename: The relative file path, from basepath, with the '.yaml'
extension stripped off.
parser: the function or method used to parse the file.
Returns:
A single parsed yaml file or None if the file does not exist.
"""
file_name = self._FindYaml(basepath, basename)
if file_name is not None:
fh = self.opener(file_name, 'r')
try:
defns = parser(fh, open_fn=self.opener)
finally:
fh.close()
return defns
return None
def _ParseBackendsYaml(self, basepath):
"""Parses the backends.yaml file.
Args:
basepath: the directory of the application.
Returns:
A BackendsInfoExternal object or None if the file does not exist.
"""
return self._ParseYamlFile(basepath, 'backends',
backendinfo.LoadBackendInfo)
def _ParseIndexYaml(self, basepath, appyaml=None):
"""Parses the index.yaml file.
Args:
basepath: the directory of the application.
appyaml: The app.yaml, if present.
Returns:
A single parsed yaml file or None if the file does not exist.
"""
index_yaml = self._ParseYamlFile(basepath,
'index',
datastore_index.ParseIndexDefinitions)
if not index_yaml:
return None
self._SetApplication(index_yaml, 'index', appyaml)
return index_yaml
def _SetApplication(self, dest_yaml, basename, appyaml=None):
"""Parses and sets the application property onto the dest_yaml parameter.
The order of precedence is:
1. Command line (-A application)
2. Specified dest_yaml file
3. App.yaml file
This exits with a parse error if application is not present in any of these
locations.
Args:
dest_yaml: The yaml object to set 'application' on.
basename: The name of the dest_yaml file for use in errors.
appyaml: The already parsed appyaml, if present. If none, this method will
attempt to parse app.yaml.
"""
if self.options.app_id:
dest_yaml.application = self.options.app_id
if not dest_yaml.application:
if not appyaml:
appyaml = self._ParseYamlFile(self.basepath,
'app',
appinfo_includes.Parse)
if appyaml:
dest_yaml.application = appyaml.application
else:
self.parser.error('Expected -A app_id when %s.yaml.application is not '
'set and app.yaml is not present.' % basename)
def _ParseCronYaml(self, basepath, appyaml=None):
"""Parses the cron.yaml file.
Args:
basepath: the directory of the application.
appyaml: The app.yaml, if present.
Returns:
A CronInfoExternal object or None if the file does not exist.
"""
cron_yaml = self._ParseYamlFile(basepath, 'cron', croninfo.LoadSingleCron)
if not cron_yaml:
return None
self._SetApplication(cron_yaml, 'cron', appyaml)
return cron_yaml
def _ParseQueueYaml(self, basepath, appyaml=None):
"""Parses the queue.yaml file.
Args:
basepath: the directory of the application.
appyaml: The app.yaml, if present.
Returns:
A QueueInfoExternal object or None if the file does not exist.
"""
queue_yaml = self._ParseYamlFile(basepath,
'queue',
queueinfo.LoadSingleQueue)
if not queue_yaml:
return None
self._SetApplication(queue_yaml, 'queue', appyaml)
return queue_yaml
def _ParseDispatchYaml(self, basepath):
"""Parses the dispatch.yaml file.
Args:
basepath: the directory of the application.
Returns:
A DispatchInfoExternal object or None if the file does not exist.
"""
return self._ParseYamlFile(basepath, 'dispatch',
dispatchinfo.LoadSingleDispatch)
def _ParseDosYaml(self, basepath, appyaml=None):
"""Parses the dos.yaml file.
Args:
basepath: the directory of the application.
appyaml: The app.yaml, if present.
Returns:
A DosInfoExternal object or None if the file does not exist.
"""
dos_yaml = self._ParseYamlFile(basepath, 'dos', dosinfo.LoadSingleDos)
if not dos_yaml:
return None
self._SetApplication(dos_yaml, 'dos', appyaml)
return dos_yaml
def Help(self, action=None):
"""Prints help for a specific action.
Args:
action: If provided, print help for the action provided.
Expects self.args[0], or 'action', to contain the name of the action in
question. Exits the program after printing the help message.
"""
if not action:
if len(self.args) > 1:
self.args = [' '.join(self.args)]
if len(self.args) != 1 or self.args[0] not in self.actions:
self.parser.error('Expected a single action argument. '
' Must be one of:\n' +
self._GetActionDescriptions())
action = self.args[0]
action = self.actions[action]
self.parser, unused_options = self._MakeSpecificParser(action)
self._PrintHelpAndExit(exit_code=0)
def DownloadApp(self):
"""Downloads the given app+version."""
if len(self.args) != 1:
self.parser.error('\"download_app\" expects one non-option argument, '
'found ' + str(len(self.args)) + '.')
out_dir = self.args[0]
app_id = self.options.app_id
if app_id is None:
self.parser.error('You must specify an app ID via -A or --application.')
module = self.options.module
app_version = self.options.version
if os.path.exists(out_dir):
if not os.path.isdir(out_dir):
self.parser.error('Cannot download to path "%s": '
'there\'s a file in the way.' % out_dir)
elif os.listdir(out_dir):
self.parser.error('Cannot download to path "%s": directory already '
'exists and it isn\'t empty.' % out_dir)
rpcserver = self._GetRpcServer()
DoDownloadApp(rpcserver, out_dir, app_id, module, app_version)
def UpdateVersion(self, rpcserver, basepath, appyaml, module_yaml_path,
backend=None):
"""Updates and deploys a new appversion.
Args:
rpcserver: An AbstractRpcServer instance on which RPC calls can be made.
basepath: The root directory of the version to update.
appyaml: The AppInfoExternal object parsed from an app.yaml-like file.
module_yaml_path: The (string) path to the yaml file, relative to the
bundle directory.
backend: The name of the backend to update, if any.
Returns:
An appinfo.AppInfoSummary if one was returned from the Deploy, None
otherwise.
Raises:
RuntimeError: If go-app-builder fails to generate a mapping from relative
paths to absolute paths, its stderr is raised.
"""
if not self.options.precompilation and appyaml.runtime == 'go':
logging.warning('Precompilation is required for Go apps; '
'ignoring --no_precompilation')
self.options.precompilation = True
if appyaml.runtime.startswith('java'):
self.options.precompilation = False
if self.options.precompilation:
if not appyaml.derived_file_type:
appyaml.derived_file_type = []
if appinfo.PYTHON_PRECOMPILED not in appyaml.derived_file_type:
appyaml.derived_file_type.append(appinfo.PYTHON_PRECOMPILED)
paths = self.file_iterator(basepath, appyaml.skip_files, appyaml.runtime)
openfunc = lambda path: self.opener(os.path.join(basepath, path), 'rb')
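# For Go apps, go-app-builder is run with -print_extras to obtain a
# 'relative_path|actual_path' mapping of extra generated sources; those
# entries are overlaid on the app's own files so they are uploaded too, and
# openfunc is wrapped to read overlay entries from their real locations.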
if appyaml.runtime == 'go':
goroot = os.path.join(os.path.dirname(google.appengine.__file__),
'../../goroot')
gopath = os.environ.get('GOPATH')
if os.path.isdir(goroot) and gopath:
app_paths = list(paths)
go_files = [f for f in app_paths
if f.endswith('.go') and not appyaml.nobuild_files.match(f)]
if not go_files:
raise RuntimeError('no Go source files to upload '
'(-nobuild_files applied)')
gab_argv = [
os.path.join(goroot, 'bin', 'go-app-builder'),
'-app_base', self.basepath,
'-arch', '6',
'-gopath', gopath,
'-goroot', goroot,
'-print_extras',
] + go_files
env = {
'GOOS': 'linux',
'GOARCH': 'amd64',
}
try:
p = subprocess.Popen(gab_argv, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
(stdout, stderr) = p.communicate()
except Exception, e:
raise RuntimeError('failed running go-app-builder', e)
if p.returncode != 0:
raise RuntimeError(stderr)
overlay = dict([l.split('|') for l in stdout.split('\n') if l])
logging.info('GOPATH overlay: %s', overlay)
def Open(path):
if path in overlay:
return self.opener(overlay[path], 'rb')
return self.opener(os.path.join(basepath, path), 'rb')
paths = app_paths + overlay.keys()
openfunc = Open
appversion = AppVersionUpload(rpcserver,
appyaml,
module_yaml_path=module_yaml_path,
backend=backend,
error_fh=self.error_fh)
return appversion.DoUpload(paths, openfunc)
def UpdateUsingSpecificFiles(self):
"""Updates and deploys new app versions based on given config files."""
rpcserver = self._GetRpcServer()
all_files = [self.basepath] + self.args
has_python25_version = False
for yaml_path in all_files:
file_name = os.path.basename(yaml_path)
self.basepath = os.path.dirname(yaml_path)
if not self.basepath:
self.basepath = '.'
module_yaml = self._ParseAppInfoFromYaml(self.basepath,
os.path.splitext(file_name)[0])
if module_yaml.runtime == 'python':
has_python25_version = True
if not module_yaml.module and file_name != 'app.yaml':
ErrorUpdate("Error: 'module' parameter not specified in %s" %
yaml_path)
continue
self.UpdateVersion(rpcserver, self.basepath, module_yaml, file_name)
if has_python25_version:
MigratePython27Notice()
def Update(self):
"""Updates and deploys a new appversion and global app configs."""
if not os.path.isdir(self.basepath):
self.UpdateUsingSpecificFiles()
return
if (self._JavaSupported() and
appcfg_java.IsWarFileWithoutYaml(self.basepath)):
java_app_update = appcfg_java.JavaAppUpdate(self.basepath, self.options)
sdk_root = os.path.dirname(appcfg_java.__file__)
self.options.compile_jsps = True
self.stage_dir = java_app_update.CreateStagingDirectory(sdk_root)
try:
appyaml = self._ParseAppInfoFromYaml(
self.stage_dir,
basename=os.path.splitext(APP_YAML_FILENAME)[0])
self._UpdateWithParsedAppYaml(appyaml, self.stage_dir)
finally:
if self.options.retain_upload_dir:
StatusUpdate(
'Temporary staging directory left in %s' % self.stage_dir)
else:
shutil.rmtree(self.stage_dir)
else:
appyaml = self._ParseAppInfoFromYaml(
self.basepath,
basename=os.path.splitext(APP_YAML_FILENAME)[0])
self._UpdateWithParsedAppYaml(appyaml, self.basepath)
def _UpdateWithParsedAppYaml(self, appyaml, basepath):
"""Completes update command.
Helper to Update.
Args:
appyaml: AppInfoExternal for the app.
basepath: Path where application's files can be found.
"""
self.runtime = appyaml.runtime
rpcserver = self._GetRpcServer()
if self.options.skip_sdk_update_check:
logging.info('Skipping update check')
else:
updatecheck = self.update_check_class(rpcserver, appyaml)
updatecheck.CheckForUpdates()
def _AbortAppMismatch(yaml_name):
StatusUpdate('Error: Aborting upload because application in %s does not '
'match application in app.yaml' % yaml_name)
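# Each auxiliary config (dos, queue, cron, index) must name the same
# application as app.yaml; a mismatch aborts the whole upload. After the
# version itself is uploaded, these configs are pushed individually.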
dos_yaml = self._ParseDosYaml(basepath, appyaml)
if dos_yaml and dos_yaml.application != appyaml.application:
_AbortAppMismatch('dos.yaml')
return
queue_yaml = self._ParseQueueYaml(basepath, appyaml)
if queue_yaml and queue_yaml.application != appyaml.application:
_AbortAppMismatch('queue.yaml')
return
cron_yaml = self._ParseCronYaml(basepath, appyaml)
if cron_yaml and cron_yaml.application != appyaml.application:
_AbortAppMismatch('cron.yaml')
return
index_defs = self._ParseIndexYaml(basepath, appyaml)
if index_defs and index_defs.application != appyaml.application:
_AbortAppMismatch('index.yaml')
return
self.UpdateVersion(rpcserver, basepath, appyaml, APP_YAML_FILENAME)
if appyaml.runtime == 'python':
MigratePython27Notice()
if self.options.backends:
self.BackendsUpdate()
if index_defs:
index_upload = IndexDefinitionUpload(rpcserver, index_defs)
try:
index_upload.DoUpload()
except urllib2.HTTPError, e:
ErrorUpdate('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---' %
(e.code, e.read().rstrip('\n')))
print >> self.error_fh, (
'Your app was updated, but there was an error updating your '
'indexes. Please retry later with appcfg.py update_indexes.')
if cron_yaml:
cron_upload = CronEntryUpload(rpcserver, cron_yaml)
cron_upload.DoUpload()
if queue_yaml:
queue_upload = QueueEntryUpload(rpcserver, queue_yaml)
queue_upload.DoUpload()
if dos_yaml:
dos_upload = DosEntryUpload(rpcserver, dos_yaml)
dos_upload.DoUpload()
if appyaml:
pagespeed_upload = PagespeedEntryUpload(
rpcserver, appyaml, appyaml.pagespeed)
try:
pagespeed_upload.DoUpload()
except urllib2.HTTPError, e:
ErrorUpdate('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---' %
(e.code, e.read().rstrip('\n')))
print >> self.error_fh, (
'Your app was updated, but there was an error updating PageSpeed. '
'Please try the update again later.')
def _UpdateOptions(self, parser):
"""Adds update-specific options to 'parser'.
Args:
parser: An instance of OptionParser.
"""
parser.add_option('--no_precompilation', action='store_false',
dest='precompilation', default=True,
help='Disable automatic precompilation '
'(ignored for Go apps).')
parser.add_option('--backends', action='store_true',
dest='backends', default=False,
help='Update backends when performing appcfg update.')
if self._JavaSupported():
appcfg_java.AddUpdateOptions(parser)
def VacuumIndexes(self):
"""Deletes unused indexes."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
index_defs = self._ParseIndexYaml(self.basepath)
if index_defs is None:
index_defs = datastore_index.IndexDefinitions()
rpcserver = self._GetRpcServer()
vacuum = VacuumIndexesOperation(rpcserver,
self.options.force_delete)
vacuum.DoVacuum(index_defs)
def _VacuumIndexesOptions(self, parser):
"""Adds vacuum_indexes-specific options to 'parser'.
Args:
parser: An instance of OptionParser.
"""
parser.add_option('-f', '--force', action='store_true', dest='force_delete',
default=False,
help='Force deletion without being prompted.')
def UpdateCron(self):
"""Updates any new or changed cron definitions."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
rpcserver = self._GetRpcServer()
cron_yaml = self._ParseCronYaml(self.basepath)
if cron_yaml:
cron_upload = CronEntryUpload(rpcserver, cron_yaml)
cron_upload.DoUpload()
else:
print >>sys.stderr, 'Could not find cron configuration. No action taken.'
def UpdateIndexes(self):
"""Updates indexes."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
rpcserver = self._GetRpcServer()
index_defs = self._ParseIndexYaml(self.basepath)
if index_defs:
index_upload = IndexDefinitionUpload(rpcserver, index_defs)
index_upload.DoUpload()
else:
print >>sys.stderr, 'Could not find index configuration. No action taken.'
def UpdateQueues(self):
"""Updates any new or changed task queue definitions."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
rpcserver = self._GetRpcServer()
queue_yaml = self._ParseQueueYaml(self.basepath)
if queue_yaml:
queue_upload = QueueEntryUpload(rpcserver, queue_yaml)
queue_upload.DoUpload()
else:
print >>sys.stderr, 'Could not find queue configuration. No action taken.'
def UpdateDispatch(self):
"""Updates new or changed dispatch definitions."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
rpcserver = self._GetRpcServer()
dispatch_yaml = self._ParseDispatchYaml(self.basepath)
if dispatch_yaml:
if self.options.app_id:
dispatch_yaml.application = self.options.app_id
if not dispatch_yaml.application:
self.parser.error('Expected -A app_id when dispatch.yaml.application'
' is not set.')
StatusUpdate('Uploading dispatch entries.')
rpcserver.Send('/api/dispatch/update',
app_id=dispatch_yaml.application,
payload=dispatch_yaml.ToYAML())
else:
print >>sys.stderr, ('Could not find dispatch configuration. No action'
' taken.')
def UpdateDos(self):
"""Updates any new or changed dos definitions."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
rpcserver = self._GetRpcServer()
dos_yaml = self._ParseDosYaml(self.basepath)
if dos_yaml:
dos_upload = DosEntryUpload(rpcserver, dos_yaml)
dos_upload.DoUpload()
else:
print >>sys.stderr, 'Could not find dos configuration. No action taken.'
def BackendsAction(self):
"""Placeholder; we never expect this action to be invoked."""
pass
def BackendsPhpCheck(self, appyaml):
"""Don't support backends with the PHP runtime.
This should be used to prevent use of backends update/start/configure
with the PHP runtime. We continue to allow backends
stop/delete/list/rollback just in case there are existing PHP backends.
Args:
appyaml: A parsed app.yaml file.
"""
if appyaml.runtime == 'php':
_PrintErrorAndExit(
self.error_fh,
'Error: Backends are not supported with the PHP runtime. '
'Please use Modules instead.\n')
def BackendsYamlCheck(self, appyaml, backend=None):
"""Check the backends.yaml file is sane and which backends to update."""
if appyaml.backends:
self.parser.error('Backends are not allowed in app.yaml.')
backends_yaml = self._ParseBackendsYaml(self.basepath)
appyaml.backends = backends_yaml.backends
if not appyaml.backends:
self.parser.error('No backends found in backends.yaml.')
backends = []
for backend_entry in appyaml.backends:
entry = backendinfo.LoadBackendEntry(backend_entry.ToYAML())
if entry.name in backends:
self.parser.error('Duplicate entry for backend: %s.' % entry.name)
else:
backends.append(entry.name)
backends_to_update = []
if backend:
if backend in backends:
backends_to_update = [backend]
else:
self.parser.error("Backend '%s' not found in backends.yaml." %
backend)
else:
backends_to_update = backends
return backends_to_update
def BackendsUpdate(self):
"""Updates a backend."""
self.backend = None
if len(self.args) == 1:
self.backend = self.args[0]
elif len(self.args) > 1:
self.parser.error('Expected an optional <backend> argument.')
yaml_file_basename = 'app'
appyaml = self._ParseAppInfoFromYaml(self.basepath,
basename=yaml_file_basename)
BackendsStatusUpdate(appyaml.runtime)
self.BackendsPhpCheck(appyaml)
rpcserver = self._GetRpcServer()
backends_to_update = self.BackendsYamlCheck(appyaml, self.backend)
for backend in backends_to_update:
self.UpdateVersion(rpcserver, self.basepath, appyaml, yaml_file_basename,
backend=backend)
def BackendsList(self):
"""Lists all backends for an app."""
if self.args:
self.parser.error('Expected no arguments.')
appyaml = self._ParseAppInfoFromYaml(self.basepath)
BackendsStatusUpdate(appyaml.runtime)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/list', app_id=appyaml.application)
print >> self.out_fh, response
def BackendsRollback(self):
"""Does a rollback of an existing transaction on this backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
self._Rollback(self.args[0])
def BackendsStart(self):
"""Starts a backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
backend = self.args[0]
appyaml = self._ParseAppInfoFromYaml(self.basepath)
BackendsStatusUpdate(appyaml.runtime)
self.BackendsPhpCheck(appyaml)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/start',
app_id=appyaml.application,
backend=backend)
print >> self.out_fh, response
def BackendsStop(self):
"""Stops a backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
backend = self.args[0]
appyaml = self._ParseAppInfoFromYaml(self.basepath)
BackendsStatusUpdate(appyaml.runtime)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/stop',
app_id=appyaml.application,
backend=backend)
print >> self.out_fh, response
def BackendsDelete(self):
"""Deletes a backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
backend = self.args[0]
appyaml = self._ParseAppInfoFromYaml(self.basepath)
BackendsStatusUpdate(appyaml.runtime)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/delete',
app_id=appyaml.application,
backend=backend)
print >> self.out_fh, response
def BackendsConfigure(self):
"""Changes the configuration of an existing backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
backend = self.args[0]
appyaml = self._ParseAppInfoFromYaml(self.basepath)
BackendsStatusUpdate(appyaml.runtime)
self.BackendsPhpCheck(appyaml)
backends_yaml = self._ParseBackendsYaml(self.basepath)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/configure',
app_id=appyaml.application,
backend=backend,
payload=backends_yaml.ToYAML())
print >> self.out_fh, response
def ListVersions(self):
"""Lists all versions for an app."""
if self.args:
self.parser.error('Expected no arguments.')
appyaml = self._ParseAppInfoFromYaml(self.basepath)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/versions/list', app_id=appyaml.application)
parsed_response = yaml.safe_load(response)
if not parsed_response:
print >> self.out_fh, ('No versions uploaded for app: %s.' %
appyaml.application)
else:
print >> self.out_fh, response
def DeleteVersion(self):
"""Deletes the specified version for an app."""
if not (self.options.app_id and self.options.version):
self.parser.error('Expected an <app_id> argument, a <version> argument '
'and an optional <module> argument.')
if self.options.module:
module = self.options.module
else:
module = ''
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/versions/delete',
app_id=self.options.app_id,
version_match=self.options.version,
module=module)
print >> self.out_fh, response
def DebugAction(self):
"""Sets the specified version and instance for an app to be debuggable."""
if len(self.args) == 1:
appyaml = self._ParseAppInfoFromYaml(self.args[0])
app_id = appyaml.application
module = appyaml.module or ''
version = appyaml.version
elif not self.args:
if not (self.options.app_id and self.options.version):
self.parser.error(
('Expected a <directory> argument or both --application and '
'--version flags.'))
module = ''
else:
self._PrintHelpAndExit()
if self.options.instance is None:
self.parser.error(
('Expected an --instance flag.'))
if self.options.app_id:
app_id = self.options.app_id
if self.options.module:
module = self.options.module
if self.options.version:
version = self.options.version
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/vms/debug',
app_id=app_id,
version_match=version,
module=module,
instance=self.options.instance)
print >> self.out_fh, response
def _DebugActionOptions(self, parser):
"""Adds debug-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-I', '--instance', type='int', dest='instance',
help='Instance to debug.')
def _ParseAndValidateModuleYamls(self, yaml_paths):
"""Validates given yaml paths and returns the parsed yaml objects.
Args:
yaml_paths: List of paths to AppInfo yaml files.
Returns:
List of parsed AppInfo yamls.
"""
results = []
app_id = None
last_yaml_path = None
for yaml_path in yaml_paths:
if not os.path.isfile(yaml_path):
_PrintErrorAndExit(
self.error_fh,
("Error: The given path '%s' is not to a YAML configuration "
"file.\n") % yaml_path)
file_name = os.path.basename(yaml_path)
base_path = os.path.dirname(yaml_path)
if not base_path:
base_path = '.'
module_yaml = self._ParseAppInfoFromYaml(base_path,
os.path.splitext(file_name)[0])
if not module_yaml.module and file_name != 'app.yaml':
_PrintErrorAndExit(
self.error_fh,
"Error: 'module' parameter not specified in %s" % yaml_path)
if app_id is not None and module_yaml.application != app_id:
_PrintErrorAndExit(
self.error_fh,
"Error: 'application' value '%s' in %s does not match the value "
"'%s', found in %s" % (module_yaml.application,
yaml_path,
app_id,
last_yaml_path))
app_id = module_yaml.application
last_yaml_path = yaml_path
results.append(module_yaml)
return results
def _ModuleAction(self, action_path):
"""Process flags and yaml files and make a call to the given path.
The 'start_module_version' and 'stop_module_version' actions are extremely
similar in how they process input to appcfg.py and only really differ in
what path they hit on the RPCServer.
Args:
action_path: Path on the RPCServer to send the call to.
"""
modules_to_process = []
if not self.args:
if not (self.options.app_id and
self.options.module and
self.options.version):
_PrintErrorAndExit(self.error_fh,
'Expected at least one <file> argument or the '
'--application, --module and --version flags to'
' be set.')
else:
modules_to_process.append((self.options.app_id,
self.options.module,
self.options.version))
else:
if self.options.module:
_PrintErrorAndExit(self.error_fh,
'You may not specify a <file> argument with the '
'--module flag.')
module_yamls = self._ParseAndValidateModuleYamls(self.args)
for serv_yaml in module_yamls:
app_id = serv_yaml.application
modules_to_process.append((self.options.app_id or serv_yaml.application,
serv_yaml.module or appinfo.DEFAULT_MODULE,
self.options.version or serv_yaml.version))
rpcserver = self._GetRpcServer()
for app_id, module, version in modules_to_process:
response = rpcserver.Send(action_path,
app_id=app_id,
module=module,
version=version)
print >> self.out_fh, response
def StartModuleVersion(self):
"""Starts one or more versions."""
self._ModuleAction('/api/modules/start')
def StopModuleVersion(self):
"""Stops one or more versions."""
self._ModuleAction('/api/modules/stop')
def Rollback(self):
"""Does a rollback of an existing transaction for this app version."""
if self.args:
self.parser.error('Expected a single <directory> or <file> argument.')
self._Rollback()
def _Rollback(self, backend=None):
"""Does a rollback of an existing transaction.
Args:
backend: name of a backend to rollback, or None
If a backend is specified the rollback will affect only that backend, if no
backend is specified the rollback will affect the current app version.
"""
if os.path.isdir(self.basepath):
module_yaml = self._ParseAppInfoFromYaml(self.basepath)
else:
file_name = os.path.basename(self.basepath)
self.basepath = os.path.dirname(self.basepath)
if not self.basepath:
self.basepath = '.'
module_yaml = self._ParseAppInfoFromYaml(self.basepath,
os.path.splitext(file_name)[0])
appversion = AppVersionUpload(self._GetRpcServer(), module_yaml,
module_yaml_path='app.yaml',
backend=backend)
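# Marking the upload as already in a transaction makes Rollback() act on the existing server-side transaction rather than starting a new one.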
appversion.in_transaction = True
appversion.Rollback()
def SetDefaultVersion(self):
"""Sets the default version."""
module = ''
if len(self.args) == 1:
stored_modules = self.options.module
self.options.module = None
try:
appyaml = self._ParseAppInfoFromYaml(self.args[0])
finally:
self.options.module = stored_modules
app_id = appyaml.application
module = appyaml.module or ''
version = appyaml.version
elif not self.args:
if not (self.options.app_id and self.options.version):
self.parser.error(
('Expected a <directory> argument or both --application and '
'--version flags.'))
else:
self._PrintHelpAndExit()
if self.options.app_id:
app_id = self.options.app_id
if self.options.module:
module = self.options.module
if self.options.version:
version = self.options.version
version_setter = DefaultVersionSet(self._GetRpcServer(),
app_id,
module,
version)
version_setter.SetVersion()
def MigrateTraffic(self):
"""Migrates traffic."""
if len(self.args) == 1:
appyaml = self._ParseAppInfoFromYaml(self.args[0])
app_id = appyaml.application
version = appyaml.version
elif not self.args:
if not (self.options.app_id and self.options.version):
self.parser.error(
('Expected a <directory> argument or both --application and '
'--version flags.'))
else:
self._PrintHelpAndExit()
if self.options.app_id:
app_id = self.options.app_id
if self.options.version:
version = self.options.version
traffic_migrator = TrafficMigrator(
self._GetRpcServer(), app_id, version)
traffic_migrator.MigrateTraffic()
def RequestLogs(self):
"""Write request logs to a file."""
args_length = len(self.args)
module = ''
if args_length == 2:
appyaml = self._ParseAppInfoFromYaml(self.args.pop(0))
app_id = appyaml.application
module = appyaml.module or ''
version = appyaml.version
elif args_length == 1:
if not (self.options.app_id and self.options.version):
self.parser.error(
('Expected the --application and --version flags if <directory> '
'argument is not specified.'))
else:
self._PrintHelpAndExit()
if self.options.app_id:
app_id = self.options.app_id
if self.options.module:
module = self.options.module
if self.options.version:
version = self.options.version
if (self.options.severity is not None and
not 0 <= self.options.severity <= MAX_LOG_LEVEL):
self.parser.error(
'Severity range is 0 (DEBUG) through %s (CRITICAL).' % MAX_LOG_LEVEL)
if self.options.num_days is None:
self.options.num_days = int(not self.options.append)
try:
end_date = self._ParseEndDate(self.options.end_date)
except (TypeError, ValueError):
self.parser.error('End date must be in the format YYYY-MM-DD.')
rpcserver = self._GetRpcServer()
logs_requester = LogsRequester(rpcserver,
app_id,
module,
version,
self.args[0],
self.options.num_days,
self.options.append,
self.options.severity,
end_date,
self.options.vhost,
self.options.include_vhost,
self.options.include_all,
time_func=self.time_func)
logs_requester.DownloadLogs()
@staticmethod
def _ParseEndDate(date, time_func=time.time):
"""Translates an ISO 8601 date to a date object.
Args:
date: A date string as YYYY-MM-DD.
time_func: A time.time() compatible function, which can be overridden for
testing.
Returns:
A date object representing the last day of logs to get.
If no date is given, returns today in the US/Pacific timezone.
"""
if not date:
return PacificDate(time_func())
return datetime.date(*[int(i) for i in date.split('-')])
def _RequestLogsOptions(self, parser):
"""Adds request_logs-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-n', '--num_days', type='int', dest='num_days',
action='store', default=None,
help='Number of days worth of log data to get. '
'The cut-off point is midnight US/Pacific. '
'Use 0 to get all available logs. '
'Default is 1, unless --append is also given; '
'then the default is 0.')
parser.add_option('-a', '--append', dest='append',
action='store_true', default=False,
help='Append to existing file.')
parser.add_option('--severity', type='int', dest='severity',
action='store', default=None,
help='Severity of app-level log messages to get. '
'The range is 0 (DEBUG) through 4 (CRITICAL). '
'If omitted, only request logs are returned.')
parser.add_option('--vhost', type='string', dest='vhost',
action='store', default=None,
help='The virtual host of log messages to get. '
'If omitted, all log messages are returned.')
parser.add_option('--include_vhost', dest='include_vhost',
action='store_true', default=False,
help='Include virtual host in log messages.')
parser.add_option('--include_all', dest='include_all',
action='store_true', default=None,
help='Include everything in log messages.')
parser.add_option('--end_date', dest='end_date',
action='store', default='',
help='End date (as YYYY-MM-DD) of period for log data. '
'Defaults to today.')
def CronInfo(self, now=None, output=sys.stdout):
"""Displays information about cron definitions.
Args:
now: used for testing.
output: Used for testing.
"""
if self.args:
self.parser.error('Expected a single <directory> argument.')
if now is None:
now = datetime.datetime.utcnow()
cron_yaml = self._ParseCronYaml(self.basepath)
if cron_yaml and cron_yaml.cron:
for entry in cron_yaml.cron:
description = entry.description
if not description:
description = '<no description>'
if not entry.timezone:
entry.timezone = 'UTC'
print >>output, '\n%s:\nURL: %s\nSchedule: %s (%s)' % (description,
entry.url,
entry.schedule,
entry.timezone)
if entry.timezone != 'UTC':
print >>output, ('Note: Schedules with timezones won\'t be calculated'
' correctly here')
schedule = groctimespecification.GrocTimeSpecification(entry.schedule)
matches = schedule.GetMatches(now, self.options.num_runs)
for match in matches:
print >>output, '%s, %s from now' % (
match.strftime('%Y-%m-%d %H:%M:%SZ'), match - now)
def _CronInfoOptions(self, parser):
"""Adds cron_info-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-n', '--num_runs', type='int', dest='num_runs',
action='store', default=5,
help='Number of runs of each cron job to display. '
'Default is 5.')
def _CheckRequiredLoadOptions(self):
"""Checks that upload/download options are present."""
for option in ['filename']:
if getattr(self.options, option) is None:
self.parser.error('Option \'%s\' is required.' % option)
if not self.options.url:
self.parser.error('You must have google.appengine.ext.remote_api.handler '
'assigned to an endpoint in app.yaml, or provide '
'the url of the handler via the \'url\' option.')
def InferRemoteApiUrl(self, appyaml):
"""Uses app.yaml to determine the remote_api endpoint.
Args:
appyaml: A parsed app.yaml file.
Returns:
The url of the remote_api endpoint as a string, or None
"""
handlers = appyaml.handlers
handler_suffixes = ['remote_api/handler.py',
'remote_api.handler.application']
app_id = appyaml.application
for handler in handlers:
if hasattr(handler, 'script') and handler.script:
if any(handler.script.endswith(suffix) for suffix in handler_suffixes):
server = self.options.server
url = handler.url
if url.endswith('(/.*)?'):
url = url[:-6]
if server == 'appengine.google.com':
return 'http://%s.appspot.com%s' % (app_id, url)
else:
match = re.match(PREFIXED_BY_ADMIN_CONSOLE_RE, server)
if match:
return 'http://%s%s%s' % (app_id, match.group(1), url)
else:
return 'http://%s%s' % (server, url)
return None
def RunBulkloader(self, arg_dict):
"""Invokes the bulkloader with the given keyword arguments.
Args:
arg_dict: Dictionary of arguments to pass to bulkloader.Run().
"""
try:
import sqlite3
except ImportError:
logging.error('upload_data action requires SQLite3 and the python '
'sqlite3 module (included in python since 2.5).')
sys.exit(1)
sys.exit(bulkloader.Run(arg_dict))
def _SetupLoad(self):
"""Performs common verification and set up for upload and download."""
if len(self.args) != 1 and not self.options.url:
self.parser.error('Expected either --url or a single <directory> '
'argument.')
if len(self.args) == 1:
self.basepath = self.args[0]
appyaml = self._ParseAppInfoFromYaml(self.basepath)
self.options.app_id = appyaml.application
if not self.options.url:
url = self.InferRemoteApiUrl(appyaml)
if url is not None:
self.options.url = url
self._CheckRequiredLoadOptions()
if self.options.batch_size < 1:
self.parser.error('batch_size must be 1 or larger.')
if verbosity == 1:
logging.getLogger().setLevel(logging.INFO)
self.options.debug = False
else:
logging.getLogger().setLevel(logging.DEBUG)
self.options.debug = True
def _MakeLoaderArgs(self):
"""Returns a dict made from many attributes of self.options, plus others.
See body for list of self.options attributes included. In addition, result
includes
'application' = self.options.app_id
'throttle_class' = self.throttle_class
Returns:
A dict.
"""
args = dict([(arg_name, getattr(self.options, arg_name, None)) for
arg_name in (
'url',
'filename',
'batch_size',
'kind',
'num_threads',
'bandwidth_limit',
'rps_limit',
'http_limit',
'db_filename',
'config_file',
'auth_domain',
'has_header',
'loader_opts',
'log_file',
'passin',
'email',
'debug',
'exporter_opts',
'mapper_opts',
'result_db_filename',
'dry_run',
'dump',
'restore',
'namespace',
'create_config',
)])
args['application'] = self.options.app_id
args['throttle_class'] = self.throttle_class
return args
def PerformDownload(self, run_fn=None):
"""Performs a datastore download via the bulkloader.
Args:
run_fn: Function to invoke the bulkloader, used for testing.
"""
if run_fn is None:
run_fn = self.RunBulkloader
self._SetupLoad()
StatusUpdate('Downloading data records.')
args = self._MakeLoaderArgs()
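# With a bulkloader config file this runs as a structured 'download'; without one it falls back to a raw datastore 'dump'.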
args['download'] = bool(args['config_file'])
args['has_header'] = False
args['map'] = False
args['dump'] = not args['config_file']
args['restore'] = False
args['create_config'] = False
run_fn(args)
def PerformUpload(self, run_fn=None):
"""Performs a datastore upload via the bulkloader.
Args:
run_fn: Function to invoke the bulkloader, used for testing.
"""
if run_fn is None:
run_fn = self.RunBulkloader
self._SetupLoad()
StatusUpdate('Uploading data records.')
args = self._MakeLoaderArgs()
args['download'] = False
args['map'] = False
args['dump'] = False
args['restore'] = not args['config_file']
args['create_config'] = False
run_fn(args)
def CreateBulkloadConfig(self, run_fn=None):
"""Create a bulkloader config via the bulkloader wizard.
Args:
run_fn: Function to invoke the bulkloader, used for testing.
"""
if run_fn is None:
run_fn = self.RunBulkloader
self._SetupLoad()
StatusUpdate('Creating bulkloader configuration.')
args = self._MakeLoaderArgs()
args['download'] = False
args['has_header'] = False
args['map'] = False
args['dump'] = False
args['restore'] = False
args['create_config'] = True
run_fn(args)
def _PerformLoadOptions(self, parser):
"""Adds options common to 'upload_data' and 'download_data'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('--url', type='string', dest='url',
action='store',
help='The location of the remote_api endpoint.')
parser.add_option('--batch_size', type='int', dest='batch_size',
action='store', default=10,
help='Number of records to post in each request.')
parser.add_option('--bandwidth_limit', type='int', dest='bandwidth_limit',
action='store', default=250000,
help='The maximum bytes/second bandwidth for transfers.')
parser.add_option('--rps_limit', type='int', dest='rps_limit',
action='store', default=20,
help='The maximum records/second for transfers.')
parser.add_option('--http_limit', type='int', dest='http_limit',
action='store', default=8,
help='The maximum requests/second for transfers.')
parser.add_option('--db_filename', type='string', dest='db_filename',
action='store',
help='Name of the progress database file.')
parser.add_option('--auth_domain', type='string', dest='auth_domain',
action='store', default='gmail.com',
help='The name of the authorization domain to use.')
parser.add_option('--log_file', type='string', dest='log_file',
help='File to write bulkloader logs. If not supplied '
'then a new log file will be created, named: '
'bulkloader-log-TIMESTAMP.')
parser.add_option('--dry_run', action='store_true',
dest='dry_run', default=False,
help='Do not execute any remote_api calls')
parser.add_option('--namespace', type='string', dest='namespace',
action='store', default='',
help='Namespace to use when accessing datastore.')
parser.add_option('--num_threads', type='int', dest='num_threads',
action='store', default=10,
help='Number of threads to transfer records with.')
def _PerformUploadOptions(self, parser):
"""Adds 'upload_data' specific options to the 'parser' passed in.
Args:
parser: An instance of OptionsParser.
"""
self._PerformLoadOptions(parser)
parser.add_option('--filename', type='string', dest='filename',
action='store',
help='The name of the file containing the input data.'
' (Required)')
parser.add_option('--kind', type='string', dest='kind',
action='store',
help='The kind of the entities to store.')
parser.add_option('--has_header', dest='has_header',
action='store_true', default=False,
help='Whether the first line of the input file should be'
' skipped')
parser.add_option('--loader_opts', type='string', dest='loader_opts',
help='A string to pass to the Loader.initialize method.')
parser.add_option('--config_file', type='string', dest='config_file',
action='store',
help='Name of the configuration file.')
def _PerformDownloadOptions(self, parser):
"""Adds 'download_data' specific options to the 'parser' passed in.
Args:
parser: An instance of OptionsParser.
"""
self._PerformLoadOptions(parser)
parser.add_option('--filename', type='string', dest='filename',
action='store',
help='The name of the file where output data is to be'
' written. (Required)')
parser.add_option('--kind', type='string', dest='kind',
action='store',
help='The kind of the entities to retrieve.')
parser.add_option('--exporter_opts', type='string', dest='exporter_opts',
help='A string to pass to the Exporter.initialize method.'
)
parser.add_option('--result_db_filename', type='string',
dest='result_db_filename',
action='store',
help='Database to write entities to for download.')
parser.add_option('--config_file', type='string', dest='config_file',
action='store',
help='Name of the configuration file.')
def _CreateBulkloadConfigOptions(self, parser):
"""Adds 'download_data' specific options to the 'parser' passed in.
Args:
parser: An instance of OptionsParser.
"""
self._PerformLoadOptions(parser)
parser.add_option('--filename', type='string', dest='filename',
action='store',
help='The name of the file where the generated template'
' is to be written. (Required)')
def ResourceLimitsInfo(self, output=None):
"""Outputs the current resource limits.
Args:
output: The file handle to write the output to (used for testing).
"""
appyaml = self._ParseAppInfoFromYaml(self.basepath)
resource_limits = GetResourceLimits(self._GetRpcServer(), appyaml)
for attr_name in sorted(resource_limits):
print >>output, '%s: %s' % (attr_name, resource_limits[attr_name])
class Action(object):
"""Contains information about a command line action.
Attributes:
function: The name of a function defined on AppCfg or its subclasses
that will perform the appropriate action.
usage: A command line usage string.
short_desc: A one-line description of the action.
long_desc: A detailed description of the action. Whitespace and
formatting will be preserved.
error_desc: An error message to display when the incorrect arguments are
given.
options: A function that will add extra options to a given OptionParser
object.
uses_basepath: Does the action use a basepath/app-directory (and hence
app.yaml).
hidden: Should this command be shown in the help listing.
"""
def __init__(self, function, usage, short_desc, long_desc='',
error_desc=None, options=lambda obj, parser: None,
uses_basepath=True, hidden=False):
"""Initializer for the class attributes."""
self.function = function
self.usage = usage
self.short_desc = short_desc
self.long_desc = long_desc
self.error_desc = error_desc
self.options = options
self.uses_basepath = uses_basepath
self.hidden = hidden
def __call__(self, appcfg):
"""Invoke this Action on the specified AppCfg.
This calls the function of the appropriate name on AppCfg, and
respects polymorphic overrides.
Args:
appcfg: The appcfg to use.
Returns:
The result of the function call.
"""
method = getattr(appcfg, self.function)
return method()
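# Maps command-line action names (including two-word backends subcommands) to their Action definitions.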
actions = {
'help': Action(
function='Help',
usage='%prog help <action>',
short_desc='Print help for a specific action.',
uses_basepath=False),
'update': Action(
function='Update',
usage='%prog [options] update <directory> | [file, ...]',
options=_UpdateOptions,
short_desc='Create or update an app version.',
long_desc="""
Specify a directory that contains all of the files required by
the app, and appcfg.py will create/update the app version referenced
in the app.yaml file at the top level of that directory. appcfg.py
will follow symlinks and recursively upload all files to the server.
Temporary or source control files (e.g. foo~, .svn/*) will be skipped.
If you are using the Modules feature, then you may prefer to pass multiple files
to update, rather than a directory, to specify which modules you would like
updated."""),
'download_app': Action(
function='DownloadApp',
usage='%prog [options] download_app -A app_id [ -V version ]'
' <out-dir>',
short_desc='Download a previously-uploaded app.',
long_desc="""
Download a previously-uploaded app to the specified directory. The app
ID is specified by the \"-A\" option. The optional version is specified
by the \"-V\" option.""",
uses_basepath=False),
'update_cron': Action(
function='UpdateCron',
usage='%prog [options] update_cron <directory>',
short_desc='Update application cron definitions.',
long_desc="""
The 'update_cron' command will update any new, removed or changed cron
definitions from the optional cron.yaml file."""),
'update_indexes': Action(
function='UpdateIndexes',
usage='%prog [options] update_indexes <directory>',
short_desc='Update application indexes.',
long_desc="""
The 'update_indexes' command will add additional indexes which are not currently
in production as well as restart any indexes that were not completed."""),
'update_queues': Action(
function='UpdateQueues',
usage='%prog [options] update_queues <directory>',
short_desc='Update application task queue definitions.',
long_desc="""
The 'update_queues' command will update any new, removed or changed task queue
definitions from the optional queue.yaml file."""),
'update_dispatch': Action(
function='UpdateDispatch',
usage='%prog [options] update_dispatch <directory>',
short_desc='Update application dispatch definitions.',
long_desc="""
The 'update_dispatch' command will update any new, removed or changed dispatch
definitions from the optional dispatch.yaml file."""),
'update_dos': Action(
function='UpdateDos',
usage='%prog [options] update_dos <directory>',
short_desc='Update application dos definitions.',
long_desc="""
The 'update_dos' command will update any new, removed or changed dos
definitions from the optional dos.yaml file."""),
'backends': Action(
function='BackendsAction',
usage='%prog [options] backends <directory> <action>',
short_desc='Perform a backend action.',
long_desc="""
The 'backends' command will perform a backends action.""",
error_desc="""\
Expected a <directory> and <action> argument."""),
'backends list': Action(
function='BackendsList',
usage='%prog [options] backends <directory> list',
short_desc='List all backends configured for the app.',
long_desc="""
The 'backends list' command will list all backends configured for the app."""),
'backends update': Action(
function='BackendsUpdate',
usage='%prog [options] backends <directory> update [<backend>]',
options=_UpdateOptions,
short_desc='Update one or more backends.',
long_desc="""
The 'backends update' command updates one or more backends. This command
updates backend configuration settings and deploys new code to the server. Any
existing instances will stop and be restarted. Updates all backends, or a
single backend if the <backend> argument is provided."""),
'backends rollback': Action(
function='BackendsRollback',
usage='%prog [options] backends <directory> rollback <backend>',
short_desc='Roll back an update of a backend.',
long_desc="""
The 'backends update' command requires a server-side transaction.
Use 'backends rollback' if you experience an error during 'backends update'
and want to start the update over again."""),
'backends start': Action(
function='BackendsStart',
usage='%prog [options] backends <directory> start <backend>',
short_desc='Start a backend.',
long_desc="""
The 'backends start' command will put a backend into the START state."""),
'backends stop': Action(
function='BackendsStop',
usage='%prog [options] backends <directory> stop <backend>',
short_desc='Stop a backend.',
long_desc="""
The 'backends stop' command will put a backend into the STOP state."""),
'backends delete': Action(
function='BackendsDelete',
usage='%prog [options] backends <directory> delete <backend>',
short_desc='Delete a backend.',
long_desc="""
The 'backends delete' command will delete a backend."""),
'backends configure': Action(
function='BackendsConfigure',
usage='%prog [options] backends <directory> configure <backend>',
short_desc='Reconfigure a backend without stopping it.',
long_desc="""
The 'backends configure' command performs an online update of a backend, without
stopping instances that are currently running. No code or handlers are updated,
only certain configuration settings specified in backends.yaml. Valid settings
are: instances, options: public, and options: failfast."""),
'vacuum_indexes': Action(
function='VacuumIndexes',
usage='%prog [options] vacuum_indexes <directory>',
options=_VacuumIndexesOptions,
short_desc='Delete unused indexes from application.',
long_desc="""
The 'vacuum_indexes' command will help clean up indexes which are no longer
in use. It does this by comparing the local index configuration with
indexes that are actually defined on the server. If any indexes on the
server do not exist in the index configuration file, the user is given the
option to delete them."""),
'rollback': Action(
function='Rollback',
usage='%prog [options] rollback <directory> | <file>',
short_desc='Rollback an in-progress update.',
long_desc="""
The 'update' command requires a server-side transaction.
Use 'rollback' if you experience an error during 'update'
and want to begin a new update transaction."""),
'request_logs': Action(
function='RequestLogs',
usage='%prog [options] request_logs [<directory>] <output_file>',
options=_RequestLogsOptions,
uses_basepath=False,
short_desc='Write request logs in Apache common log format.',
long_desc="""
The 'request_logs' command exports the request logs from your application
to a file. It will write Apache common log format records ordered
chronologically. If the output file is '-', the logs are written to stdout.""",
error_desc="""\
Expected an optional <directory> and mandatory <output_file> argument."""),
'cron_info': Action(
function='CronInfo',
usage='%prog [options] cron_info <directory>',
options=_CronInfoOptions,
short_desc='Display information about cron jobs.',
long_desc="""
The 'cron_info' command will display the next 'number' runs (default 5) for
each cron job defined in the cron.yaml file."""),
'start_module_version': Action(
function='StartModuleVersion',
uses_basepath=False,
usage='%prog [options] start_module_version [file, ...]',
short_desc='Start a module version.',
long_desc="""
The 'start_module_version' command will put a module version into the START
state."""),
'stop_module_version': Action(
function='StopModuleVersion',
uses_basepath=False,
usage='%prog [options] stop_module_version [file, ...]',
short_desc='Stop a module version.',
long_desc="""
The 'stop_module_version' command will put a module version into the STOP
state."""),
'upload_data': Action(
function='PerformUpload',
usage='%prog [options] upload_data <directory>',
options=_PerformUploadOptions,
short_desc='Upload data records to datastore.',
long_desc="""
The 'upload_data' command translates input records into datastore entities and
uploads them into your application's datastore.""",
uses_basepath=False),
'download_data': Action(
function='PerformDownload',
usage='%prog [options] download_data <directory>',
options=_PerformDownloadOptions,
short_desc='Download entities from datastore.',
long_desc="""
The 'download_data' command downloads datastore entities and writes them to
file as CSV or developer defined format.""",
uses_basepath=False),
'create_bulkloader_config': Action(
function='CreateBulkloadConfig',
usage='%prog [options] create_bulkload_config <directory>',
options=_CreateBulkloadConfigOptions,
short_desc='Create a bulkloader.yaml from a running application.',
long_desc="""
The 'create_bulkloader_config' command creates a bulkloader.yaml configuration
template for use with upload_data or download_data.""",
uses_basepath=False),
'set_default_version': Action(
function='SetDefaultVersion',
usage='%prog [options] set_default_version [directory]',
short_desc='Set the default (serving) version.',
long_desc="""
The 'set_default_version' command sets the default (serving) version of the app.
Defaults to using the application, version and module specified in app.yaml;
use the --application, --version and --module flags to override these values.
The --module flag can also be a comma-delimited string of several modules
(e.g. module1,module2,module3). In this case, the default version of each module
will be changed to the version specified.
The 'migrate_traffic' command can be thought of as a safer version of this
command.""",
uses_basepath=False),
'migrate_traffic': Action(
function='MigrateTraffic',
usage='%prog [options] migrate_traffic [directory]',
short_desc='Migrates traffic to another version.',
long_desc="""
The 'migrate_traffic' command gradually sends an increasing fraction of your
app's traffic from the current default version to another
version. Once all traffic has been migrated, the new version is set as the
default version.
app.yaml specifies the target application, version, and (optionally) module; use
the --application, --version and --module flags to override these values.
Can be thought of as an enhanced version of the 'set_default_version'
command.""",
uses_basepath=False,
hidden=True),
'resource_limits_info': Action(
function='ResourceLimitsInfo',
usage='%prog [options] resource_limits_info <directory>',
short_desc='Get the resource limits.',
long_desc="""
The 'resource_limits_info' command prints the current resource limits that
are enforced."""),
'list_versions': Action(
function='ListVersions',
usage='%prog [options] list_versions <directory>',
short_desc='List all uploaded versions for an app.',
long_desc="""
The 'list_versions' command outputs the uploaded versions for each module of
an application in YAML."""),
'delete_version': Action(
function='DeleteVersion',
usage='%prog [options] delete_version -A app_id -V version '
'[-M module]',
uses_basepath=False,
short_desc='Delete the specified version for an app.',
long_desc="""
The 'delete_version' command deletes the specified version for the specified
application."""),
'debug': Action(
function='DebugAction',
usage='%prog [options] debug -I instance [-A app_id] [-V version] '
' [-M module] [directory]',
options=_DebugActionOptions,
short_desc='Debug a vm runtime application.',
hidden=True,
uses_basepath=False,
long_desc="""
The 'debug' command configures a vm runtime instance to be accessible for
debugging."""),
}
def main(argv):
logging.basicConfig(format=('%(asctime)s %(levelname)s %(filename)s:'
'%(lineno)s %(message)s '))
try:
result = AppCfgApp(argv).Run()
if result:
sys.exit(result)
except KeyboardInterrupt:
StatusUpdate('Interrupted.')
sys.exit(1)
if __name__ == '__main__':
main(sys.argv)
|
[] |
[] |
[
"GOPATH",
"APPCFG_SDK_NAME"
] |
[]
|
["GOPATH", "APPCFG_SDK_NAME"]
|
python
| 2 | 0 | |
src/runtime/runtime-gdb_test.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"bytes"
"fmt"
"go/build"
"internal/testenv"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"testing"
)
func checkGdbEnvironment(t *testing.T) {
testenv.MustHaveGoBuild(t)
if runtime.GOOS == "darwin" {
t.Skip("gdb does not work on darwin")
}
if runtime.GOOS == "linux" && runtime.GOARCH == "ppc64" {
t.Skip("skipping gdb tests on linux/ppc64; see golang.org/issue/17366")
}
if final := os.Getenv("GOROOT_FINAL"); final != "" && runtime.GOROOT() != final {
t.Skip("gdb test can fail with GOROOT_FINAL pending")
}
}
func checkGdbVersion(t *testing.T) {
// Issue 11214 reports various failures with older versions of gdb.
out, err := exec.Command("gdb", "--version").CombinedOutput()
if err != nil {
t.Skipf("skipping: error executing gdb: %v", err)
}
re := regexp.MustCompile(`([0-9]+)\.([0-9]+)`)
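// A banner such as "GNU gdb (GDB) 7.12" (hypothetical sample output) yields major 7, minor 12.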
matches := re.FindSubmatch(out)
if len(matches) < 3 {
t.Skipf("skipping: can't determine gdb version from\n%s\n", out)
}
major, err1 := strconv.Atoi(string(matches[1]))
minor, err2 := strconv.Atoi(string(matches[2]))
if err1 != nil || err2 != nil {
t.Skipf("skipping: can't determine gdb version: %v, %v", err1, err2)
}
if major < 7 || (major == 7 && minor < 7) {
t.Skipf("skipping: gdb version %d.%d too old", major, minor)
}
t.Logf("gdb version %d.%d", major, minor)
}
func checkGdbPython(t *testing.T) {
cmd := exec.Command("gdb", "-nx", "-q", "--batch", "-iex", "python import sys; print('go gdb python support')")
out, err := cmd.CombinedOutput()
if err != nil {
t.Skipf("skipping due to issue running gdb: %v", err)
}
if string(out) != "go gdb python support\n" {
t.Skipf("skipping due to lack of python gdb support: %s", out)
}
}
const helloSource = `
import "fmt"
import "runtime"
var gslice []string
func main() {
mapvar := make(map[string]string,5)
mapvar["abc"] = "def"
mapvar["ghi"] = "jkl"
strvar := "abc"
ptrvar := &strvar
slicevar := make([]string, 0, 16)
slicevar = append(slicevar, mapvar["abc"])
fmt.Println("hi") // line 13
_ = ptrvar
gslice = slicevar
runtime.KeepAlive(mapvar)
}
`
func TestGdbPython(t *testing.T) {
testGdbPython(t, false)
}
func TestGdbPythonCgo(t *testing.T) {
if runtime.GOARCH == "mips" || runtime.GOARCH == "mipsle" || runtime.GOARCH == "mips64" {
testenv.SkipFlaky(t, 18784)
}
testGdbPython(t, true)
}
func testGdbPython(t *testing.T, cgo bool) {
if cgo && !build.Default.CgoEnabled {
t.Skip("skipping because cgo is not enabled")
}
t.Parallel()
checkGdbEnvironment(t)
checkGdbVersion(t)
checkGdbPython(t)
dir, err := ioutil.TempDir("", "go-build")
if err != nil {
t.Fatalf("failed to create temp directory: %v", err)
}
defer os.RemoveAll(dir)
var buf bytes.Buffer
buf.WriteString("package main\n")
if cgo {
buf.WriteString(`import "C"` + "\n")
}
buf.WriteString(helloSource)
src := filepath.Join(dir, "main.go")
err = ioutil.WriteFile(src, buf.Bytes(), 0644)
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "a.exe")
cmd.Dir = dir
out, err := testEnv(cmd).CombinedOutput()
if err != nil {
t.Fatalf("building source %v\n%s", err, out)
}
args := []string{"-nx", "-q", "--batch", "-iex",
fmt.Sprintf("add-auto-load-safe-path %s/src/runtime", runtime.GOROOT()),
"-ex", "set startup-with-shell off",
"-ex", "info auto-load python-scripts",
"-ex", "set python print-stack full",
"-ex", "br fmt.Println",
"-ex", "run",
"-ex", "echo BEGIN info goroutines\n",
"-ex", "info goroutines",
"-ex", "echo END\n",
"-ex", "up", // up from fmt.Println to main
"-ex", "echo BEGIN print mapvar\n",
"-ex", "print mapvar",
"-ex", "echo END\n",
"-ex", "echo BEGIN print strvar\n",
"-ex", "print strvar",
"-ex", "echo END\n",
"-ex", "echo BEGIN info locals\n",
"-ex", "info locals",
"-ex", "echo END\n",
"-ex", "down", // back to fmt.Println (goroutine 2 below only works at bottom of stack. TODO: fix that)
"-ex", "echo BEGIN goroutine 1 bt\n",
"-ex", "goroutine 1 bt",
"-ex", "echo END\n",
"-ex", "echo BEGIN goroutine 2 bt\n",
"-ex", "goroutine 2 bt",
"-ex", "echo END\n",
filepath.Join(dir, "a.exe"),
}
got, _ := exec.Command("gdb", args...).CombinedOutput()
firstLine := bytes.SplitN(got, []byte("\n"), 2)[0]
if string(firstLine) != "Loading Go Runtime support." {
// This can happen when using all.bash with
// GOROOT_FINAL set, because the tests are run before
// the final installation of the files.
cmd := exec.Command(testenv.GoToolPath(t), "env", "GOROOT")
cmd.Env = []string{}
out, err := cmd.CombinedOutput()
if err != nil && bytes.Contains(out, []byte("cannot find GOROOT")) {
t.Skipf("skipping because GOROOT=%s does not exist", runtime.GOROOT())
}
_, file, _, _ := runtime.Caller(1)
t.Logf("package testing source file: %s", file)
t.Fatalf("failed to load Go runtime support: %s\n%s", firstLine, got)
}
// Extract named BEGIN...END blocks from output
partRe := regexp.MustCompile(`(?ms)^BEGIN ([^\n]*)\n(.*?)\nEND`)
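// Each gdb command above is bracketed by "echo BEGIN <name>\n" and "echo END\n", so e.g. blocks["info goroutines"] holds the output of the "info goroutines" command.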
blocks := map[string]string{}
for _, subs := range partRe.FindAllSubmatch(got, -1) {
blocks[string(subs[1])] = string(subs[2])
}
infoGoroutinesRe := regexp.MustCompile(`\*\s+\d+\s+running\s+`)
if bl := blocks["info goroutines"]; !infoGoroutinesRe.MatchString(bl) {
t.Fatalf("info goroutines failed: %s", bl)
}
printMapvarRe := regexp.MustCompile(`\Q = map[string]string = {["abc"] = "def", ["ghi"] = "jkl"}\E$`)
if bl := blocks["print mapvar"]; !printMapvarRe.MatchString(bl) {
t.Fatalf("print mapvar failed: %s", bl)
}
strVarRe := regexp.MustCompile(`\Q = "abc"\E$`)
if bl := blocks["print strvar"]; !strVarRe.MatchString(bl) {
t.Fatalf("print strvar failed: %s", bl)
}
// Issue 16338: ssa decompose phase can split a structure into
// a collection of scalar vars holding the fields. In such cases
// the DWARF variable location expression should be of the
// form "var.field" and not just "field".
infoLocalsRe := regexp.MustCompile(`^slicevar.len = `)
if bl := blocks["info locals"]; !infoLocalsRe.MatchString(bl) {
t.Fatalf("info locals failed: %s", bl)
}
btGoroutine1Re := regexp.MustCompile(`(?m)^#0\s+(0x[0-9a-f]+\s+in\s+)?fmt\.Println.+at`)
if bl := blocks["goroutine 1 bt"]; !btGoroutine1Re.MatchString(bl) {
t.Fatalf("goroutine 1 bt failed: %s", bl)
}
btGoroutine2Re := regexp.MustCompile(`(?m)^#0\s+(0x[0-9a-f]+\s+in\s+)?runtime.+at`)
if bl := blocks["goroutine 2 bt"]; !btGoroutine2Re.MatchString(bl) {
t.Fatalf("goroutine 2 bt failed: %s", bl)
}
}
const backtraceSource = `
package main
//go:noinline
func aaa() bool { return bbb() }
//go:noinline
func bbb() bool { return ccc() }
//go:noinline
func ccc() bool { return ddd() }
//go:noinline
func ddd() bool { return f() }
//go:noinline
func eee() bool { return true }
var f = eee
func main() {
_ = aaa()
}
`
// TestGdbBacktrace tests that gdb can unwind the stack correctly
// using only the DWARF debug info.
func TestGdbBacktrace(t *testing.T) {
if runtime.GOOS == "netbsd" {
testenv.SkipFlaky(t, 15603)
}
t.Parallel()
checkGdbEnvironment(t)
checkGdbVersion(t)
dir, err := ioutil.TempDir("", "go-build")
if err != nil {
t.Fatalf("failed to create temp directory: %v", err)
}
defer os.RemoveAll(dir)
// Build the source code.
src := filepath.Join(dir, "main.go")
err = ioutil.WriteFile(src, []byte(backtraceSource), 0644)
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "a.exe")
cmd.Dir = dir
out, err := testEnv(cmd).CombinedOutput()
if err != nil {
t.Fatalf("building source %v\n%s", err, out)
}
// Execute gdb commands.
args := []string{"-nx", "-batch",
"-ex", "set startup-with-shell off",
"-ex", "break main.eee",
"-ex", "run",
"-ex", "backtrace",
"-ex", "continue",
filepath.Join(dir, "a.exe"),
}
got, _ := exec.Command("gdb", args...).CombinedOutput()
// Check that the backtrace matches the source code.
bt := []string{
"eee",
"ddd",
"ccc",
"bbb",
"aaa",
"main",
}
for i, name := range bt {
s := fmt.Sprintf("#%v.*main\\.%v", i, name)
re := regexp.MustCompile(s)
if found := re.Find(got) != nil; !found {
t.Errorf("could not find '%v' in backtrace", s)
t.Fatalf("gdb output:\n%v", string(got))
}
}
}
const autotmpTypeSource = `
package main
type astruct struct {
a, b int
}
func main() {
var iface interface{} = map[string]astruct{}
var iface2 interface{} = []astruct{}
println(iface, iface2)
}
`
// TestGdbAutotmpTypes ensures that types of autotmp variables appear in .debug_info
// See bug #17830.
func TestGdbAutotmpTypes(t *testing.T) {
t.Parallel()
checkGdbEnvironment(t)
checkGdbVersion(t)
dir, err := ioutil.TempDir("", "go-build")
if err != nil {
t.Fatalf("failed to create temp directory: %v", err)
}
defer os.RemoveAll(dir)
// Build the source code.
src := filepath.Join(dir, "main.go")
err = ioutil.WriteFile(src, []byte(autotmpTypeSource), 0644)
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-gcflags=-N -l", "-o", "a.exe")
cmd.Dir = dir
out, err := testEnv(cmd).CombinedOutput()
if err != nil {
t.Fatalf("building source %v\n%s", err, out)
}
// Execute gdb commands.
args := []string{"-nx", "-batch",
"-ex", "set startup-with-shell off",
"-ex", "break main.main",
"-ex", "run",
"-ex", "step",
"-ex", "info types astruct",
filepath.Join(dir, "a.exe"),
}
got, _ := exec.Command("gdb", args...).CombinedOutput()
sgot := string(got)
// Check that the expected autotmp types appear in the gdb output.
types := []string{
"struct []main.astruct;",
"struct bucket<string,main.astruct>;",
"struct hash<string,main.astruct>;",
"struct main.astruct;",
"typedef struct hash<string,main.astruct> * map[string]main.astruct;",
}
for _, name := range types {
if !strings.Contains(sgot, name) {
t.Errorf("could not find %s in 'info typrs astruct' output", name)
t.Fatalf("gdb output:\n%v", sgot)
}
}
}
|
[
"\"GOROOT_FINAL\""
] |
[] |
[
"GOROOT_FINAL"
] |
[]
|
["GOROOT_FINAL"]
|
go
| 1 | 0 | |
tests/test_socless.py
|
# # Copyright 2018 Twilio, Inc.
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License
from tests.conftest import * # imports testing boilerplate
from socless.socless import fetch_actual_parameters, fetch_from_vault, socless_save_to_vault, parse_parameters, apply_conversion_from, socless_template_string
# initialize test data
TEST_DATA = {
"data": {
"string": "value",
"nested_ref": "this_value_is_nested",
"vault_id": "vault:socless_vault_tests.txt",
"vault_id_json": "vault:socless_vault_tests.json",
"object": {"test": "hello world"},
"string_json": '["hello","world"]'
},
"referencer": {
"param": "$.data.parameter"
},
"statics": {
'list': ['1','2','3']
},
"nested_referer": {
"object": {
"top_level": "$.data.nested_ref"
}
}
}
PARSE_TEST_DATA = {
"_testing": True,
"this": "test",
"artifacts": {
"event": {
"vault_data": "vault:socless_vault_tests.txt",
"text": "this is the value at path",
"nested": "this_value_is_nested"
}
},
"parameters": {
"test": {
"string": "hello world",
"path": "$.artifacts.event.text",
"vault": "$.artifacts.event.vault_data",
"nested_ref": {
"nester": "$.artifacts.event.nested"
}
}
}
}
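# Values such as "$.artifacts.event.text" are JSONPath references resolved against PARSE_TEST_DATA, and "vault:..." values are fetched from the test vault; EXPECTED_RESULTS below holds the resolved form.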
EXPECTED_RESULTS = {
"string": PARSE_TEST_DATA['parameters']['test']['string'],
"path": PARSE_TEST_DATA['artifacts']['event']['text'],
"vault": "this came from the vault",
"nested_ref": {
"nester": PARSE_TEST_DATA['artifacts']['event']['nested']
}
}
context = {
"safe_string": "Elliot Alderson",
"unsafe_string": "<script>alert('Elliot Alderson')</script>",
"dict": {
"safe_string": "Elliot Alderson",
"unsafe_string": "<script>alert('Elliot Alderson')</script>"
},
"unicodelist": ['hello','world']
}
def test_path_pointing_to_string():
assert fetch_actual_parameters("$.data.string",TEST_DATA) == TEST_DATA["data"]["string"]
def test_path_pointing_to_vault_id():
assert fetch_actual_parameters("$.data.vault_id",TEST_DATA) == "this came from the vault"
def test_path_pointing_to_object():
assert fetch_actual_parameters("$.data.object",TEST_DATA) == TEST_DATA["data"]["object"]
def test_with_list_input():
assert fetch_actual_parameters(TEST_DATA['statics']['list'],TEST_DATA) == TEST_DATA['statics']['list']
def test_with_object_input():
assert fetch_actual_parameters(TEST_DATA['data']['object'],TEST_DATA) == TEST_DATA['data']['object']
def test_jsonpath_with_json_conversion():
assert fetch_actual_parameters("$.data.string_json!json", TEST_DATA) == ["hello","world"]
def test_vault_path_with_json_conversion():
assert fetch_actual_parameters("vault:socless_vault_tests.json!json",TEST_DATA) == {'hello':'world'}
def test_json_path_to_vault_path_with_conversion():
assert fetch_actual_parameters("$.data.vault_id_json!json",TEST_DATA) == {'hello':'world'}
def test_nested_reference():
assert fetch_actual_parameters(TEST_DATA['nested_referer']['object'], TEST_DATA) == {'top_level': 'this_value_is_nested'}
def test_parse_parameters():
assert parse_parameters(PARSE_TEST_DATA, None) == EXPECTED_RESULTS
def test_socless_save_to_vault_saves_content_correctly():
CONTENT_STRING = "Hello there!"
result = socless_save_to_vault(CONTENT_STRING)
s3 = boto3.resource('s3')
content = s3.Bucket(os.environ['SOCLESS_VAULT']).Object(result['file_id']).get()['Body'].read().decode('utf-8')
assert content == CONTENT_STRING
def test_socless_fetch_from_vault():
assert fetch_from_vault('socless_vault_tests.txt') == {"content": "this came from the vault"}
def test_conversion_from_json():
assert apply_conversion_from('["hello", "world"]',"json") == ['hello', 'world']
def test_safe_string():
assert socless_template_string("Hello {context.safe_string}", context) == "Hello Elliot Alderson"
def test_unsafe_string():
assert socless_template_string("Hello {context.unsafe_string}", context) == "Hello <script>alert('Elliot Alderson')</script>"
def test_dictionary_reference():
assert socless_template_string("Hello {context.dict}", context) == """Hello {'safe_string': 'Elliot Alderson', 'unsafe_string': "<script>alert('Elliot Alderson')</script>"}"""
def test_maptostr():
assert socless_template_string("{context.unicodelist|maptostr}", context) == "{}".format(['hello','world'])
|
[] |
[] |
[
"SOCLESS_VAULT"
] |
[]
|
["SOCLESS_VAULT"]
|
python
| 1 | 0 | |
example/virtualaccount/main.go
|
package main
import (
"fmt"
"log"
"os"
"time"
"github.com/hfajunakaliantiga/xendit-go"
"github.com/hfajunakaliantiga/xendit-go/virtualaccount"
"github.com/joho/godotenv"
)
func main() {
godotenvErr := godotenv.Load()
if godotenvErr != nil {
log.Fatal(godotenvErr)
}
xendit.Opt.SecretKey = os.Getenv("SECRET_KEY")
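// The secret key comes from the SECRET_KEY entry of the .env file loaded by godotenv above.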
availableBanks, err := virtualaccount.GetAvailableBanks()
if err != nil {
log.Fatal(err)
}
fmt.Printf("available va banks: %+v\n", availableBanks)
createFixedVAData := virtualaccount.CreateFixedVAParams{
ExternalID: "va-" + time.Now().String(),
BankCode: availableBanks[0].Code,
Name: "Michael Jackson",
}
resp, err := virtualaccount.CreateFixedVA(&createFixedVAData)
if err != nil {
log.Fatal(err)
}
fmt.Printf("created fixed va: %+v\n", resp)
getFixedVAData := virtualaccount.GetFixedVAParams{
ID: resp.ID,
}
resp, err = virtualaccount.GetFixedVA(&getFixedVAData)
if err != nil {
log.Fatal(err)
}
fmt.Printf("retrieved fixed va: %+v\n", resp)
expirationDate := time.Now().AddDate(0, 0, 1)
updateFixedVAData := virtualaccount.UpdateFixedVAParams{
ID: resp.ID,
ExpirationDate: &expirationDate,
}
resp, err = virtualaccount.UpdateFixedVA(&updateFixedVAData)
if err != nil {
log.Fatal(err)
}
fmt.Printf("updated fixed va: %+v\n", resp)
// Before running this example, create a fixed virtual account payment simulation
// by making a POST request to
// https://api.xendit.co/callback_virtual_accounts/external_id=<FVA external ID>/simulate_payment
payment, err := virtualaccount.GetPayment(&virtualaccount.GetPaymentParams{
PaymentID: "VA_fixed-1580285972",
})
if err != nil {
log.Fatal(err)
}
fmt.Printf("retrieved va payment: %+v\n", payment)
}
|
[
"\"SECRET_KEY\""
] |
[] |
[
"SECRET_KEY"
] |
[]
|
["SECRET_KEY"]
|
go
| 1 | 0 | |
pkg/tcpip/link/sharedmem/sharedmem_test.go
|
// Copyright 2016 The Netstack Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sharedmem
import (
"io/ioutil"
"math/rand"
"os"
"reflect"
"sync"
"syscall"
"testing"
"time"
"gvisor.googlesource.com/gvisor/pkg/tcpip"
"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer"
"gvisor.googlesource.com/gvisor/pkg/tcpip/header"
"gvisor.googlesource.com/gvisor/pkg/tcpip/link/sharedmem/pipe"
"gvisor.googlesource.com/gvisor/pkg/tcpip/link/sharedmem/queue"
"gvisor.googlesource.com/gvisor/pkg/tcpip/stack"
)
const (
localLinkAddr = "\xde\xad\xbe\xef\x56\x78"
remoteLinkAddr = "\xde\xad\xbe\xef\x12\x34"
queueDataSize = 1024 * 1024
queuePipeSize = 4096
)
type queueBuffers struct {
data []byte
rx pipe.Tx
tx pipe.Rx
}
func initQueue(t *testing.T, q *queueBuffers, c *QueueConfig) {
// Prepare tx pipe.
b, err := getBuffer(c.TxPipeFD)
if err != nil {
t.Fatalf("getBuffer failed: %v", err)
}
q.tx.Init(b)
// Prepare rx pipe.
b, err = getBuffer(c.RxPipeFD)
if err != nil {
t.Fatalf("getBuffer failed: %v", err)
}
q.rx.Init(b)
// Get data slice.
q.data, err = getBuffer(c.DataFD)
if err != nil {
t.Fatalf("getBuffer failed: %v", err)
}
}
func (q *queueBuffers) cleanup() {
syscall.Munmap(q.tx.Bytes())
syscall.Munmap(q.rx.Bytes())
syscall.Munmap(q.data)
}
type packetInfo struct {
addr tcpip.LinkAddress
proto tcpip.NetworkProtocolNumber
vv buffer.VectorisedView
}
type testContext struct {
t *testing.T
ep *endpoint
txCfg QueueConfig
rxCfg QueueConfig
txq queueBuffers
rxq queueBuffers
packetCh chan struct{}
mu sync.Mutex
packets []packetInfo
}
func newTestContext(t *testing.T, mtu, bufferSize uint32, addr tcpip.LinkAddress) *testContext {
var err error
c := &testContext{
t: t,
packetCh: make(chan struct{}, 1000000),
}
c.txCfg = createQueueFDs(t, queueSizes{
dataSize: queueDataSize,
txPipeSize: queuePipeSize,
rxPipeSize: queuePipeSize,
sharedDataSize: 4096,
})
c.rxCfg = createQueueFDs(t, queueSizes{
dataSize: queueDataSize,
txPipeSize: queuePipeSize,
rxPipeSize: queuePipeSize,
sharedDataSize: 4096,
})
initQueue(t, &c.txq, &c.txCfg)
initQueue(t, &c.rxq, &c.rxCfg)
id, err := New(mtu, bufferSize, addr, c.txCfg, c.rxCfg)
if err != nil {
t.Fatalf("New failed: %v", err)
}
c.ep = stack.FindLinkEndpoint(id).(*endpoint)
c.ep.Attach(c)
return c
}
func (c *testContext) DeliverNetworkPacket(_ stack.LinkEndpoint, remoteAddr tcpip.LinkAddress, proto tcpip.NetworkProtocolNumber, vv *buffer.VectorisedView) {
c.mu.Lock()
c.packets = append(c.packets, packetInfo{
addr: remoteAddr,
proto: proto,
vv: vv.Clone(nil),
})
c.mu.Unlock()
c.packetCh <- struct{}{}
}
func (c *testContext) cleanup() {
c.ep.Close()
closeFDs(&c.txCfg)
closeFDs(&c.rxCfg)
c.txq.cleanup()
c.rxq.cleanup()
}
func (c *testContext) waitForPackets(n int, to <-chan time.Time, errorStr string) {
for i := 0; i < n; i++ {
select {
case <-c.packetCh:
case <-to:
c.t.Fatal(errorStr)
}
}
}
func (c *testContext) pushRxCompletion(size uint32, bs []queue.RxBuffer) {
b := c.rxq.rx.Push(queue.RxCompletionSize(len(bs)))
queue.EncodeRxCompletion(b, size, 0)
for i := range bs {
queue.EncodeRxCompletionBuffer(b, i, queue.RxBuffer{
Offset: bs[i].Offset,
Size: bs[i].Size,
ID: bs[i].ID,
})
}
}
func randomFill(b []byte) {
for i := range b {
b[i] = byte(rand.Intn(256))
}
}
func shuffle(b []int) {
for i := len(b) - 1; i >= 0; i-- {
j := rand.Intn(i + 1)
b[i], b[j] = b[j], b[i]
}
}
func createFile(t *testing.T, size int64, initQueue bool) int {
tmpDir := os.Getenv("TEST_TMPDIR")
if tmpDir == "" {
tmpDir = os.Getenv("TMPDIR")
}
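// An empty tmpDir makes ioutil.TempFile fall back to the system default temp directory.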
f, err := ioutil.TempFile(tmpDir, "sharedmem_test")
if err != nil {
t.Fatalf("TempFile failed: %v", err)
}
defer f.Close()
syscall.Unlink(f.Name())
if initQueue {
// Write the "slot-free" flag in the initial queue.
_, err := f.WriteAt([]byte{0, 0, 0, 0, 0, 0, 0, 0x80}, 0)
if err != nil {
t.Fatalf("WriteAt failed: %v", err)
}
}
fd, err := syscall.Dup(int(f.Fd()))
if err != nil {
t.Fatalf("Dup failed: %v", err)
}
if err := syscall.Ftruncate(fd, size); err != nil {
syscall.Close(fd)
t.Fatalf("Ftruncate failed: %v", err)
}
return fd
}
func closeFDs(c *QueueConfig) {
syscall.Close(c.DataFD)
syscall.Close(c.EventFD)
syscall.Close(c.TxPipeFD)
syscall.Close(c.RxPipeFD)
syscall.Close(c.SharedDataFD)
}
type queueSizes struct {
dataSize int64
txPipeSize int64
rxPipeSize int64
sharedDataSize int64
}
func createQueueFDs(t *testing.T, s queueSizes) QueueConfig {
fd, _, err := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, 0, 0)
if err != 0 {
t.Fatalf("eventfd failed: %v", error(err))
}
return QueueConfig{
EventFD: int(fd),
DataFD: createFile(t, s.dataSize, false),
TxPipeFD: createFile(t, s.txPipeSize, true),
RxPipeFD: createFile(t, s.rxPipeSize, true),
SharedDataFD: createFile(t, s.sharedDataSize, false),
}
}
// TestSimpleSend sends 1000 packets with random header and payload sizes,
// then checks that the right payload is received on the shared memory queues.
func TestSimpleSend(t *testing.T) {
c := newTestContext(t, 20000, 1500, localLinkAddr)
defer c.cleanup()
// Prepare route.
r := stack.Route{
RemoteLinkAddress: remoteLinkAddr,
}
for iters := 1000; iters > 0; iters-- {
// Prepare and send packet.
n := rand.Intn(10000)
hdr := buffer.NewPrependable(n + int(c.ep.MaxHeaderLength()))
hdrBuf := hdr.Prepend(n)
randomFill(hdrBuf)
n = rand.Intn(10000)
buf := buffer.NewView(n)
randomFill(buf)
proto := tcpip.NetworkProtocolNumber(rand.Intn(0x10000))
err := c.ep.WritePacket(&r, &hdr, buf, proto)
if err != nil {
t.Fatalf("WritePacket failed: %v", err)
}
// Receive packet.
desc := c.txq.tx.Pull()
pi := queue.DecodeTxPacketHeader(desc)
contents := make([]byte, 0, pi.Size)
for i := 0; i < pi.BufferCount; i++ {
bi := queue.DecodeTxBufferHeader(desc, i)
contents = append(contents, c.txq.data[bi.Offset:][:bi.Size]...)
}
c.txq.tx.Flush()
if pi.Reserved != 0 {
t.Fatalf("Reserved value is non-zero: 0x%x", pi.Reserved)
}
// Check the ethernet header.
ethTemplate := make(header.Ethernet, header.EthernetMinimumSize)
ethTemplate.Encode(&header.EthernetFields{
SrcAddr: localLinkAddr,
DstAddr: remoteLinkAddr,
Type: proto,
})
if got := contents[:header.EthernetMinimumSize]; !reflect.DeepEqual(got, []byte(ethTemplate)) {
t.Fatalf("Bad ethernet header in packet: got %x, want %x", got, ethTemplate)
}
// Compare contents skipping the ethernet header added by the
// endpoint.
merged := append(hdrBuf, buf...)
if uint32(len(contents)) < pi.Size {
t.Fatalf("Sum of buffers is less than packet size: %v < %v", len(contents), pi.Size)
}
contents = contents[:pi.Size][header.EthernetMinimumSize:]
if !reflect.DeepEqual(contents, merged) {
t.Fatalf("Buffers are different: got %x (%v bytes), want %x (%v bytes)", contents, len(contents), merged, len(merged))
}
// Tell the endpoint about the completion of the write.
b := c.txq.rx.Push(8)
queue.EncodeTxCompletion(b, pi.ID)
c.txq.rx.Flush()
}
}
// TestFillTxQueue sends packets until the queue is full.
func TestFillTxQueue(t *testing.T) {
c := newTestContext(t, 20000, 1500, localLinkAddr)
defer c.cleanup()
// Prepare to send a packet.
r := stack.Route{
RemoteLinkAddress: remoteLinkAddr,
}
buf := buffer.NewView(100)
// Each packet uses no more than 40 bytes, so write that many packets
// until the tx queue is full.
ids := make(map[uint64]struct{})
for i := queuePipeSize / 40; i > 0; i-- {
hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength()))
if err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber); err != nil {
t.Fatalf("WritePacket failed unexpectedly: %v", err)
}
// Check that they have different IDs.
desc := c.txq.tx.Pull()
pi := queue.DecodeTxPacketHeader(desc)
if _, ok := ids[pi.ID]; ok {
t.Fatalf("ID (%v) reused", pi.ID)
}
ids[pi.ID] = struct{}{}
}
// Next attempt to write must fail.
hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength()))
err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber)
if want := tcpip.ErrWouldBlock; err != want {
t.Fatalf("WritePacket return unexpected result: got %v, want %v", err, want)
}
}
// TestFillTxQueueAfterBadCompletion sends a bad completion, then sends packets
// until the queue is full.
func TestFillTxQueueAfterBadCompletion(t *testing.T) {
c := newTestContext(t, 20000, 1500, localLinkAddr)
defer c.cleanup()
// Send a bad completion.
queue.EncodeTxCompletion(c.txq.rx.Push(8), 1)
c.txq.rx.Flush()
// Prepare to send a packet.
r := stack.Route{
RemoteLinkAddress: remoteLinkAddr,
}
buf := buffer.NewView(100)
// Send two packets so that the id slice has at least two slots.
for i := 2; i > 0; i-- {
hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength()))
if err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber); err != nil {
t.Fatalf("WritePacket failed unexpectedly: %v", err)
}
}
// Complete the two writes twice.
for i := 2; i > 0; i-- {
pi := queue.DecodeTxPacketHeader(c.txq.tx.Pull())
queue.EncodeTxCompletion(c.txq.rx.Push(8), pi.ID)
queue.EncodeTxCompletion(c.txq.rx.Push(8), pi.ID)
c.txq.rx.Flush()
}
c.txq.tx.Flush()
// Each packet uses no more than 40 bytes, so write that many packets
// until the tx queue is full.
ids := make(map[uint64]struct{})
for i := queuePipeSize / 40; i > 0; i-- {
hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength()))
if err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber); err != nil {
t.Fatalf("WritePacket failed unexpectedly: %v", err)
}
// Check that they have different IDs.
desc := c.txq.tx.Pull()
pi := queue.DecodeTxPacketHeader(desc)
if _, ok := ids[pi.ID]; ok {
t.Fatalf("ID (%v) reused", pi.ID)
}
ids[pi.ID] = struct{}{}
}
// Next attempt to write must fail.
hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength()))
err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber)
if want := tcpip.ErrWouldBlock; err != want {
t.Fatalf("WritePacket return unexpected result: got %v, want %v", err, want)
}
}
// TestFillTxMemory sends packets until we run out of shared memory.
func TestFillTxMemory(t *testing.T) {
const bufferSize = 1500
c := newTestContext(t, 20000, bufferSize, localLinkAddr)
defer c.cleanup()
// Prepare to send a packet.
r := stack.Route{
RemoteLinkAddress: remoteLinkAddr,
}
buf := buffer.NewView(100)
// Each packet uses up one buffer, so write as many as possible until
// we fill the memory.
ids := make(map[uint64]struct{})
for i := queueDataSize / bufferSize; i > 0; i-- {
hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength()))
if err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber); err != nil {
t.Fatalf("WritePacket failed unexpectedly: %v", err)
}
// Check that they have different IDs.
desc := c.txq.tx.Pull()
pi := queue.DecodeTxPacketHeader(desc)
if _, ok := ids[pi.ID]; ok {
t.Fatalf("ID (%v) reused", pi.ID)
}
ids[pi.ID] = struct{}{}
c.txq.tx.Flush()
}
// Next attempt to write must fail.
hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength()))
err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber)
if want := tcpip.ErrWouldBlock; err != want {
t.Fatalf("WritePacket return unexpected result: got %v, want %v", err, want)
}
}
// TestFillTxMemoryWithMultiBuffer sends packets until we run out of
// shared memory for a 2-buffer packet, but still with room for a 1-buffer
// packet.
func TestFillTxMemoryWithMultiBuffer(t *testing.T) {
const bufferSize = 1500
c := newTestContext(t, 20000, bufferSize, localLinkAddr)
defer c.cleanup()
// Prepare to send a packet.
r := stack.Route{
RemoteLinkAddress: remoteLinkAddr,
}
buf := buffer.NewView(100)
// Each packet uses up one buffer, so write as many as possible
// until there is only one buffer left.
for i := queueDataSize/bufferSize - 1; i > 0; i-- {
hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength()))
if err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber); err != nil {
t.Fatalf("WritePacket failed unexpectedly: %v", err)
}
// Pull the posted buffer.
c.txq.tx.Pull()
c.txq.tx.Flush()
}
// Attempt to write a two-buffer packet. It must fail.
hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength()))
err := c.ep.WritePacket(&r, &hdr, buffer.NewView(bufferSize), header.IPv4ProtocolNumber)
if want := tcpip.ErrWouldBlock; err != want {
t.Fatalf("WritePacket return unexpected result: got %v, want %v", err, want)
}
// Attempt to write a one-buffer packet. It must succeed.
hdr = buffer.NewPrependable(int(c.ep.MaxHeaderLength()))
if err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber); err != nil {
t.Fatalf("WritePacket failed unexpectedly: %v", err)
}
}
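// pollPull repeatedly tries to pull a buffer from the given pipe, retrying
// every 10ms, and fails the test with errStr if the timeout channel fires
// before a buffer becomes available.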
func pollPull(t *testing.T, p *pipe.Rx, to <-chan time.Time, errStr string) []byte {
for {
b := p.Pull()
if b != nil {
return b
}
select {
case <-time.After(10 * time.Millisecond):
case <-to:
t.Fatal(errStr)
}
}
}
// TestSimpleReceive completes 1000 different receives with random payload and
// random number of buffers. It checks that the contents match the expected
// values.
func TestSimpleReceive(t *testing.T) {
const bufferSize = 1500
c := newTestContext(t, 20000, bufferSize, localLinkAddr)
defer c.cleanup()
// Check that buffers have been posted.
limit := c.ep.rx.q.PostedBuffersLimit()
timeout := time.After(2 * time.Second)
for i := uint64(0); i < limit; i++ {
bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for all buffers to be posted"))
if want := i * bufferSize; want != bi.Offset {
t.Fatalf("Bad posted offset: got %v, want %v", bi.Offset, want)
}
if want := i; want != bi.ID {
t.Fatalf("Bad posted ID: got %v, want %v", bi.ID, want)
}
if bufferSize != bi.Size {
t.Fatalf("Bad posted bufferSize: got %v, want %v", bi.Size, bufferSize)
}
}
c.rxq.tx.Flush()
// Create a slice with the indices 0..limit-1.
idx := make([]int, limit)
for i := range idx {
idx[i] = i
}
// Complete random packets 1000 times.
for iters := 1000; iters > 0; iters-- {
// Prepare a random packet.
shuffle(idx)
n := 1 + rand.Intn(10)
bufs := make([]queue.RxBuffer, n)
contents := make([]byte, bufferSize*n-rand.Intn(500))
randomFill(contents)
for i := range bufs {
j := idx[i]
bufs[i].Size = bufferSize
bufs[i].Offset = uint64(bufferSize * j)
bufs[i].ID = uint64(j)
copy(c.rxq.data[bufs[i].Offset:][:bufferSize], contents[i*bufferSize:])
}
// Push completion.
c.pushRxCompletion(uint32(len(contents)), bufs)
c.rxq.rx.Flush()
syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
// Wait for packet to be received, then check it.
c.waitForPackets(1, time.After(time.Second), "Error waiting for packet")
c.mu.Lock()
rcvd := []byte(c.packets[0].vv.First())
c.packets = c.packets[:0]
c.mu.Unlock()
contents = contents[header.EthernetMinimumSize:]
if !reflect.DeepEqual(contents, rcvd) {
t.Fatalf("Unexpected buffer contents: got %x, want %x", rcvd, contents)
}
// Check that buffers have been reposted.
for i := range bufs {
bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffers to be reposted"))
if !reflect.DeepEqual(bi, bufs[i]) {
t.Fatalf("Unexpected buffer reposted: got %x, want %x", bi, bufs[i])
}
}
c.rxq.tx.Flush()
}
}
// TestRxBuffersReposted tests that rx buffers get reposted after they have been
// completed.
func TestRxBuffersReposted(t *testing.T) {
const bufferSize = 1500
c := newTestContext(t, 20000, bufferSize, localLinkAddr)
defer c.cleanup()
// Receive all posted buffers.
limit := c.ep.rx.q.PostedBuffersLimit()
buffers := make([]queue.RxBuffer, 0, limit)
timeout := time.After(2 * time.Second)
for i := limit; i > 0; i-- {
buffers = append(buffers, queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for all buffers")))
}
c.rxq.tx.Flush()
// Check that all buffers are reposted when individually completed.
timeout = time.After(2 * time.Second)
for i := range buffers {
// Complete the buffer.
c.pushRxCompletion(buffers[i].Size, buffers[i:][:1])
c.rxq.rx.Flush()
syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
// Wait for it to be reposted.
bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffer to be reposted"))
if !reflect.DeepEqual(bi, buffers[i]) {
t.Fatalf("Different buffer posted: got %v, want %v", bi, buffers[i])
}
}
c.rxq.tx.Flush()
// Check that all buffers are reposted when completed in pairs.
timeout = time.After(2 * time.Second)
for i := 0; i < len(buffers)/2; i++ {
// Complete with two buffers.
c.pushRxCompletion(2*bufferSize, buffers[2*i:][:2])
c.rxq.rx.Flush()
syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
// Wait for them to be reposted.
bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffer to be reposted"))
if !reflect.DeepEqual(bi, buffers[2*i]) {
t.Fatalf("Different buffer posted: got %v, want %v", bi, buffers[2*i])
}
bi = queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffer to be reposted"))
if !reflect.DeepEqual(bi, buffers[2*i+1]) {
t.Fatalf("Different buffer posted: got %v, want %v", bi, buffers[2*i+1])
}
}
c.rxq.tx.Flush()
}
// TestReceivePostingIsFull checks that the endpoint will properly handle the
// case when a received buffer cannot be immediately reposted because it hasn't
// been pulled from the tx pipe yet.
func TestReceivePostingIsFull(t *testing.T) {
const bufferSize = 1500
c := newTestContext(t, 20000, bufferSize, localLinkAddr)
defer c.cleanup()
// Complete first posted buffer before flushing it from the tx pipe.
first := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, time.After(time.Second), "Timeout waiting for first buffer to be posted"))
c.pushRxCompletion(first.Size, []queue.RxBuffer{first})
c.rxq.rx.Flush()
syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
// Check that packet is received.
c.waitForPackets(1, time.After(time.Second), "Timeout waiting for completed packet")
// Complete another buffer.
second := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, time.After(time.Second), "Timeout waiting for second buffer to be posted"))
c.pushRxCompletion(second.Size, []queue.RxBuffer{second})
c.rxq.rx.Flush()
syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
// Check that no packet is received yet, as the worker is blocked trying
// to repost.
select {
case <-time.After(500 * time.Millisecond):
case <-c.packetCh:
t.Fatalf("Unexpected packet received")
}
// Flush tx queue, which will allow the first buffer to be reposted,
// and the second completion to be pulled.
c.rxq.tx.Flush()
syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
// Check that second packet completes.
c.waitForPackets(1, time.After(time.Second), "Timeout waiting for second completed packet")
}
// TestCloseWhileWaitingToPost closes the endpoint while it is waiting to
// repost a buffer. Make sure it backs out.
func TestCloseWhileWaitingToPost(t *testing.T) {
const bufferSize = 1500
c := newTestContext(t, 20000, bufferSize, localLinkAddr)
cleaned := false
defer func() {
if !cleaned {
c.cleanup()
}
}()
// Complete first posted buffer before flushing it from the tx pipe.
bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, time.After(time.Second), "Timeout waiting for initial buffer to be posted"))
c.pushRxCompletion(bi.Size, []queue.RxBuffer{bi})
c.rxq.rx.Flush()
syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
// Wait for packet to be indicated.
c.waitForPackets(1, time.After(time.Second), "Timeout waiting for completed packet")
// Cleanup and wait for worker to complete.
c.cleanup()
cleaned = true
c.ep.Wait()
}
| ["\"TEST_TMPDIR\"", "\"TMPDIR\""] | [] | ["TEST_TMPDIR", "TMPDIR"] | [] | ["TEST_TMPDIR", "TMPDIR"] | go | 2 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awwards.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
docs/source/conf.py
|
# -*- coding: utf-8 -*-
#
# Django site maintenance documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 5 19:11:46 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
here = os.path.abspath(os.path.join(os.path.dirname(__file__)))
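# up(base, level) resolves the path `level` directories above `base`; it is used
# below to put the project root (two levels above docs/source) on sys.path so
# that `adminactions` can be imported.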
up = lambda base, level: os.path.abspath(os.path.join(base, *([os.pardir] * level)))
sys.path.insert(0, up(here, 2))
import adminactions as app
from django.conf import settings
settings.configure(SITE_ID=1)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.graphviz',
'sphinx.ext.intersphinx',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
# 'djangodocs',
'version',
'github']
#issuetracker = 'github'
#issuetracker_project = 'saxix/django-adminactions'
#issuetracker_plaintext_issues = True
next_version = '0.3'
github_project_url = 'https://github.com/saxix/django-adminactions/'
github_project_url_basesource = 'https://github.com/saxix/django-adminactions/'
todo_include_todos = True
intersphinx_mapping = {
'python': ('http://python.readthedocs.org/en/latest/', None),
'django': ('http://django.readthedocs.org/en/1.5.x/', None),
'sphinx': ('http://sphinx.readthedocs.org/en/latest/', None)}
intersphinx_cache_limit = 90 # days
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Admin Actions'
copyright = u'2012, Stefano Apostolico'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = app.get_version()
# The full version, including alpha/beta/rc tags.
release = app.get_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# HTML translator class for the builder
# html_translator_class = "version.HTMLTranslator"
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'djangoadminactionsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoAdminActions.tex', u"Django Admin Actions Documentation",
u'Stefano Apostolico', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangoadminactions', u"Django Admin Actions Documentation",
[u'Stefano Apostolico'], 1)
]
| [] | [] | ["READTHEDOCS"] | [] | ["READTHEDOCS"] | python | 1 | 0 | |
storage/objects/main.go
|
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// Sample objects creates, lists, and deletes objects and runs
// other similar operations on them by using the Google Storage API.
// More documentation is available at
// https://cloud.google.com/storage/docs/json_api/v1/.
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
"google.golang.org/api/iterator"
"golang.org/x/net/context"
"cloud.google.com/go/storage"
)
func main() {
projectID := os.Getenv("GOOGLE_CLOUD_PROJECT")
if projectID == "" {
fmt.Fprintf(os.Stderr, "GOOGLE_CLOUD_PROJECT environment variable must be set.\n")
os.Exit(1)
}
var o string
flag.StringVar(&o, "o", "", "source object; in the format of <bucket:object>")
flag.Parse()
names := strings.Split(o, ":")
if len(names) < 2 {
usage("missing -o flag")
}
bucket, object := names[0], names[1]
if len(os.Args) < 2 {
usage("missing subcommand")
}
ctx := context.Background()
client, err := storage.NewClient(ctx)
if err != nil {
log.Fatal(err)
}
switch os.Args[1] {
case "write":
if err := write(client, bucket, object); err != nil {
log.Fatalf("Cannot write object: %v", err)
}
case "read":
data, err := read(client, bucket, object)
if err != nil {
log.Fatalf("Cannot read object: %v", err)
}
fmt.Printf("Object contents: %s\n", data)
case "metadata":
attrs, err := attrs(client, bucket, object)
if err != nil {
log.Fatalf("Cannot get object metadata: %v", err)
}
fmt.Printf("Object metadata: %v\n", attrs)
case "makepublic":
if err := makePublic(client, bucket, object); err != nil {
log.Fatalf("Cannot to make object public: %v", err)
}
case "delete":
if err := delete(client, bucket, object); err != nil {
log.Fatalf("Cannot to delete object: %v", err)
}
}
}
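// write uploads the contents of the local file notes.txt to the given
// bucket/object.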
func write(client *storage.Client, bucket, object string) error {
ctx := context.Background()
// [START upload_file]
f, err := os.Open("notes.txt")
if err != nil {
return err
}
defer f.Close()
wc := client.Bucket(bucket).Object(object).NewWriter(ctx)
if _, err = io.Copy(wc, f); err != nil {
return err
}
if err := wc.Close(); err != nil {
return err
}
// [END upload_file]
return nil
}
func list(client *storage.Client, bucket string) error {
ctx := context.Background()
// [START storage_list_files]
it := client.Bucket(bucket).Objects(ctx, nil)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
return err
}
fmt.Println(attrs.Name)
}
// [END storage_list_files]
return nil
}
func listByPrefix(client *storage.Client, bucket, prefix, delim string) error {
ctx := context.Background()
// [START storage_list_files_with_prefix]
// Prefixes and delimiters can be used to emulate directory listings.
// Prefixes can be used to filter objects starting with prefix.
// The delimiter argument can be used to restrict the results to only the
// objects in the given "directory". Without the delimiter, the entire tree
// under the prefix is returned.
//
// For example, given these blobs:
// /a/1.txt
// /a/b/2.txt
//
// If you just specify prefix="/a", you'll get back:
// /a/1.txt
// /a/b/2.txt
//
// However, if you specify prefix="/a" and delim="/", you'll get back:
// /a/1.txt
it := client.Bucket(bucket).Objects(ctx, &storage.Query{
Prefix: prefix,
Delimiter: delim,
})
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
return err
}
fmt.Println(attrs.Name)
}
// [END storage_list_files_with_prefix]
return nil
}
func read(client *storage.Client, bucket, object string) ([]byte, error) {
ctx := context.Background()
// [START download_file]
rc, err := client.Bucket(bucket).Object(object).NewReader(ctx)
if err != nil {
return nil, err
}
defer rc.Close()
data, err := ioutil.ReadAll(rc)
if err != nil {
return nil, err
}
return data, nil
// [END download_file]
}
func attrs(client *storage.Client, bucket, object string) (*storage.ObjectAttrs, error) {
ctx := context.Background()
// [START get_metadata]
o := client.Bucket(bucket).Object(object)
attrs, err := o.Attrs(ctx)
if err != nil {
return nil, err
}
return attrs, nil
// [END get_metadata]
}
func makePublic(client *storage.Client, bucket, object string) error {
ctx := context.Background()
// [START public]
acl := client.Bucket(bucket).Object(object).ACL()
if err := acl.Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
return err
}
// [END public]
return nil
}
func move(client *storage.Client, bucket, object string) error {
ctx := context.Background()
// [START move_file]
dstName := object + "-rename"
src := client.Bucket(bucket).Object(object)
dst := client.Bucket(bucket).Object(dstName)
if _, err := dst.CopierFrom(src).Run(ctx); err != nil {
return err
}
if err := src.Delete(ctx); err != nil {
return err
}
// [END move_file]
return nil
}
func copyToBucket(client *storage.Client, dstBucket, srcBucket, srcObject string) error {
ctx := context.Background()
// [START copy_file]
dstObject := srcObject + "-copy"
src := client.Bucket(srcBucket).Object(srcObject)
dst := client.Bucket(dstBucket).Object(dstObject)
if _, err := dst.CopierFrom(src).Run(ctx); err != nil {
return err
}
// [END copy_file]
return nil
}
func delete(client *storage.Client, bucket, object string) error {
ctx := context.Background()
// [START delete_file]
o := client.Bucket(bucket).Object(object)
if err := o.Delete(ctx); err != nil {
return err
}
// [END delete_file]
return nil
}
func writeEncryptedObject(client *storage.Client, bucket, object string, secretKey []byte) error {
ctx := context.Background()
// [START storage_upload_encrypted_file]
obj := client.Bucket(bucket).Object(object)
// Encrypt the object's contents.
wc := obj.Key(secretKey).NewWriter(ctx)
if _, err := wc.Write([]byte("top secret")); err != nil {
return err
}
if err := wc.Close(); err != nil {
return err
}
// [END storage_upload_encrypted_file]
return nil
}
func readEncryptedObject(client *storage.Client, bucket, object string, secretKey []byte) ([]byte, error) {
ctx := context.Background()
// [START storage_download_encrypted_file]
obj := client.Bucket(bucket).Object(object)
rc, err := obj.Key(secretKey).NewReader(ctx)
if err != nil {
return nil, err
}
defer rc.Close()
data, err := ioutil.ReadAll(rc)
if err != nil {
return nil, err
}
// [END storage_download_encrypted_file]
return data, nil
}
func rotateEncryptionKey(client *storage.Client, bucket, object string, key, newKey []byte) error {
ctx := context.Background()
// [START storage_rotate_encryption_key]
client, err := storage.NewClient(ctx)
if err != nil {
return err
}
obj := client.Bucket(bucket).Object(object)
// obj is encrypted with key, we are encrypting it with the newKey.
_, err = obj.Key(newKey).CopierFrom(obj.Key(key)).Run(ctx)
if err != nil {
return err
}
// [END storage_rotate_encryption_key]
return nil
}
const helptext = `usage: objects -o=bucket:name [subcommand] <args...>
subcommands:
- write
- read
- metadata
- makepublic
- delete
`
func usage(msg string) {
if msg != "" {
fmt.Fprintln(os.Stderr, msg)
}
fmt.Fprintln(os.Stderr, helptext)
os.Exit(2)
}
| ["\"GOOGLE_CLOUD_PROJECT\""] | [] | ["GOOGLE_CLOUD_PROJECT"] | [] | ["GOOGLE_CLOUD_PROJECT"] | go | 1 | 0 | |
build/matplotlib/lib/matplotlib/backends/backend_qt5.py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import re
import signal
import sys
from six import unichr
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import FigureManagerBase
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.backend_bases import NavigationToolbar2
from matplotlib.backend_bases import cursors
from matplotlib.backend_bases import TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
try:
import matplotlib.backends.qt_editor.figureoptions as figureoptions
except ImportError:
figureoptions = None
from .qt_compat import QtCore, QtGui, QtWidgets, _getSaveFileName, __version__
from matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool
backend_version = __version__
# SPECIAL_KEYS are keys that do *not* return their unicode name
# instead they have manually specified names
SPECIAL_KEYS = {QtCore.Qt.Key_Control: 'control',
QtCore.Qt.Key_Shift: 'shift',
QtCore.Qt.Key_Alt: 'alt',
QtCore.Qt.Key_Meta: 'super',
QtCore.Qt.Key_Return: 'enter',
QtCore.Qt.Key_Left: 'left',
QtCore.Qt.Key_Up: 'up',
QtCore.Qt.Key_Right: 'right',
QtCore.Qt.Key_Down: 'down',
QtCore.Qt.Key_Escape: 'escape',
QtCore.Qt.Key_F1: 'f1',
QtCore.Qt.Key_F2: 'f2',
QtCore.Qt.Key_F3: 'f3',
QtCore.Qt.Key_F4: 'f4',
QtCore.Qt.Key_F5: 'f5',
QtCore.Qt.Key_F6: 'f6',
QtCore.Qt.Key_F7: 'f7',
QtCore.Qt.Key_F8: 'f8',
QtCore.Qt.Key_F9: 'f9',
QtCore.Qt.Key_F10: 'f10',
QtCore.Qt.Key_F11: 'f11',
QtCore.Qt.Key_F12: 'f12',
QtCore.Qt.Key_Home: 'home',
QtCore.Qt.Key_End: 'end',
QtCore.Qt.Key_PageUp: 'pageup',
QtCore.Qt.Key_PageDown: 'pagedown',
QtCore.Qt.Key_Tab: 'tab',
QtCore.Qt.Key_Backspace: 'backspace',
QtCore.Qt.Key_Enter: 'enter',
QtCore.Qt.Key_Insert: 'insert',
QtCore.Qt.Key_Delete: 'delete',
QtCore.Qt.Key_Pause: 'pause',
QtCore.Qt.Key_SysReq: 'sysreq',
QtCore.Qt.Key_Clear: 'clear', }
# define which modifier keys are collected on keyboard events.
# elements are (mpl names, Modifier Flag, Qt Key) tuples
SUPER = 0
ALT = 1
CTRL = 2
SHIFT = 3
MODIFIER_KEYS = [('super', QtCore.Qt.MetaModifier, QtCore.Qt.Key_Meta),
('alt', QtCore.Qt.AltModifier, QtCore.Qt.Key_Alt),
('ctrl', QtCore.Qt.ControlModifier, QtCore.Qt.Key_Control),
('shift', QtCore.Qt.ShiftModifier, QtCore.Qt.Key_Shift),
]
if sys.platform == 'darwin':
# in OSX, the control and super (aka cmd/apple) keys are switched, so
# switch them back.
SPECIAL_KEYS.update({QtCore.Qt.Key_Control: 'super', # cmd/apple key
QtCore.Qt.Key_Meta: 'control',
})
MODIFIER_KEYS[0] = ('super', QtCore.Qt.ControlModifier,
QtCore.Qt.Key_Control)
MODIFIER_KEYS[2] = ('ctrl', QtCore.Qt.MetaModifier,
QtCore.Qt.Key_Meta)
def fn_name():
return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE: QtCore.Qt.SizeAllCursor,
cursors.HAND: QtCore.Qt.PointingHandCursor,
cursors.POINTER: QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION: QtCore.Qt.CrossCursor,
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
# make placeholder
qApp = None
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
global qApp
if qApp is None:
if DEBUG:
print("Starting up QApplication")
app = QtWidgets.QApplication.instance()
if app is None:
# check for DISPLAY env variable on X11 build of Qt
if hasattr(QtGui, "QX11Info"):
display = os.environ.get('DISPLAY')
if display is None or not re.search(r':\d', display):
raise RuntimeError('Invalid DISPLAY variable')
qApp = QtWidgets.QApplication([str(" ")])
qApp.lastWindowClosed.connect(qApp.quit)
else:
qApp = app
class Show(ShowBase):
def mainloop(self):
# allow KeyboardInterrupt exceptions to close the plot window.
signal.signal(signal.SIGINT, signal.SIG_DFL)
global qApp
qApp.exec_()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
thisFig = Figure(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQT(figure)
manager = FigureManagerQT(canvas, num)
return manager
class TimerQT(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Qt timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def __init__(self, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
# Create a new timer and connect the timeout() signal to the
# _on_timer method.
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self._on_timer)
self._timer_set_interval()
def __del__(self):
# Probably not necessary in practice, but is good behavior to
# disconnect
try:
TimerBase.__del__(self)
self._timer.timeout.disconnect(self._on_timer)
except RuntimeError:
# Timer C++ object already deleted
pass
def _timer_set_single_shot(self):
self._timer.setSingleShot(self._single)
def _timer_set_interval(self):
self._timer.setInterval(self._interval)
def _timer_start(self):
self._timer.start()
def _timer_stop(self):
self._timer.stop()
class FigureCanvasQT(QtWidgets.QWidget, FigureCanvasBase):
# map Qt button codes to MouseEvent's ones:
buttond = {QtCore.Qt.LeftButton: 1,
QtCore.Qt.MidButton: 2,
QtCore.Qt.RightButton: 3,
# QtCore.Qt.XButton1: None,
# QtCore.Qt.XButton2: None,
}
def __init__(self, figure):
if DEBUG:
print('FigureCanvasQt qt5: ', figure)
_create_qApp()
# NB: Using super for this call to avoid a TypeError:
# __init__() takes exactly 2 arguments (1 given) on QWidget
# PyQt5
super(FigureCanvasQT, self).__init__(figure=figure)
self.figure = figure
self.setMouseTracking(True)
self._idle = True
# hide until we can test and fix
# self.startTimer(backend_IdleEvent.milliseconds)
w, h = self.get_width_height()
self.resize(w, h)
def __timerEvent(self, event):
# hide until we can test and fix
self.mpl_idle_event(event)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, event)
def leaveEvent(self, event):
QtWidgets.QApplication.restoreOverrideCursor()
FigureCanvasBase.leave_notify_event(self, event)
def mousePressEvent(self, event):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y, button)
if DEBUG:
print('button pressed:', event.button())
def mouseDoubleClickEvent(self, event):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y,
button, dblclick=True)
if DEBUG:
print('button doubleclicked:', event.button())
def mouseMoveEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event(self, x, y)
# if DEBUG: print('mouse move')
def mouseReleaseEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_release_event(self, x, y, button)
if DEBUG:
print('button released')
def wheelEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
# from QWheelEvent::delta doc
if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:
steps = event.angleDelta().y() / 120
else:
steps = event.pixelDelta().y()
if steps != 0:
FigureCanvasBase.scroll_event(self, x, y, steps)
if DEBUG:
print('scroll event: delta = %i, '
'steps = %i ' % (event.delta(), steps))
def keyPressEvent(self, event):
key = self._get_key(event)
if key is None:
return
FigureCanvasBase.key_press_event(self, key)
if DEBUG:
print('key press', key)
def keyReleaseEvent(self, event):
key = self._get_key(event)
if key is None:
return
FigureCanvasBase.key_release_event(self, key)
if DEBUG:
print('key release', key)
def resizeEvent(self, event):
w = event.size().width()
h = event.size().height()
if DEBUG:
print('resize (%d x %d)' % (w, h))
print("FigureCanvasQt.resizeEvent(%d, %d)" % (w, h))
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch, hinch)
FigureCanvasBase.resize_event(self)
self.draw()
self.update()
QtWidgets.QWidget.resizeEvent(self, event)
def sizeHint(self):
w, h = self.get_width_height()
return QtCore.QSize(w, h)
def minimumSizeHint(self):
return QtCore.QSize(10, 10)
def _get_key(self, event):
if event.isAutoRepeat():
return None
event_key = event.key()
event_mods = int(event.modifiers()) # actually a bitmask
# get names of the pressed modifier keys
# bit twiddling to pick out modifier keys from event_mods bitmask,
# if event_key is a MODIFIER, it should not be duplicated in mods
mods = [name for name, mod_key, qt_key in MODIFIER_KEYS
if event_key != qt_key and (event_mods & mod_key) == mod_key]
try:
# for certain keys (enter, left, backspace, etc) use a word for the
# key, rather than unicode
key = SPECIAL_KEYS[event_key]
except KeyError:
# unicode defines code points up to 0x0010ffff
# QT will use Key_Codes larger than that for keyboard keys that are
# are not unicode characters (like multimedia keys)
# skip these
# if you really want them, you should add them to SPECIAL_KEYS
MAX_UNICODE = 0x10ffff
if event_key > MAX_UNICODE:
return None
key = unichr(event_key)
# qt delivers capitalized letters. fix capitalization
# note that capslock is ignored
if 'shift' in mods:
mods.remove('shift')
else:
key = key.lower()
mods.reverse()
return '+'.join(mods + [key])
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting
periodic events through the backend's native event
loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs)
will be executed by the timer every *interval*.
"""
return TimerQT(*args, **kwargs)
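# Illustrative usage sketch (canvas and fn are assumed names, not defined
# in this file):
#   timer = canvas.new_timer(interval=1000)
#   timer.add_callback(fn)
#   timer.start()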
def flush_events(self):
global qApp
qApp.processEvents()
def start_event_loop(self, timeout):
FigureCanvasBase.start_event_loop_default(self, timeout)
start_event_loop.__doc__ = \
FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__ = FigureCanvasBase.stop_event_loop_default.__doc__
def draw_idle(self):
'update drawing area only if idle'
d = self._idle
self._idle = False
def idle_draw(*args):
try:
self.draw()
finally:
self._idle = True
if d:
QtCore.QTimer.singleShot(0, idle_draw)
class MainWindow(QtWidgets.QMainWindow):
closing = QtCore.Signal()
def closeEvent(self, event):
self.closing.emit()
QtWidgets.QMainWindow.closeEvent(self, event)
class FigureManagerQT(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
"""
def __init__(self, canvas, num):
if DEBUG:
print('FigureManagerQT.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.canvas = canvas
self.window = MainWindow()
self.window.closing.connect(canvas.close_event)
self.window.closing.connect(self._widgetclosed)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
self.window.setWindowIcon(QtGui.QIcon(image))
# Give the keyboard focus to the figure instead of the
# manager; StrongFocus accepts both tab and click to focus and
# will enable the canvas to process event w/o clicking.
# ClickFocus only takes the focus if the window has been
# clicked
# on. http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or
# http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum
self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)
self.canvas.setFocus()
self.window._destroying = False
self.toolbar = self._get_toolbar(self.canvas, self.window)
if self.toolbar is not None:
self.window.addToolBar(self.toolbar)
self.toolbar.message.connect(self._show_message)
tbs_height = self.toolbar.sizeHint().height()
else:
tbs_height = 0
# resize the main window so it will display the canvas with the
# requested size:
cs = canvas.sizeHint()
sbs = self.window.statusBar().sizeHint()
self._status_and_tool_height = tbs_height + sbs.height()
height = cs.height() + self._status_and_tool_height
self.window.resize(cs.width(), height)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
# This will be called whenever the current axes is changed
if self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
@QtCore.Slot()
def _show_message(self, s):
# Fixes a PySide segfault.
self.window.statusBar().showMessage(s)
def full_screen_toggle(self):
if self.window.isFullScreen():
self.window.showNormal()
else:
self.window.showFullScreen()
def _widgetclosed(self):
if self.window._destroying:
return
self.window._destroying = True
try:
Gcf.destroy(self.num)
except AttributeError:
pass
# It seems that when the python session is killed,
# Gcf can get destroyed before the Gcf.destroy
# line is run, leading to a useless AttributeError.
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height + self._status_and_tool_height)
def show(self):
self.window.show()
def destroy(self, *args):
# check for qApp first, as PySide deletes it in its atexit handler
if QtWidgets.QApplication.instance() is None:
return
if self.window._destroying:
return
self.window._destroying = True
self.window.destroyed.connect(self._widgetclosed)
if self.toolbar:
self.toolbar.destroy()
if DEBUG:
print("destroy figure manager")
self.window.close()
def get_window_title(self):
return str(self.window.windowTitle())
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar):
message = QtCore.Signal(str)
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.parent = parent
self.coordinates = coordinates
self._actions = {}
"""A mapping of toolitem method names to their QActions"""
QtWidgets.QToolBar.__init__(self, parent)
NavigationToolbar2.__init__(self, canvas)
def _icon(self, name):
return QtGui.QIcon(os.path.join(self.basedir, name))
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.addSeparator()
else:
a = self.addAction(self._icon(image_file + '.png'),
text, getattr(self, callback))
self._actions[callback] = a
if callback in ['zoom', 'pan']:
a.setCheckable(True)
if tooltip_text is not None:
a.setToolTip(tooltip_text)
if figureoptions is not None:
a = self.addAction(self._icon("qt4_editor_options.png"),
'Customize', self.edit_parameters)
a.setToolTip('Edit curves line and axes parameters')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtWidgets.QLabel("", self)
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self.locLabel.setSizePolicy(
QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
if figureoptions is not None:
def edit_parameters(self):
allaxes = self.canvas.figure.get_axes()
if len(allaxes) == 1:
axes = allaxes[0]
else:
titles = []
for axes in allaxes:
title = axes.get_title()
ylabel = axes.get_ylabel()
label = axes.get_label()
if title:
fmt = "%(title)s"
if ylabel:
fmt += ": %(ylabel)s"
fmt += " (%(axes_repr)s)"
elif ylabel:
fmt = "%(axes_repr)s (%(ylabel)s)"
elif label:
fmt = "%(axes_repr)s (%(label)s)"
else:
fmt = "%(axes_repr)s"
titles.append(fmt % dict(title=title,
ylabel=ylabel, label=label,
axes_repr=repr(axes)))
item, ok = QtWidgets.QInputDialog.getItem(
self.parent, 'Customize', 'Select axes:', titles, 0, False)
if ok:
axes = allaxes[titles.index(six.text_type(item))]
else:
return
figureoptions.figure_edit(axes, self)
def _update_buttons_checked(self):
# sync button checkstates to match active mode
self._actions['pan'].setChecked(self._active == 'PAN')
self._actions['zoom'].setChecked(self._active == 'ZOOM')
def pan(self, *args):
super(NavigationToolbar2QT, self).pan(*args)
self._update_buttons_checked()
def zoom(self, *args):
super(NavigationToolbar2QT, self).zoom(*args)
self._update_buttons_checked()
def dynamic_update(self):
self.canvas.draw()
def set_message(self, s):
self.message.emit(s)
if self.coordinates:
self.locLabel.setText(s.replace(', ', '\n'))
def set_cursor(self, cursor):
if DEBUG:
print('Set cursor', cursor)
self.canvas.setCursor(cursord[cursor])
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.canvas.drawRectangle(rect)
def configure_subplots(self):
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
dia = SubplotToolQt(self.canvas.figure, self.parent)
dia.setWindowIcon(QtGui.QIcon(image))
dia.exec_()
def save_figure(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = list(six.iteritems(filetypes))
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
startpath = matplotlib.rcParams.get('savefig.directory', '')
startpath = os.path.expanduser(startpath)
start = os.path.join(startpath, self.canvas.get_default_filename())
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname = _getSaveFileName(self.parent, "Choose a filename to save to",
start, filters, selectedFilter)
if fname:
if startpath == '':
# explicitly missing key or empty str signals to use cwd
matplotlib.rcParams['savefig.directory'] = startpath
else:
# save dir for next time
savefig_dir = os.path.dirname(six.text_type(fname))
matplotlib.rcParams['savefig.directory'] = savefig_dir
try:
self.canvas.print_figure(six.text_type(fname))
except Exception as e:
QtWidgets.QMessageBox.critical(
self, "Error saving file", str(e),
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
class SubplotToolQt(SubplotTool, UiSubplotTool):
def __init__(self, targetfig, parent):
UiSubplotTool.__init__(self, None)
self.targetfig = targetfig
self.parent = parent
self.donebutton.clicked.connect(self.close)
self.resetbutton.clicked.connect(self.reset)
self.tightlayout.clicked.connect(self.functight)
# constraints
self.sliderleft.valueChanged.connect(self.sliderright.setMinimum)
self.sliderright.valueChanged.connect(self.sliderleft.setMaximum)
self.sliderbottom.valueChanged.connect(self.slidertop.setMinimum)
self.slidertop.valueChanged.connect(self.sliderbottom.setMaximum)
self.defaults = {}
for attr in ('left', 'bottom', 'right', 'top', 'wspace', 'hspace',):
self.defaults[attr] = getattr(self.targetfig.subplotpars, attr)
slider = getattr(self, 'slider' + attr)
slider.setMinimum(0)
slider.setMaximum(1000)
slider.setSingleStep(5)
slider.valueChanged.connect(getattr(self, 'func' + attr))
self._setSliderPositions()
def _setSliderPositions(self):
for attr in ('left', 'bottom', 'right', 'top', 'wspace', 'hspace',):
slider = getattr(self, 'slider' + attr)
slider.setSliderPosition(int(self.defaults[attr] * 1000))
def funcleft(self, val):
if val == self.sliderright.value():
val -= 1
val /= 1000.
self.targetfig.subplots_adjust(left=val)
self.leftvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def funcright(self, val):
if val == self.sliderleft.value():
val += 1
val /= 1000.
self.targetfig.subplots_adjust(right=val)
self.rightvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def funcbottom(self, val):
if val == self.slidertop.value():
val -= 1
val /= 1000.
self.targetfig.subplots_adjust(bottom=val)
self.bottomvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def functop(self, val):
if val == self.sliderbottom.value():
val += 1
val /= 1000.
self.targetfig.subplots_adjust(top=val)
self.topvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def funcwspace(self, val):
val /= 1000.
self.targetfig.subplots_adjust(wspace=val)
self.wspacevalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def funchspace(self, val):
val /= 1000.
self.targetfig.subplots_adjust(hspace=val)
self.hspacevalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def functight(self):
self.targetfig.tight_layout()
self._setSliderPositions()
self.targetfig.canvas.draw()
def reset(self):
self.targetfig.subplots_adjust(**self.defaults)
self._setSliderPositions()
self.targetfig.canvas.draw()
def error_msg_qt(msg, parent=None):
if not is_string_like(msg):
msg = ','.join(map(str, msg))
QtWidgets.QMessageBox.warning(None, "Matplotlib",
msg, QtGui.QMessageBox.Ok)
def exception_handler(type, value, tb):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename is not None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror is not None:
msg += value.strerror
else:
msg += str(value)
if len(msg):
error_msg_qt(msg)
FigureCanvas = FigureCanvasQT
FigureManager = FigureManagerQT
| [] | [] | ["DISPLAY"] | [] | ["DISPLAY"] | python | 1 | 0 | |
Chapter-6/test_JWT/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_JWT.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
cmd/cx/main.go
|
package main
import (
"fmt"
"os"
"os/user"
"path/filepath"
"runtime"
"strconv"
"strings"
cxcore "github.com/skycoin/cx/cx"
"github.com/skycoin/cx/cxgo/actions"
"github.com/skycoin/cx/cxgo/cxgo"
"github.com/skycoin/cx/cxgo/cxgo0"
"github.com/skycoin/cx/cxgo/parser"
)
const VERSION = "0.7.1"
func main() {
//cx.CXLogFile(true)
if os.Args != nil && len(os.Args) > 1 {
Run(os.Args[1:])
}
}
func Run(args []string) {
runtime.LockOSThread()
runtime.GOMAXPROCS(2)
options := defaultCmdFlags()
parseFlags(&options, args)
// Checking if CXPATH is set, either by setting an environment variable
// or by setting the `--cxpath` flag.
checkCXPathSet(options)
if checkHelp(args) {
commandLine.PrintDefaults()
return
}
// Does the user want to print the command-line help?
if options.printHelp {
printHelp()
return
}
// Does the user want to print CX's version?
if options.printVersion {
printVersion()
return
}
// User wants to print CX env
if options.printEnv {
printEnv()
return
}
if options.initialHeap != "" {
cxcore.INIT_HEAP_SIZE = parseMemoryString(options.initialHeap)
}
if options.maxHeap != "" {
cxcore.MAX_HEAP_SIZE = parseMemoryString(options.maxHeap)
if cxcore.MAX_HEAP_SIZE < cxcore.INIT_HEAP_SIZE {
// Then MAX_HEAP_SIZE overrides INIT_HEAP_SIZE's value.
cxcore.INIT_HEAP_SIZE = cxcore.MAX_HEAP_SIZE
}
}
if options.stackSize != "" {
cxcore.STACK_SIZE = parseMemoryString(options.stackSize)
actions.DataOffset = cxcore.STACK_SIZE
}
if options.minHeapFreeRatio != float64(0) {
cxcore.MIN_HEAP_FREE_RATIO = float32(options.minHeapFreeRatio)
}
if options.maxHeapFreeRatio != float64(0) {
cxcore.MAX_HEAP_FREE_RATIO = float32(options.maxHeapFreeRatio)
}
// options, file pointers, filenames
cxArgs, sourceCode, fileNames := cxcore.ParseArgsForCX(commandLine.Args(), true)
// Propagate some options out to other packages.
parser.DebugLexer = options.debugLexer // in package parser
DebugProfileRate = options.debugProfile
DebugProfile = DebugProfileRate > 0
if run, bcHeap, sPrgrm := parseProgram(options, fileNames, sourceCode); run {
runProgram(options, cxArgs, sourceCode, bcHeap, sPrgrm)
}
}
// initMainPkg adds a `main` package with an empty `main` function to `prgrm`.
func initMainPkg(prgrm *cxcore.CXProgram) {
mod := cxcore.MakePackage(cxcore.MAIN_PKG)
prgrm.AddPackage(mod)
fn := cxcore.MakeFunction(cxcore.MAIN_FUNC, actions.CurrentFile, actions.LineNo)
mod.AddFunction(fn)
}
// optionTokenize checks if the user wants to use CX to generate the lexer tokens
func optionTokenize(options cxCmdFlags, fileNames []string) {
var r *os.File
var w *os.File
var err error
if len(fileNames) == 0 {
r = os.Stdin
} else {
sourceFilename := fileNames[0]
if len(fileNames) > 1 {
fmt.Fprintln(os.Stderr, "Multiple source files detected. Ignoring all except", sourceFilename)
}
r, err = cxcore.CXOpenFile(sourceFilename)
if err != nil {
fmt.Fprintln(os.Stderr, "Error reading:", sourceFilename, err)
return
}
defer r.Close()
}
if options.compileOutput == "" {
w = os.Stdout
} else {
tokenFilename := options.compileOutput
w, err = cxcore.CXCreateFile(tokenFilename)
if err != nil {
fmt.Fprintln(os.Stderr, "Error writing:", tokenFilename, err)
return
}
defer w.Close()
}
parser.Tokenize(r, w)
}
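// parseProgram parses the given source files into actions.PRGRM, ensures a
// main package and an *init function exist, and reports whether the resulting
// program should be run.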
func parseProgram(options cxCmdFlags, fileNames []string, sourceCode []*os.File) (bool, []byte, []byte) {
profile := StartCPUProfile("parse")
defer StopCPUProfile(profile)
defer DumpMEMProfile("parse")
StartProfile("parse")
defer StopProfile("parse")
actions.PRGRM = cxcore.MakeProgram()
corePkgsPrgrm, err := cxcore.GetProgram()
if err != nil {
panic(err)
}
actions.PRGRM.Packages = corePkgsPrgrm.Packages
// var bcPrgrm *CXProgram
var sPrgrm []byte
// In case of a CX chain, we need to temporarily store the blockchain code heap elsewhere,
// so we can then add it after the transaction code's data segment.
var bcHeap []byte
// Parsing all the source code files sent as CLI arguments to CX.
cxgo.ParseSourceCode(sourceCode, fileNames)
// setting project's working directory
if !options.replMode && len(sourceCode) > 0 {
cxgo0.PRGRM0.Path = determineWorkDir(sourceCode[0].Name())
}
// Checking if a main package exists. If not, create and add it to `PRGRM`.
if _, err := actions.PRGRM.GetFunction(cxcore.MAIN_FUNC, cxcore.MAIN_PKG); err != nil {
initMainPkg(actions.PRGRM)
}
// Setting what function to start in if using the REPL.
actions.ReplTargetFn = cxcore.MAIN_FUNC
// Adding *init function that initializes all the global variables.
err = cxgo.AddInitFunction(actions.PRGRM)
if err != nil {
return false, nil, nil
}
actions.LineNo = 0
if cxcore.FoundCompileErrors {
//cleanupAndExit(cxcore.CX_COMPILATION_ERROR)
StopCPUProfile(profile)
exitCode := cxcore.CX_COMPILATION_ERROR
os.Exit(exitCode)
}
return true, bcHeap, sPrgrm
}
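// runProgram either starts the REPL (when requested or when no source files
// were given) or executes the compiled program with the provided arguments,
// exiting with CX_ASSERT if an assertion failed.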
func runProgram(options cxCmdFlags, cxArgs []string, sourceCode []*os.File, bcHeap []byte, sPrgrm []byte) {
StartProfile("run")
defer StopProfile("run")
if options.replMode || len(sourceCode) == 0 {
actions.PRGRM.SelectProgram()
Repl()
return
}
// Normal run of a CX program.
err := actions.PRGRM.RunCompiled(0, cxArgs)
if err != nil {
panic(err)
}
if cxcore.AssertFailed() {
os.Exit(cxcore.CX_ASSERT)
}
}
// Used for the -heap-initial, -heap-max and -stack-size flags.
// This function parses, for example, "1M" to 1048576 (the corresponding number of bytes)
// Possible suffixes are: G or g (gigabytes), M or m (megabytes), K or k (kilobytes)
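// Illustrative conversions (assuming a 64-bit build):
//   parseMemoryString("512") == 512
//   parseMemoryString("1K")  == 1024
//   parseMemoryString("1M")  == 1048576
//   parseMemoryString("2G")  == 2147483648
//   parseMemoryString("1X")  == -1 (unrecognized suffix)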
func parseMemoryString(s string) int {
suffix := s[len(s)-1]
// If the last character parses as a digit, the string carries no size suffix.
_, suffixErr := strconv.ParseFloat(string(suffix), 64)
if suffixErr == nil {
// no suffix: the whole string is a plain byte count
num, err := strconv.ParseInt(s, 10, 64)
if err != nil {
// malformed size
return -1
}
return int(num)
} else {
// then we have a suffix
num, err := strconv.ParseFloat(s[:len(s)-1], 64)
if err != nil {
// malformed size
return -1
}
// The user can use suffixes to give as input gigabytes, megabytes or kilobytes.
switch suffix {
case 'G', 'g':
return int(num * 1073741824)
case 'M', 'm':
return int(num * 1048576)
case 'K', 'k':
return int(num * 1024)
default:
return -1
}
}
}
type SourceCode struct {
Code string //Unused?
}
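// determineWorkDir returns the directory portion of filename (everything before the last
// path separator), or an empty string when filename contains no separator.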
func determineWorkDir(filename string) string {
filename = filepath.FromSlash(filename)
i := strings.LastIndexByte(filename, os.PathSeparator)
if i == -1 {
i = 0
}
return filename[:i]
}
// checkCXPathSet checks if the user has set the environment variable
// `CXPATH`. If not, CX creates a workspace at $HOME/cx, along with $HOME/cx/bin,
// $HOME/cx/pkg and $HOME/cx/src
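// Resolution order: the --cxpath flag overrides the CXPATH environment variable, which in
// turn overrides the $HOME/cx/ default; bin/, pkg/ and src/ are created under the chosen root.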
func checkCXPathSet(options cxCmdFlags) {
// Determining the filepath of the directory where the user
// started the `cx` command.
_, err := os.Executable()
if err != nil {
panic(err)
}
// cxcore.COREPATH = filepath.Dir(ex) // TODO @evanlinjin: Not used.
CXPATH := ""
if os.Getenv("CXPATH") != "" {
CXPATH = os.Getenv("CXPATH")
}
// `options.cxpath` overrides `os.Getenv("CXPATH")`
if options.cxpath != "" {
CXPATH, err = filepath.Abs(options.cxpath)
if err != nil {
panic(err)
}
}
if os.Getenv("CXPATH") == "" && options.cxpath == "" {
usr, err := user.Current()
if err != nil {
panic(err)
}
CXPATH = usr.HomeDir + "/cx/"
}
cxcore.BINPATH = filepath.Join(CXPATH, "bin/")
cxcore.PKGPATH = filepath.Join(CXPATH, "pkg/")
cxcore.SRCPATH = filepath.Join(CXPATH, "src/")
// Creating directories in case they do not exist.
if _, err := cxcore.CXStatFile(CXPATH); os.IsNotExist(err) {
cxcore.CXMkdirAll(CXPATH, 0755)
}
if _, err := cxcore.CXStatFile(cxcore.BINPATH); os.IsNotExist(err) {
cxcore.CXMkdirAll(cxcore.BINPATH, 0755)
}
if _, err := cxcore.CXStatFile(cxcore.PKGPATH); os.IsNotExist(err) {
cxcore.CXMkdirAll(cxcore.PKGPATH, 0755)
}
if _, err := cxcore.CXStatFile(cxcore.SRCPATH); os.IsNotExist(err) {
cxcore.CXMkdirAll(cxcore.SRCPATH, 0755)
}
}
|
[
"\"CXPATH\"",
"\"CXPATH\"",
"\"CXPATH\"",
"\"CXPATH\""
] |
[] |
[
"CXPATH"
] |
[]
|
["CXPATH"]
|
go
| 1 | 0 | |
pair/realimage/optimize.py
|
"""
CUDA_VISIBLE_DEVICES=0 python optimize.py --learning_rate 1.0 --optimizer sgd --msg sgd_lr1.0
SGD lr 1.0 loss 0.078
SGD lr 5.0 loss 0.061
SGD lr 10.0 loss 0.029
SGD lr 15.0 loss 0.032
Adam lr 0.05 loss 0.008
Adam lr 0.1 loss 0.007
Adam lr 0.5 loss 0.014
Adam lr 1.0 loss 0.021
Adam lr 5.0 loss 0.135
"""
import argparse
import os
from PIL import Image
import logging
import datetime
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR
import numpy as np
from torchvision.datasets.mnist import FashionMNIST, MNIST
from torchvision import transforms
from torchvision.datasets import ImageFolder
import torchvision.utils as vutils
from tqdm import tqdm
import models as models
import matplotlib.pyplot as plt
from helper import mkdir_p, save_model, save_args, set_seed, Logger
def parse_args():
"""Parameters"""
parser = argparse.ArgumentParser('training')
parser.add_argument('-c', '--checkpoint', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--msg', type=str, help='message after checkpoint')
parser.add_argument('--model', default='RealOptimize', help='model name [default: pointnet_cls]')
parser.add_argument('--img_path', default='single.png')
parser.add_argument('--optimizer', default='sgd')
# training
parser.add_argument('--epoch', default=300, type=int, help='number of epoch in training')
parser.add_argument('--learning_rate', default=0.1, type=float, help='learning rate in training')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='decay rate')
parser.add_argument('--seed', type=int, help='random seed')
parser.add_argument('--frequency', default=100, type=int, help='workers')
parser.add_argument('--loss', default='l2')
# models
# imsize = 28, paths = 4, segments = 5, samples = 2, zdim = 1024, stroke_width = None
parser.add_argument('--imsize', default=224, type=int)
parser.add_argument('--paths', default=128, type=int)
parser.add_argument('--segments', default=3, type=int)
parser.add_argument('--samples', default=2, type=int)
parser.add_argument('--max_width', default=2, type=int)
return parser.parse_args()
args = parse_args()
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
# Setup: create the checkpoint/visualization folders and configure the file logger
time_str = str(datetime.datetime.now().strftime('-%Y%m%d%H%M%S'))
message = time_str if args.msg is None else "-" + args.msg
args.checkpoint = 'checkpoints/' + args.model + message
args.visualize = 'checkpoints/' + args.model + message + '/visualize'
if not os.path.isdir(args.checkpoint):
mkdir_p(args.checkpoint)
if not os.path.isdir(args.visualize):
mkdir_p(args.visualize)
screen_logger = logging.getLogger("Model")
screen_logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
file_handler = logging.FileHandler(os.path.join(args.checkpoint, "screen_out.txt"))
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
screen_logger.addHandler(file_handler)
def printf(message):
"""Log to the checkpoint's screen_out.txt and echo to stdout."""
screen_logger.info(message)
print(message)
def main():
if args.seed is not None:
set_seed(args.seed)
printf(f'==> fixing the random seed to: {args.seed}')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
printf(f'==> using device: {device}')
printf(f"==> args: {args}")
# building models
printf(f'==> Building model: {args.model}')
net = models.__dict__[args.model](
imsize=args.imsize, paths=args.paths, segments=args.segments, samples=args.samples, max_width=2)
if args.loss == 'l1':
criterion = nn.L1Loss().to(device)
printf(f"Using criterion L1 loss.")
else:
criterion = nn.MSELoss().to(device)
printf(f"Using criterion MSE loss.")
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
best_test_loss = float("inf")
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
optimizer_dict = None
if not os.path.isfile(os.path.join(args.checkpoint, "last_checkpoint.pth")):
save_args(args)
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title="main")
logger.set_names(["Epoch-Num", 'Learning-Rate', 'Train-Loss'])
else:
printf(f"Resuming last checkpoint from {args.checkpoint}")
checkpoint_path = os.path.join(args.checkpoint, "last_checkpoint.pth")
checkpoint = torch.load(checkpoint_path)
net.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch']
best_test_loss = checkpoint['best_test_loss']
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title="main", resume=True)
optimizer_dict = checkpoint['optimizer']
printf('==> Preparing data..')
transform = transforms.Compose([
transforms.Resize((args.imsize, args.imsize)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
])
printf(f"==> Loading image: {args.img_path}")
image = Image.open(args.img_path)
image = transform(image)
if image.size()[0] == 4:
image = image[:3, :, :] # remove alpha channel
image = image.unsqueeze(0)
image = image.to(device)
printf(f"==> Saving the input image.")
first_img = (image[0]).permute(1, 2, 0).cpu().numpy()
plt.imsave(os.path.join(args.visualize, "input.png"), first_img)
if args.optimizer == "sgd":
printf("==> Using SGD optimizer")
optimizer = torch.optim.SGD(net.parameters(), lr=args.learning_rate,
momentum=0.9, weight_decay=args.weight_decay)
else:
printf("==> Using Adam optimizer")
optimizer = torch.optim.Adam(net.parameters(), lr=args.learning_rate)
if optimizer_dict is not None:
optimizer.load_state_dict(optimizer_dict)
scheduler = CosineAnnealingLR(optimizer, args.epoch, eta_min=args.learning_rate / 100, last_epoch=start_epoch - 1)
# init save images
# visualize(net, train_loader, device, args.checkpoint +'/epoch-0', nrow=8)
visualize(net, "init")
for epoch in range(start_epoch, args.epoch):
printf('Epoch(%d/%s) Learning Rate %s:' % (epoch + 1, args.epoch, optimizer.param_groups[0]['lr']))
train_out = train(net, image, optimizer, criterion) # {"loss"}
if (epoch + 1) % args.frequency == 0:
visualize(net, epoch)
scheduler.step()
if train_out["loss"] < best_test_loss:
best_test_loss = train_out["loss"]
is_best = True
else:
is_best = False
save_model(net, epoch, path=args.checkpoint, is_best=is_best, best_test_loss=best_test_loss,
test_loss=train_out["loss"], optimizer=optimizer.state_dict())
logger.append([epoch, optimizer.param_groups[0]['lr'], train_out["loss"]])
printf(f"Train loss:{train_out['loss']} [best loss:{best_test_loss}]")
logger.close()
def train(net, image, optimizer, criterion):
net.train()
optimizer.zero_grad()
out = net(image)
loss = criterion(image, out)
loss.backward()
optimizer.step()
return {
"loss": float("%.3f" % (loss.item()))
}
def visualize(net, epoch):
net.eval()
svgpath = os.path.join(args.visualize, f"epoch_{epoch}_svg.svg")
renderpath = os.path.join(args.visualize, f"epoch_{epoch}_render.png")
with torch.no_grad():
net.module.visualize(svgpath=svgpath, renderpath=renderpath)
printf(f"Finish visualization of epoch {epoch}.")
if __name__ == '__main__':
main()
|
[] |
[] |
[
"HDF5_USE_FILE_LOCKING"
] |
[]
|
["HDF5_USE_FILE_LOCKING"]
|
python
| 1 | 0 | |
language/cloud-client/v1/snippets_test.py
|
# Copyright 2016 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import snippets
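# These tests read the target bucket from the CLOUD_STORAGE_BUCKET environment variable and
# expect a text.txt object in that bucket for the *_file snippets.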
BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
TEST_FILE_URL = 'gs://{}/text.txt'.format(BUCKET)
def test_sentiment_text(capsys):
snippets.sentiment_text('President Obama is speaking at the White House.')
out, _ = capsys.readouterr()
assert 'Score: 0.2' in out
def test_sentiment_file(capsys):
snippets.sentiment_file(TEST_FILE_URL)
out, _ = capsys.readouterr()
assert 'Score: 0.2' in out
def test_entities_text(capsys):
snippets.entities_text('President Obama is speaking at the White House.')
out, _ = capsys.readouterr()
assert 'name' in out
assert ': Obama' in out
def test_entities_file(capsys):
snippets.entities_file(TEST_FILE_URL)
out, _ = capsys.readouterr()
assert 'name' in out
assert ': Obama' in out
def test_syntax_text(capsys):
snippets.syntax_text('President Obama is speaking at the White House.')
out, _ = capsys.readouterr()
assert 'NOUN: President' in out
def test_syntax_file(capsys):
snippets.syntax_file(TEST_FILE_URL)
out, _ = capsys.readouterr()
assert 'NOUN: President' in out
|
[] |
[] |
[
"CLOUD_STORAGE_BUCKET"
] |
[]
|
["CLOUD_STORAGE_BUCKET"]
|
python
| 1 | 0 | |
tests/st/pynative/data_parallel/test_pynative_hccl_allreduce.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test hccl allreduce performance with 8p"""
import os
from multiprocessing import Process, Queue
import pytest
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import dtype as mstype
from mindspore.ops import operations as P
import mindspore.communication.management as D
from mindspore import context
from mindspore.context import ParallelMode
MINDSPORE_HCCL_CONFIG_PATH = "/home/workspace/mindspore_config/hccl/rank_table_8p.json"
np.random.seed(1)
os.environ['GLOG_v'] = str(2)
class AllReduceNet(nn.Cell):
def __init__(self):
super(AllReduceNet, self).__init__()
self.mul = P.Mul()
self.all_reduce = P.AllReduce()
self.add = P.Add()
def construct(self, x):
x = self.mul(x, 2)
y1 = Tensor(np.array([[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]])).astype(np.float32)
z = self.add(x, y1)
z = self.all_reduce(z)
y2 = Tensor(np.array([[-16, -16, -16, -16], [-16, -16, -16, -16], [-16, -16, -16, -16]])).astype(np.float32)
out = self.add(z, y2)
out = self.all_reduce(out)
out = self.mul(out, 2)
return out
def train_allreduce_8p(q, device_id, device_num):
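# Each worker runs in its own per-device directory and exposes its rank, world size and the
# HCCL rank table to the backend via environment variables before calling D.init().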
os.system("mkdir " + str(device_id))
os.chdir(str(device_id))
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend", device_id=device_id)
os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
os.environ['RANK_ID'] = str(device_id)
os.environ['RANK_SIZE'] = str(device_num)
D.init()
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=False,
device_num=device_num)
net = AllReduceNet()
input_x = np.ones([3, 4]).astype(np.float32)
output = net(Tensor(input_x, mstype.float32))
q.put(output)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_pynative_hccl_allreduce_8p():
device_num = 8
process = []
q = Queue()
for i in range(device_num):
device_id = i
process.append(Process(target=train_allreduce_8p, args=(q, device_id, device_num)))
for i in range(device_num):
process[i].start()
print("Waiting for all subprocesses done...")
for i in range(device_num):
process[i].join()
# check result
for i in range(device_num):
expect_output = [[256, 256, 256, 256], [256, 256, 256, 256], [256, 256, 256, 256]]
assert not q.empty()
output = Tensor(q.get())
assert np.allclose(output.asnumpy(), expect_output)
for i in range(device_num):
os.system("rm -rf " + str(i))
print("End training...")
|
[] |
[] |
[
"RANK_SIZE",
"GLOG_v",
"RANK_ID",
"MINDSPORE_HCCL_CONFIG_PATH"
] |
[]
|
["RANK_SIZE", "GLOG_v", "RANK_ID", "MINDSPORE_HCCL_CONFIG_PATH"]
|
python
| 4 | 0 | |
logomachy_project/wsgi.py
|
"""
WSGI config for logomachy_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'logomachy_project.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
enyo/__init__.py
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import os
__version__ = '0.1.0dev'
__license__ = 'BSD3'
__author__ = 'Kyle B. Westfall'
__maintainer__ = 'Kyle B. Westfall'
__email__ = '[email protected]'
__copyright__ = '(c) 2018, Kyle B. Westfall'
def enyo_source_dir():
"""Return the root path to the DAP source directory."""
import pkg_resources
data_dir = pkg_resources.resource_filename('enyo', 'data')
return os.path.split(data_dir)[0]
os.environ['ENYO_DIR'] = enyo_source_dir()
def short_warning(message, category, filename, lineno, file=None, line=None):
"""
Return the format for a short warning message.
"""
return ' %s: %s\n' % (category.__name__, message)
import warnings
warnings.formatwarning = short_warning
|
[] |
[] |
[
"ENYO_DIR"
] |
[]
|
["ENYO_DIR"]
|
python
| 1 | 0 | |
src/lib/configure_dynatrace.py
|
import json
import os
from os import listdir
from os.path import isfile
from typing import List, Optional, Dict, Callable
import yaml
from aiohttp import ClientSession
from lib.context import LoggingContext
from lib.credentials import fetch_dynatrace_url, fetch_dynatrace_api_key
from lib.fast_check import get_dynatrace_token_metadata
from main import is_yaml_file
class ConfigureDynatrace:
class DtApi:
def __init__(self, dt_session: ClientSession, dynatrace_url: str, dynatrace_api_key: str, ):
self.dt_session = dt_session
self.dynatrace_url = dynatrace_url.rstrip('/')
self.dynatrace_api_key = dynatrace_api_key
async def call(self, method: str, path: str, json_data: Optional[Dict] = None, timeout: Optional[int] = 2):
return await self.dt_session.request(
method=method,
url=f"{self.dynatrace_url}{path}",
headers={
"Authorization": f"Api-Token {self.dynatrace_api_key}",
"Content-Type": "application/json; charset=utf-8"
},
timeout=timeout,
json=json_data)
async def post_dashboard(self, dt_api: DtApi, path: str, name:str, remove_owner: bool, timeout: Optional[int] = 2) -> str:
try:
with open(path, encoding="utf-8") as dashboard_file:
dashboard_json = json.load(dashboard_file)
if remove_owner:
del dashboard_json["dashboardMetadata"]["owner"]
response = await dt_api.call("POST", "/api/config/v1/dashboards", dashboard_json, timeout)
if response.status != 201:
response_json = await response.json()
if not remove_owner and 'owner' in json.dumps(response_json):
await self.post_dashboard(dt_api, path, name, True)
else:
self.logging_context.log(f'Unable to create dashboard {name} in Dynatrace: {response.status}, url: {response.url}, reason: {response.reason}, message {response_json}')
else:
self.logging_context.log(f"Installed dashboard {name}")
response_json = await response.json()
return response_json.get("id", "")
except Exception as e:
self.logging_context.log(f'Unable to create dashboard in Dynatrace. Error details: {e}')
async def get_existing_dashboards(self, dt_api: DtApi, timeout: Optional[int] = 2) -> List[dict]:
try:
response = await dt_api.call("GET", "/api/config/v1/dashboards", timeout=timeout)
response.raise_for_status()
dashboards_json = await response.json()
all_dashboards = [dashboard["name"] for dashboard in dashboards_json.get("dashboards", [])]
return all_dashboards
except Exception as e:
self.logging_context.log(f'Unable to get existing dashboards config. Error details: {e}')
return []
async def get_existing_alerts(self, dt_api: DtApi, timeout: Optional[int] = 2) -> List[dict]:
try:
response = await dt_api.call("GET", "/api/config/v1/anomalyDetection/metricEvents", timeout=timeout)
response.raise_for_status()
response_json = await response.json()
all_alerts = [alert["id"] for alert in response_json.get("values", [])]
return all_alerts
except Exception as e:
self.logging_context.log(f'Unable to get existing alerts config. Error details: {e}')
return []
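# get_ext_resources collects the dashboard/alert definitions referenced by the YAML configs of
# the services selected via GCP_SERVICES; properties_extractor keeps the fields of interest
# from each JSON resource, and a "path" entry records where the resource was loaded from.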
def get_ext_resources(self, list_name: str, item_name: str, properties_extractor: Callable[[Dict], Dict]) -> List[Dict]:
try:
if "GCP_SERVICES" in os.environ:
selected_services_string = os.environ.get("GCP_SERVICES", "")
selected_services = selected_services_string.split(",") if selected_services_string else []
working_directory = os.path.dirname(os.path.realpath(__file__))
config_directory = os.path.join(working_directory, "../config")
config_files = [
file for file
in listdir(config_directory)
if os.path.splitext(os.path.basename(file))[0] in selected_services and isfile(os.path.join(config_directory, file)) and is_yaml_file(file)
]
resources = []
for file in config_files:
config_file_path = os.path.join(config_directory, file)
try:
with open(config_file_path, encoding="utf-8") as config_file:
config_yaml = yaml.safe_load(config_file)
for resource in config_yaml.get(list_name, []):
resource_file_path = os.path.join(working_directory, '../', resource.get(item_name, {}))
with open(resource_file_path, encoding="utf-8") as resource_file:
resource_json = json.load(resource_file)
properties = properties_extractor(resource_json)
properties["path"] = resource_file_path
resources.append(properties)
except Exception as error:
self.logging_context.log(
f"Failed to load configuration file: '{config_file_path}'. Error details: {error}")
continue
return resources
else:
return []
except Exception as e:
self.logging_context.log(f'Unable to get available {list_name}. Error details: {e}')
return []
def __init__(self, gcp_session: ClientSession, dt_session: ClientSession, logging_context: LoggingContext):
self.gcp_session = gcp_session
self.dt_session = dt_session
self.logging_context = logging_context
async def import_dashboards(self, dt_api: DtApi):
existing_dashboards = await self.get_existing_dashboards(dt_api)
available_dashboards = self.get_ext_resources("dashboards", "dashboard", lambda x: {
'name': x.get("dashboardMetadata", {}).get("name", "")})
dashboards_to_install = [dash for dash in available_dashboards if dash["name"] not in existing_dashboards]
self.logging_context.log(f"Available dashboards: {[dash['name'] for dash in available_dashboards]}")
if dashboards_to_install:
self.logging_context.log(f"New dashboards to install: {[dash['name'] for dash in dashboards_to_install]}")
for dashboard in dashboards_to_install:
dashboard_id = await self.post_dashboard(dt_api, dashboard["path"], dashboard["name"], False)
if dashboard_id:
try:
response = await dt_api.call("PUT", f"/api/config/v1/dashboards/{dashboard_id}/shareSettings",
{"id": f"{dashboard_id}",
"published": "true",
"enabled": "true",
"publicAccess": {
"managementZoneIds": [],
"urls": {}
}, "permissions": [
{"type": "ALL", "permission": "VIEW"}
]
}
)
response.raise_for_status()
except Exception as e:
self.logging_context.log(
f"Unable to apply permissions for dashboard {dashboard_id}, details: {e}")
else:
self.logging_context.log(
f"All dashboards already installed, skipping. (if you wish to upgrade a dashboard, please delete it first)")
async def import_alerts(self, dt_api: DtApi):
existing_alerts = await self.get_existing_alerts(dt_api)
available_alerts = self.get_ext_resources("alerts", "path", lambda x: {
"id": x.get("id", ""), "name": x.get("name", "")})
alerts_to_install = [alert for alert in available_alerts if alert["id"] not in existing_alerts]
self.logging_context.log(f"Available alerts: {[alert['name'] for alert in available_alerts]}")
if alerts_to_install:
self.logging_context.log(f"New alerts to install: {[alert['name'] for alert in alerts_to_install]}")
for alert in alerts_to_install:
try:
with open(alert["path"], encoding="utf-8") as alert_file:
alert_json = json.load(alert_file)
response = await dt_api.call("PUT", f"/api/config/v1/anomalyDetection/metricEvents/{alert['id']}", alert_json)
response.raise_for_status()
self.logging_context.log(f"Installed alert {alert['name']}")
except Exception as e:
self.logging_context.log(
f"Unable to install alert {alert['name']}, details: {e}")
else:
self.logging_context.log(
f"All alerts already installed, skipping. (if you wish to upgrade an alert, please delete it first)")
async def _init_(self):
dynatrace_url = await fetch_dynatrace_url(self.gcp_session, "", "")
self.logging_context.log(f"Using Dynatrace endpoint: {dynatrace_url}")
dynatrace_access_key = await fetch_dynatrace_api_key(self.gcp_session,"", "")
dt_api = self.DtApi(self.dt_session, dynatrace_url, dynatrace_access_key)
scopes = await get_dynatrace_token_metadata(self.dt_session, self.logging_context, dynatrace_url, dynatrace_access_key)
has_write_config_permission = any(s in scopes.get('scopes', []) for s in ["WriteConfig", "ReadConfig"])
if not has_write_config_permission:
self.logging_context.log("Missing ReadConfig/WriteConfig permission for Dynatrace API token, skipping dashboards configuration")
else:
dashboards_import = os.environ.get("IMPORT_DASHBOARDS", "yes").lower() != "no"
alerts_import = os.environ.get("IMPORT_ALERTS", "yes").lower() != "no"
if dashboards_import:
await self.import_dashboards(dt_api)
else:
self.logging_context.log("Dashboards import disabled")
if alerts_import:
await self.import_alerts(dt_api)
else:
self.logging_context.log("Alerts import disabled")
def __await__(self):
return self._init_().__await__()
|
[] |
[] |
[
"IMPORT_DASHBOARDS",
"IMPORT_ALERTS",
"GCP_SERVICES"
] |
[]
|
["IMPORT_DASHBOARDS", "IMPORT_ALERTS", "GCP_SERVICES"]
|
python
| 3 | 0 | |
plugins/callbacks.py
|
import os
import ast
from pyrogram import Client as trojanz
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
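# Any non-empty WEBHOOK value selects the sample config; the variable is only checked for truthiness.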
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from script import Script
from database.filters_mdb import del_all, find_filter
from database.connections_mdb import(
all_connections,
active_connection,
if_active,
delete_connection,
make_active,
make_inactive
)
@trojanz.on_callback_query()
async def cb_handler(client, query):
if query.data == "start_data":
await query.answer()
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Command Help", callback_data="help_data")
]
]
)
await query.message.edit_text(
Script.START_MSG.format(query.from_user.mention),
reply_markup=keyboard,
disable_web_page_preview=True
)
return
elif query.data == "help_data":
await query.answer()
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton("How to Deploy?", url="https://youtu.be/hkmc3e7U7R4"),
InlineKeyboardButton("About Me", callback_data="about_data")
],
[
InlineKeyboardButton("BOT Channel", url="https://t.me/ciemahub182"),
InlineKeyboardButton("Support Group", url="https://t.me/ciemahub182")
]
]
)
await query.message.edit_text(
Script.HELP_MSG,
reply_markup=keyboard,
disable_web_page_preview=True
)
return
elif query.data == "about_data":
await query.answer()
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
"SOURCE CODE", url="https://github.com/Anandhu123555?tab=repositories")
],
[
InlineKeyboardButton("BACK", callback_data="help_data"),
InlineKeyboardButton("CLOSE", callback_data="close_data"),
]
]
)
await query.message.edit_text(
Script.ABOUT_MSG,
reply_markup=keyboard,
disable_web_page_preview=True
)
return
elif query.data == "close_data":
await query.message.delete()
elif query.data == "delallconfirm":
userid = query.from_user.id
chat_type = query.message.chat.type
if chat_type == "private":
grpid = await active_connection(str(userid))
if grpid is not None:
grp_id = grpid
try:
chat = await client.get_chat(grpid)
title = chat.title
except:
await query.message.edit_text("Make sure I'm present in your group!!", quote=True)
return
else:
await query.message.edit_text(
"I'm not connected to any groups!\nCheck /connections or connect to any groups",
quote=True
)
return
elif (chat_type == "group") or (chat_type == "supergroup"):
grp_id = query.message.chat.id
title = query.message.chat.title
else:
return
st = await client.get_chat_member(grp_id, userid)
if (st.status == "creator") or (str(userid) in Config.AUTH_USERS):
await del_all(query.message, grp_id, title)
else:
await query.answer("You need to be Group Owner or an Auth User to do that!",show_alert=True)
elif query.data == "delallcancel":
userid = query.from_user.id
chat_type = query.message.chat.type
if chat_type == "private":
await query.message.reply_to_message.delete()
await query.message.delete()
elif (chat_type == "group") or (chat_type == "supergroup"):
grp_id = query.message.chat.id
st = await client.get_chat_member(grp_id, userid)
if (st.status == "creator") or (str(userid) in Config.AUTH_USERS):
await query.message.delete()
try:
await query.message.reply_to_message.delete()
except:
pass
else:
await query.answer("Thats not for you!!",show_alert=True)
elif "groupcb" in query.data:
await query.answer()
group_id = query.data.split(":")[1]
title = query.data.split(":")[2]
act = query.data.split(":")[3]
user_id = query.from_user.id
if act == "":
stat = "CONNECT"
cb = "connectcb"
else:
stat = "DISCONNECT"
cb = "disconnect"
keyboard = InlineKeyboardMarkup([
[InlineKeyboardButton(f"{stat}", callback_data=f"{cb}:{group_id}:{title}"),
InlineKeyboardButton("DELETE", callback_data=f"deletecb:{group_id}")],
[InlineKeyboardButton("BACK", callback_data="backcb")]
])
await query.message.edit_text(
f"Group Name : **{title}**\nGroup ID : `{group_id}`",
reply_markup=keyboard,
parse_mode="md"
)
return
elif "connectcb" in query.data:
await query.answer()
group_id = query.data.split(":")[1]
title = query.data.split(":")[2]
user_id = query.from_user.id
mkact = await make_active(str(user_id), str(group_id))
if mkact:
await query.message.edit_text(
f"Connected to **{title}**",
parse_mode="md"
)
return
else:
await query.message.edit_text(
f"Some error occured!!",
parse_mode="md"
)
return
elif "disconnect" in query.data:
await query.answer()
title = query.data.split(":")[2]
user_id = query.from_user.id
mkinact = await make_inactive(str(user_id))
if mkinact:
await query.message.edit_text(
f"Disconnected from **{title}**",
parse_mode="md"
)
return
else:
await query.message.edit_text(
f"Some error occured!!",
parse_mode="md"
)
return
elif "deletecb" in query.data:
await query.answer()
user_id = query.from_user.id
group_id = query.data.split(":")[1]
delcon = await delete_connection(str(user_id), str(group_id))
if delcon:
await query.message.edit_text(
"Successfully deleted connection"
)
return
else:
await query.message.edit_text(
f"Some error occured!!",
parse_mode="md"
)
return
elif query.data == "backcb":
await query.answer()
userid = query.from_user.id
groupids = await all_connections(str(userid))
if groupids is None:
await query.message.edit_text(
"There are no active connections!! Connect to some groups first.",
)
return
buttons = []
for groupid in groupids:
try:
ttl = await client.get_chat(int(groupid))
title = ttl.title
active = await if_active(str(userid), str(groupid))
if active:
act = " - ACTIVE"
else:
act = ""
buttons.append(
[
InlineKeyboardButton(
text=f"{title}{act}", callback_data=f"groupcb:{groupid}:{title}:{act}"
)
]
)
except:
pass
if buttons:
await query.message.edit_text(
"Your connected group details ;\n\n",
reply_markup=InlineKeyboardMarkup(buttons)
)
elif "alertmessage" in query.data:
grp_id = query.message.chat.id
i = query.data.split(":")[1]
keyword = query.data.split(":")[2]
reply_text, btn, alerts, fileid = await find_filter(grp_id, keyword)
if alerts is not None:
alerts = ast.literal_eval(alerts)
alert = alerts[int(i)]
alert = alert.replace("\\n", "\n").replace("\\t", "\t")
await query.answer(alert,show_alert=True)
|
[] |
[] |
[
"WEBHOOK"
] |
[]
|
["WEBHOOK"]
|
python
| 1 | 0 | |
e2e_tests/tests/experiment/experiment.py
|
import datetime
import logging
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import time
from typing import Any, Dict, List, Optional
import dateutil.parser
import pytest
import requests
from determined import experimental
from determined.common import api, yaml
from determined.common.api import authentication, certs
from tests import config as conf
from tests.cluster import utils as cluster_utils
def maybe_create_native_experiment(context_dir: str, command: List[str]) -> Optional[int]:
target_env = os.environ.copy()
target_env["DET_MASTER"] = conf.make_master_url()
with subprocess.Popen(
command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=context_dir, env=target_env
) as p:
assert p.stdout is not None
for line in p.stdout:
m = re.search(r"Created experiment (\d+)\n", line.decode())
if m is not None:
return int(m.group(1))
return None
def create_native_experiment(context_dir: str, command: List[str]) -> int:
experiment_id = maybe_create_native_experiment(context_dir, command)
if experiment_id is None:
pytest.fail(f"Failed to create experiment in {context_dir}: {command}")
return experiment_id
def maybe_create_experiment(
config_file: str, model_def_file: str, create_args: Optional[List[str]] = None
) -> subprocess.CompletedProcess:
command = [
"det",
"-m",
conf.make_master_url(),
"experiment",
"create",
config_file,
model_def_file,
]
if create_args is not None:
command += create_args
env = os.environ.copy()
env["DET_DEBUG"] = "true"
return subprocess.run(
command, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
)
def create_experiment(
config_file: str, model_def_file: str, create_args: Optional[List[str]] = None
) -> int:
completed_process = maybe_create_experiment(config_file, model_def_file, create_args)
assert completed_process.returncode == 0, "\nstdout:\n{} \nstderr:\n{}".format(
completed_process.stdout, completed_process.stderr
)
m = re.search(r"Created experiment (\d+)\n", str(completed_process.stdout))
assert m is not None
return int(m.group(1))
def pause_experiment(experiment_id: int) -> None:
command = ["det", "-m", conf.make_master_url(), "experiment", "pause", str(experiment_id)]
subprocess.check_call(command)
def activate_experiment(experiment_id: int) -> None:
command = ["det", "-m", conf.make_master_url(), "experiment", "activate", str(experiment_id)]
subprocess.check_call(command)
def change_experiment_state(experiment_id: int, new_state: str) -> None:
# TODO(DET-5678): refactor tests to not use cli singleton auth.
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.patch(
conf.make_master_url(),
"experiments/{}".format(experiment_id),
headers={"Content-Type": "application/merge-patch+json"},
json={"state": new_state},
)
assert r.status_code == requests.codes.no_content, r.text
def cancel_experiment(experiment_id: int) -> None:
change_experiment_state(experiment_id, "STOPPING_CANCELED")
# We may never observe the STOPPING_CANCELED state.
wait_for_experiment_state(experiment_id, "CANCELED")
def cancel_experiment_v1(experiment_id: int) -> None:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.post(conf.make_master_url(), "/api/v1/experiments/{}/cancel".format(experiment_id))
r.raise_for_status()
wait_for_experiment_state(experiment_id, "CANCELED")
def wait_for_experiment_state(
experiment_id: int,
target_state: str,
max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
log_every: int = 60,
) -> None:
for seconds_waited in range(max_wait_secs):
try:
state = experiment_state(experiment_id)
# Ignore network errors while polling for experiment state to avoid a
# single network flake to cause a test suite failure. If the master is
# unreachable multiple times, this test will fail after max_wait_secs.
except api.errors.MasterNotFoundException:
logging.warning(
"Network failure ignored when polling for state of "
"experiment {}".format(experiment_id)
)
time.sleep(1)
continue
if state == target_state:
return
if is_terminal_state(state):
if state != target_state:
report_failed_experiment(experiment_id)
pytest.fail(
f"Experiment {experiment_id} terminated in {state} state, expected {target_state}"
)
if seconds_waited > 0 and seconds_waited % log_every == 0:
print(
f"Waited {seconds_waited} seconds for experiment {experiment_id} "
f"(currently {state}) to reach {target_state}"
)
time.sleep(1)
else:
if target_state == "COMPLETED":
cancel_experiment(experiment_id)
report_failed_experiment(experiment_id)
pytest.fail(
"Experiment did not reach target state {} after {} seconds".format(
target_state, max_wait_secs
)
)
def experiment_has_active_workload(experiment_id: int) -> bool:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "tasks").json()
for task in r.values():
if "Experiment {}".format(experiment_id) in task["name"] and len(task["containers"]) > 0:
return True
return False
def wait_for_experiment_active_workload(
experiment_id: int, max_ticks: int = conf.MAX_TASK_SCHEDULED_SECS
) -> None:
for _ in range(conf.MAX_TASK_SCHEDULED_SECS):
if experiment_has_active_workload(experiment_id):
return
time.sleep(1)
pytest.fail(
f"The only trial cannot be scheduled within {max_ticks} seconds.",
)
def wait_for_experiment_workload_progress(
experiment_id: int, max_ticks: int = conf.MAX_TRIAL_BUILD_SECS
) -> None:
for _ in range(conf.MAX_TRIAL_BUILD_SECS):
trials = experiment_trials(experiment_id)
if len(trials) > 0:
only_trial = trials[0]
if len(only_trial["steps"]) > 1:
return
time.sleep(1)
pytest.fail(
f"Trial cannot finish first workload within {max_ticks} seconds.",
)
def experiment_has_completed_workload(experiment_id: int) -> bool:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
trials = experiment_trials(experiment_id)
if not any(trials):
return False
return any(any(s["state"] == "COMPLETED" for s in t["steps"]) for t in trials)
def experiment_json(experiment_id: int) -> Dict[str, Any]:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "experiments/{}".format(experiment_id))
assert r.status_code == requests.codes.ok, r.text
json = r.json() # type: Dict[str, Any]
return json
def experiment_state(experiment_id: int) -> str:
state = experiment_json(experiment_id)["state"] # type: str
return state
def experiment_trials(experiment_id: int) -> List[Dict[str, Any]]:
trials = experiment_json(experiment_id)["trials"] # type: List[Dict[str, Any]]
return trials
def num_experiments() -> int:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "experiments")
assert r.status_code == requests.codes.ok, r.text
return len(r.json())
def cancel_single(experiment_id: int, should_have_trial: bool = False) -> None:
cancel_experiment(experiment_id)
trials = experiment_trials(experiment_id)
if should_have_trial or len(trials) > 0:
assert len(trials) == 1
trial = trials[0]
assert trial["state"] == "CANCELED"
def cancel_single_v1(experiment_id: int, should_have_trial: bool = False) -> None:
cancel_experiment_v1(experiment_id)
trials = experiment_trials(experiment_id)
if should_have_trial or len(trials) > 0:
assert len(trials) == 1
trial = trials[0]
assert trial["state"] == "CANCELED"
def is_terminal_state(state: str) -> bool:
return state in ("CANCELED", "COMPLETED", "ERROR")
def trial_metrics(trial_id: int) -> Dict[str, Any]:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "trials/{}/metrics".format(trial_id))
assert r.status_code == requests.codes.ok, r.text
json = r.json() # type: Dict[str, Any]
return json
def get_flat_metrics(trial_id: int, metric: str) -> List:
full_trial_metrics = trial_metrics(trial_id)
metrics = [m for step in full_trial_metrics["steps"] for m in step["metrics"]["batch_metrics"]]
return [v[metric] for v in metrics]
def num_trials(experiment_id: int) -> int:
return len(experiment_trials(experiment_id))
def num_active_trials(experiment_id: int) -> int:
return sum(1 if t["state"] == "ACTIVE" else 0 for t in experiment_trials(experiment_id))
def num_completed_trials(experiment_id: int) -> int:
return sum(1 if t["state"] == "COMPLETED" else 0 for t in experiment_trials(experiment_id))
def num_error_trials(experiment_id: int) -> int:
return sum(1 if t["state"] == "ERROR" else 0 for t in experiment_trials(experiment_id))
def trial_logs(trial_id: int) -> List[str]:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
return [tl["message"] for tl in api.trial_logs(conf.make_master_url(), trial_id)]
def check_if_string_present_in_trial_logs(trial_id: int, target_string: str) -> bool:
logs = trial_logs(trial_id)
for log_line in logs:
if target_string in log_line:
return True
return False
def assert_equivalent_trials(A: int, B: int, validation_metrics: List[str]) -> None:
full_trial_metrics1 = trial_metrics(A)
full_trial_metrics2 = trial_metrics(B)
assert len(full_trial_metrics1["steps"]) == len(full_trial_metrics2["steps"])
for step1, step2 in zip(full_trial_metrics1["steps"], full_trial_metrics2["steps"]):
metric1 = step1["metrics"]["batch_metrics"]
metric2 = step2["metrics"]["batch_metrics"]
for batch1, batch2 in zip(metric1, metric2):
assert len(batch1) == len(batch2) == 2
assert batch1["loss"] == pytest.approx(batch2["loss"])
if step1["validation"] is not None or step2["validation"] is not None:
assert step1["validation"] is not None
assert step2["validation"] is not None
for metric in validation_metrics:
val1 = step1.get("validation").get("metrics").get("validation_metrics").get(metric)
val2 = step2.get("validation").get("metrics").get("validation_metrics").get(metric)
assert val1 == pytest.approx(val2)
def assert_performed_initial_validation(exp_id: int) -> None:
trials = experiment_trials(exp_id)
assert len(trials) > 0
steps = trials[0]["steps"]
assert len(steps) > 0
zeroth_step = steps[0]
assert zeroth_step["validation"] is not None
assert zeroth_step["validation"]["total_batches"] == 0
assert zeroth_step["validation"]["state"] == "COMPLETED"
def assert_performed_final_checkpoint(exp_id: int) -> None:
trials = experiment_trials(exp_id)
assert len(trials) > 0
steps = trials[0]["steps"]
assert len(steps) > 0
last_step = steps[-1]
assert last_step["checkpoint"] is not None
assert last_step["checkpoint"]["state"] == "COMPLETED"
def run_describe_cli_tests(experiment_id: int) -> None:
"""
Runs `det experiment describe` CLI command on a finished
experiment. Will raise an exception if `det experiment describe`
encounters a traceback failure.
"""
# "det experiment describe" without metrics.
with tempfile.TemporaryDirectory() as tmpdir:
subprocess.check_call(
[
"det",
"-m",
conf.make_master_url(),
"experiment",
"describe",
str(experiment_id),
"--outdir",
tmpdir,
]
)
assert os.path.exists(os.path.join(tmpdir, "experiments.csv"))
assert os.path.exists(os.path.join(tmpdir, "workloads.csv"))
assert os.path.exists(os.path.join(tmpdir, "trials.csv"))
# "det experiment describe" with metrics.
with tempfile.TemporaryDirectory() as tmpdir:
subprocess.check_call(
[
"det",
"-m",
conf.make_master_url(),
"experiment",
"describe",
str(experiment_id),
"--metrics",
"--outdir",
tmpdir,
]
)
assert os.path.exists(os.path.join(tmpdir, "experiments.csv"))
assert os.path.exists(os.path.join(tmpdir, "workloads.csv"))
assert os.path.exists(os.path.join(tmpdir, "trials.csv"))
def run_list_cli_tests(experiment_id: int) -> None:
"""
Runs list-related CLI commands on a finished experiment. Will raise an
exception if the CLI command encounters a traceback failure.
"""
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "list-trials", str(experiment_id)]
)
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "list-checkpoints", str(experiment_id)]
)
subprocess.check_call(
[
"det",
"-m",
conf.make_master_url(),
"experiment",
"list-checkpoints",
"--best",
str(1),
str(experiment_id),
]
)
def report_failed_experiment(experiment_id: int) -> None:
trials = experiment_trials(experiment_id)
active = sum(1 for t in trials if t["state"] == "ACTIVE")
paused = sum(1 for t in trials if t["state"] == "PAUSED")
stopping_completed = sum(1 for t in trials if t["state"] == "STOPPING_COMPLETED")
stopping_canceled = sum(1 for t in trials if t["state"] == "STOPPING_CANCELED")
stopping_error = sum(1 for t in trials if t["state"] == "STOPPING_ERROR")
completed = sum(1 for t in trials if t["state"] == "COMPLETED")
canceled = sum(1 for t in trials if t["state"] == "CANCELED")
errored = sum(1 for t in trials if t["state"] == "ERROR")
stopping_killed = sum(1 for t in trials if t["state"] == "STOPPING_KILLED")
print(
f"Experiment {experiment_id}: {len(trials)} trials, {completed} completed, "
f"{active} active, {paused} paused, {stopping_completed} stopping-completed, "
f"{stopping_canceled} stopping-canceled, {stopping_error} stopping-error, "
f"{stopping_killed} stopping-killed, {canceled} canceled, {errored} errored",
file=sys.stderr,
)
for trial in trials:
print_trial_logs(trial["id"])
def report_failed_trial(trial_id: int, state: str) -> None:
print(f"Trial {trial_id} was not COMPLETED but {state}", file=sys.stderr)
print_trial_logs(trial_id)
def print_trial_logs(trial_id: int) -> None:
print("******** Start of logs for trial {} ********".format(trial_id), file=sys.stderr)
print("".join(trial_logs(trial_id)), file=sys.stderr)
print("******** End of logs for trial {} ********".format(trial_id), file=sys.stderr)
def run_basic_test(
config_file: str,
model_def_file: str,
expected_trials: Optional[int],
create_args: Optional[List[str]] = None,
max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
) -> int:
assert os.path.isdir(model_def_file)
experiment_id = create_experiment(config_file, model_def_file, create_args)
wait_for_experiment_state(experiment_id, "COMPLETED", max_wait_secs=max_wait_secs)
assert num_active_trials(experiment_id) == 0
verify_completed_experiment_metadata(experiment_id, expected_trials)
return experiment_id
def verify_completed_experiment_metadata(
experiment_id: int, num_expected_trials: Optional[int]
) -> None:
# If `expected_trials` is None, the expected number of trials is
# non-deterministic.
if num_expected_trials is not None:
assert num_trials(experiment_id) == num_expected_trials
assert num_completed_trials(experiment_id) == num_expected_trials
# Check that every trial and step is COMPLETED.
trials = experiment_trials(experiment_id)
assert len(trials) > 0
for trial in trials:
if trial["state"] != "COMPLETED":
report_failed_trial(trial["id"], trial["state"])
pytest.fail(f"Trial {trial['id']} was not COMPLETED but {trial['state']}")
assert len(trial["steps"]) > 0
# Check that batches appear in increasing order.
batch_ids = [s["total_batches"] for s in trial["steps"]]
assert all(x <= y for x, y in zip(batch_ids, batch_ids[1:]))
for step in trial["steps"]:
assert step["state"] == "COMPLETED"
if step["validation"]:
validation = step["validation"]
assert validation["state"] == "COMPLETED"
if step["checkpoint"]:
checkpoint = step["checkpoint"]
assert checkpoint["state"] in {"COMPLETED", "DELETED"}
# The last step of every trial should have a checkpoint.
for trial in trials:
last_step = trial["steps"][-1]
assert last_step["checkpoint"]
# When the experiment completes, all slots should now be free. This
# requires terminating the experiment's last container, which might
# take some time.
max_secs_to_free_slots = 30
for _ in range(max_secs_to_free_slots):
if cluster_utils.num_free_slots() == cluster_utils.num_slots():
break
time.sleep(1)
else:
raise AssertionError("Slots failed to free after experiment {}".format(experiment_id))
# Run a series of CLI tests on the finished experiment, to sanity check
# that basic CLI commands don't raise errors.
run_describe_cli_tests(experiment_id)
run_list_cli_tests(experiment_id)
# Use Determined to run an experiment that we expect to fail.
def run_failure_test(
config_file: str, model_def_file: str, error_str: Optional[str] = None
) -> None:
experiment_id = create_experiment(config_file, model_def_file)
wait_for_experiment_state(experiment_id, "ERROR")
# The searcher is configured with a `max_trials` of 8. Since the
# first step of each trial results in an error, there should be no
# completed trials.
#
# Most of the trials should result in ERROR, but depending on that
# seems fragile: if we support task preemption in the future, we
# might start a trial but cancel it before we hit the error in the
# model definition.
assert num_active_trials(experiment_id) == 0
assert num_completed_trials(experiment_id) == 0
assert num_error_trials(experiment_id) >= 1
# For each failed trial, check for the expected error in the logs.
trials = experiment_trials(experiment_id)
for t in trials:
if t["state"] != "ERROR":
continue
trial_id = t["id"]
logs = trial_logs(trial_id)
if error_str is not None:
assert any(error_str in line for line in logs)
def get_validation_metric_from_last_step(
experiment_id: int, trial_id: int, validation_metric_name: str
) -> float:
trial = experiment_trials(experiment_id)[trial_id]
last_validation = trial["steps"][len(trial["steps"]) - 1]["validation"]
return last_validation["metrics"]["validation_metrics"][validation_metric_name] # type: ignore
class ExperimentDurations:
def __init__(
self,
experiment_duration: datetime.timedelta,
training_duration: datetime.timedelta,
validation_duration: datetime.timedelta,
checkpoint_duration: datetime.timedelta,
):
self.experiment_duration = experiment_duration
self.training_duration = training_duration
self.validation_duration = validation_duration
self.checkpoint_duration = checkpoint_duration
def __str__(self) -> str:
duration_strs = []
duration_strs.append(f"experiment duration: {self.experiment_duration}")
duration_strs.append(f"training duration: {self.training_duration}")
duration_strs.append(f"validation duration: {self.validation_duration}")
duration_strs.append(f"checkpoint duration: {self.checkpoint_duration}")
return "\n".join(duration_strs)
def get_experiment_durations(experiment_id: int, trial_idx: int) -> ExperimentDurations:
experiment_metadata = experiment_json(experiment_id)
end_time = dateutil.parser.parse(experiment_metadata["end_time"])
start_time = dateutil.parser.parse(experiment_metadata["start_time"])
experiment_duration = end_time - start_time
training_duration = datetime.timedelta(seconds=0)
validation_duration = datetime.timedelta(seconds=0)
checkpoint_duration = datetime.timedelta(seconds=0)
for step in experiment_metadata["trials"][trial_idx]["steps"]:
end_time = dateutil.parser.parse(step["end_time"])
start_time = dateutil.parser.parse(step["start_time"])
training_duration += end_time - start_time
if "validation" in step and step["validation"]:
end_time = dateutil.parser.parse(step["validation"]["end_time"])
start_time = dateutil.parser.parse(step["validation"]["start_time"])
validation_duration += end_time - start_time
if "checkpoint" in step and step["checkpoint"]:
end_time = dateutil.parser.parse(step["checkpoint"]["end_time"])
start_time = dateutil.parser.parse(step["checkpoint"]["start_time"])
checkpoint_duration += end_time - start_time
return ExperimentDurations(
experiment_duration, training_duration, validation_duration, checkpoint_duration
)
def run_basic_test_with_temp_config(
config: Dict[Any, Any],
model_def_path: str,
expected_trials: Optional[int],
create_args: Optional[List[str]] = None,
max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
) -> int:
with tempfile.NamedTemporaryFile() as tf:
with open(tf.name, "w") as f:
yaml.dump(config, f)
experiment_id = run_basic_test(
tf.name,
model_def_path,
expected_trials,
create_args,
max_wait_secs=max_wait_secs,
)
return experiment_id
def run_failure_test_with_temp_config(
config: Dict[Any, Any],
model_def_path: str,
error_str: Optional[str] = None,
) -> None:
with tempfile.NamedTemporaryFile() as tf:
with open(tf.name, "w") as f:
yaml.dump(config, f)
run_failure_test(tf.name, model_def_path, error_str=error_str)
def shared_fs_checkpoint_config() -> Dict[str, str]:
return {
"type": "shared_fs",
"host_path": "/tmp",
"storage_path": "determined-integration-checkpoints",
}
def s3_checkpoint_config(secrets: Dict[str, str], prefix: Optional[str] = None) -> Dict[str, str]:
config_dict = {
"type": "s3",
"access_key": secrets["INTEGRATIONS_S3_ACCESS_KEY"],
"secret_key": secrets["INTEGRATIONS_S3_SECRET_KEY"],
"bucket": secrets["INTEGRATIONS_S3_BUCKET"],
}
if prefix is not None:
config_dict["prefix"] = prefix
return config_dict
def s3_checkpoint_config_no_creds() -> Dict[str, str]:
return {"type": "s3", "bucket": "determined-ai-examples"}
def root_user_home_bind_mount() -> Dict[str, str]:
return {"host_path": "/tmp", "container_path": "/root"}
def _export_and_load_model(experiment_id: int, master_url: str) -> None:
experimental.Determined(master_url).get_experiment(experiment_id).top_checkpoint().load()
def export_and_load_model(experiment_id: int) -> None:
# We run this in a subprocess to avoid module name collisions
# when performing checkpoint export of different models.
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(
target=_export_and_load_model,
args=(
experiment_id,
conf.make_master_url(),
),
)
p.start()
p.join()
assert p.exitcode == 0, p.exitcode
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
unifipoller/start.go
|
package unifipoller
import (
"crypto/tls"
"fmt"
"log"
"os"
"strings"
"time"
influx "github.com/influxdata/influxdb1-client/v2"
"github.com/spf13/pflag"
"golift.io/unifi"
)
// Start begins the application from a CLI.
// Parses flags, parses config and executes Run().
func Start() error {
log.SetFlags(log.LstdFlags)
up := &UnifiPoller{
Flag: &Flag{},
Config: &Config{
// Preload our defaults.
InfluxURL: defaultInfluxURL,
InfluxUser: defaultInfluxUser,
InfluxPass: defaultInfluxPass,
InfluxDB: defaultInfluxDB,
UnifiUser: defaultUnifiUser,
UnifiPass: os.Getenv("UNIFI_PASSWORD"), // deprecated name.
UnifiBase: defaultUnifiURL,
Interval: Duration{defaultInterval},
Sites: []string{"all"},
}}
up.Flag.Parse(os.Args[1:])
if up.Flag.ShowVer {
fmt.Printf("unifi-poller v%s\n", Version)
return nil // don't run anything else w/ version request.
}
if up.Flag.DumpJSON == "" { // do not print this when dumping JSON.
up.Logf("Loading Configuration File: %s", up.Flag.ConfigFile)
}
// Parse config file.
if err := up.Config.ParseFile(up.Flag.ConfigFile); err != nil {
up.Flag.Usage()
return err
}
// Update Config with ENV variable overrides.
if err := up.Config.ParseENV(); err != nil {
return err
}
return up.Run()
}
// Parse turns CLI arguments into data structures. Called by Start() on startup.
func (f *Flag) Parse(args []string) {
f.FlagSet = pflag.NewFlagSet("unifi-poller", pflag.ExitOnError)
f.Usage = func() {
fmt.Println("Usage: unifi-poller [--config=/path/to/up.conf] [--version]")
f.PrintDefaults()
}
f.StringVarP(&f.DumpJSON, "dumpjson", "j", "",
"This debug option prints a json payload and exits. See man page for more info.")
f.StringVarP(&f.ConfigFile, "config", "c", DefaultConfFile, "Poller config file path.")
f.BoolVarP(&f.ShowVer, "version", "v", false, "Print the version and exit.")
_ = f.FlagSet.Parse(args) // pflag.ExitOnError means this will never return error.
}
// Run invokes all the application logic and routines.
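// In "influxlambda" mode (and its aliases) a single collect-and-report pass is performed and
// Run returns; in the default mode the poller keeps running via PollController.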
func (u *UnifiPoller) Run() (err error) {
if u.Flag.DumpJSON != "" {
return u.DumpJSONPayload()
}
if u.Config.Debug {
log.SetFlags(log.Lshortfile | log.Lmicroseconds | log.Ldate)
u.LogDebugf("Debug Logging Enabled")
}
log.Printf("[INFO] UniFi Poller v%v Starting Up! PID: %d", Version, os.Getpid())
if err = u.GetUnifi(); err != nil {
return err
}
u.Logf("Polling UniFi Controller at %s v%s as user %s. Sites: %v",
u.Config.UnifiBase, u.Unifi.ServerVersion, u.Config.UnifiUser, u.Config.Sites)
if err = u.GetInfluxDB(); err != nil {
return err
}
u.Logf("Logging Measurements to InfluxDB at %s as user %s", u.Config.InfluxURL, u.Config.InfluxUser)
switch strings.ToLower(u.Config.Mode) {
case "influxlambda", "lambdainflux", "lambda_influx", "influx_lambda":
u.LogDebugf("Lambda Mode Enabled")
u.LastCheck = time.Now()
return u.CollectAndReport()
default:
return u.PollController()
}
}
// GetInfluxDB returns an InfluxDB interface.
func (u *UnifiPoller) GetInfluxDB() (err error) {
u.Influx, err = influx.NewHTTPClient(influx.HTTPConfig{
Addr: u.Config.InfluxURL,
Username: u.Config.InfluxUser,
Password: u.Config.InfluxPass,
TLSConfig: &tls.Config{InsecureSkipVerify: u.Config.InfxBadSSL},
})
if err != nil {
return fmt.Errorf("influxdb: %v", err)
}
return nil
}
// GetUnifi returns a UniFi controller interface.
func (u *UnifiPoller) GetUnifi() (err error) {
// Create an authenticated session to the Unifi Controller.
u.Unifi, err = unifi.NewUnifi(&unifi.Config{
User: u.Config.UnifiUser,
Pass: u.Config.UnifiPass,
URL: u.Config.UnifiBase,
VerifySSL: u.Config.VerifySSL,
ErrorLog: u.LogErrorf, // Log all errors.
DebugLog: u.LogDebugf, // Log debug messages.
})
if err != nil {
return fmt.Errorf("unifi controller: %v", err)
}
u.LogDebugf("Authenticated with controller successfully")
return u.CheckSites()
}
|
[
"\"UNIFI_PASSWORD\""
] |
[] |
[
"UNIFI_PASSWORD"
] |
[]
|
["UNIFI_PASSWORD"]
|
go
| 1 | 0 | |
mysql/mysql_test.go
|
package mysql
import (
"database/sql"
"os"
"testing"
"time"
"github.com/gofiber/utils"
)
var testStore = New(Config{
Database: os.Getenv("MYSQL_DATABASE"),
Username: os.Getenv("MYSQL_USERNAME"),
Password: os.Getenv("MYSQL_PASSWORD"),
Reset: true,
})
func Test_MYSQL_Set(t *testing.T) {
var (
key = "john"
val = []byte("doe")
)
err := testStore.Set(key, val, 0)
utils.AssertEqual(t, nil, err)
}
func Test_MYSQL_Set_Override(t *testing.T) {
var (
key = "john"
val = []byte("doe")
)
err := testStore.Set(key, val, 0)
utils.AssertEqual(t, nil, err)
err = testStore.Set(key, val, 0)
utils.AssertEqual(t, nil, err)
}
func Test_MYSQL_Get(t *testing.T) {
var (
key = "john"
val = []byte("doe")
)
err := testStore.Set(key, val, 0)
utils.AssertEqual(t, nil, err)
result, err := testStore.Get(key)
utils.AssertEqual(t, nil, err)
utils.AssertEqual(t, val, result)
}
func Test_MYSQL_Set_Expiration(t *testing.T) {
var (
key = "john"
val = []byte("doe")
exp = 1 * time.Second
)
err := testStore.Set(key, val, exp)
utils.AssertEqual(t, nil, err)
time.Sleep(1100 * time.Millisecond)
}
func Test_MYSQL_Get_Expired(t *testing.T) {
var (
key = "john"
)
result, err := testStore.Get(key)
utils.AssertEqual(t, nil, err)
utils.AssertEqual(t, true, len(result) == 0)
}
func Test_MYSQL_Get_NotExist(t *testing.T) {
result, err := testStore.Get("notexist")
utils.AssertEqual(t, nil, err)
utils.AssertEqual(t, true, len(result) == 0)
}
func Test_MYSQL_Delete(t *testing.T) {
var (
key = "john"
val = []byte("doe")
)
err := testStore.Set(key, val, 0)
utils.AssertEqual(t, nil, err)
err = testStore.Delete(key)
utils.AssertEqual(t, nil, err)
result, err := testStore.Get(key)
utils.AssertEqual(t, nil, err)
utils.AssertEqual(t, true, len(result) == 0)
}
func Test_MYSQL_Reset(t *testing.T) {
var (
val = []byte("doe")
)
err := testStore.Set("john1", val, 0)
utils.AssertEqual(t, nil, err)
err = testStore.Set("john2", val, 0)
utils.AssertEqual(t, nil, err)
err = testStore.Reset()
utils.AssertEqual(t, nil, err)
result, err := testStore.Get("john1")
utils.AssertEqual(t, nil, err)
utils.AssertEqual(t, true, len(result) == 0)
result, err = testStore.Get("john2")
utils.AssertEqual(t, nil, err)
utils.AssertEqual(t, true, len(result) == 0)
}
func Test_MYSQL_GC(t *testing.T) {
var (
testVal = []byte("doe")
)
// This key should expire
err := testStore.Set("john", testVal, time.Nanosecond)
utils.AssertEqual(t, nil, err)
testStore.gc(time.Now())
row := testStore.db.QueryRow(testStore.sqlSelect, "john")
err = row.Scan(nil, nil)
utils.AssertEqual(t, sql.ErrNoRows, err)
// This key should not expire
err = testStore.Set("john", testVal, 0)
utils.AssertEqual(t, nil, err)
testStore.gc(time.Now())
val, err := testStore.Get("john")
utils.AssertEqual(t, nil, err)
utils.AssertEqual(t, testVal, val)
}
func Test_MYSQL_Non_UTF8(t *testing.T) {
val := []byte("0xF5")
err := testStore.Set("0xF6", val, 0)
utils.AssertEqual(t, nil, err)
result, err := testStore.Get("0xF6")
utils.AssertEqual(t, nil, err)
utils.AssertEqual(t, val, result)
}
func Test_MYSQL_Close(t *testing.T) {
utils.AssertEqual(t, nil, testStore.Close())
}
|
[
"\"MYSQL_DATABASE\"",
"\"MYSQL_USERNAME\"",
"\"MYSQL_PASSWORD\""
] |
[] |
[
"MYSQL_DATABASE",
"MYSQL_PASSWORD",
"MYSQL_USERNAME"
] |
[]
|
["MYSQL_DATABASE", "MYSQL_PASSWORD", "MYSQL_USERNAME"]
|
go
| 3 | 0 | |
Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/testapi/testapi.go
|
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package testapi provides a helper for retrieving the KUBE_TEST_API environment variable.
package testapi
import (
"fmt"
"os"
"strings"
"k8s.io/kubernetes/pkg/api"
_ "k8s.io/kubernetes/pkg/api/install"
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
_ "k8s.io/kubernetes/pkg/apis/metrics/install"
"k8s.io/kubernetes/pkg/api/latest"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/runtime"
)
var (
Groups = make(map[string]TestGroup)
Default TestGroup
Extensions TestGroup
)
type TestGroup struct {
externalGroupVersion unversioned.GroupVersion
internalGroupVersion unversioned.GroupVersion
}
func init() {
kubeTestAPI := os.Getenv("KUBE_TEST_API")
if kubeTestAPI != "" {
testGroupVersions := strings.Split(kubeTestAPI, ",")
for _, gvString := range testGroupVersions {
groupVersion := unversioned.ParseGroupVersionOrDie(gvString)
Groups[groupVersion.Group] = TestGroup{
externalGroupVersion: groupVersion,
internalGroupVersion: unversioned.GroupVersion{Group: groupVersion.Group},
}
}
}
if _, ok := Groups[api.SchemeGroupVersion.Group]; !ok {
Groups[api.SchemeGroupVersion.Group] = TestGroup{
externalGroupVersion: unversioned.GroupVersion{Group: api.SchemeGroupVersion.Group, Version: latest.GroupOrDie(api.SchemeGroupVersion.Group).GroupVersion.Version},
internalGroupVersion: api.SchemeGroupVersion,
}
}
if _, ok := Groups[extensions.SchemeGroupVersion.Group]; !ok {
Groups[extensions.SchemeGroupVersion.Group] = TestGroup{
externalGroupVersion: unversioned.GroupVersion{Group: extensions.SchemeGroupVersion.Group, Version: latest.GroupOrDie(extensions.SchemeGroupVersion.Group).GroupVersion.Version},
internalGroupVersion: extensions.SchemeGroupVersion,
}
}
Default = Groups[api.SchemeGroupVersion.Group]
Extensions = Groups[extensions.SchemeGroupVersion.Group]
}
func (g TestGroup) GroupVersion() *unversioned.GroupVersion {
copyOfGroupVersion := g.externalGroupVersion
return &copyOfGroupVersion
}
// InternalGroupVersion returns the group,version used to identify the internal
// types for this API
func (g TestGroup) InternalGroupVersion() unversioned.GroupVersion {
return g.internalGroupVersion
}
// Codec returns the codec for the API version to test against, as set by the
// KUBE_TEST_API env var.
func (g TestGroup) Codec() runtime.Codec {
// TODO: caesarxuchao: Restructure the body once we have a central `latest`.
interfaces, err := latest.GroupOrDie(g.externalGroupVersion.Group).InterfacesFor(g.externalGroupVersion)
if err != nil {
panic(err)
}
return interfaces.Codec
}
// Converter returns the api.Scheme for the API version to test against, as set by the
// KUBE_TEST_API env var.
func (g TestGroup) Converter() runtime.ObjectConvertor {
interfaces, err := latest.GroupOrDie(g.externalGroupVersion.Group).InterfacesFor(g.externalGroupVersion)
if err != nil {
panic(err)
}
return interfaces.ObjectConvertor
}
// MetadataAccessor returns the MetadataAccessor for the API version to test against,
// as set by the KUBE_TEST_API env var.
func (g TestGroup) MetadataAccessor() meta.MetadataAccessor {
interfaces, err := latest.GroupOrDie(g.externalGroupVersion.Group).InterfacesFor(g.externalGroupVersion)
if err != nil {
panic(err)
}
return interfaces.MetadataAccessor
}
// SelfLink returns a self link that will appear to be for the version Version().
// 'resource' should be the resource path, e.g. "pods" for the Pod type. 'name' should be
// empty for lists.
func (g TestGroup) SelfLink(resource, name string) string {
if g.externalGroupVersion.Group == api.SchemeGroupVersion.Group {
if name == "" {
return fmt.Sprintf("/api/%s/%s", g.externalGroupVersion.Version, resource)
}
return fmt.Sprintf("/api/%s/%s/%s", g.externalGroupVersion.Version, resource, name)
} else {
// TODO: will need a /apis prefix once we have proper multi-group
// support
if name == "" {
return fmt.Sprintf("/apis/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource)
}
return fmt.Sprintf("/apis/%s/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource, name)
}
}
// Returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name.
// For ex, this is of the form:
// /api/v1/watch/namespaces/foo/pods/pod0 for v1.
func (g TestGroup) ResourcePathWithPrefix(prefix, resource, namespace, name string) string {
var path string
if g.externalGroupVersion.Group == api.SchemeGroupVersion.Group {
path = "/api/" + g.externalGroupVersion.Version
} else {
// TODO: switch back once we have proper multiple group support
// path = "/apis/" + g.Group + "/" + Version(group...)
path = "/apis/" + g.externalGroupVersion.Group + "/" + g.externalGroupVersion.Version
}
if prefix != "" {
path = path + "/" + prefix
}
if namespace != "" {
path = path + "/namespaces/" + namespace
}
// Resource names are lower case.
resource = strings.ToLower(resource)
if resource != "" {
path = path + "/" + resource
}
if name != "" {
path = path + "/" + name
}
return path
}
// Returns the appropriate path for the given resource, namespace and name.
// For example, this is of the form:
// /api/v1/namespaces/foo/pods/pod0 for v1.
func (g TestGroup) ResourcePath(resource, namespace, name string) string {
return g.ResourcePathWithPrefix("", resource, namespace, name)
}
func (g TestGroup) RESTMapper() meta.RESTMapper {
return latest.GroupOrDie(g.externalGroupVersion.Group).RESTMapper
}
// Get codec based on runtime.Object
func GetCodecForObject(obj runtime.Object) (runtime.Codec, error) {
kind, err := api.Scheme.ObjectKind(obj)
if err != nil {
return nil, fmt.Errorf("unexpected encoding error: %v", err)
}
for _, group := range Groups {
if group.GroupVersion().Group != kind.Group {
continue
}
if api.Scheme.Recognizes(kind) {
return group.Codec(), nil
}
}
// Codec used for unversioned types
if api.Scheme.Recognizes(kind) {
return api.Codec, nil
}
return nil, fmt.Errorf("unexpected kind: %v", kind)
}
|
[
"\"KUBE_TEST_API\""
] |
[] |
[
"KUBE_TEST_API"
] |
[]
|
["KUBE_TEST_API"]
|
go
| 1 | 0 | |
slu4p/speech_to_text/google_client.py
|
import qi
import os
import sys
import argparse
import requests
import json
import slu_utils
import base64
class GoogleClient(object):
timeout = 20
url = ''
headers = {"Content-Type": "application/json"}
FLAC_COMM = 'flac -f '
busy = False
def __init__(self, language, key_file, app):
super(GoogleClient, self).__init__()
app.start()
session = app.session
self.memory_service = session.service("ALMemory")
keys = slu_utils.lines_to_list(key_file)
self.language = language
key = keys[0]
self.url = "https://speech.googleapis.com/v1/speech:recognize?key=%s" % key
self.subGR = self.memory_service.subscriber("GoogleRequest")
self.idsubGR = self.subGR.signal.connect(self.onGoogleRequest)
def quit(self):
self.subGR.signal.disconnect(self.idsubGR)
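# Triggered by the "GoogleRequest" ALMemory event: converts the recorded WAV to FLAC,
# sends it to the Google Speech API and publishes the transcriptions on "GoogleResponse".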
def onGoogleRequest(self, value):
print "onGoogleRequest:", value
file_path = str(value) + ".wav"
print "busy", self.busy
if not self.busy:
self.busy = True
"""
Convert Wave file into Flac file
"""
if os.path.exists(file_path):
print "file exists"
if os.path.getsize(file_path) > 0:
os.system(self.FLAC_COMM + file_path)
f = open(value + '.flac', 'rb')
flac_cont = f.read()
f.close()
transcriptions = [r.encode('ascii', 'ignore').lower() for r in self.recognize_data(flac_cont)]
self.memory_service.raiseEvent("GoogleResponse", transcriptions)
print transcriptions
self.busy = False
def recognize_file(self, file_path):
try:
print "[" + self.__class__.__name__ + "] [GOOGLE] Recognizing file.."
transcriptions = []
data = open(file_path, "rb").read()
transcriptions = self.recognize_data(data)
return transcriptions
except Exception as e:
print e.message
print "[" + self.__class__.__name__ + "] [RECOGNIZE]ERROR! Returning empty list.."
return []
def recognize_data(self, data):
try:
print "[" + self.__class__.__name__ + "] [GOOGLE] Recognizing data.."
transcriptions = []
base64_data = base64.b64encode(data)
audio_json = {"content": base64_data}
config_json = {"languageCode": self.language}
json_data = {"config": config_json, "audio": audio_json}
response = requests.post(self.url, json=json_data, headers=self.headers, timeout=self.timeout)
json_res = json.loads(response.text)
print json_res
if "results" in json_res.keys() and "alternatives" in json_res["results"][0].keys():
for alternative in json_res["results"][0]["alternatives"]:
transcriptions.append(alternative["transcript"].lower())
return transcriptions
except ValueError as ve:
print ve.message
print "[" + self.__class__.__name__ + "] [RECOGNIZE]ERROR! Google APIs are temporary unavailable. Returning empty list.."
return []
except requests.exceptions.RequestException as e:
print e.message
print "[" + self.__class__.__name__ + "] [RECOGNIZE]ERROR! Unable to reach Google. Returning empty list.."
return []
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--pip", type=str, default=os.environ['PEPPER_IP'],
help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
parser.add_argument("--pport", type=int, default=9559,
help="Naoqi port number")
args = parser.parse_args()
pip = args.pip
pport = args.pport
#Starting application
try:
connection_url = "tcp://" + pip + ":" + str(pport)
app = qi.Application(["google_client", "--qi-url=" + connection_url ])
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + pip + "\" on port " + str(pport) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
gc = GoogleClient(
"en-US",
"resources/cloud_google_keys.txt",
app
)
app.run()
gc.quit()
if __name__ == "__main__":
main()
|
[] |
[] |
[
"PEPPER_IP"
] |
[]
|
["PEPPER_IP"]
|
python
| 1 | 0 | |
bin/push_to_tns.py
|
#!/usr/bin/env python
# Copyright 2020-2022 AstroLab Software
# Author: Julien Peloton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Push early SN candidates to TNS
"""
import argparse
import requests
import os
from fink_broker.parser import getargs
from fink_broker.sparkUtils import init_sparksession, load_parquet_files
from fink_broker.loggingUtils import get_fink_logger, inspect_application
from fink_filters.classification import extract_fink_classification
from fink_tns.utils import read_past_ids, retrieve_groupid
from fink_tns.report import extract_discovery_photometry, build_report
from fink_tns.report import save_logs_and_return_json_report, send_json_report
def main():
parser = argparse.ArgumentParser(description=__doc__)
args = getargs(parser)
# Initialise Spark session
spark = init_sparksession(
name="TNS_report_{}".format(args.night),
shuffle_partitions=2
)
# The level here should be controlled by an argument.
logger = get_fink_logger(spark.sparkContext.appName, args.log_level)
# debug statements
inspect_application(logger)
# Connect to the aggregated science database
path = '{}/science/year={}/month={}/day={}'.format(
args.agg_data_prefix,
args.night[:4],
args.night[4:6],
args.night[6:8]
)
df = load_parquet_files(path)
with open('{}/tns_marker.txt'.format(args.tns_folder)) as f:
tns_marker = f.read().replace('\n', '')
if not args.tns_sandbox:
print("WARNING: submitting to real (not sandbox) TNS website")
if args.tns_sandbox:
url_tns_api = "https://sandbox.wis-tns.org/api"
with open('{}/sandbox-tns_api.key'.format(args.tns_folder)) as f:
# remove line break...
key = f.read().replace('\n', '')
else:
url_tns_api = "https://www.wis-tns.org/api"
with open('{}/tns_api.key'.format(args.tns_folder)) as f:
# remove line break...
key = f.read().replace('\n', '')
cols = [
'cdsxmatch', 'roid', 'mulens',
'snn_snia_vs_nonia', 'snn_sn_vs_all', 'rf_snia_vs_nonia',
'candidate.ndethist', 'candidate.drb', 'candidate.classtar',
'candidate.jd', 'candidate.jdstarthist', 'rf_kn_vs_nonkn', 'tracklet'
]
df = df.withColumn('class', extract_fink_classification(*cols))
pdf = df\
.filter(df['class'] == 'Early SN Ia candidate')\
.filter(df['candidate.ndethist'] <= 20)\
.toPandas()
pdf_unique = pdf.groupby('objectId')[pdf.columns].min()
print("{} new alerts".format(len(pdf)))
print("{} new sources".format(len(pdf_unique)))
pdf = pdf_unique
ids = []
report = {"at_report": {}}
check_tns = False
for index, row in enumerate(pdf.iterrows()):
alert = row[1]
past_ids = read_past_ids(args.tns_folder)
if alert['objectId'] in past_ids.values:
print('{} already sent!'.format(alert['objectId']))
continue
if check_tns:
groupid = retrieve_groupid(key, tns_marker, alert['objectId'])
if groupid > 0:
print("{} already reported by {}".format(
alert['objectId'],
groupid
))
else:
print('New report for object {}'.format(alert['objectId']))
photometry, non_detection = extract_discovery_photometry(alert)
report['at_report']["{}".format(index)] = build_report(
alert,
photometry,
non_detection
)
ids.append(alert['objectId'])
print('new objects: ', ids)
if len(ids) != 0:
json_report = save_logs_and_return_json_report(
name='{}{}{}'.format(
args.night[:4],
args.night[4:6],
args.night[6:8]
),
folder=args.tns_folder,
ids=ids,
report=report
)
r = send_json_report(key, url_tns_api, json_report, tns_marker)
print(r.json())
# post to slack
slacktxt = ' \n '.join(['https://fink-portal.org/{}'.format(i) for i in ids])
slacktxt = '{} \n '.format(args.night) + slacktxt
r = requests.post(
os.environ['TNSWEBHOOK'],
json={'text': slacktxt, "username": "VirtualData"},
headers={'Content-Type': 'application/json'}
)
print(r.status_code)
else:
slacktxt = '{} \n No new sources'.format(args.night)
r = requests.post(
os.environ['TNSWEBHOOK'],
json={'text': slacktxt, "username": "VirtualData"},
headers={'Content-Type': 'application/json'}
)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"TNSWEBHOOK"
] |
[]
|
["TNSWEBHOOK"]
|
python
| 1 | 0 | |
topdown/topdown_test.go
|
// Copyright 2016 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
package topdown
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"runtime"
"sort"
"strings"
"sync"
"testing"
"time"
"github.com/open-policy-agent/opa/format"
"github.com/ghodss/yaml"
iCache "github.com/open-policy-agent/opa/topdown/cache"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/storage"
"github.com/open-policy-agent/opa/storage/inmem"
"github.com/open-policy-agent/opa/types"
"github.com/open-policy-agent/opa/util"
testutil "github.com/open-policy-agent/opa/util/test"
)
func TestTopDownQueryIDsUnique(t *testing.T) {
ctx := context.Background()
store := inmem.New()
inputTerm := &ast.Term{}
txn := storage.NewTransactionOrDie(ctx, store)
defer store.Abort(ctx, txn)
compiler := compileModules([]string{
`package x
p { 1 }
p { 2 }`})
tr := []*Event{}
query := NewQuery(ast.MustParseBody("data.x.p")).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithTracer((*BufferTracer)(&tr)).
WithInput(inputTerm)
_, err := query.Run(ctx)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
queryIDs := map[uint64]bool{} // set of seen queryIDs (in EnterOps)
for _, evt := range tr {
if evt.Op != EnterOp {
continue
}
if queryIDs[evt.QueryID] {
t.Errorf("duplicate queryID: %v", evt)
}
queryIDs[evt.QueryID] = true
}
}
func TestTopDownIndexExpr(t *testing.T) {
ctx := context.Background()
store := inmem.New()
txn := storage.NewTransactionOrDie(ctx, store)
defer store.Abort(ctx, txn)
compiler := compileModules([]string{
`package test
p = true {
1 > 0
q
}
q = true { true }`})
tr := []*Event{}
query := NewQuery(ast.MustParseBody("data.test.p")).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithTracer((*BufferTracer)(&tr))
_, err := query.Run(ctx)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
exp := []*ast.Expr{
ast.MustParseExpr("data.test.p"),
ast.MustParseExpr("data.test.q"),
}
i := 0
for _, evt := range tr {
if evt.Op != IndexOp {
continue
}
expr, ok := evt.Node.(*ast.Expr)
if !ok {
t.Fatal("Expected expr node but got:", evt.Node)
}
exp[i].Index = i
if ast.Compare(expr, exp[i]) != 0 {
t.Fatalf("Expected %v but got: %v", exp[i], expr)
}
i++
}
}
func TestTopDownWithKeyword(t *testing.T) {
tests := []struct {
note string
rules []string
modules []string
input string
exp interface{}
}{
{
// NOTE(tsandall): This case assumes that partial sets are not memoized.
// If we change that, it'll be harder to test that the comprehension
// cache is invalidated.
note: "invalidate comprehension cache",
exp: `[[{"b": ["a", "c"]}], [{"b": ["a"]}]]`,
modules: []string{`package ex
s[x] {
x = {v: ks |
v = input[i]
ks = {k | v = input[k]}
}
}
`},
rules: []string{`p = [x, y] {
x = data.ex.s with input as {"a": "b", "c": "b"}
y = data.ex.s with input as {"a": "b"}
}`},
},
}
for _, tc := range tests {
runTopDownTestCaseWithModules(t, loadSmallTestData(), tc.note, tc.rules, tc.modules, tc.input, tc.exp)
}
}
func TestTopDownUnsupportedBuiltin(t *testing.T) {
ast.RegisterBuiltin(&ast.Builtin{
Name: "unsupported_builtin",
})
body := ast.MustParseBody(`unsupported_builtin()`)
ctx := context.Background()
compiler := ast.NewCompiler()
store := inmem.New()
txn := storage.NewTransactionOrDie(ctx, store)
q := NewQuery(body).WithCompiler(compiler).WithStore(store).WithTransaction(txn)
_, err := q.Run(ctx)
expected := unsupportedBuiltinErr(body[0].Location)
if !reflect.DeepEqual(err, expected) {
t.Fatalf("Expected %v but got: %v", expected, err)
}
}
func TestTopDownQueryCancellation(t *testing.T) {
ctx := context.Background()
compiler := compileModules([]string{
`
package test
p { data.arr[_] = _; test.sleep("1ms") }
`,
})
data := map[string]interface{}{
"arr": make([]interface{}, 1000),
}
store := inmem.NewFromObject(data)
txn := storage.NewTransactionOrDie(ctx, store)
cancel := NewCancel()
query := NewQuery(ast.MustParseBody("data.test.p")).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithCancel(cancel)
go func() {
time.Sleep(time.Millisecond * 50)
cancel.Cancel()
}()
qrs, err := query.Run(ctx)
if err == nil || err.(*Error).Code != CancelErr {
t.Fatalf("Expected cancel error but got: %v (err: %v)", qrs, err)
}
}
type contextPropagationMock struct{}
// contextPropagationStore will accumulate values from the contexts provided to
// read calls so that the test can verify that contexts are being propagated as
// expected.
type contextPropagationStore struct {
storage.WritesNotSupported
storage.TriggersNotSupported
storage.PolicyNotSupported
calls []interface{}
}
func (m *contextPropagationStore) NewTransaction(context.Context, ...storage.TransactionParams) (storage.Transaction, error) {
return nil, nil
}
func (m *contextPropagationStore) Commit(context.Context, storage.Transaction) error {
return nil
}
func (m *contextPropagationStore) Abort(context.Context, storage.Transaction) {
}
func (m *contextPropagationStore) Read(ctx context.Context, txn storage.Transaction, path storage.Path) (interface{}, error) {
val := ctx.Value(contextPropagationMock{})
m.calls = append(m.calls, val)
return nil, nil
}
func TestTopDownContextPropagation(t *testing.T) {
ctx := context.WithValue(context.Background(), contextPropagationMock{}, "bar")
compiler := ast.NewCompiler()
compiler.Compile(map[string]*ast.Module{
"mod1": ast.MustParseModule(`package ex
p[x] { data.a[i] = x }`,
),
})
mockStore := &contextPropagationStore{}
txn := storage.NewTransactionOrDie(ctx, mockStore)
query := NewQuery(ast.MustParseBody("data.ex.p")).
WithCompiler(compiler).
WithStore(mockStore).
WithTransaction(txn)
_, err := query.Run(ctx)
if err != nil {
t.Fatalf("Unexpected query error: %v", err)
}
expectedCalls := []interface{}{"bar"}
if !reflect.DeepEqual(expectedCalls, mockStore.calls) {
t.Fatalf("Expected %v but got: %v", expectedCalls, mockStore.calls)
}
}
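// compileModules parses and compiles the given module sources, panicking on any compile error.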
func compileModules(input []string) *ast.Compiler {
mods := map[string]*ast.Module{}
for idx, i := range input {
id := fmt.Sprintf("testMod%d", idx)
mods[id] = ast.MustParseModule(i)
}
c := ast.NewCompiler()
if c.Compile(mods); c.Failed() {
panic(c.Errors)
}
return c
}
func compileRules(imports []string, input []string, modules []string) (*ast.Compiler, error) {
is := []*ast.Import{}
for _, i := range imports {
is = append(is, &ast.Import{
Path: ast.MustParseTerm(i),
})
}
m := &ast.Module{
Package: ast.MustParsePackage("package generated"),
Imports: is,
}
rules := []*ast.Rule{}
for i := range input {
rules = append(rules, ast.MustParseRule(input[i]))
rules[i].Module = m
}
m.Rules = rules
for i := range rules {
rules[i].Module = m
}
mods := map[string]*ast.Module{"testMod": m}
for i, s := range modules {
mods[fmt.Sprintf("testMod%d", i)] = ast.MustParseModule(s)
}
c := ast.NewCompiler()
if c.Compile(mods); c.Failed() {
return nil, c.Errors
}
return c, nil
}
// loadSmallTestData returns base documents that are referenced
// throughout the topdown test suite.
//
// Avoid the following top-level keys: i, j, k, p, q, r, v, x, y, z.
// These are used for rule names, local variables, etc.
//
func loadSmallTestData() map[string]interface{} {
var data map[string]interface{}
err := util.UnmarshalJSON([]byte(`{
"a": [1,2,3,4],
"b": {
"v1": "hello",
"v2": "goodbye"
},
"c": [{
"x": [true, false, "foo"],
"y": [null, 3.14159],
"z": {"p": true, "q": false}
}],
"d": {
"e": ["bar", "baz"]
},
"f": [
{"xs": [1.0], "ys": [2.0]},
{"xs": [2.0], "ys": [3.0]}
],
"g": {
"a": [1, 0, 0, 0],
"b": [0, 2, 0, 0],
"c": [0, 0, 0, 4]
},
"h": [
[1,2,3],
[2,3,4]
],
"l": [
{
"a": "bob",
"b": -1,
"c": [1,2,3,4]
},
{
"a": "alice",
"b": 1,
"c": [2,3,4,5],
"d": null
}
],
"strings": {
"foo": 1,
"bar": 2,
"baz": 3
},
"three": 3,
"m": [],
"numbers": [
"1",
"2",
"3",
"4"
]
}`), &data)
if err != nil {
panic(err)
}
return data
}
func runTopDownTestCase(t *testing.T, data map[string]interface{}, note string, rules []string, expected interface{}) {
t.Helper()
runTopDownTestCaseWithContext(context.Background(), t, data, note, rules, nil, "", expected)
}
func runTopDownTestCaseWithModules(t *testing.T, data map[string]interface{}, note string, rules []string, modules []string, input string, expected interface{}) {
t.Helper()
runTopDownTestCaseWithContext(context.Background(), t, data, note, rules, modules, input, expected)
}
func runTopDownTestCaseWithContext(ctx context.Context, t *testing.T, data map[string]interface{}, note string, rules []string, modules []string, input string, expected interface{}) {
t.Helper()
imports := []string{}
for k := range data {
imports = append(imports, "data."+k)
}
compiler, err := compileRules(imports, rules, modules)
if err != nil {
if _, ok := expected.(error); ok {
assertError(t, expected, err)
} else {
t.Errorf("%v: Compiler error: %v", note, err)
}
return
}
store := inmem.NewFromObject(data)
assertTopDownWithPathAndContext(ctx, t, compiler, store, note, []string{"generated", "p"}, input, expected)
}
func assertTopDownWithPath(t *testing.T, compiler *ast.Compiler, store storage.Store, note string, path []string, input string, expected interface{}) {
t.Helper()
assertTopDownWithPathAndContext(context.Background(), t, compiler, store, note, path, input, expected)
}
func assertTopDownWithPathAndContext(ctx context.Context, t *testing.T, compiler *ast.Compiler, store storage.Store, note string, path []string, input string, expected interface{}) {
t.Helper()
var inputTerm *ast.Term
if len(input) > 0 {
inputTerm = ast.MustParseTerm(input)
}
txn := storage.NewTransactionOrDie(ctx, store)
defer store.Abort(ctx, txn)
var lhs *ast.Term
if len(path) == 0 {
lhs = ast.NewTerm(ast.DefaultRootRef)
} else {
lhs = ast.MustParseTerm("data." + strings.Join(path, "."))
}
rhs := ast.VarTerm(ast.WildcardPrefix + "result")
body := ast.NewBody(ast.Equality.Expr(lhs, rhs))
var requiresSort bool
if rules := compiler.GetRulesExact(lhs.Value.(ast.Ref)); len(rules) > 0 && rules[0].Head.DocKind() == ast.PartialSetDoc {
requiresSort = true
}
if os.Getenv("OPA_DUMP_TEST") != "" {
data, err := store.Read(ctx, txn, storage.MustParsePath("/"))
if err != nil {
t.Fatal(err)
}
dump(note, compiler.Modules, data, path, inputTerm, expected, requiresSort)
}
// add an inter-query cache
config, _ := iCache.ParseCachingConfig(nil)
interQueryCache := iCache.NewInterQueryCache(config)
query := NewQuery(body).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithInput(inputTerm).
WithInterQueryBuiltinCache(interQueryCache)
var tracer BufferTracer
if os.Getenv("OPA_TRACE_TEST") != "" {
query = query.WithTracer(&tracer)
}
testutil.Subtest(t, note, func(t *testing.T) {
t.Helper()
switch e := expected.(type) {
case *Error, error:
_, err := query.Run(ctx)
assertError(t, expected, err)
case string:
qrs, err := query.Run(ctx)
if tracer != nil {
PrettyTrace(os.Stdout, tracer)
}
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(e) == 0 {
if len(qrs) != 0 {
t.Fatalf("Expected undefined result but got: %v", qrs)
}
return
}
if len(qrs) == 0 {
t.Fatalf("Expected %v but got undefined", e)
}
result, err := ast.JSON(qrs[0][rhs.Value.(ast.Var)].Value)
if err != nil {
t.Fatal(err)
}
expected := util.MustUnmarshalJSON([]byte(e))
if requiresSort {
sort.Sort(resultSet(result.([]interface{})))
if sl, ok := expected.([]interface{}); ok {
sort.Sort(resultSet(sl))
}
}
if util.Compare(expected, result) != 0 {
t.Fatalf("Unexpected result:\nGot: %+v\nExp:\n%+v", result, expected)
}
// If the test case involved the input document, re-run it with partial
// evaluation enabled and input marked as unknown. Then replay the query and
// verify the partial evaluation result is the same. Note, we cannot evaluate
// the result of a query against `data` because the queries need to be
// converted into rules (which would result in recursion.)
if len(path) > 0 {
runTopDownPartialTestCase(ctx, t, compiler, store, txn, inputTerm, rhs, body, requiresSort, expected)
}
default:
t.Fatalf("Unexpected expected value type: %+v", e)
}
})
}
func runTopDownPartialTestCase(ctx context.Context, t *testing.T, compiler *ast.Compiler, store storage.Store, txn storage.Transaction, input *ast.Term, output *ast.Term, body ast.Body, requiresSort bool, expected interface{}) {
t.Helper()
// add an inter-query cache
config, _ := iCache.ParseCachingConfig(nil)
interQueryCache := iCache.NewInterQueryCache(config)
partialQuery := NewQuery(body).
WithCompiler(compiler).
WithStore(store).
WithUnknowns([]*ast.Term{ast.MustParseTerm("input")}).
WithTransaction(txn).
WithInterQueryBuiltinCache(interQueryCache)
partials, support, err := partialQuery.PartialRun(ctx)
if err != nil {
t.Fatal("Unexpected error on partial evaluation comparison:", err)
}
module := ast.MustParseModule("package topdown_test_partial")
module.Rules = make([]*ast.Rule, len(partials))
for i, body := range partials {
module.Rules[i] = &ast.Rule{
Head: ast.NewHead(ast.Var("__result__"), nil, output),
Body: body,
Module: module,
}
}
compiler.Modules["topdown_test_partial"] = module
for i, module := range support {
compiler.Modules[fmt.Sprintf("topdown_test_support_%d", i)] = module
}
compiler.Compile(compiler.Modules)
if compiler.Failed() {
t.Fatal("Unexpected error on partial evaluation result compile:", compiler.Errors)
}
query := NewQuery(ast.MustParseBody("data.topdown_test_partial.__result__ = x")).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithInput(input).
WithInterQueryBuiltinCache(interQueryCache)
qrs, err := query.Run(ctx)
if err != nil {
t.Fatal("Unexpected error on query after partial evaluation:", err)
}
if len(qrs) == 0 {
t.Fatalf("Expected %v but got undefined from query after partial evaluation", expected)
}
result, err := ast.JSON(qrs[0][ast.Var("x")].Value)
if err != nil {
t.Fatal(err)
}
if requiresSort {
sort.Sort(resultSet(result.([]interface{})))
if sl, ok := expected.([]interface{}); ok {
sort.Sort(resultSet(sl))
}
}
if util.Compare(expected, result) != 0 {
t.Fatalf("Unexpected result after partial evaluation:\nGot:\n%v\nExp:\n%v", result, expected)
}
}
type resultSet []interface{}
func (rs resultSet) Less(i, j int) bool {
return util.Compare(rs[i], rs[j]) < 0
}
func (rs resultSet) Swap(i, j int) {
tmp := rs[i]
rs[i] = rs[j]
rs[j] = tmp
}
func (rs resultSet) Len() int {
return len(rs)
}
func init() {
ast.RegisterBuiltin(&ast.Builtin{
Name: "test.sleep",
Decl: types.NewFunction(
types.Args(types.S),
types.NewNull(),
),
})
RegisterFunctionalBuiltin1("test.sleep", func(a ast.Value) (ast.Value, error) {
d, _ := time.ParseDuration(string(a.(ast.String)))
time.Sleep(d)
return ast.Null{}, nil
})
}
var testID = 0
var testIDMutex sync.Mutex
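// getTestNamespace derives a namespace for dumped test cases from the name of the calling Test function.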
func getTestNamespace() string {
programCounters := make([]uintptr, 20)
n := runtime.Callers(0, programCounters)
if n > 0 {
frames := runtime.CallersFrames(programCounters[:n])
for more := true; more; {
var f runtime.Frame
f, more = frames.Next()
if strings.HasPrefix(f.Function, "github.com/open-policy-agent/opa/topdown.Test") {
return strings.TrimPrefix(strings.ToLower(strings.TrimPrefix(strings.TrimPrefix(f.Function, "github.com/open-policy-agent/opa/topdown.Test"), "TopDown")), "builtin")
}
}
}
return ""
}
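// dump writes the test case to a YAML file under $OPA_DUMP_TEST so it can be replayed as a standalone test case.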
func dump(note string, modules map[string]*ast.Module, data interface{}, docpath []string, input *ast.Term, exp interface{}, requiresSort bool) {
moduleSet := []string{}
for _, module := range modules {
moduleSet = append(moduleSet, string(bytes.ReplaceAll(format.MustAst(module), []byte("\t"), []byte(" "))))
}
namespace := getTestNamespace()
test := map[string]interface{}{
"note": namespace + "/" + note,
"data": data,
"modules": moduleSet,
"query": strings.Join(append([]string{"data"}, docpath...), ".") + " = x",
}
if input != nil {
test["input_term"] = input.String()
}
switch e := exp.(type) {
case string:
rs := []map[string]interface{}{}
if len(e) > 0 {
exp := util.MustUnmarshalJSON([]byte(e))
if requiresSort {
sl := exp.([]interface{})
sort.Sort(resultSet(sl))
}
rs = append(rs, map[string]interface{}{"x": exp})
}
test["want_result"] = rs
if requiresSort {
test["sort_bindings"] = true
}
case error:
test["want_error_code"] = e.(*Error).Code
test["want_error"] = e.(*Error).Message
default:
panic("Unexpected test expectation. Cowardly refusing to generate test cases.")
}
bs, err := yaml.Marshal(map[string]interface{}{"cases": []interface{}{test}})
if err != nil {
panic(err)
}
dir := path.Join(os.Getenv("OPA_DUMP_TEST"), namespace)
if err := os.MkdirAll(dir, 0755); err != nil {
panic(err)
}
testIDMutex.Lock()
testID++
c := testID
testIDMutex.Unlock()
filename := fmt.Sprintf("test-%v-%04d.yaml", namespace, c)
if err := ioutil.WriteFile(filepath.Join(dir, filename), bs, 0644); err != nil {
panic(err)
}
}
func assertError(t *testing.T, expected interface{}, actual error) {
t.Helper()
if actual == nil {
t.Errorf("Expected error but got: %v", actual)
return
}
errString := actual.Error()
if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
t.Errorf("Expected error of type '%T', got '%T'", expected, actual)
}
switch e := expected.(type) {
case Error:
assertErrorContains(t, errString, e.Code)
assertErrorContains(t, errString, e.Message)
case *Error:
assertErrorContains(t, errString, e.Code)
assertErrorContains(t, errString, e.Message)
case *ast.Error:
assertErrorContains(t, errString, e.Code)
assertErrorContains(t, errString, e.Message)
case ast.Errors:
for _, astErr := range e {
assertErrorContains(t, errString, astErr.Code)
assertErrorContains(t, errString, astErr.Message)
}
case error:
assertErrorContains(t, errString, e.Error())
}
}
func assertErrorContains(t *testing.T, actualErrMsg string, expected string) {
t.Helper()
if !strings.Contains(actualErrMsg, expected) {
t.Errorf("Expected error '%v' but got: '%v'", expected, actualErrMsg)
}
}
|
[
"\"OPA_DUMP_TEST\"",
"\"OPA_TRACE_TEST\"",
"\"OPA_DUMP_TEST\""
] |
[] |
[
"OPA_TRACE_TEST",
"OPA_DUMP_TEST"
] |
[]
|
["OPA_TRACE_TEST", "OPA_DUMP_TEST"]
|
go
| 2 | 0 | |
main.go
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/gbodra/network-monitor/notification"
"github.com/go-co-op/gocron"
"github.com/joho/godotenv"
"github.com/schollz/progressbar/v3"
)
func loadConfig() []string {
var subnets []string
f, err := os.Open("subnets.cfg")
if err != nil {
log.Fatal(err)
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
subnets = append(subnets, scanner.Text())
}
if err := scanner.Err(); err != nil {
log.Fatal(err)
}
return subnets
}
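// findActiveDevices pings every address in the /24 starting at ipBase and returns the IPs that answered.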
func findActiveDevices(ipBase string) []string {
bar := progressbar.Default(254)
var devicesFound []string
for i := 1; i < 255; i++ {
ip := ipBase + strconv.Itoa(i)
out, _ := exec.Command("ping", ip, "-c", "1", "-t", "1").Output()
if !strings.Contains(string(out), "100.0% packet loss") {
devicesFound = append(devicesFound, ip)
// data.InsertHost(&data.Host{IdScan: idScan, Ip: ip})
}
bar.Add(1)
}
log.Println("Found", len(devicesFound), "devices on network:", ipBase)
return devicesFound
}
func ScanNetwork(ips []string) {
for _, ip := range ips {
devices := findActiveDevices(ip)
message := fmt.Sprint("Found ", len(devices), " devices on network ", ip, "\n", strings.Join(devices, "\n"))
notification.SendMessageTelegram(message)
}
}
func PrintMsg() {
log.Println("Hello cron")
}
func main() {
err := godotenv.Load()
if err != nil {
log.Println("Error loading .env file")
}
ips := loadConfig()
// data.MigrateDb()
// idScan := data.InsertScan(&data.Scan{Subnets: strings.Join(ips, ",")})
scheduler := gocron.NewScheduler(time.Local)
scheduler.Every(os.Getenv("TASK_FREQ")).Do(ScanNetwork, ips)
scheduler.StartBlocking()
fmt.Println("Press the any key to stop")
fmt.Scanln()
}
|
[
"\"TASK_FREQ\""
] |
[] |
[
"TASK_FREQ"
] |
[]
|
["TASK_FREQ"]
|
go
| 1 | 0 | |
stacks.go
|
package main
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
"time"
"github.com/codegangsta/cli"
"github.com/goamz/goamz/aws"
"github.com/litl/galaxy/log"
"github.com/litl/galaxy/stack"
"github.com/litl/galaxy/utils"
"github.com/ryanuber/columnize"
)
// return --base, or try to find a base cloudformation stack
func getBase(c *cli.Context) string {
errNoBase := fmt.Errorf("could not identify a unique base stack")
base := c.String("base")
if base != "" {
return base
}
descResp, err := stack.DescribeStacks("")
if err != nil {
log.Fatal(err)
}
for _, stack := range descResp.Stacks {
// first check for galaxy:base tag
baseTag := false
for _, t := range stack.Tags {
if t.Key == "galaxy" && t.Value == "base" {
baseTag = true
}
}
if baseTag {
if base != "" {
err = errNoBase
}
base = stack.Name
continue
}
parts := strings.Split(stack.Name, "-")
// check for "-base" in the name
if parts[len(parts)-1] == "base" {
if base != "" {
err = errNoBase
}
base = stack.Name
continue
}
// the best we can do for now is look for a stack with a single word
if len(parts) == 1 {
if base != "" {
err = errNoBase
}
base = stack.Name
log.Printf("Warning: guessing base stack: %s", base)
}
}
if err != nil {
log.Fatalf("%s: %s", err, "use --base")
}
return base
}
func promptValue(prompt, dflt string) string {
if !tty {
return dflt
}
fmt.Printf("%s [%s]: ", prompt, dflt)
val, err := bufio.NewReader(os.Stdin).ReadString('\n')
if err != nil {
log.Println(err)
return dflt
}
val = strings.TrimSpace(val)
// return the default if the input was empty
if len(val) == 0 {
return dflt
}
return val
}
// Prompt user for required arguments
// TODO: parse CIDR and generate appropriate subnets
// TODO: check for subnet collision
func getInitOpts(c *cli.Context) *stack.GalaxyTmplParams {
name := c.Args().First()
if name == "" {
name = promptValue("Base Stack Name", "galaxy-base")
}
keyName := c.String("keyname")
if keyName == "" {
keyName = promptValue("EC2 Keypair Name", "required")
if keyName == "required" {
log.Fatal("keyname required")
}
}
controllerAMI := promptValue("Controller AMI", "ami-9a562df2")
controllerInstance := promptValue("Controller Instance Type", "t2.medium")
poolAMI := promptValue("Default Pool AMI", "ami-9a562df2")
poolInstance := promptValue("Default Pool Instance Type", "t2.medium")
vpcSubnet := promptValue("VPC CIDR Block", "10.24.0.0/16")
// some *very* basic input verification
if !strings.Contains(vpcSubnet, "/") || strings.Count(vpcSubnet, ".") != 3 {
log.Fatal("VPC Subnet must be in CIDR notation")
}
region := c.String("region")
if region == "" {
region = os.Getenv("AWS_DEFAULT_REGION")
if region == "" {
region = "us-east-1"
}
region = promptValue("EC2 Region", region)
}
azResp, err := stack.DescribeAvailabilityZones(region)
if err != nil {
log.Fatal(err)
}
subnets := []*stack.SubnetTmplParams{}
for i, az := range azResp.AvailabilityZones {
s := &stack.SubnetTmplParams{
Name: fmt.Sprintf("%sSubnet%d", name, i+1),
Subnet: fmt.Sprintf("10.24.%d.0/24", i+1),
AZ: az.Name,
}
subnets = append(subnets, s)
}
// replace default subnets with user values
for i, s := range subnets {
s.Subnet = promptValue(fmt.Sprintf("Subnet %d", i+1), s.Subnet)
}
opts := &stack.GalaxyTmplParams{
Name: name,
KeyName: keyName,
ControllerImageId: controllerAMI,
ControllerInstanceType: controllerInstance,
PoolImageId: poolAMI,
PoolInstanceType: poolInstance,
VPCCIDR: vpcSubnet,
Subnets: subnets,
}
return opts
}
// Return json supplied in the argument, or look for a file by the name given.
// If the name is "STDIN", read the json from stdin
func jsonFromArg(arg string) ([]byte, error) {
var jsonArg []byte
var err error
arg = strings.TrimSpace(arg)
// assume that an opening bracket means the json is given directly
if strings.HasPrefix(arg, "{") {
jsonArg = []byte(arg)
} else if arg == "STDIN" {
jsonArg, err = ioutil.ReadAll(os.Stdin)
if err != nil {
return nil, err
}
} else {
// all else fails, look for a file
jsonArg, err = ioutil.ReadFile(arg)
if err != nil {
return nil, err
}
}
// verify the json by compacting it
buf := bytes.NewBuffer(nil)
err = json.Compact(buf, jsonArg)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// create our base stack
func stackInit(c *cli.Context) {
stackName := c.Args().First()
if stackName == "" {
log.Fatal("ERROR: stack name required")
}
if c.String("region") != "" {
stack.Region = c.String("region")
}
exists, err := stack.Exists(stackName)
if exists {
log.Fatalf("ERROR: stack %s already exists", stackName)
} else if err != nil {
fmt.Println("EXISTS ERROR")
log.Fatal(err)
}
params := getInitOpts(c)
stackTmpl, err := stack.GalaxyTemplate(params)
if err != nil {
log.Fatalf("ERROR: %s", err)
}
if c.Bool("print") {
fmt.Println(string(stackTmpl))
return
}
opts := make(map[string]string)
opts["tag.galaxy"] = "base"
_, err = stack.Create(stackName, stackTmpl, opts)
if err != nil {
log.Fatalf("ERROR: %s", err)
}
log.Println("Initializing stack", stackName)
}
// update the base stack
func stackUpdate(c *cli.Context) {
var stackTmpl []byte
var err error
stackName := c.Args().First()
if stackName == "" {
log.Fatal("ERROR: stack name required")
}
if c.String("region") != "" {
stack.Region = c.String("region")
}
params := make(map[string]string)
if p := c.String("parameters"); p != "" {
paramJSON, err := jsonFromArg(p)
if err != nil {
log.Fatal("ERROR: decoding parameters:", err)
}
err = json.Unmarshal(paramJSON, &params)
if err != nil {
log.Fatal(err)
}
}
template := c.String("template")
if template != "" {
stackTmpl, err = jsonFromArg(template)
if err != nil {
log.Fatalf("ERROR: %s", err)
}
}
if policy := c.String("policy"); policy != "" {
policyJSON, err := jsonFromArg(policy)
if err != nil {
log.Fatal("policy error:", err)
}
params["StackPolicyDuringUpdateBody"] = string(policyJSON)
}
if len(stackTmpl) == 0 {
// get the current running template
stackTmpl, err = stack.GetTemplate(stackName)
if err != nil {
log.Fatal(err)
}
}
// this reads the Parameters supplied for our current stack for us
shared := sharedResources(c, stackName)
// add any missing parameters to our
for key, val := range shared.Parameters {
if params[key] == "" {
params[key] = val
}
}
p, _ := json.MarshalIndent(params, "", " ")
ok := promptValue(fmt.Sprintf("\nUpdate the [%s] stack with:\n%s\nAccept?", stackName, string(p)), "n")
switch strings.ToLower(ok) {
case "y", "yes":
_, err = stack.Update(stackName, stackTmpl, params)
if err != nil {
log.Fatal(err)
}
log.Println("Updating stack:", stackName)
default:
log.Fatal("aborted")
}
}
// Print a Cloudformation template to stdout.
func stackTemplate(c *cli.Context) {
stackName := c.Args().First()
if c.String("region") != "" {
stack.Region = c.String("region")
}
if stackName == "" {
os.Stdout.Write(stack.DefaultGalaxyTemplate())
return
}
if c.String("region") != "" {
stack.Region = c.String("region")
}
stackTmpl, err := stack.GetTemplate(stackName)
if err != nil {
if err, ok := err.(*aws.Error); ok {
if err.Code == "ValidationError" && strings.Contains(err.Message, "does not exist") {
log.Fatalf("ERROR: Stack '%s' does not exist", stackName)
}
}
log.Fatal(err)
}
if _, err := os.Stdout.Write(stackTmpl); err != nil {
log.Fatal(err)
}
}
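// sharedResources loads the shared outputs of the base stack and applies any keyname/ami/instance-type overrides from the CLI.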
func sharedResources(c *cli.Context, baseStack string) stack.SharedResources {
// get the resources we need from the base stack
resources, err := stack.GetSharedResources(baseStack)
if err != nil {
log.Fatal(err)
}
keyName := c.String("keyname")
if keyName != "" {
resources.Parameters["KeyName"] = keyName
}
amiID := c.String("ami")
if amiID != "" {
resources.Parameters["PoolImageId"] = amiID
}
instanceType := c.String("instance-type")
if instanceType != "" {
resources.Parameters["PoolInstanceType"] = instanceType
}
return resources
}
// set autoscaling options for a pool
func setCPUAutoScale(c *cli.Context, pool *stack.Pool) {
scaleAdj := c.Int("scale-adj")
scaleUpDel := c.Int("scale-up-delay")
scaleDownDel := c.Int("scale-down-delay")
scaleUpCPU := c.Int("scale-up-cpu")
scaleDownCPU := c.Int("scale-down-cpu")
asgName := pool.ASG().Name
if asgName == "" {
log.Fatal("Error: missing ASG Name")
}
// Any options set to 0 will use template defaults.
// Don't autoscale if no options are set.
if scaleAdj != 0 || scaleUpDel != 0 || scaleDownDel != 0 || scaleUpCPU != 0 || scaleDownCPU != 0 {
pool.SetCPUAutoScaling(asgName, scaleAdj, scaleUpCPU, scaleUpDel, scaleDownCPU, scaleDownDel)
}
}
func stackCreatePool(c *cli.Context) {
var err error
ensureEnvArg(c)
ensurePoolArg(c)
if c.String("region") != "" {
stack.Region = c.String("region")
}
poolName := utils.GalaxyPool(c)
baseStack := getBase(c)
poolEnv := utils.GalaxyEnv(c)
stackName := fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName)
pool := stack.NewPool()
// get the resources we need from the base stack
// TODO: this may search for the base stack a second time
resources := sharedResources(c, baseStack)
desiredCap := c.Int("desired-size")
if desiredCap == 0 {
desiredCap = 1
}
numZones := c.Int("availability-zones")
if numZones == 0 {
// default to running one host per zone
numZones = desiredCap
}
minSize := c.Int("min-size")
maxSize := c.Int("max-size")
httpPort := c.Int("http-port")
if httpPort == 0 {
httpPort = 80
}
sslCert := ""
if cert := c.String("ssl-cert"); cert != "" {
sslCert = resources.ServerCerts[cert]
if sslCert == "" {
log.Fatalf("Could not find certificate '%s'", cert)
}
}
// Create our Launch Config
lc := pool.LCTemplate
lcName := "lc" + poolEnv + poolName
if amiID := c.String("ami"); amiID != "" {
lc.Properties.ImageId = amiID
} else {
lc.Properties.ImageId = resources.Parameters["PoolImageId"]
}
if insType := c.String("instance-type"); insType != "" {
lc.Properties.InstanceType = insType
} else {
lc.Properties.InstanceType = resources.Parameters["PoolInstanceType"]
}
if keyName := c.String("keyname"); keyName != "" {
lc.Properties.KeyName = keyName
} else {
lc.Properties.KeyName = resources.Parameters["KeyName"]
}
lc.Properties.IamInstanceProfile = resources.Roles["galaxyInstanceProfile"]
lc.Properties.SecurityGroups = []string{
resources.SecurityGroups["sshSG"],
resources.SecurityGroups["defaultSG"],
}
lc.SetVolumeSize(c.Int("volume-size"))
pool.Resources[lcName] = lc
// Create the Auto Scaling Group
asg := pool.ASGTemplate
asgName := "asg" + poolEnv + poolName
asg.AddTag("Name", fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName), true)
asg.AddTag("env", poolEnv, true)
asg.AddTag("pool", poolName, true)
asg.AddTag("galaxy", "pool", true)
asg.Properties.DesiredCapacity = desiredCap
// Don't always run in all zones
subnets := resources.Subnets
if numZones <= len(subnets) {
subnets = subnets[:numZones]
} else {
log.Fatal("ERROR: cannot run in %d zones, only %d available.", numZones, len(subnets))
}
// break the subnets info into separate subnet and AZ slices for the template
subnetIDs := []string{}
azIDs := []string{}
for _, sn := range subnets {
subnetIDs = append(subnetIDs, sn.ID)
azIDs = append(azIDs, sn.AvailabilityZone)
}
asg.SetLaunchConfiguration(lcName)
asg.Properties.AvailabilityZones = azIDs
asg.Properties.VPCZoneIdentifier = subnetIDs
if maxSize > 0 {
asg.Properties.MaxSize = maxSize
}
if minSize > 0 {
asg.Properties.MinSize = minSize
}
if c.Bool("auto-update") {
asg.SetASGUpdatePolicy(c.Int("update-min"), c.Int("update-batch"), c.Duration("update-pause"))
}
pool.Resources[asgName] = asg
// Optionally create the Elastic Load Balancer
if c.Bool("elb") {
elb := pool.ELBTemplate
elbName := "elb" + poolEnv + poolName
// make sure to add this to the ASG
asg.AddLoadBalancer(elbName)
elb.Properties.Subnets = subnetIDs
elb.Properties.SecurityGroups = []string{
resources.SecurityGroups["webSG"],
resources.SecurityGroups["defaultSG"],
}
elb.Properties.HealthCheck.Target = c.String("http-health-check")
elb.AddListener(80, "HTTP", httpPort, "HTTP", "", nil)
if sslCert != "" {
elb.AddListener(443, "HTTPS", httpPort, "HTTP", sslCert, nil)
}
pool.Resources[elbName] = elb
}
// add autoscaling if it's required
setCPUAutoScale(c, pool)
poolTmpl, err := json.MarshalIndent(pool, "", " ")
if err != nil {
log.Fatal(err)
}
if c.Bool("print") {
fmt.Println(string(poolTmpl))
return
}
opts := make(map[string]string)
opts["tag.env"] = poolEnv
opts["tag.pool"] = poolName
opts["tag.galaxy"] = "pool"
_, err = stack.Create(stackName, poolTmpl, opts)
if err != nil {
log.Fatal(err)
}
log.Println("Creating stack:", stackName)
// do we want to wait on this by default?
if err := stack.Wait(stackName, 5*time.Minute); err != nil {
log.Error(err)
log.Error("CreateStack Failed, attempting to delete")
waitAndDelete(stackName)
return
}
log.Println("CreateStack complete")
}
// wait until a stack is in a final state, then delete it
func waitAndDelete(name string) {
log.Println("Attempting to delete stack:", name)
// we need to get the StackID in order to lookup DELETE events
desc, err := stack.DescribeStacks(name)
if err != nil {
log.Fatalf("ERROR: %s", err)
} else if len(desc.Stacks) == 0 {
log.Fatal("ERROR: could not describe stack:", name)
}
stackId := desc.Stacks[0].Id
err = stack.WaitForComplete(stackId, 5*time.Minute)
if err != nil {
log.Fatal(err)
}
_, err = stack.Delete(name)
if err != nil {
log.Fatal(err)
}
// wait
err = stack.WaitForComplete(stackId, 5*time.Minute)
if err != nil {
log.Fatal(err)
}
log.Println("Deleted stack:", name)
}
// Update an existing Pool Stack
func stackUpdatePool(c *cli.Context) {
ensureEnvArg(c)
ensurePoolArg(c)
if c.String("region") != "" {
stack.Region = c.String("region")
}
poolName := utils.GalaxyPool(c)
baseStack := getBase(c)
poolEnv := utils.GalaxyEnv(c)
stackName := fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName)
pool, err := stack.GetPool(stackName)
if err != nil {
log.Fatal(err)
}
options := make(map[string]string)
if policy := c.String("policy"); policy != "" {
policyJSON, err := jsonFromArg(policy)
if err != nil {
log.Fatal("policy error:", err)
}
options["StackPolicyDuringUpdateBody"] = string(policyJSON)
}
resources := sharedResources(c, baseStack)
asg := pool.ASG()
if asg == nil {
log.Fatal("missing ASG")
}
if c.Int("desired-size") > 0 {
asg.Properties.DesiredCapacity = c.Int("desired-size")
}
if c.Int("min-size") > 0 {
asg.Properties.MinSize = c.Int("min-size")
}
if c.Int("max-size") > 0 {
asg.Properties.MaxSize = c.Int("max-size")
}
if c.Bool("auto-update") {
// note that the max pause is only PT5M30S
asg.SetASGUpdatePolicy(c.Int("update-min"), c.Int("update-batch"), c.Duration("update-pause"))
}
numZones := c.Int("availability-zones")
if numZones == 0 {
numZones = len(asg.Properties.VPCZoneIdentifier)
}
// start with the current settings
subnetIDs := []string{}
azIDs := []string{}
// only update the subnets/AZs if we changed the count
if len(asg.Properties.VPCZoneIdentifier) != numZones {
subnets := resources.Subnets
if numZones <= len(subnets) {
subnets = subnets[:numZones]
} else {
log.Fatal("ERROR: cannot run in %d zones, only %d available.", numZones, len(subnets))
}
for _, sn := range subnets {
subnetIDs = append(subnetIDs, sn.ID)
azIDs = append(azIDs, sn.AvailabilityZone)
}
asg.Properties.VPCZoneIdentifier = subnetIDs
asg.Properties.AvailabilityZones = azIDs
}
elb := pool.ELB()
sslCert := ""
if cert := c.String("ssl-cert"); cert != "" {
sslCert = resources.ServerCerts[cert]
if sslCert == "" {
log.Fatalf("Could not find certificate '%s'", cert)
}
}
httpPort := c.Int("http-port")
if (sslCert != "" || httpPort > 0) && elb == nil {
log.Fatal("ERROR: Pool does not have an ELB")
}
if elb != nil {
for _, l := range elb.Properties.Listeners {
if sslCert != "" && l.Protocol == "HTTPS" {
l.SSLCertificateId = sslCert
}
if httpPort > 0 {
l.InstancePort = httpPort
}
}
healthCheck := c.String("http-health-check")
if healthCheck != "" && healthCheck != elb.Properties.HealthCheck.Target {
elb.Properties.HealthCheck.Target = healthCheck
}
// always make sure the ELB is in the same subnets as the ASG
elb.Properties.Subnets = asg.Properties.VPCZoneIdentifier
}
lc := pool.LC()
if amiID := c.String("ami"); amiID != "" {
lc.Properties.ImageId = amiID
}
if insType := c.String("instance-type"); insType != "" {
lc.Properties.InstanceType = insType
}
// add autoscaling if it's required
setCPUAutoScale(c, pool)
poolTmpl, err := json.MarshalIndent(pool, "", " ")
if err != nil {
log.Fatal(err)
}
if c.Bool("print") {
fmt.Println(string(poolTmpl))
return
}
log.Println("Updating stack:", stackName)
if _, err := stack.Update(stackName, poolTmpl, options); err != nil {
log.Fatal(err)
}
// do we want to wait on this by default?
if err := stack.Wait(stackName, 5*time.Minute); err != nil {
log.Fatal(err)
}
log.Println("UpdateStack complete")
}
func stackDeletePool(c *cli.Context) {
ensureEnvArg(c)
ensurePoolArg(c)
if c.String("region") != "" {
stack.Region = c.String("region")
}
baseStack := getBase(c)
stackName := fmt.Sprintf("%s-%s-%s", baseStack,
utils.GalaxyEnv(c),
utils.GalaxyPool(c))
waitAndDelete(stackName)
}
// delete a pool
func stackDelete(c *cli.Context) {
stackName := c.Args().First()
if stackName == "" {
log.Fatal("ERROR: stack name required")
}
ok := c.Bool("y")
if !ok {
switch strings.ToLower(promptValue(fmt.Sprintf("\nDelete Stack '%s'?", stackName), "n")) {
case "y", "yes":
ok = true
}
}
if !ok {
log.Fatal("aborted")
}
if c.String("region") != "" {
stack.Region = c.String("region")
}
waitAndDelete(stackName)
}
func stackList(c *cli.Context) {
if c.String("region") != "" {
stack.Region = c.String("region")
}
descResp, err := stack.DescribeStacks("")
if err != nil {
log.Fatal(err)
}
stacks := []string{"STACK | STATUS | "}
for _, stack := range descResp.Stacks {
s := fmt.Sprintf("%s | %s | %s", stack.Name, stack.Status, stack.StatusReason)
stacks = append(stacks, s)
}
output, _ := columnize.SimpleFormat(stacks)
log.Println(output)
}
// List recent events for a stack
// Shows up to 20 events, or 24 hours of events.
func stackListEvents(c *cli.Context) {
stackName := c.Args().First()
if stackName == "" {
log.Fatal("ERROR: stack name required")
}
if c.String("region") != "" {
stack.Region = c.String("region")
}
resp, err := stack.DescribeStackEvents(stackName)
if err != nil {
log.Fatal(err)
}
if len(resp.Events) == 0 {
log.Println("no events for", stackName)
return
}
firstTS := resp.Events[0].Timestamp.Add(-24 * time.Hour)
lines := []string{"TIMESTAMP | Logical ID | STATUS | REASON"}
format := "%s | %s | %s | %s"
for i, e := range resp.Events {
if i > 20 || e.Timestamp.Before(firstTS) {
break
}
displayTime := e.Timestamp.Format(time.Stamp)
line := fmt.Sprintf(format, displayTime, e.LogicalResourceId, e.ResourceStatus, e.ResourceStatusReason)
lines = append(lines, line)
}
output, _ := columnize.SimpleFormat(lines)
log.Println(output)
}
|
[
"\"AWS_DEFAULT_REGION\""
] |
[] |
[
"AWS_DEFAULT_REGION"
] |
[]
|
["AWS_DEFAULT_REGION"]
|
go
| 1 | 0 | |
tester/database.go
|
package tester

import (
    "database/sql/driver"
    "fmt"
    "os"
    "time"

    "github.com/BuxOrg/bux/datastore"
    sqle "github.com/dolthub/go-mysql-server"
    "github.com/dolthub/go-mysql-server/auth"
    "github.com/dolthub/go-mysql-server/memory"
    "github.com/dolthub/go-mysql-server/server"
    "github.com/dolthub/go-mysql-server/sql"
    "github.com/dolthub/go-mysql-server/sql/information_schema"
    embeddedPostgres "github.com/fergusstrange/embedded-postgres"
    "github.com/tryvium-travels/memongo"
)

// AnyTime will fill the need for any timestamp field
type AnyTime struct{}

// Match satisfies sqlmock.Argument interface
func (a AnyTime) Match(v driver.Value) bool {
    _, ok := v.(time.Time)
    return ok
}

// AnyGUID will fill the need for any GUID field
type AnyGUID struct{}

// Match satisfies sqlmock.Argument interface
func (a AnyGUID) Match(v driver.Value) bool {
    str, ok := v.(string)
    return ok && len(str) > 0
}

// CreatePostgresServer will create a new Postgresql server
func CreatePostgresServer(port uint32) (*embeddedPostgres.EmbeddedPostgres, error) {
    // Create the new database
    postgres := embeddedPostgres.NewDatabase(embeddedPostgres.DefaultConfig().Port(port))
    if postgres == nil {
        return nil, ErrFailedLoadingPostgresql
    }

    // Start the database
    if err := postgres.Start(); err != nil {
        return nil, err
    }

    // Return the database
    return postgres, nil
}

// CreateMongoServer will create a new mongo server
func CreateMongoServer(version string) (*memongo.Server, error) {
    mongoServer, err := memongo.StartWithOptions(
        &memongo.Options{
            MongoVersion:     version,
            ShouldUseReplica: false,
            DownloadURL:      os.Getenv("BUX_MONGODB_DOWNLOAD_URL"),
        },
    )
    if err != nil {
        return nil, err
    }

    return mongoServer, nil
}

// CreateMySQL will make a new MySQL server
func CreateMySQL(host, databaseName, username, password string, port uint32) (*server.Server, error) {
    engine := sqle.NewDefault(
        sql.NewDatabaseProvider(
            CreateMySQLTestDatabase(databaseName),
            information_schema.NewInformationSchemaDatabase(),
        ))

    config := server.Config{
        Protocol: "tcp",
        Address:  fmt.Sprintf("%s:%d", host, port),
        Auth:     auth.NewNativeSingle(username, password, auth.AllPermissions),
    }

    s, err := server.NewDefaultServer(config, engine)
    if err != nil {
        return nil, err
    }

    return s, nil
}

// CreateMySQLTestDatabase is a dummy database for MySQL
func CreateMySQLTestDatabase(databaseName string) *memory.Database {
    return memory.NewDatabase(databaseName)
}

// SQLiteTestConfig will return a test-version of SQLite
func SQLiteTestConfig(debug, shared bool) *datastore.SQLiteConfig {
    return &datastore.SQLiteConfig{
        CommonConfig: datastore.CommonConfig{
            Debug:              debug,
            MaxIdleConnections: 1,
            MaxOpenConnections: 1,
            TablePrefix:        RandomTablePrefix(),
        },
        DatabasePath: "",
        Shared:       shared,
    }
}
| ["\"BUX_MONGODB_DOWNLOAD_URL\""] | [] | ["BUX_MONGODB_DOWNLOAD_URL"] | [] | ["BUX_MONGODB_DOWNLOAD_URL"] | go | 1 | 0 | |
blobcli/client.py
|
import os

from azure.storage.blob import BlobServiceClient, BlobPrefix, BlobProperties


class BlobStorageClient():
    """Azure Blob Storage Client."""

    def __init__(self):
        connect_str = os.getenv('AZURE_STORAGE_CONNECTION_STRING')
        if not connect_str:
            msg = 'Please add the connection string of the storage account to the AZURE_STORAGE_CONNECTION_STRING variable.'
            raise Exception(msg)
        self._blob_service_client = BlobServiceClient.from_connection_string(
            connect_str)

    def list_contaners(self):
        containers = []
        for container in self._blob_service_client.list_containers():
            if not container.deleted:
                containers.append({'name': container.name,
                                   'last_modified': container.last_modified})
        return containers

    def _convert_bytes(self, num):
        step_unit = 1024
        for x in ['B', 'K', 'M', 'G', 'T']:
            if num < step_unit:
                return '{:3.0f}{:s}'.format(num, x)
            num /= step_unit

    def list_blobs(self, container_name, blob_prefix):
        if container_name not in [c['name'] for c in self.list_contaners()]:
            msg = '{}: No such container'.format(container_name)
            raise Exception(msg)
        container_client = self._blob_service_client.get_container_client(
            container_name)
        blobs = []
        for blob in container_client.walk_blobs(name_starts_with=blob_prefix, delimiter='/'):
            if type(blob) == BlobPrefix:
                blobs.append({'type': 'prefix',
                              'name': blob.name})
            elif type(blob) == BlobProperties and not blob.deleted:
                blobs.append({'type': 'blob',
                              'name': blob.name.split('/')[-1],
                              'last_modified': blob.last_modified,
                              'size': self._convert_bytes(blob.size)})
        if blob_prefix and not blobs:
            msg = '{}: No such blob'.format(blob_prefix)
            raise Exception(msg)
        return blobs

    def delete_blob(self, container_name, blob_name):
        blob_client = self._blob_service_client.get_blob_client(
            container_name, blob_name)
        if not blob_client.exists():
            msg = '{}/{}: No such container or blob'.format(
                container_name, blob_name)
            raise Exception(msg)
        blob_client.delete_blob()

    def upload_blob(self, container_name, blob_name, path):
        blob_client = self._blob_service_client.get_blob_client(
            container_name, blob=blob_name)
        with open(path, 'rb') as f:
            blob_client.upload_blob(f, overwrite=True)

    def copy_blob(self, src_container_name, src_blob_name, dst_container_name, dst_blob_name):
        src_blob_client = self._blob_service_client.get_blob_client(
            src_container_name, src_blob_name)
        dst_blob_client = self._blob_service_client.get_blob_client(
            dst_container_name, dst_blob_name)
        if not src_blob_client.exists():
            msg = '{}/{}: No such container or blob'.format(
                src_container_name, src_blob_name)
            raise Exception(msg)
        stream = src_blob_client.download_blob().readall()
        dst_blob_client.upload_blob(stream, overwrite=True)

    def download_blob(self, container_name, blob_name, path):
        blob_client = self._blob_service_client.get_blob_client(
            container_name, blob=blob_name)
        if not blob_client.exists():
            msg = '{}/{}: No such container or blob'.format(
                container_name, blob_name)
            raise Exception(msg)
        with open(path, 'wb') as f:
            f.write(blob_client.download_blob().readall())
| [] | [] | ["AZURE_STORAGE_CONNECTION_STRING"] | [] | ["AZURE_STORAGE_CONNECTION_STRING"] | python | 1 | 0 | |
cms/settings.py
|
import hashlib

from django.conf import settings

# Customisable settings - see reference.markdown
USE_SITES_FRAMEWORK = getattr(settings, 'CMS_USE_SITES_FRAMEWORK', False)
TINYMCE_CONFIG = getattr(settings, 'CMS_TINYMCE_CONFIG', {})
POST_EDIT_CALLBACK = getattr(settings, 'CMS_POST_EDIT_CALLBACK', '""')
MAX_IMAGE_DIMENSIONS = getattr(settings, 'CMS_MAX_IMAGE_DIMENSIONS',
                               (1920, 1200))
BLOCK_REQUIRED_CALLBACK = getattr(settings, 'CMS_BLOCK_REQUIRED_CALLBACK',
                                  None)
IMAGE_REQUIRED_CALLBACK = getattr(settings, 'CMS_IMAGE_REQUIRED_CALLBACK',
                                  None)
VIDEO_REQUIRED_CALLBACK = getattr(settings, 'CMS_VIDEO_REQUIRED_CALLBACK',
                                  None)
DUMMY_IMAGE_SOURCE = getattr(settings, 'CMS_DUMMY_IMAGE_SOURCE', None)
DB_ALIAS = getattr(settings, 'CMS_DB_ALIAS', 'default')
UPLOAD_PATH = getattr(settings, 'CMS_UPLOAD_PATH', 'cms/%Y_%m')
LANGUAGES = getattr(settings, 'CMS_LANGUAGES', getattr(settings, 'LANGUAGES'))

# The following are for internal use and can't be customised
STATIC_URL = settings.STATIC_URL + 'cms/'

# Assume there's a template engine, and that the first one is the one we want
try:
    TEMPLATE_DIRS = settings.TEMPLATES[0].get('DIRS', [])
except AttributeError:
    # pre-1.8 fallback
    TEMPLATE_DIRS = settings.TEMPLATE_DIRS

# let's be *really* careful not to display content from another site using
# the same cache
CACHE_PREFIX = 'cms-%s' % hashlib.sha1(
    settings.SECRET_KEY.encode('utf-8')).hexdigest()[:5]
| [] | [] | [] | [] | [] | python | null | null | null