filename
stringlengths 4
198
| content
stringlengths 25
939k
| environment
list | variablearg
list | constarg
list | variableargjson
stringclasses 1
value | constargjson
stringlengths 2
3.9k
| lang
stringclasses 3
values | constargcount
float64 0
129
⌀ | variableargcount
float64 0
0
⌀ | sentence
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|
manage.py | #!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings module unless the caller
    # already set one in the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cmn_server.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Delegate to Django's command-line interface (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
main.py | # ISC License
#
# Copyright (c) 2016, Frank A. J. Wilson
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
""" main.py """
import logging
import os
import random
import time
import webapp2
from webapp2_extras.routes import RedirectRoute
from google.appengine.api import app_identity
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from basehandler import BaseHandler
compute = discovery.build(
'compute', 'v1', credentials=GoogleCredentials.get_application_default())
DEFAULT_ZONE = 'us-central1-f'
DEFAULT_MACHINE_TYPE = 'f1-micro'
DEFAULT_IMAGE_PROJECT = 'debian-cloud'
DEFAULT_IMAGE_FAMILY = 'debian-8'
INSTANCE_NAME_TEMPLATE = "{slug}-{date}-{rand_hex4:0=4x}"
DOCKER_IMAGE_NAME = 'datetimeupload'
def get_docker_image_urn():
    """
    Return the URN for our test image in the project-specific GCR repo,
    e.g. 'gcr.io/<application-id>/datetimeupload'.
    """
    return 'gcr.io/{}/{}'.format(
        app_identity.get_application_id(), DOCKER_IMAGE_NAME)
def build_instance_name(slug):
    """
    Build a compute instance name given a (human readable) slug; the name
    includes the launch datetime and a random component to prevent
    collisions.

    Parameters:
    :slug: A meaningful name that describes the nature or purpose
           of the instance

    Raises:
    ValueError: if slug is not a string (Python 2 basestring).
    """
    if not isinstance(slug, basestring):
        raise ValueError("Invalid slug value {}".format(slug))
    # rand_hex4 renders 16 random bits as 4 zero-padded hex digits
    # (template format spec '0=4x').
    return INSTANCE_NAME_TEMPLATE.format(
        slug=slug,
        date=time.strftime("%d%m%H%M"),
        rand_hex4=random.getrandbits(16)
    )
def get_instance_image_url():
    """
    Return the URL ('selfLink') of the bootable image the instance will
    run, resolved as the latest image of DEFAULT_IMAGE_FAMILY in
    DEFAULT_IMAGE_PROJECT.
    """
    response = compute.images().getFromFamily(
        project=DEFAULT_IMAGE_PROJECT, family=DEFAULT_IMAGE_FAMILY
    ).execute()
    return response['selfLink']
def build_machine_type_urn(zone=DEFAULT_ZONE, machine_type=DEFAULT_MACHINE_TYPE):
    """
    Build the URN for the machineType of the instance.

    Keyword arguments:
    :zone: the compute zone the instance lives in
    :machine_type: the machine type name, e.g. 'f1-micro'
    """
    template = 'zones/{}/machineTypes/{}'
    return template.format(zone, machine_type)
# N.B. get available machine types by executing the following on the CLI:
# gcloud compute machine-types list
def create_default_service_account(scopes=None):
    """
    Create an instance config fragment for the default service account.

    When no (non-empty) list of scopes is provided, the global scope is
    used, which allows the instance to perform any project action.

    Keyword arguments:
    :scopes: The list of scopes that this instance can access
    """
    # N.B. The default service account is considered a legacy mechanism and
    # you may want to use IAM roles in production:
    # https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam
    if not scopes:
        scopes = ['https://www.googleapis.com/auth/cloud-platform']
    return {'scopes': scopes, 'email': 'default'}
# A config fragment to provide an instance with an external network
# interface on the default network; one-to-one NAT gives it a public IP.
EXTERNAL_INTERFACE = {
    'network': 'global/networks/default',
    'accessConfigs': [
        {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
    ]
}
def build_insert_request_body(instance_name, startup_script):
    """
    Build the body of a compute 'insert' request given a name and a
    startup_script.

    Parameters:
    :instance_name: The name to be used for the instance
    :startup_script: The script to be run as the instance starts
    """
    disk_image_source = get_instance_image_url()
    return {
        'name': instance_name,
        'machineType': build_machine_type_urn(),
        # Single auto-deleting boot disk built from the family image.
        'disks': [
            {
                'boot': True,
                'autoDelete': True,
                'initializeParams': {
                    'sourceImage': disk_image_source,
                }
            }
        ],
        'serviceAccounts': [create_default_service_account()],
        # GCE runs the value of the 'startup-script' metadata key on boot.
        'metadata': {
            'items': [
                {
                    'key': 'startup-script',
                    'value': startup_script
                }
            ]
        },
        'networkInterfaces': [
            EXTERNAL_INTERFACE
        ]
    }
class TaskLauncher(BaseHandler):
    """ Handler for launching tasks on compute engine """

    def _launch_instance(self):
        # Launch a fresh compute instance whose startup script (rendered
        # from startup.sh) runs the Docker image and writes its output to
        # this project's default GCS bucket.
        instance_name = build_instance_name('test')
        project = app_identity.get_application_id()
        container_args = 'gs://{}.appspot.com/{}/{}.txt'.format(
            project, DOCKER_IMAGE_NAME, instance_name)
        startup_script = self.jinja2.render_template(
            'startup.sh', instance_name=instance_name,
            docker_image=get_docker_image_urn(),
            container_args=container_args,
            zone_id=DEFAULT_ZONE
        )
        request = compute.instances().insert(
            project=project, zone=DEFAULT_ZONE,
            body=build_insert_request_body(instance_name, startup_script))
        logging.info('compute insert service response %s', request.execute())
        return instance_name

    def get(self):
        """ Handle get request """
        # Requests from App Engine cron carry the X-Appengine-Cron header;
        # launch directly without rendering the form.
        if self.request.headers.get('X-Appengine-Cron', False):
            self._launch_instance()
            return
        self.render_response(
            'launch_form.html',
            launched=self.request.GET.get('launched'))

    def post(self):
        """ Handle post request """
        instance_name = self._launch_instance()
        # Redirect-after-POST; the launched instance name is surfaced via
        # the 'launched' query parameter.
        self.redirect(
            self.request.path + '?launched={}'.format(instance_name))
#####################
# Application Setup #
#####################
# This part sets up the WSGI app.
# It is a little more involved than usual because we are trying to
# setup a cookie secret without storing it in git.
def on_dev_server():
    """ Return true if we are running on dev_appserver"""
    server_software = os.environ.get('SERVER_SOFTWARE', '')
    return server_software.startswith('Dev')
def get_project_metadata(metadata_key):
    """
    Get a project metadata value by key.

    Parameters:
    :metadata_key: key for the value to fetch

    Returns the metadata value, or None when the key is not present.
    """
    project_id = app_identity.get_application_id()
    project = compute.projects().get(project=project_id).execute()
    # commonInstanceMetadata holds a list of {'key': ..., 'value': ...} items.
    for entry in project['commonInstanceMetadata']['items']:
        if entry['key'] == metadata_key:
            # (Removed a stray debugging 'print type(...)' statement that
            # had been left in the lookup loop.)
            return entry['value']
    return None
def get_cookie_secret_error():
    """ Return the appropriate error message if secrets have not been setup"""
    if on_dev_server():
        target = 'secrets'
    else:
        target = 'cloud_secrets'
    return 'ERROR: Cookie secret not set, run "make {}"'.format(target)
def get_dev_secrets():
    """
    Get secrets used when developing locally.

    Returns the cookie secret from the git-ignored _dev module, or None
    when that module is not present.
    """
    try:
        import _dev
    except ImportError:
        return None
    return _dev.secrets['cookie_secret']
def get_application(routes):
    """
    Return a configured webapp2 WSGI app if secrets have been setup properly.
    Otherwise return a trivial 'error' WSGI app if not.
    Parameters:
    :routes: webapp2 routes
    """
    # The cookie secret lives in project metadata in production and in a
    # local (git-ignored) module on the dev server.
    if on_dev_server():
        cookie_secret = get_dev_secrets()
    else:
        cookie_secret = get_project_metadata('cookie_secret')

    config = {
        'webapp2_extras.sessions': {
            'secret_key': str(cookie_secret),
        },
    }

    if not cookie_secret:
        return error_app(get_cookie_secret_error())
    return webapp2.WSGIApplication(routes=routes, config=config)
def error_app(msg):
    """
    A trivial WSGI app that always displays a 500 error with a given message
    Parameters:
    :msg: The message to display in the response body with the 500 error
    """
    def _app(_, start_response):
        headers = [
            ('Content-Type', 'text/plain'),
            ('Content-Length', str(len(msg)))
        ]
        start_response('500 Error', headers)
        return [msg]
    return _app
# Module-level WSGI application: '/' redirects to the launcher form,
# which is handled by TaskLauncher.
app = get_application([
    RedirectRoute('/', redirect_to='/launcher'),
    ('/launcher', TaskLauncher),
])
| []
| []
| [
"SERVER_SOFTWARE"
]
| [] | ["SERVER_SOFTWARE"] | python | 1 | 0 | |
corn/main.go | package corn
import (
"github.com/robfig/cron/v3"
"os"
"strconv"
"time"
"weather-push/api"
"weather-push/mail"
"weather-push/model"
"weather-push/util"
)
// Start sets up and launches the scheduled job. The cron expression is
// read from the "Cron" environment variable and jobs are scheduled in the
// Asia/Shanghai time zone.
func Start() {
	nyc, _ := time.LoadLocation("Asia/Shanghai")
	cJob := cron.New(cron.WithLocation(nyc))
	cronCfg := os.Getenv("Cron")
	// Register the periodic job.
	_, err := cJob.AddFunc(cronCfg, func() {
		dispatch()
	})
	if err != nil {
		util.Log().Error("启动定时任务出错", err)
	}
	util.Log().Info("定时任务已开启成功: ", cronCfg)
	cJob.Start()
}
// dispatch fetches all registered users and mails each one the weather
// report for their configured address.
func dispatch() {
	users := model.GetUsers()
	// Walk through every user.
	for _, user := range users {
		weather := api.QueryWithCityName(user.Address)
		sendMail(user, weather)
	}
}
// sendMail builds the SMTP configuration from environment variables
// (Host, Port, Email_Username, Email_Password) and sends the weather
// report to the given user.
func sendMail(user model.User, weather model.Weather) {
	// NOTE(review): the Atoi error is discarded, so a missing or invalid
	// "Port" silently becomes 0 — confirm this is intended.
	port, _ := strconv.Atoi(os.Getenv("Port"))
	emailConfig := model.EmailConfig{
		Host:     os.Getenv("Host"),
		Port:     port,
		Username: os.Getenv("Email_Username"),
		Password: os.Getenv("Email_Password"),
	}
	mail.SendEmail(emailConfig, user, weather)
} | [
"\"Cron\"",
"\"Port\"",
"\"Host\"",
"\"Email_Username\"",
"\"Email_Password\""
]
| []
| [
"Port",
"Host",
"Cron",
"Email_Password",
"Email_Username"
]
| [] | ["Port", "Host", "Cron", "Email_Password", "Email_Username"] | go | 5 | 0 | |
product/migrations/0002_alter_product_id.py | # Generated by Django 3.2.4 on 2021-06-07 19:48
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated migration: switches Product.id to a UUID primary key
    # with uuid.uuid4 as the default value.

    dependencies = [
        ('product', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False),
        ),
    ]
| []
| []
| []
| [] | [] | python | null | null | null |
compound.py | #!/usr/bin/env python3
"""Cyberjunky's 3Commas bot helpers."""
import argparse
import configparser
import json
import math
import os
import sqlite3
import sys
import time
from pathlib import Path
from helpers.logging import Logger, NotificationHandler
from helpers.misc import check_deal, get_round_digits, remove_prefix, wait_time_interval
from helpers.threecommas import get_threecommas_deals, init_threecommas_api
def load_config():
    """Create default or load existing config file.

    Returns the parsed ConfigParser when '{datadir}/{program}.ini' exists;
    otherwise writes an example configuration and returns None so the
    caller can ask the user to edit it first.
    """
    cfg = configparser.ConfigParser()

    if cfg.read(f"{datadir}/{program}.ini"):
        return cfg

    # No config file yet: write an example with documented defaults.
    cfg["settings"] = {
        "timezone": "Europe/Amsterdam",
        "timeinterval": 3600,
        "debug": False,
        "logrotate": 7,
        "default-profittocompound": 1.0,
        "3c-apikey": "Your 3Commas API Key",
        "3c-apisecret": "Your 3Commas API Secret",
        "notifications": False,
        "notify-urls": ["notify-url1", "notify-url2"],
    }
    cfg["bot_id"] = {
        "compoundmode": "boso",
        "profittocompound": 1.0,
        "usermaxactivedeals": 5,
        "usermaxsafetyorders": 5,
        "comment": "Just a description of the bot(s)",
    }

    with open(f"{datadir}/{program}.ini", "w") as cfgfile:
        cfg.write(cfgfile)

    return None
def upgrade_config(thelogger, theapi, cfg):
    """Upgrade config file to the current layout if needed.

    Parameters:
    :thelogger: logger used for progress and error reporting
    :theapi: initialized 3Commas API client, used to fetch bot data
    :cfg: ConfigParser instance to upgrade (modified in place)

    Returns the (possibly rewritten) ConfigParser instance.

    Fix: this function previously read through the module-level ``config``
    and logged through the module-level ``logger`` instead of its own
    ``cfg``/``thelogger`` parameters; it now uses its parameters
    consistently.
    """
    # Rename the old global 'profittocompound' to 'default-profittocompound'.
    if cfg.has_option("settings", "profittocompound"):
        cfg.set(
            "settings",
            "default-profittocompound",
            cfg.get("settings", "profittocompound"),
        )
        cfg.remove_option("settings", "profittocompound")

        with open(f"{datadir}/{program}.ini", "w+") as cfgfile:
            cfg.write(cfgfile)

        thelogger.info("Upgraded the configuration file (default-profittocompound)")

    # Convert the old 'botids' list into one 'bot_<id>' section per bot.
    if cfg.has_option("settings", "botids"):
        thebotids = json.loads(cfg.get("settings", "botids"))
        default_profit_percentage = float(
            cfg.get("settings", "default-profittocompound")
        )

        # Walk through all bots configured
        for thebot in thebotids:
            if not cfg.has_section(f"bot_{thebot}"):
                error, data = theapi.request(
                    entity="bots",
                    action="show",
                    action_id=str(thebot),
                )
                if data:
                    # Add new config section seeded from the bot's current
                    # settings, with some headroom on the maximums.
                    cfg[f"bot_{thebot}"] = {
                        "compoundmode": "boso",
                        "profittocompound": default_profit_percentage,
                        "usermaxactivedeals": int(data["max_active_deals"]) + 5,
                        "usermaxsafetyorders": int(data["max_safety_orders"]) + 5,
                        "comment": data["name"],
                    }
                else:
                    if error and "msg" in error:
                        thelogger.error(
                            "Error occurred upgrading config: %s" % error["msg"]
                        )
                    else:
                        thelogger.error("Error occurred upgrading config")

        cfg.remove_option("settings", "botids")

        with open(f"{datadir}/{program}.ini", "w+") as cfgfile:
            cfg.write(cfgfile)

        thelogger.info("Upgraded the configuration file (create sections)")

    return cfg
def get_logged_profit_for_bot(bot_id):
    """Get the sum of all logged profit for a bot.

    Parameters:
    :bot_id: id of the bot to sum profits for

    Returns 0.0 when no deals have been logged yet.
    """
    # Parameterized query instead of f-string interpolation: avoids SQL
    # injection / quoting issues with the interpolated value.
    data = cursor.execute(
        "SELECT sum(profit) FROM deals WHERE botid = ?", (bot_id,)
    ).fetchone()[0]
    if data is None:
        return float(0)

    return data
def update_bot_order_volumes(
    thebot, new_base_order_volume, new_safety_order_volume, profit_sum, deals_count
):
    """Update bot with new order volumes.

    Parameters:
    :thebot: bot data dict as returned by the 3Commas API
    :new_base_order_volume: new base order (BO) volume to set
    :new_safety_order_volume: new safety order (SO) volume to set
    :profit_sum: compounded profit amount (used in the notification only)
    :deals_count: number of deals the profit came from (notification only)
    """
    bot_name = thebot["name"]
    base_order_volume = float(thebot["base_order_volume"])
    safety_order_volume = float(thebot["safety_order_volume"])

    logger.info(
        "Calculated BO volume changed from: %s to %s"
        % (base_order_volume, new_base_order_volume)
    )
    logger.info(
        "Calculated SO volume changed from: %s to %s"
        % (safety_order_volume, new_safety_order_volume)
    )

    # The 3Commas 'update' endpoint expects the full bot definition, so all
    # unchanged fields are echoed back from the current bot data.
    error, data = api.request(
        entity="bots",
        action="update",
        action_id=str(thebot["id"]),
        payload={
            "bot_id": thebot["id"],
            "name": thebot["name"],
            "pairs": thebot["pairs"],
            "base_order_volume": new_base_order_volume,  # new base order volume
            "safety_order_volume": new_safety_order_volume,  # new safety order volume
            "take_profit": thebot["take_profit"],
            "martingale_volume_coefficient": thebot["martingale_volume_coefficient"],
            "martingale_step_coefficient": thebot["martingale_step_coefficient"],
            "max_active_deals": thebot["max_active_deals"],
            "max_safety_orders": thebot["max_safety_orders"],
            "safety_order_step_percentage": thebot["safety_order_step_percentage"],
            "take_profit_type": thebot["take_profit_type"],
            "strategy_list": thebot["strategy_list"],
            "active_safety_orders_count": thebot["active_safety_orders_count"],
            "leverage_type": thebot["leverage_type"],
            "leverage_custom_value": thebot["leverage_custom_value"],
        },
    )
    if data:
        rounddigits = get_round_digits(thebot["pairs"][0])
        logger.info(
            f"Compounded ₿{round(profit_sum, rounddigits)} in profit from {deals_count} deal(s) "
            f"made by '{bot_name}'\nChanged BO from ₿{round(base_order_volume, rounddigits)} to "
            f"₿{round(new_base_order_volume, rounddigits)}\nChanged SO from "
            f"₿{round(safety_order_volume, rounddigits)} to ₿{round(new_safety_order_volume, rounddigits)}",
            True,
        )
    else:
        if error and "msg" in error:
            logger.error(
                "Error occurred updating bot with new BO/SO values: %s" % error["msg"]
            )
        else:
            logger.error("Error occurred updating bot with new BO/SO values")
def process_deals(deals):
    """Register finished deals from a bot and sum the newly seen profit.

    Parameters:
    :deals: list of deal dicts as returned by the 3Commas API

    Returns a (deals_count, profit_sum) tuple covering only deals that had
    not been processed before.
    """
    deals_count = 0
    profit_sum = 0.0

    for deal in deals:
        deal_id = deal["id"]
        bot_id = deal["bot_id"]

        # Register deal in database
        exist = check_deal(cursor, deal_id)
        if exist:
            logger.debug("Deal with id '%s' already processed, skipping." % deal_id)
        else:
            # Deal not processed yet
            profit = float(deal["final_profit"])
            deals_count += 1
            profit_sum += profit

            # Parameterized insert: the values come from an external API,
            # so never interpolate them into the SQL string.
            db.execute(
                "INSERT INTO deals (dealid, profit, botid) VALUES (?, ?, ?)",
                (deal_id, profit, bot_id),
            )

    logger.info("Finished deals: %s total profit: %s" % (deals_count, profit_sum))
    db.commit()

    # Calculate profit part to compound
    logger.info("Profit available to compound: %s" % profit_sum)

    return (deals_count, profit_sum)
def get_bot_values(thebot):
    """Load start BO/SO values from database or calculate and store them.

    Parameters:
    :thebot: bot data dict as returned by the 3Commas API

    Returns a (startbo, startso, startactivedeals) tuple: the bot's
    original base order volume, safety order volume and max. active deals
    as first seen by this program.
    """
    startbo = 0.0
    startso = 0.0
    startactivedeals = thebot["max_active_deals"]
    bot_id = thebot["id"]

    # Parameterized query instead of f-string SQL interpolation.
    data = cursor.execute(
        "SELECT startbo, startso, startactivedeals FROM bots WHERE botid = ?",
        (bot_id,),
    ).fetchone()
    if data:
        # Fetch values from database
        startbo = data[0]
        startso = data[1]
        startactivedeals = data[2]
        logger.info(
            "Fetched bot start BO, SO values and max. active deals: %s %s %s"
            % (startbo, startso, startactivedeals)
        )
    else:
        # First time we see this bot: snapshot its current values.
        startbo = float(thebot["base_order_volume"])
        startso = float(thebot["safety_order_volume"])
        startactivedeals = thebot["max_active_deals"]
        db.execute(
            "INSERT INTO bots (botid, startbo, startso, startactivedeals) "
            "VALUES (?, ?, ?, ?)",
            (bot_id, startbo, startso, startactivedeals),
        )
        logger.info(
            "Stored bot start BO, SO values and max. active deals: %s %s %s"
            % (startbo, startso, startactivedeals)
        )
        db.commit()

    return (startbo, startso, startactivedeals)
def update_bot_max_deals(thebot, org_base_order, org_safety_order, new_max_deals):
    """Update bot with new max deals and old bo/so values.

    Parameters:
    :thebot: bot data dict as returned by the 3Commas API
    :org_base_order: original base order volume to restore
    :org_safety_order: original safety order volume to restore
    :new_max_deals: new maximum number of active deals
    """
    bot_name = thebot["name"]
    base_order_volume = float(thebot["base_order_volume"])
    safety_order_volume = float(thebot["safety_order_volume"])
    max_active_deals = thebot["max_active_deals"]

    logger.info(
        "Calculated max. active deals changed from: %s to %s"
        % (max_active_deals, new_max_deals)
    )
    logger.info(
        "Calculated BO volume changed from: %s to %s"
        % (base_order_volume, org_base_order)
    )
    logger.info(
        "Calculated SO volume changed from: %s to %s"
        % (safety_order_volume, org_safety_order)
    )

    # The 3Commas 'update' endpoint expects the full bot definition, so all
    # unchanged fields are echoed back from the current bot data.
    error, data = api.request(
        entity="bots",
        action="update",
        action_id=str(thebot["id"]),
        payload={
            "bot_id": thebot["id"],
            "name": thebot["name"],
            "pairs": thebot["pairs"],
            "base_order_volume": org_base_order,  # original base order volume
            "safety_order_volume": org_safety_order,  # original safety order volume
            "take_profit": thebot["take_profit"],
            "martingale_volume_coefficient": thebot["martingale_volume_coefficient"],
            "martingale_step_coefficient": thebot["martingale_step_coefficient"],
            "max_active_deals": new_max_deals,  # new max. deals value
            "max_safety_orders": thebot["max_safety_orders"],
            "safety_order_step_percentage": thebot["safety_order_step_percentage"],
            "take_profit_type": thebot["take_profit_type"],
            "strategy_list": thebot["strategy_list"],
            "active_safety_orders_count": thebot["active_safety_orders_count"],
        },
    )
    if data:
        rounddigits = get_round_digits(thebot["pairs"][0])
        # NOTE(review): this mixes f-string interpolation with a trailing
        # %-format; it works because the two %s placeholders survive the
        # f-string pass, but it is fragile and worth normalizing.
        logger.info(
            f"Changed max. active deals from: %s to %s for bot\n'{bot_name}'\n"
            f"Changed BO from ${round(base_order_volume, rounddigits)} to "
            f"${round(org_base_order, rounddigits)}\nChanged SO from "
            f"${round(safety_order_volume, rounddigits)} to ${round(org_safety_order, rounddigits)}"
            % (max_active_deals, new_max_deals)
        )
    else:
        if error and "msg" in error:
            logger.error(
                "Error occurred updating bot with new max. deals and BO/SO values: %s"
                % error["msg"]
            )
        else:
            logger.error(
                "Error occurred updating bot with new max. deals and BO/SO values"
            )
def update_bot_max_safety_orders(
    thebot, org_base_order, org_safety_order, new_max_safety_orders
):
    """Update bot with new max safety orders and old bo/so values.

    Parameters:
    :thebot: bot data dict as returned by the 3Commas API
    :org_base_order: original base order volume to restore
    :org_safety_order: original safety order volume to restore
    :new_max_safety_orders: new maximum number of safety orders
    """
    bot_name = thebot["name"]
    base_order_volume = float(thebot["base_order_volume"])
    safety_order_volume = float(thebot["safety_order_volume"])
    max_safety_orders = thebot["max_safety_orders"]

    logger.info(
        "Calculated max. safety orders changed from: %s to %s"
        % (max_safety_orders, new_max_safety_orders)
    )
    logger.info(
        "Calculated BO volume changed from: %s to %s"
        % (base_order_volume, org_base_order)
    )
    logger.info(
        "Calculated SO volume changed from: %s to %s"
        % (safety_order_volume, org_safety_order)
    )

    # The 3Commas 'update' endpoint expects the full bot definition, so all
    # unchanged fields are echoed back from the current bot data.
    error, data = api.request(
        entity="bots",
        action="update",
        action_id=str(thebot["id"]),
        payload={
            "bot_id": thebot["id"],
            "name": thebot["name"],
            "pairs": thebot["pairs"],
            "base_order_volume": org_base_order,  # original base order volume
            "safety_order_volume": org_safety_order,  # original safety order volume
            "take_profit": thebot["take_profit"],
            "martingale_volume_coefficient": thebot["martingale_volume_coefficient"],
            "martingale_step_coefficient": thebot["martingale_step_coefficient"],
            "max_active_deals": thebot["max_active_deals"],
            "max_safety_orders": new_max_safety_orders,  # new max. safety orders value
            "safety_order_step_percentage": thebot["safety_order_step_percentage"],
            "take_profit_type": thebot["take_profit_type"],
            "strategy_list": thebot["strategy_list"],
            "active_safety_orders_count": thebot["active_safety_orders_count"],
        },
    )
    if data:
        rounddigits = get_round_digits(thebot["pairs"][0])
        logger.info(
            f"Changed max. active safety orders from: %s to %s for bot\n'{bot_name}'\n"
            f"Changed BO from ${round(base_order_volume, rounddigits)} to "
            f"${round(org_base_order, rounddigits)}\nChanged SO from "
            f"${round(safety_order_volume, rounddigits)} to ${round(org_safety_order, rounddigits)}"
            # BUGFIX: the message previously interpolated max_safety_orders
            # twice, so the "to" value never showed the new count.
            % (max_safety_orders, new_max_safety_orders)
        )
    else:
        if error and "msg" in error:
            logger.error(
                "Error occurred updating bot with new max. safety orders and BO/SO values: %s"
                % error["msg"]
            )
        else:
            logger.error(
                "Error occurred updating bot with new max. safety orders and BO/SO values"
            )
def compound_bot(cfg, thebot):
    """Find profit from deals and compound it into the bot's settings.

    Depending on the bot's 'compoundmode' configuration:
    - 'safetyorders': add one max. safety order once enough profit exists.
    - 'deals': add one max. active deal (restoring the original BO/SO
      values) once enough profit exists.
    - 'boso' (default): raise BO and SO volumes proportionally with the
      newly made profit (always runs, below).

    Parameters:
    :cfg: loaded ConfigParser with a 'bot_{id}' section for this bot
    :thebot: bot data dict as returned by the 3Commas API
    """
    bot_name = thebot["name"]
    bot_id = thebot["id"]

    deals = get_threecommas_deals(logger, api, bot_id)

    # Fraction (0..1) of the profit that should be compounded.
    bot_profit_percentage = float(
        cfg.get(
            f"bot_{bot_id}",
            "profittocompound",
            fallback=cfg.get("settings", "default-profittocompound"),
        )
    )

    if cfg.get(f"bot_{bot_id}", "compoundmode", fallback="boso") == "safetyorders":
        logger.info("Compound mode for this bot is: Safety Orders")

        # Get starting BO and SO values
        (startbo, startso, startactivedeals) = get_bot_values(thebot)

        # Get active safety order settings
        user_defined_max_safety_orders = int(
            cfg.get(f"bot_{bot_id}", "usermaxsafetyorders")
        )

        # Get active deal settings
        user_defined_max_active_deals = int(
            cfg.get(f"bot_{bot_id}", "usermaxactivedeals")
        )

        # Calculate amount used per deal
        max_safety_orders = float(thebot["max_safety_orders"])
        martingale_volume_coefficient = float(
            thebot["martingale_volume_coefficient"]
        )  # Safety order volume scale

        # Always add start_base_order_size.
        # ROBUSTNESS: both accumulators are initialized so a bot configured
        # with zero safety orders no longer hits an unbound local below.
        totalusedperdeal = startbo
        total_safety_order_volume = 0.0
        profit_needed_to_add_so = 0.0
        isafetyorder = 1
        while isafetyorder <= max_safety_orders:
            # For the first Safety order, just use the startso
            if isafetyorder == 1:
                total_safety_order_volume = startso

            # After the first SO, multiply the previous SO with the safety
            # order volume scale
            if isafetyorder > 1:
                total_safety_order_volume *= martingale_volume_coefficient

            totalusedperdeal += total_safety_order_volume

            # Calculate the profit needed to add one extra SO to all
            # startactivedeals deals
            if isafetyorder == max_safety_orders:
                total_safety_order_volume *= (
                    martingale_volume_coefficient  # order size of the next SO
                )
                profit_needed_to_add_so = total_safety_order_volume * startactivedeals
            isafetyorder += 1

        # Calculate % to compound (per bot)
        totalprofitforbot = get_logged_profit_for_bot(thebot["id"])
        profitusedtocompound = totalprofitforbot * bot_profit_percentage

        # If we have more profitusedtocompound
        new_max_safety_orders = max_safety_orders
        if profitusedtocompound > profit_needed_to_add_so:
            new_max_safety_orders = max_safety_orders + 1

        if new_max_safety_orders > user_defined_max_safety_orders:
            logger.info(
                f"Already reached max set number of safety orders ({user_defined_max_safety_orders}), "
                f"skipping deal compounding"
            )

        if new_max_safety_orders > max_safety_orders:
            if new_max_safety_orders <= user_defined_max_safety_orders:
                logger.info("Enough profit has been made to add a safety order")
                # Update the bot
                update_bot_max_safety_orders(
                    thebot, startbo, startso, new_max_safety_orders
                )

    if cfg.get(f"bot_{bot_id}", "compoundmode", fallback="boso") == "deals":
        logger.info("Compound mode for this bot is: DEALS")

        # Get starting BO and SO values
        (startbo, startso, startactivedeals) = get_bot_values(thebot)

        # Get active deal settings
        user_defined_max_active_deals = int(
            cfg.get(f"bot_{bot_id}", "usermaxactivedeals")
        )

        # Calculate amount used per deal
        max_safety_orders = float(thebot["max_safety_orders"])
        martingale_volume_coefficient = float(
            thebot["martingale_volume_coefficient"]
        )  # Safety order volume scale

        # Always add start_base_order_size
        totalusedperdeal = startbo
        total_safety_order_volume = 0.0
        isafetyorder = 1
        while isafetyorder <= max_safety_orders:
            # For the first Safety order, just use the startso
            if isafetyorder == 1:
                total_safety_order_volume = startso

            # After the first SO, multiply the previous SO with the safety
            # order volume scale
            if isafetyorder > 1:
                total_safety_order_volume *= martingale_volume_coefficient

            totalusedperdeal += total_safety_order_volume
            isafetyorder += 1

        # Calculate % to compound (per bot)
        totalprofitforbot = get_logged_profit_for_bot(thebot["id"])
        profitusedtocompound = totalprofitforbot * bot_profit_percentage

        new_max_active_deals = (
            math.floor(profitusedtocompound / totalusedperdeal) + startactivedeals
        )
        current_active_deals = thebot["max_active_deals"]

        if new_max_active_deals > user_defined_max_active_deals:
            logger.info(
                f"Already reached max set number of deals ({user_defined_max_active_deals}), "
                f"skipping deal compounding"
            )
        elif (
            # BUGFIX: this condition used the bitwise '&' operator, which
            # binds tighter than the comparisons and turned the test into a
            # chained comparison against 'current & new' — use 'and'.
            new_max_active_deals > current_active_deals
            and new_max_active_deals <= user_defined_max_active_deals
        ):
            logger.info(
                "Enough profit has been made to add a deal and lower BO & SO to their orginal values"
            )
            # Update the bot
            update_bot_max_deals(thebot, startbo, startso, new_max_active_deals)

    if deals:
        (deals_count, profit_sum) = process_deals(deals)
        profit_sum *= bot_profit_percentage
        logger.info(
            "Profit available after applying percentage value (%s): %s "
            % (bot_profit_percentage, profit_sum)
        )

        if profit_sum:
            # Bot values to calculate with
            base_order_volume = float(thebot["base_order_volume"])
            safety_order_volume = float(thebot["safety_order_volume"])
            max_active_deals = thebot["max_active_deals"]
            max_safety_orders = thebot["max_safety_orders"]
            martingale_volume_coefficient = float(
                thebot["martingale_volume_coefficient"]
            )

            # Total SO funds needed for one deal, following the volume scale.
            funds_so_needed = safety_order_volume
            total_so_funds = safety_order_volume
            if max_safety_orders > 1:
                for i in range(1, max_safety_orders):
                    funds_so_needed *= float(martingale_volume_coefficient)
                    total_so_funds += funds_so_needed

            logger.info("Current bot settings :")
            logger.info("Base order volume : %s" % base_order_volume)
            logger.info("Safety order volume : %s" % safety_order_volume)
            logger.info("Max active deals : %s" % max_active_deals)
            logger.info("Max safety orders : %s" % max_safety_orders)
            logger.info("SO volume scale : %s" % martingale_volume_coefficient)

            # Calculate the BO/SO ratio
            bo_percentage = (
                100
                * float(base_order_volume)
                / (float(base_order_volume) + float(total_so_funds))
            )
            so_percentage = (
                100
                * float(total_so_funds)
                / (float(total_so_funds) + float(base_order_volume))
            )
            logger.info("BO percentage: %s" % bo_percentage)
            logger.info("SO percentage: %s" % so_percentage)

            # Calculate compound values
            bo_profit = ((profit_sum * bo_percentage) / 100) / max_active_deals
            so_profit = bo_profit * (safety_order_volume / base_order_volume)
            logger.info("BO compound value: %s" % bo_profit)
            logger.info("SO compound value: %s" % so_profit)

            # Update the bot
            update_bot_order_volumes(
                thebot,
                (base_order_volume + bo_profit),
                (safety_order_volume + so_profit),
                profit_sum,
                deals_count,
            )
        else:
            logger.info(
                f"{bot_name}\nNo (new) profit made, no BO/SO value updates needed!",
                True,
            )
    else:
        logger.info(f"{bot_name}\nNo (new) deals found for this bot!", True)
def init_compound_db():
    """Create or open database to store bot and deals data.

    Opens '{datadir}/{program}.sqlite3' read/write; when it does not exist
    yet, creates it together with the 'deals' and 'bots' tables.
    """
    try:
        dbname = f"{program}.sqlite3"
        dbpath = f"file:{datadir}/{dbname}?mode=rw"
        # mode=rw makes connect() raise instead of silently creating an
        # empty database, so we know when the schema must be created.
        dbconnection = sqlite3.connect(dbpath, uri=True)
        logger.info(f"Database '{datadir}/{dbname}' opened successfully")
    except sqlite3.OperationalError:
        dbconnection = sqlite3.connect(f"{datadir}/{dbname}")
        dbcursor = dbconnection.cursor()
        logger.info(f"Database '{datadir}/{dbname}' created successfully")
        dbcursor.execute(
            "CREATE TABLE deals (dealid INT Primary Key, profit REAL, botid int)"
        )
        dbcursor.execute(
            "CREATE TABLE bots (botid INT Primary Key, startbo REAL, startso REAL, startactivedeals int)"
        )
        logger.info("Database tables created successfully")

    return dbconnection
def upgrade_compound_db():
    """Upgrade database schema if needed.

    Each ALTER/CREATE is attempted and sqlite3.OperationalError is
    swallowed, since that error indicates the column/table already exists.
    """
    try:
        cursor.execute("ALTER TABLE deals ADD COLUMN profit REAL")
        logger.info("Database table deals upgraded (column profit)")
    except sqlite3.OperationalError:
        pass

    try:
        cursor.execute("ALTER TABLE deals ADD COLUMN botid int")
        logger.info("Database table deals upgraded (column botid)")
    except sqlite3.OperationalError:
        pass

    try:
        cursor.execute(
            "CREATE TABLE bots (botid INT Primary Key, startbo REAL, startso REAL, startactivedeals int)"
        )
        logger.info("Database schema upgraded (table bots)")
    except sqlite3.OperationalError:
        pass
# Start application
program = Path(__file__).stem

# Parse and interpret options.
parser = argparse.ArgumentParser(description="Cyberjunky's 3Commas bot helper.")
parser.add_argument("-d", "--datadir", help="data directory to use", type=str)
args = parser.parse_args()

if args.datadir:
    datadir = args.datadir
else:
    datadir = os.getcwd()

# Create or load configuration file
config = load_config()
if not config:
    # Initialise temp logging
    logger = Logger(datadir, program, None, 7, False, False)
    logger.info(
        f"Created example config file '{datadir}/{program}.ini', edit it and restart the program"
    )
    sys.exit(0)
else:
    # Handle timezone
    if hasattr(time, "tzset"):
        os.environ["TZ"] = config.get(
            "settings", "timezone", fallback="Europe/Amsterdam"
        )
        time.tzset()

    # Init notification handler
    notification = NotificationHandler(
        program,
        config.getboolean("settings", "notifications"),
        config.get("settings", "notify-urls"),
    )

    # Initialise logging
    logger = Logger(
        datadir,
        program,
        notification,
        int(config.get("settings", "logrotate", fallback=7)),
        config.getboolean("settings", "debug"),
        config.getboolean("settings", "notifications"),
    )

    # Initialize 3Commas API
    api = init_threecommas_api(config)

    # Upgrade config file if needed
    config = upgrade_config(logger, api, config)

    logger.info(f"Loaded configuration from '{datadir}/{program}.ini'")

# Initialize or open the database
db = init_compound_db()
cursor = db.cursor()

# Upgrade the database if needed
upgrade_compound_db()

# Auto compound profit by tweaking SO/BO; loop until the wait helper
# signals shutdown.
while True:
    # Reload config files and data to catch changes
    config = load_config()
    logger.info(f"Reloaded configuration from '{datadir}/{program}.ini'")

    # Configuration settings
    timeint = int(config.get("settings", "timeinterval"))

    for section in config.sections():
        # Each section is a bot
        if section.startswith("bot_"):
            botid = remove_prefix(section, "bot_")
            if botid:
                boterror, botdata = api.request(
                    entity="bots",
                    action="show",
                    action_id=str(botid),
                )
                if botdata:
                    compound_bot(config, botdata)
                else:
                    if boterror and "msg" in boterror:
                        logger.error(
                            "Error occurred updating bots: %s" % boterror["msg"]
                        )
                    else:
                        logger.error("Error occurred updating bots")
            else:
                logger.error("Invalid botid found: %s" % botid)

    if not wait_time_interval(logger, notification, timeint):
        break
| []
| []
| [
"TZ"
]
| [] | ["TZ"] | python | 1 | 0 | |
support/sdk/python/tinyos/message/MoteIF.py | #
# Copyright (c) 2005-2006
# The President and Fellows of Harvard College.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Geoffrey Mainland <[email protected]>
# Tinyos-2: Stephen Dawson-Haggerty
import os
import re
import struct
import sys
import traceback
from tinyos.packet.Serial import Serial
from tinyos.message.SerialPacket import SerialPacket
import tinyos.packet.PacketDispatcher
import tinyos.packet.PacketSource
import tinyos.packet.SFSource
try:
import tinyos.packet.SerialSource
except:
print >> sys.stderr, "SerialSource not imported"
tinyos.packet.SerialSource = None
import Queue
from threading import Thread
DEBUG = False
class MoteIFException(Exception):
    """Raised when a MoteIF packet source cannot be parsed or opened."""

    def __init__(self, *details):
        self.args = details
class MoteIF:
    """Mediator between a packet source (serial forwarder or serial port)
    and registered message listeners.

    A background "rxDispatch" thread drains ``receiveQueue`` -- which holds
    ``(source, raw_packet)`` tuples (presumably enqueued by the packet
    source; confirm in SFSource/SerialSource) -- and dispatches decoded
    messages to listeners keyed by AM type.
    """
    def __init__(self):
        # Maps listener object -> {AM type: message class}.
        self.listeners = {}
        # Drained by the rxDispatch thread started in addSource().
        self.receiveQueue = Queue.Queue()
        # Cleared by finishAll() to stop the dispatch loop.
        self.running = True
        # Error callback handed to SerialSource; no-op until addErrorSignal().
        self.signalError = lambda:None
    def addListener(self, listener, msgClass):
        """Register `listener` to receive messages of `msgClass`'s AM type.

        A listener may be registered for several message classes; each call
        adds one (amType -> msgClass) entry to its table.
        """
        if listener not in self.listeners:
            self.listeners[listener] = {}
        amTypes = self.listeners[listener]
        amTypes[msgClass.get_amType()] = msgClass
        # print "addListener", listener, msgClass, msgClass.get_amType()
    def removeListener(self, listener):
        """Unregister `listener` entirely (raises KeyError if unknown)."""
        del self.listeners[listener]
    def addErrorSignal(self, signalError):
        """Install the callback invoked by the serial source on I/O errors."""
        self.signalError = signalError
    def dispatchPacket(self, source, packet):
        """Decode a raw serial `packet` and deliver it to every listener
        registered for its AM type.

        Decode errors are printed and swallowed so one bad packet does not
        kill the dispatch thread; packets with no matching listener are
        silently dropped.
        """
        #try:
        #print "Packet length: ", len(packet)
        # print "Dispatching from MoteIF"
        # for i in packet:
        # print ord(i)," ",
        # print
        try:
            # Message.py ignores base_offset, so we'll just chop off
            # the first byte (the SERIAL_AMTYPE) here.
            serial_pkt = SerialPacket(packet[1:],
                                      data_length=len(packet)-1)
        except:
            traceback.print_exc()
        # NOTE(review): if SerialPacket construction failed above,
        # serial_pkt is unbound and the block below raises NameError,
        # which is caught and printed; amType then stays None.
        amType = None
        try:
            # Payload starts one byte past the header (the stripped
            # SERIAL_AMTYPE byte shifts every offset by +1).
            data_start = serial_pkt.offset_data(0) + 1
            data_end = data_start + serial_pkt.get_header_length()
            data = packet[data_start:data_end]
            amType = serial_pkt.get_header_type()
        except Exception, x:
            print >>sys.stderr, x
            print >>sys.stderr, traceback.print_tb(sys.exc_info()[2])
        for l in self.listeners:
            amTypes = self.listeners[l]
            if amType in amTypes:
                try:
                    # Instantiate the listener's message class around the
                    # payload and hand it over.
                    msgClass = amTypes[amType]
                    msg = msgClass(data=data,
                                   data_length = len(data),
                                   addr=serial_pkt.get_header_src(),
                                   gid=serial_pkt.get_header_group())
                    l.receive(source, msg)
                except Exception, x:
                    print >>sys.stderr, x
                    print >>sys.stderr, traceback.print_tb(sys.exc_info()[2])
            else:
                #print "No listener for", amType
                pass
    def readPacket(self):
        """Dispatch-loop body run by the rxDispatch thread.

        Polls the queue with a 0.25 s timeout so the loop notices
        ``self.running`` going False within a quarter second.
        """
        while self.running:
            try:
                msgTuple = self.receiveQueue.get(True, 0.25)
            except Queue.Empty:
                msgTuple = None
            if msgTuple:
                (source, msg) = msgTuple
                self.dispatchPacket(source, msg)
    def sendMsg(self, dest, addr, amType, group, msg):
        """Serialize `msg` into a serial-AM frame and write it to `dest`.

        Builds a SerialPacket header (dest addr, group, AM type, payload
        length), prepends the SERIAL_AMTYPE byte, then appends the payload.
        Errors are printed and swallowed.
        """
        try:
            payload = msg.dataGet()
            msg = SerialPacket(None)
            msg.set_header_dest(int(addr))
            msg.set_header_group(int(group))
            msg.set_header_type(int(amType))
            msg.set_header_length(len(payload))
            # from tinyos.packet.Serial
            data = chr(Serial.TOS_SERIAL_ACTIVE_MESSAGE_ID)
            data += msg.dataGet()[0:msg.offset_data(0)]
            data += payload
            dest.writePacket(data)
        except Exception, x:
            print >>sys.stderr, x
            print >>sys.stderr, traceback.print_tb(sys.exc_info()[2])
    def addSource(self, name=None):
        """Open a packet source described by `name` ("type@args", e.g.
        "sf@localhost:9002" or "serial@/dev/ttyUSB0:115200") and start the
        rxDispatch thread.

        Defaults to $MOTECOM, falling back to "sf@localhost:9002".
        Raises MoteIFException on an unparseable name or unknown type.
        Returns the started source object.
        """
        if name == None:
            name = os.environ.get("MOTECOM", "sf@localhost:9002")
        m = re.match(r'([^@]*)@(.*)', name)
        if m == None:
            raise MoteIFException("base source '%s'" % (name))
        (sourceType, args) = m.groups()
        if sourceType == "sf":
            source = tinyos.packet.SFSource.SFSource(self, args)
        elif sourceType == "serial" and tinyos.packet.SerialSource != None:
            source = tinyos.packet.SerialSource.SerialSource(self,
                    args, self.signalError)
        else:
            raise MoteIFException("bad source")
        source.start()
        #block until the source has started up.
        source.semaphore.acquire()
        source.semaphore.release()
        # Daemon thread: will not keep the interpreter alive on exit.
        self.rxThread = Thread(target=self.readPacket,
                               name="rxDispatch")
        self.rxThread.daemon = True
        self.rxThread.start()
        return source
    def finishAll(self):
        """Shut down all packet sources and stop the dispatch loop."""
        tinyos.packet.PacketSource.finishAll()
        self.running = False
| []
| []
| [
"MOTECOM"
]
| [] | ["MOTECOM"] | python | 1 | 0 | |
ares/util/SetDefaultParameterValues.py | """
SetDefaultParameterValues.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on 2010-10-19.
Description: Defaults for all different kinds of parameters.
"""
import os, imp
import numpy as np
from ares import rcParams
from ..physics.Constants import m_H, cm_per_kpc, s_per_myr, E_LL
inf = np.inf
ARES = os.environ.get('ARES')
tau_prefix = os.path.join(ARES,'input','optical_depth') \
if (ARES is not None) else '.'
pgroups = ['Grid', 'Physics', 'Cosmology', 'Source', 'Population',
'Control', 'HaloMassFunction', 'Tanh', 'Gaussian', 'Slab',
'MultiPhase', 'Dust', 'ParameterizedQuantity', 'Old', 'PowerSpectrum',
'Halo', 'Absorber']
# Blob stuff
_blob_redshifts = list('BCD')
_blob_redshifts.extend([6, 7, 8, 9, 10, 15, 20, 25, 30, 35, 40])
# Nothing population specific
_blob_names = ['z', 'dTb', 'curvature', 'igm_Tk', 'igm_Ts', 'cgm_h_2',
'igm_h_1', 'cgm_k_ion', 'igm_k_heat', 'Ja', 'tau_e']
default_blobs = (_blob_names, _blob_names)
# Start setting up list of parameters to be set
defaults = []
for grp in pgroups:
defaults.append('{!s}Parameters()'.format(grp))
def SetAllDefaults():
    """Return one dict merging the defaults of every group in `pgroups`.

    Each entry of `defaults` is a string such as 'GridParameters()' that is
    eval'd here, so every group function must be defined in this module.
    Later groups override earlier ones on key collisions.
    """
    pf = {'problem_type': 1}
    for pset in defaults:
        pf.update(eval('{!s}'.format(pset)))
    return pf
def GridParameters():
    """Defaults for the 1-D radiative-transfer grid: geometry, units,
    initial conditions, and rate-table discretization."""
    pf = \
    {
    "grid_cells": 64,
    "start_radius": 0.01,
    "logarithmic_grid": False,
    "density_units": 1e-3, # H number density
    "length_units": 10. * cm_per_kpc,
    "time_units": s_per_myr,
    "include_He": False,
    "include_H2": False,
    # For MultiPhaseMedium calculations
    "include_cgm": True,
    "include_igm": True,
    # Line photons
    "include_injected_lya": True,
    "initial_ionization": [1. - 1e-8, 1e-8, 1.-2e-8, 1e-8, 1e-8],
    "initial_temperature": 1e4,
    # These have shape len(absorbers)
    "tables_logNmin": [None],
    "tables_logNmax": [None],
    "tables_dlogN": [0.1],
    # overrides above parameters
    "tables_logN": None,
    "tables_xmin": [1e-8],
    #
    "tables_discrete_gen": False,
    "tables_energy_bins": 100,
    "tables_prefix": None,
    "tables_logxmin": -4,
    "tables_dlogx": 0.1,
    "tables_dE": 5.,
    "tables_times": None,
    "tables_dt": s_per_myr,
    }
    pf.update(rcParams)
    return pf
def MultiPhaseParameters():
    """
    These are grid parameters -- we'll strip off the prefix in
    MultiPhaseMedium calculations.

    `cgm_*` keys configure the ionized-bubble phase and `igm_*` keys the
    bulk IGM phase (presumably "circumgalactic" / "intergalactic medium";
    confirm against MultiPhaseMedium).
    """
    pf = \
    {
    "cgm_grid_cells": 1,
    "cgm_expansion": True,
    "cgm_initial_temperature": [1e4],
    "cgm_initial_ionization": [1.-1e-8, 1e-8, 1.-2e-8, 1e-8, 1e-8],
    "cgm_isothermal": True,
    "cgm_recombination": 'A',
    "cgm_collisional_ionization": False,
    "cgm_cosmological_ics": False,
    "photon_counting": False,
    "monotonic_EoR": 1e-6,
    "igm_grid_cells": 1,
    "igm_expansion": True,
    "igm_initial_temperature": None,
    'igm_initial_ionization': [1.-1e-8, 1e-8, 1.-2e-8, 1e-8, 1e-8],
    "igm_isothermal": False,
    "igm_recombination": 'B',
    "igm_compton_scattering": True,
    "igm_collisional_ionization": True,
    "igm_cosmological_ics": False,
    }
    pf.update(rcParams)
    return pf
def SlabParameters():
    """Defaults for an optional dense slab inserted into the 1-D grid."""
    pf = {
        "slab": 0,
        "slab_position": 0.1,
        "slab_radius": 0.05,
        "slab_overdensity": 100,
        "slab_temperature": 100,
        "slab_ionization": [1. - 1e-8, 1e-8],
        "slab_profile": 0,
    }
    pf.update(rcParams)
    return pf
# BoundaryConditionParameters?
def FudgeParameters():
    """Ad hoc tuning knobs.

    NOTE(review): 'Fudge' does not appear in `pgroups`, so these defaults
    are never merged by SetAllDefaults(); callers must request them
    directly. Confirm whether that omission is intentional.
    """
    pf = \
    {
    "z_heII_EoR": 3.,
    }
    pf.update(rcParams)
    return pf
def AbsorberParameters():
    """Defaults for the absorber population (cddf_* keys; presumably the
    column-density distribution function -- confirm against usage)."""
    pf = \
    {
    'cddf_C': 0.25,
    'cddf_beta': 1.4,
    'cddf_gamma': 1.5,
    'cddf_zlow': 1.5,
    'cddf_gamma_low': 0.2,
    }
    pf.update(rcParams)
    return pf
def PhysicsParameters():
    """Defaults selecting which physical processes are modeled and how:
    radiative-transfer approximations, secondary ionization treatment,
    Lyman-alpha coupling, thermal-history shortcuts, and the LW/metal/tau
    feedback iteration controls."""
    pf = \
    {
    "radiative_transfer": 1,
    "photon_conserving": 1,
    "plane_parallel": 0,
    "infinite_c": 1,
    "collisional_ionization": 1,
    "secondary_ionization": 1, # 0 = Deposit all energy as heat
    # 1 = Shull & vanSteenberg (1985)
    # 2 = Ricotti, Gnedin, & Shull (2002)
    # 3 = Furlanetto & Stoever (2010)
    "secondary_lya": False, # Collisionally excited Lyman alpha?
    "isothermal": 1,
    "expansion": 0, # Referring to cosmology
    "collapse": 0, # Referring to single-zone collapse
    "compton_scattering": 1,
    "recombination": 'B',
    "exotic_heating": False,
    'exotic_heating_func': None,
    "clumping_factor": 1,
    "approx_H": False,
    "approx_He": False,
    "approx_sigma": False,
    "approx_Salpha": 1, # 1 = Salpha = 1
    # 2 = Chuzhoy, Alvarez, & Shapiro (2005),
    # 3 = Furlanetto & Pritchard (2006)
    "approx_thermal_history": False,
    "inits_Tk_p0": None,
    "inits_Tk_p1": None,
    "inits_Tk_p2": None, # Set to -4/3 if thermal_hist = 'exp' to recover adiabatic cooling
    "inits_Tk_p3": 0.0,
    "inits_Tk_p4": inf,
    "inits_Tk_p5": None,
    "inits_Tk_dz": 1.,
    "Tbg": None,
    "Tbg_p0": None,
    "Tbg_p1": None,
    "Tbg_p2": None,
    "Tbg_p3": None,
    "Tbg_p4": None,
    # Ad hoc way to make a flattened signal
    "floor_Ts": False,
    "floor_Ts_p0": None,
    "floor_Ts_p1": None,
    "floor_Ts_p2": None,
    "floor_Ts_p3": None,
    "floor_Ts_p4": None,
    "floor_Ts_p5": None,
    # Lyman alpha sources
    "lya_nmax": 23,
    "rate_source": 'fk94', # fk94, option for development here
    # Feedback parameters
    # LW
    'feedback_clear_solver': True,
    'feedback_LW': False,
    'feedback_LW_dt': 0.0, # instantaneous response
    'feedback_LW_Mmin': 'visbal2014',
    'feedback_LW_fsh': None,
    'feedback_LW_Tcut': 1e4,
    'feedback_LW_mean_err': False,
    'feedback_LW_maxiter': 15,
    'feedback_LW_miniter': 0,
    'feedback_LW_softening': 'sqrt',
    'feedback_LW_Mmin_smooth': 0,
    'feedback_LW_Mmin_fit': 0,
    'feedback_LW_Mmin_afreq': 0,
    'feedback_LW_Mmin_rtol': 0.0,
    'feedback_LW_Mmin_atol': 0.0,
    'feedback_LW_sfrd_rtol': 1e-1,
    'feedback_LW_sfrd_atol': 0.0,
    'feedback_LW_sfrd_popid': None,
    'feedback_LW_zstart': None,
    'feedback_LW_mixup_freq': 5,
    'feedback_LW_mixup_delay': 20,
    'feedback_LW_guesses': None,
    'feedback_LW_guesses_from': None,
    'feedback_LW_guesses_perfect': False,
    # Assume that uniform background only emerges gradually as
    # the typical separation of halos becomes << Hubble length
    "feedback_LW_ramp": 0,
    'feedback_streaming': False,
    'feedback_vel_at_rec': 30.,
    'feedback_Z': None,
    'feedback_Z_Tcut': 1e4,
    'feedback_Z_rtol': 0.,
    'feedback_Z_atol': 1.,
    'feedback_Z_mean_err': False,
    'feedback_Z_Mmin_uponly': False,
    'feedback_Z_Mmin_smooth': False,
    'feedback_tau': None,
    'feedback_tau_Tcut': 1e4,
    'feedback_tau_rtol': 0.,
    'feedback_tau_atol': 1.,
    'feedback_tau_mean_err': False,
    'feedback_tau_Mmin_uponly': False,
    'feedback_tau_Mmin_smooth': False,
    'feedback_ion': None,
    'feedback_ion_Tcut': 1e4,
    'feedback_ion_rtol': 0.,
    'feedback_ion_atol': 1.,
    'feedback_ion_mean_err': False,
    'feedback_ion_Mmin_uponly': False,
    'feedback_ion_Mmin_smooth': False,
    }
    pf.update(rcParams)
    return pf
def ParameterizedQuantityParameters():
    """Defaults for ParameterizedQuantity (pq_*) objects: functional form,
    independent variable(s), coefficients, and optional floors/ceilings.

    Fix: the original listed the "pq_func_par7" key twice; the duplicate
    (same value, None) has been removed. The returned dict is unchanged.
    """
    pf = \
    {
    "pq_func": 'dpl',
    "pq_func_fun": None, # only used if pq_func == 'user'
    "pq_func_var": 'Mh',
    "pq_func_var2": None,
    "pq_func_var_lim": None,
    "pq_func_var2_lim": None,
    "pq_func_var_fill": 0.0,
    "pq_func_var2_fill": 0.0,
    "pq_func_par0": None,
    "pq_func_par1": None,
    "pq_func_par2": None,
    "pq_func_par3": None,
    "pq_func_par4": None,
    "pq_func_par5": None,
    "pq_func_par6": None,
    "pq_func_par7": None,
    "pq_func_par8": None,
    "pq_func_par9": None,
    "pq_boost": 1.,
    "pq_iboost": 1.,
    "pq_val_ceil": None,
    "pq_val_floor": None,
    "pq_var_ceil": None,
    "pq_var_floor": None,
    }
    pf.update(rcParams)
    return pf
def DustParameters():
    """Defaults controlling dust corrections (dustcorr_* keys)."""
    pf = {
        'dustcorr_method': None,
        'dustcorr_beta': -2.,
        # Only used if method is a list
        'dustcorr_ztrans': None,
        # Intrinsic scatter in the AUV-beta relation
        'dustcorr_scatter_A': 0.0,
        # Intrinsic scatter in the beta-mag relation (gaussian)
        'dustcorr_scatter_B': 0.34,
        'dustcorr_Bfun_par0': -2.,
        'dustcorr_Bfun_par1': None,
        'dustcorr_Bfun_par2': None,
    }
    pf.update(rcParams)
    return pf
def PowerSpectrumParameters():
    """Defaults for 21-cm power-spectrum (ps_*) calculations: output
    sampling in z/k/R, which auto/cross terms to include, and bubble-model
    options."""
    pf = {}
    tmp = \
    {
    'ps_output_z': np.arange(6, 20, 1),
    "ps_output_k": None,
    "ps_output_lnkmin": -4.6,
    "ps_output_lnkmax": 1.,
    "ps_output_dlnk": 0.2,
    "ps_output_R": None,
    "ps_output_lnRmin": -8.,
    "ps_output_lnRmax": 8.,
    "ps_output_dlnR": 0.01,
    'ps_linear_pert': False,
    'ps_use_wick': False,
    'ps_igm_model': 1, # 1=3-zone IGM, 2=other
    'ps_include_acorr': True,
    'ps_include_xcorr': False,
    'ps_include_bias': True,
    'ps_include_xcorr_wrt': None,
    # Save all individual pieces that make up 21-cm PS?
    "ps_output_components": False,
    'ps_include_21cm': True,
    'ps_include_density': True,
    'ps_include_ion': True,
    'ps_include_temp': False,
    'ps_include_lya': False,
    'ps_lya_cut': inf,
    # Binary model switches
    'ps_include_xcorr_ion_rho': False,
    'ps_include_xcorr_hot_rho': False,
    'ps_include_xcorr_ion_hot': False,
    'ps_include_3pt': True,
    'ps_include_4pt': True,
    'ps_temp_model': 1, # 1=Bubble shells, 2=FZH04
    'ps_saturated': 10.,
    'ps_correct_gs_ion': True,
    'ps_correct_gs_temp': True,
    'ps_assume_saturated': False,
    'ps_split_transform': True,
    'ps_fht_rtol': 1e-4,
    'ps_fht_atol': 1e-4,
    'ps_include_lya_lc': False,
    "ps_volfix": True,
    "ps_rescale_Qlya": False,
    "ps_rescale_Qhot": False,
    "ps_rescale_dTb": False,
    "bubble_size": None,
    "bubble_density": None,
    # Important that the number is at the end! ARES will interpret
    # numbers within underscores as population ID numbers.
    "bubble_shell_rvol_zone_0": None,
    "bubble_shell_rdens_zone_0": 0.,
    "bubble_shell_rsize_zone_0": None,
    "bubble_shell_asize_zone_0": None,
    "bubble_shell_ktemp_zone_0": None,
    #"bubble_shell_tpert_zone_0": None,
    #"bubble_shell_rsize_zone_1": None,
    #"bubble_shell_asize_zone_1": None,
    #"bubble_shell_ktemp_zone_1": None,
    #"bubble_shell_tpert_zone_1": None,
    #"bubble_shell_rsize_zone_2": None,
    #"bubble_shell_asize_zone_2": None,
    #"bubble_shell_ktemp_zone_2": None,
    #"bubble_shell_tpert_zone_2": None,
    "bubble_shell_include_xcorr": True,
    #"bubble_pod_size": None,
    #"bubble_pod_size_rel": None,
    #"bubble_pod_size_abs": None,
    #"bubble_pod_size_func": None,
    #"bubble_pod_temp": None,
    #"bubble_pod_Nsc": 1e3,
    "ps_lya_method": 'lpt',
    "ps_ion_method": None, # unused
    #"powspec_lya_approx_sfr": 'exp',
    "bubble_shell_size_dist": None,
    "bubble_size_dist": 'fzh04', # or FZH04, PC14
    }
    pf.update(tmp)
    pf.update(rcParams)
    return pf
def PopulationParameters():
    """
    Parameters associated with populations of objects, which give rise to
    meta-galactic radiation backgrounds.

    Fix: the original dict literal repeated several keys ("pop_E" x3,
    "pop_LE", "pop_imf", "pop_lmin", "pop_lmax", "pop_dlam" x3,
    "pop_wavelengths", "pop_times", "pop_sfr"). Since later duplicates
    silently win in a dict display, the duplicates have been consolidated:
    each key is kept at its first-occurrence position with its effective
    (last-listed) value, so the returned dict is identical to before
    (in particular pop_lmin=912. and pop_dlam=10).
    """
    pf = {}
    # Grab all SourceParameters and replace "source_" with "pop_"
    srcpars = SourceParameters()
    for par in srcpars:
        pf[par.replace('source', 'pop')] = srcpars[par]
    tmp = \
    {
    "pop_type": 'galaxy',
    "pop_tunnel": None,
    "pop_sfr_model": 'fcoll', # or sfrd-func, sfrd-tab, sfe-func, sfh-tab, rates,
    "pop_sed_model": True, # or False
    "pop_sfr_above_threshold": True,
    "pop_sfr_cross_threshold": True,
    "pop_sfr_cross_upto_Tmin": inf,
    # Mass accretion rate
    "pop_MAR": 'hmf',
    "pop_MAR_interp": 'linear',
    "pop_MAR_corr": None,
    "pop_MAR_delay": None,
    "pop_MAR_from_hist": True,
    "pop_interp_MAR": 'linear',
    "pop_interp_sfrd": 'linear',
    "pop_interp_lf": 'linear',
    "pop_tdyn": 1e7,
    "pop_tstar": None,
    "pop_sSFR": None,
    "pop_uvlf": None,
    'pop_lf_Mmax': 1e15,
    "pop_fduty": None,
    "pop_fduty_seed": None,
    "pop_fduty_dt": None, # if not None, SF occurs in on/off bursts, i.e.,
                          # it's coherent.
    "pop_focc": 1.0,
    "pop_fsup": 0.0, # Suppression of star-formation at threshold
    # Set the emission interval and SED
    "pop_sed": 'pl',
    "pop_sed_sharp_at": None,
    # Can degrade spectral resolution of stellar population synthesis models
    # just to speed things up.
    "pop_sed_degrade": None,
    # If pop_sed == 'user' (also reserved for delta-function and
    # multi-frequency sources below)
    "pop_E": None,
    "pop_L": None,
    # For synthesis models
    "pop_Z": 0.02,
    "pop_imf": 2.35, # default to standard SSPs.
    "pop_tracks": None,
    "pop_tracks_fn": None,
    "pop_stellar_aging": False,
    "pop_nebular": False,
    "pop_nebular_ff": True,
    "pop_nebular_fb": True,
    "pop_nebular_2phot": True,
    "pop_nebular_lookup": None,
    "pop_ssp": False, # a.k.a., continuous SF
    "pop_psm_instance": None,
    "pop_src_instance": None,
    # Cache tricks: must be pickleable for MCMC to work.
    "pop_sps_data": None,
    "pop_tsf": 100.,
    "pop_binaries": False, # for BPASS
    "pop_sed_by_Z": None,
    "pop_sfh_oversample": 0,
    "pop_ssp_oversample": False,
    "pop_ssp_oversample_age": 30.,
    "pop_sfh": False, # account for SFH in spectrum modeling
    # Option of setting Z, t, or just supplying SSP table?
    "pop_Emin": 2e2,
    "pop_Emax": 3e4,
    "pop_EminNorm": 5e2,
    "pop_EmaxNorm": 8e3,
    "pop_Enorm": None,
    "pop_lmin": 912.,
    "pop_lmax": 1e4,
    "pop_dlam": 10,
    "pop_wavelengths": None,
    "pop_times": None,
    "pop_LE": None,
    # Artificially kill emission in some band.
    "pop_Ekill": None,
    "pop_Emin_xray": 2e2,
    # Controls IGM ionization for approximate CXB treatments
    "pop_Ex": 500.,
    "pop_Euv": 30.,
    "pop_lf": None,
    "pop_emissivity": None,
    # By-hand parameterizations
    "pop_Ja": None,
    "pop_Tk": None,
    "pop_xi": None,
    "pop_ne": None,
    #
    "pop_ion_rate_cgm": None,
    "pop_ion_rate_igm": None,
    "pop_heat_rate": None,
    "pop_k_ion_cgm": None,
    "pop_k_ion_igm": None,
    "pop_k_heat_igm": None,
    # Set time interval over which emission occurs
    "pop_zform": 60.,
    "pop_zdead": 0.0,
    # Main parameters in our typical global 21-cm models
    "pop_fstar": 0.1,
    'pop_fstar_cloud': 1., # cloud-scale star formation efficiency
    "pop_fstar_max": 1.0,
    "pop_fstar_negligible": 1e-5, # relative to maximum
    "pop_sfr": None,
    "pop_facc": 0.0,
    "pop_fsmooth": 1.0,
    # Next 3: relative to fraction of halo acquiring the material
    'pop_acc_frac_metals': 1.0,
    'pop_acc_frac_stellar': 1.0,
    'pop_acc_frac_gas': 1.0,
    'pop_metal_retention': 1.0,
    "pop_star_formation": True,
    "pop_sfe": None,
    "pop_mlf": None,
    "pop_frd": None,
    "pop_fshock": 1.0,
    # For GalaxyEnsemble
    "pop_aging": False,
    "pop_enrichment": False,
    "pop_quench": None,
    "pop_quench_by": 'mass',
    "pop_flag_sSFR": None,
    "pop_flag_tau": None,
    "pop_mag_bin": 0.5,
    "pop_mag_min": -25,
    "pop_mag_max": 0,
    "pop_synth_dz": 0.5,
    "pop_synth_zmax": 15.,
    "pop_synth_zmin": 3.5,
    "pop_synth_Mmax": 1e14,
    "pop_synth_minimal": False, # Can turn off for testing (so we don't need MF)
    "pop_synth_cache_level": 1, # Bigger = more careful
    "pop_synth_age_interp": 'cubic',
    "pop_synth_cache_phot": {},
    # Need to avoid doing synthesis in super duper detail for speed.
    # Still need to implement 'full' method.
    "pop_synth_lwb_method": 0,
    "pop_tau_bc": 0,
    "pop_age_bc": 10.,
    "pop_mergers": False,
    # For Clusters
    "pop_mdist": None,
    "pop_age_res": 1.,
    "pop_dlogM": 0.1,
    "pop_histories": None,
    "pop_guide_pop": None,
    "pop_thin_hist": False,
    "pop_scatter_mar": 0.0,
    "pop_scatter_mar_seed": None,
    "pop_scatter_sfr": 0.0,
    "pop_scatter_sfe": 0.0,
    "pop_scatter_env": 0.0,
    "pop_update_dt": 'native',
    # Cluster-centric model
    "pop_feedback_rad": False,
    "pop_feedback_sne": False,
    "pop_delay_rad_feedback": 0,
    "pop_delay_sne_feedback": 0,
    "pop_force_equilibrium": np.inf,
    "pop_sample_imf": False,
    "pop_sample_cmf": False,
    "pop_imf_bins": None,#np.arange(0.1, 150.01, 0.01), # bin centers
    "pop_cmf": None,
    # Feedback for single-stars
    "pop_coupling_sne": 0.1,
    "pop_coupling_rad": 0.1,
    # Energy per SNe in units of 1e51 erg.
    "pop_omega_51": 0.01,
    # Baryon cycling
    "pop_multiphase": False,
    "pop_fobsc": 0.0,
    "pop_fobsc_by": None, # or 'age' or 'lum'
    "pop_tab_z": None,
    "pop_tab_Mh": None,
    "pop_tab_sfe": None,
    "pop_tab_sfr": None,
    "pop_Tmin": 1e4,
    "pop_Tmax": None,
    "pop_Mmin": None,
    "pop_Mmin_ceil": None,
    "pop_Mmin_floor": None,
    "pop_Tmin_ceil": None,
    "pop_Tmin_floor": None,
    "pop_Tmax_ceil": None,
    "pop_Tmax_floor": None,
    "pop_Mmax_ceil": None,
    "pop_Mmax_floor": None,
    "pop_Mmax": None,
    "pop_time_limit": None,
    "pop_time_limit_delay": True,
    "pop_mass_limit": None,
    "pop_abun_limit": None,
    "pop_bind_limit": None,
    "pop_temp_limit": None,
    "pop_lose_metals": False,
    "pop_limit_logic": 'and',
    "pop_time_ceil": None,
    "pop_initial_Mh": 1, # In units of Mmin. Zero means unused
    "pop_sfrd": None,
    "pop_sfrd_units": 'msun/yr/mpc^3',
    # For BHs
    "pop_bh_formation": False,
    "pop_bh_md": None,
    "pop_bh_ard": None,
    "pop_bh_seed_ratio": 1e-3,
    "pop_bh_seed_mass": None,
    "pop_bh_seed_eff": None,
    "pop_bh_facc": None,
    # Scales SFRD
    "pop_Nlw": 9690.,
    "pop_Nion": 4e3,
    "pop_fesc": 0.1,
    "pop_fX": 1.0,
    "pop_cX": 2.6e39,
    # Should
    "pop_fesc_LW": 1.,
    "pop_fesc_LyC": 0.1,
    # Parameters that sweep fstar under the rug
    "pop_xi_XR": None, # product of fstar and fX
    "pop_xi_LW": None, # product of fstar and Nlw
    "pop_xi_UV": None, # product of fstar, Nion, and fesc
    # Override luminosity density
    "pop_rhoL": None,
    # What radiation does this population emit?
    # These are passive fields
    "pop_oir_src": False,
    "pop_lw_src": True,
    "pop_lya_src": True,
    "pop_radio_src": False,
    # These are active fields (i.e., they change the IGMs properties)
    "pop_ion_src_cgm": True,
    "pop_ion_src_igm": True,
    "pop_heat_src_cgm": False,
    "pop_heat_src_igm": True,
    "pop_lya_fl": False,
    "pop_ion_fl": False,
    "pop_temp_fl": False,
    "pop_one_halo_term": True,
    "pop_two_halo_term": True,
    # Generalized normalization
    # Mineo et al. (2012) (from revised 0.5-8 keV L_X-SFR)
    "pop_rad_yield": 2.6e39,
    "pop_rad_yield_units": 'erg/s/sfr',
    "pop_rad_yield_Z_index": None,
    # Parameters for simple galaxy SAM
    "pop_sam_method": 0,
    "pop_sam_nz": 1,
    "pop_mass_yield": 0.5,
    "pop_metal_yield": 0.1,
    "pop_dust_holes": 'big',
    "pop_dust_yield": None, # Mdust = dust_yield * metal mass
    "pop_dust_yield_delay": 0,
    "pop_dust_growth": None,
    "pop_dust_scale": 0.1, # 100 pc
    "pop_dust_fcov": 1.0,
    "pop_dust_geom": 'screen', # or 'mixed'
    "pop_dust_kappa": None, # opacity in [cm^2 / g]
    "pop_dust_scatter": None,
    "pop_dust_scatter_seed": None,
    "pop_fpoll": 1.0, # uniform pollution
    "pop_fstall": 0.0,
    "pop_mass_rec": 0.0,
    "pop_mass_escape": 0.0,
    "pop_fstar_res": 0.0,
    # Transition mass
    "pop_transition": 0,
    "pop_dE": None,
    "pop_calib_wave": 1600,
    "pop_calib_lum": None,
    "pop_lum_per_sfr": None,
    "pop_calib_Z": None, # not implemented
    "pop_Lh_scatter": 0.0,
    'pop_fXh': None,
    'pop_frep': 1.0,
    'pop_reproc': False,
    'pop_frec_bar': 0.0, # Neglect injected photons by default if we're
                         # treating background in approximate way
    # Nebular emission stuff
    "pop_nebular_Tgas": 2e4,
    "pop_toysps_beta": -2.,
    "pop_toysps_norm": 2e33, # at 1600A
    "pop_toysps_gamma": -0.8,
    "pop_toysps_delta": -0.25,
    "pop_toysps_alpha": 8.,
    "pop_toysps_t0": 100.,
    "pop_toysps_lmin": 912.,
    "pop_toysps_trise": 3,
    "pop_solve_rte": False,
    "pop_lya_permeable": False,
    # Pre-created splines
    "pop_fcoll": None,
    "pop_dfcolldz": None,
    # Get passed on to litdata instances
    "source_kwargs": {},
    "pop_kwargs": {},
    "pop_test_param": None,
    # Utility
    "pop_user_par0": None,
    "pop_user_par1": None,
    "pop_user_par2": None,
    "pop_user_par3": None,
    "pop_user_par4": None,
    "pop_user_par5": None,
    "pop_user_par6": None,
    "pop_user_par7": None,
    "pop_user_par8": None,
    "pop_user_par9": None,
    "pop_user_pmap": {},
    }
    pf.update(tmp)
    pf.update(rcParams)
    return pf
def SourceParameters():
    """Defaults describing a single radiation source (source_* keys):
    SED choice, emission band, synthesis-model options, and stellar/BH
    specific properties.

    Fix: the original dict literal listed "source_sfh" and "source_rmax"
    twice each (identical values); the duplicates have been removed.
    The returned dict is unchanged.
    """
    pf = \
    {
    "source_type": 'star',
    "source_sed": 'bb',
    "source_position": 0.0,
    "source_sed_sharp_at": None,
    "source_sed_degrade": None,
    "source_sfr": 1.,
    "source_fesc": 0.1,
    # only for schaerer2002 right now
    "source_piecewise": True,
    "source_model": 'tavg_nms', # or "zams" or None
    "source_tbirth": 0,
    "source_lifetime": 1e10,
    "source_dlogN": [0.1],
    "source_logNmin": [None],
    "source_logNmax": [None],
    "source_table": None,
    "source_E": None,
    "source_LE": None,
    "source_multigroup": False,
    "source_Emin": E_LL,
    "source_Emax": 1e2,
    "source_Enorm": None,
    "source_EminNorm": None,
    "source_EmaxNorm": None,
    "source_dE": None,
    "source_dlam": None,
    "source_lmin": 912.,
    "source_lmax": 1e4,
    "source_wavelengths": None,
    "source_times": None,
    "source_toysps_beta": -2.5,
    "source_toysps_norm": 3e33, # at 1600A
    "source_toysps_gamma": -1.,
    "source_toysps_delta": -0.25,
    "source_toysps_alpha": 8.,
    "source_toysps_t0": 350.,
    "source_toysps_lmin": 912.,
    "source_toysps_trise": 3,
    "source_Ekill": None,
    "source_logN": -inf,
    "source_hardening": 'extrinsic',
    # Synthesis models
    "source_sfh": None,
    "source_Z": 0.02,
    "source_imf": 2.35,
    "source_tracks": None,
    "source_tracks_fn": None,
    "source_stellar_aging": False,
    "source_nebular": False,
    "source_nebular_ff": True,
    "source_nebular_fb": True,
    "source_nebular_2phot": True,
    "source_nebular_lookup": None,
    "source_nebular_Tgas": 2e4,
    "source_ssp": False, # a.k.a., continuous SF
    "source_psm_instance": None,
    "source_tsf": 100.,
    "source_binaries": False, # for BPASS
    "source_sed_by_Z": None,
    "source_rad_yield": 'from_sed',
    "source_interpolant": None,
    "source_sps_data": None,
    # Log masses
    "source_imf_bins": np.arange(-1, 2.52, 0.02), # bin centers
    "source_degradation": None, # Degrade spectra to this \AA resolution
    "source_aging": False,
    # Stellar
    "source_temperature": 1e5,
    "source_qdot": 5e48,
    # SFH / metal-enrichment history
    "source_meh": None,
    # BH
    "source_mass": 1, # Also normalizes ssp's, so set to 1 by default.
    "source_rmax": 1e3,
    "source_alpha": -1.5,
    "source_evolving": False,
    # SIMPL
    "source_fsc": 0.1,
    "source_uponly": True,
    "source_dlogE": 0.1,
    "source_Lbol": None,
    "source_fduty": 1.,
    "source_eta": 0.1,
    "source_isco": 6,
    }
    pf.update(rcParams)
    return pf
def StellarParameters():
    """Source defaults specialized for stellar sources.

    NOTE(review): the two keys set here are immediately overwritten by
    pf.update(SourceParameters()), which contains the same keys with the
    same values (1e5, 5e48) -- so the result is unaffected, but the local
    dict is currently redundant. Confirm the intended update order.
    """
    pf = \
    {
    "source_temperature": 1e5,
    "source_qdot": 5e48,
    }
    pf.update(SourceParameters())
    pf.update(rcParams)
    return pf
def BlackHoleParameters():
    """Source defaults specialized for black-hole sources.

    NOTE(review): pf.update(SourceParameters()) runs AFTER the dict below,
    so every key here is overwritten by SourceParameters(). All values
    coincide except "source_mass": the 10 set here is clobbered to 1 (the
    SourceParameters default). This looks like a latent bug -- if BH
    sources are meant to default to mass 10, the update order should be
    reversed. Confirm before changing.
    """
    pf = \
    {
    #"source_mass": 1e5,
    "source_rmax": 1e3,
    "source_alpha": -1.5,
    "source_fsc": 0.1,
    "source_uponly": True,
    "source_Lbol": None,
    "source_mass": 10,
    "source_fduty": 1.,
    "source_eta": 0.1,
    "source_isco": 6,
    "source_rmax": 1e3,
    }
    pf.update(SourceParameters())
    pf.update(rcParams)
    return pf
def SynthesisParameters():
    """Defaults for stellar population synthesis (source_* subset).

    NOTE(review): unlike every sibling *Parameters() function, this one
    never calls pf.update(rcParams) before returning -- confirm whether
    rcParams overrides are intentionally excluded here.
    """
    pf = \
    {
    # For synthesis models
    "source_sed": None,
    "source_sed_degrade": None,
    "source_Z": 0.02,
    "source_imf": 2.35,
    "source_tracks": None,
    "source_tracks_fn": None,
    "source_stellar_aging": False,
    "source_nebular": False,
    # If doing nebular emission with ARES
    "source_nebular_ff": True,
    "source_nebular_fb": True,
    "source_nebular_2phot": True,
    "source_nebular_lookup": None,
    "source_nebular_Tgas": 2e4,
    "source_fesc": 0.,
    "source_ssp": False, # a.k.a., continuous SF
    "source_psm_instance": None,
    "source_tsf": 100.,
    "source_binaries": False, # for BPASS
    "source_sed_by_Z": None,
    "source_rad_yield": 'from_sed',
    "source_sps_data": None,
    # Only used by toy SPS
    "source_dE": None,
    "source_dlam": 10.,
    "source_Emin": 1.,
    "source_Emax": 54.4,
    "source_lmin": 912.,
    "source_lmax": 1e4,
    "source_times": None,
    "source_wavelengths": None,
    "source_toysps_beta": -2.,
    "source_toysps_norm": 2e33, # at 1600A
    "source_toysps_gamma": -0.8,
    "source_toysps_delta": -0.25,
    "source_toysps_alpha": 8.,
    "source_toysps_t0": 100.,
    "source_toysps_lmin": 912.,
    "source_toysps_trise": 3,
    # Coefficient of Bpass in Hybrid synthesis model
    "source_coef": 0.5,
    }
    return pf
def HaloMassFunctionParameters():
    """Defaults for the halo mass function (hmf_*) tables and the halo
    power-spectrum (hps_*) tables: fitting function, table resolution in
    M/z/k, transfer-function options, and PCA perturbations."""
    pf = \
    {
    "hmf_model": 'ST',
    "hmf_instance": None,
    "hmf_load": True,
    "hmf_cache": None,
    "hmf_load_ps": True,
    "hmf_load_growth": False,
    "hmf_use_splined_growth": True,
    "hmf_table": None,
    "hmf_analytic": False,
    "hmf_params": None,
    # Table resolution
    "hmf_logMmin": 4,
    "hmf_logMmax": 18,
    "hmf_dlogM": 0.01,
    "hmf_zmin": 0,
    "hmf_zmax": 60,
    "hmf_dz": 0.05,
    # Optional: time instead of redshift
    "hmf_tmin": 30.,
    "hmf_tmax": 1000.,
    "hmf_dt": None, # if not None, will switch this one.
    # Augment suite of halo growth histories
    "hgh_dlogM": 0.1,
    'hgh_Mmax': None,
    # to CAMB
    'hmf_dlna': 2e-6, # hmf default value is 1e-2
    'hmf_dlnk': 1e-2,
    'hmf_lnk_min': -20.,
    'hmf_lnk_max': 10.,
    'hmf_transfer_k_per_logint': 11,
    'hmf_transfer_kmax': 100., # hmf default value is 5
    "hmf_dfcolldz_smooth": False,
    "hmf_dfcolldz_trunc": False,
    "hmf_path": None,
    # For, e.g., fcoll, etc
    "hmf_interp": 'cubic',
    "hmf_func": None,
    "hmf_extra_par0": None,
    "hmf_extra_par1": None,
    "hmf_extra_par2": None,
    "hmf_extra_par3": None,
    "hmf_extra_par4": None,
    # Mean molecular weight of collapsing gas
    "mu": 0.61,
    "hmf_database": None,
    # Directory where cosmology hmf tables are located
    # For halo model.
    "hps_zmin": 6,
    "hps_zmax": 30,
    "hps_dz": 0.5,
    "hps_linear": False,
    'hps_dlnk': 0.001,
    'hps_dlnR': 0.001,
    'hps_lnk_min': -10.,
    'hps_lnk_max': 10.,
    'hps_lnR_min': -10.,
    'hps_lnR_max': 10.,
    # Note that this is not passed to hmf yet.
    "hmf_window": 'tophat',
    "hmf_wdm_mass": None,
    "hmf_wdm_interp": True,
    "hmf_cosmology_location": None,
    # PCA eigenvectors
    "hmf_pca": None,
    "hmf_pca_coef0": None,
    "hmf_pca_coef1": None,
    "hmf_pca_coef2": None,
    "hmf_pca_coef3": None,
    "hmf_pca_coef4": None,
    "hmf_pca_coef5": None,
    "hmf_pca_coef6": None,
    "hmf_pca_coef7": None,
    "hmf_pca_coef8": None,
    "hmf_pca_coef9": None,
    "hmf_pca_coef10": None,
    "hmf_pca_coef11": None,
    "hmf_pca_coef12": None,
    "hmf_pca_coef13": None,
    "hmf_pca_coef14": None,
    "hmf_pca_coef15": None,
    "hmf_pca_coef16": None,
    "hmf_pca_coef17": None,
    "hmf_pca_coef18": None,
    "hmf_pca_coef19": None,
    # If a new tab_MAR should be computed when using the PCA
    "hmf_gen_MAR": False,
    }
    pf.update(rcParams)
    return pf
def CosmologyParameters():
    """Cosmological parameter defaults plus CosmoRec recombination-code
    options (cosmorec_* keys)."""
    # Last column of Table 4 in Planck XIII. Cosmological Parameters (2015)
    pf = \
    {
    "cosmology_propagation": False,
    "cosmology_inits_location": None,
    "omega_m_0": 0.3089,
    "omega_b_0": round(0.0223 / 0.6774**2, 5), # O_b / h**2
    "omega_l_0": 1. - 0.3089,
    "omega_k_0": 0.0,
    "hubble_0": 0.6774,
    "helium_by_number": 0.0813,
    "helium_by_mass": 0.2453, # predicted by BBN
    "cmb_temp_0": 2.7255,
    "sigma_8": 0.8159,
    "primordial_index": 0.9667,
    'relativistic_species': 3.04,
    "approx_highz": False,
    "cosmology_id": 'best',
    "cosmology_name": 'planck_TTTEEE_lowl_lowE', # Can pass 'named cosmologies'
    "cosmology_number": None,
    "path_to_CosmoRec": None,
    # As you might have guessed, these parameters are all unique to CosmoRec
    'cosmorec_nz': 1000,
    'cosmorec_z0': 3000,
    'cosmorec_zf': 0,
    'cosmorec_recfast_fudge': 1.14,
    'cosmorec_nshells_H': 3,
    'cosmorec_nS': 500,
    'cosmorec_dm_annhil': 0,
    'cosmorec_A2s1s': 0, # will use internal default if zero
    'cosmorec_nshells_He': 3,
    'cosmorec_HI_abs': 2, # during He recombination
    'cosmorec_spin_forb': 1,
    'cosmorec_feedback_He': 0,
    'cosmorec_run_pde': 1,
    'cosmorec_corr_2s1s': 2,
    'cosmorec_2phot': 3,
    'cosmorec_raman': 2,
    'cosmorec_path': None,
    'cosmorec_output': 'input/inits/outputs/',
    'cosmorec_fmt': '.dat',
    }
    pf.update(rcParams)
    return pf
def HaloParameters():
    """Defaults describing individual halo structure (halo_* keys)."""
    pf = {
        "halo_profile": 'nfw',
        "halo_cmr": 'duffy',
        "halo_delta": 200.,
    }
    pf.update(rcParams)
    return pf
def ControlParameters():
    """Defaults governing simulation control: start/stop redshifts, output
    cadence, ODE-solver tolerances, timestep restrictions, optical-depth
    table handling, and real-time analysis options."""
    pf = \
    {
    'revision': None,
    'nthreads': None,
    # Start/stop/IO
    "dtDataDump": 1.,
    "dzDataDump": None,
    'logdtDataDump': None,
    'logdzDataDump': None,
    "stop_time": 500,
    "initial_redshift": 60.,
    "final_redshift": 5,
    "fallback_dz": 0.1, # only used when no other constraints
    "kill_redshift": 0.0,
    "first_light_redshift": 60.,
    "save_rate_coefficients": 1,
    "optically_thin": 0,
    # Solvers
    "solver_rtol": 1e-8,
    "solver_atol": 1e-8,
    "interp_tab": 'cubic',
    "interp_cc": 'linear',
    "interp_rc": 'linear',
    "interp_Z": 'linear',
    "interp_hist": 'linear',
    "interp_all": 'linear', # backup
    #"interp_sfrd": 'cubic',
    #"interp_hmf": 'cubic',
    "master_interp": None,
    # Not implemented
    "extrap_Z": False,
    # Experimental
    "conserve_memory": False,
    # Initialization
    "load_ics": 'cosmorec',
    "cosmological_ics": False,
    "load_sim": False,
    "cosmological_Mmin": ['filtering', 'tegmark'],
    # Timestepping
    "max_timestep": 1.,
    "epsilon_dt": 0.05,
    "initial_timestep": 1e-2,
    "tau_ifront": 0.5,
    "restricted_timestep": ['ions', 'neutrals', 'electrons', 'temperature'],
    "compute_fluxes_at_start": False,
    # Real-time analysis junk
    "stop": None, # 'B', 'C', 'trans', or 'D'
    "stop_igm_h_2": 0.999,
    "stop_cgm_h_2": 0.999,
    "track_extrema": False,
    "delay_extrema": 5, # Number of steps
    "delay_tracking": 1., # dz below initial_redshift when tracking begins
    "smooth_derivative": 0,
    "blob_names": None,
    "blob_ivars": None,
    "blob_funcs": None,
    "blob_kwargs": {},
    # Real-time optical depth calculation once EoR begins
    "EoR_xavg": 1.0, # ionized fraction indicating start of EoR (OFF by default)
    "EoR_dlogx": 0.001,
    "EoR_approx_tau": False, # 0 = trapezoidal integration,
    # 1 = mean ionized fraction, approx cross sections
    # 2 = neutral approx, approx cross sections
    # Discretizing integration
    "tau_table": None,
    "tau_arrays": None,
    "tau_prefix": tau_prefix,
    "tau_instance": None,
    "tau_redshift_bins": 400,
    "tau_approx": True,
    "tau_clumpy": None,
    "tau_Emin": 2e2,
    "tau_Emax": 3e4,
    "tau_Emin_pin": True,
    "sam_dt": 1., # Myr
    "sam_dz": None, # Usually good enough!
    "sam_atol": 1e-4,
    "sam_rtol": 1e-4,
    # File format
    "preferred_format": 'hdf5',
    # Finding SED tables
    "load_sed": False,
    "sed_prefix": None,
    "unsampled_integrator": 'quad',
    "sampled_integrator": 'simps',
    "integrator_rtol": 1e-6,
    "integrator_atol": 1e-4,
    "integrator_divmax": 1e2,
    "interpolator": 'spline',
    "progress_bar": True,
    "verbose": True,
    "debug": False,
    }
    pf.update(rcParams)
    return pf
# Shared frequency/redshift output-sampling defaults, merged into the
# parametric (tanh/gaussian) model parameter sets below.
_sampling_parameters = \
{
'parametric_model': False,
'output_frequencies': None,
'output_freq_min': 30.,
'output_freq_max': 200.,
'output_freq_res': 1.,
'output_dz': None, # Redshift sampling
'output_redshifts': None,
}
# Old != Deprecated
def OldParameters():
    """Return the legacy ``xi_*`` parameters, all defaulting to None."""
    return {name: None for name in ('xi_LW', 'xi_UV', 'xi_XR')}
def TanhParameters():
    """Return defaults for the tanh parametric signal model.

    The model-specific defaults are overlaid with the global ``rcParams``
    and then with the shared ``_sampling_parameters``.
    """
    pf = {
        'tanh_model': False,
        # J history: amplitude, transition redshift, redshift width
        'tanh_J0': 10.0,
        'tanh_Jz0': 20.0,
        'tanh_Jdz': 3.,
        # T history
        'tanh_T0': 1e3,
        'tanh_Tz0': 8.,
        'tanh_Tdz': 4.,
        # x history
        'tanh_x0': 1.0,
        'tanh_xz0': 10.,
        'tanh_xdz': 2.,
        # Bias / scale terms
        'tanh_bias_temp': 0.0,   # in mK
        'tanh_bias_freq': 0.0,   # in MHz
        'tanh_scale_temp': 1.0,
        'tanh_scale_freq': 1.0,
    }
    # Order matters: rcParams first, then the sampling parameters.
    for overrides in (rcParams, _sampling_parameters):
        pf.update(overrides)
    return pf
def GaussianParameters():
    """Return defaults for the Gaussian parametric signal model.

    The model-specific defaults are overlaid with the global ``rcParams``
    and then with the shared ``_sampling_parameters``.
    """
    pf = {
        'gaussian_model': False,
        'gaussian_A': -100.,
        'gaussian_nu': 70.,
        'gaussian_sigma': 10.,
        'gaussian_bias_temp': 0,
    }
    # Order matters: rcParams first, then the sampling parameters.
    for overrides in (rcParams, _sampling_parameters):
        pf.update(overrides)
    return pf
| []
| []
| [
"ARES"
]
| [] | ["ARES"] | python | 1 | 0 | |
course/migrations/0020_tweak_field_access_exception.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: updates the set of "choices" on the
    # FlowAccessExceptionEntry.permission CharField.

    dependencies = [
        ('course', '0019_tweak_grading_opp'),
    ]

    operations = [
        migrations.AlterField(
            model_name='flowaccessexceptionentry',
            name='permission',
            field=models.CharField(max_length=50, choices=[(b'view', b'View the flow'), (b'view_past', b'Review past attempts'), (b'start_credit', b'Start a for-credit session'), (b'start_no_credit', b'Start a not-for-credit session'), (b'change_answer', b'Change already-graded answer'), (b'see_correctness', b'See whether an answer is correct'), (b'see_answer', b'See the correct answer')]),
        ),
    ]
| []
| []
| []
| [] | [] | python | null | null | null |
config.py | import os
# Absolute path of the directory containing this configuration module.
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by all environments.

    Most values can be overridden through environment variables so that
    deployments do not require code changes.
    """

    # Security / sessions
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'

    # Outgoing mail (defaults target Google's SMTP service)
    MAIL_SERVER = os.environ.get('MAIL_SERVER', 'smtp.googlemail.com')
    MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))
    MAIL_USE_TLS = (
        os.environ.get('MAIL_USE_TLS', 'true').lower() in ('true', 'on', '1')
    )
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
    FLASKY_MAIL_SENDER = 'Flasky Admin <[email protected]>'
    FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')

    # HTTPS and database behavior
    SSL_REDIRECT = False
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_RECORD_QUERIES = True

    # Pagination and slow-query reporting settings
    FLASKY_POSTS_PER_PAGE = 20
    FLASKY_FOLLOWERS_PER_PAGE = 50
    FLASKY_COMMENTS_PER_PAGE = 30
    FLASKY_SLOW_DB_QUERY_TIME = 0.5

    @staticmethod
    def init_app(app):
        """Hook for environment-specific app initialization; no-op here."""
        pass
class DevelopmentConfig(Config):
    # Development mode: debug on, local SQLite database.
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')

    # NOTE(review): the logging setup below executes as a side effect at
    # class-definition (import) time rather than inside init_app — confirm
    # this is intentional.
    import logging
    # Basic configuration: DEBUG and above goes to my.log (UTF-8, truncated
    # on each start because of mode 'w').
    logging.basicConfig(level=logging.DEBUG,
                format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                datefmt='%m-%d %H:%M',
                handlers = [logging.FileHandler('my.log', 'w', 'utf-8'),])
    # Define a handler that writes to sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    # Set the output format
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    # Attach the format to the handler
    console.setFormatter(formatter)
    # Add the handler to the root logger
    logging.getLogger('').addHandler(console)
class TestingConfig(Config):
    """Configuration used by the automated test suite."""

    TESTING = True
    # In-memory database by default; CSRF disabled so tests can POST forms.
    SQLALCHEMY_DATABASE_URI = (
        os.environ.get('TEST_DATABASE_URL') or 'sqlite://'
    )
    WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
    """Production configuration: persistent database and error e-mails."""

    SQLALCHEMY_DATABASE_URI = (
        os.environ.get('DATABASE_URL') or
        'sqlite:///' + os.path.join(basedir, 'data.sqlite')
    )

    @classmethod
    def init_app(cls, app):
        """Attach a handler that e-mails unhandled errors to the admins."""
        Config.init_app(app)

        import logging
        from logging.handlers import SMTPHandler

        creds = None
        secure = None
        if getattr(cls, 'MAIL_USERNAME', None) is not None:
            creds = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
            if getattr(cls, 'MAIL_USE_TLS', None):
                # Empty tuple enables STARTTLS with default parameters.
                secure = ()
        mail_handler = SMTPHandler(
            mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
            fromaddr=cls.FLASKY_MAIL_SENDER,
            toaddrs=[cls.FLASKY_ADMIN],
            subject=cls.FLASKY_MAIL_SUBJECT_PREFIX + ' Application Error',
            credentials=creds,
            secure=secure)
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
    """Heroku deployment configuration."""

    # Redirect to HTTPS only when actually running on a Heroku dyno.
    SSL_REDIRECT = bool(os.environ.get('DYNO'))

    @classmethod
    def init_app(cls, app):
        """Add proxy-header handling and stderr logging on top of production."""
        ProductionConfig.init_app(app)

        # Trust X-Forwarded-* headers set by the reverse proxy.
        from werkzeug.contrib.fixers import ProxyFix
        app.wsgi_app = ProxyFix(app.wsgi_app)

        # Heroku captures stderr, so log there instead of a local file.
        import logging
        from logging import StreamHandler
        stderr_handler = StreamHandler()
        stderr_handler.setLevel(logging.INFO)
        app.logger.addHandler(stderr_handler)
# Registry of available configurations by name; 'default' is the fallback
# used when no configuration name is given.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'heroku': HerokuConfig,
    'default': DevelopmentConfig
}
| []
| []
| [
"MAIL_SERVER",
"MAIL_PASSWORD",
"DEV_DATABASE_URL",
"DATABASE_URL",
"FLASKY_ADMIN",
"DYNO",
"MAIL_PORT",
"SECRET_KEY",
"MAIL_USERNAME",
"MAIL_USE_TLS",
"TEST_DATABASE_URL"
]
| [] | ["MAIL_SERVER", "MAIL_PASSWORD", "DEV_DATABASE_URL", "DATABASE_URL", "FLASKY_ADMIN", "DYNO", "MAIL_PORT", "SECRET_KEY", "MAIL_USERNAME", "MAIL_USE_TLS", "TEST_DATABASE_URL"] | python | 11 | 0 | |
pype/ftrack/events/action_sync_to_avalon.py | import os
import time
import traceback
from pype.ftrack import BaseAction
from pype.ftrack.lib.avalon_sync import SyncEntitiesFactory
from pypeapp import config
class SyncToAvalonServer(BaseAction):
    """
    Synchronizing data action - from Ftrack to Avalon DB

    Stores all information about entity.
    - Name(string) - Most important information = identifier of entity
    - Parent(ObjectId) - Avalon Project Id, if entity is not project itself
    - Data(dictionary):
        - VisualParent(ObjectId) - Avalon Id of parent asset
        - Parents(array of string) - All parent names except project
        - Tasks(array of string) - Tasks on asset
        - FtrackId(string)
        - entityType(string) - entity's type on Ftrack
        * All Custom attributes in group 'Avalon'
            - custom attributes that start with 'avalon_' are skipped

    * These information are stored for entities in whole project.

    Avalon ID of asset is stored to Ftrack
        - Custom attribute 'avalon_mongo_id'.
    - action IS NOT creating this Custom attribute if doesn't exist
        - run 'Create Custom Attributes' action
        - or do it manually (Not recommended)
    """
    #: Action identifier.
    identifier = "sync.to.avalon.server"
    #: Action label.
    label = "Pype Admin"
    variant = "- Sync To Avalon (Server)"
    #: Action description.
    description = "Send data from Ftrack to Avalon"
    #: Action icon.
    # Served by the statics server when PYPE_STATICS_SERVER is set, otherwise
    # from a localhost URL built from the rest_api preset port (default 8021).
    icon = "{}/ftrack/action_icons/PypeAdmin.svg".format(
        os.environ.get(
            "PYPE_STATICS_SERVER",
            "http://localhost:{}".format(
                config.get_presets().get("services", {}).get(
                    "rest_api", {}
                ).get("default_port", 8021)
            )
        )
    )

    def __init__(self, *args, **kwargs):
        """Create the action plus the factory that performs the sync."""
        super().__init__(*args, **kwargs)
        self.entities_factory = SyncEntitiesFactory(self.log, self.session)

    def register(self):
        """Subscribe discover and launch handlers on the session event hub."""
        # Discover subscription carries this action's priority.
        self.session.event_hub.subscribe(
            "topic=ftrack.action.discover",
            self._discover,
            priority=self.priority
        )

        # Launch events are filtered down to this action's identifier.
        launch_subscription = (
            "topic=ftrack.action.launch and data.actionIdentifier={0}"
        ).format(self.identifier)
        self.session.event_hub.subscribe(launch_subscription, self._launch)

    def discover(self, session, entities, event):
        """ Validation

        Show the action only when the selection contains a project or task
        and the requesting user has one of the privileged security roles.
        """
        # Check if selection is valid
        valid_selection = False
        for ent in event["data"]["selection"]:
            # Ignore entities that are not tasks or projects
            if ent["entityType"].lower() in ["show", "task"]:
                valid_selection = True
                break

        if not valid_selection:
            return False

        # Get user and check his roles
        user_id = event.get("source", {}).get("user", {}).get("id")
        if not user_id:
            return False

        user = session.query("User where id is \"{}\"".format(user_id)).first()
        if not user:
            return False

        role_list = ["Pypeclub", "Administrator", "Project Manager"]
        for role in user["user_security_roles"]:
            if role["security_role"]["name"] in role_list:
                return True
        return False

    def launch(self, session, in_entities, event):
        """Run the synchronization for the selected entities' project.

        Logs per-phase timings, shows a report interface when the factory
        produced one, and always uninstalls/closes the factory resources.
        """
        time_start = time.time()

        self.show_message(event, "Synchronization - Preparing data", True)
        # Get ftrack project
        if in_entities[0].entity_type.lower() == "project":
            ft_project_name = in_entities[0]["full_name"]
        else:
            ft_project_name = in_entities[0]["project"]["full_name"]

        try:
            # A non-None output means setup aborted and is itself the result.
            output = self.entities_factory.launch_setup(ft_project_name)
            if output is not None:
                return output

            time_1 = time.time()

            self.entities_factory.set_cutom_attributes()
            time_2 = time.time()

            # This must happen before all filtering!!!
            self.entities_factory.prepare_avalon_entities(ft_project_name)
            time_3 = time.time()

            self.entities_factory.filter_by_ignore_sync()
            time_4 = time.time()

            self.entities_factory.duplicity_regex_check()
            time_5 = time.time()

            self.entities_factory.prepare_ftrack_ent_data()
            time_6 = time.time()

            self.entities_factory.synchronize()
            time_7 = time.time()

            # Per-phase timing breakdown for debugging slow syncs.
            self.log.debug(
                "*** Synchronization finished ***"
            )
            self.log.debug(
                "preparation <{}>".format(time_1 - time_start)
            )
            self.log.debug(
                "set_cutom_attributes <{}>".format(time_2 - time_1)
            )
            self.log.debug(
                "prepare_avalon_entities <{}>".format(time_3 - time_2)
            )
            self.log.debug(
                "filter_by_ignore_sync <{}>".format(time_4 - time_3)
            )
            self.log.debug(
                "duplicity_regex_check <{}>".format(time_5 - time_4)
            )
            self.log.debug(
                "prepare_ftrack_ent_data <{}>".format(time_6 - time_5)
            )
            self.log.debug(
                "synchronize <{}>".format(time_7 - time_6)
            )
            self.log.debug(
                "* Total time: {}".format(time_7 - time_start)
            )

            # Show the factory's report (if any) in the ftrack interface.
            report = self.entities_factory.report()
            if report and report.get("items"):
                default_title = "Synchronization report ({}):".format(
                    ft_project_name
                )
                self.show_interface(
                    items=report["items"],
                    title=report.get("title", default_title),
                    event=event
                )
            return {
                "success": True,
                "message": "Synchronization Finished"
            }

        except Exception:
            self.log.error(
                "Synchronization failed due to code error", exc_info=True
            )
            # Build an error report with the traceback rendered as HTML.
            msg = "An error has happened during synchronization"
            title = "Synchronization report ({}):".format(ft_project_name)

            items = []
            items.append({
                "type": "label",
                "value": "# {}".format(msg)
            })
            items.append({
                "type": "label",
                "value": "## Traceback of the error"
            })
            items.append({
                "type": "label",
                # NOTE(review): the second replace argument appears to be a
                # literal non-breaking space (likely "&nbsp;" originally) —
                # verify against the repository source.
                "value": "<p>{}</p>".format(
                    str(traceback.format_exc()).replace(
                        "\n", "<br>").replace(
                        " ", " "
                    )
                )
            })

            # Best effort: append whatever partial report is available.
            report = {"items": []}
            try:
                report = self.entities_factory.report()
            except Exception:
                pass

            _items = report.get("items", [])
            if _items:
                items.append(self.entities_factory.report_splitter)
                items.extend(_items)

            self.show_interface(items, title, event)
            return {"success": True, "message": msg}

        finally:
            # Always release factory resources, ignoring teardown errors.
            # NOTE(review): closing entities_factory.session here — confirm
            # this session is not shared with the action's own session.
            try:
                self.entities_factory.dbcon.uninstall()
            except Exception:
                pass

            try:
                self.entities_factory.session.close()
            except Exception:
                pass
def register(session, plugins_presets=None):
    '''Register plugin. Called when used as a plugin.

    Args:
        session: ftrack API session the action is registered against.
        plugins_presets (dict, optional): preset overrides for the action.
            Defaults to an empty dict.
    '''
    # Avoid a shared mutable default argument; normalize None to a new dict.
    if plugins_presets is None:
        plugins_presets = {}
    SyncToAvalonServer(session, plugins_presets).register()
| []
| []
| [
"PYPE_STATICS_SERVER"
]
| [] | ["PYPE_STATICS_SERVER"] | python | 1 | 0 | |
test/e2e/suite_test.go | package e2e
import (
"context"
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/kind/pkg/cluster"
"sigs.k8s.io/kind/pkg/exec"
clusterapi "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/test/helper"
)
const (
	// TestSuiteSetupTimeOut defines the time after which the suite setup times out.
	TestSuiteSetupTimeOut = 300 * time.Second
	// TestSuiteTeardownTimeOut defines the time after which the suite tear down times out.
	TestSuiteTeardownTimeOut = 300 * time.Second

	// pollInterval defines the interval time for a poll operation.
	pollInterval = 5 * time.Second
	// pollTimeout defines the time after which the poll operation times out.
	pollTimeout = 60 * time.Second

	// MinimumCluster represents the minimum number of member clusters to run E2E test.
	MinimumCluster = 2

	// RandomStrLength represents the random string length to combine names.
	RandomStrLength = 3
)
var (
	// kubeconfig is the path to the control-plane kubeconfig ($KUBECONFIG).
	kubeconfig string
	// Clients against the karmada control plane, built in BeforeSuite.
	restConfig         *rest.Config
	kubeClient         kubernetes.Interface
	karmadaClient      karmada.Interface
	dynamicClient      dynamic.Interface
	controlPlaneClient client.Client
	// Member-cluster state discovered in BeforeSuite.
	clusters              []*clusterapi.Cluster
	clusterNames          []string
	clusterClients        []*util.ClusterClient
	clusterDynamicClients []*util.DynamicClusterClient
	// testNamespace is a per-run namespace created in the control plane and
	// in every member cluster; most cases run against it.
	testNamespace   = fmt.Sprintf("karmadatest-%s", rand.String(RandomStrLength))
	clusterProvider *cluster.Provider
	// pullModeClusters maps pull-mode cluster names to kubeconfig paths,
	// parsed from $PULL_BASED_CLUSTERS.
	pullModeClusters map[string]string
)
// TestE2E is the entry point that hooks the Ginkgo suite into `go test`.
func TestE2E(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "E2E Suite")
}
// BeforeSuite builds all control-plane and member-cluster clients, checks the
// environment meets the E2E requirements, and creates the shared namespace.
var _ = ginkgo.BeforeSuite(func() {
	kubeconfig = os.Getenv("KUBECONFIG")
	gomega.Expect(kubeconfig).ShouldNot(gomega.BeEmpty())

	clusterProvider = cluster.NewProvider()
	var err error
	restConfig, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

	kubeClient, err = kubernetes.NewForConfig(restConfig)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

	karmadaClient, err = karmada.NewForConfig(restConfig)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

	dynamicClient, err = dynamic.NewForConfig(restConfig)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

	controlPlaneClient, err = client.New(restConfig, client.Options{})
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

	// Pull-mode kubeconfigs must be known before clusters are filtered.
	pullModeClusters, err = fetchPullBasedClusters()
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

	clusters, err = fetchClusters(karmadaClient)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

	var meetRequirement bool
	meetRequirement, err = isClusterMeetRequirements(clusters)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
	gomega.Expect(meetRequirement).Should(gomega.BeTrue())

	// Build a client pair for every member cluster.
	for _, cluster := range clusters {
		clusterClient, clusterDynamicClient, err := newClusterClientSet(cluster)
		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
		clusterNames = append(clusterNames, cluster.Name)
		clusterClients = append(clusterClients, clusterClient)
		clusterDynamicClients = append(clusterDynamicClients, clusterDynamicClient)
	}
	gomega.Expect(clusterNames).Should(gomega.HaveLen(len(clusters)))

	err = setupTestNamespace(testNamespace, kubeClient, clusterClients)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}, TestSuiteSetupTimeOut.Seconds())
// AfterSuite removes the shared test namespace from the control plane and all
// member clusters.
var _ = ginkgo.AfterSuite(func() {
	// cleanup all namespaces we created both in control plane and member clusters.
	// It will not return error even if there is no such namespace in there that may happen in case setup failed.
	err := cleanupTestNamespace(testNamespace, kubeClient, clusterClients)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}, TestSuiteTeardownTimeOut.Seconds())
// fetchPullBasedClusters parses the PULL_BASED_CLUSTERS environment variable,
// formatted as "name:kubeconfigPath;name:kubeconfigPath;", into a map from
// cluster name to kubeconfig path. Returns (nil, nil) when the variable is
// unset or empty.
func fetchPullBasedClusters() (map[string]string, error) {
	env := os.Getenv("PULL_BASED_CLUSTERS")
	if env == "" {
		return nil, nil
	}

	result := make(map[string]string)
	for _, entry := range strings.Split(strings.TrimSuffix(env, ";"), ";") {
		pair := strings.Split(entry, ":")
		if len(pair) != 2 {
			return nil, fmt.Errorf("failed to parse config path for cluster: %s", entry)
		}
		result[pair[0]] = pair[1]
	}
	return result, nil
}
// fetchClusters will fetch all member clusters we have. Pull-mode clusters
// are only included when a kubeconfig for them was registered in
// pullModeClusters; push-mode clusters are always included.
func fetchClusters(client karmada.Interface) ([]*clusterapi.Cluster, error) {
	clusterList, err := client.ClusterV1alpha1().Clusters().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

	result := make([]*clusterapi.Cluster, 0, len(clusterList.Items))
	for i := range clusterList.Items {
		// Copy the item so the appended pointer does not alias the loop slice.
		member := clusterList.Items[i]
		if member.Spec.SyncMode == clusterapi.Pull {
			if _, exist := pullModeClusters[member.Name]; !exist {
				continue
			}
		}
		result = append(result, &member)
	}
	return result, nil
}
// isClusterMeetRequirements checks if current environment meet the requirements
// of E2E: at least MinimumCluster member clusters, all in ready condition.
func isClusterMeetRequirements(clusters []*clusterapi.Cluster) (bool, error) {
	// check if member cluster number meets requirements
	if len(clusters) < MinimumCluster {
		// Fixed typo in the error message ("at lease" -> "at least").
		return false, fmt.Errorf("needs at least %d member cluster to run, but got: %d", MinimumCluster, len(clusters))
	}

	// check if all member cluster status is ready
	for _, cluster := range clusters {
		if !util.IsClusterReady(&cluster.Status) {
			return false, fmt.Errorf("cluster %s not ready", cluster.GetName())
		}
	}

	klog.Infof("Got %d member cluster and all in ready state.", len(clusters))
	return true, nil
}
// setupTestNamespace will create a namespace in control plane and all member clusters, most of cases will run against it.
// The reason why we need a separated namespace is it will make it easier to cleanup resources deployed by the testing.
func setupTestNamespace(namespace string, kubeClient kubernetes.Interface, clusterClients []*util.ClusterClient) error {
	namespaceObj := helper.NewNamespace(namespace)
	// Control plane first, then every member cluster.
	if _, err := util.CreateNamespace(kubeClient, namespaceObj); err != nil {
		return err
	}
	for _, clusterClient := range clusterClients {
		if _, err := util.CreateNamespace(clusterClient.KubeClient, namespaceObj); err != nil {
			return err
		}
	}
	return nil
}
// cleanupTestNamespace will remove the namespace we setup before for the whole testing.
func cleanupTestNamespace(namespace string, kubeClient kubernetes.Interface, clusterClients []*util.ClusterClient) error {
	// Control plane first, then every member cluster.
	if err := util.DeleteNamespace(kubeClient, namespace); err != nil {
		return err
	}
	for _, clusterClient := range clusterClients {
		if err := util.DeleteNamespace(clusterClient.KubeClient, namespace); err != nil {
			return err
		}
	}
	return nil
}
// getClusterClient returns the kube client for the named member cluster, or
// nil when no such cluster is known.
func getClusterClient(clusterName string) kubernetes.Interface {
	for _, c := range clusterClients {
		if c.ClusterName == clusterName {
			return c.KubeClient
		}
	}
	return nil
}
// getClusterDynamicClient returns the dynamic client for the named member
// cluster, or nil when no such cluster is known.
func getClusterDynamicClient(clusterName string) dynamic.Interface {
	for _, c := range clusterDynamicClients {
		if c.ClusterName == clusterName {
			return c.DynamicClientSet
		}
	}
	return nil
}
// createCluster provisions a kind cluster whose kubeconfig is written to
// kubeConfigPath, then rewrites the API-server address in that kubeconfig to
// the docker-network IP of the given control-plane container so the cluster
// is reachable by that address.
func createCluster(clusterName, kubeConfigPath, controlPlane, clusterContext string) error {
	err := clusterProvider.Create(clusterName, cluster.CreateWithKubeconfigPath(kubeConfigPath))
	if err != nil {
		return err
	}

	// Look up the container IP of the control-plane container.
	cmd := exec.Command(
		"docker", "inspect",
		"--format", "{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}",
		controlPlane,
	)
	lines, err := exec.OutputLines(cmd)
	if err != nil {
		return err
	}

	// Load the kubeconfig kind just wrote, bypassing $KUBECONFIG.
	pathOptions := clientcmd.NewDefaultPathOptions()
	pathOptions.LoadingRules.ExplicitPath = kubeConfigPath
	pathOptions.EnvVar = ""
	config, err := pathOptions.GetStartingConfig()
	if err != nil {
		return err
	}

	// Point the named cluster context at the in-network address (port 6443).
	serverIP := fmt.Sprintf("https://%s:6443", lines[0])
	config.Clusters[clusterContext].Server = serverIP
	err = clientcmd.ModifyConfig(pathOptions, *config, true)
	if err != nil {
		return err
	}
	return nil
}
// deleteCluster tears down a kind cluster created by createCluster.
func deleteCluster(clusterName, kubeConfigPath string) error {
	return clusterProvider.Delete(clusterName, kubeConfigPath)
}
// newClusterClientSet builds a kube client and a dynamic client for a member
// cluster. Push-mode clusters are built through the control-plane client (see
// util.NewClusterClientSet); pull-mode clusters are built directly from the
// kubeconfig path registered in pullModeClusters.
func newClusterClientSet(c *clusterapi.Cluster) (*util.ClusterClient, *util.DynamicClusterClient, error) {
	if c.Spec.SyncMode == clusterapi.Push {
		clusterClient, err := util.NewClusterClientSet(c, controlPlaneClient)
		if err != nil {
			return nil, nil, err
		}
		clusterDynamicClient, err := util.NewClusterDynamicClientSet(c, controlPlaneClient)
		if err != nil {
			return nil, nil, err
		}
		return clusterClient, clusterDynamicClient, nil
	}

	clusterConfigPath := pullModeClusters[c.Name]
	clusterConfig, err := clientcmd.BuildConfigFromFlags("", clusterConfigPath)
	if err != nil {
		return nil, nil, err
	}
	clusterClientSet := util.ClusterClient{ClusterName: c.Name}
	clusterDynamicClientSet := util.DynamicClusterClient{ClusterName: c.Name}
	// NewForConfigOrDie panics on a bad rest config rather than returning err.
	clusterClientSet.KubeClient = kubernetes.NewForConfigOrDie(clusterConfig)
	clusterDynamicClientSet.DynamicClientSet = dynamic.NewForConfigOrDie(clusterConfig)
	return &clusterClientSet, &clusterDynamicClientSet, nil
}
| [
"\"KUBECONFIG\"",
"\"PULL_BASED_CLUSTERS\""
]
| []
| [
"PULL_BASED_CLUSTERS",
"KUBECONFIG"
]
| [] | ["PULL_BASED_CLUSTERS", "KUBECONFIG"] | go | 2 | 0 | |
service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hive.service.cli.thrift;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import javax.security.auth.login.LoginException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hive.service.AbstractService;
import org.apache.hive.service.ServiceException;
import org.apache.hive.service.ServiceUtils;
import org.apache.hive.service.auth.HiveAuthFactory;
import org.apache.hive.service.auth.TSetIpAddressProcessor;
import org.apache.hive.service.cli.CLIService;
import org.apache.hive.service.cli.FetchOrientation;
import org.apache.hive.service.cli.FetchType;
import org.apache.hive.service.cli.GetInfoType;
import org.apache.hive.service.cli.GetInfoValue;
import org.apache.hive.service.cli.HiveSQLException;
import org.apache.hive.service.cli.OperationHandle;
import org.apache.hive.service.cli.OperationStatus;
import org.apache.hive.service.cli.RowSet;
import org.apache.hive.service.cli.SessionHandle;
import org.apache.hive.service.cli.TableSchema;
import org.apache.hive.service.cli.session.SessionManager;
import org.apache.hive.service.server.HiveServer2;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.server.ServerContext;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TServerEventHandler;
import org.apache.thrift.transport.TTransport;
/**
* ThriftCLIService.
*
*/
public abstract class ThriftCLIService extends AbstractService implements TCLIService.Iface, Runnable {
  public static final Log LOG = LogFactory.getLog(ThriftCLIService.class.getName());

  protected CLIService cliService;
  private static final TStatus OK_STATUS = new TStatus(TStatusCode.SUCCESS_STATUS);
  protected static HiveAuthFactory hiveAuthFactory;
  // Counts sessions opened through this service; used for the log message
  // emitted in OpenSession.
  private static final AtomicInteger sessionCount = new AtomicInteger();

  // Bind address/port, resolved in init() from env vars or HiveConf.
  protected int portNum;
  protected InetAddress serverIPAddress;
  protected String hiveHost;
  // One of these serves requests, depending on the configured transport mode.
  protected TServer server;
  protected org.eclipse.jetty.server.Server httpServer;

  private boolean isStarted = false;
  protected boolean isEmbedded = false;

  protected HiveConf hiveConf;

  // Worker-pool sizing, resolved in init() from HiveConf.
  protected int minWorkerThreads;
  protected int maxWorkerThreads;
  protected long workerKeepAliveTime;

  protected TServerEventHandler serverEventHandler;
  // Per-connection context, set by serverEventHandler.processContext.
  protected ThreadLocal<ServerContext> currentServerContext;
  /**
   * Per-connection context holding the session opened on that connection so it
   * can be closed if the client disconnects without closing it properly.
   */
  static class ThriftCLIServerContext implements ServerContext {
    private SessionHandle sessionHandle = null;

    public void setSessionHandle(SessionHandle sessionHandle) {
      this.sessionHandle = sessionHandle;
    }

    public SessionHandle getSessionHandle() {
      return sessionHandle;
    }
  }
  /**
   * @param service the CLIService all requests are delegated to
   * @param serviceName name reported to the AbstractService framework
   */
  public ThriftCLIService(CLIService service, String serviceName) {
    super(serviceName);
    this.cliService = service;
    currentServerContext = new ThreadLocal<ServerContext>();
    // The event handler maintains the open-connections metric and cleans up
    // sessions left open by abruptly disconnected clients.
    serverEventHandler = new TServerEventHandler() {
      @Override
      public ServerContext createContext(
          TProtocol input, TProtocol output) {
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
          try {
            metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS);
          } catch (Exception e) {
            LOG.warn("Error Reporting JDO operation to Metrics system", e);
          }
        }
        return new ThriftCLIServerContext();
      }

      @Override
      public void deleteContext(ServerContext serverContext,
          TProtocol input, TProtocol output) {
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
          try {
            metrics.decrementCounter(MetricsConstant.OPEN_CONNECTIONS);
          } catch (Exception e) {
            LOG.warn("Error Reporting JDO operation to Metrics system", e);
          }
        }
        // Close any session the disconnected client left open.
        ThriftCLIServerContext context = (ThriftCLIServerContext) serverContext;
        SessionHandle sessionHandle = context.getSessionHandle();
        if (sessionHandle != null) {
          LOG.info("Session disconnected without closing properly, close it now");
          try {
            cliService.closeSession(sessionHandle);
          } catch (HiveSQLException e) {
            LOG.warn("Failed to close session: " + e, e);
          }
        }
      }

      @Override
      public void preServe() {
      }

      @Override
      public void processContext(ServerContext serverContext,
          TTransport input, TTransport output) {
        // Make the per-connection context visible to the handler thread.
        currentServerContext.set(serverContext);
      }
    };
  }
  /**
   * Resolves bind host, port and worker-pool settings. The environment
   * variables HIVE_SERVER2_THRIFT_BIND_HOST, HIVE_SERVER2_THRIFT_PORT and
   * HIVE_SERVER2_THRIFT_HTTP_PORT take precedence over HiveConf values.
   */
  @Override
  public synchronized void init(HiveConf hiveConf) {
    this.hiveConf = hiveConf;
    // Initialize common server configs needed in both binary & http modes
    String portString;
    hiveHost = System.getenv("HIVE_SERVER2_THRIFT_BIND_HOST");
    if (hiveHost == null) {
      hiveHost = hiveConf.getVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST);
    }
    try {
      if (hiveHost != null && !hiveHost.isEmpty()) {
        serverIPAddress = InetAddress.getByName(hiveHost);
      } else {
        // No explicit bind host configured; fall back to the local host.
        serverIPAddress = InetAddress.getLocalHost();
      }
    } catch (UnknownHostException e) {
      throw new ServiceException(e);
    }
    // HTTP mode
    if (HiveServer2.isHTTPTransportMode(hiveConf)) {
      workerKeepAliveTime =
          hiveConf.getTimeVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME,
              TimeUnit.SECONDS);
      portString = System.getenv("HIVE_SERVER2_THRIFT_HTTP_PORT");
      if (portString != null) {
        portNum = Integer.valueOf(portString);
      } else {
        portNum = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT);
      }
    }
    // Binary mode
    else {
      workerKeepAliveTime =
          hiveConf.getTimeVar(ConfVars.HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME, TimeUnit.SECONDS);
      portString = System.getenv("HIVE_SERVER2_THRIFT_PORT");
      if (portString != null) {
        portNum = Integer.valueOf(portString);
      } else {
        portNum = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT);
      }
    }
    minWorkerThreads = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS);
    maxWorkerThreads = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS);
    super.init(hiveConf);
  }
@Override
public synchronized void start() {
super.start();
if (!isStarted && !isEmbedded) {
new Thread(this).start();
isStarted = true;
}
}
@Override
public synchronized void stop() {
if (isStarted && !isEmbedded) {
if(server != null) {
server.stop();
LOG.info("Thrift server has stopped");
}
if((httpServer != null) && httpServer.isStarted()) {
try {
httpServer.stop();
LOG.info("Http server has stopped");
} catch (Exception e) {
LOG.error("Error stopping Http server: ", e);
}
}
isStarted = false;
}
super.stop();
}
  /** @return the port this service listens on, resolved in {@link #init}. */
  public int getPortNumber() {
    return portNum;
  }
  /** @return the address this service is bound to, resolved in {@link #init}. */
  public InetAddress getServerIPAddress() {
    return serverIPAddress;
  }
@Override
public TGetDelegationTokenResp GetDelegationToken(TGetDelegationTokenReq req)
throws TException {
TGetDelegationTokenResp resp = new TGetDelegationTokenResp();
if (hiveAuthFactory == null) {
resp.setStatus(unsecureTokenErrorStatus());
} else {
try {
String token = cliService.getDelegationToken(
new SessionHandle(req.getSessionHandle()),
hiveAuthFactory, req.getOwner(), req.getRenewer());
resp.setDelegationToken(token);
resp.setStatus(OK_STATUS);
} catch (HiveSQLException e) {
LOG.error("Error obtaining delegation token", e);
TStatus tokenErrorStatus = HiveSQLException.toTStatus(e);
tokenErrorStatus.setSqlState("42000");
resp.setStatus(tokenErrorStatus);
}
}
return resp;
}
@Override
public TCancelDelegationTokenResp CancelDelegationToken(TCancelDelegationTokenReq req)
throws TException {
TCancelDelegationTokenResp resp = new TCancelDelegationTokenResp();
if (hiveAuthFactory == null) {
resp.setStatus(unsecureTokenErrorStatus());
} else {
try {
cliService.cancelDelegationToken(new SessionHandle(req.getSessionHandle()),
hiveAuthFactory, req.getDelegationToken());
resp.setStatus(OK_STATUS);
} catch (HiveSQLException e) {
LOG.error("Error canceling delegation token", e);
resp.setStatus(HiveSQLException.toTStatus(e));
}
}
return resp;
}
@Override
public TRenewDelegationTokenResp RenewDelegationToken(TRenewDelegationTokenReq req)
throws TException {
TRenewDelegationTokenResp resp = new TRenewDelegationTokenResp();
if (hiveAuthFactory == null) {
resp.setStatus(unsecureTokenErrorStatus());
} else {
try {
cliService.renewDelegationToken(new SessionHandle(req.getSessionHandle()),
hiveAuthFactory, req.getDelegationToken());
resp.setStatus(OK_STATUS);
} catch (HiveSQLException e) {
LOG.error("Error obtaining renewing token", e);
resp.setStatus(HiveSQLException.toTStatus(e));
}
}
return resp;
}
private TStatus unsecureTokenErrorStatus() {
TStatus errorStatus = new TStatus(TStatusCode.ERROR_STATUS);
errorStatus.setErrorMessage("Delegation token only supported over remote " +
"client with kerberos authentication");
return errorStatus;
}
  /**
   * Opens a new client session and records its handle on the per-connection
   * context so it can be cleaned up if the client disconnects abruptly.
   */
  @Override
  public TOpenSessionResp OpenSession(TOpenSessionReq req) throws TException {
    LOG.info("Client protocol version: " + req.getClient_protocol());
    TOpenSessionResp resp = new TOpenSessionResp();
    try {
      SessionHandle sessionHandle = getSessionHandle(req, resp);
      resp.setSessionHandle(sessionHandle.toTSessionHandle());
      // TODO: set real configuration map
      resp.setConfiguration(new HashMap<String, String>());
      resp.setStatus(OK_STATUS);
      ThriftCLIServerContext context =
          (ThriftCLIServerContext)currentServerContext.get();
      if (context != null) {
        context.setSessionHandle(sessionHandle);
      }
      LOG.info("Opened a session, current sessions: " + sessionCount.incrementAndGet());
    } catch (Exception e) {
      LOG.warn("Error opening session: ", e);
      resp.setStatus(HiveSQLException.toTStatus(e));
    }
    return resp;
  }
private String getIpAddress() {
String clientIpAddress;
// Http transport mode.
// We set the thread local ip address, in ThriftHttpServlet.
if (cliService.getHiveConf().getVar(
ConfVars.HIVE_SERVER2_TRANSPORT_MODE).equalsIgnoreCase("http")) {
clientIpAddress = SessionManager.getIpAddress();
}
else {
// Kerberos
if (isKerberosAuthMode()) {
clientIpAddress = hiveAuthFactory.getIpAddress();
}
// Except kerberos, NOSASL
else {
clientIpAddress = TSetIpAddressProcessor.getUserIpAddress();
}
}
LOG.debug("Client's IP Address: " + clientIpAddress);
return clientIpAddress;
}
  /**
   * Returns the effective username.
   * 1. If hive.server2.allow.user.substitution = false: the username of the connecting user
   * 2. If hive.server2.allow.user.substitution = true: the username of the end user,
   * that the connecting user is trying to proxy for.
   * This includes a check whether the connecting user is allowed to proxy for the end user.
   * @param req open-session request; supplies the fallback username and proxy configuration
   * @return the short-form effective username
   * @throws HiveSQLException if the proxy-user check fails
   */
  private String getUserName(TOpenSessionReq req) throws HiveSQLException {
    String userName = null;
    // Kerberos
    if (isKerberosAuthMode()) {
      userName = hiveAuthFactory.getRemoteUser();
    }
    // Except kerberos, NOSASL
    if (userName == null) {
      userName = TSetIpAddressProcessor.getUserName();
    }
    // Http transport mode.
    // We set the thread local username, in ThriftHttpServlet.
    // Note: in http mode this overrides any name resolved above.
    if (cliService.getHiveConf().getVar(
        ConfVars.HIVE_SERVER2_TRANSPORT_MODE).equalsIgnoreCase("http")) {
      userName = SessionManager.getUserName();
    }
    // Last resort: the username supplied in the request itself.
    if (userName == null) {
      userName = req.getUsername();
    }

    userName = getShortName(userName);
    String effectiveClientUser = getProxyUser(userName, req.getConfiguration(), getIpAddress());
    LOG.debug("Client's username: " + effectiveClientUser);
    return effectiveClientUser;
  }
private String getShortName(String userName) {
String ret = null;
if (userName != null) {
int indexOfDomainMatch = ServiceUtils.indexOfDomainMatch(userName);
ret = (indexOfDomainMatch <= 0) ? userName :
userName.substring(0, indexOfDomainMatch);
}
return ret;
}
/**
* Create a session handle
* @param req
* @param res
* @return
* @throws HiveSQLException
* @throws LoginException
* @throws IOException
*/
SessionHandle getSessionHandle(TOpenSessionReq req, TOpenSessionResp res)
throws HiveSQLException, LoginException, IOException {
String userName = getUserName(req);
String ipAddress = getIpAddress();
TProtocolVersion protocol = getMinVersion(CLIService.SERVER_VERSION,
req.getClient_protocol());
SessionHandle sessionHandle;
if (cliService.getHiveConf().getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS) &&
(userName != null)) {
String delegationTokenStr = getDelegationToken(userName);
sessionHandle = cliService.openSessionWithImpersonation(protocol, userName,
req.getPassword(), ipAddress, req.getConfiguration(), delegationTokenStr);
} else {
sessionHandle = cliService.openSession(protocol, userName, req.getPassword(),
ipAddress, req.getConfiguration());
}
res.setServerProtocolVersion(protocol);
return sessionHandle;
}
private String getDelegationToken(String userName)
throws HiveSQLException, LoginException, IOException {
if (userName == null || !cliService.getHiveConf().getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION)
.equalsIgnoreCase(HiveAuthFactory.AuthTypes.KERBEROS.toString())) {
return null;
}
try {
return cliService.getDelegationTokenFromMetaStore(userName);
} catch (UnsupportedOperationException e) {
// The delegation token is not applicable in the given deployment mode
}
return null;
}
private TProtocolVersion getMinVersion(TProtocolVersion... versions) {
TProtocolVersion[] values = TProtocolVersion.values();
int current = values[values.length - 1].getValue();
for (TProtocolVersion version : versions) {
if (current > version.getValue()) {
current = version.getValue();
}
}
for (TProtocolVersion version : values) {
if (version.getValue() == current) {
return version;
}
}
throw new IllegalArgumentException("never");
}
@Override
public TCloseSessionResp CloseSession(TCloseSessionReq req) throws TException {
TCloseSessionResp resp = new TCloseSessionResp();
try {
SessionHandle sessionHandle = new SessionHandle(req.getSessionHandle());
cliService.closeSession(sessionHandle);
LOG.info("Closed a session, current sessions: " + sessionCount.decrementAndGet());
resp.setStatus(OK_STATUS);
ThriftCLIServerContext context =
(ThriftCLIServerContext)currentServerContext.get();
if (context != null) {
context.setSessionHandle(null);
}
} catch (Exception e) {
LOG.warn("Error closing session: ", e);
resp.setStatus(HiveSQLException.toTStatus(e));
}
return resp;
}
@Override
public TGetInfoResp GetInfo(TGetInfoReq req) throws TException {
TGetInfoResp resp = new TGetInfoResp();
try {
GetInfoValue getInfoValue =
cliService.getInfo(new SessionHandle(req.getSessionHandle()),
GetInfoType.getGetInfoType(req.getInfoType()));
resp.setInfoValue(getInfoValue.toTGetInfoValue());
resp.setStatus(OK_STATUS);
} catch (Exception e) {
LOG.warn("Error getting info: ", e);
resp.setStatus(HiveSQLException.toTStatus(e));
}
return resp;
}
@Override
public TExecuteStatementResp ExecuteStatement(TExecuteStatementReq req) throws TException {
TExecuteStatementResp resp = new TExecuteStatementResp();
try {
SessionHandle sessionHandle = new SessionHandle(req.getSessionHandle());
String statement = req.getStatement();
Map<String, String> confOverlay = req.getConfOverlay();
Boolean runAsync = req.isRunAsync();
OperationHandle operationHandle = runAsync ?
cliService.executeStatementAsync(sessionHandle, statement, confOverlay)
: cliService.executeStatement(sessionHandle, statement, confOverlay);
resp.setOperationHandle(operationHandle.toTOperationHandle());
resp.setStatus(OK_STATUS);
} catch (Exception e) {
// Note: it's rather important that this (and other methods) catch Exception, not Throwable;
// in combination with HiveSessionProxy.invoke code, perhaps unintentionally, it used
// to also catch all errors; and now it allows OOMs only to propagate.
LOG.warn("Error executing statement: ", e);
resp.setStatus(HiveSQLException.toTStatus(e));
}
return resp;
}
  // ---------------------------------------------------------------------------
  // Catalog-metadata operations. Each method below follows the same pattern:
  // start the corresponding async operation on the CLI service, return its
  // operation handle, and translate any failure into a TStatus on the response.
  // ---------------------------------------------------------------------------

  /** Starts an operation listing the supported data types. */
  @Override
  public TGetTypeInfoResp GetTypeInfo(TGetTypeInfoReq req) throws TException {
    TGetTypeInfoResp resp = new TGetTypeInfoResp();
    try {
      OperationHandle operationHandle = cliService.getTypeInfo(new SessionHandle(req.getSessionHandle()));
      resp.setOperationHandle(operationHandle.toTOperationHandle());
      resp.setStatus(OK_STATUS);
    } catch (Exception e) {
      LOG.warn("Error getting type info: ", e);
      resp.setStatus(HiveSQLException.toTStatus(e));
    }
    return resp;
  }

  /** Starts an operation listing the available catalogs. */
  @Override
  public TGetCatalogsResp GetCatalogs(TGetCatalogsReq req) throws TException {
    TGetCatalogsResp resp = new TGetCatalogsResp();
    try {
      OperationHandle opHandle = cliService.getCatalogs(new SessionHandle(req.getSessionHandle()));
      resp.setOperationHandle(opHandle.toTOperationHandle());
      resp.setStatus(OK_STATUS);
    } catch (Exception e) {
      LOG.warn("Error getting catalogs: ", e);
      resp.setStatus(HiveSQLException.toTStatus(e));
    }
    return resp;
  }

  /** Starts an operation listing schemas matching the given catalog/schema patterns. */
  @Override
  public TGetSchemasResp GetSchemas(TGetSchemasReq req) throws TException {
    TGetSchemasResp resp = new TGetSchemasResp();
    try {
      OperationHandle opHandle = cliService.getSchemas(
          new SessionHandle(req.getSessionHandle()), req.getCatalogName(), req.getSchemaName());
      resp.setOperationHandle(opHandle.toTOperationHandle());
      resp.setStatus(OK_STATUS);
    } catch (Exception e) {
      LOG.warn("Error getting schemas: ", e);
      resp.setStatus(HiveSQLException.toTStatus(e));
    }
    return resp;
  }

  /** Starts an operation listing tables matching the given name patterns and types. */
  @Override
  public TGetTablesResp GetTables(TGetTablesReq req) throws TException {
    TGetTablesResp resp = new TGetTablesResp();
    try {
      OperationHandle opHandle = cliService
          .getTables(new SessionHandle(req.getSessionHandle()), req.getCatalogName(),
              req.getSchemaName(), req.getTableName(), req.getTableTypes());
      resp.setOperationHandle(opHandle.toTOperationHandle());
      resp.setStatus(OK_STATUS);
    } catch (Exception e) {
      LOG.warn("Error getting tables: ", e);
      resp.setStatus(HiveSQLException.toTStatus(e));
    }
    return resp;
  }

  /** Starts an operation listing the supported table types. */
  @Override
  public TGetTableTypesResp GetTableTypes(TGetTableTypesReq req) throws TException {
    TGetTableTypesResp resp = new TGetTableTypesResp();
    try {
      OperationHandle opHandle = cliService.getTableTypes(new SessionHandle(req.getSessionHandle()));
      resp.setOperationHandle(opHandle.toTOperationHandle());
      resp.setStatus(OK_STATUS);
    } catch (Exception e) {
      LOG.warn("Error getting table types: ", e);
      resp.setStatus(HiveSQLException.toTStatus(e));
    }
    return resp;
  }

  /** Starts an operation listing columns matching the given name patterns. */
  @Override
  public TGetColumnsResp GetColumns(TGetColumnsReq req) throws TException {
    TGetColumnsResp resp = new TGetColumnsResp();
    try {
      OperationHandle opHandle = cliService.getColumns(
          new SessionHandle(req.getSessionHandle()),
          req.getCatalogName(),
          req.getSchemaName(),
          req.getTableName(),
          req.getColumnName());
      resp.setOperationHandle(opHandle.toTOperationHandle());
      resp.setStatus(OK_STATUS);
    } catch (Exception e) {
      LOG.warn("Error getting columns: ", e);
      resp.setStatus(HiveSQLException.toTStatus(e));
    }
    return resp;
  }

  /** Starts an operation listing functions matching the given name pattern. */
  @Override
  public TGetFunctionsResp GetFunctions(TGetFunctionsReq req) throws TException {
    TGetFunctionsResp resp = new TGetFunctionsResp();
    try {
      OperationHandle opHandle = cliService.getFunctions(
          new SessionHandle(req.getSessionHandle()), req.getCatalogName(),
          req.getSchemaName(), req.getFunctionName());
      resp.setOperationHandle(opHandle.toTOperationHandle());
      resp.setStatus(OK_STATUS);
    } catch (Exception e) {
      LOG.warn("Error getting functions: ", e);
      resp.setStatus(HiveSQLException.toTStatus(e));
    }
    return resp;
  }
@Override
public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) throws TException {
TGetOperationStatusResp resp = new TGetOperationStatusResp();
try {
OperationStatus operationStatus = cliService.getOperationStatus(
new OperationHandle(req.getOperationHandle()));
resp.setOperationState(operationStatus.getState().toTOperationState());
HiveSQLException opException = operationStatus.getOperationException();
if (opException != null) {
resp.setSqlState(opException.getSQLState());
resp.setErrorCode(opException.getErrorCode());
resp.setErrorMessage(opException.getMessage());
}
resp.setStatus(OK_STATUS);
} catch (Exception e) {
LOG.warn("Error getting operation status: ", e);
resp.setStatus(HiveSQLException.toTStatus(e));
}
return resp;
}
  /** Cancels a running operation identified by the request's handle. */
  @Override
  public TCancelOperationResp CancelOperation(TCancelOperationReq req) throws TException {
    TCancelOperationResp resp = new TCancelOperationResp();
    try {
      cliService.cancelOperation(new OperationHandle(req.getOperationHandle()));
      resp.setStatus(OK_STATUS);
    } catch (Exception e) {
      LOG.warn("Error cancelling operation: ", e);
      resp.setStatus(HiveSQLException.toTStatus(e));
    }
    return resp;
  }

  /** Closes an operation and releases its server-side resources. */
  @Override
  public TCloseOperationResp CloseOperation(TCloseOperationReq req) throws TException {
    TCloseOperationResp resp = new TCloseOperationResp();
    try {
      cliService.closeOperation(new OperationHandle(req.getOperationHandle()));
      resp.setStatus(OK_STATUS);
    } catch (Exception e) {
      LOG.warn("Error closing operation: ", e);
      resp.setStatus(HiveSQLException.toTStatus(e));
    }
    return resp;
  }

  /** Returns the result-set schema of the given operation. */
  @Override
  public TGetResultSetMetadataResp GetResultSetMetadata(TGetResultSetMetadataReq req)
      throws TException {
    TGetResultSetMetadataResp resp = new TGetResultSetMetadataResp();
    try {
      TableSchema schema = cliService.getResultSetMetadata(new OperationHandle(req.getOperationHandle()));
      resp.setSchema(schema.toTTableSchema());
      resp.setStatus(OK_STATUS);
    } catch (Exception e) {
      LOG.warn("Error getting result set metadata: ", e);
      resp.setStatus(HiveSQLException.toTStatus(e));
    }
    return resp;
  }

  /** Fetches a batch of rows (or logs, per fetch type) from an operation. */
  @Override
  public TFetchResultsResp FetchResults(TFetchResultsReq req) throws TException {
    TFetchResultsResp resp = new TFetchResultsResp();
    try {
      RowSet rowSet = cliService.fetchResults(
          new OperationHandle(req.getOperationHandle()),
          FetchOrientation.getFetchOrientation(req.getOrientation()),
          req.getMaxRows(),
          FetchType.getFetchType(req.getFetchType()));
      resp.setResults(rowSet.toTRowSet());
      // hasMoreRows is always reported false here; clients detect the end of
      // data from an empty row set instead.
      resp.setHasMoreRows(false);
      resp.setStatus(OK_STATUS);
    } catch (Exception e) {
      LOG.warn("Error fetching results: ", e);
      resp.setStatus(HiveSQLException.toTStatus(e));
    }
    return resp;
  }
  /** Transport-specific server loop; implemented by binary/http subclasses. */
  @Override
  public abstract void run();
/**
* If the proxy user name is provided then check privileges to substitute the user.
* @param realUser
* @param sessionConf
* @param ipAddress
* @return
* @throws HiveSQLException
*/
private String getProxyUser(String realUser, Map<String, String> sessionConf,
String ipAddress) throws HiveSQLException {
String proxyUser = null;
// Http transport mode.
// We set the thread local proxy username, in ThriftHttpServlet.
if (cliService.getHiveConf().getVar(
ConfVars.HIVE_SERVER2_TRANSPORT_MODE).equalsIgnoreCase("http")) {
proxyUser = SessionManager.getProxyUserName();
LOG.debug("Proxy user from query string: " + proxyUser);
}
if (proxyUser == null && sessionConf != null && sessionConf.containsKey(HiveAuthFactory.HS2_PROXY_USER)) {
String proxyUserFromThriftBody = sessionConf.get(HiveAuthFactory.HS2_PROXY_USER);
LOG.debug("Proxy user from thrift body: " + proxyUserFromThriftBody);
proxyUser = proxyUserFromThriftBody;
}
if (proxyUser == null) {
return realUser;
}
// check whether substitution is allowed
if (!hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ALLOW_USER_SUBSTITUTION)) {
throw new HiveSQLException("Proxy user substitution is not allowed");
}
// If there's no authentication, then directly substitute the user
if (HiveAuthFactory.AuthTypes.NONE.toString().
equalsIgnoreCase(hiveConf.getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION))) {
return proxyUser;
}
// Verify proxy user privilege of the realUser for the proxyUser
HiveAuthFactory.verifyProxyAccess(realUser, proxyUser, ipAddress, hiveConf);
LOG.debug("Verified proxy user: " + proxyUser);
return proxyUser;
}
  /** True when HiveServer2 is configured for Kerberos authentication. */
  private boolean isKerberosAuthMode() {
    return cliService.getHiveConf().getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION)
        .equalsIgnoreCase(HiveAuthFactory.AuthTypes.KERBEROS.toString());
  }
}
| [
"\"HIVE_SERVER2_THRIFT_BIND_HOST\"",
"\"HIVE_SERVER2_THRIFT_HTTP_PORT\"",
"\"HIVE_SERVER2_THRIFT_PORT\""
]
| []
| [
"HIVE_SERVER2_THRIFT_BIND_HOST",
"HIVE_SERVER2_THRIFT_HTTP_PORT",
"HIVE_SERVER2_THRIFT_PORT"
]
| [] | ["HIVE_SERVER2_THRIFT_BIND_HOST", "HIVE_SERVER2_THRIFT_HTTP_PORT", "HIVE_SERVER2_THRIFT_PORT"] | java | 3 | 0 | |
build/build.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Build is a benchmark that examines compiler and linker performance.
// It executes 'go build -a cmd/go'.
package build // import "golang.org/x/benchmarks/build"
import (
"log"
"os"
"os/exec"
"golang.org/x/benchmarks/driver"
)
// init registers this benchmark with the driver framework under the
// name "build" so it can be selected at run time.
func init() {
	driver.Register("build", benchmark)
}
// benchmark runs the build driver.BenchNum times, keeps the fastest
// result, then re-runs the build under the profilers so that process
// and CPU profiles can be attached to that result.
func benchmark() driver.Result {
	// Pin GOMAXPROCS to 1 unless the caller configured it.
	if os.Getenv("GOMAXPROCS") == "" {
		os.Setenv("GOMAXPROCS", "1")
	}
	res := driver.MakeResult()
	for i := 0; i < driver.BenchNum; i++ {
		if cur := benchmarkOnce(); res.RunTime == 0 || res.RunTime > cur.RunTime {
			res = cur
		}
		log.Printf("Run %v: %+v\n", i, res)
	}
	perf1, perf2 := driver.RunUnderProfiler("go", "build", "-o", "goperf", "-a", "-p", os.Getenv("GOMAXPROCS"), "cmd/go")
	if perf1 != "" {
		res.Files["processes"] = perf1
	}
	if perf2 != "" {
		res.Files["cpuprof"] = perf2
	}
	return res
}
// benchmarkOnce executes a single 'go build -a cmd/go' run, collecting
// system stats, the size of the produced binary, and its section sizes.
func benchmarkOnce() driver.Result {
	// run 'go build -a'
	res := driver.MakeResult()
	cmd := exec.Command("go", "build", "-o", "gobuild", "-a", "-p", os.Getenv("GOMAXPROCS"), "cmd/go")
	out, err := driver.RunAndCollectSysStats(cmd, &res, 1, "build-")
	if err != nil {
		log.Fatalf("Failed to run 'go build -a cmd/go': %v\n%v", err, out)
	}
	// Binary size of the freshly built go command. os.Stat replaces the
	// previous os.Open+Stat pair, which leaked the file descriptor (the
	// handle was never closed) and whose error messages misleadingly
	// referred to $GOROOT/bin/go instead of the local "gobuild" output.
	st, err := os.Stat("gobuild")
	if err != nil {
		log.Fatalf("Failed to stat gobuild: %v\n", err)
	}
	res.Metrics["binary-size"] = uint64(st.Size())
	if sizef := driver.Size("gobuild"); sizef != "" {
		res.Files["sections"] = sizef
	}
	return res
}
| [
"\"GOMAXPROCS\"",
"\"GOMAXPROCS\"",
"\"GOMAXPROCS\""
]
| []
| [
"GOMAXPROCS"
]
| [] | ["GOMAXPROCS"] | go | 1 | 0 | |
integration/fake_dns_server.go | package integration
import (
"fmt"
"net"
"os"
"github.com/containous/traefik/log"
"github.com/miekg/dns"
)
// handler implements dns.Handler for the fake test server.
type handler struct{}

// ServeDNS a fake DNS server
// Simplified version of the Challenge Test Server from Boulder
// https://github.com/letsencrypt/boulder/blob/a6597b9f120207eff192c3e4107a7e49972a0250/test/challtestsrv/dnsone.go#L40
func (s *handler) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
	m := new(dns.Msg)
	m.SetReply(r)
	m.Compress = false
	// Every A query resolves to the docker host (or localhost as fallback),
	// so ACME HTTP challenges land on the machine running the tests.
	fakeDNS := os.Getenv("DOCKER_HOST_IP")
	if fakeDNS == "" {
		fakeDNS = "127.0.0.1"
	}
	for _, q := range r.Question {
		log.Printf("Query -- [%s] %s", q.Name, dns.TypeToString[q.Qtype])
		switch q.Qtype {
		case dns.TypeA:
			record := new(dns.A)
			record.Hdr = dns.RR_Header{
				Name:   q.Name,
				Rrtype: dns.TypeA,
				Class:  dns.ClassINET,
				Ttl:    0,
			}
			record.A = net.ParseIP(fakeDNS)
			m.Answer = append(m.Answer, record)
		case dns.TypeCAA:
			// CAA answers mirror Boulder's test fixtures: "happy-hacker"
			// values allow issuance, "sad-hacker" values deny it.
			addCAARecord := true
			var value string
			switch q.Name {
			case "bad-caa-reserved.com.":
				value = "sad-hacker-ca.invalid"
			case "good-caa-reserved.com.":
				value = "happy-hacker-ca.invalid"
			case "accounturi.good-caa-reserved.com.":
				uri := os.Getenv("ACCOUNT_URI")
				value = fmt.Sprintf("happy-hacker-ca.invalid; accounturi=%s", uri)
			case "recheck.good-caa-reserved.com.":
				// Allow issuance when we're running in the past
				// (under FAKECLOCK), otherwise deny issuance.
				if os.Getenv("FAKECLOCK") != "" {
					value = "happy-hacker-ca.invalid"
				} else {
					value = "sad-hacker-ca.invalid"
				}
			case "dns-01-only.good-caa-reserved.com.":
				value = "happy-hacker-ca.invalid; validationmethods=dns-01"
			case "http-01-only.good-caa-reserved.com.":
				value = "happy-hacker-ca.invalid; validationmethods=http-01"
			case "dns-01-or-http-01.good-caa-reserved.com.":
				value = "happy-hacker-ca.invalid; validationmethods=dns-01,http-01"
			default:
				// Unknown names get no CAA record at all.
				addCAARecord = false
			}
			if addCAARecord {
				record := new(dns.CAA)
				record.Hdr = dns.RR_Header{
					Name:   q.Name,
					Rrtype: dns.TypeCAA,
					Class:  dns.ClassINET,
					Ttl:    0,
				}
				record.Tag = "issue"
				record.Value = value
				m.Answer = append(m.Answer, record)
			}
		}
	}
	// Attach a fixed SOA authority record so responses look complete.
	auth := new(dns.SOA)
	auth.Hdr = dns.RR_Header{Name: "boulder.invalid.", Rrtype: dns.TypeSOA, Class: dns.ClassINET, Ttl: 0}
	auth.Ns = "ns.boulder.invalid."
	auth.Mbox = "master.boulder.invalid."
	auth.Serial = 1
	auth.Refresh = 1
	auth.Retry = 1
	auth.Expire = 1
	auth.Minttl = 1
	m.Ns = append(m.Ns, auth)
	w.WriteMsg(m)
}
// startFakeDNSServer creates a UDP DNS server on port 5053 backed by the
// fake handler; ListenAndServe runs on its own goroutine and the server
// is returned so callers can shut it down.
func startFakeDNSServer() *dns.Server {
	server := &dns.Server{
		Addr:    ":5053",
		Net:     "udp",
		Handler: &handler{},
	}
	go func() {
		log.Infof("Start a fake DNS server.")
		if err := server.ListenAndServe(); err != nil {
			log.Fatalf("Failed to set udp listener %v", err)
		}
	}()
	return server
}
| [
"\"DOCKER_HOST_IP\"",
"\"ACCOUNT_URI\"",
"\"FAKECLOCK\""
]
| []
| [
"DOCKER_HOST_IP",
"FAKECLOCK",
"ACCOUNT_URI"
]
| [] | ["DOCKER_HOST_IP", "FAKECLOCK", "ACCOUNT_URI"] | go | 3 | 0 | |
go-apps/meep-metrics-engine/server/v2/metrics-engine.go | /*
* Copyright (c) 2019 InterDigital Communications, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package server
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
dkm "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-data-key-mgr"
dataModel "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-data-model"
httpLog "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-http-logger"
log "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-logger"
met "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-metrics"
clientv2 "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-metrics-engine-notification-client"
mod "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-model"
mq "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-mq"
redis "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-redis"
"github.com/gorilla/mux"
)
// Backend endpoints (in-cluster service addresses).
const influxDBAddr = "http://meep-influxdb.default.svc.cluster.local:8086"
const metricEvent = "events"
const metricNetwork = "network"
const ServiceName = "Metrics Engine"
const ModuleName = "meep-metrics-engine"
const redisAddr = "meep-redis-master.default.svc.cluster.local:6379"
// Redis key prefix and REST base path for this service.
const metricsEngineKey = "metrics-engine:"
const metricsBasePath = "/metrics/v2/"
// Redis sub-keys for persisted subscriptions.
const typeNetworkSubscription = "netsubs"
const typeEventSubscription = "eventsubs"

// Notification type names sent to subscriber callbacks.
const (
	notifEventMetrics   = "EventMetricsNotification"
	notifNetworkMetrics = "NetworkMetricsNotification"
)

// Default subscription scope when the client provides none.
var defaultDuration string = "1s"
var defaultLimit int32 = 1

// Redis DB index used for metrics monitoring.
var METRICS_DB = 0

// Monotonic subscription-id counters and in-memory registries.
var nextNetworkSubscriptionIdAvailable int
var nextEventSubscriptionIdAvailable int

var networkSubscriptionMap = map[string]*NetworkRegistration{}
var eventSubscriptionMap = map[string]*EventRegistration{}

// Runtime state initialized in Init().
var SandboxName string
var mqLocal *mq.MsgQueue
var handlerId int
var activeModel *mod.Model
var activeScenarioName string
var metricStore *met.MetricStore
var hostUrl *url.URL
var basePath string
var baseKey string
var rc *redis.Connector
// EventRegistration holds an event-metric subscription: the original
// request parameters, the tag filter derived from them, and the ticker
// driving periodic notifications.
type EventRegistration struct {
	params        *EventSubscriptionParams
	requestedTags map[string]string
	ticker        *time.Ticker
}

// NetworkRegistration holds a network-metric subscription (same role as
// EventRegistration, for network metrics).
type NetworkRegistration struct {
	params        *NetworkSubscriptionParams
	requestedTags map[string]string
	ticker        *time.Ticker
}
// Init - Metrics engine initialization. Reads the sandbox configuration
// from the environment, connects to the message queue, model store,
// metric store and Redis, restores persisted subscriptions, and resumes
// metric collection if a scenario is already active.
func Init() (err error) {
	// Retrieve Sandbox name from environment variable
	SandboxName = strings.TrimSpace(os.Getenv("MEEP_SANDBOX_NAME"))
	if SandboxName == "" {
		err = errors.New("MEEP_SANDBOX_NAME env variable not set")
		log.Error(err.Error())
		return err
	}
	log.Info("MEEP_SANDBOX_NAME: ", SandboxName)

	// hostUrl is the url of the node serving the resourceURL
	// Retrieve public url address where service is reachable, if not present, use Host URL environment variable
	hostUrl, err = url.Parse(strings.TrimSpace(os.Getenv("MEEP_PUBLIC_URL")))
	if err != nil || hostUrl == nil || hostUrl.String() == "" {
		hostUrl, err = url.Parse(strings.TrimSpace(os.Getenv("MEEP_HOST_URL")))
		if err != nil {
			// Fall back to an empty URL rather than failing init.
			hostUrl = new(url.URL)
		}
	}
	log.Info("resource URL: ", hostUrl)

	// Set base path
	basePath = "/" + SandboxName + metricsBasePath

	// Create message queue
	mqLocal, err = mq.NewMsgQueue(mq.GetLocalName(SandboxName), ModuleName, SandboxName, redisAddr)
	if err != nil {
		log.Error("Failed to create Message Queue with error: ", err)
		return err
	}
	log.Info("Message Queue created")

	// Create new active scenario model
	modelCfg := mod.ModelCfg{
		Name:      "activeScenario",
		Namespace: SandboxName,
		Module:    ModuleName,
		UpdateCb:  nil,
		DbAddr:    redisAddr,
	}
	activeModel, err = mod.NewModel(modelCfg)
	if err != nil {
		log.Error("Failed to create model: ", err.Error())
		return err
	}

	// Connect to Metric Store
	metricStore, err = met.NewMetricStore("", SandboxName, influxDBAddr, redisAddr)
	if err != nil {
		log.Error("Failed connection to Redis: ", err)
		return err
	}

	// Get base store key
	baseKey = dkm.GetKeyRoot(SandboxName) + metricsEngineKey

	// Connect to Redis DB to monitor metrics
	rc, err = redis.NewConnector(redisAddr, METRICS_DB)
	if err != nil {
		log.Error("Failed connection to Redis DB. Error: ", err)
		return err
	}
	log.Info("Connected to Redis DB")

	// Restore subscriptions persisted in Redis from a previous run.
	nextNetworkSubscriptionIdAvailable = 1
	nextEventSubscriptionIdAvailable = 1
	networkSubscriptionReInit()
	eventSubscriptionReInit()

	// Initialize metrics engine if scenario already active
	activateScenarioMetrics()

	return nil
}
// Run - Start Metrics Engine execution
func Run() (err error) {
	// Subscribe to sandbox-level scenario lifecycle messages.
	h := mq.MsgHandler{Handler: msgHandler, UserData: nil}
	if handlerId, err = mqLocal.RegisterHandler(h); err != nil {
		log.Error("Failed to listen for sandbox updates: ", err.Error())
		return err
	}
	return nil
}
// msgHandler dispatches message-queue notifications: scenario
// activation/termination drives metric collection; everything else is
// ignored.
func msgHandler(msg *mq.Msg, userData interface{}) {
	msgText := mq.PrintMsg(msg)
	switch msg.Message {
	case mq.MsgScenarioActivate:
		log.Debug("RX MSG: ", msgText)
		activateScenarioMetrics()
	case mq.MsgScenarioTerminate:
		log.Debug("RX MSG: ", msgText)
		terminateScenarioMetrics()
	default:
		log.Trace("Ignoring unsupported message: ", msgText)
	}
}
// activateScenarioMetrics begins metric collection for the newly active
// scenario: it re-points the HTTP logger and metric store at the
// scenario, flushes stale entries, records an "Init" event at T0, and
// starts the periodic snapshot thread.
func activateScenarioMetrics() {
	// Sync with active scenario store
	activeModel.UpdateScenario()

	// Update current active scenario name
	activeScenarioName = activeModel.GetScenarioName()
	if activeScenarioName == "" {
		// No scenario deployed; nothing to activate.
		return
	}

	// Set new HTTP logger store name
	_ = httpLog.ReInit(ModuleName, SandboxName, activeScenarioName, redisAddr, influxDBAddr)

	// Set Metrics Store
	err := metricStore.SetStore(activeScenarioName)
	if err != nil {
		log.Error("Failed to set store with error: " + err.Error())
		return
	}

	// Flush metric store entries on activation
	metricStore.Flush()

	//inserting an INIT event at T0
	var ev dataModel.Event
	ev.Name = "Init"
	ev.Type_ = "OTHER"
	j, _ := json.Marshal(ev)
	var em met.EventMetric
	em.Event = string(j)
	em.Description = "scenario deployed"
	err = metricStore.SetEventMetric(ev.Type_, em)
	if err != nil {
		log.Error("Failed to sent init event: " + err.Error())
		//do not return on this error, continue processing
	}

	// Start snapshot thread
	err = metricStore.StartSnapshotThread()
	if err != nil {
		log.Error("Failed to start snapshot thread: " + err.Error())
		return
	}
}
// terminateScenarioMetrics stops metric collection when the active
// scenario is torn down.
func terminateScenarioMetrics() {
	// Refresh our view of the (now empty) active scenario.
	activeModel.UpdateScenario()

	// Stop the periodic snapshot thread.
	metricStore.StopSnapshotThread()

	// Re-point the HTTP logger at the terminated scenario store.
	_ = httpLog.ReInit(ModuleName, SandboxName, activeScenarioName, redisAddr, influxDBAddr)

	// Detach the metric store from the scenario.
	if err := metricStore.SetStore(""); err != nil {
		log.Error(err.Error())
	}

	// Reset current active scenario name
	activeScenarioName = ""
}
func mePostEventQuery(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
log.Debug("mePostEventQuery")
// Retrieve network metric query parameters from request body
var params EventQueryParams
if r.Body == nil {
err := errors.New("Request body is missing")
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(¶ms)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Make sure metrics store is up
if metricStore == nil {
err := errors.New("No active scenario to get metrics from")
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusNotFound)
return
}
// Parse tags
tags := make(map[string]string)
for _, tag := range params.Tags {
tags[tag.Name] = tag.Value
}
// Get scope
duration := ""
limit := 0
if params.Scope != nil {
duration = params.Scope.Duration
limit = int(params.Scope.Limit)
}
// Get metrics
valuesArray, err := metricStore.GetInfluxMetric(met.EvMetName, tags, params.Fields, duration, limit)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if len(valuesArray) == 0 {
err := errors.New("No matching metrics found")
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusNoContent)
return
}
// Prepare & send response
var response EventMetricList
response.Name = "event metrics"
response.Columns = append(params.Fields, "time")
response.Values = make([]EventMetric, len(valuesArray))
for index, values := range valuesArray {
metric := &response.Values[index]
metric.Time = values["time"].(string)
if values[met.EvMetEvent] != nil {
if val, ok := values[met.EvMetEvent].(string); ok {
metric.Event = val
}
}
}
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func mePostHttpQuery(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
log.Debug("mePostHttpQuery")
// Retrieve network metric query parameters from request body
var params HttpQueryParams
if r.Body == nil {
err := errors.New("Request body is missing")
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(¶ms)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Make sure metrics store is up
if metricStore == nil {
err := errors.New("No active scenario to get metrics from")
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusNotFound)
return
}
// Parse tags
tags := make(map[string]string)
for _, tag := range params.Tags {
tags[tag.Name] = tag.Value
}
// Get scope
duration := ""
limit := 0
if params.Scope != nil {
duration = params.Scope.Duration
limit = int(params.Scope.Limit)
}
// Get metrics
valuesArray, err := metricStore.GetInfluxMetric(met.HttpLogMetricName, tags, params.Fields, duration, limit)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if len(valuesArray) == 0 {
err := errors.New("No matching metrics found")
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusNoContent)
return
}
// Prepare & send response
var response HttpMetricList
response.Name = "http metrics"
response.Columns = append(params.Fields, "time")
response.Values = make([]HttpMetric, len(valuesArray))
for index, values := range valuesArray {
metric := &response.Values[index]
metric.Time = values["time"].(string)
if values[met.HttpLoggerName] != nil {
if val, ok := values[met.HttpLoggerName].(string); ok {
metric.LoggerName = val
}
}
if values[met.HttpLoggerDirection] != nil {
if val, ok := values[met.HttpLoggerDirection].(string); ok {
metric.Direction = val
}
}
if values[met.HttpLogId] != nil {
metric.Id = met.JsonNumToInt32(values[met.HttpLogId].(json.Number))
}
if values[met.HttpLogEndpoint] != nil {
if val, ok := values[met.HttpLogEndpoint].(string); ok {
metric.Endpoint = val
}
}
if values[met.HttpUrl] != nil {
if val, ok := values[met.HttpUrl].(string); ok {
metric.Url = val
}
}
if values[met.HttpMethod] != nil {
if val, ok := values[met.HttpMethod].(string); ok {
metric.Method = val
}
}
if values[met.HttpBody] != nil {
if val, ok := values[met.HttpBody].(string); ok {
metric.Body = val
}
}
if values[met.HttpRespBody] != nil {
if val, ok := values[met.HttpRespBody].(string); ok {
metric.RespBody = val
}
}
if values[met.HttpRespCode] != nil {
if val, ok := values[met.HttpRespCode].(string); ok {
metric.RespCode = val
}
}
if values[met.HttpProcTime] != nil {
if val, ok := values[met.HttpProcTime].(string); ok {
metric.ProcTime = val
}
}
}
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
func mePostNetworkQuery(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
log.Debug("mePostNetworkQuery")
// Retrieve network metric query parameters from request body
var params NetworkQueryParams
if r.Body == nil {
err := errors.New("Request body is missing")
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(¶ms)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Make sure metrics store is up
if metricStore == nil {
err := errors.New("No active scenario to get metrics from")
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusNotFound)
return
}
// Parse tags
tags := make(map[string]string)
for _, tag := range params.Tags {
tags[tag.Name] = tag.Value
}
// Get scope
duration := ""
limit := 0
if params.Scope != nil {
duration = params.Scope.Duration
limit = int(params.Scope.Limit)
}
// Get metrics
valuesArray, err := metricStore.GetInfluxMetric(met.NetMetName, tags, params.Fields, duration, limit)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if len(valuesArray) == 0 {
err := errors.New("No matching metrics found")
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusNoContent)
return
}
// Prepare & send response
var response NetworkMetricList
response.Name = "network metrics"
response.Columns = append(params.Fields, "time")
response.Values = make([]NetworkMetric, len(valuesArray))
for index, values := range valuesArray {
metric := &response.Values[index]
metric.Time = values["time"].(string)
if values[met.NetMetLatency] != nil {
metric.Lat = met.JsonNumToInt32(values[met.NetMetLatency].(json.Number))
}
if values[met.NetMetULThroughput] != nil {
metric.Ul = met.JsonNumToFloat64(values[met.NetMetULThroughput].(json.Number))
}
if values[met.NetMetDLThroughput] != nil {
metric.Dl = met.JsonNumToFloat64(values[met.NetMetDLThroughput].(json.Number))
}
if values[met.NetMetULPktLoss] != nil {
metric.Ulos = met.JsonNumToFloat64(values[met.NetMetULPktLoss].(json.Number))
}
if values[met.NetMetDLPktLoss] != nil {
metric.Dlos = met.JsonNumToFloat64(values[met.NetMetDLPktLoss].(json.Number))
}
}
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, string(jsonResponse))
}
// createEventSubscription handles POST for event metric subscriptions.
// It decodes the subscription parameters from the request body, registers a
// periodic event notification and persists the subscription in Redis.
// Responds 201 with the created subscription on success.
func createEventSubscription(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	var response EventSubscription
	eventSubscriptionParams := new(EventSubscriptionParams)
	decoder := json.NewDecoder(r.Body)
	err := decoder.Decode(&eventSubscriptionParams)
	if err != nil {
		log.Error(err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Reserve the next subscription id; roll back if registration fails
	newSubsId := nextEventSubscriptionIdAvailable
	nextEventSubscriptionIdAvailable++
	subsIdStr := strconv.Itoa(newSubsId)
	err = registerEvent(eventSubscriptionParams, subsIdStr)
	if err != nil {
		nextEventSubscriptionIdAvailable--
		log.Error(err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Fixed: include the "metrics/" path segment for consistency with the
	// network-subscription create response and the event list endpoint,
	// which both use basePath + "metrics/subscriptions/...".
	response.ResourceURL = hostUrl.String() + basePath + "metrics/subscriptions/event/" + subsIdStr
	response.SubscriptionId = subsIdStr
	response.SubscriptionType = eventSubscriptionParams.SubscriptionType
	response.Period = eventSubscriptionParams.Period
	response.ClientCorrelator = eventSubscriptionParams.ClientCorrelator
	response.CallbackReference = eventSubscriptionParams.CallbackReference
	response.EventQueryParams = eventSubscriptionParams.EventQueryParams

	_ = rc.JSONSetEntry(baseKey+typeEventSubscription+":"+subsIdStr, ".", convertEventSubscriptionToJson(&response))

	jsonResponse, err := json.Marshal(response)
	if err != nil {
		log.Error(err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusCreated)
	// Write the payload directly: Fprintf with a non-constant format string
	// corrupts the response when the JSON contains '%' characters.
	_, _ = w.Write(jsonResponse)
}
// createNetworkSubscription handles POST for network metric subscriptions.
// It decodes the subscription parameters, registers a periodic network-metric
// notification and persists the subscription in Redis.
// Responds 201 with the created subscription on success.
func createNetworkSubscription(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	var response NetworkSubscription
	networkSubscriptionParams := new(NetworkSubscriptionParams)
	decoder := json.NewDecoder(r.Body)
	err := decoder.Decode(&networkSubscriptionParams)
	if err != nil {
		log.Error(err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Reserve the next subscription id; roll back if registration fails
	newSubsId := nextNetworkSubscriptionIdAvailable
	nextNetworkSubscriptionIdAvailable++
	subsIdStr := strconv.Itoa(newSubsId)
	err = registerNetwork(networkSubscriptionParams, subsIdStr)
	if err != nil {
		nextNetworkSubscriptionIdAvailable--
		log.Error(err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	response.ResourceURL = hostUrl.String() + basePath + "metrics/subscriptions/network/" + subsIdStr
	response.SubscriptionId = subsIdStr
	response.SubscriptionType = networkSubscriptionParams.SubscriptionType
	response.Period = networkSubscriptionParams.Period
	response.ClientCorrelator = networkSubscriptionParams.ClientCorrelator
	response.CallbackReference = networkSubscriptionParams.CallbackReference
	response.NetworkQueryParams = networkSubscriptionParams.NetworkQueryParams

	_ = rc.JSONSetEntry(baseKey+typeNetworkSubscription+":"+subsIdStr, ".", convertNetworkSubscriptionToJson(&response))

	jsonResponse, err := json.Marshal(response)
	if err != nil {
		log.Error(err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusCreated)
	// Write the payload directly: Fprintf with a non-constant format string
	// corrupts the response when the JSON contains '%' characters.
	_, _ = w.Write(jsonResponse)
}
// populateEventList unmarshals one stored event subscription entry and
// appends it to the EventSubscriptionList accumulated in userData.
// Intended as a callback for rc.ForEachJSONEntry.
func populateEventList(key string, jsonInfo string, userData interface{}) error {
	list := userData.(*EventSubscriptionList)
	var sub EventSubscription
	if err := json.Unmarshal([]byte(jsonInfo), &sub); err != nil {
		return err
	}
	list.EventSubscription = append(list.EventSubscription, sub)
	return nil
}
// populateNetworkList unmarshals one stored network subscription entry and
// appends it to the NetworkSubscriptionList accumulated in userData.
// Intended as a callback for rc.ForEachJSONEntry.
func populateNetworkList(key string, jsonInfo string, userData interface{}) error {
	list := userData.(*NetworkSubscriptionList)
	var sub NetworkSubscription
	if err := json.Unmarshal([]byte(jsonInfo), &sub); err != nil {
		return err
	}
	list.NetworkSubscription = append(list.NetworkSubscription, sub)
	return nil
}
// deregisterEvent stops the notification ticker for the given subscription id
// and removes its registration. Returns false if no registration exists.
func deregisterEvent(subsId string) bool {
	eventRegistration := eventSubscriptionMap[subsId]
	if eventRegistration == nil {
		return false
	}
	// Guard against nil ticker: registerEvent only creates a ticker when
	// Period != 0, so Stop() would panic for non-periodic registrations.
	if eventRegistration.ticker != nil {
		eventRegistration.ticker.Stop()
	}
	// delete() rather than storing nil so the map does not accumulate
	// dead keys as subscriptions come and go.
	delete(eventSubscriptionMap, subsId)
	return true
}
// deregisterNetwork stops the notification ticker for the given subscription
// id and removes its registration. Returns false if no registration exists.
func deregisterNetwork(subsId string) bool {
	networkRegistration := networkSubscriptionMap[subsId]
	if networkRegistration == nil {
		return false
	}
	// Guard against nil ticker: registerNetwork only creates a ticker when
	// Period != 0, so Stop() would panic for non-periodic registrations.
	if networkRegistration.ticker != nil {
		networkRegistration.ticker.Stop()
	}
	// delete() rather than storing nil so the map does not accumulate
	// dead keys as subscriptions come and go.
	delete(networkSubscriptionMap, subsId)
	return true
}
// createClient builds a REST API client whose base path is the provided
// notification URL. Returns an error if the client cannot be created.
func createClient(notifyPath string) (*clientv2.APIClient, error) {
	cfg := clientv2.NewConfiguration()
	cfg.BasePath = notifyPath
	client := clientv2.NewAPIClient(cfg)
	if client == nil {
		log.Error("Failed to create Subscription App REST API client: ", cfg.BasePath)
		return nil, errors.New("Failed to create Subscription App REST API client")
	}
	return client, nil
}
// sendEventNotification POSTs an event notification to the subscriber's
// callback URL and records the call latency as a notification metric
// (nil response recorded on failure).
func sendEventNotification(notifyUrl string, ctx context.Context, subscriptionId string, notification clientv2.EventNotification) {
	client, clientErr := createClient(notifyUrl)
	if clientErr != nil {
		log.Error(clientErr)
		return
	}

	start := time.Now()
	resp, postErr := client.NotificationsApi.PostEventNotification(ctx, subscriptionId, notification)
	// Elapsed time in milliseconds (fractional)
	elapsed := float64(time.Since(start).Microseconds()) / 1000.0
	if postErr != nil {
		log.Error(postErr)
		met.ObserveNotification(SandboxName, ServiceName, notifEventMetrics, notifyUrl, nil, elapsed)
		return
	}
	met.ObserveNotification(SandboxName, ServiceName, notifEventMetrics, notifyUrl, resp, elapsed)
}
// sendNetworkNotification POSTs a network notification to the subscriber's
// callback URL and records the call latency as a notification metric
// (nil response recorded on failure).
func sendNetworkNotification(notifyUrl string, ctx context.Context, subscriptionId string, notification clientv2.NetworkNotification) {
	client, clientErr := createClient(notifyUrl)
	if clientErr != nil {
		log.Error(clientErr)
		return
	}

	start := time.Now()
	resp, postErr := client.NotificationsApi.PostNetworkNotification(ctx, subscriptionId, notification)
	// Elapsed time in milliseconds (fractional)
	elapsed := float64(time.Since(start).Microseconds()) / 1000.0
	if postErr != nil {
		log.Error(postErr)
		met.ObserveNotification(SandboxName, ServiceName, notifNetworkMetrics, notifyUrl, nil, elapsed)
		return
	}
	met.ObserveNotification(SandboxName, ServiceName, notifNetworkMetrics, notifyUrl, resp, elapsed)
}
// processEventNotification builds an event-metric snapshot for the given
// subscription and sends it asynchronously to the subscriber's callback URL.
// Invoked on each tick of the subscription's periodic ticker.
func processEventNotification(subsId string) {
	eventRegistration := eventSubscriptionMap[subsId]
	if eventRegistration == nil {
		log.Error("Event registration not found for subscriptionId: ", subsId)
		return
	}

	var response clientv2.EventMetricList
	response.Name = "event metrics"

	// Get metrics
	if metricStore != nil {
		valuesArray, err := metricStore.GetInfluxMetric(
			metricEvent,
			eventRegistration.requestedTags,
			eventRegistration.params.EventQueryParams.Fields,
			eventRegistration.params.EventQueryParams.Scope.Duration,
			int(eventRegistration.params.EventQueryParams.Scope.Limit))
		// On query error the notification is still sent, with an empty metric list
		if err == nil {
			response.Columns = append(eventRegistration.params.EventQueryParams.Fields, "time")
			response.Values = make([]clientv2.EventMetric, len(valuesArray))
			for index, values := range valuesArray {
				metric := &response.Values[index]
				metric.Time = values["time"].(string)
				// Event value is optional and must be a string to be copied
				if values[met.EvMetEvent] != nil {
					if val, ok := values[met.EvMetEvent].(string); ok {
						metric.Event = val
					}
				}
			}
		}
	}

	var eventNotif clientv2.EventNotification
	eventNotif.CallbackData = eventRegistration.params.ClientCorrelator
	eventNotif.EventMetricList = &response
	// Send in a goroutine so a slow subscriber cannot block the ticker loop
	go sendEventNotification(eventRegistration.params.CallbackReference.NotifyURL, context.TODO(), subsId, eventNotif)
}
// processNetworkNotification builds a network-metric snapshot for the given
// subscription and sends it asynchronously to the subscriber's callback URL.
// Invoked on each tick of the subscription's periodic ticker.
func processNetworkNotification(subsId string) {
	networkRegistration := networkSubscriptionMap[subsId]
	if networkRegistration == nil {
		log.Error("Network registration not found for subscriptionId: ", subsId)
		return
	}

	var response clientv2.NetworkMetricList
	response.Name = "network metrics"

	// Get metrics
	if metricStore != nil {
		valuesArray, err := metricStore.GetInfluxMetric(
			metricNetwork,
			networkRegistration.requestedTags,
			networkRegistration.params.NetworkQueryParams.Fields,
			networkRegistration.params.NetworkQueryParams.Scope.Duration,
			int(networkRegistration.params.NetworkQueryParams.Scope.Limit))
		// On query error the notification is still sent, with an empty metric list
		if err == nil {
			response.Columns = append(networkRegistration.params.NetworkQueryParams.Fields, "time")
			response.Values = make([]clientv2.NetworkMetric, len(valuesArray))
			for index, values := range valuesArray {
				metric := &response.Values[index]
				metric.Time = values["time"].(string)
				// Each metric field is optional; copy only those present
				if values[met.NetMetLatency] != nil {
					metric.Lat = met.JsonNumToInt32(values[met.NetMetLatency].(json.Number))
				}
				if values[met.NetMetULThroughput] != nil {
					metric.Ul = met.JsonNumToFloat64(values[met.NetMetULThroughput].(json.Number))
				}
				if values[met.NetMetDLThroughput] != nil {
					metric.Dl = met.JsonNumToFloat64(values[met.NetMetDLThroughput].(json.Number))
				}
				if values[met.NetMetULPktLoss] != nil {
					metric.Ulos = met.JsonNumToFloat64(values[met.NetMetULPktLoss].(json.Number))
				}
				if values[met.NetMetDLPktLoss] != nil {
					metric.Dlos = met.JsonNumToFloat64(values[met.NetMetDLPktLoss].(json.Number))
				}
			}
		}
	}

	var networkNotif clientv2.NetworkNotification
	networkNotif.CallbackData = networkRegistration.params.ClientCorrelator
	networkNotif.NetworkMetricList = &response
	// Send in a goroutine so a slow subscriber cannot block the ticker loop
	go sendNetworkNotification(networkRegistration.params.CallbackReference.NotifyURL, context.TODO(), subsId, networkNotif)
}
// registerEvent validates the subscription parameters, stores an
// EventRegistration under subsId and, for periodic subscriptions, starts a
// ticker-driven goroutine that pushes a notification every params.Period
// seconds. Returns an error for nil params or an unknown subscription type.
func registerEvent(params *EventSubscriptionParams, subsId string) (err error) {
	if params == nil {
		err = errors.New("Nil parameters")
		return err
	}

	//only support one type of registration for now
	switch params.SubscriptionType {
	case ("period"):
		// Fill in default scope values where the request left them unset
		if params.EventQueryParams.Scope == nil {
			var scope Scope
			scope.Limit = defaultLimit
			scope.Duration = defaultDuration
			params.EventQueryParams.Scope = &scope
		} else {
			if params.EventQueryParams.Scope.Duration == "" {
				params.EventQueryParams.Scope.Duration = defaultDuration
			}
			if params.EventQueryParams.Scope.Limit == 0 {
				params.EventQueryParams.Scope.Limit = defaultLimit
			}
		}

		var eventRegistration EventRegistration
		// NOTE(review): when Period == 0 no ticker is created, leaving
		// eventRegistration.ticker nil — confirm callers guarantee Period > 0
		// or that deregistration handles a nil ticker.
		if params.Period != 0 {
			ticker := time.NewTicker(time.Duration(params.Period) * time.Second)
			eventRegistration.ticker = ticker
		}
		eventRegistration.params = params

		//read the json tags and store for quicker access
		tags := make(map[string]string)
		for _, tag := range params.EventQueryParams.Tags {
			//extracting name: and value: into a string
			jsonInfo, err := json.Marshal(tag)
			if err != nil {
				log.Error(err.Error())
				return err
			}
			var tmpTags map[string]string
			//storing the tag in a temporary map to use the values
			err = json.Unmarshal([]byte(jsonInfo), &tmpTags)
			if err != nil {
				log.Error(err.Error())
				return err
			}
			tags[tmpTags["name"]] = tmpTags["value"]
		}
		eventRegistration.requestedTags = tags
		eventSubscriptionMap[subsId] = &eventRegistration

		if params.Period != 0 {
			// Notification loop: runs until the ticker is stopped at deregistration
			go func() {
				for range eventRegistration.ticker.C {
					processEventNotification(subsId)
				}
			}()
		}
		return nil
	default:
	}
	// Reached only when the switch matched no known subscription type
	err = errors.New("SubscriptionType unknown")
	return err
}
// registerNetwork validates the subscription parameters, stores a
// NetworkRegistration under subsId and, for periodic subscriptions, starts a
// ticker-driven goroutine that pushes a notification every params.Period
// seconds. Returns an error for nil params or an unknown subscription type.
func registerNetwork(params *NetworkSubscriptionParams, subsId string) (err error) {
	if params == nil {
		err = errors.New("Nil parameters")
		return err
	}

	//only support one type of registration for now
	switch params.SubscriptionType {
	case ("period"):
		// Fill in default scope values where the request left them unset
		if params.NetworkQueryParams.Scope == nil {
			var scope Scope
			scope.Limit = defaultLimit
			scope.Duration = defaultDuration
			params.NetworkQueryParams.Scope = &scope
		} else {
			if params.NetworkQueryParams.Scope.Duration == "" {
				params.NetworkQueryParams.Scope.Duration = defaultDuration
			}
			if params.NetworkQueryParams.Scope.Limit == 0 {
				params.NetworkQueryParams.Scope.Limit = defaultLimit
			}
		}

		var networkRegistration NetworkRegistration
		// NOTE(review): when Period == 0 no ticker is created, leaving
		// networkRegistration.ticker nil — confirm callers guarantee Period > 0
		// or that deregistration handles a nil ticker.
		if params.Period != 0 {
			ticker := time.NewTicker(time.Duration(params.Period) * time.Second)
			networkRegistration.ticker = ticker
		}
		networkRegistration.params = params

		//read the json tags and store for quicker access
		tags := make(map[string]string)
		for _, tag := range params.NetworkQueryParams.Tags {
			//extracting name: and value: into a string
			jsonInfo, err := json.Marshal(tag)
			if err != nil {
				log.Error(err.Error())
				return err
			}
			var tmpTags map[string]string
			//storing the tag in a temporary map to use the values
			err = json.Unmarshal([]byte(jsonInfo), &tmpTags)
			if err != nil {
				log.Error(err.Error())
				return err
			}
			tags[tmpTags["name"]] = tmpTags["value"]
		}
		networkRegistration.requestedTags = tags
		networkSubscriptionMap[subsId] = &networkRegistration

		if params.Period != 0 {
			// Notification loop: runs until the ticker is stopped at deregistration
			go func() {
				for range networkRegistration.ticker.C {
					processNetworkNotification(subsId)
				}
			}()
		}
		return nil
	default:
	}
	// Reached only when the switch matched no known subscription type
	err = errors.New("SubscriptionType unknown")
	return err
}
// getEventSubscription handles GET for the event subscription list:
// it collects every stored event subscription from Redis and returns the
// list as JSON.
func getEventSubscription(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	var response EventSubscriptionList

	keyName := baseKey + typeEventSubscription + "*"
	_ = rc.ForEachJSONEntry(keyName, populateEventList, &response)
	response.ResourceURL = hostUrl.String() + basePath + "metrics/subscriptions/event"

	jsonResponse, err := json.Marshal(response)
	if err != nil {
		log.Error(err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	// Write the payload directly: Fprintf with a non-constant format string
	// corrupts the response when the JSON contains '%' characters.
	_, _ = w.Write(jsonResponse)
}
// getNetworkSubscription handles GET for the network subscription list:
// it collects every stored network subscription from Redis and returns the
// list as JSON.
func getNetworkSubscription(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	var response NetworkSubscriptionList

	keyName := baseKey + typeNetworkSubscription + "*"
	_ = rc.ForEachJSONEntry(keyName, populateNetworkList, &response)
	response.ResourceURL = hostUrl.String() + basePath + "metrics/subscriptions/network"

	jsonResponse, err := json.Marshal(response)
	if err != nil {
		log.Error(err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	// Write the payload directly: Fprintf with a non-constant format string
	// corrupts the response when the JSON contains '%' characters.
	_, _ = w.Write(jsonResponse)
}
// getEventSubscriptionById handles GET for a single event subscription,
// looked up in Redis by the subscriptionId path parameter.
// Responds 404 when no matching subscription exists.
func getEventSubscriptionById(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	vars := mux.Vars(r)

	jsonResponse, _ := rc.JSONGetEntry(baseKey+typeEventSubscription+":"+vars["subscriptionId"], ".")
	if jsonResponse == "" {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	w.WriteHeader(http.StatusOK)
	// fmt.Fprint (not Fprintf): the stored JSON may contain '%' characters
	// that Fprintf would interpret as format verbs.
	fmt.Fprint(w, jsonResponse)
}
// getNetworkSubscriptionById handles GET for a single network subscription,
// looked up in Redis by the subscriptionId path parameter.
// Responds 404 when no matching subscription exists.
func getNetworkSubscriptionById(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	vars := mux.Vars(r)

	jsonResponse, _ := rc.JSONGetEntry(baseKey+typeNetworkSubscription+":"+vars["subscriptionId"], ".")
	if jsonResponse == "" {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	w.WriteHeader(http.StatusOK)
	// fmt.Fprint (not Fprintf): the stored JSON may contain '%' characters
	// that Fprintf would interpret as format verbs.
	fmt.Fprint(w, jsonResponse)
}
// deleteEventSubscriptionById handles DELETE for a single event subscription:
// it removes the stored entry from Redis, stops the associated notification
// ticker, and responds 204 (or 404 when no registration was active).
func deleteEventSubscriptionById(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	subsId := mux.Vars(r)["subscriptionId"]

	if err := rc.JSONDelEntry(baseKey+typeEventSubscription+":"+subsId, "."); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if deregisterEvent(subsId) {
		w.WriteHeader(http.StatusNoContent)
	} else {
		w.WriteHeader(http.StatusNotFound)
	}
}
// deleteNetworkSubscriptionById handles DELETE for a single network
// subscription: it removes the stored entry from Redis, stops the associated
// notification ticker, and responds 204 (or 404 when no registration was active).
func deleteNetworkSubscriptionById(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	subsId := mux.Vars(r)["subscriptionId"]

	if err := rc.JSONDelEntry(baseKey+typeNetworkSubscription+":"+subsId, "."); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if deregisterNetwork(subsId) {
		w.WriteHeader(http.StatusNoContent)
	} else {
		w.WriteHeader(http.StatusNotFound)
	}
}
// networkSubscriptionReInit restores network subscriptions persisted in Redis
// (e.g. after a restart): it re-registers each one and resets the
// next-available subscription id to one past the highest id found.
func networkSubscriptionReInit() {
	//reusing the object response for the get multiple zonalSubscription
	var responseList NetworkSubscriptionList

	keyName := baseKey + typeNetworkSubscription + "*"
	_ = rc.ForEachJSONEntry(keyName, populateNetworkList, &responseList)

	maxSubscriptionId := 0
	for _, response := range responseList.NetworkSubscription {
		// Rebuild the registration parameters from the persisted subscription
		var networkSubscriptionParams NetworkSubscriptionParams
		networkSubscriptionParams.ClientCorrelator = response.ClientCorrelator
		networkSubscriptionParams.CallbackReference = response.CallbackReference
		networkSubscriptionParams.NetworkQueryParams = response.NetworkQueryParams
		networkSubscriptionParams.Period = response.Period
		networkSubscriptionParams.SubscriptionType = response.SubscriptionType

		subscriptionId, err := strconv.Atoi(response.SubscriptionId)
		if err != nil {
			// Non-numeric ids are logged and skipped
			log.Error(err)
		} else {
			if subscriptionId > maxSubscriptionId {
				maxSubscriptionId = subscriptionId
			}
			_ = registerNetwork(&networkSubscriptionParams, response.SubscriptionId)
		}
	}
	nextNetworkSubscriptionIdAvailable = maxSubscriptionId + 1
}
// eventSubscriptionReInit restores event subscriptions persisted in Redis
// (e.g. after a restart): it re-registers each one and resets the
// next-available subscription id to one past the highest id found.
func eventSubscriptionReInit() {
	//reusing the object response for the get multiple zonalSubscription
	var responseList EventSubscriptionList

	keyName := baseKey + typeEventSubscription + "*"
	_ = rc.ForEachJSONEntry(keyName, populateEventList, &responseList)

	maxSubscriptionId := 0
	for _, response := range responseList.EventSubscription {
		// Rebuild the registration parameters from the persisted subscription
		var eventSubscriptionParams EventSubscriptionParams
		eventSubscriptionParams.ClientCorrelator = response.ClientCorrelator
		eventSubscriptionParams.CallbackReference = response.CallbackReference
		eventSubscriptionParams.EventQueryParams = response.EventQueryParams
		eventSubscriptionParams.Period = response.Period
		eventSubscriptionParams.SubscriptionType = response.SubscriptionType

		subscriptionId, err := strconv.Atoi(response.SubscriptionId)
		if err != nil {
			// Non-numeric ids are logged and skipped
			log.Error(err)
		} else {
			if subscriptionId > maxSubscriptionId {
				maxSubscriptionId = subscriptionId
			}
			_ = registerEvent(&eventSubscriptionParams, response.SubscriptionId)
		}
	}
	nextEventSubscriptionIdAvailable = maxSubscriptionId + 1
}
| [
"\"MEEP_SANDBOX_NAME\"",
"\"MEEP_PUBLIC_URL\"",
"\"MEEP_HOST_URL\""
]
| []
| [
"MEEP_HOST_URL",
"MEEP_PUBLIC_URL",
"MEEP_SANDBOX_NAME"
]
| [] | ["MEEP_HOST_URL", "MEEP_PUBLIC_URL", "MEEP_SANDBOX_NAME"] | go | 3 | 0 | |
setup.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from setuptools import setup, find_packages, Extension
import sys
# Guard against unsupported interpreters before doing any real work.
if sys.version_info < (3, 6):
    sys.exit('Sorry, Python >= 3.6 is required for fairseq.')

# The README becomes the PyPI long description passed to setup() below.
with open('README.md') as f:
    readme = f.read()

# Platform-specific compiler flags for the C++ extensions:
# macOS (darwin) needs libc++ selected explicitly.
if sys.platform == 'darwin':
    extra_compile_args = ['-stdlib=libc++', '-O3']
else:
    extra_compile_args = ['-std=c++11', '-O3']
class NumpyExtension(Extension):
    """Extension that resolves numpy's include directory lazily.

    Importing numpy is deferred until ``include_dirs`` is read, so the
    setup script can be parsed before numpy is installed.
    Source: https://stackoverflow.com/a/54128391
    """

    def __init__(self, *args, **kwargs):
        # Holds user-supplied include dirs; numpy's dir is appended on read.
        self._user_include_dirs = []
        super().__init__(*args, **kwargs)

    @property
    def include_dirs(self):
        import numpy
        return self._user_include_dirs + [numpy.get_include()]

    @include_dirs.setter
    def include_dirs(self, dirs):
        self._user_include_dirs = dirs
# C/C++/Cython extension modules. The NumpyExtension entries defer the numpy
# include-path lookup until build time (see the NumpyExtension class above).
extensions = [
    Extension(
        'fairseq.libbleu',
        sources=[
            'fairseq/clib/libbleu/libbleu.cpp',
            'fairseq/clib/libbleu/module.cpp',
        ],
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        'fairseq.data.data_utils_fast',
        sources=['fairseq/data/data_utils_fast.pyx'],
        language='c++',
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        'fairseq.data.token_block_utils_fast',
        sources=['fairseq/data/token_block_utils_fast.pyx'],
        language='c++',
        extra_compile_args=extra_compile_args,
    ),
]
# Extra build commands registered with setup() below.
cmdclass = {}

try:
    # torch is not available when generating docs; in that case the whole
    # block is skipped via the ImportError handler below.
    from torch.utils import cpp_extension
    extensions.extend([
        cpp_extension.CppExtension(
            'fairseq.libnat',
            sources=[
                'fairseq/clib/libnat/edit_dist.cpp',
            ],
        )
    ])

    # Build the CUDA variant only when a CUDA toolchain is advertised
    # through the CUDA_HOME environment variable.
    if 'CUDA_HOME' in os.environ:
        extensions.extend([
            cpp_extension.CppExtension(
                'fairseq.libnat_cuda',
                sources=[
                    'fairseq/clib/libnat_cuda/edit_dist.cu',
                    'fairseq/clib/libnat_cuda/binding.cpp'
                ],
            )])
    cmdclass['build_ext'] = cpp_extension.BuildExtension
except ImportError:
    pass
# Read the Docs builds documentation only, so skip compiled extensions there.
if 'READTHEDOCS' in os.environ:
    # don't build extensions when generating docs
    extensions = []
    if 'build_ext' in cmdclass:
        del cmdclass['build_ext']

    # use CPU build of PyTorch
    dependency_links = [
        'https://download.pytorch.org/whl/cpu/torch-1.3.0%2Bcpu-cp36-cp36m-linux_x86_64.whl'
    ]
else:
    dependency_links = []

# Remove previously compiled Cython artifacts on `setup.py clean`.
# NOTE(review): shell=True with a glob string is Unix-only; confirm Windows
# builds are handled elsewhere.
if 'clean' in sys.argv[1:]:
    # Source: https://bit.ly/2NLVsgE
    print("deleting Cython files...")
    import subprocess
    subprocess.run(['rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd'], shell=True)
# Package metadata and build configuration assembled above.
setup(
    name='fairseq',
    version='0.9.0',
    description='Facebook AI Research Sequence-to-Sequence Toolkit',
    url='https://github.com/pytorch/fairseq',
    classifiers=[
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
    long_description=readme,
    long_description_content_type='text/markdown',
    # Build-time requirements (Cython/numpy are needed to compile extensions).
    setup_requires=[
        'cython',
        'numpy',
        'setuptools>=18.0',
    ],
    install_requires=[
        'cffi',
        'cython',
        'kaldi_io',
        'numpy',
        'regex',
        'sacrebleu',
        'torch',
        'tqdm',
    ],
    dependency_links=dependency_links,
    packages=find_packages(exclude=['scripts', 'tests']),
    ext_modules=extensions,
    test_suite='tests',
    # Command-line entry points installed into the environment's bin/.
    entry_points={
        'console_scripts': [
            'fairseq-eval-lm = fairseq_cli.eval_lm:cli_main',
            'fairseq-generate = fairseq_cli.generate:cli_main',
            'fairseq-interactive = fairseq_cli.interactive:cli_main',
            'fairseq-preprocess = fairseq_cli.preprocess:cli_main',
            'fairseq-score = fairseq_cli.score:cli_main',
            'fairseq-train = fairseq_cli.train:cli_main',
            'fairseq-validate = fairseq_cli.validate:cli_main',
        ],
    },
    cmdclass=cmdclass,
    zip_safe=False,
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/ddevapp/config.go | package ddevapp
import (
"bytes"
"fmt"
"github.com/Masterminds/sprig"
"github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/nodeps"
"html/template"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/drud/ddev/pkg/globalconfig"
"regexp"
"runtime"
"github.com/drud/ddev/pkg/appports"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/output"
"github.com/drud/ddev/pkg/util"
"github.com/drud/ddev/pkg/version"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
// Regexp pattern to determine if a hostname is valid per RFC 1123.
var hostRegex = regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`)

// Command defines commands to be run as pre/post hooks
type Command struct {
	// Exec holds the command configured under the "exec" hook key.
	Exec string `yaml:"exec,omitempty"`
	// ExecHost holds the command configured under the "exec-host" hook key.
	ExecHost string `yaml:"exec-host,omitempty"`
}

// Provider is the interface which all provider plugins must implement.
type Provider interface {
	Init(app *DdevApp) error
	ValidateField(string, string) error
	PromptForConfig() error
	Write(string) error
	Read(string) error
	Validate() error
	// GetBackup returns the backup's file location and import path.
	GetBackup(string, string) (fileLocation string, importPath string, err error)
}
// init() is for testing situations only, allowing us to override the default webserver type
// or caching behavior
func init() {
// This is for automated testing only. It allows us to override the webserver type.
if testWebServerType := os.Getenv("DDEV_TEST_WEBSERVER_TYPE"); testWebServerType != "" {
WebserverDefault = testWebServerType
}
if testWebcache := os.Getenv("DDEV_TEST_USE_WEBCACHE"); testWebcache != "" {
WebcacheEnabledDefault = true
}
if testNFSMount := os.Getenv("DDEV_TEST_USE_NFSMOUNT"); testNFSMount != "" {
NFSMountEnabledDefault = true
}
}
// NewApp creates a new DdevApp struct with defaults set and overridden by any existing config.yml.
// Defaults are applied first, then any config.yaml (and optionally its
// overrides) is layered on top, then provider selection is resolved.
func NewApp(AppRoot string, includeOverrides bool, provider string) (*DdevApp, error) {
	// Set defaults.
	app := &DdevApp{}

	if !fileutil.FileExists(AppRoot) {
		return app, fmt.Errorf("project root %s does not exist", AppRoot)
	}
	app.AppRoot = AppRoot
	app.ConfigPath = app.GetConfigPath("config.yaml")
	app.APIVersion = version.DdevVersion
	app.Type = AppTypePHP
	app.PHPVersion = PHPDefault
	app.WebserverType = WebserverDefault
	app.WebcacheEnabled = WebcacheEnabledDefault
	app.NFSMountEnabled = NFSMountEnabledDefault
	app.RouterHTTPPort = DdevDefaultRouterHTTPPort
	app.RouterHTTPSPort = DdevDefaultRouterHTTPSPort
	app.MariaDBVersion = version.MariaDBDefaultVersion
	// Provide a default app name based on directory name
	app.Name = filepath.Base(app.AppRoot)
	app.OmitContainers = globalconfig.DdevGlobalConfig.OmitContainers

	// These should always default to the latest image/tag names from the Version package.
	app.WebImage = version.GetWebImage()
	app.DBImage = version.GetDBImage(version.MariaDBDefaultVersion)
	app.DBAImage = version.GetDBAImage()
	app.BgsyncImage = version.GetBgsyncImage()

	// Load from file if available. This will return an error if the file doesn't exist,
	// and it is up to the caller to determine if that's an issue.
	if _, err := os.Stat(app.ConfigPath); !os.IsNotExist(err) {
		_, err = app.ReadConfig(includeOverrides)
		if err != nil {
			return app, fmt.Errorf("%v exists but cannot be read. It may be invalid due to a syntax error.: %v", app.ConfigPath, err)
		}
	}
	app.SetApptypeSettingsPaths()

	// If the dbimage has not been overridden (because it takes precedence
	// and the mariadb_version *has* been changed by config,
	// use the related dbimage.
	if app.DBImage == version.GetDBImage(version.MariaDBDefaultVersion) && app.MariaDBVersion != version.MariaDBDefaultVersion {
		app.DBImage = version.GetDBImage(app.MariaDBVersion)
	}

	// Turn off webcache_enabled except if macOS/darwin or global `developer_mode: true`
	if runtime.GOOS != "darwin" && app.WebcacheEnabled && !globalconfig.DdevGlobalConfig.DeveloperMode {
		app.WebcacheEnabled = false
		util.Warning("webcache_enabled is not yet supported on %s, disabling it", runtime.GOOS)
	}

	// Allow override with provider.
	// Otherwise we accept whatever might have been in config file if there was anything.
	if provider == "" && app.Provider != "" {
		// Do nothing. This is the case where the config has a provider and no override is provided. Config wins.
	} else if provider == ProviderPantheon || provider == ProviderDrudS3 || provider == ProviderDefault {
		app.Provider = provider // Use the provider passed-in. Function argument wins.
	} else if provider == "" && app.Provider == "" {
		app.Provider = ProviderDefault // Nothing passed in, nothing configured. Set c.Provider to default
	} else {
		return app, fmt.Errorf("provider '%s' is not implemented", provider)
	}
	app.SetRavenTags()
	return app, nil
}
// GetConfigPath returns the absolute path, under the project's .ddev
// directory, of the application config file named by filename.
func (app *DdevApp) GetConfigPath(filename string) string {
	ddevDir := filepath.Join(app.AppRoot, ".ddev")
	return filepath.Join(ddevDir, filename)
}
// WriteConfig writes the app configuration into the .ddev folder.
// It serializes a sanitized copy of the app (default image names elided),
// reserves the configured host ports, appends usage comments and hook
// suggestions, writes provider import settings, and runs the project-type
// post-config action.
func (app *DdevApp) WriteConfig() error {
	// Work against a copy of the DdevApp, since we don't want to actually change it.
	appcopy := *app
	// Update the "APIVersion" to be the ddev version.
	appcopy.APIVersion = version.DdevVersion

	// Only set the images on write if non-default values have been specified.
	if appcopy.WebImage == version.GetWebImage() {
		appcopy.WebImage = ""
	}
	if appcopy.DBImage == version.GetDBImage(appcopy.MariaDBVersion) {
		appcopy.DBImage = ""
	}
	// Fixed: this DBAImage check appeared twice in a row; one occurrence suffices.
	if appcopy.DBAImage == version.GetDBAImage() {
		appcopy.DBAImage = ""
	}
	if appcopy.BgsyncImage == version.GetBgsyncImage() {
		appcopy.BgsyncImage = ""
	}

	// We now want to reserve the port we're writing for HostDBPort and HostWebserverPort and so they don't
	// accidentally get used for other projects.
	err := app.CheckAndReserveHostPorts()
	if err != nil {
		return err
	}

	// Don't write default working dir values to config
	defaults := appcopy.DefaultWorkingDirMap()
	for service, defaultWorkingDir := range defaults {
		if app.WorkingDir[service] == defaultWorkingDir {
			delete(appcopy.WorkingDir, service)
		}
	}

	err = PrepDdevDirectory(filepath.Dir(appcopy.ConfigPath))
	if err != nil {
		return err
	}

	cfgbytes, err := yaml.Marshal(appcopy)
	if err != nil {
		return err
	}

	// Append current image information
	cfgbytes = append(cfgbytes, []byte(fmt.Sprintf("\n\n# This config.yaml was created with ddev version %s \n# webimage: %s\n# dbimage: %s\n# dbaimage: %s\n# bgsyncimage: %s\n# However we do not recommend explicitly wiring these images into the\n# config.yaml as they may break future versions of ddev.\n# You can update this config.yaml using 'ddev config'.\n", version.DdevVersion, version.GetWebImage(), version.GetDBImage(), version.GetDBAImage(), version.GetBgsyncImage()))...)

	// Append hook information and sample hook suggestions.
	cfgbytes = append(cfgbytes, []byte(ConfigInstructions)...)
	cfgbytes = append(cfgbytes, appcopy.GetHookDefaultComments()...)

	err = ioutil.WriteFile(appcopy.ConfigPath, cfgbytes, 0644)
	if err != nil {
		return err
	}

	provider, err := appcopy.GetProvider()
	if err != nil {
		return err
	}
	err = provider.Write(appcopy.GetConfigPath("import.yaml"))
	if err != nil {
		return err
	}

	// Allow project-specific post-config action
	err = appcopy.PostConfigAction()
	if err != nil {
		return err
	}

	return nil
}
// CheckAndReserveHostPorts checks that configured host ports are not already
// reserved by another project, then reserves them for this one.
func (app *DdevApp) CheckAndReserveHostPorts() error {
	// Collect only the ports that are actually configured.
	reserved := make([]string, 0, 3)
	for _, port := range []string{app.HostDBPort, app.HostWebserverPort, app.HostHTTPSPort} {
		if port != "" {
			reserved = append(reserved, port)
		}
	}

	if len(reserved) > 0 {
		if err := globalconfig.CheckHostPortsAvailable(app.Name, reserved); err != nil {
			return err
		}
	}
	return globalconfig.ReservePorts(app.Name, reserved)
}
// ReadConfig reads project configuration from the config.yaml file
// It does not attempt to set default values; that's NewApp's job.
func (app *DdevApp) ReadConfig(includeOverrides bool) ([]string, error) {
// Load config.yaml
err := app.LoadConfigYamlFile(app.ConfigPath)
if err != nil {
return []string{}, fmt.Errorf("unable to load config file %s: %v", app.ConfigPath, err)
}
configOverrides := []string{}
// Load config.*.y*ml after in glob order
if includeOverrides {
glob := filepath.Join(filepath.Dir(app.ConfigPath), "config.*.y*ml")
configOverrides, err = filepath.Glob(glob)
if err != nil {
return []string{}, err
}
for _, item := range configOverrides {
err = app.LoadConfigYamlFile(item)
if err != nil {
return []string{}, fmt.Errorf("unable to load config file %s: %v", item, err)
}
}
}
return append([]string{app.ConfigPath}, configOverrides...), nil
}
// LoadConfigYamlFile loads one config.yaml into app, overriding what might be there.
// filePath may be the primary config.yaml or an override file.
func (app *DdevApp) LoadConfigYamlFile(filePath string) error {
	source, err := ioutil.ReadFile(filePath)
	if err != nil {
		// Fixed: report the file actually being read (filePath), not
		// app.ConfigPath — this function also loads override files.
		return fmt.Errorf("could not find an active ddev configuration at %s have you run 'ddev config'? %v", filePath, err)
	}

	// validate extend command keys
	err = validateCommandYaml(source)
	if err != nil {
		return fmt.Errorf("invalid configuration in %s: %v", filePath, err)
	}

	// ReadConfig config values from file.
	err = yaml.Unmarshal(source, app)
	if err != nil {
		return err
	}
	return nil
}
// WarnIfConfigReplace just messages user about whether config is being
// created fresh or an existing configuration is being replaced.
func (app *DdevApp) WarnIfConfigReplace() {
	if !app.ConfigExists() {
		util.Success("Creating a new ddev project config in the current directory (%s)", app.AppRoot)
		util.Success("Once completed, your configuration will be written to %s\n", app.ConfigPath)
		return
	}
	util.Warning("You are reconfiguring the project at %s. \nThe existing configuration will be updated and replaced.", app.AppRoot)
}
// PromptForConfig goes through a set of prompts to receive user input and
// populate the app configuration: project name, docroot, app type,
// config-override action, and provider-specific settings.
func (app *DdevApp) PromptForConfig() error {
	app.WarnIfConfigReplace()

	// Re-prompt until a valid project name is supplied.
	for {
		nameErr := app.promptForName()
		if nameErr == nil {
			break
		}
		output.UserOut.Printf("%v", nameErr)
	}

	if err := app.docrootPrompt(); err != nil {
		return err
	}
	if err := app.AppTypePrompt(); err != nil {
		return err
	}
	if err := app.ConfigFileOverrideAction(); err != nil {
		return err
	}
	return app.providerInstance.PromptForConfig()
}
// ValidateConfig ensures the configuration meets ddev's requirements:
// valid provider, project name, hostnames, app type, PHP version, webserver
// type, omitted containers, MariaDB version, and mutually exclusive
// webcache/NFS settings.
// NOTE(review): the err.(someType) assertions below convert errors to
// project-declared types and would panic if the underlying error has a
// different concrete type — presumably guaranteed by those types'
// definitions; confirm.
func (app *DdevApp) ValidateConfig() error {
	provider, err := app.GetProvider()
	if err != nil {
		return err.(invalidProvider)
	}

	// validate project name
	if err = provider.ValidateField("Name", app.Name); err != nil {
		return err.(invalidAppName)
	}

	// validate hostnames
	for _, hn := range app.GetHostnames() {
		if !hostRegex.MatchString(hn) {
			return fmt.Errorf("invalid hostname: %s. See https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_hostnames for valid hostname requirements", hn).(invalidHostname)
		}
	}

	// validate apptype
	if !IsValidAppType(app.Type) {
		return fmt.Errorf("invalid app type: %s", app.Type).(invalidAppType)
	}

	// validate PHP version
	if !IsValidPHPVersion(app.PHPVersion) {
		return fmt.Errorf("invalid PHP version: %s, must be one of %v", app.PHPVersion, GetValidPHPVersions()).(invalidPHPVersion)
	}

	// validate webserver type
	if !IsValidWebserverType(app.WebserverType) {
		return fmt.Errorf("invalid webserver type: %s, must be one of %s", app.WebserverType, GetValidWebserverTypes()).(invalidWebserverType)
	}

	if !IsValidOmitContainers(app.OmitContainers) {
		return fmt.Errorf("invalid omit_containers: %s, must be one of %s", app.OmitContainers, GetValidOmitContainers()).(InvalidOmitContainers)
	}

	// Validate mariadb version
	if !IsValidMariaDBVersion(app.MariaDBVersion) {
		return fmt.Errorf("invalid mariadb_version: %s, must be one of %s", app.MariaDBVersion, GetValidMariaDBVersions()).(invalidMariaDBVersion)
	}

	// The two mount/caching strategies are mutually exclusive.
	if app.WebcacheEnabled && app.NFSMountEnabled {
		return fmt.Errorf("webcache_enabled and nfs_mount_enabled cannot both be set to true, use one or the other")
	}

	return nil
}
// DockerComposeYAMLPath returns the absolute path to where the
// docker-compose.yaml should exist for this app.
func (app *DdevApp) DockerComposeYAMLPath() string {
	composePath := app.GetConfigPath("docker-compose.yaml")
	return composePath
}
// GetHostname returns the primary hostname of the app: the project name
// joined to the ddev top-level domain.
func (app *DdevApp) GetHostname() string {
	return fmt.Sprintf("%s.%s", app.Name, version.DDevTLD)
}
// GetHostnames returns an array of all the configured hostnames: the primary
// hostname, any additional hostnames under the ddev TLD, and any additional
// FQDNs, de-duplicated. Order is not guaranteed (map iteration).
func (app *DdevApp) GetHostnames() []string {
	// Collect candidates in a set so duplicates collapse to one entry.
	seen := make(map[string]struct{})
	seen[app.GetHostname()] = struct{}{}
	for _, extra := range app.AdditionalHostnames {
		seen[extra+"."+version.DDevTLD] = struct{}{}
	}
	for _, fqdn := range app.AdditionalFQDNs {
		seen[fqdn] = struct{}{}
	}

	// Flatten the set back into a slice.
	hostnames := make([]string, 0, len(seen))
	for hostname := range seen {
		hostnames = append(hostnames, hostname)
	}
	return hostnames
}
// WriteDockerComposeConfig writes a docker-compose.yaml to the app configuration directory.
//
// If a docker-compose.yaml already exists but was not generated by ddev
// (it lacks the ddev file signature), it is preserved as
// docker-compose.yaml.bak before being replaced.
//
// Fixes vs. original: the outer `var err error` was redundant (it was
// shadowed inside the if-block and re-declared by `:=` below), and the final
// `return err` always returned nil; both are now explicit.
func (app *DdevApp) WriteDockerComposeConfig() error {
	if fileutil.FileExists(app.DockerComposeYAMLPath()) {
		found, err := fileutil.FgrepStringInFile(app.DockerComposeYAMLPath(), DdevFileSignature)
		util.CheckErr(err)

		// If we did *not* find the ddev file signature in docker-compose.yaml, we'll back it up and warn about it.
		if !found {
			util.Warning("User-managed docker-compose.yaml will be replaced with ddev-generated docker-compose.yaml. Original file will be placed in docker-compose.yaml.bak")
			_ = os.Remove(app.DockerComposeYAMLPath() + ".bak")
			err = os.Rename(app.DockerComposeYAMLPath(), app.DockerComposeYAMLPath()+".bak")
			util.CheckErr(err)
		}
	}

	f, err := os.Create(app.DockerComposeYAMLPath())
	if err != nil {
		return err
	}
	defer util.CheckClose(f)

	rendered, err := app.RenderComposeYAML()
	if err != nil {
		return err
	}

	if _, err = f.WriteString(rendered); err != nil {
		return err
	}
	return nil
}
// warnIfCustomFiles warns when dir contains files matching glob and returns
// true if any were found; returns false when the directory does not exist.
func warnIfCustomFiles(dir, glob, label string) bool {
	if _, err := os.Stat(dir); err != nil {
		return false
	}
	files, err := filepath.Glob(filepath.Join(dir, glob))
	util.CheckErr(err)
	if len(files) > 0 {
		util.Warning("Using custom %s configuration: %v", label, files)
		return true
	}
	return false
}

// CheckCustomConfig warns the user if any custom configuration files are in use.
func (app *DdevApp) CheckCustomConfig() {
	// Get the path to .ddev for the current app.
	ddevDir := filepath.Dir(app.ConfigPath)

	customConfig := false
	if _, err := os.Stat(filepath.Join(ddevDir, "nginx-site.conf")); err == nil && app.WebserverType == WebserverNginxFPM {
		util.Warning("Using custom nginx configuration in nginx-site.conf")
		customConfig = true
	}
	if _, err := os.Stat(filepath.Join(ddevDir, "apache", "apache-site.conf")); err == nil && app.WebserverType != WebserverNginxFPM {
		util.Warning("Using custom apache configuration in apache/apache-site.conf")
		customConfig = true
	}

	// Each of these directories may hold drop-in configuration fragments;
	// the repetitive per-directory checks are factored into a helper.
	if warnIfCustomFiles(filepath.Join(ddevDir, "nginx"), "*.conf", "nginx partial") {
		customConfig = true
	}
	if warnIfCustomFiles(filepath.Join(ddevDir, "mysql"), "*.cnf", "mysql") {
		customConfig = true
	}
	if warnIfCustomFiles(filepath.Join(ddevDir, "php"), "*.ini", "PHP") {
		customConfig = true
	}

	if customConfig {
		util.Warning("Custom configuration takes effect when container is created, \nusually on start, use 'ddev restart' if you're not seeing it take effect.")
	}
}
// composeYAMLVars holds the values injected into the docker-compose.yaml
// template by RenderComposeYAML.
type composeYAMLVars struct {
	Name                 string   // project name
	Plugin               string   // plugin identifier; set to "ddev"
	AppType              string   // project type (app.Type)
	MailhogPort          string   // host port for mailhog
	DBAPort              string   // host port for the DB admin UI
	DBPort               string   // host port for the database
	DdevGenerated        string   // ddev file-signature marker
	HostDockerInternalIP string   // IP used for host.docker.internal resolution
	ComposeVersion       string   // docker-compose file format version
	MountType            string   // "bind" or "volume"
	WebMount             string   // source of the web container mount
	OmitDBA              bool     // true to skip the dba container
	OmitSSHAgent         bool     // true to skip the ddev-ssh-agent container
	WebcacheEnabled      bool     // true when the webcache volume mount is used
	NFSMountEnabled      bool     // true when the NFS volume mount is used
	NFSSource            string   // NFS export path (only when NFS is enabled)
	DockerIP             string   // IP address of the docker host
	IsWindowsFS          bool     // true when running on Windows
	Hostnames            []string // all hostnames configured for the project
}
// RenderComposeYAML renders the contents of docker-compose.yaml from
// DDevComposeTemplate and the app's current settings.
//
// Fix vs. original: the template was parsed twice (template.New(...).Parse
// followed by a second templ.Parse of the same text); the redundant second
// parse has been removed.
func (app *DdevApp) RenderComposeYAML() (string, error) {
	var doc bytes.Buffer
	var err error

	templ, err := template.New("compose template").Funcs(sprig.HtmlFuncMap()).Parse(DDevComposeTemplate)
	if err != nil {
		return "", err
	}

	hostDockerInternalIP, err := dockerutil.GetHostDockerInternalIP()
	if err != nil {
		return "", err
	}

	// The fallthrough default for hostDockerInternalIdentifier is the
	// hostDockerInternalHostname == host.docker.internal
	templateVars := composeYAMLVars{
		Name:                 app.Name,
		Plugin:               "ddev",
		AppType:              app.Type,
		MailhogPort:          appports.GetPort("mailhog"),
		DBAPort:              appports.GetPort("dba"),
		DBPort:               appports.GetPort("db"),
		DdevGenerated:        DdevFileSignature,
		HostDockerInternalIP: hostDockerInternalIP,
		ComposeVersion:       version.DockerComposeFileFormatVersion,
		OmitDBA:              nodeps.ArrayContainsString(app.OmitContainers, "dba"),
		OmitSSHAgent:         nodeps.ArrayContainsString(app.OmitContainers, "ddev-ssh-agent"),
		WebcacheEnabled:      app.WebcacheEnabled,
		NFSMountEnabled:      app.NFSMountEnabled,
		NFSSource:            "",
		IsWindowsFS:          runtime.GOOS == "windows",
		MountType:            "bind",
		WebMount:             "../",
		Hostnames:            app.GetHostnames(),
	}

	// Webcache and NFS both switch the web mount from a bind mount to a
	// named volume.
	if app.WebcacheEnabled {
		templateVars.MountType = "volume"
		templateVars.WebMount = "webcachevol"
	}
	if app.NFSMountEnabled {
		templateVars.MountType = "volume"
		templateVars.WebMount = "nfsmount"
		templateVars.NFSSource = app.AppRoot
		if runtime.GOOS == "windows" {
			// WinNFSD can only handle a mountpoint like /C/Users/rfay/workspace/d8git
			// and completely chokes in C:\Users\rfay...
			templateVars.NFSSource = dockerutil.MassageWIndowsNFSMount(app.AppRoot)
		}
	}

	templateVars.DockerIP, err = dockerutil.GetDockerIP()
	if err != nil {
		return "", err
	}

	err = templ.Execute(&doc, templateVars)
	return doc.String(), err
}
// promptForName prompts the user for a project name, suggesting the current
// working directory's base name when it is a valid hostname, and validates
// the final answer with the provider.
func (app *DdevApp) promptForName() error {
	provider, err := app.GetProvider()
	if err != nil {
		return err
	}

	if app.Name == "" {
		if dir, dirErr := os.Getwd(); dirErr == nil {
			suggestion := filepath.Base(dir)
			// if working directory name is invalid for hostnames, we shouldn't suggest it
			if hostRegex.MatchString(suggestion) {
				app.Name = suggestion
			}
		}
	}

	app.Name = util.Prompt("Project name", app.Name)
	return provider.ValidateField("Name", app.Name)
}
// AvailableDocrootLocations returns a list of default docroot locations to
// look for, in priority order.
func AvailableDocrootLocations() []string {
	return []string{
		"web/public",
		"web",
		"docroot",
		"htdocs",
		"_www",
		"public",
	}
}
// DiscoverDefaultDocroot returns the default docroot directory: the app's
// configured docroot if set, otherwise the first well-known docroot location
// (under the app root) that contains an index.php, or "" if none is found.
func DiscoverDefaultDocroot(app *DdevApp) string {
	// Prefer an already-configured docroot.
	if app.Docroot != "" {
		return app.Docroot
	}

	for _, candidate := range AvailableDocrootLocations() {
		if _, err := os.Stat(filepath.Join(app.AppRoot, candidate)); err != nil {
			continue
		}
		if fileutil.FileExists(filepath.Join(app.AppRoot, candidate, "index.php")) {
			return candidate
		}
	}
	return ""
}
// docrootPrompt interactively determines the document root, offers to create
// it if it does not exist, and validates the result with the provider.
func (app *DdevApp) docrootPrompt() error {
	provider, err := app.GetProvider()
	if err != nil {
		return err
	}

	// Determine the document root.
	util.Warning("\nThe docroot is the directory from which your site is served.\nThis is a relative path from your project root at %s", app.AppRoot)
	output.UserOut.Println("You may leave this value blank if your site files are in the project root")
	var docrootPrompt = "Docroot Location"
	var defaultDocroot = DiscoverDefaultDocroot(app)
	// If there is a default docroot, display it in the prompt.
	if defaultDocroot != "" {
		docrootPrompt = fmt.Sprintf("%s (%s)", docrootPrompt, defaultDocroot)
	} else if cd, _ := os.Getwd(); cd == filepath.Join(app.AppRoot, defaultDocroot) {
		// Preserve the case where the docroot is the current directory
		// (defaultDocroot is "" here, so the Join yields app.AppRoot).
		docrootPrompt = fmt.Sprintf("%s (current directory)", docrootPrompt)
	} else {
		// Explicitly state 'project root' when in a subdirectory
		docrootPrompt = fmt.Sprintf("%s (project root)", docrootPrompt)
	}

	fmt.Print(docrootPrompt + ": ")
	app.Docroot = util.GetInput(defaultDocroot)

	// Ensure the docroot exists. If it doesn't, prompt the user to verify they entered it correctly.
	fullPath := filepath.Join(app.AppRoot, app.Docroot)
	if _, err := os.Stat(fullPath); os.IsNotExist(err) {
		util.Warning("Warning: the provided docroot at %s does not currently exist.", fullPath)

		// Ask the user for permission to create the docroot
		if !util.Confirm(fmt.Sprintf("Create docroot at %s?", fullPath)) {
			return fmt.Errorf("docroot must exist to continue configuration")
		}

		if err = os.MkdirAll(fullPath, 0755); err != nil {
			return fmt.Errorf("unable to create docroot: %v", err)
		}

		util.Success("Created docroot at %s.", fullPath)
	}

	return provider.ValidateField("Docroot", app.Docroot)
}
// ConfigExists determines if a ddev config file exists for this application.
// Note that any stat error other than "does not exist" (e.g. a permission
// problem) still reports true, matching the original behavior.
func (app *DdevApp) ConfigExists() bool {
	_, err := os.Stat(app.ConfigPath)
	return !os.IsNotExist(err)
}
// AppTypePrompt handles the project Type workflow: auto-detect a type, use
// it as the prompt default, and re-prompt until a valid type is entered.
func (app *DdevApp) AppTypePrompt() error {
	provider, err := app.GetProvider()
	if err != nil {
		return err
	}
	validAppTypes := strings.Join(GetValidAppTypes(), ", ")
	typePrompt := fmt.Sprintf("Project Type [%s]", validAppTypes)

	// First, see if we can auto detect what kind of site it is so we can set a sane default.
	detectedAppType := app.DetectAppType()
	// NOTE(review): an earlier comment claimed a special "confirm if php"
	// path; the code below always informs the user and simply uses the
	// detected type as the prompt default.
	// If we found an application type just set it and inform the user.
	util.Success("Found a %s codebase at %s.", detectedAppType, filepath.Join(app.AppRoot, app.Docroot))
	typePrompt = fmt.Sprintf("%s (%s)", typePrompt, detectedAppType)

	fmt.Printf(typePrompt + ": ")
	appType := strings.ToLower(util.GetInput(detectedAppType))

	// Re-prompt until the entered type is one of the valid project types.
	for !IsValidAppType(appType) {
		output.UserOut.Errorf("'%s' is not a valid project type. Allowed project types are: %s\n", appType, validAppTypes)

		fmt.Printf(typePrompt + ": ")
		appType = strings.ToLower(util.GetInput(appType))
	}
	app.Type = appType
	return provider.ValidateField("Type", app.Type)
}
// PrepDdevDirectory creates a .ddev directory in the current working directory
// (if needed) and ensures it contains a .gitignore for ddev-generated files.
func PrepDdevDirectory(dir string) error {
	_, statErr := os.Stat(dir)
	if os.IsNotExist(statErr) {
		log.WithFields(log.Fields{
			"directory": dir,
		}).Debug("Config Directory does not exist, attempting to create.")

		if mkErr := os.MkdirAll(dir, 0755); mkErr != nil {
			return mkErr
		}
	}

	if err := CreateGitIgnore(dir, "import.yaml", "docker-compose.yaml", "db_snapshots", "sequelpro.spf", "import-db", ".bgsync*", "config.*.y*ml"); err != nil {
		return fmt.Errorf("failed to create gitignore in %s: %v", dir, err)
	}
	return nil
}
// validateCommandYaml validates command hooks and tasks defined in hooks for config.yaml
func validateCommandYaml(source []byte) error {
	validHooks := []string{
		"pre-start",
		"post-start",
		"pre-import-db",
		"post-import-db",
		"pre-import-files",
		"post-import-files",
	}

	validTasks := []string{
		"exec",
		"exec-host",
	}

	// contains reports whether needle appears in haystack.
	contains := func(haystack []string, needle string) bool {
		for _, candidate := range haystack {
			if candidate == needle {
				return true
			}
		}
		return false
	}

	type Validate struct {
		Commands map[string][]map[string]interface{} `yaml:"hooks,omitempty"`
	}
	val := &Validate{}

	if err := yaml.Unmarshal(source, val); err != nil {
		return err
	}

	for command, tasks := range val.Commands {
		// The hook name itself must be one of the known hook points.
		if !contains(validHooks, command) {
			return fmt.Errorf("invalid command hook %s defined in config.yaml", command)
		}

		// Every task under the hook must be a known task type.
		for _, taskSet := range tasks {
			for taskName := range taskSet {
				if !contains(validTasks, taskName) {
					return fmt.Errorf("invalid task '%s' defined for %s hook in config.yaml", taskName, command)
				}
			}
		}
	}

	return nil
}
| [
"\"DDEV_TEST_WEBSERVER_TYPE\"",
"\"DDEV_TEST_USE_WEBCACHE\"",
"\"DDEV_TEST_USE_NFSMOUNT\""
]
| []
| [
"DDEV_TEST_USE_WEBCACHE",
"DDEV_TEST_WEBSERVER_TYPE",
"DDEV_TEST_USE_NFSMOUNT"
]
| [] | ["DDEV_TEST_USE_WEBCACHE", "DDEV_TEST_WEBSERVER_TYPE", "DDEV_TEST_USE_NFSMOUNT"] | go | 3 | 0 | |
cmd/capmcd/capmcd.go | /*
* MIT License
*
* (C) Copyright [2019-2021] Hewlett Packard Enterprise Development LP
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
package main
import (
"bytes"
"context"
"encoding/json"
"flag"
"fmt"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/Cray-HPE/hms-capmc/internal/capmc"
"github.com/Cray-HPE/hms-certs/pkg/hms_certs"
base "github.com/Cray-HPE/hms-base"
compcreds "github.com/Cray-HPE/hms-compcredentials"
sstorage "github.com/Cray-HPE/hms-securestorage"
)
// clientTimeout is the per-request timeout applied to all HTTP clients.
const clientTimeout = time.Duration(180) * time.Second

// API defines the relationship between the REST URL and the backend server
// function
type API struct {
	pattern string           // URL path pattern registered with the mux
	handler http.HandlerFunc // handler invoked for requests matching pattern
}

// APIs contains groupings of REST URL to backend server functionality for the
// different versions of the API
type APIs []API

// serviceName is the service/instance name used in log messages.
var serviceName string

// svc is the global CAPMC service state shared by all handlers.
var svc CapmcD

// hms_ca_uri is the CA bundle URI used for TLS-validated Redfish clients.
var hms_ca_uri string

// rfClientLock guards svc.rfClient while the CA bundle is being rolled.
var rfClientLock sync.RWMutex
// TODO Move these to new file router.go
// capmcAPIs maps each supported CAPMC REST endpoint to its handler, grouped
// by API version (currently only v1).
var capmcAPIs = []APIs{
	{
		API{capmc.HealthV1, svc.doHealth},
		API{capmc.LivenessV1, svc.doLiveness},
		API{capmc.PowerCapCapabilitiesV1, svc.doPowerCapCapabilities},
		API{capmc.PowerCapGetV1, svc.doPowerCapGet},
		API{capmc.PowerCapSetV1, svc.doPowerCapSet},
		API{capmc.ReadinessV1, svc.doReadiness},
		API{capmc.XnameOffV1, svc.doXnameOff},
		API{capmc.XnameOnV1, svc.doXnameOn},
		API{capmc.XnameReinitV1, svc.doXnameReinit},
		API{capmc.XnameStatusV1, svc.doXnameStatus},
	},
}
// ResponseWriter contains the http ResponseWriter function to allow for
// wrapping of HTTP calls, as a server, for logging purposes.
type ResponseWriter struct {
	status int    // HTTP status code written to the response
	length int    // total number of body bytes written
	data   string // captured response body (populated only at debug level > 1)
	http.ResponseWriter
}
// WriteHeader wraps HTTP calls, as a server, to enable logging of requests and
// responses. It records the status code before delegating to the wrapped writer.
func (w *ResponseWriter) WriteHeader(status int) {
	w.status = status
	w.ResponseWriter.WriteHeader(status)
}
// Write wraps HTTP calls, as a server, to enable logging of requests and
// responses. It captures the body at high debug levels and accumulates the
// byte count written.
func (w *ResponseWriter) Write(b []byte) (int, error) {
	if svc.debug && svc.debugLevel > 1 {
		// This is simpler than using httptest.NewRecorder in the
		// middleware logger for a response to an incoming request.
		w.data = string(b)
	}

	n, err := w.ResponseWriter.Write(b)
	w.length += n

	return n, err
}
// suppressLogPaths is the set of URL paths whose requests are not logged.
var suppressLogPaths map[string]bool

// suppressLoggingForPath adds a path to the set of paths for which request
// logging is suppressed.
func suppressLoggingForPath(p string) {
	// Lazily create the set on first use.
	if suppressLogPaths == nil {
		suppressLogPaths = map[string]bool{}
	}
	suppressLogPaths[p] = true
}
// isPathSuppressed reports whether logging is suppressed for the given path.
//
// Indexing a nil map safely yields the zero value (false), so no explicit
// nil check is needed. The original code also returned the presence ("ok")
// flag rather than the stored value; since only true is ever stored the two
// are equivalent, and the plain lookup is the idiomatic form.
func isPathSuppressed(p string) bool {
	return suppressLogPaths[p]
}
// logRequest is a middleware wrapper that handles logging server HTTP
// inbound requests and outbound responses. Request/response lines are
// suppressed for paths registered via suppressLoggingForPath; full dumps
// are emitted only when debugging is on at level > 1.
func logRequest(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var (
			sendFmts = []string{
				"Debug: --> Response %s:\n%s\n",
				"Debug: --> Response %s: %q\n", // "safe"
			}
			recvFmts = []string{
				"Debug: <-- Request %s:\n%s\n",
				"Debug: <-- Request %s: %q\n", // "safe"
			}
		)

		// Wrap the writer so status/length/body can be observed afterward.
		rw := &ResponseWriter{
			status:         http.StatusOK,
			ResponseWriter: w,
		}

		// see if this is a path to be suppressed
		suppressLog := isPathSuppressed(r.URL.Path)

		start := time.Now()

		if !suppressLog {
			log.Printf("Info: <-- %s HTTP %s %s\n",
				r.RemoteAddr, r.Method, r.URL)
		}

		if svc.debug && svc.debugLevel > 1 {
			// Odd debug levels select the %q ("safe") format.
			sendFmt := sendFmts[svc.debugLevel%2]

			dump, err := httputil.DumpRequest(r, true)
			if err != nil {
				log.Printf("Debug: failed to dump request: %s",
					err)
				http.Error(w, fmt.Sprint(err),
					http.StatusInternalServerError)
				return
			}
			s := bytes.SplitAfterN(dump, []byte("\r\n\r\n"), 2)
			if svc.debugLevel > 3 {
				log.Printf(sendFmt, "Header", s[0])
			}
			// NOTE(review): this logs the entire dump (headers+body)
			// under the "Body" label, unlike the client-side logger
			// in RoundTrip which logs only s[1] — confirm intent.
			log.Printf(sendFmt, "Body", dump)
		}

		handler.ServeHTTP(rw, r)

		if !suppressLog {
			log.Printf("Info: --> %s HTTP %d %s %s %s (%s)",
				r.RemoteAddr, rw.status, http.StatusText(rw.status),
				r.Method, r.URL.String(), time.Since(start))
		}

		if svc.debug && svc.debugLevel > 1 {
			// Capturing the response headers requires more
			// work using httptest.NewRecorder. Skip for now.
			recvFmt := recvFmts[svc.debugLevel%2]
			log.Printf(recvFmt, "Body", rw.data)
		}
	})
}
// Transport contains the http RoundTripper function to allow for wrapping of
// HTTP calls, as a client, for logging purposes.
type Transport struct {
	Transport http.RoundTripper // the wrapped transport that performs the real request
}
// RoundTrip wraps HTTP calls, as a client, to enable logging of requests and
// responses. Full dumps are emitted only when debugging is on at level > 1;
// odd debug levels select the %q ("safe") format.
//
// Fix vs. original: the response dump was split with bytes.SplitAfterN
// *before* the DumpResponse error was checked; the split now happens only
// on the success path, matching the request-side handling above.
func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
	var (
		sendFmts = []string{
			"Debug: --> Request %s:\n%s\n",
			"Debug: --> Request %s: %q\n", // "safe"
		}
		recvFmts = []string{
			"Debug: <-- Response %s:\n%s\n",
			"Debug: <-- Response %s: %q\n", // "safe"
		}
	)

	log.Printf("Info: --> HTTP %s %s\n", r.Method, r.URL)

	if svc.debug && svc.debugLevel > 1 {
		sendFmt := sendFmts[svc.debugLevel%2]

		dump, err := httputil.DumpRequestOut(r, true)
		if err != nil {
			log.Printf("Debug: failed to dump request: %s", err)
		} else {
			s := bytes.SplitAfterN(dump, []byte("\r\n\r\n"), 2)
			if svc.debugLevel > 3 {
				log.Printf(sendFmt, "Header", s[0])
			}
			log.Printf(sendFmt, "Body", s[1])
		}
	}

	start := time.Now()
	resp, err := t.Transport.RoundTrip(r)
	if err != nil {
		return resp, err
	}

	log.Printf("Info: <-- HTTP %s %s %s (%s)\n",
		resp.Status, resp.Request.Method, resp.Request.URL,
		time.Since(start))

	if svc.debug && svc.debugLevel > 1 {
		recvFmt := recvFmts[svc.debugLevel%2]

		dump, err := httputil.DumpResponse(resp, true)
		if err != nil {
			log.Printf("Debug: failed to dump response: %s", err)
		} else {
			s := bytes.SplitAfterN(dump, []byte("\r\n\r\n"), 2)
			if svc.debugLevel > 3 {
				log.Printf(recvFmt, "Header", s[0])
			}
			log.Printf(recvFmt, "Body", s[1])
		}
	}

	return resp, err
}
// clientFlags is a bitmask modifying HTTP client construction in makeClient.
type clientFlags int

const (
	// clientInsecure requests a client with no CA-bundle TLS validation.
	clientInsecure clientFlags = 1 << iota
	// Other flags could be added here...
)
// makeClient constructs an HTTP client pair with the given per-request
// timeout. When clientInsecure is set in flags, no CA bundle URI is passed
// so TLS validation is not performed; otherwise the global hms_ca_uri is
// used.
func makeClient(flags clientFlags, timeout time.Duration) (*hms_certs.HTTPClientPair, error) {
	var pair *hms_certs.HTTPClientPair
	var err error

	uri := hms_ca_uri
	if (flags & clientInsecure) != 0 {
		uri = ""
	}

	toSec := int(timeout.Seconds())
	pair, err = hms_certs.CreateHTTPClientPair(uri, toSec)
	if err != nil {
		log.Printf("ERROR: Can't set up cert-secured HTTP client: %v", err)
		return nil, err
	}

	return pair, nil
}
// This will initialize the global CapmcD struct with default values upon startup
// NOTE(review): currently a no-op placeholder; the svc fields are actually
// populated in main() from flags and the loaded configuration.
func init() {
}
// caCB is called when the CA cert bundle is rolled. It blocks all Redfish
// traffic (writer lock), rebuilds the Redfish HTTP client with the new
// bundle, then resumes traffic. On failure the previous client is retained.
//
// NOTE(review): captureTag is referenced here but no package-level
// declaration is visible in this file chunk (main declares a local of the
// same name) — verify it is declared elsewhere.
func caCB(caData string) {
	log.Printf("INFO: Updating CA bundle for Redfish HTTP transports.")

	//Wait for all reader locks to release, prevent new reader locks. Once
	//acquired, all RF calls are blocked.
	rfClientLock.Lock()
	log.Printf("INFO: All RF threads are paused.")

	//Update the the transports.
	if captureTag != "" {
		svc.rfClient = captureClient(0, clientTimeout)
	} else {
		cl, err := makeClient(0, clientTimeout)
		if err != nil {
			log.Printf("ERROR: can't create Redfish HTTP client after CA roll: %v", err)
			log.Printf("       Using previous HTTP client (CA bundle may not work.)")
		} else {
			svc.rfClient = cl
		}
	}

	log.Printf("Redfish transport clients updated with new CA bundle.")
	rfClientLock.Unlock()
}
// setupRedfishHTTPClients sets up the Redfish HTTP client — TLS-validated
// when a CA bundle URI (hms_ca_uri) is configured — and registers a watcher
// so CA bundle rolls rebuild the transport via caCB. Returns an error only
// when client creation itself fails.
func setupRedfishHTTPClients(captureTag string) error {
	var err error

	if hms_ca_uri != "" {
		log.Printf("INFO: Creating Redfish HTTP transport using CA bundle '%s'",
			hms_ca_uri)
	} else {
		log.Printf("INFO: Creating non-validated Redfish HTTP transport (no CA bundle)")
	}

	// In capture mode a recording client is used; err stays nil on that path.
	if captureTag != "" {
		svc.rfClient = captureClient(0, clientTimeout)
	} else {
		svc.rfClient, err = makeClient(0, clientTimeout)
	}
	if err != nil {
		log.Printf("ERROR setting up Redfish HTTP transport: '%v'", err)
		return err
	}

	if hms_ca_uri != "" {
		err := hms_certs.CAUpdateRegister(hms_ca_uri, caCB)
		if err != nil {
			log.Printf("ERROR: can't register CA bundle watcher: %v", err)
			log.Printf("       This means CA bundle updates will not update Redfish HTTP transports.")
		} else {
			log.Printf("INFO: CA bundle watcher registered for '%s'.", hms_ca_uri)
		}
	} else {
		log.Printf("INFO: CA bundle URI is empty, no CA bundle watcher registered.")
	}

	return nil
}
// main is the CAPMC service entry point: it parses flags, loads config,
// validates the HSM URL, wires up Vault and HTTP clients (in background
// retry loops), registers the REST routes, runs the HTTP server, and on
// SIGINT/SIGTERM performs an orderly shutdown.
func main() {
	var (
		configFile string
		hsm        string
		err        error
	)

	// TODO Add debug levels at some point
	flag.BoolVar(&svc.debug, "debug", false, "enable debug messages")
	flag.IntVar(&svc.debugLevel, "debug-level", 0, "increase debug verbosity")
	// Simulation only
	// NOTE: if this is set to 'true' then all of the calls to BMC's will only
	// be logged and not executed. This is provided as a mechanism for testing
	// on real hardware without actually turning systems on and off.
	flag.BoolVar(&svc.simulationOnly, "simulateOnly", false, "Only log calls to BMC's instead of executing them")
	// TODO Add support for specifying http/https with the latter as default
	// It might make sense to use the URI format here too.
	flag.StringVar(&svc.httpListen, "http-listen", "0.0.0.0:27777", "HTTP server IP + port binding")
	flag.StringVar(&hsm, "hsm", "http://localhost:27779",
		"Hardware State Manager location as URI, e.g. [scheme]://[host[:port]]")
	flag.StringVar(&hms_ca_uri, "ca_uri", "",
		"Certificate Authority CA bundle URI")
	// The "default" is installed with the service. The intent is
	// ConfigPath/ConfigFile is a customized config and
	// ConfigPath/default/ConfigFile contains the installed (and internal)
	// default values.
	// TODO Add a development ConfigPath allowing for non-install,
	// non-container development without needed to specify -config <file>.
	flag.StringVar(&configFile, "config",
		filepath.Join(ConfigPath, "default", ConfigFile),
		"Configuration file")

	var captureTag string
	flag.StringVar(&captureTag, "capture", "",
		"Capture client traffic for test case using TAG")

	flag.Parse()

	log.SetFlags(log.Lshortfile | log.LstdFlags)

	serviceName, err = base.GetServiceInstanceName()
	if err != nil {
		serviceName = "CAPMC"
		log.Printf("WARNING: can't get service/instance name, using: '%s'",
			serviceName)
	}
	log.Printf("Service name/instance: '%s'", serviceName)

	svc.config = loadConfig(configFile)
	conf := svc.config.CapmcConf

	log.Printf("Configuration loaded:\n")
	log.Printf("\tMax workers: %d\n", conf.ActionMaxWorkers)
	log.Printf("\tOn unsupported action: %s\n", conf.OnUnsupportedAction)
	log.Printf("\tReinit seq: %v\n", conf.ReinitActionSeq)
	log.Printf("\tWait for off retries: %d\n", conf.WaitForOffRetries)
	log.Printf("\tWait for off sleep: %d\n", conf.WaitForOffSleep)

	svc.ActionMaxWorkers = conf.ActionMaxWorkers
	svc.OnUnsupportedAction = conf.OnUnsupportedAction
	svc.ReinitActionSeq = conf.ReinitActionSeq

	// log the hostname of this instance - mostly useful for pod name in
	// multi-replica k8s environment
	hostname, hostErr := os.Hostname()
	if hostErr != nil {
		log.Printf("Error getting hostname:%s", hostErr.Error())
	} else {
		log.Printf("Starting on host: %s", hostname)
	}

	// log if this is in simulate only mode
	if svc.simulationOnly {
		log.Printf("WARNING: Started in SIMULATION ONLY mode - no commands will be sent to BMC hardware")
	}

	// CapmcD is both HTTP server and client
	if svc.hsmURL, err = url.Parse(hsm); err != nil {
		log.Fatalf("Invalid HSM URI specified: %s", err)
	}

	// Set up the hsm information before connecting to any external
	// resources since we may bail if we don't find what we want

	// Check for non-empty URL (URI) scheme
	if !svc.hsmURL.IsAbs() {
		log.Fatal("WARNING: HSM URL not absolute\n")
	}

	switch svc.hsmURL.Scheme {
	case "http", "https":
		log.Printf("Info: hardware state manager (HSM) --> %s\n",
			svc.hsmURL.String())
		// Stash the HSM Base version in the URL.Path (default)
		// XXX Should the HSM API (default) version be configurable?
		switch svc.hsmURL.Path {
		case "":
			svc.hsmURL.Path = "/hsm/v1"
		case "/hsm/v1":
			// do nothing
		default:
			if !strings.HasSuffix(svc.hsmURL.Path, "/hsm/v1") {
				svc.hsmURL.Path += "/hsm/v1"
			}
		}
	default:
		log.Fatalf("Unexpected HSM URL scheme: %s", svc.hsmURL.Scheme)
	}

	//CA/cert stuff
	// Environment variables may override the default Vault CA/PKI URLs and
	// supply the CA bundle URI when not given on the command line.
	vurl := os.Getenv("CAPMC_VAULT_CA_URL")
	if vurl != "" {
		log.Printf("Replacing default Vault CA URL with: '%s'", vurl)
		hms_certs.ConfigParams.VaultCAUrl = vurl
	}
	vurl = os.Getenv("CAPMC_VAULT_PKI_URL")
	if vurl != "" {
		log.Printf("Replacing default Vault PKI URL with: '%s'", vurl)
		hms_certs.ConfigParams.VaultPKIUrl = vurl
	}
	if hms_ca_uri == "" {
		vurl = os.Getenv("CAPMC_CA_URI")
		if vurl != "" {
			log.Printf("Using CA URI: '%s'", vurl)
			hms_ca_uri = vurl
		}
	}
	vurl = os.Getenv("CAPMC_LOG_INSECURE_FAILOVER")
	if vurl != "" {
		yn, _ := strconv.ParseBool(vurl)
		if yn == false {
			hms_certs.ConfigParams.LogInsecureFailover = false
		}
	}

	hms_certs.InitInstance(nil, serviceName)

	log.Printf("CAPMC serivce starting (debug=%v)\n", svc.debug)

	// set up a channel to wait for the os to tell us to stop
	// NOTE - must be set up before initializing anything that needs
	// to be cleaned up. This will trap any signals and wait to
	// process them until the channel is read.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)

	//initialize the service-reservation pkg
	svc.reservation.Init(svc.hsmURL.Scheme+"://"+svc.hsmURL.Host, "", 3, nil)
	svc.reservationsEnabled = true

	// Spin a thread for connecting to Vault
	go func() {
		const (
			initBackoff time.Duration = 5
			maxBackoff  time.Duration = 60
		)
		var err error
		backoff := initBackoff
		for {
			log.Printf("Info: Connecting to secure store (Vault)...")
			// Start a connection to Vault
			if svc.ss, err = sstorage.NewVaultAdapter(""); err != nil {
				log.Printf("Info: Secure Store connection failed - %s", err)
				time.Sleep(backoff * time.Second)
			} else {
				log.Printf("Info: Connection to secure store (Vault) succeeded")
				vaultKeypath, ok := os.LookupEnv("VAULT_KEYPATH")
				if !ok {
					vaultKeypath = "secret/hms-creds"
				}
				svc.ccs = compcreds.NewCompCredStore(vaultKeypath, svc.ss)
				break
			}
			// doubling backoff, capped at maxBackoff
			if backoff < maxBackoff {
				backoff += backoff
			}
			if backoff > maxBackoff {
				backoff = maxBackoff
			}
		}
	}()

	//Set up non-secure HTTP client for HSM, etc.
	log.Printf("INFO: Creating inter-service HTTP transport (not TLS validated).")
	if captureTag != "" {
		captureInit(captureTag)
		svc.smClient = captureClient(clientInsecure, clientTimeout)
	} else {
		svc.smClient, _ = makeClient(clientInsecure, clientTimeout)
	}

	//Set up secure HTTP clients for Redfish. Keep trying in the background until success.
	go func() {
		const (
			initBackoff time.Duration = 2
			maxBackoff  time.Duration = 30
		)
		backoff := initBackoff
		for {
			log.Printf("Info: Creating Redfish secure HTTP client transports...")
			err := setupRedfishHTTPClients(captureTag)
			if err == nil {
				log.Printf("Info: Success creating Redfish HTTP clients.")
				break
			} else {
				log.Printf("Redfish HTTP client creation error: %v", err)
			}
			// doubling backoff, capped at maxBackoff
			if backoff < maxBackoff {
				backoff += backoff
			}
			if backoff > maxBackoff {
				backoff = maxBackoff
			}
		}
	}()

	// These are registrations for the CAPMC API calls.
	for _, vers := range capmcAPIs {
		for _, api := range vers {
			if captureTag == "" {
				http.HandleFunc(api.pattern, api.handler)
			} else {
				captureHandleFunc(api.pattern, api.handler)
			}
		}
	}

	// Do not log the calls for liveness/readiness
	suppressLoggingForPath(capmc.LivenessV1)
	suppressLoggingForPath(capmc.ReadinessV1)

	// Spin up our global worker goroutine pool.
	svc.WPool = base.NewWorkerPool(svc.ActionMaxWorkers, svc.ActionMaxWorkers*10)
	svc.WPool.Run()

	// The following thread talks about limiting the max post body size...
	// https://stackoverflow.com/questions/28282370/is-it-advisable-to-further-limit-the-size-of-forms-when-using-golang

	// spin the server in a separate thread so main can wait on an os
	// signal to cleanly shut down
	httpSrv := http.Server{
		Addr:    svc.httpListen,
		Handler: logRequest(http.DefaultServeMux),
	}
	go func() {
		// NOTE: do not use log.Fatal as that will immediately exit
		// the program and short-circuit the shutdown logic below
		log.Printf("Info: Server %s\n", httpSrv.ListenAndServe())
	}()
	log.Printf("Info: CAPMC API listening on: %v\n", svc.httpListen)

	//////////////////
	// Clean shutdown section
	//////////////////

	// wait here for a signal from the os that we are shutting down
	sig := <-sigs
	log.Printf("Info: Detected signal to close service: %s", sig)

	// The service is being killed, so release all active locks in hsm
	// NOTE: this happens when k8s kills a pod
	svc.removeAllActiveReservations()

	// stop the server from taking requests
	// NOTE: this waits for active connections to finish
	log.Printf("Info: Server shutting down")
	httpSrv.Shutdown(context.Background())

	// terminate worker pool
	// NOTE: have to do this the hard way since there isn't a
	// clean shutdown implemented on the WorkerPool
	log.Printf("Info: Finishing current jobs in the pool")
	waitTime := time.Second * 10 // hard code to 10 sec wait time for now
	poolTimeout := time.Now().Add(waitTime)
	for {
		// if we have hit the timeout or all the jobs are out of the queue
		// we can bail
		if len(svc.WPool.JobQueue) == 0 || time.Now().After(poolTimeout) {
			break
		}
		// wait another second then check again
		time.Sleep(time.Second)
	}
	// with no jobs left in the queue, we can stop the service
	// this waits until currently running jobs are complete before exiting
	svc.WPool.Stop()

	// NOTE: This is where we should terminate our connection to the
	// vault, but it looks like there is no way to do so at this time.
	// NOTE: The db connection is being shut down via a deferred function call

	// shut down hsm client connections
	log.Printf("Info: Closing idle client connections...")
	svc.smClient.CloseIdleConnections()
	svc.rfClient.CloseIdleConnections()

	log.Printf("Info: Service Exiting.")
}
// TODO https://blog.golang.org/error-handling-and-go
// Send error or empty OK response. This format matches existing CAPMC API.
// Error code is the http status response.
func sendJsonError(w http.ResponseWriter, ecode int, message string) {
// If HTTP call is success, put zero in returned json error field.
// This is what Cascade capmc does today.
httpCode := ecode
if ecode >= 200 && ecode <= 299 {
ecode = 0
}
data := capmc.ErrResponse{
E: ecode,
ErrMsg: message,
}
SendResponseJSON(w, httpCode, data)
}
// SendResponseJSON sends data marshalled as a JSON body and sets the HTTP
// status code to sc.
func SendResponseJSON(w http.ResponseWriter, sc int, data interface{}) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(sc)
err := json.NewEncoder(w).Encode(data)
if err != nil {
log.Printf("Error: encoding/sending JSON response: %s\n", err)
return
}
}
// NotImplemented is used as a placeholder API entry point. It answers any
// request with HTTP 501 and a body naming the unimplemented path.
func (d *CapmcD) NotImplemented(w http.ResponseWriter, r *http.Request) {
	body := capmc.ErrResponse{
		E:      http.StatusNotImplemented,
		ErrMsg: fmt.Sprintf("%s API Unavailable/Not Implemented", r.URL.Path),
	}
	// The original ended with a redundant bare `return`; removed.
	SendResponseJSON(w, http.StatusNotImplemented, body)
}
| [
"\"CAPMC_VAULT_CA_URL\"",
"\"CAPMC_VAULT_PKI_URL\"",
"\"CAPMC_CA_URI\"",
"\"CAPMC_LOG_INSECURE_FAILOVER\""
]
| []
| [
"CAPMC_VAULT_CA_URL",
"CAPMC_VAULT_PKI_URL",
"CAPMC_CA_URI",
"CAPMC_LOG_INSECURE_FAILOVER"
]
| [] | ["CAPMC_VAULT_CA_URL", "CAPMC_VAULT_PKI_URL", "CAPMC_CA_URI", "CAPMC_LOG_INSECURE_FAILOVER"] | go | 4 | 0 | |
openwisp_ipam/tests/tests.py | import os
from unittest import skipIf
import swapper
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import ModelForm
from django.test import TestCase
from django.urls import reverse
from openwisp_utils.tests.utils import TestMultitenantAdminMixin
from django_ipam.tests.base.test_admin import BaseTestAdmin
from django_ipam.tests.base.test_api import BaseTestApi
from django_ipam.tests.base.test_commands import BaseTestCommands
from django_ipam.tests.base.test_forms import BaseTestForms
from django_ipam.tests.base.test_models import BaseTestModel
from .mixins import CreateModelsMixin, FileMixin, PostDataMixin
class TestAdmin(BaseTestAdmin, CreateModelsMixin, PostDataMixin, TestCase):
    """Admin integration tests for the CSV subnet/IP import view."""

    app_name = 'openwisp_ipam'
    ipaddress_model = swapper.load_model('openwisp_ipam', 'IPAddress')
    subnet_model = swapper.load_model('openwisp_ipam', 'Subnet')

    def setUp(self):
        # An organization must exist first: subnets and IPs are
        # organization-scoped in openwisp_ipam.
        self._create_org()
        super(TestAdmin, self).setUp()

    # NOTE(review): the original code called
    # 'admin:ipam_import_subnet'.format(self.app_name) in these tests, but the
    # string contains no placeholder, so .format() was a no-op and has been
    # removed (behavior unchanged). It may have intended
    # 'admin:{}_import_subnet' -- confirm against the registered admin URL
    # names before renaming anything.
    def test_csv_upload(self):
        """A valid CSV import creates the subnet and all listed addresses."""
        csv_data = """Monachers - Matera,
10.27.1.0/24,
Monachers,
,
ip address,description
10.27.1.1,Monachers
10.27.1.252,NanoStation M5
10.27.1.253,NanoStation M5
10.27.1.254,Nano Beam 5 19AC"""
        csvfile = SimpleUploadedFile('data.csv', bytes(csv_data, 'utf-8'))
        response = self.client.post(reverse('admin:ipam_import_subnet'),
                                    {'csvfile': csvfile}, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(self.subnet_model.objects.first().subnet), '10.27.1.0/24')
        self.assertEqual(str(self.ipaddress_model.objects.all()[0].ip_address), '10.27.1.1')
        self.assertEqual(str(self.ipaddress_model.objects.all()[1].ip_address), '10.27.1.252')
        self.assertEqual(str(self.ipaddress_model.objects.all()[2].ip_address), '10.27.1.253')
        self.assertEqual(str(self.ipaddress_model.objects.all()[3].ip_address), '10.27.1.254')

    def test_existing_csv_data(self):
        """Importing a CSV that partly overlaps existing records only adds
        the missing addresses."""
        subnet = self._create_subnet(name='Monachers - Matera', subnet='10.27.1.0/24')
        self._create_ipaddress(ip_address='10.27.1.1', subnet=subnet, description='Monachers')
        csv_data = """Monachers - Matera,
10.27.1.0/24,
test-organization,
,
ip address,description
10.27.1.1,Monachers
10.27.1.252,NanoStation M5
10.27.1.253,NanoStation M5
10.27.1.254,Nano Beam 5 19AC"""
        csvfile = SimpleUploadedFile('data.csv', bytes(csv_data, 'utf-8'))
        response = self.client.post(reverse('admin:ipam_import_subnet'),
                                    {'csvfile': csvfile}, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(self.ipaddress_model.objects.all()[1].ip_address), '10.27.1.252')
        self.assertEqual(str(self.ipaddress_model.objects.all()[2].ip_address), '10.27.1.253')
        self.assertEqual(str(self.ipaddress_model.objects.all()[3].ip_address), '10.27.1.254')

    def test_invalid_ipaddress_csv_data(self):
        """A malformed IP in the CSV surfaces a validation error message."""
        csv_data = """Monachers - Matera,
10.27.1.0/24,
Monachers,
,
ip address,description
10123142131,Monachers
10.27.1.252,NanoStation M5
10.27.1.253,NanoStation M5
10.27.1.254,Nano Beam 5 19AC"""
        csvfile = SimpleUploadedFile('data.csv', bytes(csv_data, 'utf-8'))
        response = self.client.post(reverse('admin:ipam_import_subnet'),
                                    {'csvfile': csvfile}, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'does not appear to be an IPv4 or IPv6 address')
class TestApi(CreateModelsMixin, BaseTestApi, PostDataMixin, TestCase):
    """REST API tests; extends the shared django_ipam API suite with a test
    for the CSV subnet-import endpoint."""

    ipaddress_model = swapper.load_model('openwisp_ipam', 'IPAddress')
    subnet_model = swapper.load_model('openwisp_ipam', 'Subnet')

    def setUp(self):
        # organization is required because subnets/IPs are organization-scoped
        self._create_org()
        super(TestApi, self).setUp()

    def test_import_subnet_api(self):
        """Import endpoint accepts .csv uploads and rejects non-csv
        extensions and payloads missing the subnet line."""
        csv_data = """Monachers - Matera,
10.27.1.0/24,
Monachers,
,
ip address,description
10.27.1.1,Monachers
10.27.1.254,Nano Beam 5 19AC"""
        csvfile = SimpleUploadedFile('data.csv', bytes(csv_data, 'utf-8'))
        response = self.client.post(reverse('ipam:import-subnet'), {'csvfile': csvfile})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(self.subnet_model.objects.first().subnet), '10.27.1.0/24')
        self.assertEqual(str(self.ipaddress_model.objects.all()[0].ip_address), '10.27.1.1')
        self.assertEqual(str(self.ipaddress_model.objects.all()[1].ip_address), '10.27.1.254')
        # same payload but with a .txt extension must be rejected
        csvfile = SimpleUploadedFile('data.txt', bytes(csv_data, 'utf-8'))
        response = self.client.post(reverse('ipam:import-subnet'),
                                    {'csvfile': csvfile}, follow=True)
        self.assertEqual(response.status_code, 400)
        # CSV without the subnet CIDR line is invalid
        csv_data = """Monachers - Matera,
,
,
ip address,description
10.27.1.1,Monachers
10.27.1.254,Nano Beam 5 19AC"""
        invalid_file = SimpleUploadedFile('data.csv', bytes(csv_data, 'utf-8'))
        response = self.client.post(reverse('ipam:import-subnet'), {'csvfile': invalid_file})
        self.assertEqual(response.status_code, 400)
class TestCommands(CreateModelsMixin, BaseTestCommands, FileMixin, TestCase):
    """Management-command tests from django_ipam, bound to the swappable
    openwisp_ipam models."""

    app_name = 'openwisp_ipam'
    subnet_model = swapper.load_model('openwisp_ipam', 'Subnet')
    # FIX: loaded as 'IpAddress' while every other class in this module uses
    # 'IPAddress'. Django's model lookup is case-insensitive, so behavior is
    # unchanged; the spelling is now consistent with the rest of the file.
    ipaddress_model = swapper.load_model('openwisp_ipam', 'IPAddress')
class NetworkAddressTestModelForm(ModelForm):
    """Minimal ModelForm over the swappable Subnet model, exposing only the
    ``subnet`` field; used by TestForms below."""

    class Meta:
        model = swapper.load_model('openwisp_ipam', 'Subnet')
        fields = ('subnet',)
# Skipped when the SAMPLE_APP environment variable is set (see skip reason).
@skipIf(os.environ.get('SAMPLE_APP', False), 'Running tests on SAMPLE_APP')
class TestForms(BaseTestForms, TestCase):
    """Form validation tests from django_ipam, run against the local
    NetworkAddressTestModelForm."""

    form_class = NetworkAddressTestModelForm
class TestModel(BaseTestModel, CreateModelsMixin, TestCase):
    """Model-level tests from django_ipam, bound to the swappable models."""

    ipaddress_model = swapper.load_model('openwisp_ipam', 'IPAddress')
    subnet_model = swapper.load_model('openwisp_ipam', 'Subnet')
class TestMultitenantAdmin(TestMultitenantAdminMixin, CreateModelsMixin, TestCase):
    """Checks that admin changelists only expose the objects belonging to the
    organizations of the logged-in operator."""

    ipaddress_model = swapper.load_model('openwisp_ipam', 'IPAddress')
    subnet_model = swapper.load_model('openwisp_ipam', 'Subnet')

    def _create_multitenancy_test_env(self):
        """Create two organizations, each with one subnet and one address;
        the operator is a member of the first organization only."""
        first_org = self._create_org(name="test1organization")
        second_org = self._create_org(name="test2organization")
        first_subnet = self._create_subnet(
            subnet='172.16.0.1/16',
            organization=first_org
        )
        second_subnet = self._create_subnet(
            subnet='192.168.0.1/16',
            organization=second_org
        )
        first_ip = self._create_ipaddress(
            ip_address='172.16.0.1',
            organization=first_org,
            subnet=first_subnet
        )
        second_ip = self._create_ipaddress(
            ip_address='192.168.0.1',
            organization=second_org,
            subnet=second_subnet
        )
        operator = self._create_operator(organizations=[first_org])
        return {
            'org1': first_org,
            'org2': second_org,
            'subnet1': first_subnet,
            'subnet2': second_subnet,
            'ipadd1': first_ip,
            'ipadd2': second_ip,
            'operator': operator,
        }

    def test_multitenancy_ip_queryset(self):
        env = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:openwisp_ipam_ipaddress_changelist'),
            visible=[env['ipadd1']],
            hidden=[env['ipadd2']],
        )

    def test_multitenancy_subnet_queryset(self):
        env = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:openwisp_ipam_subnet_changelist'),
            visible=[env['subnet1']],
            hidden=[env['subnet2']],
        )
| []
| []
| [
"SAMPLE_APP"
]
| [] | ["SAMPLE_APP"] | python | 1 | 0 | |
skills/dff_gaming_skill/dialogflows/common/shared_memory_ops.py | import logging
import os
import sentry_sdk
from nltk.tokenize import sent_tokenize
import common.dialogflow_framework.utils.state as state_utils
import common.gaming as common_gaming
from dialogflows.common import game_info
from dialogflows.common.game_info import search_igdb_game_description_by_user_and_bot_phrases
# Number of summary sentences narrated to the user per bot turn when walking
# through a game description.
NUM_SENTENCES_IN_ONE_TURN_OF_GAME_DESCRIPTION = 2

# Report unhandled errors to Sentry; SENTRY_DSN comes from the environment.
sentry_sdk.init(dsn=os.getenv("SENTRY_DSN"))

logger = logging.getLogger(__name__)

# Reusable assertion messages for shared-memory invariants of this skill.
ASSERTION_ERROR_NO_GAMES_IN_SHARED_MEMORY = (
    "Shared memory field 'igdb_game_ids_user_wanted_to_discuss' is empty. "
    "If dff_gaming_skill reached state SYS_USER_CONFIRMS_GAME 'games_user_wanted_do_discuss' cannot be empty "
)
ASSERTION_ERROR_MSG_CANDIDATE_GAME_IS_NOT_SET = (
    "This function should be called if shared memory field "
    "`candidate_game_id` is set. Shared memory field 'candidate_game_id' is supposed to be set when skill goes "
    "through states SYS_USER_MAYBE_WANTS_TO_TALK_ABOUT_PARTICULAR_GAME -> USR_CHECK_WITH_USER_GAME_TITLE and "
    "the field is supposed to be emptied when skill leaves states SYS_USER_CONFIRMS_GAME_BOT_NEVER_PLAYED, "
    "(scopes.MINECRAFT, State.USR_START), SYS_USER_CONFIRMS_GAME_BOT_NEVER_PLAYED."
)
def _build_linkto_responses_to_ids_dictionary(
movies_linktos_based_on_themes_and_genres,
books_linktos_based_on_themes_and_genres,
movies_special_linktos,
books_special_linktos,
):
res = {}
count = 0
for to_skill_responses in [movies_linktos_based_on_themes_and_genres, books_linktos_based_on_themes_and_genres]:
for based_on_responses in to_skill_responses.values():
for responses in based_on_responses.values():
for r in responses:
res[r] = count
count += 1
for to_skill_responses in [movies_special_linktos, books_special_linktos]:
for responses in to_skill_responses.values():
for r in responses:
res[r] = count
count += 1
return res
# Precomputed lookup used when recording which link-to phrase was uttered:
# maps every known movie/book link-to response text to its group id.
LINKTO_RESPONSES_TO_LINKTO_IDS = _build_linkto_responses_to_ids_dictionary(
    common_gaming.links_to_movies,
    common_gaming.links_to_books,
    common_gaming.special_links_to_movies,
    common_gaming.special_links_to_books,
)
def get_igdb_ids_for_games_user_wanted_to_discuss(vars, assert_not_empty=True):
    """Return the list of igdb ids of games the user asked to discuss.

    When ``assert_not_empty`` is True an empty list is treated as a skill
    logic error (a game-discussion state was reached without a recorded
    game) and raises AssertionError.
    """
    shared_memory = state_utils.get_shared_memory(vars)
    ids = shared_memory.get("igdb_ids_for_games_user_wanted_to_discuss", [])
    if assert_not_empty:
        # FIX: reuse the module-level message instead of duplicating the
        # identical literal inline (text is unchanged).
        assert ids, ASSERTION_ERROR_NO_GAMES_IN_SHARED_MEMORY
    return ids
def get_candidate_game_id(vars):
    """Return the igdb id of the game currently proposed to the user, if any."""
    memory = state_utils.get_shared_memory(vars)
    logger.info(f"(get_candidate_game_id)shared_memory: {memory.keys()}")
    return memory.get("candidate_game_id")
def get_current_igdb_game(vars, assert_not_empty=True):
    """Return the igdb game description dict currently under discussion.

    Returns None when no game id is stored; that is only reachable when
    ``assert_not_empty`` is False, otherwise an empty id raises.
    """
    shared_memory = state_utils.get_shared_memory(vars)
    game_id = shared_memory.get("current_igdb_game_id", "")
    if game_id:
        # the cache is keyed by the string form of the id
        game = game_info.games_igdb_ids.get(str(game_id))
        assert game is not None, (
            f"If some game is set for discussion it should have been added to `games_igdb_ids`."
            f" No game for id {repr(game_id)}."
        )
    else:
        game = None
    # checked after the lookup so that a stale id still produces the more
    # specific "not in games_igdb_ids" assertion above
    if assert_not_empty:
        assert game_id, (
            "Shared memory field 'current_igdb_game_id' is empty. If dff_gaming_skill reached "
            "state SYS_USER_CONFIRMS_GAME and did not reached SYS_ERR 'current_igdb_game_id' cannot be empty"
        )
    return game
def get_used_linkto_phrase_ids(vars):
    """Return the ids of link-to phrases this skill has already used."""
    return state_utils.get_shared_memory(vars).get("used_linkto_phrase_ids_from_gaming", [])
def put_game_id_to_igdb_game_ids_user_wanted_to_discuss(vars, id_):
    """Append ``id_`` to the shared-memory list of games the user asked about."""
    ids = get_igdb_ids_for_games_user_wanted_to_discuss(vars, False)
    ids.append(id_)
    # BUGFIX: this previously saved under 'igdb_game_ids_user_wanted_to_discuss'
    # while the getter reads 'igdb_ids_for_games_user_wanted_to_discuss', so
    # appended ids could never be read back. Save under the getter's key.
    # TODO(review): confirm no other component reads the old key.
    state_utils.save_to_shared_memory(vars, igdb_ids_for_games_user_wanted_to_discuss=ids)
def put_candidate_id_to_igdb_game_ids_user_wanted_to_discuss(vars):
    """Promote the currently proposed game into the wanted-to-discuss list."""
    candidate = get_candidate_game_id(vars)
    # None and "" are both invalid here, so a plain truthiness check suffices.
    assert candidate, ASSERTION_ERROR_MSG_CANDIDATE_GAME_IS_NOT_SET
    put_game_id_to_igdb_game_ids_user_wanted_to_discuss(vars, candidate)
def clean_candidate_game_id(vars):
    """Forget the game that was last proposed to the user."""
    state_utils.save_to_shared_memory(vars, candidate_game_id="")
def set_current_igdb_game_id_from_candidate_game_id(vars):
    """Make the game proposed to the user the one currently discussed."""
    logger.info("set_current_igdb_game_id_from_candidate_game_id")
    candidate = get_candidate_game_id(vars)
    assert candidate, ASSERTION_ERROR_MSG_CANDIDATE_GAME_IS_NOT_SET
    state_utils.save_to_shared_memory(vars, current_igdb_game_id=candidate)
def set_current_igdb_game_id_if_game_for_discussion_is_identified(vars, candidate_game_id_is_already_set):
    """Fix the game under discussion in shared memory.

    If a candidate id was already confirmed with the user it becomes the
    current game; otherwise the recent user/bot phrases are searched on igdb.
    When no game can be identified the current id is cleared. The temporary
    candidate id is wiped in every case.
    """
    if candidate_game_id_is_already_set:
        set_current_igdb_game_id_from_candidate_game_id(vars)
        put_candidate_id_to_igdb_game_ids_user_wanted_to_discuss(vars)
    else:
        igdb_game_description, _ = search_igdb_game_description_by_user_and_bot_phrases(vars)
        if igdb_game_description is not None:
            state_utils.save_to_shared_memory(vars, current_igdb_game_id=igdb_game_description["id"])
            put_game_id_to_igdb_game_ids_user_wanted_to_discuss(vars, igdb_game_description["id"])
        else:
            # no match found -- make sure no stale game id remains
            state_utils.save_to_shared_memory(vars, current_igdb_game_id="")
    clean_candidate_game_id(vars)
def add_used_linkto_to_shared_memory(vars, text):
    """Record that link-to phrase ``text`` has been used, by its group id."""
    phrase_id = LINKTO_RESPONSES_TO_LINKTO_IDS.get(text)
    assert phrase_id is not None, f"Link phrases added to shared memory has to be from `common.gaming`. Got: '{text}'"
    used_ids = get_used_linkto_phrase_ids(vars)
    used_ids.append(phrase_id)
    state_utils.save_to_shared_memory(vars, used_linkto_phrase_ids_from_gaming=used_ids)
def get_split_summary(vars):
    """Split the current game's summary into sentences plus a read cursor.

    Returns ``{"sentences": [...], "current_index": int}`` where the index
    is the position of the next sentence to narrate.
    """
    shared_memory = state_utils.get_shared_memory(vars)
    cursor = shared_memory.get("curr_summary_sent_index", 0)
    summary = get_current_igdb_game(vars).get("summary")
    assert summary is not None, (
        "Game descriptions without required keys are filtered in function "
        "`search_igdb_for_game` function. Maybe the wrong list of keys was passed to `search_igdb_for_game`, or "
        "game description was acquired some other way, or game description was modified."
    )
    return {"sentences": sent_tokenize(summary), "current_index": cursor}
def get_next_sentences_from_summary_and_num_remaining(vars, n_sent=2):
    """Return the next ``n_sent`` summary sentences and how many remain.

    Advances the shared-memory cursor so consecutive calls walk through the
    game description.
    """
    # FIX: the summary used to be fetched twice in a row; both calls read the
    # same shared-memory state and returned identical data, so one suffices.
    split_summary = get_split_summary(vars)
    i = split_summary["current_index"]
    text = " ".join(split_summary["sentences"][i : i + n_sent])
    split_summary["current_index"] += n_sent
    num_remaining = len(split_summary["sentences"]) - split_summary["current_index"]
    state_utils.save_to_shared_memory(vars, curr_summary_sent_index=split_summary["current_index"])
    return text, num_remaining
def are_there_2_or_more_turns_left_in_game_description(ngrams, vars):
    """Tell whether more than one narration turn of the summary remains."""
    split_summary = get_split_summary(vars)
    if not split_summary:
        return False
    remaining = len(split_summary["sentences"]) - split_summary["current_index"]
    return remaining / NUM_SENTENCES_IN_ONE_TURN_OF_GAME_DESCRIPTION > 1
def add_how_to_index_to_used_how_to_indices(vars, i):
    """Remember that how-to tip number ``i`` has been shown to the user."""
    memory = state_utils.get_shared_memory(vars)
    used = memory.get("used_how_to_indices", [])
    used.append(i)
    state_utils.save_to_shared_memory(vars, used_how_to_indices=used)
def mark_current_bot_utterance_as_link_to_other_skill(vars):
    """Record that the upcoming bot reply links the user to another skill."""
    utter_index = state_utils.get_human_utter_index(vars)
    logger.info(
        f"Bot utterance after human utterance with index {utter_index} "
        f"is marked to link to other skill"
    )
    state_utils.save_to_shared_memory(
        vars, index_of_last_human_utterance_after_which_link_from_gaming_was_made=utter_index
    )
def was_link_from_gaming_to_other_skill_made_in_previous_bot_utterance(vars):
    """Return True if the previous bot turn was a dff_gaming_skill link-out.

    True when the last recorded link-out happened fewer than 2 human turns
    ago and the previous active skill was dff_gaming_skill.
    """
    shared_memory = state_utils.get_shared_memory(vars)
    prev_active_skill = state_utils.get_last_bot_utterance(vars).get("active_skill")
    index_of_last_human_utterance_after_which_link_from_gaming_was_made = shared_memory.get(
        "index_of_last_human_utterance_after_which_link_from_gaming_was_made", -2
    )
    current_human_utterance_index = state_utils.get_human_utter_index(vars)
    diff = current_human_utterance_index - index_of_last_human_utterance_after_which_link_from_gaming_was_made
    if index_of_last_human_utterance_after_which_link_from_gaming_was_made < 0:
        # FIX: dropped the pointless f-prefix from this placeholder-free string.
        logger.info("No link from dff_gaming_skill was done in this dialog.")
    else:
        logger.info(f"The last link from dff_gaming_skill to other skill was done {diff} turns before")
    return diff < 2 and prev_active_skill is not None and prev_active_skill == "dff_gaming_skill"
| []
| []
| [
"SENTRY_DSN"
]
| [] | ["SENTRY_DSN"] | python | 1 | 0 | |
apis/v1/example_test.go | // Copyright 2021-2022 The phy-api-go authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1_test
import (
"context"
"fmt"
"os"
v1 "github.com/sacloud/phy-api-go/apis/v1"
)
// serverURL is the production endpoint of the SAKURA dedicated PHY API.
var serverURL = "https://secure.sakura.ad.jp/cloud/api/dedicated-phy/1.0"

// Example demonstrates direct use of the client code generated from the API
// definition: credentials are read from the environment, the auth and
// request interceptors are attached, and the service list is queried.
func Example() {
	token := os.Getenv("SAKURACLOUD_ACCESS_TOKEN")
	secret := os.Getenv("SAKURACLOUD_ACCESS_TOKEN_SECRET")

	client, err := v1.NewClientWithResponses(serverURL, func(c *v1.Client) error {
		// every outgoing request is signed and decorated by these editors
		c.RequestEditors = []v1.RequestEditorFn{
			v1.PhyAuthInterceptor(token, secret),
			v1.PhyRequestInterceptor(),
		}
		return nil
	})
	if err != nil {
		panic(err)
	}

	services, err := client.ListServicesWithResponse(context.Background(), &v1.ListServicesParams{})
	if err != nil {
		panic(err)
	}
	fmt.Println(services.JSON200.Services[0].Nickname)
	// output:
	// server01
}
| [
"\"SAKURACLOUD_ACCESS_TOKEN\"",
"\"SAKURACLOUD_ACCESS_TOKEN_SECRET\""
]
| []
| [
"SAKURACLOUD_ACCESS_TOKEN",
"SAKURACLOUD_ACCESS_TOKEN_SECRET"
]
| [] | ["SAKURACLOUD_ACCESS_TOKEN", "SAKURACLOUD_ACCESS_TOKEN_SECRET"] | go | 2 | 0 | |
cmd/mtaBuild.go | package cmd
import (
"bytes"
"encoding/json"
"fmt"
"os"
"path"
"strings"
"text/template"
"time"
"github.com/SAP/jenkins-library/pkg/npm"
"github.com/SAP/jenkins-library/pkg/command"
piperhttp "github.com/SAP/jenkins-library/pkg/http"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/maven"
"github.com/SAP/jenkins-library/pkg/piperutils"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/ghodss/yaml"
)
// templateMtaYml is the fallback mta.yaml rendered when the project does not
// ship its own descriptor: a single html5 module built with grunt, with
// ID/ApplicationName/Version filled in from package.json (see
// createMtaYamlFile). The literal ${timestamp} placeholder is substituted
// later by setTimeStamp.
const templateMtaYml = `_schema-version: "3.1"
ID: "{{.ID}}"
version: {{.Version}}
parameters:
  hcp-deployer-version: "1.1.0"
modules:
- name: {{.ApplicationName}}
  type: html5
  path: .
  parameters:
    version: {{.Version}}-${timestamp}
    name: {{.ApplicationName}}
  build-parameters:
    builder: grunt
    build-result: dist`
// downloadAndCopySettingsFiles fetches/copies the maven settings files;
// kept as a package variable so tests can replace it with a mock.
var downloadAndCopySettingsFiles = maven.DownloadAndCopySettingsFiles

// MTABuildTarget enumerates the deployment platforms an MTA can be built for.
type MTABuildTarget int

const (
	// NEO targets the SAP Neo environment.
	NEO MTABuildTarget = iota
	// CF targets Cloud Foundry.
	CF MTABuildTarget = iota
	// XSA targets XS advanced.
	XSA MTABuildTarget = iota
)
// ValueOfBuildTarget parses str ("NEO", "CF" or "XSA") into the matching
// MTABuildTarget; any other input yields -1 and an error.
func ValueOfBuildTarget(str string) (MTABuildTarget, error) {
	targets := map[string]MTABuildTarget{
		"NEO": NEO,
		"CF":  CF,
		"XSA": XSA,
	}
	if target, ok := targets[str]; ok {
		return target, nil
	}
	return -1, fmt.Errorf("Unknown BuildTarget/Platform: '%s'", str)
}
// String returns the canonical name of the build target. Like the original
// slice-index implementation, it panics for values outside the enum range.
func (m MTABuildTarget) String() string {
	names := [...]string{"NEO", "CF", "XSA"}
	return names[m]
}
// mtaBuild is the step entry point: it wires the real command runner, file
// utilities, HTTP client and npm executor together and aborts the pipeline
// step on any build failure.
func mtaBuild(config mtaBuildOptions,
	telemetryData *telemetry.CustomData,
	commonPipelineEnvironment *mtaBuildCommonPipelineEnvironment) {
	log.Entry().Debugf("Launching mta build")
	files := piperutils.Files{}
	httpClient := piperhttp.Client{}
	e := command.Command{}
	npmExecutorOptions := npm.ExecutorOptions{DefaultNpmRegistry: config.DefaultNpmRegistry, ExecRunner: &e}
	npmExecutor := npm.NewExecutor(npmExecutorOptions)
	err := runMtaBuild(config, commonPipelineEnvironment, &e, &files, &httpClient, npmExecutor)
	if err != nil {
		// Fatal logs the error and terminates -- the step cannot continue
		// after a failed build.
		log.Entry().
			WithError(err).
			Fatal("failed to execute mta build")
	}
}
// runMtaBuild performs the mta build: it prepares maven settings and npm
// registries, ensures an mta.yaml with a resolved timestamp exists, derives
// the mtar file name, invokes the configured build tool ("classic" java
// mta.jar or "cloudMbt" mbt) and optionally installs the produced artifacts
// for later pipeline steps.
func runMtaBuild(config mtaBuildOptions,
	commonPipelineEnvironment *mtaBuildCommonPipelineEnvironment,
	e command.ExecRunner,
	p piperutils.FileUtils,
	httpClient piperhttp.Downloader,
	npmExecutor npm.Executor) error {

	// Forward tool output through the logging framework (project standard).
	e.Stdout(log.Writer())
	e.Stderr(log.Writer())

	var err error

	err = handleSettingsFiles(config, p, httpClient)
	if err != nil {
		return err
	}

	// FIX: the error returned here used to be silently discarded (assigned
	// to err and immediately overwritten). A failing registry setup should
	// not abort the build, but it must at least be surfaced.
	if err := npmExecutor.SetNpmRegistries(); err != nil {
		log.Entry().WithError(err).Warn("failed to set npm registries")
	}

	mtaYamlFile := "mta.yaml"
	mtaYamlFileExists, err := p.FileExists(mtaYamlFile)
	if err != nil {
		return err
	}

	if !mtaYamlFileExists {
		// generate a default descriptor from package.json
		if err = createMtaYamlFile(mtaYamlFile, config.ApplicationName, p); err != nil {
			return err
		}
	} else {
		log.Entry().Infof("\"%s\" file found in project sources", mtaYamlFile)
	}

	if err = setTimeStamp(mtaYamlFile, p); err != nil {
		return err
	}

	mtarName, err := getMtarName(config, mtaYamlFile, p)
	if err != nil {
		return err
	}

	var call []string
	switch config.MtaBuildTool {
	case "classic":
		// legacy java-based builder (mta.jar)
		mtaJar := getMarJarName(config)
		buildTarget, err := ValueOfBuildTarget(config.BuildTarget)
		if err != nil {
			log.SetErrorCategory(log.ErrorConfiguration)
			return err
		}
		call = append(call, "java", "-jar", mtaJar, "--mtar", mtarName, fmt.Sprintf("--build-target=%s", buildTarget), "build")
		if len(config.Extensions) != 0 {
			call = append(call, fmt.Sprintf("--extension=%s", config.Extensions))
		}
	case "cloudMbt":
		// cloud mta build tool (mbt)
		platform, err := ValueOfBuildTarget(config.Platform)
		if err != nil {
			log.SetErrorCategory(log.ErrorConfiguration)
			return err
		}
		call = append(call, "mbt", "build", "--mtar", mtarName, "--platform", platform.String())
		if len(config.Extensions) != 0 {
			call = append(call, fmt.Sprintf("--extensions=%s", config.Extensions))
		}
		call = append(call, "--target", "./")
	default:
		log.SetErrorCategory(log.ErrorConfiguration)
		return fmt.Errorf("Unknown mta build tool: \"%s\"", config.MtaBuildTool)
	}

	// locally installed npm tools must win over globally installed ones
	if err = addNpmBinToPath(e); err != nil {
		return err
	}

	if len(config.M2Path) > 0 {
		absolutePath, err := p.Abs(config.M2Path)
		if err != nil {
			return err
		}
		e.AppendEnv([]string{"MAVEN_OPTS=-Dmaven.repo.local=" + absolutePath})
	}

	log.Entry().Infof("Executing mta build call: \"%s\"", strings.Join(call, " "))

	if err := e.RunExecutable(call[0], call[1:]...); err != nil {
		log.SetErrorCategory(log.ErrorBuild)
		return err
	}

	commonPipelineEnvironment.mtarFilePath = mtarName

	if config.InstallArtifacts {
		// install maven artifacts in local maven repo because `mbt build` executes `mvn package -B`
		err = installMavenArtifacts(e, config)
		if err != nil {
			return err
		}
		// mta-builder executes 'npm install --production', therefore we need 'npm ci/install' to install the dev-dependencies
		err = npmExecutor.InstallAllDependencies(npmExecutor.FindPackageJSONFiles())
		if err != nil {
			return err
		}
	}
	return err
}
// installMavenArtifacts runs a maven install when the project root contains a
// pom.xml; projects without maven content are silently skipped.
func installMavenArtifacts(e command.ExecRunner, config mtaBuildOptions) error {
	pomXMLExists, err := piperutils.FileExists("pom.xml")
	if err != nil {
		return err
	}
	if !pomXMLExists {
		return nil
	}
	return maven.InstallMavenArtifacts(e, maven.EvaluateOptions{M2Path: config.M2Path})
}
// getMarJarName returns the configured location of the mta jar, falling back
// to "mta.jar" (resolved via PATH) when none is configured.
// NOTE(review): the name looks like a typo for getMtaJarName, but it is kept
// because renaming would touch callers.
func getMarJarName(config mtaBuildOptions) string {
	if len(config.MtaJarLocation) > 0 {
		return config.MtaJarLocation
	}
	return "mta.jar"
}
// addNpmBinToPath prepends the project-local node_modules/.bin directory to
// PATH in the runner's environment, so locally installed npm tools take
// precedence over globally installed ones.
func addNpmBinToPath(e command.ExecRunner) error {
	// FIX: the error from os.Getwd was previously discarded with `_`.
	dir, err := os.Getwd()
	if err != nil {
		return err
	}
	newPath := path.Join(dir, "node_modules", ".bin")
	if oldPath := os.Getenv("PATH"); len(oldPath) > 0 {
		newPath = newPath + ":" + oldPath
	}
	e.SetEnv([]string{"PATH=" + newPath})
	return nil
}
// getMtarName returns the mtar file name: the configured MtarName when set,
// otherwise "<mta ID>.mtar" with the ID extracted from the given mta.yaml.
func getMtarName(config mtaBuildOptions, mtaYamlFile string, p piperutils.FileUtils) (string, error) {
	if len(config.MtarName) > 0 {
		return config.MtarName, nil
	}
	log.Entry().Debugf("mtar name not provided via config. Extracting from file \"%s\"", mtaYamlFile)
	mtaID, err := getMtaID(mtaYamlFile, p)
	if err != nil {
		log.SetErrorCategory(log.ErrorConfiguration)
		return "", err
	}
	if len(mtaID) == 0 {
		log.SetErrorCategory(log.ErrorConfiguration)
		return "", fmt.Errorf("Invalid mtar ID. Was empty")
	}
	log.Entry().Debugf("mtar name extracted from file \"%s\": \"%s\"", mtaYamlFile, mtaID)
	return mtaID + ".mtar", nil
}
// setTimeStamp substitutes every literal ${timestamp} placeholder in the mta
// descriptor with the current time stamp; files without the placeholder are
// left untouched.
func setTimeStamp(mtaYamlFile string, p piperutils.FileUtils) error {
	content, err := p.FileRead(mtaYamlFile)
	if err != nil {
		return err
	}
	const timestampVar = "${timestamp}"
	descriptor := string(content)
	if !strings.Contains(descriptor, timestampVar) {
		log.Entry().Infof("No timestamp contained in \"%s\". File has not been modified.", mtaYamlFile)
		return nil
	}
	resolved := strings.ReplaceAll(descriptor, timestampVar, getTimestamp())
	if err := p.FileWrite(mtaYamlFile, []byte(resolved), 0644); err != nil {
		log.SetErrorCategory(log.ErrorConfiguration)
		return err
	}
	log.Entry().Infof("Timestamp replaced in \"%s\"", mtaYamlFile)
	return nil
}
// getTimestamp returns the current local time as a compact yyyyMMddHHmmss
// string suitable for embedding in version identifiers.
func getTimestamp() string {
	// FIX: the previous fmt.Sprintf ended in "\n", so the newline was
	// substituted into the mta.yaml version string; time.Format yields the
	// bare 14-character stamp.
	return time.Now().Format("20060102150405")
}
// createMtaYamlFile generates a default mta.yaml (see templateMtaYml) from
// the project's package.json. It fails when no applicationName is configured
// or package.json is missing or incomplete.
func createMtaYamlFile(mtaYamlFile, applicationName string, p piperutils.FileUtils) error {
	log.Entry().Debugf("mta yaml file not found in project sources.")

	if len(applicationName) == 0 {
		return fmt.Errorf("'%[1]s' not found in project sources and 'applicationName' not provided as parameter - cannot generate '%[1]s' file", mtaYamlFile)
	}

	packageFileExists, err := p.FileExists("package.json")
	if err != nil {
		// FIX: this error was previously discarded.
		return err
	}
	if !packageFileExists {
		return fmt.Errorf("package.json file does not exist")
	}

	pContent, err := p.FileRead("package.json")
	if err != nil {
		return err
	}
	var result map[string]interface{}
	// FIX: the unmarshal error was previously ignored, allowing a corrupt
	// package.json to surface as a confusing "Version not found" error.
	if err := json.Unmarshal(pContent, &result); err != nil {
		return err
	}

	version, ok := result["version"].(string)
	if !ok {
		return fmt.Errorf("Version not found in \"package.json\" (or wrong type)")
	}
	name, ok := result["name"].(string)
	if !ok {
		return fmt.Errorf("Name not found in \"package.json\" (or wrong type)")
	}

	mtaConfig, err := generateMta(name, applicationName, version)
	if err != nil {
		return err
	}

	// FIX: the write error was previously ignored.
	if err := p.FileWrite(mtaYamlFile, []byte(mtaConfig), 0644); err != nil {
		return err
	}
	log.Entry().Infof("\"%s\" created.", mtaYamlFile)
	return nil
}
// handleSettingsFiles downloads/copies the configured global and project
// maven settings files; a thin wrapper kept separate so tests can mock it
// via the downloadAndCopySettingsFiles package variable.
func handleSettingsFiles(config mtaBuildOptions,
	p piperutils.FileUtils,
	httpClient piperhttp.Downloader) error {
	return downloadAndCopySettingsFiles(config.GlobalSettingsFile, config.ProjectSettingsFile, p, httpClient)
}
// generateMta renders templateMtaYml with the given mta ID, application name
// and version. All three values are required and must be non-empty.
func generateMta(id, applicationName, version string) (string, error) {
	if len(id) == 0 {
		return "", fmt.Errorf("Generating mta file: ID not provided")
	}
	if len(applicationName) == 0 {
		return "", fmt.Errorf("Generating mta file: ApplicationName not provided")
	}
	if len(version) == 0 {
		return "", fmt.Errorf("Generating mta file: Version not provided")
	}

	tmpl, err := template.New("mta.yaml").Parse(templateMtaYml)
	if err != nil {
		return "", err
	}

	type properties struct {
		ID              string
		ApplicationName string
		Version         string
	}
	props := properties{ID: id, ApplicationName: applicationName, Version: version}

	var script bytes.Buffer
	// FIX: the template execution error was previously ignored, which could
	// silently produce a truncated descriptor.
	if err := tmpl.Execute(&script, props); err != nil {
		return "", err
	}
	return script.String(), nil
}
// getMtaID reads the mta descriptor and returns its "ID" value. An absent,
// non-string or empty ID is an error.
func getMtaID(mtaYamlFile string, fileUtils piperutils.FileUtils) (string, error) {
	p, err := fileUtils.FileRead(mtaYamlFile)
	if err != nil {
		return "", err
	}
	var result map[string]interface{}
	err = yaml.Unmarshal(p, &result)
	if err != nil {
		return "", err
	}
	id, ok := result["ID"].(string)
	if !ok || len(id) == 0 {
		// BUGFIX: the error was previously constructed with fmt.Errorf but
		// never returned, so callers received an empty ID with a nil error.
		return "", fmt.Errorf("Id not found in mta yaml file (or wrong type)")
	}
	return id, nil
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
setup.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Basic setup file to enable pip install
See:
https://pythonhosted.org/setuptools/
https://bitbucket.org/pypa/setuptools
$ python setup.py register sdist upload
More secure to use twine to upload
$ pip3 install twine
$ python3 setup.py sdist
$ twine upload dist/toba-0.1.0.tar.gz
"""
from __future__ import generator_stop
import sys
import io
import os
import re
v = sys.version_info
# FIX: the guard used to test (3, 5) while the printed message (and the module
# docstring) state a Python 3.6+ requirement; the check now matches the
# stated requirement.
if v < (3, 6):
    msg = "FAIL: Requires Python 3.6 or later, but setup.py was run using {}.{}.{}"
    print(msg.format(v.major, v.minor, v.micro))
    print("NOTE: Installation failed. Run setup.py using python3")
    sys.exit(1)
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import relpath
from os.path import splitext
from setuptools import Extension
from setuptools import find_packages
from setuptools import setup
from setuptools.command.build_ext import build_ext
try:
# Allow installing package without any Cython available. This
# assumes you are going to include the .c files in your sdist.
import Cython
except ImportError:
Cython = None
def read(*names, **kwargs):
    """Return the text content of a file located relative to this setup.py.

    Path components in ``names`` are joined onto this file's directory; an
    ``encoding`` keyword may override the default of utf8.

    FIX: the file handle was previously opened and never closed; a context
    manager now guarantees it is released.
    """
    with io.open(
        join(dirname(__file__), *names),
        encoding=kwargs.get('encoding', 'utf8')
    ) as handle:
        return handle.read()
# Enable code coverage for C code: we can't use CFLAGS=-coverage in tox.ini, since that may mess with compiling
# dependencies (e.g. numpy). Therefore we set SETUPPY_CFLAGS=-coverage in tox.ini and copy it to CFLAGS here (after
# deps have been safely installed).
# Only applied under tox AND when the tox env explicitly opted in.
if 'TOXENV' in os.environ and 'SETUPPY_CFLAGS' in os.environ:
    os.environ['CFLAGS'] = os.environ['SETUPPY_CFLAGS']
class optional_build_ext(build_ext):
    """Allow the building of C extensions to fail.

    If compilation fails (e.g. no C toolchain on the target machine) the
    install proceeds without the optimized extensions instead of aborting.
    """

    def run(self):
        try:
            build_ext.run(self)
        except Exception as e:
            # Deliberately broad: any compiler/toolchain failure should
            # degrade to a pure-Python install rather than abort setup.
            self._unavailable(e)
            self.extensions = []  # avoid copying missing files (it would fail).

    def _unavailable(self, e):
        # Loud banner so the degraded install is visible in build logs.
        print('*' * 80)
        print('''WARNING:
An optional code optimization (C extension) could not be compiled.
Optimizations for this package will not be available!
''')
        print('CAUSE:')
        print('')
        print(' ' + repr(e))
        print('*' * 80)
# Package metadata and build configuration for the didery key management
# server. Sources live under src/; static web assets and ioflo .flo scripts
# are shipped as package data.
setup(
    name='didery',
    version="0.1.2",
    license='Apache2',
    description='DIDery Key Management Server',
    long_description="Redundant persistent backup of key rotation events and otp encrypted private keys.",
    author='Nicholas Telfer, Brady Hammond, Michael Mendoza',
    author_email='[email protected]',
    url='https://github.com/reputage/didery',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    # expose every top-level module under src/ as well
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    package_data={
        'didery': ['static/main.html',
                   'static/css/*.css',
                   'static/fonts/Raleway/*.ttf',
                   'static/node_modules/mithril/mithril.min.js',
                   'static/node_modules/jquery/dist/jquery.min.js',
                   'static/node_modules/semantic-ui/dist/semantic.min.css',
                   'static/node_modules/semantic-ui/dist/semantic.min.js',
                   'static/node_modules/semantic-ui/dist/themes/default/assets/fonts/*.woff2',
                   'static/node_modules/semantic-ui/dist/themes/default/assets/fonts/*.woff',
                   'static/node_modules/semantic-ui/dist/themes/default/assets/fonts/*.ttf',
                   'static/transcrypt/__javascript__/main.js',
                   'flo/*.flo'
                   ]
    },
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Utilities',
    ],
    keywords=[
        # eg: 'keyword1', 'keyword2', 'keyword3',
    ],
    install_requires=[
        'click', 'falcon>=1.2', 'ioflo>=1.6.8', 'libnacl>=1.5.1',
        'simplejson>=3.11.1', 'pytest-falcon>=0.4.2', 'arrow>=0.10.0',
        'transcrypt<=3.6.101', 'lmdb',
    ],
    extras_require={
        # eg:
        # 'rst': ['docutils>=0.11'],
        # ':python_version=="2.6"': ['argparse'],
    },
    # cython is only needed when it is available to compile the extensions
    setup_requires=[
        'cython',
    ] if Cython else [],
    entry_points={
        'console_scripts': [
            'didery = didery.cli:main',
            'dideryd = didery.app:main',
        ]
    },
    # allow the C extension build to fail gracefully (see optional_build_ext)
    cmdclass={'build_ext': optional_build_ext},
    # build an extension for every .pyx under src/ (or pre-generated .c when
    # Cython is unavailable)
    ext_modules=[
        Extension(
            splitext(relpath(path, 'src').replace(os.sep, '.'))[0],
            sources=[path],
            include_dirs=[dirname(path)]
        )
        for root, _, _ in os.walk('src')
        for path in glob(join(root, '*.pyx' if Cython else '*.c'))
    ],
)
| []
| []
| [
"SETUPPY_CFLAGS",
"CFLAGS"
]
| [] | ["SETUPPY_CFLAGS", "CFLAGS"] | python | 2 | 0 | |
pkg/config/client.go | // Copyright 2020 The frp Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/halfcoke/frp/pkg/auth"
"github.com/halfcoke/frp/pkg/util/util"
"gopkg.in/ini.v1"
)
// ClientCommonConf contains information for a client service. It is
// recommended to use GetDefaultClientConf instead of creating this object
// directly, so that all unspecified fields have reasonable default values.
type ClientCommonConf struct {
	auth.ClientConfig `ini:",extends"`
	// ServerAddr specifies the address of the server to connect to. By
	// default, this value is "0.0.0.0".
	ServerAddr string `ini:"server_addr" json:"server_addr"`
	// ServerPort specifies the port to connect to the server on. By default,
	// this value is 7000.
	ServerPort int `ini:"server_port" json:"server_port"`
	// HTTPProxy specifies a proxy address to connect to the server through. If
	// this value is "", the server will be connected to directly. By default,
	// this value is read from the "http_proxy" environment variable.
	HTTPProxy string `ini:"http_proxy" json:"http_proxy"`
	// LogFile specifies a file where logs will be written to. This value will
	// only be used if LogWay is set appropriately. By default, this value is
	// "console".
	LogFile string `ini:"log_file" json:"log_file"`
	// LogWay specifies the way logging is managed. Valid values are "console"
	// or "file". If "console" is used, logs will be printed to stdout. If
	// "file" is used, logs will be printed to LogFile. By default, this value
	// is "console".
	LogWay string `ini:"log_way" json:"log_way"`
	// LogLevel specifies the minimum log level. Valid values are "trace",
	// "debug", "info", "warn", and "error". By default, this value is "info".
	LogLevel string `ini:"log_level" json:"log_level"`
	// LogMaxDays specifies the maximum number of days to store log information
	// before deletion. This is only used if LogWay == "file". By default, this
	// value is 0.
	LogMaxDays int64 `ini:"log_max_days" json:"log_max_days"`
	// DisableLogColor disables log colors when LogWay == "console" when set to
	// true. By default, this value is false.
	DisableLogColor bool `ini:"disable_log_color" json:"disable_log_color"`
	// AdminAddr specifies the address that the admin server binds to. By
	// default, this value is "127.0.0.1".
	AdminAddr string `ini:"admin_addr" json:"admin_addr"`
	// AdminPort specifies the port for the admin server to listen on. If this
	// value is 0, the admin server will not be started. By default, this value
	// is 0.
	AdminPort int `ini:"admin_port" json:"admin_port"`
	// AdminUser specifies the username that the admin server will use for
	// login.
	AdminUser string `ini:"admin_user" json:"admin_user"`
	// AdminPwd specifies the password that the admin server will use for
	// login.
	AdminPwd string `ini:"admin_pwd" json:"admin_pwd"`
	// AssetsDir specifies the local directory that the admin server will load
	// resources from. If this value is "", assets will be loaded from the
	// bundled executable using statik. By default, this value is "".
	AssetsDir string `ini:"assets_dir" json:"assets_dir"`
	// PoolCount specifies the number of connections the client will make to
	// the server in advance. The default produced by GetDefaultClientConf is 1.
	PoolCount int `ini:"pool_count" json:"pool_count"`
	// TCPMux toggles TCP stream multiplexing. This allows multiple requests
	// from a client to share a single TCP connection. If this value is true,
	// the server must have TCP multiplexing enabled as well. By default, this
	// value is true.
	TCPMux bool `ini:"tcp_mux" json:"tcp_mux"`
	// User specifies a prefix for proxy names to distinguish them from other
	// clients. If this value is not "", proxy names will automatically be
	// changed to "{user}.{proxy_name}". By default, this value is "".
	User string `ini:"user" json:"user"`
	// DNSServer specifies a DNS server address for FRPC to use. If this value
	// is "", the default DNS will be used. By default, this value is "".
	DNSServer string `ini:"dns_server" json:"dns_server"`
	// LoginFailExit controls whether or not the client should exit after a
	// failed login attempt. If false, the client will retry until a login
	// attempt succeeds. By default, this value is true.
	LoginFailExit bool `ini:"login_fail_exit" json:"login_fail_exit"`
	// Start specifies a set of enabled proxies by name. If this set is empty,
	// all supplied proxies are enabled. By default, this value is an empty
	// set.
	Start []string `ini:"start" json:"start"`
	//Start map[string]struct{} `json:"start"`
	// Protocol specifies the protocol to use when interacting with the server.
	// Valid values are "tcp", "kcp" and "websocket". By default, this value
	// is "tcp".
	Protocol string `ini:"protocol" json:"protocol"`
	// TLSEnable specifies whether or not TLS should be used when communicating
	// with the server. If "tls_cert_file" and "tls_key_file" are valid,
	// client will load the supplied tls configuration.
	TLSEnable bool `ini:"tls_enable" json:"tls_enable"`
	// TLSCertPath specifies the path of the cert file that client will
	// load. It only works when "tls_enable" is true and "tls_key_file" is valid.
	TLSCertFile string `ini:"tls_cert_file" json:"tls_cert_file"`
	// TLSKeyPath specifies the path of the secret key file that client
	// will load. It only works when "tls_enable" is true and "tls_cert_file"
	// are valid.
	TLSKeyFile string `ini:"tls_key_file" json:"tls_key_file"`
	// TLSTrustedCaFile specifies the path of the trusted ca file that will load.
	// It only works when "tls_enable" is valid and tls configuration of server
	// has been specified.
	TLSTrustedCaFile string `ini:"tls_trusted_ca_file" json:"tls_trusted_ca_file"`
	// TLSServerName specifies the custom server name of tls certificate. By
	// default, the server name is the same as ServerAddr.
	TLSServerName string `ini:"tls_server_name" json:"tls_server_name"`
	// By default, frpc will connect frps with first custom byte if tls is enabled.
	// If DisableCustomTLSFirstByte is true, frpc will not send that custom byte.
	DisableCustomTLSFirstByte bool `ini:"disable_custom_tls_first_byte" json:"disable_custom_tls_first_byte"`
	// HeartBeatInterval specifies at what interval heartbeats are sent to the
	// server, in seconds. It is not recommended to change this value. By
	// default, this value is 30.
	HeartbeatInterval int64 `ini:"heartbeat_interval" json:"heartbeat_interval"`
	// HeartBeatTimeout specifies the maximum allowed heartbeat response delay
	// before the connection is terminated, in seconds. It is not recommended
	// to change this value. By default, this value is 90.
	HeartbeatTimeout int64 `ini:"heartbeat_timeout" json:"heartbeat_timeout"`
	// Client meta info, collected from "meta_*" keys of the [common] section
	// (prefix stripped) by UnmarshalClientConfFromIni.
	Metas map[string]string `ini:"-" json:"metas"`
	// UDPPacketSize specifies the udp packet size
	// By default, this value is 1500
	UDPPacketSize int64 `ini:"udp_packet_size" json:"udp_packet_size"`
	// Include other config files for proxies.
	IncludeConfigFiles []string `ini:"includes" json:"includes"`
}
// GetDefaultClientConf returns a client configuration with default values.
// GetDefaultClientConf returns a client configuration with default values.
func GetDefaultClientConf() ClientCommonConf {
	conf := ClientCommonConf{ClientConfig: auth.GetDefaultClientConf()}
	// Connection defaults.
	conf.ServerAddr = "0.0.0.0"
	conf.ServerPort = 7000
	conf.HTTPProxy = os.Getenv("http_proxy")
	conf.PoolCount = 1
	conf.TCPMux = true
	conf.Protocol = "tcp"
	conf.LoginFailExit = true
	conf.UDPPacketSize = 1500
	// Logging defaults.
	conf.LogFile = "console"
	conf.LogWay = "console"
	conf.LogLevel = "info"
	conf.LogMaxDays = 3
	conf.DisableLogColor = false
	// Admin server defaults (disabled: port 0).
	conf.AdminAddr = "127.0.0.1"
	conf.AdminPort = 0
	conf.AdminUser = ""
	conf.AdminPwd = ""
	conf.AssetsDir = ""
	// Identity / proxy selection defaults.
	conf.User = ""
	conf.DNSServer = ""
	conf.Start = make([]string, 0)
	conf.Metas = make(map[string]string)
	conf.IncludeConfigFiles = make([]string, 0)
	// TLS defaults (disabled).
	conf.TLSEnable = false
	conf.TLSCertFile = ""
	conf.TLSKeyFile = ""
	conf.TLSTrustedCaFile = ""
	// Heartbeat defaults.
	conf.HeartbeatInterval = 30
	conf.HeartbeatTimeout = 90
	return conf
}
// Complete fills in derived fields: logging goes to a file unless the
// special LogFile value "console" selects stdout logging.
func (cfg *ClientCommonConf) Complete() {
	cfg.LogWay = "file"
	if cfg.LogFile == "console" {
		cfg.LogWay = "console"
	}
}
// Validate checks the common client configuration for fatal problems and
// returns an error describing the first one found. Ignorable
// inconsistencies (TLS files supplied while tls_enable is false) only
// produce warnings on stdout.
func (cfg *ClientCommonConf) Validate() error {
	if cfg.HeartbeatInterval <= 0 {
		return fmt.Errorf("invalid heartbeat_interval")
	}
	if cfg.HeartbeatTimeout < cfg.HeartbeatInterval {
		return fmt.Errorf("invalid heartbeat_timeout, heartbeat_timeout is less than heartbeat_interval")
	}
	if !cfg.TLSEnable {
		// TLS file settings are silently unused when TLS is off; warn so the
		// user notices the misconfiguration.
		if cfg.TLSCertFile != "" {
			fmt.Println("WARNING! tls_cert_file is invalid when tls_enable is false")
		}
		if cfg.TLSKeyFile != "" {
			fmt.Println("WARNING! tls_key_file is invalid when tls_enable is false")
		}
		if cfg.TLSTrustedCaFile != "" {
			fmt.Println("WARNING! tls_trusted_ca_file is invalid when tls_enable is false")
		}
	}
	if cfg.Protocol != "tcp" && cfg.Protocol != "kcp" && cfg.Protocol != "websocket" {
		return fmt.Errorf("invalid protocol")
	}
	for _, f := range cfg.IncludeConfigFiles {
		absDir, err := filepath.Abs(filepath.Dir(f))
		if err != nil {
			// Bug fix: the error itself is formatted into the message;
			// previously the path (absDir) was passed to the %v verb.
			return fmt.Errorf("include: parse directory of %s failed: %v", f, err)
		}
		if _, err := os.Stat(absDir); os.IsNotExist(err) {
			return fmt.Errorf("include: directory of %s not exist", f)
		}
	}
	return nil
}
// Supported sources including: string(file path), []byte, Reader interface.
func UnmarshalClientConfFromIni(source interface{}) (ClientCommonConf, error) {
f, err := ini.LoadSources(ini.LoadOptions{
Insensitive: false,
InsensitiveSections: false,
InsensitiveKeys: false,
IgnoreInlineComment: true,
AllowBooleanKeys: true,
}, source)
if err != nil {
return ClientCommonConf{}, err
}
s, err := f.GetSection("common")
if err != nil {
return ClientCommonConf{}, fmt.Errorf("invalid configuration file, not found [common] section")
}
common := GetDefaultClientConf()
err = s.MapTo(&common)
if err != nil {
return ClientCommonConf{}, err
}
common.Metas = GetMapWithoutPrefix(s.KeysHash(), "meta_")
return common, nil
}
// LoadAllProxyConfsFromIni parses every proxy/visitor section of the given
// ini source into configuration objects, keyed by "{prefix}.{name}".
// If start is empty, all proxies are enabled; otherwise only proxies whose
// section name appears in start are loaded.
//
// "range:" sections are expanded into one concrete section per port pair
// before the normal per-section parsing runs.
func LoadAllProxyConfsFromIni(
	prefix string,
	source interface{},
	start []string,
) (map[string]ProxyConf, map[string]VisitorConf, error) {
	f, err := ini.LoadSources(ini.LoadOptions{
		Insensitive:         false,
		InsensitiveSections: false,
		InsensitiveKeys:     false,
		IgnoreInlineComment: true,
		AllowBooleanKeys:    true,
	}, source)
	if err != nil {
		return nil, nil, err
	}
	proxyConfs := make(map[string]ProxyConf)
	visitorConfs := make(map[string]VisitorConf)
	if prefix != "" {
		prefix += "."
	}
	// Turn the start slice into a set for O(1) membership tests.
	startProxy := make(map[string]struct{})
	for _, s := range start {
		startProxy[s] = struct{}{}
	}
	startAll := true
	if len(startProxy) > 0 {
		startAll = false
	}
	// Build template sections from range section And append to ini.File.
	// The range sections are collected first because renderRangeProxyTemplates
	// adds new sections to f, and we must not mutate f.Sections() mid-iteration.
	rangeSections := make([]*ini.Section, 0)
	for _, section := range f.Sections() {
		if !strings.HasPrefix(section.Name(), "range:") {
			continue
		}
		rangeSections = append(rangeSections, section)
	}
	for _, section := range rangeSections {
		err = renderRangeProxyTemplates(f, section)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to render template for proxy %s: %v", section.Name(), err)
		}
	}
	for _, section := range f.Sections() {
		name := section.Name()
		// Skip non-proxy sections and the already-expanded range templates.
		if name == ini.DefaultSection || name == "common" || strings.HasPrefix(name, "range:") {
			continue
		}
		_, shouldStart := startProxy[name]
		if !startAll && !shouldStart {
			continue
		}
		// The "role" key distinguishes server-side proxies from visitors;
		// an absent role defaults to "server".
		roleType := section.Key("role").String()
		if roleType == "" {
			roleType = "server"
		}
		switch roleType {
		case "server":
			newConf, newErr := NewProxyConfFromIni(prefix, name, section)
			if newErr != nil {
				return nil, nil, fmt.Errorf("failed to parse proxy %s, err: %v", name, newErr)
			}
			proxyConfs[prefix+name] = newConf
		case "visitor":
			newConf, newErr := NewVisitorConfFromIni(prefix, name, section)
			if newErr != nil {
				return nil, nil, newErr
			}
			visitorConfs[prefix+name] = newConf
		default:
			return nil, nil, fmt.Errorf("proxy %s role should be 'server' or 'visitor'", name)
		}
	}
	return proxyConfs, visitorConfs, nil
}
// renderRangeProxyTemplates expands one "range:<name>" section into a series
// of concrete sections named "<name>_0", "<name>_1", ... — one per entry in
// the parsed local_port/remote_port ranges. Each generated section is a copy
// of the template with local_port/remote_port overridden to single values.
// The two ranges must be non-empty and of equal length.
func renderRangeProxyTemplates(f *ini.File, section *ini.Section) error {
	// Validation
	localPortStr := section.Key("local_port").String()
	remotePortStr := section.Key("remote_port").String()
	if localPortStr == "" || remotePortStr == "" {
		return fmt.Errorf("local_port or remote_port is empty")
	}
	localPorts, err := util.ParseRangeNumbers(localPortStr)
	if err != nil {
		return err
	}
	remotePorts, err := util.ParseRangeNumbers(remotePortStr)
	if err != nil {
		return err
	}
	if len(localPorts) != len(remotePorts) {
		return fmt.Errorf("local ports number should be same with remote ports number")
	}
	if len(localPorts) == 0 {
		return fmt.Errorf("local_port and remote_port is necessary")
	}
	// Templates
	prefix := strings.TrimSpace(strings.TrimPrefix(section.Name(), "range:"))
	for i := range localPorts {
		tmpname := fmt.Sprintf("%s_%d", prefix, i)
		tmpsection, err := f.NewSection(tmpname)
		if err != nil {
			return err
		}
		// Copy every template key, then override the two port keys
		// (NewKey on an existing key replaces its value).
		copySection(section, tmpsection)
		tmpsection.NewKey("local_port", fmt.Sprintf("%d", localPorts[i]))
		tmpsection.NewKey("remote_port", fmt.Sprintf("%d", remotePorts[i]))
	}
	return nil
}
func copySection(source, target *ini.Section) {
for key, value := range source.KeysHash() {
target.NewKey(key, value)
}
}
| [
"\"http_proxy\""
]
| []
| [
"http_proxy"
]
| [] | ["http_proxy"] | go | 1 | 0 | |
core/src/main/java/org/testcontainers/utility/RegistryAuthLocator.java | package org.testcontainers.utility;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.github.dockerjava.api.model.AuthConfig;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.zeroturnaround.exec.InvalidResultException;
import org.zeroturnaround.exec.ProcessExecutor;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.util.Base64;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.apache.commons.lang.StringUtils.isBlank;
import static org.slf4j.LoggerFactory.getLogger;
import static org.testcontainers.utility.AuthConfigUtil.toSafeString;
/**
 * Utility to look up registry authentication information for an image.
 *
 * <p>Credentials are resolved from the local Docker CLI configuration
 * ({@code $DOCKER_CONFIG/config.json}, defaulting to
 * {@code ~/.docker/config.json}) via credential helpers, the credential
 * store, or base64-encoded entries in the {@code auths} map.</p>
 */
public class RegistryAuthLocator {
    private static final Logger log = getLogger(RegistryAuthLocator.class);
    // Registry assumed when an image name carries no explicit registry part.
    private static final String DEFAULT_REGISTRY_NAME = "index.docker.io";
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
    // Process-wide singleton, lazily created by instance().
    private static RegistryAuthLocator instance;
    // Prefix/suffix applied to "docker-credential-<name>" when invoking a
    // credential program (overridable for tests, e.g. ".exe" on Windows).
    private final String commandPathPrefix;
    private final String commandExtension;
    private final File configFile;
    /**
     * key - credential helper's name
     * value - helper's response for "credentials not found" use case
     */
    private final Map<String, String> CREDENTIALS_HELPERS_NOT_FOUND_MESSAGE_CACHE;
    @VisibleForTesting
    RegistryAuthLocator(File configFile, String commandPathPrefix, String commandExtension,
                        Map<String, String> notFoundMessageHolderReference) {
        this.configFile = configFile;
        this.commandPathPrefix = commandPathPrefix;
        this.commandExtension = commandExtension;
        this.CREDENTIALS_HELPERS_NOT_FOUND_MESSAGE_CACHE = notFoundMessageHolderReference;
    }
    /**
     * Creates a locator reading {@code config.json} from the directory named
     * by the {@code DOCKER_CONFIG} environment variable, falling back to
     * {@code ~/.docker}.
     */
    protected RegistryAuthLocator() {
        final String dockerConfigLocation = System.getenv().getOrDefault("DOCKER_CONFIG",
            System.getProperty("user.home") + "/.docker");
        this.configFile = new File(dockerConfigLocation + "/config.json");
        this.commandPathPrefix = "";
        this.commandExtension = "";
        this.CREDENTIALS_HELPERS_NOT_FOUND_MESSAGE_CACHE = new HashMap<>();
    }
    public synchronized static RegistryAuthLocator instance() {
        if (instance == null) {
            instance = new RegistryAuthLocator();
        }
        return instance;
    }
    @VisibleForTesting
    static void setInstance(RegistryAuthLocator overrideInstance) {
        instance = overrideInstance;
    }
    /**
     * Looks up an AuthConfig for a given image name.
     * <p>
     * Lookup is performed in following order, as per
     * https://docs.docker.com/engine/reference/commandline/cli/:
     * <ol>
     *     <li>{@code credHelpers}</li>
     *     <li>{@code credsStore}</li>
     *     <li>Hard-coded Base64 encoded auth in {@code auths}</li>
     *     <li>otherwise, if no credentials have been found then behaviour falls back to docker-java's
     *     implementation</li>
     * </ol>
     *
     * @param dockerImageName image name to be looked up (potentially including a registry URL part)
     * @param defaultAuthConfig an AuthConfig object that should be returned if there is no overriding authentication available for images that are looked up
     * @return an AuthConfig that is applicable to this specific image OR the defaultAuthConfig.
     */
    public AuthConfig lookupAuthConfig(DockerImageName dockerImageName, AuthConfig defaultAuthConfig) {
        log.debug("Looking up auth config for image: {}", dockerImageName);
        log.debug("RegistryAuthLocator has configFile: {} ({}) and commandPathPrefix: {}",
            configFile,
            configFile.exists() ? "exists" : "does not exist",
            commandPathPrefix);
        try {
            final JsonNode config = OBJECT_MAPPER.readTree(configFile);
            final String registryName = effectiveRegistryName(dockerImageName);
            log.debug("registryName [{}] for dockerImageName [{}]", registryName, dockerImageName);
            // use helper preferentially (per https://docs.docker.com/engine/reference/commandline/cli/)
            final AuthConfig helperAuthConfig = authConfigUsingHelper(config, registryName);
            if (helperAuthConfig != null) {
                log.debug("found helper auth config [{}]", toSafeString(helperAuthConfig));
                return helperAuthConfig;
            }
            // no credsHelper to use, using credsStore:
            final AuthConfig storeAuthConfig = authConfigUsingStore(config, registryName);
            if (storeAuthConfig != null) {
                log.debug("found creds store auth config [{}]", toSafeString(storeAuthConfig));
                return storeAuthConfig;
            }
            // fall back to base64 encoded auth hardcoded in config file
            final AuthConfig existingAuthConfig = findExistingAuthConfig(config, registryName);
            if (existingAuthConfig != null) {
                log.debug("found existing auth config [{}]", toSafeString(existingAuthConfig));
                return existingAuthConfig;
            }
            log.debug("no matching Auth Configs - falling back to defaultAuthConfig [{}]", toSafeString(defaultAuthConfig));
            // otherwise, defaultAuthConfig should already contain any credentials available
        } catch (Exception e) {
            // Any failure (missing/corrupt config, helper crash) degrades to
            // docker-java's default behaviour rather than failing the lookup.
            log.warn("Failure when attempting to lookup auth config (dockerImageName: {}, configFile: {}. Falling back to docker-java default behaviour. Exception message: {}",
                dockerImageName,
                configFile,
                e.getMessage());
        }
        return defaultAuthConfig;
    }
    /**
     * Reads a matching entry of the config's {@code auths} map. If the entry
     * only carries a base64 {@code auth} blob, it is decoded into
     * username/password ("user:pass").
     */
    private AuthConfig findExistingAuthConfig(final JsonNode config, final String reposName) throws Exception {
        final Map.Entry<String, JsonNode> entry = findAuthNode(config, reposName);
        if (entry != null && entry.getValue() != null && entry.getValue().size() > 0) {
            final AuthConfig deserializedAuth = OBJECT_MAPPER
                .treeToValue(entry.getValue(), AuthConfig.class)
                .withRegistryAddress(entry.getKey());
            if (isBlank(deserializedAuth.getUsername()) &&
                isBlank(deserializedAuth.getPassword()) &&
                !isBlank(deserializedAuth.getAuth())) {
                final String rawAuth = new String(Base64.getDecoder().decode(deserializedAuth.getAuth()));
                final String[] splitRawAuth = rawAuth.split(":", 2);
                if (splitRawAuth.length == 2) {
                    deserializedAuth.withUsername(splitRawAuth[0]);
                    deserializedAuth.withPassword(splitRawAuth[1]);
                }
            }
            return deserializedAuth;
        }
        return null;
    }
    // Resolves credentials through the per-registry "credHelpers" mapping,
    // or returns null when no helper is configured for this registry.
    private AuthConfig authConfigUsingHelper(final JsonNode config, final String reposName) throws Exception {
        final JsonNode credHelpers = config.get("credHelpers");
        if (credHelpers != null && credHelpers.size() > 0) {
            final JsonNode helperNode = credHelpers.get(reposName);
            if (helperNode != null && helperNode.isTextual()) {
                final String helper = helperNode.asText();
                return runCredentialProvider(reposName, helper);
            }
        }
        return null;
    }
    // Resolves credentials through the global "credsStore" entry, or returns
    // null when no store is configured (a blank value is warned and ignored).
    private AuthConfig authConfigUsingStore(final JsonNode config, final String reposName) throws Exception {
        final JsonNode credsStoreNode = config.get("credsStore");
        if (credsStoreNode != null && !credsStoreNode.isMissingNode() && credsStoreNode.isTextual()) {
            final String credsStore = credsStoreNode.asText();
            if (isBlank(credsStore)) {
                log.warn("Docker auth config credsStore field will be ignored, because value is blank");
                return null;
            }
            return runCredentialProvider(reposName, credsStore);
        }
        return null;
    }
    // Finds the "auths" entry whose key is the registry name, with or without
    // a scheme prefix (e.g. "https://index.docker.io").
    private Map.Entry<String, JsonNode> findAuthNode(final JsonNode config, final String reposName) {
        final JsonNode auths = config.get("auths");
        if (auths != null && auths.size() > 0) {
            final Iterator<Map.Entry<String, JsonNode>> fields = auths.fields();
            while (fields.hasNext()) {
                final Map.Entry<String, JsonNode> entry = fields.next();
                if (entry.getKey().contains("://" + reposName) || entry.getKey().equals(reposName)) {
                    return entry;
                }
            }
        }
        return null;
    }
    /**
     * Executes a docker credential helper/store program and parses its JSON
     * response into an AuthConfig. Returns null (rather than throwing) when
     * the program reports its well-known "credentials not found" message.
     * A username of {@code <token>} marks an identity-token response.
     */
    private AuthConfig runCredentialProvider(String hostName, String helperOrStoreName) throws Exception {
        if (isBlank(hostName)) {
            log.debug("There is no point in locating AuthConfig for blank hostName. Returning NULL to allow fallback");
            return null;
        }
        final String credentialProgramName = getCredentialProgramName(helperOrStoreName);
        final String data;
        log.debug("Executing docker credential provider: {} to locate auth config for: {}",
            credentialProgramName, hostName);
        try {
            data = runCredentialProgram(hostName, credentialProgramName);
        } catch (InvalidResultException e) {
            final String responseErrorMsg = extractCredentialProviderErrorMessage(e);
            if (!isBlank(responseErrorMsg)) {
                String credentialsNotFoundMsg = getGenericCredentialsNotFoundMsg(credentialProgramName);
                if (credentialsNotFoundMsg != null && credentialsNotFoundMsg.equals(responseErrorMsg)) {
                    // hostName not in the credential helper/store, allowing
                    // the lookup to fall through to the next mechanism.
                    log.info("Credential helper/store ({}) does not have credentials for {}",
                        credentialProgramName,
                        hostName);
                    return null;
                }
                log.debug("Failure running docker credential helper/store ({}) with output '{}'",
                    credentialProgramName, responseErrorMsg);
            } else {
                log.debug("Failure running docker credential helper/store ({})", credentialProgramName);
            }
            throw e;
        } catch (Exception e) {
            log.debug("Failure running docker credential helper/store ({})", credentialProgramName);
            throw e;
        }
        final JsonNode helperResponse = OBJECT_MAPPER.readTree(data);
        log.debug("Credential helper/store provided auth config for: {}", hostName);
        final String username = helperResponse.at("/Username").asText();
        final String password = helperResponse.at("/Secret").asText();
        if ("<token>".equals(username)) {
            return new AuthConfig().withIdentityToken(password);
        } else {
            return new AuthConfig()
                .withRegistryAddress(helperResponse.at("/ServerURL").asText())
                .withUsername(username)
                .withPassword(password);
        }
    }
    // Builds the executable name following docker's convention:
    // docker-credential-<helper>.
    private String getCredentialProgramName(String credHelper) {
        return commandPathPrefix + "docker-credential-" + credHelper + commandExtension;
    }
    private String effectiveRegistryName(DockerImageName dockerImageName) {
        return StringUtils.defaultIfEmpty(dockerImageName.getRegistry(), DEFAULT_REGISTRY_NAME);
    }
    // Returns (caching per helper) the message the helper prints when asked
    // for credentials it does not have.
    private String getGenericCredentialsNotFoundMsg(String credentialHelperName) {
        if (!CREDENTIALS_HELPERS_NOT_FOUND_MESSAGE_CACHE.containsKey(credentialHelperName)) {
            String credentialsNotFoundMsg = discoverCredentialsHelperNotFoundMessage(credentialHelperName);
            if (!isBlank(credentialsNotFoundMsg)) {
                CREDENTIALS_HELPERS_NOT_FOUND_MESSAGE_CACHE.put(credentialHelperName, credentialsNotFoundMsg);
            }
        }
        return CREDENTIALS_HELPERS_NOT_FOUND_MESSAGE_CACHE.get(credentialHelperName);
    }
    private String discoverCredentialsHelperNotFoundMessage(String credentialHelperName) {
        // will do fake call to given credential helper to find out with which message
        // it response when there are no credentials for given hostName
        // hostName should be valid, but most probably not existing
        // IF its not enough, then should probably run 'list' command first to be sure...
        final String notExistentFakeHostName = "https://not.a.real.registry/url";
        String credentialsNotFoundMsg = null;
        try {
            runCredentialProgram(notExistentFakeHostName, credentialHelperName);
            // should not reach here
            log.warn("Failure running docker credential helper ({}) with fake call, expected 'credentials not found' response",
                credentialHelperName);
        } catch(Exception e) {
            if (e instanceof InvalidResultException) {
                credentialsNotFoundMsg = extractCredentialProviderErrorMessage((InvalidResultException)e);
            }
            if (isBlank(credentialsNotFoundMsg)) {
                log.warn("Failure running docker credential helper ({}) with fake call, expected 'credentials not found' response. Exception message: {}",
                    credentialHelperName,
                    e.getMessage());
            } else {
                log.debug("Got credentials not found error message from docker credential helper - {}", credentialsNotFoundMsg);
            }
        }
        return credentialsNotFoundMsg;
    }
    // Pulls the trimmed process output out of a failed helper invocation,
    // or null when the process produced no output.
    private String extractCredentialProviderErrorMessage(InvalidResultException invalidResultEx) {
        if (invalidResultEx.getResult() != null && invalidResultEx.getResult().hasOutput()) {
            return invalidResultEx.getResult().outputString().trim();
        }
        return null;
    }
    // Runs "docker-credential-<name> get" feeding hostName on stdin and
    // returning trimmed stdout; a 30s timeout bounds hung helpers.
    private String runCredentialProgram(String hostName, String credentialHelperName)
        throws InvalidResultException, InterruptedException, TimeoutException, IOException {
        return new ProcessExecutor()
            .command(credentialHelperName, "get")
            .redirectInput(new ByteArrayInputStream(hostName.getBytes()))
            .readOutput(true)
            .exitValueNormal()
            .timeout(30, TimeUnit.SECONDS)
            .execute()
            .outputUTF8()
            .trim();
    }
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
autorest/client_test.go | package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"time"
"github.com/thycotic-rd/go-autorest/autorest/mocks"
"github.com/thycotic-rd/go-autorest/tracing"
"github.com/thycotic-rd/go-autorest/version"
)
// TestLoggingInspectorWithInspection verifies that a LoggingInspector used as
// the client's RequestInspector writes the prepared request to its logger.
func TestLoggingInspectorWithInspection(t *testing.T) {
	b := bytes.Buffer{}
	c := Client{}
	li := LoggingInspector{Logger: log.New(&b, "", 0)}
	c.RequestInspector = li.WithInspection()
	Prepare(mocks.NewRequestWithContent("Content"),
		c.WithInspection())
	if len(b.String()) <= 0 {
		t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log")
	}
}
// TestLoggingInspectorWithInspectionEmitsErrors verifies the inspector logs
// the request even when Prepare's error path is checked explicitly.
func TestLoggingInspectorWithInspectionEmitsErrors(t *testing.T) {
	b := bytes.Buffer{}
	c := Client{}
	r := mocks.NewRequestWithContent("Content")
	li := LoggingInspector{Logger: log.New(&b, "", 0)}
	c.RequestInspector = li.WithInspection()
	if _, err := Prepare(r,
		c.WithInspection()); err != nil {
		t.Error(err)
	}
	if len(b.String()) <= 0 {
		t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log")
	}
}
// TestLoggingInspectorWithInspectionRestoresBody verifies the request body is
// still readable after the inspector has consumed it for logging.
func TestLoggingInspectorWithInspectionRestoresBody(t *testing.T) {
	b := bytes.Buffer{}
	c := Client{}
	r := mocks.NewRequestWithContent("Content")
	li := LoggingInspector{Logger: log.New(&b, "", 0)}
	c.RequestInspector = li.WithInspection()
	Prepare(r,
		c.WithInspection())
	s, _ := ioutil.ReadAll(r.Body)
	if len(s) <= 0 {
		t.Fatal("autorest: LoggingInspector#WithInspection did not restore the Request body")
	}
}
// TestLoggingInspectorByInspecting verifies that a LoggingInspector used as
// the client's ResponseInspector writes the response to its logger.
func TestLoggingInspectorByInspecting(t *testing.T) {
	b := bytes.Buffer{}
	c := Client{}
	li := LoggingInspector{Logger: log.New(&b, "", 0)}
	c.ResponseInspector = li.ByInspecting()
	Respond(mocks.NewResponseWithContent("Content"),
		c.ByInspecting())
	if len(b.String()) <= 0 {
		t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log")
	}
}
// TestLoggingInspectorByInspectingEmitsErrors verifies the inspector logs the
// response even when Respond's error path is checked explicitly.
func TestLoggingInspectorByInspectingEmitsErrors(t *testing.T) {
	b := bytes.Buffer{}
	c := Client{}
	r := mocks.NewResponseWithContent("Content")
	li := LoggingInspector{Logger: log.New(&b, "", 0)}
	c.ResponseInspector = li.ByInspecting()
	if err := Respond(r,
		c.ByInspecting()); err != nil {
		t.Fatal(err)
	}
	if len(b.String()) <= 0 {
		t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log")
	}
}
// TestLoggingInspectorByInspectingRestoresBody verifies the response body is
// still readable after the inspector has consumed it for logging.
func TestLoggingInspectorByInspectingRestoresBody(t *testing.T) {
	b := bytes.Buffer{}
	c := Client{}
	r := mocks.NewResponseWithContent("Content")
	li := LoggingInspector{Logger: log.New(&b, "", 0)}
	c.ResponseInspector = li.ByInspecting()
	Respond(r,
		c.ByInspecting())
	s, _ := ioutil.ReadAll(r.Body)
	if len(s) <= 0 {
		t.Fatal("autorest: LoggingInspector#ByInspecting did not restore the Response body")
	}
}
// TestNewClientWithUserAgent verifies the constructor prefixes the supplied
// user agent with the package's version string.
func TestNewClientWithUserAgent(t *testing.T) {
	ua := "UserAgent"
	c := NewClientWithUserAgent(ua)
	completeUA := fmt.Sprintf("%s %s", version.UserAgent(), ua)
	if c.UserAgent != completeUA {
		t.Fatalf("autorest: NewClientWithUserAgent failed to set the UserAgent -- expected %s, received %s",
			completeUA, c.UserAgent)
	}
}
// TestAddToUserAgent verifies AddToUserAgent appends a non-empty extension
// and rejects an empty one without modifying the existing user agent.
func TestAddToUserAgent(t *testing.T) {
	ua := "UserAgent"
	c := NewClientWithUserAgent(ua)
	ext := "extension"
	err := c.AddToUserAgent(ext)
	if err != nil {
		t.Fatalf("autorest: AddToUserAgent returned error -- expected nil, received %s", err)
	}
	completeUA := fmt.Sprintf("%s %s %s", version.UserAgent(), ua, ext)
	if c.UserAgent != completeUA {
		t.Fatalf("autorest: AddToUserAgent failed to add an extension to the UserAgent -- expected %s, received %s",
			completeUA, c.UserAgent)
	}
	// An empty extension must be refused and leave the user agent unchanged.
	err = c.AddToUserAgent("")
	if err == nil {
		t.Fatalf("autorest: AddToUserAgent didn't return error -- expected %s, received nil",
			fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent))
	}
	if c.UserAgent != completeUA {
		t.Fatalf("autorest: AddToUserAgent failed to not add an empty extension to the UserAgent -- expected %s, received %s",
			completeUA, c.UserAgent)
	}
}
func TestClientSenderReturnsHttpClientByDefault(t *testing.T) {
c := Client{}
if fmt.Sprintf("%T", c.sender()) != "*http.Client" {
t.Fatal("autorest: Client#sender failed to return http.Client by default")
}
}
func TestClientSenderReturnsSetSender(t *testing.T) {
c := Client{}
s := mocks.NewSender()
c.Sender = s
if c.sender() != s {
t.Fatal("autorest: Client#sender failed to return set Sender")
}
}
func TestClientDoInvokesSender(t *testing.T) {
c := Client{}
s := mocks.NewSender()
c.Sender = s
c.Do(&http.Request{})
if s.Attempts() != 1 {
t.Fatal("autorest: Client#Do failed to invoke the Sender")
}
}
func TestClientDoSetsUserAgent(t *testing.T) {
ua := "UserAgent"
c := Client{UserAgent: ua}
r := mocks.NewRequest()
s := mocks.NewSender()
c.Sender = s
c.Do(r)
if r.UserAgent() != ua {
t.Fatalf("autorest: Client#Do failed to correctly set User-Agent header: %s=%s",
http.CanonicalHeaderKey(headerUserAgent), r.UserAgent())
}
}
func TestClientDoSetsAuthorization(t *testing.T) {
r := mocks.NewRequest()
s := mocks.NewSender()
c := Client{Authorizer: mockAuthorizer{}, Sender: s}
c.Do(r)
if len(r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) <= 0 {
t.Fatalf("autorest: Client#Send failed to set Authorization header -- %s=%s",
http.CanonicalHeaderKey(headerAuthorization),
r.Header.Get(http.CanonicalHeaderKey(headerAuthorization)))
}
}
func TestClientDoInvokesRequestInspector(t *testing.T) {
r := mocks.NewRequest()
s := mocks.NewSender()
i := &mockInspector{}
c := Client{RequestInspector: i.WithInspection(), Sender: s}
c.Do(r)
if !i.wasInvoked {
t.Fatal("autorest: Client#Send failed to invoke the RequestInspector")
}
}
func TestClientDoInvokesResponseInspector(t *testing.T) {
r := mocks.NewRequest()
s := mocks.NewSender()
i := &mockInspector{}
c := Client{ResponseInspector: i.ByInspecting(), Sender: s}
c.Do(r)
if !i.wasInvoked {
t.Fatal("autorest: Client#Send failed to invoke the ResponseInspector")
}
}
// TestClientDoReturnsErrorIfPrepareFails verifies that an authorizer failure
// during preparation surfaces as an error from Client#Do.
func TestClientDoReturnsErrorIfPrepareFails(t *testing.T) {
	c := Client{Authorizer: mockFailingAuthorizer{}, Sender: mocks.NewSender()}
	if _, err := c.Do(&http.Request{}); err == nil {
		t.Fatalf("autorest: Client#Do failed to return an error when Prepare failed")
	}
}
// TestClientDoDoesNotSendIfPrepareFails verifies that Client#Do does NOT hand
// the request to the Sender when request preparation (authorization) fails.
func TestClientDoDoesNotSendIfPrepareFails(t *testing.T) {
	c := Client{}
	s := mocks.NewSender()
	c.Authorizer = mockFailingAuthorizer{}
	c.Sender = s
	c.Do(&http.Request{})
	if s.Attempts() > 0 {
		// Fixed failure message: the original said "failed to invoke the
		// Sender" (copied from TestClientDoInvokesSender), which states the
		// opposite of what this assertion checks.
		t.Fatal("autorest: Client#Do sent the request even though Prepare failed")
	}
}
// TestClientAuthorizerReturnsNullAuthorizerByDefault verifies the default
// authorizer for a zero-value Client is the NullAuthorizer.
func TestClientAuthorizerReturnsNullAuthorizerByDefault(t *testing.T) {
	var c Client
	if got := fmt.Sprintf("%T", c.authorizer()); got != "autorest.NullAuthorizer" {
		t.Fatal("autorest: Client#authorizer failed to return the NullAuthorizer by default")
	}
}
// TestClientAuthorizerReturnsSetAuthorizer verifies an explicitly assigned
// Authorizer is the one returned by Client#authorizer.
func TestClientAuthorizerReturnsSetAuthorizer(t *testing.T) {
	c := Client{Authorizer: mockAuthorizer{}}
	if got := fmt.Sprintf("%T", c.authorizer()); got != "autorest.mockAuthorizer" {
		t.Fatal("autorest: Client#authorizer failed to return the set Authorizer")
	}
}
// TestClientWithAuthorizer verifies that Client#WithAuthorization decorates a
// prepared request with an Authorization header.
func TestClientWithAuthorizer(t *testing.T) {
	c := Client{Authorizer: mockAuthorizer{}}
	req, _ := Prepare(&http.Request{}, c.WithAuthorization())
	if req.Header.Get(headerAuthorization) == "" {
		t.Fatal("autorest: Client#WithAuthorizer failed to return the WithAuthorizer from the active Authorizer")
	}
}
func TestClientWithInspection(t *testing.T) {
c := Client{}
r := &mockInspector{}
c.RequestInspector = r.WithInspection()
Prepare(&http.Request{},
c.WithInspection())
if !r.wasInvoked {
t.Fatal("autorest: Client#WithInspection failed to invoke RequestInspector")
}
}
// TestClientWithInspectionSetsDefault verifies that, without a configured
// RequestInspector, WithInspection leaves the request unchanged.
func TestClientWithInspectionSetsDefault(t *testing.T) {
	var c Client
	before := &http.Request{}
	after, _ := Prepare(before, c.WithInspection())
	if !reflect.DeepEqual(before, after) {
		t.Fatal("autorest: Client#WithInspection failed to provide a default RequestInspector")
	}
}
func TestClientByInspecting(t *testing.T) {
c := Client{}
r := &mockInspector{}
c.ResponseInspector = r.ByInspecting()
Respond(&http.Response{},
c.ByInspecting())
if !r.wasInvoked {
t.Fatal("autorest: Client#ByInspecting failed to invoke ResponseInspector")
}
}
// TestClientByInspectingSetsDefault verifies that, without a configured
// ResponseInspector, ByInspecting leaves the response unchanged.
func TestClientByInspectingSetsDefault(t *testing.T) {
	var c Client
	resp := &http.Response{}
	Respond(resp, c.ByInspecting())
	if !reflect.DeepEqual(resp, &http.Response{}) {
		t.Fatal("autorest: Client#ByInspecting failed to provide a default ResponseInspector")
	}
}
// TestClientTracing verifies that the default sender is an *http.Client whose
// transport is the tracing transport.
func TestClientTracing(t *testing.T) {
	var c Client
	hc, ok := c.sender().(*http.Client)
	if !ok {
		t.Fatal("autorest: Client#sender failed to return http.Client by default")
	}
	if hc.Transport != tracing.Transport {
		t.Fatal("autorest: Client.Sender Default transport is not the tracing transport")
	}
}
// TestCookies verifies that NewClientWithUserAgent returns a client whose
// cookie jar persists cookies across requests: the first response sets a
// cookie and the second request (identified by its body) must echo it back.
func TestCookies(t *testing.T) {
	second := "second"
	expected := http.Cookie{
		Name:  "tastes",
		Value: "delicious",
	}
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Always set the cookie; only validate it on the second request.
		http.SetCookie(w, &expected)
		b, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Fatalf("autorest: ioutil.ReadAll failed reading request body: %s", err)
		}
		if string(b) == second {
			cookie, err := r.Cookie(expected.Name)
			if err != nil {
				t.Fatalf("autorest: r.Cookie could not get request cookie: %s", err)
			}
			if cookie == nil {
				t.Fatalf("autorest: got nil cookie, expecting %v", expected)
			}
			if cookie.Value != expected.Value {
				// Fixed message: the original printed expected.Name here
				// instead of the expected value being compared.
				t.Fatalf("autorest: got cookie value '%s', expecting '%s'", cookie.Value, expected.Value)
			}
		}
	}))
	defer server.Close()
	client := NewClientWithUserAgent("")
	_, err := SendWithSender(client, mocks.NewRequestForURL(server.URL))
	if err != nil {
		t.Fatalf("autorest: first request failed: %s", err)
	}
	r2, err := http.NewRequest(http.MethodGet, server.URL, mocks.NewBody(second))
	if err != nil {
		t.Fatalf("autorest: failed creating second request: %s", err)
	}
	_, err = SendWithSender(client, r2)
	if err != nil {
		t.Fatalf("autorest: second request failed: %s", err)
	}
}
// randomString returns a pseudo-random string of length n drawn from the
// alphanumeric alphabet, seeded from the current wall-clock time.
func randomString(n int) string {
	const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
	rng := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
	out := make([]byte, n)
	for i := 0; i < n; i++ {
		out[i] = chars[rng.Intn(len(chars))]
	}
	return string(out)
}
| []
| []
| []
| [] | [] | go | null | null | null |
pkg/boilr/configuration.go | package boilr
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/Ilyes512/boilr/pkg/util/exit"
"github.com/Ilyes512/boilr/pkg/util/osutil"
)
// All the below variables should be set with the -X go linker option at
// build time, e.g. go build -ldflags "-X main.Version=v1.2.3"; they keep
// the "NOT_SET" placeholder when built without it.
var (
	// Version of the application
	Version = "NOT_SET"
	// BuildDate of the application
	BuildDate = "NOT_SET"
	// Commit hash of the application
	Commit = "NOT_SET"
)
// Application-wide constants: names, paths relative to $HOME, and the GitHub
// coordinates of the upstream repository.
const (
	// AppName of the application
	AppName = "boilr"
	// ConfigDirPath is the configuration directory of the application
	ConfigDirPath = ".config/boilr"
	// ConfigFileName is the configuration file name of the application
	ConfigFileName = "config.json"
	// TemplateDir is the directory that contains the template registry
	TemplateDir = "templates"
	// ContextFileName is the name of the file that contains the context values for the template
	ContextFileName = "project.json"
	// TemplateDirName is the name of the directory that contains the template files in a boilr template
	TemplateDirName = "template"
	// TemplateMetadataName is the name of the file that contains the metadata about the template saved in registry
	TemplateMetadataName = "__metadata.json"
	// GithubOwner is the owner of the github repository
	GithubOwner = "ilyes512"
	// GithubRepo is the name of the github repository
	GithubRepo = "boilr"
)
// Configuration contains the values needed for boilr to operate.
// Defaults are derived from $HOME in init() and can be overridden by a
// config.json file in the configuration directory.
var Configuration = struct {
	FilePath        string   // absolute path of the user's config.json
	ConfigDirPath   string   // absolute path of the configuration directory
	TemplateDirPath string   // absolute path of the template registry
	IgnoreCopyFiles []string // file patterns never copied from templates
}{}
// TemplatePath returns the absolute path of a template given the name of the
// template. The error is always nil; it is part of the signature for callers
// that expect a (path, error) pair.
func TemplatePath(name string) (string, error) {
	path := filepath.Join(Configuration.TemplateDirPath, name)
	return path, nil
}
// IsTemplateDirInitialized returns true when the template dir is already
// initialized (i.e. the registry directory exists on disk).
func IsTemplateDirInitialized() (bool, error) {
	exists, err := osutil.DirExists(Configuration.TemplateDirPath)
	return exists, err
}
// init resolves all configuration paths relative to $HOME, seeds the default
// ignore list, then overlays any values found in config.json. Any failure is
// fatal via exit.Error.
func init() {
	homeDir := os.Getenv("HOME")
	if homeDir == "" {
		// FIXME is this really necessary?
		exit.Error(fmt.Errorf("environment variable ${HOME} should be set"))
	}
	Configuration.FilePath = filepath.Join(homeDir, ConfigDirPath, ConfigFileName)
	Configuration.ConfigDirPath = filepath.Join(homeDir, ConfigDirPath)
	Configuration.TemplateDirPath = filepath.Join(homeDir, ConfigDirPath, TemplateDir)
	// OS metadata files that should never be copied out of a template.
	Configuration.IgnoreCopyFiles = []string{
		// MacOS
		".DS_Store", "._*",
		// Windows
		"Thumbs.db", "Thumbs.db:encryptable", "desktop.ini", "Desktop.ini",
		// Linux
		".directory", "*~",
	}
	// Read .config/boilr/config.json if it exists; fields present in the
	// file override the defaults assigned above via json.Unmarshal.
	// TODO use defaults if config.json doesn't exist
	hasConfig, err := osutil.FileExists(Configuration.FilePath)
	if err != nil {
		exit.Error(err)
	}
	if !hasConfig {
		// TODO report the absence of config.json
		// tlog.Debug("Couldn't find %s user configuration", ConfigFileName)
		return
	}
	buf, err := ioutil.ReadFile(Configuration.FilePath)
	if err != nil {
		exit.Error(err)
	}
	if err := json.Unmarshal(buf, &Configuration); err != nil {
		exit.Error(err)
	}
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
git/git.go | package git
import (
"charly/types"
"github.com/go-git/go-git/v5/plumbing"
"os"
"path"
"strings"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/go-git/go-git/v5/plumbing/transport/ssh"
)
// GetRepo lists the remote references of the repository described by repo
// without cloning it.
//
// Authentication is chosen from the URL scheme: http(s) URLs use basic auth
// with a token (preferred) or a username/password pair; anything else uses
// SSH key auth, defaulting the key file to $HOME/.ssh/id_rsa when none is
// configured.
func GetRepo(repo types.Repos) ([]*plumbing.Reference, error) {
	var auth transport.AuthMethod
	var err error
	if strings.HasPrefix(repo.URL, "http://") || strings.HasPrefix(repo.URL, "https://") {
		if repo.Auth.Token != "" {
			// NOTE(review): "xyz" looks like a placeholder username for
			// token-based basic auth — confirm the target hosts ignore it.
			auth = &http.BasicAuth{
				Username: "xyz",
				Password: repo.Auth.Token,
			}
		} else if repo.Auth.Username != "" && repo.Auth.Password != "" {
			auth = &http.BasicAuth{
				Username: repo.Auth.Username,
				Password: repo.Auth.Password,
			}
		}
	} else {
		if repo.Auth.SSHKeyfile == "" {
			home := os.Getenv("HOME")
			repo.Auth.SSHKeyfile = path.Join(home, ".ssh", "id_rsa")
		}
		auth, err = ssh.NewPublicKeysFromFile("git", repo.Auth.SSHKeyfile, repo.Auth.SSHKeyPassword)
		if err != nil {
			return nil, err
		}
	}
	// List refs directly from an anonymous (storer-less) remote.
	rem := git.NewRemote(nil, &config.RemoteConfig{Name: "origin", URLs: []string{repo.URL}})
	data, err := rem.List(&git.ListOptions{Auth: auth})
	if err != nil {
		return nil, err
	}
	return data, nil
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already chose one.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "standup_pivots.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Standard Django guidance: a bare ImportError here almost always
        # means Django is not installed or the virtualenv is not active.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
makedataset.py | # -*- coding: utf-8 -*-
# @Time : 2018/10/9 22:31
# @Author : qxliu
# @Email : [email protected]
# @File : makedataset.py
# @Software: PyCharm
import tensorflow as tf
from datetime import datetime
import pandas as pd
import numpy as np
from PIL import Image
import logging
import os
import sys
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # restrict TensorFlow to GPU 0
INPUT_DIR = './datasets/mode_2003/'  # source images plus the label .csv
OUTPUT_DIR = './datasets/separate_relabel2/'  # destination of generated datasets
OUTPUT_CHANNEL = 3  # channels of saved tiles (3 = RGB)
IMG_RESIZE = [512, 512]  # [height, width] each source image is resized to
CUT_NUM = [4, 2]  # [rows, cols] grid each resized image is cut into
CLASS_LIST = ['A', 'B', 'C', 'D', 'E', 'nodata']  # definitive class folders
CLASS_LIST_MAY = ['may_nodata', 'may_abcde', 'may_A', 'may_B', 'may_C', 'may_D', 'may_E']  # review folders for doubtful labels
def processBar(num, total, msg='', length=30):
    """Render a one-line console progress bar.

    :param num: number of completed items.
    :param total: total number of items.
    :param msg: optional label printed before the bar.
    :param length: character width of the bar.
    """
    rate = num / total
    rate_num = int(rate * 100)
    clth = int(rate * length)
    if len(msg) > 0:
        msg += ':'
    if rate_num == 100:
        r = '\r%s[%s%d%%]\n' % (msg, '*' * length, rate_num,)
    else:
        r = '\r%s[%s%s%d%%]' % (msg, '*' * clth, '-' * (length - clth), rate_num,)
    sys.stdout.write(r)
    # Bug fix: the original wrote `sys.stdout.flush` without calling it, so
    # the bar was never actually flushed to the terminal.
    sys.stdout.flush()
def mk_childfolders(parent_dir, child_list=None):
    """Create ``parent_dir`` and each named child directory beneath it.

    :param parent_dir: root directory to create (created if missing).
    :param child_list: iterable of sub-directory names; may be omitted.
    """
    # None default replaces the original mutable-default list; behavior for
    # all existing callers is unchanged.
    os.makedirs(parent_dir, exist_ok=True)
    for child in (child_list or []):
        # exist_ok=True already tolerates existing dirs, so the original
        # os.path.exists() pre-check was redundant (and racy).
        os.makedirs(os.path.join(parent_dir, child), exist_ok=True)
def _cut_images(image_to_cut, img_resize=[64, 64], cut_num=[2, 2], cut_order='horizontal'):
    '''Split one decoded image tensor into a grid of equally sized tiles.

    :param image_to_cut: image tensor from tf.image.decode_*
    :param img_resize: size to resize the origin image, [height, width]
    :param cut_num: numbers to cut in vertical and horizontal, [rows, cols]
    :param cut_order: output sequence for cut images, 'horizontal' first or 'vertical' first
    :return: a list of small images as uint8 tensors
    '''
    assert type(img_resize) in [list, np.ndarray] and len(img_resize) == 2
    assert type(cut_num) in [list, np.ndarray] and len(cut_num) == 2
    assert img_resize[0] % cut_num[0] == 0 and img_resize[1] % cut_num[1] == 0
    assert cut_order in ['horizontal', 'vertical']
    x_img_resized = tf.image.resize_images(image_to_cut, size=img_resize,
                                           method=tf.image.ResizeMethod.BILINEAR)  # shape[512,512,?]
    h = img_resize[0] // cut_num[0]  # height of each tile
    w = img_resize[1] // cut_num[1]  # width of each tile
    if cut_order == 'horizontal':
        off_hs = list((np.arange(cut_num[0]) * h).repeat(cut_num[1]))
        off_ws = list(np.arange(cut_num[1]) * w) * cut_num[0]
    else:
        # Bug fix: the original multiplied the column indices by `h` and the
        # row indices by `w`, producing wrong (possibly out-of-bounds) crop
        # offsets for non-square grids when cut_order == 'vertical'.
        off_ws = list((np.arange(cut_num[1]) * w).repeat(cut_num[0]))
        off_hs = list(np.arange(cut_num[0]) * h) * cut_num[1]
    x_img_cuts = [tf.image.crop_to_bounding_box(x_img_resized, hs, ws, h, w) \
                  for hs, ws in zip(off_hs, off_ws)]  # [shape[h,w,?] * (rows*cols)]
    x_img_cuts = [tf.cast(x, tf.uint8) for x in x_img_cuts]
    return x_img_cuts
def init_separate_dataset(tag='train', start_index=None, max_count=None, datatype='jpg'):
    """Cut every labelled source image into a CUT_NUM grid of tiles and save
    each tile into its per-class folder under OUTPUT_DIR/<tag>/.

    Tiles whose pixels contradict their label are redirected: an all-zero
    tile not labelled 'nodata' is forced into 'nodata'; a tile containing an
    all-zero row goes to 'may_nodata'; a non-empty tile labelled 'nodata'
    goes to 'may_abcde'.

    :param tag: 'train' or 'validation' split to process.
    :param start_index: skip this many csv rows first (ignored unless > 0).
    :param max_count: process at most this many rows (ignored unless > 0).
    :param datatype: extension for the saved tiles ('jpg', 'jpeg' or 'png').
    """
    def init_csv_reader(input_dir, is_train, start_index=None, max_count=None, channels=3):
        '''read records from .csv file and separate original images into 8 small images'''
        assert channels in [1, 3]
        def _parse_function(file_paths, labels):
            '''Decode images and divide into 8 small images
            :param file_paths: shape[]
            :param labels: shape[8]
            '''
            batch_size = CUT_NUM[0] * CUT_NUM[1]
            x_img_str = tf.read_file(file_paths)  # shape[]
            x_img_decoded = tf.image.decode_jpeg(x_img_str, channels=channels)  # shape[?,?,channels]
            batch_xs = _cut_images(x_img_decoded, IMG_RESIZE, CUT_NUM, 'horizontal')
            batch_ys = tf.reshape(tf.split(labels, batch_size, axis=0), [-1, 1], name='batch_ys')  # shape[batch_size,1]
            return file_paths, batch_xs, batch_ys
        # Processing the image filenames: the single .csv in input_dir holds
        # num_id, Cloud_Cover (per-tile labels) and the Train split flag.
        fs = os.listdir(input_dir)
        csv_name = os.path.join(input_dir, [it for it in fs if '.csv' in it][0])
        frame = pd.read_csv(csv_name)
        # The "Train" column splits the training set and validation set
        if is_train:
            frame = frame.loc[frame['Train'] == 'T']
            if isinstance(start_index, int) and start_index > 0:
                frame = frame[start_index:]
            if isinstance(max_count, int) and max_count > 0:
                frame = frame[:max_count]
            print(' [*] {} images initialized as training data'.format(frame['num_id'].count()))
        else:
            frame = frame.loc[frame['Train'] == 'F']
            if isinstance(start_index, int) and start_index > 0:
                frame = frame[start_index:]
            if isinstance(max_count, int) and max_count > 0:
                frame = frame[:max_count]
            print(' [*] {} images initialized as validation data'.format(frame['num_id'].count()))
        count = frame['num_id'].count()
        num_idx = frame['num_id'].values.astype(str).tolist()
        t_names = [item + '.jpg' for item in num_idx]
        file_names = [os.path.join(input_dir, item) for item in t_names]
        labels = frame['Cloud_Cover'].values.tolist()
        # '*' in a label string marks nodata; it maps to 'F', i.e. class
        # index 5 after the ord() shift below, matching CLASS_LIST[5].
        t_labels = [list('F'.join(item.split('*'))) for item in labels]
        for it in range(len(t_labels)):
            t_labels[it] = list(map(lambda x: ord(x) - ord('A'), t_labels[it]))
        # Initialize as a tensorflow tensor object
        data = tf.data.Dataset.from_tensor_slices((tf.constant(file_names, name='file_names'),
                                                   tf.constant(t_labels)))
        data = data.map(_parse_function)
        return data, count
    assert tag in ['train', 'validation']
    assert datatype in ['jpg', 'jpeg', 'png']
    _output_dir = os.path.join(OUTPUT_DIR, tag)
    mk_childfolders(_output_dir, child_list=CLASS_LIST + CLASS_LIST_MAY)
    reader, count = init_csv_reader(INPUT_DIR, tag == 'train', start_index, max_count, channels=OUTPUT_CHANNEL)
    batch_path, batch_xs, batch_ys = reader.make_one_shot_iterator().get_next()
    # param batch_path: shape []
    # param batch_xs: shape [batch_size, 128, 256, channels] type tf.uint8
    # param batch_ys: shape [batch_size, 1] type tf.int32
    xs = [tf.squeeze(x, axis=0) for x in
          tf.split(batch_xs, batch_xs.shape[0], axis=0)]  # a list of single images, [shape[1] * batch_size]
    ys = [tf.squeeze(y, axis=0) for y in
          tf.split(batch_ys, batch_ys.shape[0], axis=0)]  # a list of single label, [shape[1] * batch_size]
    logging.basicConfig(filename=os.path.join(OUTPUT_DIR, 'log.txt'), level=logging.DEBUG)
    extname = '.' + datatype
    with tf.Session() as sess:
        perc = count / 100
        perc = 1 if perc < 1 else int(perc)
        step = 0
        while True:
            try:
                org_path, imgs, labels = sess.run([batch_path, xs, ys])
                org_name = os.path.basename(org_path.decode()).split('.')[0]
                for i in range(len(imgs)):
                    new_name = CLASS_LIST[labels[i][0]] + '/' + org_name + '_' + str(i) + extname
                    if imgs[i].sum() == 0:  # tile is entirely zero
                        if labels[i][0] != 5:  # but its label is not 'nodata'
                            logging.error('{} is nodata, not a-e'.format(new_name))
                            new_name = CLASS_LIST[5] + '/' + org_name + '_' + str(i) + extname
                    else:  # tile has non-zero pixels
                        if 0 in [x.sum() for x in imgs[i]] and labels[i][0] != 5:  # a whole row is zero: possibly nodata
                            new_name = 'may_nodata' + '/' + org_name + '_' + str(i) + extname
                        elif labels[i][0] == 5:  # no zero row, yet labelled nodata
                            new_name = 'may_abcde' + '/' + org_name + '_' + str(i) + extname
                    save_path = os.path.join(_output_dir, new_name)
                    im = Image.fromarray(imgs[i])
                    im.save(save_path)
                if int(org_name) % perc == 0:
                    # print('progress: {}/{}'.format(step, batch_count))
                    processBar(step, count, msg="Initizing " + _output_dir)
                step += 1
            except tf.errors.OutOfRangeError:
                processBar(step, count, msg="Initizing " + _output_dir)
                print('Finish!')
                break
            except Exception as e:
                # Best effort: log the failing file and keep processing.
                print('an error accrue when open file %s' % org_path.decode())
                print(e)
                pass
def tfrecord_reader(filepaths, batch_size=24, num_epochs=1):
    """Build a queue-based reader over the given .tfrecord files.

    :param filepaths: one path or a list of paths to .tfrecord files.
    :param batch_size: number of examples per returned batch.
    :param num_epochs: passes over the files before OutOfRangeError.
    :return: (names, images, labels) batched tensors.
    """
    # ToDO: try this
    assert batch_size >= 0 and type(batch_size) is int
    reader = tf.TFRecordReader()
    if type(filepaths) is not list:
        filepaths = [filepaths]
    fqueue = tf.train.string_input_producer(filepaths, num_epochs=num_epochs)  # without num_epochs the queue cycles forever
    _, serialized_example = reader.read(fqueue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'name': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.string),
            'image': tf.FixedLenFeature([], tf.string),
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'channel': tf.FixedLenFeature([], tf.int64),
        })
    name = tf.decode_raw(features['name'], tf.uint8)
    label = tf.decode_raw(features['label'], tf.uint8)
    image = tf.decode_raw(features['image'], tf.uint8)
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    channel = tf.cast(features['channel'], tf.int32)
    image_shape = tf.stack([height, width, channel])
    image = tf.reshape(image, image_shape)
    # set_shape fixes the static dimensions so tf.train.batch can be used below
    #label.set_shape([10])
    #name.set_shape([10])
    image.set_shape([32, 32, 3])
    # tf.train.batch options: num_threads controls parallel readers,
    # min_after_dequeue the minimum left in the queue after a dequeue, and
    # capacity the queue size.
    names, images, labels = tf.train.batch([name, image, label], batch_size=batch_size,
                                           capacity=512 + 4 * batch_size)
    return names, images, labels
def check_tfrecord():
    """Visually spot-check a .tfrecord file: decode records one by one and
    display each image with OpenCV while printing its name and label.

    NOTE(review): the hard-coded Windows path below is developer-local —
    adjust before running elsewhere.
    """
    path = r'D:\qxliu\ordinal_clouds\datasets\clouds.shuffle.train.tfrecord'
    name_op, label_op, image_op = tfrecord_reader(path)
    import cv2 as cv
    with tf.Session() as sess:
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            while True:
                name, label, img = sess.run([name_op, label_op, image_op])
                img = np.squeeze(img, axis=0)
                label = np.squeeze(label, axis=0)
                print(name.tostring().decode('utf8'))
                print(label.tostring().decode('utf8'))
                # plt.imshow(img)
                cv.imshow('img', img)
                cv.waitKey(50)
        except tf.errors.OutOfRangeError:
            print('End')
        finally:
            coord.request_stop()
            coord.join(threads)  # wait for the reader threads to shut down
def init_binary_dataset(save_name, tag, datatype, shuffle):
    """Pack the per-class image folders of one split into a single binary
    dataset file (currently only the 'tfrecord' format is implemented).

    :param save_name: base name of the output file.
    :param tag: 'train' or 'validation' split to pack.
    :param datatype: output container; 'tfrecord', 'json' or 'h5'
        ('json'/'h5' are accepted but not yet implemented).
    :param shuffle: shuffle file order and apply random augmentation.
    """
    def init_img_reader(input_dir, class_list, img_resize=None, channels=3, shuffle=False):
        assert channels in [1, 3]
        resize = img_resize is not None and type(img_resize) in [list, np.ndarray] and len(img_resize) == 2
        def _parse_function(file_path, label):
            '''Decode image
            :param file_path: shape[]
            :param label: shape[]
            :return a string of image file path, a tensor of image, a label string of image
            '''
            x_img_str = tf.read_file(file_path)  # shape[]
            x_img = tf.image.decode_jpeg(x_img_str, channels=channels)  # shape[?,?,channels]
            if resize:
                x_img = tf.image.resize_images(x_img, size=img_resize,
                                               method=tf.image.ResizeMethod.BILINEAR)  # shape[img_resize,channels]
            if shuffle:  # random brightness / contrast / flip augmentation
                # ToDO: all images do with these
                x_img = tf.image.random_brightness(x_img, max_delta=0.25)
                x_img = tf.image.random_contrast(x_img, lower=0.75, upper=1.5)
                # x_img = tf.image.random_hue(x_img, max_delta=0.5)
                x_img = tf.image.random_flip_up_down(x_img)
                x_img = tf.image.random_flip_left_right(x_img)
            return file_path, x_img, label
        # Collect (file, label) pairs from each class sub-folder; the folder
        # name is the label string.
        files = []
        labels = []
        for cls in class_list:
            dir = os.path.join(input_dir, cls)
            if not os.path.exists(dir):
                print('path %s not exist' % dir)
                continue
            fs = os.listdir(dir)
            fs = [os.path.join(dir, item) for item in fs]
            files.extend(fs)
            labels.extend([cls] * len(fs))
        count = len(files)
        if shuffle:
            import random
            idx = list(range(count))
            random.shuffle(idx)
            sfl_files = []
            sfl_labels = []
            for i in idx:
                sfl_files.append(files[i])
                sfl_labels.append(labels[i])
            files = sfl_files
            labels = sfl_labels
        # Initialize as a tensorflow tensor object
        data = tf.data.Dataset.from_tensor_slices((tf.constant(files, dtype=tf.string, name='file_path'),
                                                   tf.constant(labels, name='label')))
        data = data.map(_parse_function)
        # if shuffle:
        #     data = data.shuffle(batch_count)
        return data, count
    assert tag in ['train', 'validation']
    assert datatype in ['tfrecord', 'json', 'h5']
    def _bytes_feature(value):  # build a bytes-typed Feature
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
    def _int64_feature(value):  # build an int64-typed Feature
        return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
    # ToDO: arrange more datatype
    input_dir = os.path.join(INPUT_DIR, tag)
    output_dir = OUTPUT_DIR
    mk_childfolders(output_dir)
    reader, count = init_img_reader(input_dir, class_list=CLASS_LIST,
                                    img_resize=[32, 32], channels=OUTPUT_CHANNEL, shuffle=shuffle)
    fpath, xs, ys = reader.make_one_shot_iterator().get_next()
    # param batch_xs: shape [32, 32, channels] type tf.uint8
    # param batch_ys: shape [1] type tf.int32
    logging.basicConfig(filename=os.path.join(OUTPUT_DIR, 'log.txt'), level=logging.DEBUG)
    if shuffle:
        save_name += '.shuffle'
    else:
        save_name += '.noshuffle'
    output_path = os.path.join(output_dir, '{}.{}.{}'.format(save_name, tag, datatype))
    with tf.Session() as sess:
        if datatype == 'tfrecord':
            with tf.python_io.TFRecordWriter(output_path) as writer:
                perc = count / 100
                perc = 1 if perc < 1 else int(perc)
                step = 0
                while True:
                    try:
                        org_path, img, label = sess.run([fpath, xs, ys])
                        org_name = os.path.basename(org_path.decode()).split('.')[0]
                        example = tf.train.Example(features=tf.train.Features(feature={
                            'name': _bytes_feature(org_name.encode('utf8')),
                            'label': _bytes_feature(label),
                            'height': _int64_feature(32),
                            'width': _int64_feature(32),
                            'channel': _int64_feature(3),
                            'image': _bytes_feature(img.tostring())
                        }))
                        writer.write(example.SerializeToString())
                        if int(org_name) % perc == 0:
                            processBar(step, count, msg="Initizing " + output_path)
                        step += 1
                    except tf.errors.OutOfRangeError:
                        processBar(step, count, msg="Initizing " + output_path)
                        print('Finish!')
                        break
        elif datatype == 'json':
            pass
        elif datatype == 'h5':
            pass
def main():
    """Entry point: build the separated tile dataset and report elapsed time."""

    def _make_separate_dataset():
        print('Begin to initialize training dataset...')
        init_separate_dataset('train', 0, -1)
        print('Begin to initialize validation dataset...')
        init_separate_dataset('validation', 0, -1)

    def _make_tfrecord_dataset():
        print('Begin to initialize training dataset...')
        init_binary_dataset('clouds', tag='train', datatype='tfrecord', shuffle=True)
        print('Begin to initialize validation dataset...')
        init_binary_dataset(save_name='clouds', tag='validation', datatype='tfrecord', shuffle=True)

    started = datetime.now()
    _make_separate_dataset()
    # _make_tfrecord_dataset()
    # check_tfrecord()
    finished = datetime.now()
    print('All dataset initialized! Span Time:%s' % (finished - started))
if __name__ == '__main__':
    # Run dataset generation only when executed as a script, not on import.
    main()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
template/faas-flow/tracer.go | package main
import (
"fmt"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
//"github.com/opentracing/opentracing-go/log"
"github.com/uber/jaeger-client-go"
"github.com/uber/jaeger-client-go/config"
"io"
"net/http"
"os"
"time"
)
// traceHandler bundles a Jaeger-backed opentracing tracer with the spans
// tracked for a single faas-flow request: one root request span plus
// per-node and per-operation child spans.
type traceHandler struct {
	tracer opentracing.Tracer // tracer created by initRequestTracer
	closer io.Closer          // closes/flushes the tracer's reporter
	reqSpan    opentracing.Span        // root span (nil when continued from headers)
	reqSpanCtx opentracing.SpanContext // context of the root span
	nodeSpans      map[string]opentracing.Span            // node name -> span
	operationSpans map[string]map[string]opentracing.Span // node -> operation id -> span
}
// CustomHeadersCarrier satisfies both TextMapWriter and TextMapReader,
// holding trace propagation headers as a plain string map.
type CustomHeadersCarrier struct {
	envMap map[string]string // lower-cased header name -> value
}
// buildCustomHeadersCarrier extracts the Jaeger propagation header
// ("Uber-Trace-Id") from an incoming http.Header into a carrier usable
// with tracer.Extract. Only the first value of the header is kept.
func buildCustomHeadersCarrier(header http.Header) *CustomHeadersCarrier {
	carrier := &CustomHeadersCarrier{envMap: make(map[string]string)}
	for name, values := range header {
		if name == "Uber-Trace-Id" && len(values) > 0 {
			carrier.envMap["uber-trace-id"] = values[0]
			break
		}
	}
	return carrier
}
// ForeachKey conforms to the TextMapReader interface: it invokes handler for
// every stored key/value pair, logging and returning the first error.
func (c *CustomHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
	for k, v := range c.envMap {
		if err := handler(k, v); err != nil {
			fmt.Fprintf(os.Stderr, "ForeachKey key %s value %s, error %v",
				k, v, err)
			return err
		}
	}
	return nil
}
// Set conforms to the TextMapWriter interface: it stores the key/value pair
// in the carrier's map, overwriting any previous value for key.
func (c *CustomHeadersCarrier) Set(key, val string) {
	c.envMap[key] = val
}
// getTraceServer returns the Jaeger agent address from the trace_server
// environment variable, defaulting to "jaegertracing:5775" when unset.
func getTraceServer() string {
	if addr := os.Getenv("trace_server"); addr != "" {
		return addr
	}
	return "jaegertracing:5775"
}
// initRequestTracer builds a Jaeger tracer for the named flow: every trace is
// sampled (const sampler, param 1) and spans are flushed to the local agent
// once per second. The returned traceHandler owns the tracer and its closer.
func initRequestTracer(flowName string) (*traceHandler, error) {
	tracerObj := &traceHandler{}
	agentPort := getTraceServer()
	cfg := config.Configuration{
		ServiceName: flowName,
		Sampler: &config.SamplerConfig{
			Type:  "const",
			Param: 1,
		},
		Reporter: &config.ReporterConfig{
			LogSpans:            true,
			BufferFlushInterval: 1 * time.Second,
			LocalAgentHostPort:  agentPort,
		},
	}
	opentracer, traceCloser, err := cfg.NewTracer(
		config.Logger(jaeger.StdLogger),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to init tracer, error %v", err.Error())
	}
	tracerObj.closer = traceCloser
	tracerObj.tracer = opentracer
	tracerObj.nodeSpans = make(map[string]opentracing.Span)
	tracerObj.operationSpans = make(map[string]map[string]opentracing.Span)
	return tracerObj, nil
}
// startReqSpan opens the root span for request reqId, tags it with the
// request id, and caches the span plus its context on the handler.
func (tracerObj *traceHandler) startReqSpan(reqId string) {
	span := tracerObj.tracer.StartSpan(reqId)
	span.SetTag("request", reqId)
	tracerObj.reqSpan = span
	tracerObj.reqSpanCtx = span.Context()
}
// continueReqSpan resumes an in-flight request trace by extracting the span
// context from the incoming HTTP headers. On extraction failure it logs and
// leaves the previously cached context untouched; reqSpan is set to nil
// because only the context — not the span itself — can be recovered.
func (tracerObj *traceHandler) continueReqSpan(reqId string, header http.Header) {
	var err error
	tracerObj.reqSpanCtx, err = tracerObj.tracer.Extract(
		opentracing.HTTPHeaders,
		opentracing.HTTPHeadersCarrier(header),
	)
	if err != nil {
		fmt.Printf("[Request %s] failed to continue req span for tracing, error %v\n", reqId, err)
		return
	}
	tracerObj.reqSpan = nil
	// TODO: Its not Supported to get span from spanContext as of now
	// https://github.com/opentracing/specification/issues/81
	// it will support us to extend the request span for nodes
	//reqSpan = opentracing.SpanFromContext(reqSpanCtx)
}
// extendReqSpan injects trace context into an outgoing forward request so the
// next stage continues the same trace. Because a request span cannot be
// rebuilt from its context (see continueReqSpan), the span of lastNode is
// forwarded instead; if that node has no span the request is left untouched.
func (tracerObj *traceHandler) extendReqSpan(reqId string, lastNode string, url string, req *http.Request) {
	// TODO: as requestSpan can't be regenerated with the span context we
	// forward the nodes SpanContext
	// span := reqSpan
	span := tracerObj.nodeSpans[lastNode]
	if span == nil {
		return
	}
	ext.SpanKindRPCClient.Set(span)
	ext.HTTPUrl.Set(span, url)
	ext.HTTPMethod.Set(span, "POST")
	err := span.Tracer().Inject(
		span.Context(),
		opentracing.HTTPHeaders,
		opentracing.HTTPHeadersCarrier(req.Header),
	)
	if err != nil {
		fmt.Printf("[Request %s] failed to extend req span for tracing, error %v\n", reqId, err)
	}
	// Sanity check: Jaeger propagation must have produced this header.
	if req.Header.Get("Uber-Trace-Id") == "" {
		fmt.Printf("[Request %s] failed to extend req span for tracing, error Uber-Trace-Id not set\n",
			reqId)
	}
}
// stopReqSpan finishes the request span, if one was started on this instance
// (it is nil when the trace was continued from incoming headers).
func (tracerObj *traceHandler) stopReqSpan() {
	if tracerObj.reqSpan != nil {
		tracerObj.reqSpan.Finish()
	}
}
// startNodeSpan opens a span for a flow node as an RPC-server child of the
// request span context, tagged with the request id and node name.
func (tracerObj *traceHandler) startNodeSpan(node string, reqId string) {
	tracerObj.nodeSpans[node] = tracerObj.tracer.StartSpan(
		node, ext.RPCServerOption(tracerObj.reqSpanCtx))
	/*
		tracerObj.nodeSpans[node] = tracerObj.tracer.StartSpan(
			node, opentracing.ChildOf(reqSpan.Context()))
	*/
	tracerObj.nodeSpans[node].SetTag("async", "true")
	tracerObj.nodeSpans[node].SetTag("request", reqId)
	tracerObj.nodeSpans[node].SetTag("node", node)
}
// stopNodeSpan terminates the span of the given node. A missing span (node
// never started on this instance) is ignored instead of dereferencing a nil
// map entry, matching the guards used by stopReqSpan and startOperationSpan.
func (tracerObj *traceHandler) stopNodeSpan(node string) {
	if span := tracerObj.nodeSpans[node]; span != nil {
		span.Finish()
	}
}
// startOperationSpan opens a span for one operation inside a node, as a child
// of that node's span. It is a no-op when the node span does not exist; the
// per-node operation map is created lazily on first use.
func (tracerObj *traceHandler) startOperationSpan(node string, reqId string, operationId string) {
	if tracerObj.nodeSpans[node] == nil {
		return
	}
	operationSpans, ok := tracerObj.operationSpans[node]
	if !ok {
		operationSpans = make(map[string]opentracing.Span)
		tracerObj.operationSpans[node] = operationSpans
	}
	nodeContext := tracerObj.nodeSpans[node].Context()
	operationSpans[operationId] = tracerObj.tracer.StartSpan(
		operationId, opentracing.ChildOf(nodeContext))
	operationSpans[operationId].SetTag("request", reqId)
	operationSpans[operationId].SetTag("node", node)
	operationSpans[operationId].SetTag("operation", operationId)
}
// stopOperationSpan finishes the span of one operation inside a node; it is a
// no-op when the node span does not exist.
func (tracerObj *traceHandler) stopOperationSpan(node string, operationId string) {
	if tracerObj.nodeSpans[node] == nil {
		return
	}
	// NOTE(review): assumes startOperationSpan ran for this node/operation
	// pair; a missing entry here would panic with a nil dereference —
	// confirm the caller always pairs start/stop.
	operationSpans := tracerObj.operationSpans[node]
	operationSpans[operationId].Finish()
}
// flushTracer flushes all pending traces by closing the tracer's reporter.
// The Close error is discarded — presumably acceptable at shutdown, but
// worth confirming if traces go missing.
func (tracerObj *traceHandler) flushTracer() {
	tracerObj.closer.Close()
}
| [
"\"trace_server\""
]
| []
| [
"trace_server"
]
| [] | ["trace_server"] | go | 1 | 0 | |
test/e2e/kms/vault_test.go | //
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build e2e
// +build e2e
package main
import (
"bytes"
"context"
"crypto"
"crypto/ecdsa"
"crypto/rand"
"fmt"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/sigstore/sigstore/pkg/signature"
"github.com/sigstore/sigstore/pkg/signature/kms"
"github.com/sigstore/sigstore/pkg/signature/kms/hashivault"
"github.com/sigstore/sigstore/pkg/signature/options"
vault "github.com/hashicorp/vault/api"
)
// VaultSuite exercises the hashivault KMS signer against a live Vault server
// addressed by the VAULT_ADDR / VAULT_TOKEN environment variables.
type VaultSuite struct {
	suite.Suite
	vaultclient *vault.Client // used to mount/unmount the transit engines
}
// GetProvider resolves a hashivault SignerVerifier for the transit key named
// key (via the hashivault:// KMS URI scheme), failing the test immediately
// if resolution errors or yields nil.
func (suite *VaultSuite) GetProvider(key string, opts ...signature.RPCOption) *hashivault.SignerVerifier {
	provider, err := kms.Get(context.Background(), fmt.Sprintf("hashivault://%s", key), crypto.SHA256, opts...)
	require.NoError(suite.T(), err)
	require.NotNil(suite.T(), provider)
	return provider.(*hashivault.SignerVerifier)
}
// SetupSuite connects to the Vault server at VAULT_ADDR and mounts two
// transit secrets engines ("transit" and "somerandompath") used by the tests.
func (suite *VaultSuite) SetupSuite() {
	var err error
	suite.vaultclient, err = vault.NewClient(&vault.Config{
		Address: os.Getenv("VAULT_ADDR"),
	})
	require.Nil(suite.T(), err)
	require.NotNil(suite.T(), suite.vaultclient)
	err = suite.vaultclient.Sys().Mount("transit", &vault.MountInput{
		Type: "transit",
	})
	require.Nil(suite.T(), err)
	err = suite.vaultclient.Sys().Mount("somerandompath", &vault.MountInput{
		Type: "transit",
	})
	require.Nil(suite.T(), err)
}
// TearDownSuite unmounts the transit engines created in SetupSuite,
// reconnecting to Vault first if the client was never initialized.
func (suite *VaultSuite) TearDownSuite() {
	var err error
	if suite.vaultclient == nil {
		suite.vaultclient, err = vault.NewClient(&vault.Config{
			Address: os.Getenv("VAULT_ADDR"),
		})
		require.Nil(suite.T(), err)
		require.NotNil(suite.T(), suite.vaultclient)
	}
	err = suite.vaultclient.Sys().Unmount("transit")
	require.Nil(suite.T(), err)
	err = suite.vaultclient.Sys().Unmount("somerandompath")
	require.Nil(suite.T(), err)
}
// TestProvider checks that a SignerVerifier can be constructed at all.
func (suite *VaultSuite) TestProvider() {
	suite.GetProvider("provider")
}
// TestCreateKey checks that CreateKey produces a non-nil public key.
func (suite *VaultSuite) TestCreateKey() {
	p := suite.GetProvider("createkey")
	pub, err := p.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), pub)
}
// TestSign creates an ECDSA P-256 key, signs a message with it, and verifies
// the signature locally against the returned public key.
func (suite *VaultSuite) TestSign() {
	provider := suite.GetProvider("testsign")
	key, err := provider.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), key)
	data := []byte("mydata")
	sig, err := provider.SignMessage(bytes.NewReader(data))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	verifier, _ := signature.LoadECDSAVerifier(key.(*ecdsa.PublicKey), crypto.SHA256)
	err = verifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data))
	assert.Nil(suite.T(), err)
}
// TestSignOpts checks that Vault credentials can be supplied entirely via
// RPCAuth options: the VAULT_ADDR / VAULT_TOKEN env vars are blanked for the
// duration of the test (and restored by the defers) so only the explicit
// options can be the source of the address and token.
func (suite *VaultSuite) TestSignOpts() {
	addr := os.Getenv("VAULT_ADDR")
	token := os.Getenv("VAULT_TOKEN")
	os.Setenv("VAULT_ADDR", "")
	os.Setenv("VAULT_TOKEN", "")
	defer os.Setenv("VAULT_ADDR", addr)
	defer os.Setenv("VAULT_TOKEN", token)
	provider := suite.GetProvider("testsign",
		options.WithRPCAuthOpts(options.RPCAuth{Address: addr, Token: token}))
	key, err := provider.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), key)
	data := []byte("mydata")
	sig, err := provider.SignMessage(bytes.NewReader(data))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	verifier, _ := signature.LoadECDSAVerifier(key.(*ecdsa.PublicKey), crypto.SHA256)
	err = verifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data))
	assert.Nil(suite.T(), err)
}
// TestSignSpecificKeyVersion exercises signing with explicit transit key
// versions: version "0" (default), the current version, a not-yet-existing
// version (must fail), behavior across a key rotation, and rejection of
// versions below min_encryption_version once that is configured.
func (suite *VaultSuite) TestSignSpecificKeyVersion() {
	provider := suite.GetProvider("testsignversion")
	key, err := provider.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), key)
	// test without specifying any value (aka use default)
	data := []byte("mydata")
	sig, err := provider.SignMessage(bytes.NewReader(data))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	verifier, _ := signature.LoadECDSAVerifier(key.(*ecdsa.PublicKey), crypto.SHA256)
	err = verifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data))
	assert.Nil(suite.T(), err)
	// test with specifying default value
	sig, err = provider.SignMessage(bytes.NewReader(data), options.WithKeyVersion("0"))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	// test with specifying explicit value
	sig, err = provider.SignMessage(bytes.NewReader(data), options.WithKeyVersion("1"))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	// test version that doesn't (yet) exist
	sig, err = provider.SignMessage(bytes.NewReader(data), options.WithKeyVersion("2"))
	assert.NotNil(suite.T(), err)
	assert.Nil(suite.T(), sig)
	// rotate key (now two valid versions)
	client := suite.vaultclient.Logical()
	_, err = client.Write("/transit/keys/testsignversion/rotate", nil)
	assert.Nil(suite.T(), err)
	// test default version again (implicitly)
	sig, err = provider.SignMessage(bytes.NewReader(data))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	// test default version again (explicitly)
	sig, err = provider.SignMessage(bytes.NewReader(data), options.WithKeyVersion("0"))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	// test explicit previous version (should still work as we haven't set min_encryption_version yet)
	sig, err = provider.SignMessage(bytes.NewReader(data), options.WithKeyVersion("1"))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	// test explicit new version
	sig, err = provider.SignMessage(bytes.NewReader(data), options.WithKeyVersion("2"))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	// test bad value
	sig, err = provider.SignMessage(bytes.NewReader(data), options.WithKeyVersion("3"))
	assert.NotNil(suite.T(), err)
	assert.Nil(suite.T(), sig)
	// change minimum to v2
	_, err = client.Write("/transit/keys/testsignversion/config", map[string]interface{}{
		"min_encryption_version": 2,
	})
	assert.Nil(suite.T(), err)
	// test explicit previous version (should fail as min_encryption_version has been set)
	sig, err = provider.SignMessage(bytes.NewReader(data), options.WithKeyVersion("1"))
	assert.NotNil(suite.T(), err)
	assert.Nil(suite.T(), sig)
	// a provider pinned to version "2" at construction also signs successfully
	provider2 := suite.GetProvider("testsignversion", options.WithKeyVersion("2"))
	sig, err = provider2.SignMessage(bytes.NewReader(data))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	// test explicit new version
	sig, err = provider2.SignMessage(bytes.NewReader(data), options.WithKeyVersion("2"))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
}
// TestVerifySpecificKeyVersion exercises verification with explicit transit
// key versions: implicit default, explicit match, nonexistent version,
// behavior across rotation, the rule that version "0" is invalid for
// verification, and rejection below min_decryption_version.
func (suite *VaultSuite) TestVerifySpecificKeyVersion() {
	provider := suite.GetProvider("testverifyversion")
	key, err := provider.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), key)
	// test using v1
	data := []byte("mydata")
	sig, err := provider.SignMessage(bytes.NewReader(data), options.WithKeyVersion("1"))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	// test without specifying key value
	err = provider.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data))
	assert.Nil(suite.T(), err)
	// test with explicitly specifying default value
	err = provider.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data), options.WithKeyVersion("1"))
	assert.Nil(suite.T(), err)
	// test version that doesn't (yet) exist
	err = provider.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data), options.WithKeyVersion("2"))
	assert.NotNil(suite.T(), err)
	// rotate key (now two valid versions)
	client := suite.vaultclient.Logical()
	_, err = client.Write("/transit/keys/testverifyversion/rotate", nil)
	assert.Nil(suite.T(), err)
	// test default version again (implicitly)
	err = provider.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data))
	assert.Nil(suite.T(), err)
	// test invalid version (0 is fine for signing, but must be >= 1 for verification)
	err = provider.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data), options.WithKeyVersion("0"))
	assert.NotNil(suite.T(), err)
	// test explicit previous version (should still as we haven't set min_decryption_version yet)
	err = provider.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data), options.WithKeyVersion("1"))
	assert.Nil(suite.T(), err)
	// test explicit new version (should fail since it doesn't match the v1 key)
	err = provider.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data), options.WithKeyVersion("2"))
	assert.NotNil(suite.T(), err)
	// test bad value
	err = provider.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data), options.WithKeyVersion("3"))
	assert.NotNil(suite.T(), err)
	// change minimum to v2
	_, err = client.Write("/transit/keys/testverifyversion/config", map[string]interface{}{
		"min_decryption_version": 2,
	})
	assert.Nil(suite.T(), err)
	// test explicit previous version (should fail as min_decryption_version has been set)
	err = provider.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data), options.WithKeyVersion("1"))
	assert.NotNil(suite.T(), err)
}
// TestSignAndRecordKeyVersion checks that ReturnKeyVersionUsed reports the
// transit key version actually used for the signature (the "vault:vN:"
// prefix), both before and after a key rotation.
func (suite *VaultSuite) TestSignAndRecordKeyVersion() {
	provider := suite.GetProvider("testrecordsignversion")
	key, err := provider.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), key)
	// test for v1
	data := []byte("mydata")
	var versionUsed string
	sig, err := provider.SignMessage(bytes.NewReader(data), options.ReturnKeyVersionUsed(&versionUsed))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	assert.Contains(suite.T(), versionUsed, "vault:v1:")
	// rotate
	client := suite.vaultclient.Logical()
	_, err = client.Write("/transit/keys/testrecordsignversion/rotate", nil)
	assert.Nil(suite.T(), err)
	sig, err = provider.SignMessage(bytes.NewReader(data), options.WithKeyVersion("2"), options.ReturnKeyVersionUsed(&versionUsed))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	assert.Contains(suite.T(), versionUsed, "vault:v2:")
}
// TestSignWithDifferentTransitSecretEnginePath signs and verifies against the
// alternative "somerandompath" transit mount selected via the
// TRANSIT_SECRET_ENGINE_PATH environment variable.
// NOTE(review): the env var is set after GetProvider — presumably the provider
// reads it per operation rather than at construction; confirm in the provider
// implementation.
func (suite *VaultSuite) TestSignWithDifferentTransitSecretEnginePath() {
	provider := suite.GetProvider("testsign")
	os.Setenv("TRANSIT_SECRET_ENGINE_PATH", "somerandompath")
	defer os.Setenv("TRANSIT_SECRET_ENGINE_PATH", "transit")
	key, err := provider.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), key)
	data := []byte("mydata")
	sig, err := provider.SignMessage(bytes.NewReader(data), options.WithContext(context.Background()))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	verifier, err := signature.LoadECDSAVerifier(key.(*ecdsa.PublicKey), crypto.SHA256)
	assert.Nil(suite.T(), err)
	err = verifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data), options.WithContext(context.Background()))
	assert.Nil(suite.T(), err)
}
// TestSignWithDifferentTransitSecretEnginePathOpts is the same scenario as
// above but selects the alternative transit mount via the RPCAuth Path option
// instead of the environment variable.
func (suite *VaultSuite) TestSignWithDifferentTransitSecretEnginePathOpts() {
	provider := suite.GetProvider("testsign", options.WithRPCAuthOpts(options.RPCAuth{Path: "somerandompath"}))
	key, err := provider.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), key)
	data := []byte("mydata")
	sig, err := provider.SignMessage(bytes.NewReader(data), options.WithContext(context.Background()))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	verifier, err := signature.LoadECDSAVerifier(key.(*ecdsa.PublicKey), crypto.SHA256)
	assert.Nil(suite.T(), err)
	err = verifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data), options.WithContext(context.Background()))
	assert.Nil(suite.T(), err)
}
// TestPubKeyVerify verifies a Vault-produced signature using the public key
// fetched afterwards through provider.PublicKey (rather than the key returned
// by CreateKey).
func (suite *VaultSuite) TestPubKeyVerify() {
	provider := suite.GetProvider("testsign")
	key, err := provider.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	require.Nil(suite.T(), err)
	require.NotNil(suite.T(), key)
	data := []byte("mydata")
	sig, err := provider.SignMessage(bytes.NewReader(data))
	require.Nil(suite.T(), err)
	require.NotNil(suite.T(), sig)
	k, err := provider.PublicKey()
	require.NotNil(suite.T(), k)
	require.Nil(suite.T(), err)
	pubKey, ok := k.(*ecdsa.PublicKey)
	require.True(suite.T(), ok)
	verifier, _ := signature.LoadECDSAVerifier(pubKey, crypto.SHA256)
	err = verifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data))
	assert.Nil(suite.T(), err)
}
// TestCryptoSigner exercises the crypto.Signer adapter: it hashes the message
// with the hash function the signer reports, signs the digest, and verifies
// the resulting signature against the signer's own public key.
func (suite *VaultSuite) TestCryptoSigner() {
	provider := suite.GetProvider("testsign")
	key, err := provider.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	require.Nil(suite.T(), err)
	require.NotNil(suite.T(), key)
	data := []byte("mydata")
	cs, opts, err := provider.CryptoSigner(context.Background(), func(err error) { require.Nil(suite.T(), err) })
	// Fix: this error was previously unchecked; if CryptoSigner fails, opts
	// is nil and the next line panics instead of failing the test cleanly.
	require.Nil(suite.T(), err)
	hasher := opts.HashFunc().New()
	_, _ = hasher.Write(data)
	sig, err := cs.Sign(rand.Reader, hasher.Sum(nil), opts)
	require.Nil(suite.T(), err)
	require.NotNil(suite.T(), sig)
	k := cs.Public()
	require.NotNil(suite.T(), k)
	pubKey, ok := k.(*ecdsa.PublicKey)
	require.True(suite.T(), ok)
	verifier, _ := signature.LoadECDSAVerifier(pubKey, crypto.SHA256)
	err = verifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data))
	assert.Nil(suite.T(), err)
}
// TestVerify round-trips sign and verify entirely through the provider.
func (suite *VaultSuite) TestVerify() {
	provider := suite.GetProvider("testverify")
	key, err := provider.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), key)
	data := []byte("mydata")
	sig, err := provider.SignMessage(bytes.NewReader(data))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	err = provider.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data))
	assert.Nil(suite.T(), err)
}
// TestVerifyBadData checks that verification fails with the expected Vault
// error when the message does not match the signature.
func (suite *VaultSuite) TestVerifyBadData() {
	provider := suite.GetProvider("testverify")
	key, err := provider.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), key)
	data := []byte("mydata")
	sig, err := provider.SignMessage(bytes.NewReader(data))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig)
	dataInvalid := []byte("mydata-invalid")
	err = provider.VerifySignature(bytes.NewReader(sig), bytes.NewReader(dataInvalid))
	assert.Contains(suite.T(), err.Error(), "Failed vault verification")
}
// TestBadSignature checks that a signature produced under one key fails to
// verify under a different key, while still verifying under its own.
func (suite *VaultSuite) TestBadSignature() {
	provider1 := suite.GetProvider("testverify1")
	provider2 := suite.GetProvider("testverify2")
	key1, err := provider1.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), key1)
	key2, err := provider2.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), key2)
	data := []byte("mydata")
	sig1, err := provider1.SignMessage(bytes.NewReader(data))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), sig1)
	err = provider1.VerifySignature(bytes.NewReader(sig1), bytes.NewReader(data))
	assert.Nil(suite.T(), err)
	err = provider2.VerifySignature(bytes.NewReader(sig1), bytes.NewReader(data))
	assert.NotNil(suite.T(), err)
	assert.Contains(suite.T(), err.Error(), "Failed vault verification")
}
// TestNoProvider checks that an unknown KMS URI scheme yields an error and no
// provider.
func (suite *VaultSuite) TestNoProvider() {
	provider, err := kms.Get(context.Background(), "hashi://nonsense", crypto.Hash(0))
	require.Error(suite.T(), err)
	require.Nil(suite.T(), provider)
}
// TestInvalidHost checks that provider construction with an unreachable Vault
// address succeeds lazily, but the first operation (CreateKey) fails.
func (suite *VaultSuite) TestInvalidHost() {
	provider, err := kms.Get(context.Background(), "hashivault://keyname", crypto.SHA256,
		options.WithRPCAuthOpts(options.RPCAuth{Address: "https://unknown.example.com:8200"}))
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), provider)
	_, err = provider.CreateKey(context.Background(), hashivault.Algorithm_ECDSA_P256)
	require.Error(suite.T(), err)
}
// TestVault is the testify entry point that runs the whole VaultSuite.
func TestVault(t *testing.T) {
	suite.Run(t, new(VaultSuite))
}
| [
"\"VAULT_ADDR\"",
"\"VAULT_ADDR\"",
"\"VAULT_ADDR\"",
"\"VAULT_TOKEN\""
]
| []
| [
"VAULT_ADDR",
"VAULT_TOKEN"
]
| [] | ["VAULT_ADDR", "VAULT_TOKEN"] | go | 2 | 0 | |
statsmodels/multivariate/factor_rotation/tests/test_rotation.py | import unittest
import numpy as np
from statsmodels.multivariate.factor_rotation._wrappers import rotate_factors
from statsmodels.multivariate.factor_rotation._gpa_rotation import (
ff_partial_target, vgQ_partial_target, ff_target, vgQ_target, CF_objective,
orthomax_objective, oblimin_objective, GPA)
from statsmodels.multivariate.factor_rotation._analytic_rotation import (
target_rotation)
class TestAnalyticRotation(unittest.TestCase):
    """Tests for analytic (closed-form) target rotation of factor loadings."""

    @staticmethod
    def str2matrix(A):
        """Parse a whitespace-separated multiline string into a float ndarray.

        Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
        the builtin ``float`` is the documented, equivalent replacement.
        """
        A = A.lstrip().rstrip().split('\n')
        A = np.array([row.split() for row in A]).astype(float)
        return A

    def test_target_rotation(self):
        """
        Rotation towards target matrix example
        http://www.stat.ucla.edu/research/gpa
        """
        A = self.str2matrix("""
            .830 -.396
            .818 -.469
            .777 -.470
            .798 -.401
            .786 .500
            .672 .458
            .594 .444
            .647 .333
            """)
        H = self.str2matrix("""
            .8 -.3
            .8 -.4
            .7 -.4
            .9 -.4
            .8 .5
            .6 .4
            .5 .4
            .6 .3
            """)
        T = target_rotation(A, H)
        L = A.dot(T)
        L_required = self.str2matrix("""
            0.84168 -0.37053
            0.83191 -0.44386
            0.79096 -0.44611
            0.80985 -0.37650
            0.77040 0.52371
            0.65774 0.47826
            0.58020 0.46189
            0.63656 0.35255
            """)
        self.assertTrue(np.allclose(L, L_required, atol=1e-05))
        # full_rank=True must yield the same rotated loadings
        T = target_rotation(A, H, full_rank=True)
        L = A.dot(T)
        self.assertTrue(np.allclose(L, L_required, atol=1e-05))

    def test_orthogonal_target(self):
        """
        Rotation towards target matrix example
        http://www.stat.ucla.edu/research/gpa

        The iterative GPA solution must agree with the analytic one.
        """
        A = self.str2matrix("""
            .830 -.396
            .818 -.469
            .777 -.470
            .798 -.401
            .786 .500
            .672 .458
            .594 .444
            .647 .333
            """)
        H = self.str2matrix("""
            .8 -.3
            .8 -.4
            .7 -.4
            .9 -.4
            .8 .5
            .6 .4
            .5 .4
            .6 .3
            """)
        vgQ = lambda L=None, A=None, T=None: vgQ_target(H, L=L, A=A, T=T)
        L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='orthogonal')
        T_analytic = target_rotation(A, H)
        self.assertTrue(np.allclose(T, T_analytic, atol=1e-05))
class TestGPARotation(unittest.TestCase):
    """Tests for gradient-projection-algorithm (GPA) based factor rotation."""

    @staticmethod
    def str2matrix(A):
        """Parse a whitespace-separated multiline string into a float ndarray.

        Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
        the builtin ``float`` is the documented, equivalent replacement.
        """
        A = A.lstrip().rstrip().split('\n')
        A = np.array([row.split() for row in A]).astype(float)
        return A

    @classmethod
    def get_A(cls):
        """Return the shared 8x2 example loading matrix."""
        return cls.str2matrix("""
            .830 -.396
            .818 -.469
            .777 -.470
            .798 -.401
            .786 .500
            .672 .458
            .594 .444
            .647 .333
            """)

    @classmethod
    def get_quartimin_example(cls):
        """Return (A, expected iteration table, expected loadings) for quartimin."""
        A = cls.get_A()
        table_required = cls.str2matrix("""
            0.00000 0.42806 -0.46393 1.00000
            1.00000 0.41311 -0.57313 0.25000
            2.00000 0.38238 -0.36652 0.50000
            3.00000 0.31850 -0.21011 0.50000
            4.00000 0.20937 -0.13838 0.50000
            5.00000 0.12379 -0.35583 0.25000
            6.00000 0.04289 -0.53244 0.50000
            7.00000 0.01098 -0.86649 0.50000
            8.00000 0.00566 -1.65798 0.50000
            9.00000 0.00558 -2.13212 0.25000
            10.00000 0.00557 -2.49020 0.25000
            11.00000 0.00557 -2.84585 0.25000
            12.00000 0.00557 -3.20320 0.25000
            13.00000 0.00557 -3.56143 0.25000
            14.00000 0.00557 -3.92005 0.25000
            15.00000 0.00557 -4.27885 0.25000
            16.00000 0.00557 -4.63772 0.25000
            17.00000 0.00557 -4.99663 0.25000
            18.00000 0.00557 -5.35555 0.25000
            """)
        L_required = cls.str2matrix("""
            0.891822 0.056015
            0.953680 -0.023246
            0.929150 -0.046503
            0.876683 0.033658
            0.013701 0.925000
            -0.017265 0.821253
            -0.052445 0.764953
            0.085890 0.683115
            """)
        return A, table_required, L_required

    @classmethod
    def get_biquartimin_example(cls):
        """Return (A, expected iteration table, expected loadings) for biquartimin."""
        A = cls.get_A()
        table_required = cls.str2matrix("""
            0.00000 0.21632 -0.54955 1.00000
            1.00000 0.19519 -0.46174 0.50000
            2.00000 0.09479 -0.16365 1.00000
            3.00000 -0.06302 -0.32096 0.50000
            4.00000 -0.21304 -0.46562 1.00000
            5.00000 -0.33199 -0.33287 1.00000
            6.00000 -0.35108 -0.63990 0.12500
            7.00000 -0.35543 -1.20916 0.12500
            8.00000 -0.35568 -2.61213 0.12500
            9.00000 -0.35568 -2.97910 0.06250
            10.00000 -0.35568 -3.32645 0.06250
            11.00000 -0.35568 -3.66021 0.06250
            12.00000 -0.35568 -3.98564 0.06250
            13.00000 -0.35568 -4.30635 0.06250
            14.00000 -0.35568 -4.62451 0.06250
            15.00000 -0.35568 -4.94133 0.06250
            16.00000 -0.35568 -5.25745 0.06250
            """)
        L_required = cls.str2matrix("""
            1.01753 -0.13657
            1.11338 -0.24643
            1.09200 -0.26890
            1.00676 -0.16010
            -0.26534 1.11371
            -0.26972 0.99553
            -0.29341 0.93561
            -0.10806 0.80513
            """)
        return A, table_required, L_required

    @classmethod
    def get_biquartimin_example_derivative_free(cls):
        """As get_biquartimin_example, but expectations for the derivative-free path."""
        A = cls.get_A()
        table_required = cls.str2matrix("""
            0.00000 0.21632 -0.54955 1.00000
            1.00000 0.19519 -0.46174 0.50000
            2.00000 0.09479 -0.16365 1.00000
            3.00000 -0.06302 -0.32096 0.50000
            4.00000 -0.21304 -0.46562 1.00000
            5.00000 -0.33199 -0.33287 1.00000
            6.00000 -0.35108 -0.63990 0.12500
            7.00000 -0.35543 -1.20916 0.12500
            8.00000 -0.35568 -2.61213 0.12500
            9.00000 -0.35568 -2.97910 0.06250
            10.00000 -0.35568 -3.32645 0.06250
            11.00000 -0.35568 -3.66021 0.06250
            12.00000 -0.35568 -3.98564 0.06250
            13.00000 -0.35568 -4.30634 0.06250
            14.00000 -0.35568 -4.62451 0.06250
            15.00000 -0.35568 -4.94133 0.06250
            16.00000 -0.35568 -6.32435 0.12500
            """)
        L_required = cls.str2matrix("""
            1.01753 -0.13657
            1.11338 -0.24643
            1.09200 -0.26890
            1.00676 -0.16010
            -0.26534 1.11371
            -0.26972 0.99553
            -0.29342 0.93561
            -0.10806 0.80513
            """)
        return A, table_required, L_required

    @classmethod
    def get_quartimax_example_derivative_free(cls):
        """Return (A, expected iteration table, expected loadings) for quartimax."""
        A = cls.get_A()
        table_required = cls.str2matrix("""
            0.00000 -0.72073 -0.65498 1.00000
            1.00000 -0.88561 -0.34614 2.00000
            2.00000 -1.01992 -1.07152 1.00000
            3.00000 -1.02237 -1.51373 0.50000
            4.00000 -1.02269 -1.96205 0.50000
            5.00000 -1.02273 -2.41116 0.50000
            6.00000 -1.02273 -2.86037 0.50000
            7.00000 -1.02273 -3.30959 0.50000
            8.00000 -1.02273 -3.75881 0.50000
            9.00000 -1.02273 -4.20804 0.50000
            10.00000 -1.02273 -4.65726 0.50000
            11.00000 -1.02273 -5.10648 0.50000
            """)
        L_required = cls.str2matrix("""
            0.89876 0.19482
            0.93394 0.12974
            0.90213 0.10386
            0.87651 0.17128
            0.31558 0.87647
            0.25113 0.77349
            0.19801 0.71468
            0.30786 0.65933
            """)
        return A, table_required, L_required

    def test_orthomax(self):
        """
        Quartimax example
        http://www.stat.ucla.edu/research/gpa
        """
        A = self.get_A()
        vgQ = lambda L=None, A=None, T=None: orthomax_objective(
            L=L, A=A, T=T, gamma=0, return_gradient=True)
        L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='orthogonal')
        table_required = self.str2matrix("""
            0.00000 -0.72073 -0.65498 1.00000
            1.00000 -0.88561 -0.34614 2.00000
            2.00000 -1.01992 -1.07152 1.00000
            3.00000 -1.02237 -1.51373 0.50000
            4.00000 -1.02269 -1.96205 0.50000
            5.00000 -1.02273 -2.41116 0.50000
            6.00000 -1.02273 -2.86037 0.50000
            7.00000 -1.02273 -3.30959 0.50000
            8.00000 -1.02273 -3.75881 0.50000
            9.00000 -1.02273 -4.20804 0.50000
            10.00000 -1.02273 -4.65726 0.50000
            11.00000 -1.02273 -5.10648 0.50000
            """)
        L_required = self.str2matrix("""
            0.89876 0.19482
            0.93394 0.12974
            0.90213 0.10386
            0.87651 0.17128
            0.31558 0.87647
            0.25113 0.77349
            0.19801 0.71468
            0.30786 0.65933
            """)
        self.assertTrue(np.allclose(table, table_required, atol=1e-05))
        self.assertTrue(np.allclose(L, L_required, atol=1e-05))
        # oblimin criterion gives same result
        vgQ = lambda L=None, A=None, T=None: oblimin_objective(
            L=L, A=A, T=T, gamma=0, rotation_method='orthogonal',
            return_gradient=True)
        L_oblimin, phi2, T2, table2 = GPA(A, vgQ=vgQ,
                                          rotation_method='orthogonal')
        self.assertTrue(np.allclose(L, L_oblimin, atol=1e-05))
        # derivative free quartimax
        out = self.get_quartimax_example_derivative_free()
        A, table_required, L_required = out
        ff = lambda L=None, A=None, T=None: orthomax_objective(
            L=L, A=A, T=T, gamma=0, return_gradient=False)
        L, phi, T, table = GPA(A, ff=ff, rotation_method='orthogonal')
        self.assertTrue(np.allclose(table, table_required, atol=1e-05))
        self.assertTrue(np.allclose(L, L_required, atol=1e-05))

    def test_equivalence_orthomax_oblimin(self):
        """
        These criteria should be equivalent when restricted to orthogonal
        rotation.
        See Hartman 1976 page 299.
        """
        A = self.get_A()
        gamma = 0  # quartimax
        vgQ = lambda L=None, A=None, T=None: orthomax_objective(
            L=L, A=A, T=T, gamma=gamma, return_gradient=True)
        L_orthomax, phi, T, table = GPA(
            A, vgQ=vgQ, rotation_method='orthogonal')
        vgQ = lambda L=None, A=None, T=None: oblimin_objective(
            L=L, A=A, T=T, gamma=gamma, rotation_method='orthogonal',
            return_gradient=True)
        L_oblimin, phi2, T2, table2 = GPA(A, vgQ=vgQ,
                                          rotation_method='orthogonal')
        self.assertTrue(np.allclose(L_orthomax, L_oblimin, atol=1e-05))
        gamma = 1  # varimax
        vgQ = lambda L=None, A=None, T=None: orthomax_objective(
            L=L, A=A, T=T, gamma=gamma, return_gradient=True)
        L_orthomax, phi, T, table = GPA(
            A, vgQ=vgQ, rotation_method='orthogonal')
        vgQ = lambda L=None, A=None, T=None: oblimin_objective(
            L=L, A=A, T=T, gamma=gamma, rotation_method='orthogonal',
            return_gradient=True)
        L_oblimin, phi2, T2, table2 = GPA(
            A, vgQ=vgQ, rotation_method='orthogonal')
        self.assertTrue(np.allclose(L_orthomax, L_oblimin, atol=1e-05))

    def test_orthogonal_target(self):
        """
        Rotation towards target matrix example
        http://www.stat.ucla.edu/research/gpa
        """
        A = self.get_A()
        H = self.str2matrix("""
            .8 -.3
            .8 -.4
            .7 -.4
            .9 -.4
            .8 .5
            .6 .4
            .5 .4
            .6 .3
            """)
        vgQ = lambda L=None, A=None, T=None: vgQ_target(H, L=L, A=A, T=T)
        L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='orthogonal')
        table_required = self.str2matrix("""
            0.00000 0.05925 -0.61244 1.00000
            1.00000 0.05444 -1.14701 0.12500
            2.00000 0.05403 -1.68194 0.12500
            3.00000 0.05399 -2.21689 0.12500
            4.00000 0.05399 -2.75185 0.12500
            5.00000 0.05399 -3.28681 0.12500
            6.00000 0.05399 -3.82176 0.12500
            7.00000 0.05399 -4.35672 0.12500
            8.00000 0.05399 -4.89168 0.12500
            9.00000 0.05399 -5.42664 0.12500
            """)
        L_required = self.str2matrix("""
            0.84168 -0.37053
            0.83191 -0.44386
            0.79096 -0.44611
            0.80985 -0.37650
            0.77040 0.52371
            0.65774 0.47826
            0.58020 0.46189
            0.63656 0.35255
            """)
        self.assertTrue(np.allclose(table, table_required, atol=1e-05))
        self.assertTrue(np.allclose(L, L_required, atol=1e-05))
        # gradient-based and derivative-free paths must agree
        ff = lambda L=None, A=None, T=None: ff_target(H, L=L, A=A, T=T)
        L2, phi, T2, table = GPA(A, ff=ff, rotation_method='orthogonal')
        self.assertTrue(np.allclose(L, L2, atol=1e-05))
        self.assertTrue(np.allclose(T, T2, atol=1e-05))
        # same agreement for oblique rotation
        vgQ = lambda L=None, A=None, T=None: vgQ_target(
            H, L=L, A=A, T=T, rotation_method='oblique')
        L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='oblique')
        ff = lambda L=None, A=None, T=None: ff_target(
            H, L=L, A=A, T=T, rotation_method='oblique')
        L2, phi, T2, table = GPA(A, ff=ff, rotation_method='oblique')
        self.assertTrue(np.allclose(L, L2, atol=1e-05))
        self.assertTrue(np.allclose(T, T2, atol=1e-05))

    def test_orthogonal_partial_target(self):
        """
        Rotation towards target matrix example
        http://www.stat.ucla.edu/research/gpa
        """
        A = self.get_A()
        H = self.str2matrix("""
            .8 -.3
            .8 -.4
            .7 -.4
            .9 -.4
            .8 .5
            .6 .4
            .5 .4
            .6 .3
            """)
        W = self.str2matrix("""
            1 0
            0 1
            0 0
            1 1
            1 0
            1 0
            0 1
            1 0
            """)
        vgQ = lambda L=None, A=None, T=None: vgQ_partial_target(
            H, W, L=L, A=A, T=T)
        L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='orthogonal')
        table_required = self.str2matrix("""
            0.00000 0.02559 -0.84194 1.00000
            1.00000 0.02203 -1.27116 0.25000
            2.00000 0.02154 -1.71198 0.25000
            3.00000 0.02148 -2.15713 0.25000
            4.00000 0.02147 -2.60385 0.25000
            5.00000 0.02147 -3.05114 0.25000
            6.00000 0.02147 -3.49863 0.25000
            7.00000 0.02147 -3.94619 0.25000
            8.00000 0.02147 -4.39377 0.25000
            9.00000 0.02147 -4.84137 0.25000
            10.00000 0.02147 -5.28897 0.25000
            """)
        L_required = self.str2matrix("""
            0.84526 -0.36228
            0.83621 -0.43571
            0.79528 -0.43836
            0.81349 -0.36857
            0.76525 0.53122
            0.65303 0.48467
            0.57565 0.46754
            0.63308 0.35876
            """)
        self.assertTrue(np.allclose(table, table_required, atol=1e-05))
        self.assertTrue(np.allclose(L, L_required, atol=1e-05))
        # derivative-free path must agree with the gradient-based one
        ff = lambda L=None, A=None, T=None: ff_partial_target(
            H, W, L=L, A=A, T=T)
        L2, phi, T2, table = GPA(A, ff=ff, rotation_method='orthogonal')
        self.assertTrue(np.allclose(L, L2, atol=1e-05))
        self.assertTrue(np.allclose(T, T2, atol=1e-05))

    def test_oblimin(self):
        """Quartimin and biquartimin rotations, gradient and derivative-free."""
        # quartimin
        A, table_required, L_required = self.get_quartimin_example()
        vgQ = lambda L=None, A=None, T=None: oblimin_objective(
            L=L, A=A, T=T, gamma=0, rotation_method='oblique')
        L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='oblique')
        self.assertTrue(np.allclose(table, table_required, atol=1e-05))
        self.assertTrue(np.allclose(L, L_required, atol=1e-05))
        # quartimin derivative free
        ff = lambda L=None, A=None, T=None: oblimin_objective(
            L=L, A=A, T=T, gamma=0, rotation_method='oblique',
            return_gradient=False)
        L, phi, T, table = GPA(A, ff=ff, rotation_method='oblique')
        self.assertTrue(np.allclose(L, L_required, atol=1e-05))
        self.assertTrue(np.allclose(table, table_required, atol=1e-05))
        # biquartimin
        A, table_required, L_required = self.get_biquartimin_example()
        vgQ = lambda L=None, A=None, T=None: oblimin_objective(
            L=L, A=A, T=T, gamma=1/2, rotation_method='oblique')
        L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='oblique')
        self.assertTrue(np.allclose(table, table_required, atol=1e-05))
        self.assertTrue(np.allclose(L, L_required, atol=1e-05))
        # biquartimin derivative free
        out = self.get_biquartimin_example_derivative_free()
        A, table_required, L_required = out
        ff = lambda L=None, A=None, T=None: oblimin_objective(
            L=L, A=A, T=T, gamma=1/2, rotation_method='oblique',
            return_gradient=False)
        L, phi, T, table = GPA(A, ff=ff, rotation_method='oblique')
        self.assertTrue(np.allclose(L, L_required, atol=1e-05))
        self.assertTrue(np.allclose(table, table_required, atol=1e-05))

    def test_CF(self):
        """Crawford-Ferguson family: quartimax (kappa=0) and varimax (kappa=1/p)."""
        # quartimax
        out = self.get_quartimax_example_derivative_free()
        A, table_required, L_required = out
        vgQ = lambda L=None, A=None, T=None: CF_objective(
            L=L, A=A, T=T, kappa=0, rotation_method='orthogonal',
            return_gradient=True)
        L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='orthogonal')
        self.assertTrue(np.allclose(L, L_required, atol=1e-05))
        # quartimax derivative free
        ff = lambda L=None, A=None, T=None: CF_objective(
            L=L, A=A, T=T, kappa=0, rotation_method='orthogonal',
            return_gradient=False)
        L, phi, T, table = GPA(A, ff=ff, rotation_method='orthogonal')
        self.assertTrue(np.allclose(L, L_required, atol=1e-05))
        # varimax: CF with kappa=1/p must match orthomax with gamma=1
        p, k = A.shape
        vgQ = lambda L=None, A=None, T=None: orthomax_objective(
            L=L, A=A, T=T, gamma=1, return_gradient=True)
        L_vm, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='orthogonal')
        vgQ = lambda L=None, A=None, T=None: CF_objective(
            L=L, A=A, T=T, kappa=1/p, rotation_method='orthogonal',
            return_gradient=True)
        L_CF, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='orthogonal')
        ff = lambda L=None, A=None, T=None: CF_objective(
            L=L, A=A, T=T, kappa=1/p, rotation_method='orthogonal',
            return_gradient=False)
        L_CF_df, phi, T, table = GPA(A, ff=ff, rotation_method='orthogonal')
        self.assertTrue(np.allclose(L_vm, L_CF, atol=1e-05))
        self.assertTrue(np.allclose(L_CF, L_CF_df, atol=1e-05))
class TestWrappers(unittest.TestCase):
    """End-to-end tests for the high-level rotate_factors wrapper."""

    @staticmethod
    def str2matrix(A):
        """Parse a whitespace-separated multiline string into a float ndarray.

        Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
        the builtin ``float`` is the documented, equivalent replacement.
        """
        A = A.lstrip().rstrip().split('\n')
        A = np.array([row.split() for row in A]).astype(float)
        return A

    def get_A(self):
        """Return the shared 8x2 example loading matrix."""
        return self.str2matrix("""
            .830 -.396
            .818 -.469
            .777 -.470
            .798 -.401
            .786 .500
            .672 .458
            .594 .444
            .647 .333
            """)

    def get_H(self):
        """Return the 8x2 target matrix used by (partial) target rotation."""
        return self.str2matrix("""
            .8 -.3
            .8 -.4
            .7 -.4
            .9 -.4
            .8 .5
            .6 .4
            .5 .4
            .6 .3
            """)

    def get_W(self):
        """Return the 0/1 weight matrix used by partial target rotation."""
        return self.str2matrix("""
            1 0
            0 1
            0 0
            1 1
            1 0
            1 0
            0 1
            1 0
            """)

    def _test_template(self, method, *method_args, **algorithms):
        """Assert that two algorithms give the same rotation for `method`.

        Keyword arguments ``algorithm1`` (default ``'gpa'``) and
        ``algorithm2`` (default ``'gpa_der_free'``) select the two
        implementations compared.

        Fix: the original looked up the misspelled key ``'algorithm`'``
        (stray backtick) and discarded the value in the else branch, so an
        explicit ``algorithm2=`` argument (e.g. ``'analytic'``) was silently
        ignored and ``'gpa_der_free'`` was always used; it could also leave
        ``algorithm2`` unbound.
        """
        A = self.get_A()
        algorithm1 = algorithms.get('algorithm1', 'gpa')
        algorithm2 = algorithms.get('algorithm2', 'gpa_der_free')
        L1, T1 = rotate_factors(A, method, *method_args, algorithm=algorithm1)
        L2, T2 = rotate_factors(A, method, *method_args, algorithm=algorithm2)
        self.assertTrue(np.allclose(L1, L2, atol=1e-5))
        self.assertTrue(np.allclose(T1, T2, atol=1e-5))

    def test_methods(self):
        """
        Quartimax derivative free example
        http://www.stat.ucla.edu/research/gpa
        """
        # orthomax, oblimin and CF are tested indirectly
        methods = ['quartimin', 'biquartimin',
                   'quartimax', 'biquartimax', 'varimax', 'equamax',
                   'parsimax', 'parsimony',
                   'target', 'partial_target']
        for method in methods:
            method_args = []
            if method == 'target':
                method_args = [self.get_H(), 'orthogonal']
                self._test_template(method, *method_args)
                method_args = [self.get_H(), 'oblique']
                self._test_template(method, *method_args)
                method_args = [self.get_H(), 'orthogonal']
                self._test_template(method, *method_args,
                                    algorithm2='analytic')
            elif method == 'partial_target':
                method_args = [self.get_H(), self.get_W()]
                self._test_template(method, *method_args)
| []
| []
| []
| [] | [] | python | null | null | null |
vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go | package installerpod
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/klog"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/openshift/library-go/pkg/config/client"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceread"
"github.com/openshift/library-go/pkg/operator/resource/retry"
)
// InstallOptions carries the configuration for the installer pod: where to
// read revisioned resources from (namespace + name prefixes), where to write
// them on disk, and how to mutate the static pod before creation.
type InstallOptions struct {
	// TODO replace with genericclioptions
	KubeConfig string
	KubeClient kubernetes.Interface

	// Revision identifies this particular installation instance, e.g. a
	// counter or a hash (see the --revision flag description).
	Revision string
	NodeName string
	// Namespace is where all resources are retrieved from and the static pod
	// is created in.
	Namespace string

	// PodConfigMapNamePrefix names the configmap that contains the pod to be
	// created.
	PodConfigMapNamePrefix string
	SecretNamePrefixes     []string
	OptionalSecretNamePrefixes []string
	ConfigMapNamePrefixes      []string
	OptionalConfigMapNamePrefixes []string

	CertSecretNames                   []string
	OptionalCertSecretNamePrefixes    []string
	CertConfigMapNamePrefixes         []string
	OptionalCertConfigMapNamePrefixes []string

	CertDir string
	// ResourceDir holds all files supporting the static pod manifest.
	ResourceDir string
	// PodManifestDir is where the static pod manifest itself is written.
	PodManifestDir string

	// Timeout bounds the copying work (see --timeout-duration).
	Timeout time.Duration

	// PodMutationFns are applied to the pod before it is created.
	PodMutationFns []PodMutationFunc
}
// PodMutationFunc is a function that has a chance at changing the pod before it is created
type PodMutationFunc func(pod *corev1.Pod) error
// NewInstallOptions returns an empty InstallOptions to be populated via flags.
func NewInstallOptions() *InstallOptions {
	return &InstallOptions{}
}
// WithPodMutationFn appends a pod mutation hook and returns the options for
// chaining.
func (o *InstallOptions) WithPodMutationFn(podMutationFn PodMutationFunc) *InstallOptions {
	o.PodMutationFns = append(o.PodMutationFns, podMutationFn)
	return o
}
// NewInstaller returns the "installer" cobra command, which completes and
// validates the options and then runs the installation under the configured
// timeout. Any failure during Complete/Validate/Run is fatal.
func NewInstaller() *cobra.Command {
	o := NewInstallOptions()

	cmd := &cobra.Command{
		Use:   "installer",
		Short: "Install static pod and related resources",
		Run: func(cmd *cobra.Command, args []string) {
			klog.V(1).Info(cmd.Flags())
			klog.V(1).Info(spew.Sdump(o))

			if err := o.Complete(); err != nil {
				klog.Fatal(err)
			}
			if err := o.Validate(); err != nil {
				klog.Fatal(err)
			}

			// Bound the whole run by the configured timeout.
			ctx, cancel := context.WithTimeout(context.TODO(), o.Timeout)
			defer cancel()
			if err := o.Run(ctx); err != nil {
				klog.Fatal(err)
			}
		},
	}

	o.AddFlags(cmd.Flags())

	return cmd
}
// AddFlags registers all installer flags on the given flag set.
func (o *InstallOptions) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "kubeconfig file or empty")
	fs.StringVar(&o.Revision, "revision", o.Revision, "identifier for this particular installation instance. For example, a counter or a hash")
	fs.StringVar(&o.Namespace, "namespace", o.Namespace, "namespace to retrieve all resources from and create the static pod in")
	fs.StringVar(&o.PodConfigMapNamePrefix, "pod", o.PodConfigMapNamePrefix, "name of configmap that contains the pod to be created")
	fs.StringSliceVar(&o.SecretNamePrefixes, "secrets", o.SecretNamePrefixes, "list of secret names to be included")
	fs.StringSliceVar(&o.ConfigMapNamePrefixes, "configmaps", o.ConfigMapNamePrefixes, "list of configmaps to be included")
	fs.StringSliceVar(&o.OptionalSecretNamePrefixes, "optional-secrets", o.OptionalSecretNamePrefixes, "list of optional secret names to be included")
	fs.StringSliceVar(&o.OptionalConfigMapNamePrefixes, "optional-configmaps", o.OptionalConfigMapNamePrefixes, "list of optional configmaps to be included")
	fs.StringVar(&o.ResourceDir, "resource-dir", o.ResourceDir, "directory for all files supporting the static pod manifest")
	fs.StringVar(&o.PodManifestDir, "pod-manifest-dir", o.PodManifestDir, "directory for the static pod manifest")
	fs.DurationVar(&o.Timeout, "timeout-duration", 120*time.Second, "maximum time in seconds to wait for the copying to complete (default: 2m)")

	fs.StringSliceVar(&o.CertSecretNames, "cert-secrets", o.CertSecretNames, "list of secret names to be included")
	fs.StringSliceVar(&o.CertConfigMapNamePrefixes, "cert-configmaps", o.CertConfigMapNamePrefixes, "list of configmaps to be included")
	fs.StringSliceVar(&o.OptionalCertSecretNamePrefixes, "optional-cert-secrets", o.OptionalCertSecretNamePrefixes, "list of optional secret names to be included")
	fs.StringSliceVar(&o.OptionalCertConfigMapNamePrefixes, "optional-cert-configmaps", o.OptionalCertConfigMapNamePrefixes, "list of optional configmaps to be included")
	fs.StringVar(&o.CertDir, "cert-dir", o.CertDir, "directory for all certs")
}
// Complete builds the Kubernetes client from --kubeconfig (or the in-cluster
// config) and fills in fields not supplied via flags.
func (o *InstallOptions) Complete() error {
	clientConfig, err := client.GetKubeConfigOrInClusterConfig(o.KubeConfig, nil)
	if err != nil {
		return err
	}

	// Use protobuf to fetch configmaps and secrets and create pods.
	protoConfig := rest.CopyConfig(clientConfig)
	protoConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
	protoConfig.ContentType = "application/vnd.kubernetes.protobuf"

	o.KubeClient, err = kubernetes.NewForConfig(protoConfig)
	if err != nil {
		return err
	}

	// set via downward API
	o.NodeName = os.Getenv("NODE_NAME")
	return nil
}
// Validate checks that all required options were supplied before Run is
// invoked, returning the first problem found.
func (o *InstallOptions) Validate() error {
	switch {
	case len(o.Revision) == 0:
		return fmt.Errorf("--revision is required")
	case len(o.NodeName) == 0:
		return fmt.Errorf("env var NODE_NAME is required")
	case len(o.Namespace) == 0:
		return fmt.Errorf("--namespace is required")
	case len(o.PodConfigMapNamePrefix) == 0:
		return fmt.Errorf("--pod is required")
	case len(o.ConfigMapNamePrefixes) == 0:
		return fmt.Errorf("--configmaps is required")
	case o.Timeout == 0:
		return fmt.Errorf("--timeout-duration cannot be 0")
	case o.KubeClient == nil:
		return fmt.Errorf("missing client")
	}
	return nil
}
// nameFor appends the revision suffix to a resource name prefix.
func (o *InstallOptions) nameFor(prefix string) string {
	return prefix + "-" + o.Revision
}
// prefixFor strips the "-<revision>" suffix from a resource name, recovering
// the prefix that nameFor was given.
//
// Unlike the previous blind slice, strings.TrimSuffix cannot panic when the
// name is shorter than the suffix, and it leaves a name without the expected
// suffix untouched instead of truncating it arbitrarily.
func (o *InstallOptions) prefixFor(name string) string {
	return strings.TrimSuffix(name, "-"+o.Revision)
}
// copySecretsAndConfigMaps fetches the given secrets and configmaps from the
// apiserver and writes their keys as files under
// resourceDir/secrets/<name>/ and resourceDir/configmaps/<name>/.
// Optional resources that cannot be fetched are skipped (the get helpers
// return nil for them). When prefixed is true the revision suffix is
// stripped from each resource name before it is used as a directory name.
func (o *InstallOptions) copySecretsAndConfigMaps(ctx context.Context, resourceDir string,
	secretNames, optionalSecretNames, configNames, optionalConfigNames sets.String, prefixed bool) error {
	klog.Infof("Creating target resource directory %q ...", resourceDir)
	if err := os.MkdirAll(resourceDir, 0755); err != nil && !os.IsExist(err) {
		return err
	}

	// Gather secrets. If we get API server error, retry getting until we hit the timeout.
	// Retrying will prevent temporary API server blips or networking issues.
	// We return when all "required" secrets are gathered, optional secrets are not checked.
	klog.Infof("Getting secrets ...")
	secrets := []*corev1.Secret{}
	for _, name := range append(secretNames.List(), optionalSecretNames.List()...) {
		secret, err := o.getSecretWithRetry(ctx, name, optionalSecretNames.Has(name))
		if err != nil {
			return err
		}
		// secret is nil means the secret was optional and we failed to get it.
		if secret != nil {
			secrets = append(secrets, o.substituteSecret(secret))
		}
	}

	klog.Infof("Getting config maps ...")
	configs := []*corev1.ConfigMap{}
	for _, name := range append(configNames.List(), optionalConfigNames.List()...) {
		config, err := o.getConfigMapWithRetry(ctx, name, optionalConfigNames.Has(name))
		if err != nil {
			return err
		}
		// config is nil means the config was optional and we failed to get it.
		if config != nil {
			configs = append(configs, o.substituteConfigMap(config))
		}
	}

	for _, secret := range secrets {
		secretBaseName := secret.Name
		if prefixed {
			secretBaseName = o.prefixFor(secret.Name)
		}
		contentDir := path.Join(resourceDir, "secrets", secretBaseName)
		klog.Infof("Creating directory %q ...", contentDir)
		if err := os.MkdirAll(contentDir, 0755); err != nil {
			return err
		}
		for filename, content := range secret.Data {
			klog.Infof("Writing secret manifest %q ...", path.Join(contentDir, filename))
			// Secret files are owner-only; shell scripts additionally need exec.
			filePerms := os.FileMode(0600)
			if strings.HasSuffix(filename, ".sh") {
				filePerms = 0700
			}
			if err := ioutil.WriteFile(path.Join(contentDir, filename), content, filePerms); err != nil {
				return err
			}
		}
	}
	for _, configmap := range configs {
		configMapBaseName := configmap.Name
		if prefixed {
			configMapBaseName = o.prefixFor(configmap.Name)
		}
		contentDir := path.Join(resourceDir, "configmaps", configMapBaseName)
		klog.Infof("Creating directory %q ...", contentDir)
		if err := os.MkdirAll(contentDir, 0755); err != nil {
			return err
		}
		for filename, content := range configmap.Data {
			klog.Infof("Writing config file %q ...", path.Join(contentDir, filename))
			// Config files are world-readable; shell scripts additionally need exec.
			filePerms := os.FileMode(0644)
			if strings.HasSuffix(filename, ".sh") {
				filePerms = 0755
			}
			if err := ioutil.WriteFile(path.Join(contentDir, filename), []byte(content), filePerms); err != nil {
				return err
			}
		}
	}
	return nil
}
// copyContent writes everything needed to run the static pod: the revision's
// secrets/configmaps under ResourceDir, the current cert material under
// CertDir (if configured), and finally the (optionally mutated) pod manifest
// into both ResourceDir and PodManifestDir.
func (o *InstallOptions) copyContent(ctx context.Context) error {
	resourceDir := path.Join(o.ResourceDir, o.nameFor(o.PodConfigMapNamePrefix))
	klog.Infof("Creating target resource directory %q ...", resourceDir)
	if err := os.MkdirAll(resourceDir, 0755); err != nil && !os.IsExist(err) {
		return err
	}

	// Resolve prefixes to the revision-suffixed names actually stored in the API.
	secretPrefixes := sets.NewString()
	optionalSecretPrefixes := sets.NewString()
	configPrefixes := sets.NewString()
	optionalConfigPrefixes := sets.NewString()
	for _, prefix := range o.SecretNamePrefixes {
		secretPrefixes.Insert(o.nameFor(prefix))
	}
	for _, prefix := range o.OptionalSecretNamePrefixes {
		optionalSecretPrefixes.Insert(o.nameFor(prefix))
	}
	for _, prefix := range o.ConfigMapNamePrefixes {
		configPrefixes.Insert(o.nameFor(prefix))
	}
	for _, prefix := range o.OptionalConfigMapNamePrefixes {
		optionalConfigPrefixes.Insert(o.nameFor(prefix))
	}
	if err := o.copySecretsAndConfigMaps(ctx, resourceDir, secretPrefixes, optionalSecretPrefixes, configPrefixes, optionalConfigPrefixes, true); err != nil {
		return err
	}

	// Copy the current state of the certs as we see them. This primes us once and allows a kube-apiserver to start once
	if len(o.CertDir) > 0 {
		if err := o.copySecretsAndConfigMaps(ctx, o.CertDir,
			sets.NewString(o.CertSecretNames...),
			sets.NewString(o.OptionalCertSecretNamePrefixes...),
			sets.NewString(o.CertConfigMapNamePrefixes...),
			sets.NewString(o.OptionalCertConfigMapNamePrefixes...),
			false,
		); err != nil {
			return err
		}
	}

	// Gather pod yaml from config map
	var podContent string
	err := retry.RetryOnConnectionErrors(ctx, func(ctx context.Context) (bool, error) {
		klog.Infof("Getting pod configmaps/%s -n %s", o.nameFor(o.PodConfigMapNamePrefix), o.Namespace)
		podConfigMap, err := o.KubeClient.CoreV1().ConfigMaps(o.Namespace).Get(o.nameFor(o.PodConfigMapNamePrefix), metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if _, exists := podConfigMap.Data["pod.yaml"]; !exists {
			// A missing key is permanent, so stop retrying (true) with an error.
			return true, fmt.Errorf("required 'pod.yaml' key does not exist in configmap")
		}
		podConfigMap = o.substituteConfigMap(podConfigMap)
		podContent = podConfigMap.Data["pod.yaml"]
		return true, nil
	})
	if err != nil {
		return err
	}

	// Write secrets, config maps and pod to disk
	// This does not need timeout, instead we should fail hard when we are not able to write.
	podFileName := o.PodConfigMapNamePrefix + ".yaml"
	klog.Infof("Writing pod manifest %q ...", path.Join(resourceDir, podFileName))
	if err := ioutil.WriteFile(path.Join(resourceDir, podFileName), []byte(podContent), 0644); err != nil {
		return err
	}

	// copy static pod
	klog.Infof("Creating directory for static pod manifest %q ...", o.PodManifestDir)
	if err := os.MkdirAll(o.PodManifestDir, 0755); err != nil {
		return err
	}

	// Apply registered mutations before the manifest is handed to the kubelet.
	for _, fn := range o.PodMutationFns {
		klog.V(2).Infof("Customizing static pod ...")
		pod := resourceread.ReadPodV1OrDie([]byte(podContent))
		if err := fn(pod); err != nil {
			return err
		}
		podContent = resourceread.WritePodV1OrDie(pod)
	}

	klog.Infof("Writing static pod manifest %q ...\n%s", path.Join(o.PodManifestDir, podFileName), podContent)
	if err := ioutil.WriteFile(path.Join(o.PodManifestDir, podFileName), []byte(podContent), 0644); err != nil {
		return err
	}
	return nil
}
// substituteConfigMap returns a deep copy of obj with the placeholder tokens
// REVISION, NODE_NAME and NODE_ENVVAR_NAME expanded in every data value.
func (o *InstallOptions) substituteConfigMap(obj *corev1.ConfigMap) *corev1.ConfigMap {
	ret := obj.DeepCopy()
	nodeEnvVarName := strings.ReplaceAll(strings.ReplaceAll(o.NodeName, "-", "_"), ".", "_")
	for key, value := range obj.Data {
		value = strings.ReplaceAll(value, "REVISION", o.Revision)
		value = strings.ReplaceAll(value, "NODE_NAME", o.NodeName)
		value = strings.ReplaceAll(value, "NODE_ENVVAR_NAME", nodeEnvVarName)
		ret.Data[key] = value
	}
	return ret
}
// substituteSecret returns a deep copy of obj with the placeholder tokens
// REVISION, NODE_NAME and NODE_ENVVAR_NAME expanded in every data value.
func (o *InstallOptions) substituteSecret(obj *corev1.Secret) *corev1.Secret {
	ret := obj.DeepCopy()
	nodeEnvVarName := strings.ReplaceAll(strings.ReplaceAll(o.NodeName, "-", "_"), ".", "_")
	for key, value := range obj.Data {
		expanded := strings.ReplaceAll(string(value), "REVISION", o.Revision)
		expanded = strings.ReplaceAll(expanded, "NODE_NAME", o.NodeName)
		expanded = strings.ReplaceAll(expanded, "NODE_ENVVAR_NAME", nodeEnvVarName)
		ret.Data[key] = []byte(expanded)
	}
	return ret
}
// Run resolves an event target (falling back to the namespace on failure),
// copies all content to disk via copyContent, and records the outcome as an
// event.
func (o *InstallOptions) Run(ctx context.Context) error {
	var eventTarget *corev1.ObjectReference

	err := retry.RetryOnConnectionErrors(ctx, func(context.Context) (bool, error) {
		var clientErr error
		eventTarget, clientErr = events.GetControllerReferenceForCurrentPod(o.KubeClient, o.Namespace, nil)
		if clientErr != nil {
			return false, clientErr
		}
		return true, nil
	})
	if err != nil {
		// Non-fatal: events are still recorded, just without an owner reference.
		klog.Warningf("unable to get owner reference (falling back to namespace): %v", err)
	}
	recorder := events.NewRecorder(o.KubeClient.CoreV1().Events(o.Namespace), "static-pod-installer", eventTarget)

	if err := o.copyContent(ctx); err != nil {
		recorder.Warningf("StaticPodInstallerFailed", "Installing revision %s: %v", o.Revision, err)
		return fmt.Errorf("failed to copy: %v", err)
	}

	recorder.Eventf("StaticPodInstallerCompleted", "Successfully installed revision %s", o.Revision)
	return nil
}
| [
"\"NODE_NAME\""
]
| []
| [
"NODE_NAME"
]
| [] | ["NODE_NAME"] | go | 1 | 0 | |
lib/setup.py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
    """Return the absolute path of *name* found on *path*, or None.

    *path* is an os.pathsep-separated search path (e.g. the value of $PATH).
    """
    # adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    # 'directory' instead of 'dir' to avoid shadowing the builtin dir().
    for directory in path.split(os.pathsep):
        binpath = pjoin(directory, name)
        if os.path.exists(binpath):
            return os.path.abspath(binpath)
    return None
def locate_cuda():
    """Locate the CUDA environment on the system

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDAHOME env variable. If not found, everything
    is based on finding 'nvcc' in the PATH.

    Raises EnvironmentError when nvcc cannot be found or an expected
    directory is missing.
    """
    # first check if the CUDAHOME env variable is in use
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # otherwise, search the PATH for NVCC
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                                   'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        # CUDA home is two levels up from <home>/bin/nvcc.
        home = os.path.dirname(os.path.dirname(nvcc))

    cudaconfig = {'home': home, 'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))

    return cudaconfig
#CUDA = locate_cuda()

# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = np.get_include()
except AttributeError:
    # very old numpy releases exposed get_numpy_include() instead
    numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
    """inject deep into distutils to customize how the dispatch
    to gcc/nvcc works.

    If you subclass UnixCCompiler, it's not trivial to get your subclass
    injected in, and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kindof like a wierd functional
    subclassing going on.

    `self` here is a distutils compiler instance, not a class instance of
    ours; this function monkey-patches its _compile method in place.
    """

    # tell the compiler it can processes .cu
    self.src_extensions.append('.cu')

    # save references to the default compiler_so and _comple methods
    default_compiler_so = self.compiler_so
    # NOTE(review): 'super' here shadows the builtin; it holds the original
    # _compile bound method so the patched version can delegate to it.
    super = self._compile

    # now redefine the _compile method. This gets executed for each
    # object but distutils doesn't have the ability to change compilers
    # based on source extension: we add it.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        print(extra_postargs)
        if os.path.splitext(src)[1] == '.cu':
            # use the cuda for .cu files
            #self.set_executable('compiler_so', CUDA['nvcc'])
            # use only a subset of the extra_postargs, which are 1-1 translated
            # from the extra_compile_args in the Extension class
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']

        super(obj, src, ext, cc_args, postargs, pp_opts)
        # reset the default compiler_so, which we might have changed for cuda
        self.compiler_so = default_compiler_so

    # inject our redefined _compile method into the class
    self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
    """build_ext subclass that patches the compiler for nvcc before building."""

    def build_extensions(self):
        # Monkey-patch the compiler instance, then run the normal build.
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
# Cython extensions to build; the GPU NMS extension is kept below for
# reference but disabled (along with the locate_cuda() call above).
ext_modules = [
    Extension(
        "utils.cython_bbox",
        ["utils/bbox.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
    Extension(
        "nms.cpu_nms",
        ["nms/cpu_nms.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
    #Extension('nms.gpu_nms',
    #    ['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
    #    library_dirs=[CUDA['lib64']],
    #    libraries=['cudart'],
    #    language='c++',
    #    runtime_library_dirs=[CUDA['lib64']],
    #    # this syntax is specific to this build system
    #    # we're only going to use certain compiler args with nvcc and not with gcc
    #    # the implementation of this trick is in customize_compiler() below
    #    extra_compile_args={'gcc': ["-Wno-unused-function"],
    #                        'nvcc': ['-arch=sm_52',
    #                                 '--ptxas-options=-v',
    #                                 '-c',
    #                                 '--compiler-options',
    #                                 "'-fPIC'"]},
    #    include_dirs = [numpy_include, CUDA['include']]
    #)
]

setup(
    name='tf_faster_rcnn',
    ext_modules=ext_modules,
    # inject our custom trigger
    cmdclass={'build_ext': custom_build_ext},
)
| []
| []
| [
"PATH",
"CUDAHOME"
]
| [] | ["PATH", "CUDAHOME"] | python | 2 | 0 | |
CVEGrabber/wsgi.py | """
WSGI config for CVEGrabber project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CVEGrabber.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
providers/oura/oura_test.go | package oura_test
import (
"os"
"testing"
"github.com/kannanr/goth"
"github.com/kannanr/goth/providers/oura"
"github.com/stretchr/testify/assert"
)
// provider builds an oura.Provider from the OURA_KEY/OURA_SECRET env vars
// with a fixed callback URL and the "user" scope.
func provider() *oura.Provider {
	return oura.New(os.Getenv("OURA_KEY"), os.Getenv("OURA_SECRET"), "/foo", "user")
}
// Test_New verifies the constructor copies key, secret, and callback URL
// onto the provider.
func Test_New(t *testing.T) {
	t.Parallel()
	a := assert.New(t)
	p := provider()

	a.Equal(p.ClientKey, os.Getenv("OURA_KEY"))
	a.Equal(p.Secret, os.Getenv("OURA_SECRET"))
	a.Equal(p.CallbackURL, "/foo")
}
// Test_ImplementsProvider checks the provider satisfies the goth.Provider
// interface.
func Test_ImplementsProvider(t *testing.T) {
	t.Parallel()
	a := assert.New(t)
	a.Implements((*goth.Provider)(nil), provider())
}
// Test_BeginAuth verifies the auth URL points at the Oura authorize endpoint.
func Test_BeginAuth(t *testing.T) {
	t.Parallel()
	a := assert.New(t)
	p := provider()
	session, err := p.BeginAuth("test_state")
	s := session.(*oura.Session)
	a.NoError(err)
	a.Contains(s.AuthURL, "https://cloud.ouraring.com/oauth/authorize")
}
// Test_SessionFromJSON verifies a serialized session round-trips its fields.
func Test_SessionFromJSON(t *testing.T) {
	t.Parallel()
	a := assert.New(t)

	p := provider()
	session, err := p.UnmarshalSession(`{"AuthURL":"https://cloud.ouraring.com/oauth/authorize","AccessToken":"1234567890","UserID":"abc"}`)
	a.NoError(err)

	s := session.(*oura.Session)
	a.Equal(s.AuthURL, "https://cloud.ouraring.com/oauth/authorize")
	a.Equal(s.AccessToken, "1234567890")
	a.Equal(s.UserID, "abc")
}
| [
"\"OURA_KEY\"",
"\"OURA_SECRET\"",
"\"OURA_KEY\"",
"\"OURA_SECRET\""
]
| []
| [
"OURA_KEY",
"OURA_SECRET"
]
| [] | ["OURA_KEY", "OURA_SECRET"] | go | 2 | 0 | |
functests/utils/client/clients.go | package client
import (
"os"
perfApi "github.com/openshift-kni/performance-addon-operators/pkg/apis"
configv1 "github.com/openshift/api/config/v1"
clientconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
clientmachineconfigv1 "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/typed/machineconfiguration.openshift.io/v1"
ptpv1 "github.com/openshift/ptp-operator/pkg/client/clientset/versioned/typed/ptp/v1"
sriovk8sv1 "github.com/openshift/sriov-network-operator/pkg/apis/k8s/v1"
sriovv1 "github.com/openshift/sriov-network-operator/pkg/apis/sriovnetwork/v1"
"github.com/golang/glog"
apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/runtime"
discovery "k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes/scheme"
appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
networkv1client "k8s.io/client-go/kubernetes/typed/networking/v1"
rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// Client defines the client set that will be used for testing
var Client *ClientSet

// init eagerly constructs the shared client from the default configuration
// ($KUBECONFIG or in-cluster) so tests can use Client directly.
func init() {
	Client = New("")
}
// ClientSet provides the struct to talk with relevant API; it embeds the
// controller-runtime client plus the typed clientsets the tests need.
type ClientSet struct {
	client.Client
	corev1client.CoreV1Interface
	clientconfigv1.ConfigV1Interface
	clientmachineconfigv1.MachineconfigurationV1Interface
	networkv1client.NetworkingV1Client
	appsv1client.AppsV1Interface
	rbacv1client.RbacV1Interface
	discovery.DiscoveryInterface
	ptpv1.PtpV1Interface
	imagev1client.ImageV1Interface
	// Config is the rest config the embedded clients were built from.
	Config *rest.Config
}
// New returns a *ClientSet built from the given kubeconfig path. When
// kubeconfig is empty it falls back to $KUBECONFIG and then to the
// in-cluster config. It returns nil when no usable configuration or client
// can be built. (The old comment said *ClientBuilder, which was wrong.)
func New(kubeconfig string) *ClientSet {
	var config *rest.Config
	var err error

	if kubeconfig == "" {
		kubeconfig = os.Getenv("KUBECONFIG")
	}

	if kubeconfig != "" {
		glog.V(4).Infof("Loading kube client config from path %q", kubeconfig)
		config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
	} else {
		glog.V(4).Infof("Using in-cluster kube client config")
		config, err = rest.InClusterConfig()
	}
	if err != nil {
		glog.Infof("Failed to init kubernetes client, please check the $KUBECONFIG environment variable")
		return nil
	}

	clientSet := &ClientSet{}
	clientSet.CoreV1Interface = corev1client.NewForConfigOrDie(config)
	clientSet.ConfigV1Interface = clientconfigv1.NewForConfigOrDie(config)
	clientSet.MachineconfigurationV1Interface = clientmachineconfigv1.NewForConfigOrDie(config)
	clientSet.AppsV1Interface = appsv1client.NewForConfigOrDie(config)
	clientSet.RbacV1Interface = rbacv1client.NewForConfigOrDie(config)
	clientSet.DiscoveryInterface = discovery.NewDiscoveryClientForConfigOrDie(config)
	clientSet.NetworkingV1Client = *networkv1client.NewForConfigOrDie(config)
	clientSet.PtpV1Interface = ptpv1.NewForConfigOrDie(config)
	clientSet.ImageV1Interface = imagev1client.NewForConfigOrDie(config)
	clientSet.Config = config

	// Setup Scheme for all resources. Registration order matches the
	// previous per-call sequence; failures are programmer errors, so panic
	// exactly as the unrolled version did.
	myScheme := runtime.NewScheme()
	for _, addToScheme := range []func(*runtime.Scheme) error{
		scheme.AddToScheme,
		perfApi.AddToScheme,
		configv1.AddToScheme,
		mcov1.AddToScheme,
		tunedv1.AddToScheme,
		sriovk8sv1.SchemeBuilder.AddToScheme,
		sriovv1.AddToScheme,
		apiext.AddToScheme,
	} {
		if err := addToScheme(myScheme); err != nil {
			panic(err)
		}
	}

	clientSet.Client, err = client.New(config, client.Options{
		Scheme: myScheme,
	})
	if err != nil {
		return nil
	}

	return clientSet
}
| [
"\"KUBECONFIG\""
]
| []
| [
"KUBECONFIG"
]
| [] | ["KUBECONFIG"] | go | 1 | 0 | |
speech/speech_recognizer_test.go | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
package speech
import (
"bufio"
"io"
"os"
"testing"
"time"
"github.com/Microsoft/cognitive-services-speech-sdk-go/audio"
)
// createSpeechRecognizerFromSubscriptionRegionAndAudioConfig builds a
// SpeechRecognizer for the given subscription key, region, and audio config,
// reporting errors through t. It may return nil on config failure.
func createSpeechRecognizerFromSubscriptionRegionAndAudioConfig(t *testing.T, subscription string, region string, audioConfig *audio.AudioConfig) *SpeechRecognizer {
	config, err := NewSpeechConfigFromSubscription(subscription, region)
	if err != nil {
		t.Error("Got an error: ", err)
		return nil
	}
	defer config.Close()
	recognizer, err := NewSpeechRecognizerFromConfig(config, audioConfig)
	if err != nil {
		t.Error("Got an error: ", err)
	}
	return recognizer
}
// createSpeechRecognizerFromAudioConfig builds a recognizer using the
// SPEECH_SUBSCRIPTION_KEY / SPEECH_SUBSCRIPTION_REGION env vars.
func createSpeechRecognizerFromAudioConfig(t *testing.T, audioConfig *audio.AudioConfig) *SpeechRecognizer {
	subscription := os.Getenv("SPEECH_SUBSCRIPTION_KEY")
	region := os.Getenv("SPEECH_SUBSCRIPTION_REGION")
	return createSpeechRecognizerFromSubscriptionRegionAndAudioConfig(t, subscription, region, audioConfig)
}
// createSpeechRecognizerFromFileInput builds a recognizer whose audio input
// is the given WAV file.
func createSpeechRecognizerFromFileInput(t *testing.T, file string) *SpeechRecognizer {
	audioConfig, err := audio.NewAudioConfigFromWavFileInput(file)
	if err != nil {
		t.Error("Got an error: ", err)
	}
	defer audioConfig.Close()
	return createSpeechRecognizerFromAudioConfig(t, audioConfig)
}
// pumpFileIntoStream copies the contents of filename into the push audio
// stream in 1000-byte chunks, reporting failures through t.
func pumpFileIntoStream(t *testing.T, filename string, stream *audio.PushAudioInputStream) {
	file, err := os.Open(filename)
	if err != nil {
		t.Error("Error opening file: ", err)
		return
	}
	defer file.Close()
	reader := bufio.NewReader(file)
	buffer := make([]byte, 1000)
	for {
		n, err := reader.Read(buffer)
		// Per the io.Reader contract a reader may return n > 0 together
		// with an error (including io.EOF), so write any bytes we received
		// before acting on the error. The previous version checked io.EOF
		// first and could drop a final short chunk.
		if n > 0 {
			if writeErr := stream.Write(buffer[0:n]); writeErr != nil {
				t.Error("Error writing to the stream")
			}
		}
		if err == io.EOF {
			t.Log("Done reading file.")
			break
		}
		if err != nil {
			t.Error("Error reading file: ", err)
			break
		}
	}
}
// pumpSilenceIntoStream writes 16 chunks of 1000 zero bytes (silence) into
// the push audio stream.
func pumpSilenceIntoStream(t *testing.T, stream *audio.PushAudioInputStream) {
	// make() already zero-fills the slice, so the previous explicit
	// zeroing loop was redundant and has been removed.
	buffer := make([]byte, 1000)
	for i := 0; i < 16; i++ {
		if err := stream.Write(buffer); err != nil {
			t.Error("Error writing to the stream")
		}
	}
}
// TestSessionEvents verifies a single recognition fires both the
// SessionStarted and SessionStopped events within the timeout.
func TestSessionEvents(t *testing.T) {
	recognizer := createSpeechRecognizerFromFileInput(t, "../test_files/turn_on_the_lamp.wav")
	if recognizer == nil {
		t.Error("Recognizer creation failed")
		return
	}
	defer recognizer.Close()
	sessionStartedFuture := make(chan bool)
	sessionStoppedFuture := make(chan bool)
	recognizer.SessionStarted(func(event SessionEventArgs) {
		defer event.Close()
		t.Log("SessionStarted")
		sessionStartedFuture <- true
	})
	recognizer.SessionStopped(func(event SessionEventArgs) {
		defer event.Close()
		// Fixed copy-paste bug: this handler previously logged "SessionStarted".
		t.Log("SessionStopped")
		sessionStoppedFuture <- true
	})
	recognizer.RecognizeOnceAsync()
	select {
	case <-sessionStartedFuture:
	case <-time.After(5 * time.Second):
		t.Error("Timeout waiting for SessionStarted event.")
	}
	select {
	case <-sessionStoppedFuture:
	case <-time.After(5 * time.Second):
		t.Error("Timeout waiting for SessionStopped event.")
	}
}
// TestRecognizeOnce checks a single-shot recognition produces at least one
// Recognizing event, one Recognized event, and a resolved result.
func TestRecognizeOnce(t *testing.T) {
	recognizer := createSpeechRecognizerFromFileInput(t, "../test_files/turn_on_the_lamp.wav")
	if recognizer == nil {
		return
	}
	defer recognizer.Close()
	recognizedFuture := make(chan string)
	recognizedHandler := func(event SpeechRecognitionEventArgs) {
		defer event.Close()
		t.Log("Recognized: ", event.Result.Text)
		recognizedFuture <- "Recognized"
	}
	recognizingFuture := make(chan string)
	recognizingHandle := func(event SpeechRecognitionEventArgs) {
		defer event.Close()
		t.Log("Recognizing: ", event.Result.Text)
		// Non-blocking send: only the first Recognizing event needs to be
		// observed; later ones are dropped when nobody is receiving.
		select {
		case recognizingFuture <- "Recognizing":
		default:
		}
	}
	recognizer.Recognized(recognizedHandler)
	recognizer.Recognizing(recognizingHandle)
	result := recognizer.RecognizeOnceAsync()
	select {
	case <-recognizingFuture:
		t.Log("Received at least one Recognizing event.")
	case <-time.After(5 * time.Second):
		t.Error("Didn't receive Recognizing event.")
	}
	select {
	case <-recognizedFuture:
		t.Log("Received a Recognized event.")
	case <-time.After(5 * time.Second):
		t.Error("Didn't receive Recognizing event.")
	}
	select {
	case <-result:
		t.Log("Result resolved.")
	case <-time.After(5 * time.Second):
		t.Error("Result didn't resolve.")
	}
}
// TestContinuousRecognition pushes the same utterance twice through a push
// stream and expects two Recognizing/Recognized event pairs from continuous
// recognition.
func TestContinuousRecognition(t *testing.T) {
	format, err := audio.GetDefaultInputFormat()
	if err != nil {
		t.Error("Got an error ", err.Error())
	}
	defer format.Close()
	stream, err := audio.CreatePushAudioInputStreamFromFormat(format)
	if err != nil {
		t.Error("Got an error ", err.Error())
	}
	defer stream.Close()
	audioConfig, err := audio.NewAudioConfigFromStreamInput(stream)
	if err != nil {
		t.Error("Got an error ", err.Error())
	}
	defer audioConfig.Close()
	recognizer := createSpeechRecognizerFromAudioConfig(t, audioConfig)
	if recognizer == nil {
		return
	}
	defer recognizer.Close()
	// firstResult gates the Recognizing channel so only the first
	// Recognizing event of each utterance is forwarded.
	// NOTE(review): firstResult is read/written from event callbacks without
	// synchronization — presumably the SDK serializes callback delivery;
	// confirm, otherwise this is a data race.
	firstResult := true
	recognizedFuture := make(chan string)
	recognizingFuture := make(chan string)
	recognizedHandler := func(event SpeechRecognitionEventArgs) {
		defer event.Close()
		firstResult = true
		t.Log("Recognized: ", event.Result.Text)
		recognizedFuture <- "Recognized"
	}
	recognizingHandle := func(event SpeechRecognitionEventArgs) {
		defer event.Close()
		t.Log("Recognizing: ", event.Result.Text)
		if firstResult {
			firstResult = false
			recognizingFuture <- "Recognizing"
		}
	}
	recognizer.Recognized(recognizedHandler)
	recognizer.Recognizing(recognizingHandle)
	err = <-recognizer.StartContinuousRecognitionAsync()
	if err != nil {
		t.Error("Got error: ", err)
	}
	pumpFileIntoStream(t, "../test_files/turn_on_the_lamp.wav", stream)
	pumpFileIntoStream(t, "../test_files/turn_on_the_lamp.wav", stream)
	pumpSilenceIntoStream(t, stream)
	stream.CloseStream()
	select {
	case <-recognizingFuture:
		t.Log("Received first Recognizing event.")
	case <-time.After(5 * time.Second):
		t.Error("Didn't receive first Recognizing event.")
	}
	select {
	case <-recognizedFuture:
		t.Log("Received first Recognized event.")
	case <-time.After(5 * time.Second):
		t.Error("Didn't receive first Recognized event.")
	}
	select {
	case <-recognizingFuture:
		t.Log("Received second Recognizing event.")
	case <-time.After(5 * time.Second):
		t.Error("Didn't receive second Recognizing event.")
	}
	select {
	case <-recognizedFuture:
		t.Log("Received second Recognized event.")
	case <-time.After(5 * time.Second):
		t.Error("Didn't receive second Recognized event.")
	}
	err = <-recognizer.StopContinuousRecognitionAsync()
	if err != nil {
		t.Error("Got error: ", err)
	}
}
| [
"\"SPEECH_SUBSCRIPTION_KEY\"",
"\"SPEECH_SUBSCRIPTION_REGION\""
]
| []
| [
"SPEECH_SUBSCRIPTION_KEY",
"SPEECH_SUBSCRIPTION_REGION"
]
| [] | ["SPEECH_SUBSCRIPTION_KEY", "SPEECH_SUBSCRIPTION_REGION"] | go | 2 | 0 | |
cmd/frontend/internal/app/ui/handlers.go | package ui
import (
"context"
"html/template"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"github.com/gorilla/mux"
"github.com/inconshreveable/log15"
"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/cmd/frontend/auth"
"github.com/sourcegraph/sourcegraph/cmd/frontend/backend"
"github.com/sourcegraph/sourcegraph/cmd/frontend/envvar"
"github.com/sourcegraph/sourcegraph/cmd/frontend/globals"
"github.com/sourcegraph/sourcegraph/cmd/frontend/hubspot"
"github.com/sourcegraph/sourcegraph/cmd/frontend/hubspot/hubspotutil"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/app/assetsutil"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/app/jscontext"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/handlerutil"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/routevar"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/errcode"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/repoupdater"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"github.com/sourcegraph/sourcegraph/internal/vcs/git"
)
// InjectedHTML holds site-configured HTML fragments injected at fixed points
// of the rendered page (populated from the HtmlHead*/HtmlBody* site config
// values in newCommon).
type InjectedHTML struct {
	HeadTop    template.HTML
	HeadBottom template.HTML
	BodyTop    template.HTML
	BodyBottom template.HTML
}
// Metadata describes the social/preview metadata rendered for a page.
type Metadata struct {
	// Title is the title of the page for Twitter cards, OpenGraph, etc.
	// e.g. "Open in Sourcegraph"
	Title string

	// Description is the description of the page for Twitter cards, OpenGraph,
	// etc. e.g. "View this link in Sourcegraph Editor."
	Description string

	// ShowPreview controls whether or not OpenGraph/Twitter card/etc metadata is rendered.
	ShowPreview bool
}
// Common is the data passed to every UI page template.
type Common struct {
	Injected InjectedHTML
	Metadata *Metadata
	Context  jscontext.JSContext
	AssetURL string
	Title    string
	Error    *pageError

	WebpackDevServer bool // whether the Webpack dev server is running (WEBPACK_DEV_SERVER env var)

	// The fields below have zero values when not on a repo page.
	Repo         *types.Repo
	Rev          string // unresolved / user-specified revision (e.x.: "@master")
	api.CommitID        // resolved SHA1 revision
}
// webpackDevServer reports whether the Webpack dev server is running, parsed
// once at startup from the WEBPACK_DEV_SERVER env var; parse errors yield false.
var webpackDevServer, _ = strconv.ParseBool(os.Getenv("WEBPACK_DEV_SERVER"))
// repoShortName trims the first path element of the given repo name if it has
// at least two path components.
func repoShortName(name api.RepoName) string {
	s := string(name)
	if i := strings.Index(s, "/"); i >= 0 {
		return s[i+1:]
	}
	return s
}
// serveErrorHandler is a function signature used in newCommon and
// mockNewCommon. This is used as syntactic sugar to prevent programmer's
// (fragile creatures from planet Earth) from crashing out.
type serveErrorHandler func(w http.ResponseWriter, r *http.Request, err error, statusCode int)

// mockNewCommon is used in tests to mock newCommon (duh!).
//
// Ensure that the mock is reset at the end of every test by adding a call like the following:
// 	defer func() {
// 		mockNewCommon = nil
// 	}()
var mockNewCommon func(w http.ResponseWriter, r *http.Request, title string, serveError serveErrorHandler) (*Common, error)
// newCommon builds a *Common data structure, returning an error if one occurs.
//
// In the event of the repository having been renamed, the request is handled
// by newCommon and nil, nil is returned. Basic usage looks like:
//
// common, err := newCommon(w, r, serveError)
// if err != nil {
// return err
// }
// if common == nil {
// return nil // request was handled
// }
//
// In the case of a repository that is cloning, a Common data structure is
// returned but it has an incomplete RevSpec.
func newCommon(w http.ResponseWriter, r *http.Request, title string, serveError serveErrorHandler) (*Common, error) {
if mockNewCommon != nil {
return mockNewCommon(w, r, title, serveError)
}
common := &Common{
Injected: InjectedHTML{
HeadTop: template.HTML(conf.Get().HtmlHeadTop),
HeadBottom: template.HTML(conf.Get().HtmlHeadBottom),
BodyTop: template.HTML(conf.Get().HtmlBodyTop),
BodyBottom: template.HTML(conf.Get().HtmlBodyBottom),
},
Context: jscontext.NewJSContextFromRequest(r),
AssetURL: assetsutil.URL("").String(),
Title: title,
Metadata: &Metadata{
Title: globals.Branding().BrandName,
Description: "Sourcegraph is a web-based code search and navigation tool for dev teams. Search, navigate, and review code. Find answers.",
ShowPreview: r.URL.Path == "/sign-in" && r.URL.RawQuery == "returnTo=%2F",
},
WebpackDevServer: webpackDevServer,
}
if _, ok := mux.Vars(r)["Repo"]; ok {
// Common repo pages (blob, tree, etc).
var err error
common.Repo, common.CommitID, err = handlerutil.GetRepoAndRev(r.Context(), mux.Vars(r))
isRepoEmptyError := routevar.ToRepoRev(mux.Vars(r)).Rev == "" && gitserver.IsRevisionNotFound(errors.Cause(err)) // should reply with HTTP 200
if err != nil && !isRepoEmptyError {
if e, ok := err.(*handlerutil.URLMovedError); ok {
// The repository has been renamed, e.g. "github.com/docker/docker"
// was renamed to "github.com/moby/moby" -> redirect the user now.
err = handlerutil.RedirectToNewRepoName(w, r, e.NewRepo)
if err != nil {
return nil, errors.Wrap(err, "when sending renamed repository redirect response")
}
return nil, nil
}
if e, ok := err.(backend.ErrRepoSeeOther); ok {
// Repo does not exist here, redirect to the recommended location.
u, err := url.Parse(e.RedirectURL)
if err != nil {
return nil, err
}
u.Path, u.RawQuery = r.URL.Path, r.URL.RawQuery
http.Redirect(w, r, u.String(), http.StatusSeeOther)
return nil, nil
}
if gitserver.IsRevisionNotFound(errors.Cause(err)) {
// Revision does not exist.
serveError(w, r, err, http.StatusNotFound)
return nil, nil
}
if _, ok := errors.Cause(err).(*gitserver.RepoNotCloneableErr); ok {
if errcode.IsNotFound(err) {
// Repository is not found.
serveError(w, r, err, http.StatusNotFound)
return nil, nil
}
// Repository is not clonable.
dangerouslyServeError(w, r, errors.New("repository could not be cloned"), http.StatusInternalServerError)
return nil, nil
}
if vcs.IsRepoNotExist(err) {
if vcs.IsCloneInProgress(err) {
// Repo is cloning.
return common, nil
}
// Repo does not exist.
serveError(w, r, err, http.StatusNotFound)
return nil, nil
}
if errcode.IsNotFound(err) {
// Repo does not exist.
serveError(w, r, err, http.StatusNotFound)
return nil, nil
}
if errcode.IsUnauthorized(err) {
// Not authorized to access repository.
serveError(w, r, err, http.StatusUnauthorized)
return nil, nil
}
return nil, err
}
if common.Repo.Name == "github.com/sourcegraphtest/Always500Test" {
return nil, errors.New("error caused by Always500Test repo name")
}
common.Rev = mux.Vars(r)["Rev"]
// Update gitserver contents for a repo whenever it is visited.
go func() {
ctx := context.Background()
_, err = repoupdater.DefaultClient.EnqueueRepoUpdate(ctx, common.Repo.Name)
if err != nil {
log15.Error("EnqueueRepoUpdate", "error", err)
}
}()
}
return common, nil
}
type handlerFunc func(w http.ResponseWriter, r *http.Request) error
func serveBrandedPageString(titles string, description *string) handlerFunc {
return serveBasicPage(func(c *Common, r *http.Request) string {
return brandNameSubtitle(titles)
}, description)
}
func serveBasicPage(title func(c *Common, r *http.Request) string, description *string) handlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, "", serveError)
if err != nil {
return err
}
if description != nil {
common.Metadata.Description = *description
}
if common == nil {
return nil // request was handled
}
common.Title = title(common, r)
return renderTemplate(w, "app.html", common)
}
}
func serveHome(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, globals.Branding().BrandName, serveError)
if err != nil {
return err
}
if common == nil {
return nil // request was handled
}
if envvar.SourcegraphDotComMode() && !actor.FromContext(r.Context()).IsAuthenticated() && !strings.Contains(r.UserAgent(), "Cookiebot") {
// The user is not signed in and tried to access Sourcegraph.com. Redirect to
// about.sourcegraph.com so they see general info page.
// Don't redirect Cookiebot so it can scan the website without authentication.
http.Redirect(w, r, (&url.URL{Scheme: aboutRedirectScheme, Host: aboutRedirectHost}).String(), http.StatusTemporaryRedirect)
return nil
}
// On non-Sourcegraph.com instances, there is no separate homepage, so redirect to /search.
r.URL.Path = "/search"
http.Redirect(w, r, r.URL.String(), http.StatusTemporaryRedirect)
return nil
}
func serveSignIn(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, "", serveError)
if err != nil {
return err
}
if common == nil {
return nil // request was handled
}
common.Title = brandNameSubtitle("Sign in")
return renderTemplate(w, "app.html", common)
}
// redirectTreeOrBlob redirects a blob page to a tree page if the file is actually a directory,
// or a tree page to a blob page if the directory is actually a file.
func redirectTreeOrBlob(routeName, path string, common *Common, w http.ResponseWriter, r *http.Request) (requestHandled bool, err error) {
// NOTE: It makes no sense for this function to proceed if the commit ID
// for the repository is empty. It is most likely the repository is still
// clone in progress.
if common.CommitID == "" {
return false, nil
}
if path == "/" || path == "" {
if routeName != routeRepo {
// Redirect to repo route
target := "/" + string(common.Repo.Name) + common.Rev
http.Redirect(w, r, target, http.StatusTemporaryRedirect)
return true, nil
}
return false, nil
}
stat, err := git.Stat(r.Context(), common.Repo.Name, common.CommitID, path)
if err != nil {
if os.IsNotExist(err) {
serveError(w, r, err, http.StatusNotFound)
return true, nil
}
return false, err
}
expectedDir := routeName == routeTree
if stat.Mode().IsDir() != expectedDir {
target := "/" + string(common.Repo.Name) + common.Rev + "/-/"
if expectedDir {
target += "blob"
} else {
target += "tree"
}
target += path
http.Redirect(w, r, auth.SafeRedirectURL(target), http.StatusTemporaryRedirect)
return true, nil
}
return false, nil
}
// serveTree serves the tree (directory) pages.
func serveTree(title func(c *Common, r *http.Request) string) handlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, "", serveError)
if err != nil {
return err
}
if common == nil {
return nil // request was handled
}
handled, err := redirectTreeOrBlob(routeTree, mux.Vars(r)["Path"], common, w, r)
if handled {
return nil
}
if err != nil {
return err
}
common.Title = title(common, r)
return renderTemplate(w, "app.html", common)
}
}
func serveRepoOrBlob(routeName string, title func(c *Common, r *http.Request) string) handlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, "", serveError)
if err != nil {
return err
}
if common == nil {
return nil // request was handled
}
handled, err := redirectTreeOrBlob(routeName, mux.Vars(r)["Path"], common, w, r)
if handled {
return nil
}
if err != nil {
return err
}
common.Title = title(common, r)
q := r.URL.Query()
_, isNewQueryUX := q["sq"] // sq URL param is only set by new query UX in SearchNavbarItem.tsx
if search := q.Get("q"); search != "" && !isNewQueryUX {
// Redirect old search URLs:
//
// /github.com/gorilla/mux@24fca303ac6da784b9e8269f724ddeb0b2eea5e7?q=ErrMethodMismatch&utm_source=chrome-extension
// /github.com/gorilla/mux@24fca303ac6da784b9e8269f724ddeb0b2eea5e7/-/blob/mux.go?q=NewRouter
//
// To new ones:
//
// /search?q=repo:^github.com/gorilla/mux$+ErrMethodMismatch
//
// It does not apply the file: filter because that was not the behavior of the
// old blob URLs with a 'q' parameter either.
r.URL.Path = "/search"
q.Set("sq", "repo:^"+regexp.QuoteMeta(string(common.Repo.Name))+"$")
r.URL.RawQuery = q.Encode()
http.Redirect(w, r, r.URL.String(), http.StatusPermanentRedirect)
return nil
}
return renderTemplate(w, "app.html", common)
}
}
// searchBadgeHandler serves the search readme badges from the search-badger service
// https://github.com/sourcegraph/search-badger
func searchBadgeHandler() *httputil.ReverseProxy {
return &httputil.ReverseProxy{
Director: func(r *http.Request) {
r.URL.Scheme = "http"
r.URL.Host = "search-badger"
r.URL.Path = "/"
},
ErrorLog: log.New(env.DebugOut, "search-badger proxy: ", log.LstdFlags),
}
}
func servePingFromSelfHosted(w http.ResponseWriter, r *http.Request) error {
// CORS to allow request from anywhere
u, err := url.Parse(r.Referer())
if err != nil {
return err
}
w.Header().Add("Access-Control-Allow-Origin", u.Host)
w.Header().Add("Access-Control-Allow-Credentials", "true")
if r.Method == http.MethodOptions {
// CORS preflight request, respond 204 and allow origin header
w.WriteHeader(http.StatusNoContent)
return nil
}
email := r.URL.Query().Get("email")
sourceURLCookie, err := r.Cookie("sourcegraphSourceUrl")
var sourceURL string
if err == nil && sourceURLCookie != nil {
sourceURL = sourceURLCookie.Value
}
anonymousUIDCookie, err := r.Cookie("sourcegraphAnonymousUid")
var anonymousUserId string
if err == nil && anonymousUIDCookie != nil {
anonymousUserId = anonymousUIDCookie.Value
}
hubspotutil.SyncUser(email, hubspotutil.SelfHostedSiteInitEventID, &hubspot.ContactProperties{
IsServerAdmin: true,
AnonymousUserID: anonymousUserId,
FirstSourceURL: sourceURL,
})
return nil
}
| [
"\"WEBPACK_DEV_SERVER\""
]
| []
| [
"WEBPACK_DEV_SERVER"
]
| [] | ["WEBPACK_DEV_SERVER"] | go | 1 | 0 | |
setup.py | ##############################################################################
# pip install recipe for conduit
##############################################################################
#
# Recipe that uses cmake to build conduit for use in python.
#
# Optional Conduit features are enabled via env vars:
#
# HDF5_DIR {path to hdf5 install}
#
# [Caveats]
# - Assumes a suitable cmake (3.9 + ) is in your path
# - Does not build a relocatable wheel
# - Windows untested
#
# [Example Usage]
# pip install .
# pip install . --user
# pip install -v . --user
# env HDF5_DIR={path/to/hdf5/install} pip install -v . --user
#
# # for those with certificate woes
# pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org -v . --user
# env HDF5_DIR={path/to/hdf5/install} pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org -v . --user
#
#
# [Ack]
# https://github.com/openPMD/openPMD-api/blob/dev/setup.py
#
# Provided helpful pointers to create this setup script.
##############################################################################
import os
import re
import sys
import platform
import subprocess
from os.path import join as pjoin
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
# make sure cmake exist with min version
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake >= 3.9.0 must be installed to build the following " +
"extensions: " +
", ".join(e.name for e in self.extensions))
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)',
out.decode()
).group(1))
if cmake_version < '3.9.0':
raise RuntimeError("CMake >= 3.9.0 is required")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir =self.get_ext_fullpath(ext.name)
extdir = os.path.abspath(os.path.dirname(extdir))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
cmake_args = ['-DPYTHON_MODULE_INSTALL_PREFIX=' + pjoin(extdir),
'-DCMAKE_INSTALL_PREFIX=' + pjoin(extdir, "conduit_cxx"),
'-DPYTHON_EXECUTABLE=' + sys.executable,
'-DENABLE_PYTHON:BOOL=ON',
'-DHDF5_DIR=' + HDF5_DIR,
'-DENABLE_MPI=' + ENABLE_MPI,
'-DENABLE_TESTS:BOOL=OFF',
'-DENABLE_DOCS:BOOL=OFF']
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
# TODO: Windows untested
if platform.system() == "Windows":
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake',
pjoin(ext.sourcedir,"src")] + cmake_args,
cwd=self.build_temp,
env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args,
cwd=self.build_temp)
subprocess.check_call(['cmake', '--install', '.'],
cwd=self.build_temp)
#
# pass options via env vars
#
HDF5_DIR = os.environ.get('HDF5_DIR', 'IGNORE')
ENABLE_MPI = os.environ.get('ENABLE_MPI', 'OFF')
# keyword reference:
# https://packaging.python.org/guides/distributing-packages-using-setuptools
setup(
name='conduit',
version='0.8.0.dev',
author='Cyrus Harrison',
author_email='[email protected]',
maintainer='Cyrus Harrison',
maintainer_email='[email protected]',
description='Simplified Data Exchange for HPC Simulations '
'(Python, C++, C, and Fortran)',
keywords=('yaml json cpp fortran hpc hdf5 scientific-computing'
' data-management llnl radiuss'),
url='https://github.com/llnl/conduit',
project_urls={
'Documentation': 'https://llnl-conduit.readthedocs.io/',
'Source': 'https://github.com/llnl/conduit',
'Tracker': 'https://github.com/LLNL/conduit/issues',
},
ext_modules=[CMakeExtension('conduit_cxx')],
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
python_requires='>=3.6, <3.11')
| []
| []
| [
"ENABLE_MPI",
"HDF5_DIR"
]
| [] | ["ENABLE_MPI", "HDF5_DIR"] | python | 2 | 0 | |
main.go | /* ######################################################################
# Author: ([email protected])
# Created Time: 2018-11-14 12:50:43
# File Name: main.go
# Description:
####################################################################### */
package main
import (
"flag"
"fmt"
"os"
"strings"
"ant-coder/coder"
"ant-coder/coder/config"
)
// pass through when build project, go build -ldflags "main.__version__ 1.2.1" app
var coders = map[string]coder.Coder{
"go_model": coder.NewGoModelCoder(),
"go_ui": coder.NewGoUiCoder(),
"go_loop_worker": coder.NewGoLoopWorkerCoder(),
"go_crontab_worker": coder.NewGoCrontabWorkerCoder(),
"go_rpcx_server": coder.NewGoRpcxServerCoder(),
}
var (
__version__ string
pwd = flag.String("d", "", "work directory")
verbose = flag.String("v", "false", "enable verbose logging [false]")
scene string
)
func init() {
var scenes []string
for scene, _ := range coders {
scenes = append(scenes, scene)
}
flag.StringVar(&scene, "s", "", fmt.Sprintf("coder scene (options: %s)", strings.Join(scenes, "|")))
flag.Parse()
if len(*pwd) == 0 {
*pwd, _ = os.Getwd()
}
os.Setenv("VERSION", __version__)
os.Setenv("WORKDIR", *pwd)
os.Setenv("VERBOSE", *verbose)
if len(scene) == 0 {
fmt.Println("you must specify `-s` option")
os.Exit(-1)
}
if err := config.SetPathAndLoad(os.Getenv("HOME")); err != nil {
fmt.Println(err)
os.Exit(-1)
}
}
func main() {
c, ok := coders[scene]
if !ok {
fmt.Println("you specify coder sense not support.")
os.Exit(-1)
}
if err := coder.NewExecutor(c).Do(); err != nil {
fmt.Println(err)
os.Exit(-1)
}
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
serverstats/plugin_web.go | package serverstats
import (
"context"
_ "embed"
"fmt"
"html/template"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/dv8-studio/yagpdb/common"
"github.com/dv8-studio/yagpdb/common/cplogs"
"github.com/dv8-studio/yagpdb/common/pubsub"
"github.com/dv8-studio/yagpdb/premium"
"github.com/dv8-studio/yagpdb/serverstats/models"
"github.com/dv8-studio/yagpdb/web"
"github.com/jonas747/discordgo/v2"
"github.com/karlseguin/rcache"
"github.com/volatiletech/null"
"github.com/volatiletech/sqlboiler/boil"
"goji.io"
"goji.io/pat"
)
//go:embed assets/serverstats.html
var PageHTML string
var WebStatsCache = rcache.New(cacheChartFetcher, time.Minute)
var WebConfigCache = rcache.NewInt(cacheConfigFetcher, time.Minute)
var panelLogKey = cplogs.RegisterActionFormat(&cplogs.ActionFormat{Key: "serverstats_settings_updated", FormatString: "Updated serverstats settings"})
type FormData struct {
Public bool
IgnoreChannels []int64 `valid:"channel,false"`
}
func (p *Plugin) InitWeb() {
web.AddHTMLTemplate("serverstats/assets/serverstats.html", PageHTML)
web.AddSidebarItem(web.SidebarCategoryTopLevel, &web.SidebarItem{
Name: "Stats",
URL: "stats",
Icon: "fas fa-chart-bar",
})
statsCPMux := goji.SubMux()
web.CPMux.Handle(pat.New("/stats"), statsCPMux)
web.CPMux.Handle(pat.New("/stats/*"), statsCPMux)
cpGetHandler := web.ControllerHandler(publicHandler(HandleStatsHtml, false), "cp_serverstats")
statsCPMux.Handle(pat.Get(""), cpGetHandler)
statsCPMux.Handle(pat.Get("/"), cpGetHandler)
statsCPMux.Handle(pat.Post("/settings"), web.ControllerPostHandler(HandleSaveStatsSettings, cpGetHandler, FormData{}))
statsCPMux.Handle(pat.Get("/daily_json"), web.APIHandler(publicHandlerJson(HandleStatsJson, false)))
statsCPMux.Handle(pat.Get("/charts"), web.APIHandler(publicHandlerJson(HandleStatsCharts, false)))
// Public
web.ServerPublicMux.Handle(pat.Get("/stats"), web.ControllerHandler(publicHandler(HandleStatsHtml, true), "cp_serverstats"))
web.ServerPublicMux.Handle(pat.Get("/stats/daily_json"), web.APIHandler(publicHandlerJson(HandleStatsJson, true)))
web.ServerPublicMux.Handle(pat.Get("/stats/charts"), web.APIHandler(publicHandlerJson(HandleStatsCharts, true)))
}
type publicHandlerFunc func(w http.ResponseWriter, r *http.Request, publicAccess bool) (web.TemplateData, error)
func publicHandler(inner publicHandlerFunc, public bool) web.ControllerHandlerFunc {
mw := func(w http.ResponseWriter, r *http.Request) (web.TemplateData, error) {
return inner(w, r.WithContext(web.SetContextTemplateData(r.Context(), map[string]interface{}{"Public": public})), public)
}
return mw
}
// Somewhat dirty - should clean up this mess sometime
func HandleStatsHtml(w http.ResponseWriter, r *http.Request, isPublicAccess bool) (web.TemplateData, error) {
activeGuild, templateData := web.GetBaseCPContextData(r.Context())
config, err := GetConfig(r.Context(), activeGuild.ID)
if err != nil {
return templateData, common.ErrWithCaller(err)
}
templateData["Config"] = config
if confDeprecated.GetBool() {
templateData.AddAlerts(web.WarningAlert("Serverstats are deprecated in favor of the superior discord server insights. Recording of new stats may stop at any time and stats will no longer be available next month."))
}
return templateData, nil
}
func HandleSaveStatsSettings(w http.ResponseWriter, r *http.Request) (web.TemplateData, error) {
ag, templateData := web.GetBaseCPContextData(r.Context())
formData := r.Context().Value(common.ContextKeyParsedForm).(*FormData)
stringedChannels := ""
alreadyAdded := make([]int64, 0, len(formData.IgnoreChannels))
OUTER:
for i, v := range formData.IgnoreChannels {
// only add each once
for _, ad := range alreadyAdded {
if ad == v {
continue OUTER
}
}
// make sure the channel exists
channelExists := false
for _, ec := range ag.Channels {
if ec.ID == v {
channelExists = true
break
}
}
if !channelExists {
continue
}
if i != 0 {
stringedChannels += ","
}
alreadyAdded = append(alreadyAdded, v)
stringedChannels += strconv.FormatInt(v, 10)
}
model := &models.ServerStatsConfig{
GuildID: ag.ID,
Public: null.BoolFrom(formData.Public),
IgnoreChannels: null.StringFrom(stringedChannels),
CreatedAt: null.TimeFrom(time.Now()),
}
err := model.UpsertG(r.Context(), true, []string{"guild_id"}, boil.Whitelist("public", "ignore_channels"), boil.Infer())
if err == nil {
pubsub.EvictCacheSet(cachedConfig, ag.ID)
go cplogs.RetryAddEntry(web.NewLogEntryFromContext(r.Context(), panelLogKey))
}
WebConfigCache.Delete(int(ag.ID))
return templateData, err
}
type publicHandlerFuncJson func(w http.ResponseWriter, r *http.Request, publicAccess bool) interface{}
func publicHandlerJson(inner publicHandlerFuncJson, public bool) web.CustomHandlerFunc {
mw := func(w http.ResponseWriter, r *http.Request) interface{} {
return inner(w, r.WithContext(web.SetContextTemplateData(r.Context(), map[string]interface{}{"Public": public})), public)
}
return mw
}
func HandleStatsJson(w http.ResponseWriter, r *http.Request, isPublicAccess bool) interface{} {
activeGuild, _ := web.GetBaseCPContextData(r.Context())
conf := GetConfigWeb(activeGuild.ID)
if conf == nil {
w.WriteHeader(http.StatusInternalServerError)
return nil
}
if !conf.Public && isPublicAccess {
return nil
}
stats, err := RetrieveDailyStats(time.Now(), activeGuild.ID)
if err != nil {
web.CtxLogger(r.Context()).WithError(err).Error("Failed retrieving stats")
w.WriteHeader(http.StatusInternalServerError)
return nil
}
// Update the names to human readable ones, leave the ids in the name fields for the ones not available
for _, cs := range stats.ChannelMessages {
for _, channel := range activeGuild.Channels {
if discordgo.StrID(channel.ID) == cs.Name {
cs.Name = channel.Name
break
}
}
}
return stats
}
type ChartResponse struct {
Days int `json:"days"`
Data []*ChartDataPeriod `json:"data"`
}
func HandleStatsCharts(w http.ResponseWriter, r *http.Request, isPublicAccess bool) interface{} {
activeGuild, _ := web.GetBaseCPContextData(r.Context())
conf := GetConfigWeb(activeGuild.ID)
if conf == nil {
w.WriteHeader(http.StatusInternalServerError)
return nil
}
if !conf.Public && isPublicAccess {
return nil
}
numDays := 7
if r.URL.Query().Get("days") != "" {
numDays, _ = strconv.Atoi(r.URL.Query().Get("days"))
if numDays > 365 {
numDays = 365
}
}
if !premium.ContextPremium(r.Context()) && (numDays > 7 || numDays <= 0) {
numDays = 7
}
stats := CacheGetCharts(activeGuild.ID, numDays, r.Context())
return stats
}
func emptyChartData() *ChartResponse {
return &ChartResponse{
Days: 0,
Data: []*ChartDataPeriod{},
}
}
func CacheGetCharts(guildID int64, days int, ctx context.Context) *ChartResponse {
if os.Getenv("YAGPDB_SERVERSTATS_DISABLE_SERVERSTATS") != "" {
return emptyChartData()
}
fetchDays := days
if days < 7 {
fetchDays = 7
}
// default to full time stats
if days != 30 && days != 365 && days > 7 {
fetchDays = -1
days = -1
} else if days < 1 {
days = -1
fetchDays = -1
}
key := "charts:" + strconv.FormatInt(guildID, 10) + ":" + strconv.FormatInt(int64(fetchDays), 10)
statsInterface := WebStatsCache.Get(key)
if statsInterface == nil {
return emptyChartData()
}
stats := statsInterface.(*ChartResponse)
cop := *stats
if fetchDays != days && days != -1 && len(cop.Data) > days {
cop.Data = cop.Data[:days]
cop.Days = days
}
return &cop
}
func cacheChartFetcher(key string) interface{} {
split := strings.Split(key, ":")
if len(split) < 3 {
logger.Error("invalid cache key: ", key)
return nil
}
guildID, _ := strconv.ParseInt(split[1], 10, 64)
days, _ := strconv.Atoi(split[2])
periods, err := RetrieveChartDataPeriods(context.Background(), guildID, time.Now(), days)
if err != nil {
logger.WithError(err).WithField("cache_key", key).Error("failed retrieving chart data")
return nil
}
return &ChartResponse{
Days: days,
Data: periods,
}
}
func GetConfigWeb(guildID int64) *ServerStatsConfig {
config := WebConfigCache.Get(int(guildID))
if config == nil {
return nil
}
return config.(*ServerStatsConfig)
}
func cacheConfigFetcher(key int) interface{} {
config, err := GetConfig(context.Background(), int64(key))
if err != nil {
logger.WithError(err).WithField("cache_key", key).Error("failed retrieving stats config")
return nil
}
return config
}
var _ web.PluginWithServerHomeWidget = (*Plugin)(nil)
func (p *Plugin) LoadServerHomeWidget(w http.ResponseWriter, r *http.Request) (web.TemplateData, error) {
activeGuild, templateData := web.GetBaseCPContextData(r.Context())
templateData["WidgetTitle"] = "Server Stats"
templateData["SettingsPath"] = "/stats"
templateData["WidgetEnabled"] = true
config, err := GetConfig(r.Context(), activeGuild.ID)
if err != nil {
return templateData, common.ErrWithCaller(err)
}
const format = `<ul>
<li>Public stats: %s</li>
<li>Blacklisted channnels: <code>%d</code></li>
</ul>`
templateData["WidgetBody"] = template.HTML(fmt.Sprintf(format, web.EnabledDisabledSpanStatus(config.Public), len(config.ParsedChannels)))
return templateData, nil
}
| [
"\"YAGPDB_SERVERSTATS_DISABLE_SERVERSTATS\""
]
| []
| [
"YAGPDB_SERVERSTATS_DISABLE_SERVERSTATS"
]
| [] | ["YAGPDB_SERVERSTATS_DISABLE_SERVERSTATS"] | go | 1 | 0 | |
django-poll-project/kube101/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kube101.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
health/health.go | // Package health assemble all functions required for health checks
package health
import (
"net/http"
"os"
"strings"
"github.com/Lord-Y/versions/mysql"
"github.com/Lord-Y/versions/postgres"
"github.com/gin-gonic/gin"
)
// Health permit to return basic health check
func Health(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{"health": "OK"})
}
// Health permit to return basic health check
func Healthz(c *gin.Context) {
db := make(map[string]interface{})
var count int
s := strings.TrimSpace(os.Getenv("SQL_DRIVER"))
if s == "mysql" {
if mysql.Ping() {
db["mysql"] = "OK"
count += 1
} else {
db["mysql"] = "NOT OK"
}
} else {
if postgres.Ping() {
db["postgresql"] = "OK"
count += 1
} else {
db["postgresql"] = "NOT OK"
}
}
if count > 0 {
c.JSON(http.StatusOK, gin.H{"status": db})
} else {
c.JSON(http.StatusInternalServerError, gin.H{"status": db})
}
}
| [
"\"SQL_DRIVER\""
]
| []
| [
"SQL_DRIVER"
]
| [] | ["SQL_DRIVER"] | go | 1 | 0 | |
scripts/suse/yum/plugins/yumnotify.py | # Copyright (c) 2016 SUSE Linux LLC
# All Rights Reserved.
#
# Author: Bo Maryniuk <[email protected]>
import hashlib
import os
from yum import config
from yum.plugins import TYPE_CORE
CK_PATH = "/var/cache/salt/minion/rpmdb.cookie"
RPM_PATH = "/var/lib/rpm/Packages"
requires_api_version = "2.5"
plugin_type = TYPE_CORE
def _get_mtime():
"""
Get the modified time of the RPM Database.
Returns:
Unix ticks
"""
return os.path.exists(RPM_PATH) and int(os.path.getmtime(RPM_PATH)) or 0
def _get_checksum():
"""
Get the checksum of the RPM Database.
Returns:
hexdigest
"""
digest = hashlib.sha256()
with open(RPM_PATH, "rb") as rpm_db_fh:
while True:
buff = rpm_db_fh.read(0x1000)
if not buff:
break
digest.update(buff)
return digest.hexdigest()
def posttrans_hook(conduit):
"""
Hook after the package installation transaction.
:param conduit:
:return:
"""
# Integrate Yum with Salt
if "SALT_RUNNING" not in os.environ:
with open(CK_PATH, "w") as ck_fh:
ck_fh.write(
"{chksum} {mtime}\n".format(chksum=_get_checksum(), mtime=_get_mtime())
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
example/helpers/args.go | // Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Helper methods for Oracle Cloud Infrastructure Go SDK Samples
package helpers
import (
"os"
"github.com/oracle/oci-go-sdk/common"
)
var (
availabilityDomain string
compartmentID string
rootCompartmentID string
)
// ParseEnvironmentVariables parse shared variables from environment variables, other samples should define their own
// viariables and call this function to initialize shared variables
func ParseEnvironmentVariables() {
availabilityDomain = os.Getenv("OCI_AVAILABILITY_DOMAIN")
compartmentID = os.Getenv("OCI_COMPARTMENT_ID")
rootCompartmentID = os.Getenv("OCI_ROOT_COMPARTMENT_ID")
}
// AvailabilityDomain return the aviailability domain defined in .env.sample file
func AvailabilityDomain() *string {
return common.String(availabilityDomain)
}
// CompartmentID return the compartment ID defined in .env.sample file
func CompartmentID() *string {
return common.String(compartmentID)
}
// RootCompartmentID return the root compartment ID defined in .env.sample file
func RootCompartmentID() *string {
return common.String(rootCompartmentID)
}
| [
"\"OCI_AVAILABILITY_DOMAIN\"",
"\"OCI_COMPARTMENT_ID\"",
"\"OCI_ROOT_COMPARTMENT_ID\""
]
| []
| [
"OCI_ROOT_COMPARTMENT_ID",
"OCI_COMPARTMENT_ID",
"OCI_AVAILABILITY_DOMAIN"
]
| [] | ["OCI_ROOT_COMPARTMENT_ID", "OCI_COMPARTMENT_ID", "OCI_AVAILABILITY_DOMAIN"] | go | 3 | 0 | |
main.go | package main
import (
"fmt"
"log"
"net/url"
"os"
"strings"
"github.com/ChimeraCoder/anaconda"
ui "github.com/gizak/termui"
)
func main() {
api := anaconda.NewTwitterApiWithCredentials(
os.Getenv("ACCESS_TOKEN"), os.Getenv("ACCESS_TOKEN_SECRET"),
os.Getenv("CONSUMER_KEY"), os.Getenv("CONSUMER_SECRET_KEY"))
startUI(api)
}
func startUI(api *anaconda.TwitterApi) {
err := ui.Init()
if err != nil {
log.Fatal(err)
}
defer ui.Close()
tweetList := ui.NewList()
tweetList.BorderLabel = "Tweets"
tweetList.Height = ui.TermHeight()
tweetList.Overflow = "wrap"
ui.Body.AddRows(
ui.NewRow(ui.NewCol(12, 0, tweetList)),
)
ui.Body.Align()
ui.Render(ui.Body)
ui.Handle("/sys/kbd/q", func(ui.Event) {
ui.StopLoop()
})
loadTimeline(tweetList, api)
go updateTweets(tweetList, api)
ui.Loop()
}
func loadTimeline(tweetList *ui.List, api *anaconda.TwitterApi) {
v := url.Values{}
v.Set("count", string(ui.TermHeight()-2))
timeline, err := api.GetHomeTimeline(v)
if err != nil {
log.Fatal(err)
}
tweets := make([]string, ui.TermHeight()-2, ui.TermHeight()-2)
tweetList.Items = tweets
ui.Render(ui.Body)
for t := len(timeline) - 1; t >= 0; t-- {
for i := len(tweets) - 1; i > 0; i-- {
tweets[i] = tweets[i-1]
}
tweets[0] = formatTweet(timeline[t])
ui.Render(ui.Body)
}
}
func updateTweets(tweetList *ui.List, api *anaconda.TwitterApi) {
v := url.Values{}
// Fetch extended tweets by default
v.Set("tweet_mode", "extended")
// "with" specifies to include tweets from accounts the user follows
v.Set("with", "true")
stream := api.UserStream(v)
defer stream.Stop()
// Get the existing tweet list from termui.
// By the magic of Go arrays, the underlying array is modified when
// updating this slice, so no need to re-add the slice to the termui List.
tweets := tweetList.Items
for v := range stream.C {
// Ignore anything that isn't a tweet
t, ok := v.(anaconda.Tweet)
if !ok {
continue
}
// Shift each tweet down one in the list
for j := len(tweets) - 1; j > 0; j-- {
tweets[j] = tweets[j-1]
}
tweets[0] = formatTweet(t)
ui.Render(ui.Body)
}
}
// formatTweet renders a tweet as a single termui-markup line: timestamp,
// author, optional "(via retweeter)" credit, and the unwrapped text.
func formatTweet(t anaconda.Tweet) string {
	createdAt, _ := t.CreatedAtTime()
	stamp := createdAt.Format("15:04")
	author := t.User.ScreenName
	text := unwrapMedia(unwrapURLs(t.FullText, t), t)
	var via string
	if t.RetweetedStatus != nil {
		// Credit the retweeter first, then swap in the original author and
		// the original (unwrapped) text.
		via = fmt.Sprintf(" (via [%s](fg-magenta))", author)
		author = t.RetweetedStatus.User.ScreenName
		orig := *t.RetweetedStatus
		text = unwrapMedia(unwrapURLs(orig.FullText, orig), orig)
	}
	return fmt.Sprintf("[%s](fg-green) [%s](fg-red)%s: %s", stamp, author, via, text)
}
// unwrapURLs unwraps (expands) every t.co link in text back to the original
// URL, using the tweet's URL entities.
func unwrapURLs(text string, t anaconda.Tweet) string {
	result := text
	for _, entity := range t.Entities.Urls {
		result = strings.ReplaceAll(result, entity.Url, entity.Expanded_url)
	}
	return result
}
// unwrapMedia unwraps (expands) media links (embedded photo, GIF, etc.) from
// t.co back to the original, using the tweet's media entities.
func unwrapMedia(text string, t anaconda.Tweet) string {
	result := text
	for _, entity := range t.Entities.Media {
		result = strings.ReplaceAll(result, entity.Url, entity.Expanded_url)
	}
	return result
}
| [
"\"ACCESS_TOKEN\"",
"\"ACCESS_TOKEN_SECRET\"",
"\"CONSUMER_KEY\"",
"\"CONSUMER_SECRET_KEY\""
]
| []
| [
"CONSUMER_KEY",
"ACCESS_TOKEN_SECRET",
"CONSUMER_SECRET_KEY",
"ACCESS_TOKEN"
]
| [] | ["CONSUMER_KEY", "ACCESS_TOKEN_SECRET", "CONSUMER_SECRET_KEY", "ACCESS_TOKEN"] | go | 4 | 0 | |
main.go | package main
import (
"fmt"
"github.com/go-telegram-bot-api/telegram-bot-api"
"gopkg.in/ffmt.v1"
"log"
"os"
"regexp"
"strconv"
"strings"
"time"
)
//TODO buy-and-sell bot for Telegram
//start with nice buttons for the bot actions, shown on screen
//CRUD of products for whoever is allowed to sell
//the bot has an owner
// each product has:
// -description
// -price
//accept crypto
//accept sending the seller's contact and a message to the seller
//accept apikey and owner at bot initialization
//store data in a sqlite db
// Package-level bot state. All maps are keyed by Telegram username; access
// is single-goroutine per user via the chans dispatch in main.
var (
	owner = os.Getenv("TELEGRAM_OWNER") // you should set this to your username
	ApiKey = os.Getenv("TELEGRAM_KEY") // you should set this to your api key
	logger = log.Logger{}
	//todo add timeout on states
	//id -> state
	// state tracks each user's current dialog step (e.g. "add_desc").
	state = map[string]string{}
	// chans holds one buffered update channel per active user; the per-user
	// goroutine blocks on it so multi-step dialogs can wait for input.
	chans = map[string]chan tgbotapi.Update{}
	// catalog maps a seller's username to the products they offer, seeded
	// with a dummy product owned by the bot owner.
	catalog = map[string][]product{
		owner: []product{
			product{
				Description: "durr",
				Price: 666,
				Creator: tgbotapi.User{
					UserName: owner,
				},
			},
		},
	}
)
// product is a single item offered for sale, built up step by step through
// the /add dialog.
type product struct {
	Description string
	Price float64
	Extra []tgbotapi.Message //extra messages for selling the product
	// Creator is the Telegram user who listed the product.
	Creator tgbotapi.User
	// OnBought selects what happens after purchase: "contact" or "crypto".
	OnBought string
	// CryptoKey is the seller's public wallet key (used when OnBought == "crypto").
	CryptoKey string
	// Currency is the crypto ticker requested by the seller, e.g. BTC.
	Currency string
	// Done marks a fully configured product; half-built entries are dropped
	// when the /add dialog is aborted.
	Done bool
}
// mainMenuKeyboard is the top-level inline menu offering the four catalog
// operations; each button's data is a slash-command routed by execHandlers.
var mainMenuKeyboard = tgbotapi.NewInlineKeyboardMarkup(
	//tgbotapi.NewInlineKeyboardRow(
	//	tgbotapi.NewInlineKeyboardButtonURL("1.com","http://1.com"),
	//	tgbotapi.NewInlineKeyboardButtonSwitch("2sw","open 2"),
	//	tgbotapi.NewInlineKeyboardButtonData("3","3"),
	//),
	tgbotapi.NewInlineKeyboardRow(
		tgbotapi.NewInlineKeyboardButtonData("List all sales", "/list"),
		tgbotapi.NewInlineKeyboardButtonData("Add sale", "/add"),
		tgbotapi.NewInlineKeyboardButtonData("Update sale", "/update"),
		tgbotapi.NewInlineKeyboardButtonData("Remove Sale", "/remove"),
	),
)
// exitKeyboard offers a single Exit button for aborting a dialog step.
var exitKeyboard = tgbotapi.NewInlineKeyboardMarkup(
	tgbotapi.NewInlineKeyboardRow(
		tgbotapi.NewInlineKeyboardButtonData("Exit", "/exit"),
	),
)
// extnextKeyboard offers Exit/Next, used while collecting the optional extra
// messages of a new product.
var extnextKeyboard = tgbotapi.NewInlineKeyboardMarkup(
	tgbotapi.NewInlineKeyboardRow(
		tgbotapi.NewInlineKeyboardButtonData("Exit", "/exit"),
		tgbotapi.NewInlineKeyboardButtonData("Next", "/next"),
	),
)
//todo any other option?
// sendchargeKeyboard asks the seller what should happen when their product
// is bought: hand the buyer the seller's contact, or charge crypto.
var sendchargeKeyboard = tgbotapi.NewInlineKeyboardMarkup(
	tgbotapi.NewInlineKeyboardRow(
		tgbotapi.NewInlineKeyboardButtonData("Send contact to buyer", "/add/send_ctct"),
	),
	tgbotapi.NewInlineKeyboardRow(
		tgbotapi.NewInlineKeyboardButtonData("Charge him crypto", "/add/charge_crypto"),
	),
	tgbotapi.NewInlineKeyboardRow(
		tgbotapi.NewInlineKeyboardButtonData("Exit", "/exit"),
	),
)
// handler pairs a regexp path with the callback invoked on matching updates.
type handler struct {
	Path string
	Handler func(update tgbotapi.Update)
}
// handlers is the ordered list consulted by execHandlers; every matching
// entry runs, not just the first.
var handlers = []handler{}
// addHandler registers a route; path is a regular expression matched against
// the textual payload of each update (empty matches everything).
func addHandler(path string, onmatch func(update tgbotapi.Update)) {
	handlers = append(handlers, handler{Path: path, Handler: onmatch})
}
// execHandlers extracts the textual payload from whichever sub-message the
// update carries, then invokes every registered handler whose path regexp
// matches it. The dead `str != ""` guards (str was always empty when each
// branch ran) have been removed.
func execHandlers(u tgbotapi.Update) {
	str := ""
	if u.Message != nil { // private/group message
		str = u.Message.Text
	} else if u.ChannelPost != nil {
		str = u.ChannelPost.Text
	} else if u.ChosenInlineResult != nil {
		// Bug fix: this branch previously read u.ChannelPost.Text, which is
		// nil here and panicked on every chosen inline result.
		str = u.ChosenInlineResult.Query
	} else if u.EditedChannelPost != nil {
		str = u.EditedChannelPost.Text
	} else if u.EditedMessage != nil {
		str = u.EditedMessage.Text
	} else if u.CallbackQuery != nil {
		str = u.CallbackQuery.Data
	} else if u.InlineQuery != nil {
		str = u.InlineQuery.Query
	} else if u.PreCheckoutQuery != nil {
		str = u.PreCheckoutQuery.InvoicePayload
	} else if u.ShippingQuery != nil {
		str = u.ShippingQuery.InvoicePayload
	}
	fmt.Print("str: ", str)
	// Run every handler whose pattern matches; order of registration is
	// preserved.
	for _, h := range handlers {
		match, err := regexp.MatchString(h.Path, str)
		if err != nil {
			logger.Print("error matching path to input string")
		}
		if match {
			h.Handler(u)
		}
	}
}
// uFrom retrieves the originating user ("From") from whichever sub-message
// the update carries; it returns a zero-valued user when none is present.
func uFrom(u tgbotapi.Update) *tgbotapi.User {
	switch {
	case u.CallbackQuery != nil:
		return u.CallbackQuery.From
	case u.Message != nil:
		return u.Message.From
	case u.ChannelPost != nil:
		return u.ChannelPost.From
	case u.ShippingQuery != nil:
		return u.ShippingQuery.From
	case u.PreCheckoutQuery != nil:
		return u.PreCheckoutQuery.From
	case u.InlineQuery != nil:
		return u.InlineQuery.From
	case u.EditedMessage != nil:
		return u.EditedMessage.From
	case u.EditedChannelPost != nil:
		return u.EditedChannelPost.From
	case u.ChosenInlineResult != nil:
		return u.ChosenInlineResult.From
	}
	return &tgbotapi.User{}
}
// parseUpdateID converts a Telegram string ID to int64; on malformed input
// it logs and returns 0, matching the previous inline behavior.
func parseUpdateID(id string) int64 {
	out, err := strconv.ParseInt(id, 10, 64)
	if err != nil {
		fmt.Println("error converting string to int", id, out, err)
	}
	return out
}

// uID returns the chat/query ID of whichever sub-message the update carries,
// or 0 when none is present. The repeated ParseInt blocks of the original
// are factored into parseUpdateID.
func uID(u tgbotapi.Update) int64 {
	switch {
	case u.CallbackQuery != nil:
		return u.CallbackQuery.Message.Chat.ID
	case u.Message != nil:
		return u.Message.Chat.ID
	case u.ChannelPost != nil:
		return u.ChannelPost.Chat.ID
	case u.ShippingQuery != nil:
		return parseUpdateID(u.ShippingQuery.ID)
	case u.PreCheckoutQuery != nil:
		return parseUpdateID(u.PreCheckoutQuery.ID)
	case u.InlineQuery != nil:
		return parseUpdateID(u.InlineQuery.ID)
	case u.EditedMessage != nil:
		return u.EditedMessage.Chat.ID
	case u.EditedChannelPost != nil:
		return u.EditedChannelPost.Chat.ID
	case u.ChosenInlineResult != nil:
		return parseUpdateID(u.ChosenInlineResult.InlineMessageID)
	}
	return 0
}
// main wires up the bot: it reads credentials from the environment,
// registers the command handlers, then fans incoming updates out to one
// channel-plus-goroutine per user so multi-step dialogs can block on input.
func main() {
	owner = os.Getenv("TELEGRAM_OWNER")
	ApiKey = os.Getenv("TELEGRAM_KEY")
	bot, err := tgbotapi.NewBotAPI(ApiKey)
	if err != nil {
		// NOTE(review): auth failure prints the credentials and returns
		// silently instead of panicking.
		fmt.Print(ApiKey, owner)
		//log.Panic(err)
		return
	}
	bot.Debug = true
	log.Printf("Authorized on account %s", bot.Self.UserName)
	u := tgbotapi.NewUpdate(0)
	u.Timeout = 60
	updates, err := bot.GetUpdatesChan(u)
	// Catch-all handler: the empty pattern matches every update.
	addHandler("", func(update tgbotapi.Update) {
		fmt.Print("woo!\n")
	})
	// /list sends every catalog entry to the requesting chat.
	addHandler("/list", func(update tgbotapi.Update) {
		if update.CallbackQuery != nil { //inline button press
			//bot.AnswerCallbackQuery(tgbotapi.NewCallback(update.CallbackQuery.ID, update.CallbackQuery.Data))
			for _, prod := range catalog {
				//bprod,err := json.Marshal(prod)
				//ffmt.Print(prod)
				//_ = bprod
				// NOTE(review): err here is the stale error from
				// GetUpdatesChan above; the Marshal that produced it is
				// commented out.
				if err != nil {
					logger.Println("failure to marshall product into json")
				}
				msg := tgbotapi.NewMessage(update.CallbackQuery.Message.Chat.ID, ffmt.Sprint(prod))
				msg.ReplyMarkup = exitKeyboard
				bot.Send(msg)
			}
		} else if update.Message != nil {
			for _, prod := range catalog {
				//bprod,err := json.Marshal(prod)
				//ffmt.Print(prod)
				//_ = bprod
				if err != nil {
					logger.Println("failure to marshall product into json")
				}
				msg := tgbotapi.NewMessage(update.Message.Chat.ID, ffmt.Sprint(prod))
				msg.ReplyMarkup = exitKeyboard
				bot.Send(msg)
			}
		}
	})
	// /add drives the multi-step "create product" dialog; between questions
	// it blocks on the per-user channel and advances the state map.
	addHandler("/add", func(update tgbotapi.Update) {
		//if update.CallbackQuery != nil { //inline button press
		//bot.AnswerCallbackQuery(tgbotapi.NewCallback(update.CallbackQuery.ID, update.CallbackQuery.Data))
		msg := tgbotapi.NewMessage(uID(update), "Send the product description")
		msg.ReplyMarkup = exitKeyboard
		bot.Send(msg)
		//wait for answer
		state[uFrom(update).UserName] = "add_desc"
		update = <-chans[uFrom(update).UserName]
		catalog[update.Message.From.UserName] = append(catalog[update.Message.From.UserName], product{Description: update.Message.Text})
		msg = tgbotapi.NewMessage(uID(update), "Send the product price")
		msg.ReplyMarkup = exitKeyboard
		bot.Send(msg)
		state[uFrom(update).UserName] = "add_price"
		update = <-chans[uFrom(update).UserName]
		f, err := strconv.ParseFloat(update.Message.Text, 64)
		if err != nil {
			msg := tgbotapi.NewMessage(uID(update), "Only send a number, please")
			msg.ReplyMarkup = exitKeyboard
			bot.Send(msg)
			//logger.Print("unable to convert message to float")
		} else {
			// The product being built is always the last entry in the
			// seller's catalog slice.
			catalog[update.Message.From.UserName][len(catalog[update.Message.From.UserName])-1].Price = f
			msg := tgbotapi.NewMessage(uID(update), "Send any other message you'd like the buyer to receive, then press the next button")
			msg.ReplyMarkup = extnextKeyboard
			bot.Send(msg)
		}
		state[uFrom(update).UserName] = "add_other"
		for { //recieve until next is pressed
			update = <-chans[uFrom(update).UserName]
			if update.CallbackQuery != nil && update.CallbackQuery.Data == "/next" { //inline button press
				bot.AnswerCallbackQuery(tgbotapi.NewCallback(update.CallbackQuery.ID, update.CallbackQuery.Data))
				msg := tgbotapi.NewMessage(uID(update), "Chose what to do when an item is bought")
				msg.ReplyMarkup = sendchargeKeyboard
				bot.Send(msg)
				break
			} else if update.CallbackQuery != nil && update.CallbackQuery.Data == "/exit" {
				bot.AnswerCallbackQuery(tgbotapi.NewCallback(update.CallbackQuery.ID, update.CallbackQuery.Data))
				// Abort: drop the half-built product (last entry) if it was
				// never marked Done.
				if len(catalog[update.CallbackQuery.From.UserName]) > 0 && !catalog[update.CallbackQuery.From.UserName][len(catalog[update.CallbackQuery.From.UserName])-1].Done {
					catalog[update.CallbackQuery.From.UserName] = catalog[update.CallbackQuery.From.UserName][:len(catalog[update.CallbackQuery.From.UserName])-1]
				}
				state[uFrom(update).UserName] = ""
				return
			} else {
				// Any other message is stored as an extra seller message.
				catalog[uFrom(update).UserName][len(catalog[uFrom(update).UserName])-1].Extra = append(catalog[uFrom(update).UserName][len(catalog[uFrom(update).UserName])-1].Extra, *update.Message)
			}
		}
		state[uFrom(update).UserName] = "add_sendorcharge"
		msg = tgbotapi.NewMessage(uID(update), "Please answer what to do when the user buys the product")
		msg.ReplyMarkup = sendchargeKeyboard
		for {
			update = <-chans[uFrom(update).UserName]
			if update.CallbackQuery != nil && update.CallbackQuery.Data == "/add/charge_crypto" {
				bot.AnswerCallbackQuery(tgbotapi.NewCallback(update.CallbackQuery.ID, update.CallbackQuery.Data))
				catalog[uFrom(update).UserName][len(catalog[uFrom(update).UserName])-1].OnBought = "crypto"
				bot.Send(tgbotapi.NewMessage(uID(update), "Please state the favored cryptocurrency short name, e.g. BTC ETH XMR"))
				break
			} else if update.CallbackQuery != nil && update.CallbackQuery.Data == "/add/send_ctct" {
				bot.AnswerCallbackQuery(tgbotapi.NewCallback(update.CallbackQuery.ID, update.CallbackQuery.Data))
				catalog[uFrom(update).UserName][len(catalog[uFrom(update).UserName])-1].OnBought = "contact"
				msg := tgbotapi.NewMessage(uID(update), "Thank you for adding a product for sale!")
				msg.ReplyMarkup = mainMenuKeyboard
				bot.Send(msg)
				state[uFrom(update).UserName] = ""
				return
			} else {
				msg := tgbotapi.NewMessage(uID(update), "Please choose an item")
				msg.ReplyMarkup = sendchargeKeyboard
				bot.Send(msg)
			}
		}
		// Crypto path: collect the currency ticker, then the public key.
		state[uFrom(update).UserName] = "add_get_crypto"
		update = <-chans[uFrom(update).UserName]
		catalog[update.Message.From.UserName][len(catalog[update.Message.From.UserName])-1].Currency = strings.Trim(update.Message.Text, " .,\n")
		msg = tgbotapi.NewMessage(uID(update), "Please send your public key")
		bot.Send(msg)
		state[uFrom(update).UserName] = "add_get_crypto_key"
		update = <-chans[uFrom(update).UserName]
		catalog[update.Message.From.UserName][len(catalog[update.Message.From.UserName])-1].CryptoKey = strings.Trim(update.Message.Text, " .,\n")
		catalog[update.Message.From.UserName][len(catalog[update.Message.From.UserName])-1].Done = true
		msg = tgbotapi.NewMessage(uID(update), "Thank you for adding a product for sale!")
		msg.ReplyMarkup = mainMenuKeyboard
		bot.Send(msg)
		state[uFrom(update).UserName] = ""
		return
	})
	// Fallback handler: acknowledges callback queries and serves /start.
	addHandler("", func(update tgbotapi.Update) {
		if update.CallbackQuery != nil { //inline button press
			bot.AnswerCallbackQuery(tgbotapi.NewCallback(update.CallbackQuery.ID, update.CallbackQuery.Data))
			//fmt.Print("callback data: ", update.CallbackQuery.Data, "\n")
		}
		if update.Message != nil {
			switch state[uFrom(update).UserName] {
			// TODO: add exit inline keyboard on the add states
			default:
				msg := tgbotapi.NewMessage(update.Message.Chat.ID, "fdsfgs")
				switch update.Message.Text {
				case "/start":
					msg.ReplyMarkup = mainMenuKeyboard
				}
				if update.Message.Text == "/start@"+bot.Self.UserName {
					msg.ReplyMarkup = mainMenuKeyboard
				}
				bot.Send(msg)
			}
		}
	})
	time.Sleep(time.Millisecond * 500)
	updates.Clear()
	// Dispatch loop: lazily create a channel plus consumer goroutine per
	// user, then forward every update to that user's channel.
	for update := range updates {
		if chans[uFrom(update).UserName] == nil {
			chans[uFrom(update).UserName] = make(chan tgbotapi.Update, 16)
			go func() {
				from := uFrom(update).UserName
				// Idle timeout: after 30 minutes, close the channel if the
				// user has been quiet long enough; otherwise check again in
				// an hour.
				tout := time.After(30 * time.Minute)
				last := time.Now()
				for {
					select {
					case u := <-chans[from]:
						last = time.Now()
						execHandlers(u)
					case <-tout:
						//no longer active
						if len(chans[from]) == 0 && time.Now().Sub(last) > 17*time.Minute {
							close(chans[from])
							return
						} else {
							tout = time.After(time.Hour)
						}
					}
				}
			}()
		}
		chans[uFrom(update).UserName] <- update
		// if nil make chan and goroutine
		// send to chan
		//
		//execHandlers(&update)
		//ffmt.Print(update)
	}
}
| [
"\"TELEGRAM_OWNER\"",
"\"TELEGRAM_KEY\"",
"\"TELEGRAM_OWNER\"",
"\"TELEGRAM_KEY\""
]
| []
| [
"TELEGRAM_OWNER",
"TELEGRAM_KEY"
]
| [] | ["TELEGRAM_OWNER", "TELEGRAM_KEY"] | go | 2 | 0 | |
go/test/endtoend/cluster/topo_process.go | /*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"encoding/json"
"fmt"
"net/http"
"os"
"os/exec"
"path"
"strings"
"syscall"
"time"
"vitess.io/vitess/go/vt/log"
)
// TopoProcess is a generic handle for a running Topo service .
// It can be spawned manually
type TopoProcess struct {
	Name string
	// Binary is the executable to launch: etcd, zkctl or consul.
	Binary string
	DataDirectory string
	LogDirectory string
	ListenClientURL string
	AdvertiseClientURL string
	Port int
	Host string
	// VerifyURL is polled by IsHealthy to decide when the service is up.
	VerifyURL string
	PeerURL string
	// ZKPorts is the "leader:election:client" port triple used for zk2.
	ZKPorts string
	// proc is the running child process; exit receives its Wait() result.
	proc *exec.Cmd
	exit chan error
}
// Setup starts a new topo service of the requested flavor; any flavor other
// than "zk2" or "consul" falls back to etcd.
func (topo *TopoProcess) Setup(topoFlavor string, cluster *LocalProcessCluster) (err error) {
	if topoFlavor == "zk2" {
		return topo.SetupZookeeper(cluster)
	}
	if topoFlavor == "consul" {
		return topo.SetupConsul(cluster)
	}
	return topo.SetupEtcd()
}
// SetupEtcd spawns a new etcd service and initializes it with the defaults.
// The service is kept running in the background until TearDown() is called.
func (topo *TopoProcess) SetupEtcd() (err error) {
	topo.proc = exec.Command(
		topo.Binary,
		"--name", topo.Name,
		"--data-dir", topo.DataDirectory,
		"--listen-client-urls", topo.ListenClientURL,
		"--advertise-client-urls", topo.AdvertiseClientURL,
		"--initial-advertise-peer-urls", topo.PeerURL,
		"--listen-peer-urls", topo.PeerURL,
		"--initial-cluster", fmt.Sprintf("%s=%s", topo.Name, topo.PeerURL),
		"--enable-v2=true",
	)

	// createDirectory is a package helper defined elsewhere; an existing
	// directory is not an error.
	err = createDirectory(topo.DataDirectory, 0700)
	if err != nil && !os.IsExist(err) {
		return err
	}
	errFile, err := os.Create(path.Join(topo.DataDirectory, "topo-stderr.txt"))
	if err != nil {
		return err
	}
	topo.proc.Stderr = errFile

	topo.proc.Env = append(topo.proc.Env, os.Environ()...)

	log.Infof("Starting etcd with command: %v", strings.Join(topo.proc.Args, " "))

	err = topo.proc.Start()
	if err != nil {
		return
	}

	// Reap the child in the background so its Wait() result reaches the
	// health loop below and, eventually, TearDown.
	topo.exit = make(chan error)
	go func() {
		topo.exit <- topo.proc.Wait()
	}()

	// Poll the health URL for up to 60s, failing fast if the process dies.
	timeout := time.Now().Add(60 * time.Second)
	for time.Now().Before(timeout) {
		if topo.IsHealthy() {
			return
		}
		select {
		case err := <-topo.exit:
			return fmt.Errorf("process '%s' exited prematurely (err: %s)", topo.Binary, err)
		default:
			time.Sleep(300 * time.Millisecond)
		}
	}

	return fmt.Errorf("process '%s' timed out after 60s (err: %s)", topo.Binary, <-topo.exit)
}
// SetupZookeeper spawns a new zookeeper topo service and initializes it with
// the defaults. The service is kept running in the background until
// TearDown() is called.
//
// Unlike the original, the error from creating the stderr capture file is no
// longer silently discarded.
func (topo *TopoProcess) SetupZookeeper(cluster *LocalProcessCluster) (err error) {
	host, err := os.Hostname()
	if err != nil {
		return
	}

	// leader:election:client ports, the client port being the one reserved
	// for this process.
	topo.ZKPorts = fmt.Sprintf("%d:%d:%d", cluster.GetAndReservePort(), cluster.GetAndReservePort(), topo.Port)

	topo.proc = exec.Command(
		topo.Binary,
		"--log_dir", topo.LogDirectory,
		"--zk.cfg", fmt.Sprintf("1@%v:%s", host, topo.ZKPorts),
		"init",
	)

	errFile, err := os.Create(path.Join(topo.DataDirectory, "topo-stderr.txt"))
	if err != nil {
		return err
	}
	topo.proc.Stderr = errFile
	topo.proc.Env = append(topo.proc.Env, os.Environ()...)

	log.Infof("Starting zookeeper with args %v", strings.Join(topo.proc.Args, " "))
	return topo.proc.Run()
}
// ConsulConfigs are the configurations that are added the config files which are used by consul
type ConsulConfigs struct {
	Ports PortsInfo `json:"ports"`
	DataDir string `json:"data_dir"`
	LogFile string `json:"log_file"`
}
// PortsInfo is the different ports used by consul; field names mirror the
// keys of consul's JSON "ports" config section.
type PortsInfo struct {
	DNS int `json:"dns"`
	HTTP int `json:"http"`
	SerfLan int `json:"serf_lan"`
	SerfWan int `json:"serf_wan"`
	Server int `json:"server"`
}
// SetupConsul spawns a new consul service and initializes it with the defaults.
// The service is kept running in the background until TearDown() is called.
func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) {
	topo.VerifyURL = fmt.Sprintf("http://%s:%d/v1/kv/?keys", topo.Host, topo.Port)

	_ = os.MkdirAll(topo.LogDirectory, os.ModePerm)
	_ = os.MkdirAll(topo.DataDirectory, os.ModePerm)

	configFile := path.Join(os.Getenv("VTDATAROOT"), "consul.json")

	logFile := path.Join(topo.LogDirectory, "/consul.log")
	_, _ = os.Create(logFile)

	var config []byte
	// Reserve fresh ports for everything except HTTP, which the caller
	// already assigned to this process.
	configs := ConsulConfigs{
		Ports: PortsInfo{
			DNS: cluster.GetAndReservePort(),
			HTTP: topo.Port,
			SerfLan: cluster.GetAndReservePort(),
			SerfWan: cluster.GetAndReservePort(),
			Server: cluster.GetAndReservePort(),
		},
		DataDir: topo.DataDirectory,
		LogFile: logFile,
	}
	config, err = json.Marshal(configs)
	if err != nil {
		log.Error(err.Error())
		return
	}

	err = os.WriteFile(configFile, config, 0666)
	if err != nil {
		return
	}

	topo.proc = exec.Command(
		topo.Binary, "agent",
		"-server",
		"-ui",
		"-bootstrap-expect", "1",
		"-bind", "127.0.0.1",
		"-config-file", configFile,
	)

	errFile, _ := os.Create(path.Join(topo.DataDirectory, "topo-stderr.txt"))
	topo.proc.Stderr = errFile

	topo.proc.Env = append(topo.proc.Env, os.Environ()...)

	log.Errorf("Starting consul with args %v", strings.Join(topo.proc.Args, " "))
	err = topo.proc.Start()
	if err != nil {
		return
	}

	// Reap the child in the background so its Wait() result reaches the
	// health loop below and, eventually, TearDown.
	topo.exit = make(chan error)
	go func() {
		topo.exit <- topo.proc.Wait()
	}()

	// Poll the health URL for up to 60s, failing fast if the process dies.
	timeout := time.Now().Add(60 * time.Second)
	for time.Now().Before(timeout) {
		if topo.IsHealthy() {
			return
		}
		select {
		case err := <-topo.exit:
			return fmt.Errorf("process '%s' exited prematurely (err: %s)", topo.Binary, err)
		default:
			time.Sleep(300 * time.Millisecond)
		}
	}

	return fmt.Errorf("process '%s' timed out after 60s (err: %s)", topo.Binary, <-topo.exit)
}
// TearDown shutdowns the running topo service
// For zk2 it delegates to zkctl; otherwise it signals the process with
// SIGTERM and, unless data is being kept, removes the topo directories and
// restores the original VTDATAROOT.
func (topo *TopoProcess) TearDown(Cell string, originalVtRoot string, currentRoot string, keepdata bool, topoFlavor string) error {
	if topoFlavor == "zk2" {
		cmd := "shutdown"
		if keepdata {
			cmd = "teardown"
		}
		topo.proc = exec.Command(
			topo.Binary,
			"--log_dir", topo.LogDirectory,
			"--zk.cfg", fmt.Sprintf("1@%v:%s", topo.Host, topo.ZKPorts),
			cmd,
		)
		err := topo.proc.Run()
		if err != nil {
			return err
		}
	} else {
		if topo.proc == nil || topo.exit == nil {
			return nil
		}

		// NOTE(review): *keepData is a package-level flag defined elsewhere
		// in this package; either it or the keepdata argument preserves data.
		if !(*keepData || keepdata) {
			topo.removeTopoDirectories(Cell)
		}

		// Attempt graceful shutdown with SIGTERM first
		_ = topo.proc.Process.Signal(syscall.SIGTERM)

		if !(*keepData || keepdata) {
			_ = os.RemoveAll(topo.DataDirectory)
			_ = os.RemoveAll(currentRoot)
			_ = os.Setenv("VTDATAROOT", originalVtRoot)
		}

		// Give the process 10s to exit before killing it outright.
		select {
		case <-topo.exit:
			topo.proc = nil
			return nil

		case <-time.After(10 * time.Second):
			topo.proc.Process.Kill()
			topo.proc = nil
			return <-topo.exit
		}
	}

	return nil
}
// IsHealthy reports whether the topo server answers its verify URL with
// HTTP 200.
func (topo *TopoProcess) IsHealthy() bool {
	resp, err := http.Get(topo.VerifyURL)
	if err != nil {
		return false
	}
	// Close the body to release the underlying connection; the original
	// leaked one connection per health probe.
	defer resp.Body.Close()
	return resp.StatusCode == 200
}
// removeTopoDirectories deletes the global and cell-local vitess topo paths,
// logging (but not propagating) any failure.
func (topo *TopoProcess) removeTopoDirectories(Cell string) {
	targets := []struct {
		errFormat string
		path      string
	}{
		{"Failed to remove global topo directory: %v", "/vitess/global"},
		{"Failed to remove local topo directory: %v", "/vitess/" + Cell},
	}
	for _, target := range targets {
		if err := topo.ManageTopoDir("rmdir", target.path); err != nil {
			log.Errorf(target.errFormat, err)
		}
	}
}
// ManageTopoDir creates ("mkdir") or removes ("rmdir") a directory under the
// topo server's verify URL; any other command is a no-op.
//
// Fixes over the original: errors from http.NewRequest are no longer
// discarded, and response bodies are closed so connections are released.
func (topo *TopoProcess) ManageTopoDir(command string, directory string) (err error) {
	url := topo.VerifyURL + directory
	payload := strings.NewReader(`{"dir":"true"}`)
	switch command {
	case "mkdir":
		req, err := http.NewRequest("PUT", url, payload)
		if err != nil {
			return err
		}
		req.Header.Add("content-type", "application/json")
		resp, err := http.DefaultClient.Do(req)
		if err == nil {
			resp.Body.Close()
		}
		return err
	case "rmdir":
		req, err := http.NewRequest("DELETE", url+"?dir=true", payload)
		if err != nil {
			return err
		}
		resp, err := http.DefaultClient.Do(req)
		if err == nil {
			resp.Body.Close()
		}
		return err
	default:
		return nil
	}
}
// TopoProcessInstance returns a TopoProcess handle for a topo service —
// etcd by default, zkctl for "zk2", consul for "consul" — configured with
// the given ports and hostname. The process must be started with Setup().
func TopoProcessInstance(port int, peerPort int, hostname string, flavor string, name string) *TopoProcess {
	var binary string
	switch flavor {
	case "zk2":
		binary = "zkctl"
	case "consul":
		binary = "consul"
	default:
		binary = "etcd"
	}

	dataDir := path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("%s_%d", "topo", port))
	return &TopoProcess{
		Name:               name,
		Binary:             binary,
		Port:               port,
		Host:               hostname,
		AdvertiseClientURL: fmt.Sprintf("http://%s:%d", hostname, port),
		ListenClientURL:    fmt.Sprintf("http://%s:%d", hostname, port),
		DataDirectory:      dataDir,
		LogDirectory:       path.Join(dataDir, "logs"),
		VerifyURL:          fmt.Sprintf("http://%s:%d/v2/keys", hostname, port),
		PeerURL:            fmt.Sprintf("http://%s:%d", hostname, peerPort),
	}
}
| [
"\"VTDATAROOT\"",
"\"VTDATAROOT\"",
"\"VTDATAROOT\""
]
| []
| [
"VTDATAROOT"
]
| [] | ["VTDATAROOT"] | go | 1 | 0 | |
model_zoo/official/recommend/wide_and_deep_multitable/train_and_eval.py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" training_and_evaluating """
import os
import sys
from mindspore import Model, context
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.callback import TimeMonitor
from src.wide_and_deep import PredictWithSigmoid, TrainStepWrap, NetWithLossClass, WideDeepModel
from src.callbacks import LossCallBack, EvalCallBack
from src.datasets import create_dataset, compute_emb_dim
from src.metrics import AUCMetric
from src.config import WideDeepConfig
# Make the grandparent directory of this script importable for sibling
# packages. NOTE(review): this runs AFTER the `src.*` imports above, so it
# cannot affect them — confirm whether it is still needed.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def get_WideDeep_net(config):
    """Build the wide&deep networks for training and evaluation.

    Args:
        config: WideDeepConfig with model hyper-parameters.

    Returns:
        tuple: (train_net, eval_net), both wrapping one shared WideDeepModel.
    """
    backbone = WideDeepModel(config)
    with_loss = NetWithLossClass(backbone, config)
    trainable = TrainStepWrap(with_loss, config)
    predicting = PredictWithSigmoid(backbone)
    return trainable, predicting
class ModelBuilder():
    """
    Factory for the wide&deep train/eval networks and training hooks.
    """
    def __init__(self):
        pass

    def get_hook(self):
        """Placeholder for subclass-specific hooks; returns None."""
        pass

    def get_train_hook(self):
        """Return the list of training callbacks (currently just LossCallBack).

        Bug fix: the original evaluated int(os.getenv('DEVICE_ID')), which
        raises TypeError when DEVICE_ID is unset; default to "0" instead.
        """
        hooks = []
        callback = LossCallBack()
        hooks.append(callback)
        if int(os.getenv('DEVICE_ID', '0')) == 0:
            pass
        return hooks

    def get_net(self, config):
        """Build (train_net, eval_net) for the given WideDeepConfig."""
        return get_WideDeep_net(config)
def train_and_eval(config):
    """Run wide&deep training with interleaved evaluation.

    Builds the train/eval datasets, wraps the networks in a mindspore Model
    with an AUC metric, and trains for config.epochs with loss, eval and
    checkpoint callbacks attached.
    """
    data_path = config.data_path
    epochs = config.epochs
    print("epochs is {}".format(epochs))
    # Both datasets are created with epochs=1; the total epoch count is
    # passed to model.train() below instead.
    ds_train = create_dataset(data_path, train_mode=True, epochs=1,
                              batch_size=config.batch_size, is_tf_dataset=config.is_tf_dataset)
    ds_eval = create_dataset(data_path, train_mode=False, epochs=1,
                             batch_size=config.batch_size, is_tf_dataset=config.is_tf_dataset)
    print("ds_train.size: {}".format(ds_train.get_dataset_size()))
    print("ds_eval.size: {}".format(ds_eval.get_dataset_size()))

    net_builder = ModelBuilder()
    train_net, eval_net = net_builder.get_net(config)
    train_net.set_train()
    auc_metric = AUCMetric()

    model = Model(train_net, eval_network=eval_net, metrics={"auc": auc_metric})

    eval_callback = EvalCallBack(model, ds_eval, auc_metric, config)

    callback = LossCallBack(config)
    # Only save the last checkpoint at the last epoch. For saving epochs at each epoch, please
    # set save_checkpoint_steps=ds_train.get_dataset_size()
    ckptconfig = CheckpointConfig(save_checkpoint_steps=ds_train.get_dataset_size()*config.epochs,
                                  keep_checkpoint_max=10)
    ckpoint_cb = ModelCheckpoint(prefix='widedeep_train',
                                 directory=config.ckpt_path, config=ckptconfig)
    model.train(epochs, ds_train, callbacks=[TimeMonitor(ds_train.get_dataset_size()), eval_callback,
                                             callback, ckpoint_cb], sink_size=ds_train.get_dataset_size())
if __name__ == "__main__":
    # Build config from defaults plus CLI flags, derive embedding sizes,
    # then train in graph mode targeting a "Davinci" (Ascend) device.
    wide_and_deep_config = WideDeepConfig()
    wide_and_deep_config.argparse_init()
    compute_emb_dim(wide_and_deep_config)
    context.set_context(mode=context.GRAPH_MODE, device_target="Davinci",
                        save_graphs=True)
    train_and_eval(wide_and_deep_config)
| []
| []
| [
"DEVICE_ID"
]
| [] | ["DEVICE_ID"] | python | 1 | 0 | |
main.go | package main
import (
"log"
"net/http"
"os"
_ "github.com/jinzhu/gorm/dialects/mysql"
"github.com/joho/godotenv"
"github.com/sirupsen/logrus"
"./app"
"./app/config"
"./app/models"
)
// main loads configuration from the environment, verifies the database
// connection, applies the configured log level, runs migrations and starts
// the HTTP API server.
func main() {
	// Loads the env variables
	err := godotenv.Load()
	if err != nil {
		logrus.Fatal("Error loading .env file")
	}

	config.InitDB()

	// Check database connection
	db := config.GetDatabaseConnection()
	defer db.Close()

	// Prints the version and the address of our api to the console
	logrus.Info("Version is ", os.Getenv("API_VERSION"))
	logrus.Info("Starting Server on http://localhost:", os.Getenv("API_PORT"))

	// Set log level. Bug fix: the original switch set ErrorLevel in every
	// branch, making LOG_LEVEL a no-op; map each value to its real level.
	switch os.Getenv("LOG_LEVEL") {
	case "debug":
		logrus.SetLevel(logrus.DebugLevel)
	case "info":
		logrus.SetLevel(logrus.InfoLevel)
	case "warn":
		logrus.SetLevel(logrus.WarnLevel)
	default:
		logrus.SetLevel(logrus.ErrorLevel)
	}

	// Creates the database schema
	migrateDatabase()

	// Server router on given port and attach the cors headers
	server := app.NewServer()
	log.Fatal(http.ListenAndServe(":"+os.Getenv("API_PORT"), server))
}
func migrateDatabase() {
db := config.GetDatabaseConnection()
// Migrate the given tables
db.AutoMigrate(&models.User{})
db.AutoMigrate(&models.Comment{})
} | [
"\"API_VERSION\"",
"\"API_PORT\"",
"\"LOG_LEVEL\"",
"\"API_PORT\""
]
| []
| [
"API_PORT",
"API_VERSION",
"LOG_LEVEL"
]
| [] | ["API_PORT", "API_VERSION", "LOG_LEVEL"] | go | 3 | 0 | |
server/routes.go | package main
import (
// #include <unistd.h>
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"runtime"
"strings"
"time"
"github.com/pbnjay/memory"
"github.com/shirou/gopsutil/cpu"
"github.com/shirou/gopsutil/disk"
"github.com/shirou/gopsutil/host"
"github.com/shirou/gopsutil/mem"
"github.com/shirou/gopsutil/net"
)
// SysInfo is a generic holder for passing static system information back to
// the API client (fixed facts, as opposed to the live counters in Metrics).
type SysInfo struct {
	Hostname string `json:"hostname"`
	Platform string `json:"platform"`
	OS string `json:"os"`
	Uptime uint64 `json:"uptime"`
	Arch string `json:"architecture"`
	CPUs int `json:"cpuCount"`
	CPUModel string `json:"cpuModel"`
	Mem uint64 `json:"mem"`
	GoVersion string `json:"goVersion"`
	NetRemoteAddr string `json:"netRemoteAddress"`
	NetHost string `json:"netHost"`
	// IsContainer/IsKubernetes are detected from well-known filesystem paths.
	IsContainer bool `json:"isContainer"`
	IsKubernetes bool `json:"isKubernetes"`
	// EnvVars is the process environment with obviously sensitive entries
	// filtered out (see apiInfoRoute).
	EnvVars []string `json:"envVars"`
}
// Metrics are real time system counters (memory, CPU, disk and network),
// sampled at request time.
type Metrics struct {
	MemTotal uint64 `json:"memTotal"`
	MemUsed uint64 `json:"memUsed"`
	CPUPerc float64 `json:"cpuPerc"`
	DiskTotal uint64 `json:"diskTotal"`
	DiskFree uint64 `json:"diskFree"`
	NetBytesSent uint64 `json:"netBytesSent"`
	NetBytesRecv uint64 `json:"netBytesRecv"`
}
// Weather bundles the caller's IP address, geo-location (from IPStack) and
// current weather (from Dark Sky) into one API response.
type Weather struct {
	IPAddress string `json:"ipAddress"`
	GeoInfo ipstackAPIData `json:"location"`
	WeatherInfo darkskyAPIData `json:"weather"`
}
// HTTPError is the JSON error envelope returned by the API routes.
type HTTPError struct {
	Error string `json:"error"`
}
// ipstackAPIData holds the subset of an IPStack API response we care about;
// JSON tags match the IPStack field names.
type ipstackAPIData struct {
	City string `json:"city"`
	Country string `json:"country_name"`
	Lat float64 `json:"latitude"`
	Long float64 `json:"longitude"`
}
// darkskyAPIData holds the subset of a Dark Sky API response we care about;
// JSON tags match the Dark Sky field names.
type darkskyAPIData struct {
	Timezone string `json:"timezone"`
	Currently struct {
		Summary string `json:"summary"`
		Icon string `json:"icon"`
		PrecipProbability float32 `json:"precipProbability"`
		Temperature float32 `json:"temperature"`
		WindSpeed float32 `json:"windSpeed"`
		UVIndex float32 `json:"uvIndex"`
		Humidity float32 `json:"humidity"`
	} `json:"currently"`
}
// Routes is our exported class holding the route handlers' shared
// configuration: static content directory, CORS switch and external API keys.
type Routes struct {
	contentDir string
	disableCORS bool
	darkskyAPIKey string
	ipstackAPIKey string
}
// configRoute serves the frontend configuration: a JSON object exposing only
// the AUTH_CLIENT_ID environment variable.
func (r Routes) configRoute(resp http.ResponseWriter, req *http.Request) {
	if r.disableCORS {
		// Allow any origin when CORS enforcement is switched off.
		resp.Header().Set("Access-Control-Allow-Origin", "*")
	}
	resp.Header().Set("Content-Type", "application/json")

	// Passes through a single env var, AUTH_CLIENT_ID
	type frontendConfig struct {
		AuthClientAd string `json:"AUTH_CLIENT_ID"`
	}
	payload := frontendConfig{AuthClientAd: os.Getenv("AUTH_CLIENT_ID")}

	body, err := json.Marshal(payload)
	if err != nil {
		apiError(resp, http.StatusInternalServerError, err.Error())
		return
	}
	_, _ = resp.Write(body)
}
//
// /api/info - Return system information and properties
//
func (r Routes) apiInfoRoute(resp http.ResponseWriter, req *http.Request) {
	// CORS is for wimps
	if r.disableCORS {
		resp.Header().Set("Access-Control-Allow-Origin", "*")
	}
	resp.Header().Set("Content-Type", "application/json")

	var info SysInfo
	hostInfo, err := host.Info()
	if err != nil {
		apiError(resp, http.StatusInternalServerError, err.Error())
		return
	}
	cpuInfo, err := cpu.Info()
	if err != nil {
		apiError(resp, http.StatusInternalServerError, err.Error())
		return
	}

	// Grab various bits of infomation from where we can
	info.Hostname, _ = os.Hostname()
	info.GoVersion = runtime.Version()
	info.OS = hostInfo.Platform + " " + hostInfo.PlatformVersion
	info.Platform = hostInfo.OS
	info.Uptime = hostInfo.Uptime
	info.Mem = memory.TotalMemory()
	info.Arch = runtime.GOARCH
	info.CPUs = runtime.NumCPU()
	info.CPUModel = cpuInfo[0].ModelName
	info.NetRemoteAddr = req.RemoteAddr
	info.NetHost = req.Host
	// fileExists is a package helper defined elsewhere; these paths exist
	// inside Docker containers and Kubernetes pods respectively.
	info.IsContainer = fileExists("/.dockerenv")
	info.IsKubernetes = fileExists("/var/run/secrets/kubernetes.io")

	// Full grab of all env vars
	info.EnvVars = os.Environ()
	// Basic attempt to remove sensitive vars
	// Strange for means we can delete elements while looping over
	// (iterating backwards keeps remaining indices valid after sliceRemove).
	// NOTE(review): filtering is substring-based; names like "TOKEN" still
	// slip through.
	for i := len(info.EnvVars) - 1; i >= 0; i-- {
		envVarName := strings.Split(info.EnvVars[i], "=")[0]
		if strings.Contains(envVarName, "_KEY") || strings.Contains(envVarName, "SECRET") || strings.Contains(envVarName, "PWD") || strings.Contains(envVarName, "PASSWORD") {
			info.EnvVars = sliceRemove(info.EnvVars, i)
		}
	}

	// JSON-ify our info
	js, err := json.Marshal(info)
	if err != nil {
		apiError(resp, http.StatusInternalServerError, err.Error())
		return
	}

	// Fire JSON result back down the internet tubes
	_, _ = resp.Write(js)
}
//
// /api/metrics - Return system metrics cpu, mem, etc
//
// apiMetricsRoute samples live system metrics (memory, CPU percentage, root
// filesystem usage, network byte counters) via gopsutil and returns them as
// a JSON-encoded Metrics struct.
func (r Routes) apiMetricsRoute(resp http.ResponseWriter, req *http.Request) {
	// CORS is for wimps
	if r.disableCORS {
		resp.Header().Set("Access-Control-Allow-Origin", "*")
	}
	resp.Header().Set("Content-Type", "application/json")

	var metrics Metrics

	// Memory stuff
	memStats, err := mem.VirtualMemory()
	if err != nil {
		apiError(resp, http.StatusInternalServerError, "Virtual memory "+err.Error())
		return
	}
	metrics.MemTotal = memStats.Total
	metrics.MemUsed = memStats.Used

	// CPU / processor stuff (0 interval = since last call; false = aggregate)
	cpuStats, err := cpu.Percent(0, false)
	if err != nil {
		apiError(resp, http.StatusInternalServerError, "CPU percentage "+err.Error())
		return
	}
	metrics.CPUPerc = cpuStats[0]

	// Disk and filesystem usage stuff, measured on the root mount
	diskStats, err := disk.Usage("/")
	if err != nil {
		apiError(resp, http.StatusInternalServerError, "Disk usage "+err.Error())
		return
	}
	metrics.DiskTotal = diskStats.Total
	metrics.DiskFree = diskStats.Free

	// Network stuff (false = totals summed across all interfaces)
	netStats, err := net.IOCounters(false)
	if err != nil {
		apiError(resp, http.StatusInternalServerError, "IOCounters "+err.Error())
		return
	}
	metrics.NetBytesRecv = netStats[0].BytesRecv
	metrics.NetBytesSent = netStats[0].BytesSent

	// JSON-ify our metrics
	js, err := json.Marshal(metrics)
	if err != nil {
		apiError(resp, http.StatusInternalServerError, err.Error())
		return
	}

	// Fire JSON result back down the internet tubes
	_, _ = resp.Write(js)
}
//
// Special route to handle serving static SPA content with a JS router
//
// spaIndexRoute always serves index.html so the client-side router can take
// over path handling.
func (r Routes) spaIndexRoute(resp http.ResponseWriter, req *http.Request) {
	// FIX: use the content directory configured on this Routes instance.
	// The original referenced a package-level `contentDir` variable, silently
	// ignoring the Routes.contentDir field that the struct declares for this
	// exact purpose.
	http.ServeFile(resp, req, r.contentDir+"/index.html")
}
//
// Weather info
//
// weatherRoute geo-locates the caller's IP address via the IPStack API, then
// fetches the current weather for that location from the Dark Sky API, and
// returns the combined result as JSON. Requires both IPSTACK_API_KEY and
// WEATHER_API_KEY to be configured, otherwise responds 501.
func (r Routes) weatherRoute(resp http.ResponseWriter, req *http.Request) {
	if r.disableCORS {
		resp.Header().Set("Access-Control-Allow-Origin", "*")
	}
	resp.Header().Set("Content-Type", "application/json")

	// Check if required config is set
	if r.ipstackAPIKey == "" {
		apiError(resp, http.StatusNotImplemented, "Feature disabled, IPSTACK_API_KEY is not set")
		return
	}
	if r.darkskyAPIKey == "" {
		apiError(resp, http.StatusNotImplemented, "Feature disabled, WEATHER_API_KEY is not set")
		return
	}

	// Top level JSON container struct
	var weather Weather

	// Try to deduce calling IP address: prefer the proxy-supplied header
	ip := req.Header.Get("x-forwarded-for")

	// HACK(review): hard-coded developer hostname and test IP for local dev;
	// consider driving this from an env var instead of a machine name.
	if hostname, _ := os.Hostname(); hostname == "BENSL3" {
		ip = "212.36.160.18" // Only uncomment for local testing!
	}

	// If not in the header try the RemoteAddr field
	if len(ip) == 0 {
		ip = req.RemoteAddr
	}

	// Checks for localhost, as it won't work
	if strings.HasPrefix(ip, "127.0.0.1") || strings.HasPrefix(ip, "[::1]") {
		apiError(resp, http.StatusNotAcceptable, fmt.Sprintf("This IP is not allowed %v", ip))
		return
	}

	// There might be a port in there, get rid of it
	if strings.Contains(ip, ":") {
		ip = strings.Split(ip, ":")[0]
	}
	weather.IPAddress = ip

	// First API call is to IPStack to reverse lookup IP into location (lat & long).
	// NOTE(review): InsecureSkipVerify disables TLS certificate validation for
	// these outbound calls — confirm this is intentional before shipping.
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	var netClient = &http.Client{Timeout: time.Second * 10, Transport: tr}

	url := fmt.Sprintf("http://api.ipstack.com/%s?access_key=%s&format=1", ip, r.ipstackAPIKey)
	ipstackResp, err := netClient.Get(url)
	if err != nil {
		apiError(resp, http.StatusInternalServerError, err.Error())
		return
	}
	// FIX: close the response body; the original leaked the connection.
	defer ipstackResp.Body.Close()
	body, _ := ioutil.ReadAll(ipstackResp.Body)

	// Handle response and create object from JSON, and store in weather object
	var ipstackData ipstackAPIData
	err = json.Unmarshal(body, &ipstackData)
	if err != nil {
		apiError(resp, http.StatusInternalServerError, err.Error())
		return
	}
	if len(ipstackData.City) == 0 {
		apiError(resp, http.StatusNotFound, fmt.Sprintf("No location data for this IP %v", ip))
		return
	}
	weather.GeoInfo = ipstackData

	// Second API call is to Dark Sky to fetch weather data
	url = fmt.Sprintf("https://api.darksky.net/forecast/%s/%v,%v?exclude=minutely,hourly,daily&units=si", r.darkskyAPIKey, weather.GeoInfo.Lat, weather.GeoInfo.Long)
	darkskyResp, err := netClient.Get(url)
	if err != nil {
		apiError(resp, http.StatusInternalServerError, err.Error())
		return
	}
	// FIX: close this response body too (the original leaked both).
	defer darkskyResp.Body.Close()
	body, _ = ioutil.ReadAll(darkskyResp.Body)

	// Handle response and create object from JSON, and store in weather object
	var darkskyData darkskyAPIData
	err = json.Unmarshal(body, &darkskyData)
	if err != nil {
		apiError(resp, http.StatusInternalServerError, err.Error())
		return
	}

	// Push API response data into our weather struct
	weather.WeatherInfo = darkskyData

	// JSON-ify our completed weather info object
	jsonResp, err := json.Marshal(weather)
	if err != nil {
		apiError(resp, http.StatusInternalServerError, err.Error())
		return
	}

	// Fire JSON result back down the internet tubes
	_, _ = resp.Write(jsonResp)
}
//
// Helper function for returning API errors
//
// apiError writes the given status code and a JSON body of the form
// {"error": message}. Marshalling failures are logged to stdout and the
// body is simply omitted.
func apiError(resp http.ResponseWriter, code int, message string) {
	resp.WriteHeader(code)

	payload, err := json.Marshal(&HTTPError{Error: message})
	if err != nil {
		fmt.Printf("### ERROR! httpError unable to marshal to JSON. Message was %s\n", message)
		return
	}

	_, _ = resp.Write(payload)
}
// fileExists checks if a file or directory exists at the given path.
// os.Stat returns a nil FileInfo for every error, so a nil error is
// exactly equivalent to the original "not IsNotExist and info != nil" test.
func fileExists(filename string) bool {
	_, err := os.Stat(filename)
	return err == nil
}
// sliceRemove returns slice with the element at index i removed.
// Out-of-range indexes leave the slice unchanged: i >= len matched the
// original behavior, and the i < 0 guard hardens a case where the original
// would panic on slice[:i]. The single append also covers the last-element
// branch the original special-cased (slice[i+1:] is simply empty then).
func sliceRemove(slice []string, i int) []string {
	if i < 0 || i >= len(slice) {
		return slice
	}
	return append(slice[:i], slice[i+1:]...)
}
| [
"\"AUTH_CLIENT_ID\""
]
| []
| [
"AUTH_CLIENT_ID"
]
| [] | ["AUTH_CLIENT_ID"] | go | 1 | 0 | |
cmd/sqlflowserver/e2e_mysql_test.go | // Copyright 2020 The SQLFlow Authors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"sqlflow.org/sqlflow/pkg/database"
pb "sqlflow.org/sqlflow/pkg/proto"
server "sqlflow.org/sqlflow/pkg/sqlflowserver"
)
// TestEnd2EndMySQL runs the full end-to-end suite against a MySQL backend.
// It is skipped unless SQLFLOW_TEST_DB=mysql. All sub-cases share one server
// instance and one prepared dataset, so the registration order below is
// significant: earlier cases train models that later cases predict, explain,
// or evaluate with.
func TestEnd2EndMySQL(t *testing.T) {
	if os.Getenv("SQLFLOW_TEST_DB") != "mysql" {
		t.Skip("Skipping mysql tests")
	}
	dbConnStr = database.GetTestingMySQLURL()
	modelDir := ""

	// Self-signed CA pair for the gRPC server; the temp dir is cleaned up
	// when the test returns.
	tmpDir, caCrt, caKey, err := generateTempCA()
	defer os.RemoveAll(tmpDir)
	if err != nil {
		t.Fatalf("failed to generate CA pair %v", err)
	}

	// Start the SQLFlow server in the background and block until it listens.
	go start(modelDir, caCrt, caKey, unitTestPort, false)
	server.WaitPortReady(fmt.Sprintf("localhost:%d", unitTestPort), 0)
	err = prepareTestData(dbConnStr)
	if err != nil {
		t.Fatalf("prepare test dataset failed: %v", err)
	}

	t.Run("CaseShowDatabases", caseShowDatabases)
	t.Run("CaseSelect", caseSelect)
	t.Run("CaseShouldError", CaseShouldError)
	t.Run("CaseTrainSQL", caseTrainSQL)
	t.Run("caseCoverageCommon", caseCoverageCommon)
	t.Run("caseCoverageCustomModel", caseCoverageCustomModel)
	t.Run("CaseCoverage", CaseCoverageMysql)
	t.Run("CaseTrainWithCommaSeparatedLabel", CaseTrainWithCommaSeparatedLabel)
	t.Run("CaseTrainCustomModelFunctional", CaseTrainCustomModelFunctional)
	t.Run("CaseSQLByPassLeftJoin", CaseSQLByPassLeftJoin)
	t.Run("CaseTrainRegression", caseTrainRegression)

	// Cases using feature derivation
	t.Run("CaseFeatureDerivation", CaseFeatureDerivation)

	// xgboost cases
	t.Run("caseTrainXGBoostRegressionConvergence", caseTrainXGBoostRegressionConvergence)
	t.Run("CasePredictXGBoostRegression", casePredictXGBoostRegression)

	caseTensorFlowIncrementalTrain(t, false)
	caseXGBoostFeatureColumn(t, false)

	t.Run("CaseShowTrain", caseShowTrain)

	// Cases for diagnosis
	t.Run("CaseDiagnosisMissingModelParams", CaseDiagnosisMissingModelParams)
	t.Run("CaseTrainARIMAWithSTLDecompostionModel", caseTrainARIMAWithSTLDecompostionModel)
}
// CaseShouldError submits statements that are each expected to fail on the
// server (empty dataset, missing label column, unmatched/invalid column
// regex, typo'd column name) and asserts that an error comes back for every
// one via connectAndRunSQLShouldError.
func CaseShouldError(t *testing.T) {
	cases := []string{`SELECT * FROM iris.train LIMIT 0 TO TRAIN xgboost.gbtree
WITH objective="reg:squarederror"
LABEL class
INTO sqlflow_models.my_xgb_regression_model;`, // empty dataset
		`SELECT * FROM iris.train WHERE class=2 TO TRAIN xgboost.gbtree
WITH objective="reg:squarederror"
LABEL target
INTO sqlflow_models.my_xgb_regression_model;`, // label not exist
		`SELECT * FROM housing.train
TO TRAIN DNNRegressora WITH
    model.hidden_units = [10, 20],
    validation.select = "SELECT * FROM %s "
COLUMN INDICATOR(CATEGORY_ID("a.*", 1000))
LABEL target
INTO housing.dnn_model;`, // column regex don't match any column
		`SELECT * FROM housing.train
TO TRAIN DNNRegressor WITH
    model.hidden_units = [10, 20],
    validation.select = "SELECT * FROM %s "
COLUMN INDICATOR(CATEGORY_ID("[*", 1000))
LABEL target
INTO housing.dnn_model;`, // invalid column regex
		`SELECT * FROM iris.train
TO TRAIN DNNClassifier WITH
    model.n_classes = 3,
    model.hidden_units = [10, 20],
    validation.select = "SELECT * FROM %s LIMIT 30"
COLUMN typo, sepal_length, sepal_width, petal_length, petal_width
LABEL class
INTO iris.dnn_model;`, // typo in column clause
	}
	for _, sql := range cases {
		connectAndRunSQLShouldError(sql)
	}
}
// CaseFeatureDerivation exercises automatic feature-column derivation: each
// statement omits some or all COLUMN details and relies on the server to
// infer numeric, sparse, embedding and category columns, for both TensorFlow
// estimators and XGBoost, including predict steps against the derived models.
func CaseFeatureDerivation(t *testing.T) {
	cases := []string{`SELECT * FROM iris.train
TO TRAIN DNNClassifier
WITH model.n_classes = 3, model.hidden_units = [10, 20]
LABEL class
INTO sqlflow_models.my_dnn_model;`, // basic case, derive numeric
		`SELECT * FROM iris.test
TO PREDICT iris.predict.class
USING sqlflow_models.my_dnn_model;`, // predict using feature derivation model
		`SELECT c1, c2, c3, c4, c5, class from feature_derivation_case.train
TO TRAIN DNNClassifier
WITH model.n_classes=3, model.hidden_units=[10,10]
COLUMN EMBEDDING(c3, 32, sum), EMBEDDING(SPARSE(c5, 64, COMMA), 32, sum)
LABEL class
INTO sqlflow_models.my_dnn_model;`, // general case to derive all column types
		`SELECT c1, c2, c3, c4, c5, class from feature_derivation_case.train
TO TRAIN DNNClassifier
WITH model.n_classes=3, model.hidden_units=[10,10]
COLUMN INDICATOR(c3), EMBEDDING(SPARSE(c5, 64, COMMA), 32, sum)
LABEL class
INTO sqlflow_models.my_dnn_model;`, // general case with indicator column
		`SELECT news_title, class_id
FROM text_cn.train_processed
TO TRAIN DNNClassifier
WITH model.n_classes = 17, model.hidden_units = [10, 20]
COLUMN EMBEDDING(CATEGORY_ID(SPARSE(news_title,16000,COMMA), 16000),128,mean)
LABEL class_id
INTO sqlflow_models.my_dnn_model;`, // specify COLUMN
		`SELECT news_title, class_id
FROM text_cn.train_processed
TO TRAIN DNNClassifier
WITH model.n_classes = 17, model.hidden_units = [10, 20]
COLUMN EMBEDDING(SPARSE(news_title,16000,COMMA),128,mean)
LABEL class_id
INTO sqlflow_models.my_dnn_model;`, // derive CATEGORY_ID()
		`SELECT * FROM housing.train
TO TRAIN xgboost.gbtree
WITH objective="reg:squarederror",
	 train.num_boost_round=30
LABEL target
INTO sqlflow_models.my_xgb_regression_model;`, // xgboost feature derivation
		`SELECT * FROM housing.test
TO PREDICT housing.predict.target
USING sqlflow_models.my_xgb_regression_model;`, // predict xgboost feature derivation model
	}
	a := assert.New(t)
	for _, sql := range cases {
		_, _, _, err := connectAndRunSQL(sql)
		a.NoError(err)
	}
}
// CaseDiagnosisMissingModelParams verifies that omitting a required estimator
// attribute (hidden_units for DNNClassifier) surfaces a clear diagnostic
// message to the client rather than an opaque failure.
func CaseDiagnosisMissingModelParams(t *testing.T) {
	a := assert.New(t)
	trainSQL := `SELECT * FROM iris.train TO TRAIN DNNClassifier WITH
	model.n_classes = 3,
	train.epoch = 10
COLUMN sepal_length, sepal_width, petal_length, petal_width
LABEL class
INTO sqlflow_models.my_dnn_model;`
	_, _, _, err := connectAndRunSQL(trainSQL)
	// The server should name the missing attribute explicitly.
	a.Contains(err.Error(), "DNNClassifierV2 missing 1 required attribute: 'hidden_units'")
}
// CaseTrainCustomModelFunctional trains the functional-API custom Keras
// classifier (sqlflow_models.dnnclassifier_functional_model) on the shared
// training table and fails the test when the statement errors.
func CaseTrainCustomModelFunctional(t *testing.T) {
	a := assert.New(t)
	trainSQL := fmt.Sprintf(`SELECT * FROM %s
TO TRAIN sqlflow_models.dnnclassifier_functional_model
WITH model.n_classes = 3, validation.metrics="CategoricalAccuracy"
COLUMN sepal_length, sepal_width, petal_length, petal_width
LABEL class
INTO %s;`, caseTrainTable, caseInto)
	_, _, _, err := connectAndRunSQL(trainSQL)
	// FIX: the original used a.Fail("run trainSQL error: %v", err);
	// assert.Fail does not apply printf formatting to its failureMessage, so
	// the underlying error was never rendered. a.NoError reports it properly
	// and matches the style used by the other cases in this file.
	a.NoError(err, "run trainSQL")
}
// CaseTrainWithCommaSeparatedLabel trains the custom RNN time-series model on
// a label built with concat(...,',',...), predicts with it, and then checks
// that the stored predictions still contain the comma separator.
func CaseTrainWithCommaSeparatedLabel(t *testing.T) {
	a := assert.New(t)
	trainSQL := `SELECT sepal_length, sepal_width, petal_length, concat(petal_width,',',class) as class FROM iris.train
	TO TRAIN sqlflow_models.RNNBasedTimeSeriesModel WITH
	  model.n_in=3,
	  model.stack_units = [10, 10],
	  model.n_out=2,
	  model.model_type="lstm",
	  validation.metrics= "MeanAbsoluteError,MeanSquaredError"
	LABEL class
	INTO sqlflow_models.my_dnn_regts_model_2;`
	_, _, _, err := connectAndRunSQL(trainSQL)
	// FIX: the original wrapped these checks in
	// `if err != nil { a.Fail("... %v", err) }`; assert.Fail does not printf-
	// format its failureMessage, so the error text was lost (and the predict
	// step reused the copy-pasted "trainSQL" message). a.NoError reports the
	// actual error and continues, preserving the original non-fatal flow.
	a.NoError(err, "run trainSQL")

	predSQL := `SELECT sepal_length, sepal_width, petal_length, concat(petal_width,',',class) as class FROM iris.test
	TO PREDICT iris.predict_ts_2.class USING sqlflow_models.my_dnn_regts_model_2;`
	_, _, _, err = connectAndRunSQL(predSQL)
	a.NoError(err, "run predSQL")

	showPred := `SELECT * FROM iris.predict_ts_2 LIMIT 5;`
	_, rows, _, err := connectAndRunSQL(showPred)
	a.NoError(err, "run showPred")

	for _, row := range rows {
		// NOTE: Ensure that the predict result contains comma
		AssertIsSubStringAny(a, ",", row[3])
	}
}
// CaseSQLByPassLeftJoin issues a plain (non-extended) SQL statement with a
// LEFT OUTER JOIN directly through the gRPC client, verifying that standard
// SQL passes through the server unchanged and completes without error.
func CaseSQLByPassLeftJoin(t *testing.T) {
	a := assert.New(t)
	trainSQL := `SELECT f1.user_id, f1.fea1, f2.fea2
FROM standard_join_test.user_fea1 AS f1 LEFT OUTER JOIN standard_join_test.user_fea2 AS f2
ON f1.user_id = f2.user_id
WHERE f1.user_id < 3;`

	conn, err := createRPCConn()
	a.NoError(err)
	defer conn.Close()
	cli := pb.NewSQLFlowClient(conn)
	// Generous timeout: the statement itself is cheap but shares the test server.
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
	defer cancel()

	stream, err := cli.Run(ctx, sqlRequest(trainSQL))
	if err != nil {
		a.Fail("Check if the server started successfully. %v", err)
	}
	// wait train finish
	_, _, _, e := ParseResponse(stream)
	a.NoError(e)
}
// CaseXGBoost covers the XGBoost-specific paths: multi-class training with a
// custom eval metric, model evaluation, regression training, TreeExplainer
// based explanation, and binary classification with external-memory (disk
// cache) training. Later statements reuse models trained by earlier ones.
func CaseXGBoost(t *testing.T) {
	cases := []string{
		`SELECT * FROM iris.train TO TRAIN xgboost.gbtree
WITH objective="multi:softprob",
	num_class=3,
	validation.select="SELECT * FROM iris.test",
	eval_metric=accuracy_score
LABEL class
INTO sqlflow_models.my_xgb_multi_class_model;`, // xgb multi-class training with eval_metric
		`SELECT * FROM iris.test
TO EVALUATE sqlflow_models.my_xgb_multi_class_model
WITH validation.metrics="accuracy_score"
LABEL class
INTO sqlflow_models.my_xgb_regression_model_eval_result;`, // xgb multi-class evaluation
		`SELECT * FROM housing.train TO TRAIN xgboost.gbtree
WITH objective="reg:squarederror",
	train.num_boost_round = 30,
	train.batch_size=20
COLUMN f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13
LABEL target
INTO sqlflow_models.my_xgb_regression_model;`, // xgb regression training
		`SELECT * FROM housing.train
TO EXPLAIN sqlflow_models.my_xgb_regression_model
WITH summary.plot_type="bar",
	summary.alpha=1,
	summary.sort=True
USING TreeExplainer;`, // xgb regression explain
		`SELECT * FROM iris.train WHERE class in (0, 1) TO TRAIN xgboost.gbtree
WITH objective="binary:logistic", eval_metric=auc, train.disk_cache=True
LABEL class
INTO sqlflow_models.my_xgb_binary_classification_model;`, // xgb training with external memory
	}
	a := assert.New(t)
	for _, sql := range cases {
		_, _, _, err := connectAndRunSQL(sql)
		a.NoError(err)
	}
}
// CaseCoverageMysql is a grab-bag of MySQL-only coverage cases: binary
// classification + evaluation, categorical/embedding/indicator columns,
// deep-wide (DNNLinearCombined) models, column-name regexes, training hyper
// parameters, sparse text features, a custom RNN classifier, and TF boosted
// trees training followed by an EXPLAIN whose result table is then checked.
// Statements are order-dependent (predict/evaluate/explain reuse models
// trained earlier in the slice).
func CaseCoverageMysql(t *testing.T) {
	cases := []string{
		`SELECT * FROM iris.train WHERE class<>2
TO TRAIN DNNClassifier
WITH
	model.n_classes = 2,
	model.hidden_units = [10, 20],
	validation.select = "SELECT * FROM iris.test WHERE class <>2 LIMIT 30"
LABEL class
INTO sqlflow_models.dnn_binary_classfier;`, // train a binary classification model for evaluation
		`SELECT * FROM iris.test WHERE class<>2
TO EVALUATE sqlflow_models.dnn_binary_classfier
WITH validation.metrics = "Accuracy,AUC"
LABEL class
INTO iris.evaluation_result;`, // evaluate the model
		`SELECT f9, target FROM housing.train
TO TRAIN DNNRegressor WITH model.hidden_units = [10, 20]
COLUMN EMBEDDING(CATEGORY_ID(f9, 1000), 2, "sum")
LABEL target
INTO housing.dnn_model;`, // train a model to predict categorical value
		`SELECT f9, target FROM housing.test
TO PREDICT housing.predict.class USING housing.dnn_model;`, // predict categorical value
		`SELECT f9, f10, target FROM housing.train
TO TRAIN DNNLinearCombinedRegressor WITH
	model.dnn_hidden_units = [10, 20]
COLUMN EMBEDDING(CATEGORY_ID(f9, 25), 2, "sum") for dnn_feature_columns
COLUMN INDICATOR(CATEGORY_ID(f10, 712)) for linear_feature_columns
LABEL target
INTO housing.dnnlinear_model;`, // deep wide model
		`SELECT f9, f10, target FROM housing.test
TO PREDICT housing.predict.class USING housing.dnnlinear_model;`, // deep wide model predict
		`SELECT * FROM housing.train
TO TRAIN DNNRegressor WITH
	model.hidden_units = [10, 20],
	validation.select = "SELECT * FROM housing.test"
COLUMN INDICATOR(CATEGORY_ID("f10|f9|f4", 1000))
LABEL target
INTO housing.dnn_model;`, // column regex
		`SELECT * FROM housing.test
TO PREDICT housing.predict.class
USING housing.dnn_model;`, // column regex mode predict
		`SELECT * FROM iris.train
TO TRAIN DNNClassifier
WITH model.n_classes = 3, model.hidden_units = [10, 20],
	train.batch_size = 10, train.epoch = 6,
	train.max_steps = 200,
	train.save_checkpoints_steps=10,
	train.log_every_n_iter=20,
	validation.start_delay_secs=10, validation.throttle_secs=10
COLUMN sepal_length, sepal_width, petal_length, petal_width
LABEL class
INTO sqlflow_models.my_dnn_model;`, // train with hyper params
		`SELECT news_title, class_id
FROM text_cn.train
TO TRAIN DNNClassifier
WITH model.n_classes = 3, model.hidden_units = [10, 20]
COLUMN EMBEDDING(SPARSE(news_title,16000,COMMA),128,mean)
LABEL class_id
INTO sqlflow_models.my_dnn_model;`, // sparse feature support
		`SELECT news_title, class_id
FROM text_cn.train_processed
TO TRAIN DNNClassifier
WITH model.n_classes = 17, model.hidden_units = [10, 20]
COLUMN EMBEDDING(CATEGORY_ID(news_title,16000,COMMA),128,mean)
LABEL class_id
INTO sqlflow_models.my_dnn_model;`, // dnn text classification
		`SELECT news_title, class_id
FROM text_cn.train_processed
TO TRAIN sqlflow_models.StackedRNNClassifier
WITH model.n_classes = 17, model.stack_units = [16], model.model_type = "lstm", model.bidirectional = True,
	train.epoch = 1, train.batch_size = 32
COLUMN EMBEDDING(SEQ_CATEGORY_ID(news_title,1600,COMMA),128,mean)
LABEL class_id
INTO sqlflow_models.my_rnn_model;`, // custom rnn model text classification
		`SELECT * FROM iris.train WHERE class!=2
TO TRAIN BoostedTreesClassifier
WITH
	model.n_batches_per_layer=1,
	model.center_bias=True,
	train.batch_size=100,
	train.epoch=10,
	validation.select="SELECT * FROM iris.test where class!=2"
LABEL class
INTO sqlflow_models.boostedtrees_model;`, // train tf boosted trees model
		`SELECT * FROM iris.test WHERE class!=2
TO EXPLAIN sqlflow_models.boostedtrees_model
INTO iris.explain_result;`, // explain tf boosted trees model
	}
	a := assert.New(t)
	for _, sql := range cases {
		_, _, _, err := connectAndRunSQL(sql)
		a.NoError(err)
	}

	// check tf boosted trees model explain result: every feature-importance
	// value written by the EXPLAIN statement should be non-negative.
	getExplainResult := `SELECT * FROM iris.explain_result;`
	_, rows, _, err := connectAndRunSQL(getExplainResult)
	a.NoError(err)
	for _, row := range rows {
		AssertGreaterEqualAny(a, row[1], float32(0))
	}
}
// caseTrainARIMAWithSTLDecompostionModel trains the ARIMA-with-STL time
// series model twice — once for the "purchase" series and once for the
// "redeem" series — using a shared SQL template. %[1]s is the series column
// (also used in the model name) and %[2]s is the strftime date format.
func caseTrainARIMAWithSTLDecompostionModel(t *testing.T) {
	a := assert.New(t)
	trainSQL := `
SELECT time, %[1]s FROM fund.train
TO TRAIN sqlflow_models.ARIMAWithSTLDecomposition
WITH
  model.order=[7, 0, 2],
  model.period=[7, 30],
  model.date_format="%[2]s",
  model.forecast_start='2014-09-01',
  model.forecast_end='2014-09-30'
COLUMN time, %[1]s
LABEL %[1]s
INTO fund.%[1]s_model;
`

	var err error
	dateFormat := "%Y-%m-%d"

	purchaseTrainSQL := fmt.Sprintf(trainSQL, "purchase", dateFormat)
	_, _, _, err = connectAndRunSQL(purchaseTrainSQL)
	a.NoError(err)

	redeemTrainSQL := fmt.Sprintf(trainSQL, "redeem", dateFormat)
	_, _, _, err = connectAndRunSQL(redeemTrainSQL)
	a.NoError(err)
}
| [
"\"SQLFLOW_TEST_DB\""
]
| []
| [
"SQLFLOW_TEST_DB"
]
| [] | ["SQLFLOW_TEST_DB"] | go | 1 | 0 | |
lib/IPython/utils/terminal.py | # encoding: utf-8
"""
Utilities for working with terminals.
Authors:
* Brian E. Granger
* Fernando Perez
* Alexander Belchenko (e-mail: bialix AT ukr.net)
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import struct
import sys
import warnings
from . import py3compat
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# This variable is part of the expected API of the module:
ignore_termtitle = True
if os.name == 'posix':
def _term_clear():
os.system('clear')
elif sys.platform == 'win32':
def _term_clear():
os.system('cls')
else:
def _term_clear():
pass
def toggle_set_term_title(val):
    """Enable or disable terminal-title writing.

    set_term_title() allows writing to the console titlebar. In embedded
    widgets this can cause problems, so this call can be used to toggle it
    on or off as needed. The module starts with the feature disabled.

    Parameters
    ----------
    val : bool
        If True, set_term_title() actually writes to the terminal (using
        the appropriate platform-specific module). If False, it is a no-op.
    """
    global ignore_termtitle
    ignore_termtitle = not val
def _set_term_title(*args,**kw):
"""Dummy no-op."""
pass
def _set_term_title_xterm(title):
""" Change virtual terminal title in xterm-workalikes """
sys.stdout.write('\033]0;%s\007' % title)
# Pick the real implementation of _set_term_title for this platform,
# replacing the no-op default defined above.
if os.name == 'posix':
    TERM = os.environ.get('TERM', '')
    if TERM.startswith('xterm'):
        # xterm and work-alikes honour the OSC 0 escape sequence.
        _set_term_title = _set_term_title_xterm
elif sys.platform == 'win32':
    try:
        import ctypes

        SetConsoleTitleW = ctypes.windll.kernel32.SetConsoleTitleW
        SetConsoleTitleW.argtypes = [ctypes.c_wchar_p]

        def _set_term_title(title):
            """Set terminal title using ctypes to access the Win32 APIs."""
            SetConsoleTitleW(title)
    except ImportError:
        def _set_term_title(title):
            """Set terminal title using the 'title' command."""
            global ignore_termtitle
            try:
                # Cannot be on network share when issuing system commands
                curr = py3compat.getcwd()
                os.chdir("C:")
                ret = os.system("title " + title)
            finally:
                # NOTE(review): if py3compat.getcwd() itself raised, `curr`
                # would be unbound here and this chdir would NameError —
                # confirm that cannot happen in practice.
                os.chdir(curr)
            if ret:
                # non-zero return code signals error, don't try again
                ignore_termtitle = True
def set_term_title(title):
    """Set terminal title using the necessary platform-dependent calls.

    Honors the module-level ``ignore_termtitle`` switch (see
    ``toggle_set_term_title``): when writing is disabled this does nothing.
    """
    if not ignore_termtitle:
        _set_term_title(title)
def freeze_term_title():
    """Permanently disable title writes (deprecated).

    Use :func:`toggle_set_term_title` instead.
    """
    global ignore_termtitle
    warnings.warn("This function is deprecated, use toggle_set_term_title()")
    ignore_termtitle = True
if sys.platform == 'win32':
    def get_terminal_size(defaultx=80, defaulty=25):
        """Return size of current terminal console.

        This function try to determine actual size of current working
        console window and return tuple (sizex, sizey) if success,
        or default size (defaultx, defaulty) otherwise.

        Dependencies: ctypes should be installed.

        Author: Alexander Belchenko (e-mail: bialix AT ukr.net)
        """
        try:
            import ctypes
        except ImportError:
            return defaultx, defaulty

        # -11 is the Win32 STD_OUTPUT_HANDLE constant.
        h = ctypes.windll.kernel32.GetStdHandle(-11)
        # CONSOLE_SCREEN_BUFFER_INFO is 22 bytes; its layout matches the
        # struct format string used below ("hhhhHhhhhhh").
        csbi = ctypes.create_string_buffer(22)
        res = ctypes.windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
        if res:
            (bufx, bufy, curx, cury, wattr,
             left, top, right, bottom, maxx, maxy) = struct.unpack(
                "hhhhHhhhhhh", csbi.raw)
            # Visible window extent, inclusive of both edges.
            sizex = right - left + 1
            sizey = bottom - top + 1
            return (sizex, sizey)
        else:
            return (defaultx, defaulty)
else:
    def get_terminal_size(defaultx=80, defaulty=25):
        # Non-Windows fallback: simply report the defaults.
        return defaultx, defaulty
| []
| []
| [
"TERM"
]
| [] | ["TERM"] | python | 1 | 0 | |
src/php/integration/deploy_a_php_app_with_all_modules_test.go | package integration_test
import (
"os"
"path/filepath"
"strings"
"github.com/cloudfoundry/libbuildpack"
"github.com/cloudfoundry/libbuildpack/cutlass"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// SubDependency models one module entry nested under a PHP dependency in the
// buildpack manifest (see ItLoadsAllTheModules, which unmarshals manifest.yml
// into a slice of these via the `dependencies` key).
type SubDependency struct {
	Name    string
	Version string
}
// ItLoadsAllTheModules asserts that an already-pushed app logged the expected
// PHP version and that its info page lists every module bundled with that PHP
// version according to the buildpack's manifest.yml. The "ioncube" module is
// skipped because it is not reported like a regular module.
func ItLoadsAllTheModules(app *cutlass.App, phpVersion string) {
	// Shape of the dependency list in manifest.yml; only the entry whose name
	// is "php" and whose version matches phpVersion is consulted.
	// NOTE(review): the struct mixes `json` and `yaml` tags — confirm the
	// libbuildpack YAML loader resolves both as intended.
	var manifest struct {
		Dependencies []struct {
			Name    string          `json:"name"`
			Version string          `json:"version"`
			Modules []SubDependency `yaml:"dependencies"`
		} `json:"dependencies"`
	}
	Expect((&libbuildpack.YAML{}).Load(filepath.Join(bpDir, "manifest.yml"), &manifest)).To(Succeed())

	// Collect the modules shipped with the matching PHP line.
	var subDependencies []SubDependency
	for _, d := range manifest.Dependencies {
		if d.Name == "php" && strings.HasPrefix(d.Version, phpVersion) {
			subDependencies = d.Modules
			break
		}
	}

	By("logs each module on the info page", func() {
		Expect(app.Stdout.String()).To(ContainSubstring("PHP " + phpVersion))

		body, err := app.GetBody("/")
		Expect(err).ToNot(HaveOccurred())
		for _, dependency := range subDependencies {
			if dependency.Name != "ioncube" {
				// Modules appear as "module_<name>" (sometimes "module_Zend+<name>").
				Expect(body).To(MatchRegexp("(?i)module_(Zend[+ ])?%s", dependency.Name))
			}
		}
	})
}
// Ginkgo spec: pushes PHP 7.2 fixture apps and verifies that extensions
// declared either in .bp-config (options.json) or in composer.json are all
// loaded, including the extra cflinuxfs3-only extensions where applicable.
var _ = Describe("CF PHP Buildpack", func() {
	var app *cutlass.App
	// Tear the app down after every spec so pushes do not leak between tests.
	AfterEach(func() { app = DestroyApp(app) })

	Context("extensions are specified in .bp-config", func() {
		It("deploying a basic PHP7.2 app that loads all prepackaged extensions", func() {
			app = cutlass.New(Fixtures("php_72_all_modules"))
			// Forwarded so composer can authenticate against GitHub's API.
			app.SetEnv("COMPOSER_GITHUB_OAUTH_TOKEN", os.Getenv("COMPOSER_GITHUB_OAUTH_TOKEN"))

			By("warns about deprecated PHP_EXTENSIONS", func() {
				PushAppAndConfirm(app)
				Expect(app.Stdout.String()).To(ContainSubstring("Warning: PHP_EXTENSIONS in options.json is deprecated."))
			})

			ItLoadsAllTheModules(app, "7.2")

			// Extensions that are packaged but not listed in the manifest.
			body, err := app.GetBody("/")
			Expect(err).ToNot(HaveOccurred())
			Expect(body).To(MatchRegexp("(?i)module_(Zend[+ ])?%s", "sqlsrv"))
			Expect(body).To(MatchRegexp("(?i)module_(Zend[+ ])?%s", "pdo_sqlsrv"))
			Expect(body).To(MatchRegexp("(?i)module_(Zend[+ ])?%s", "maxminddb"))
		})

		It("deploying a basic PHP7.2 app with cflinuxfs3 extensions", func() {
			// Only meaningful on the cflinuxfs3 stack.
			SkipUnlessCflinuxfs3()
			app = cutlass.New(Fixtures("php_72_fs3_extensions"))
			app.SetEnv("COMPOSER_GITHUB_OAUTH_TOKEN", os.Getenv("COMPOSER_GITHUB_OAUTH_TOKEN"))

			By("warns about deprecated PHP_EXTENSIONS", func() {
				PushAppAndConfirm(app)
				Expect(app.Stdout.String()).To(ContainSubstring("Warning: PHP_EXTENSIONS in options.json is deprecated."))
			})

			body, err := app.GetBody("/")
			Expect(err).ToNot(HaveOccurred())
			Expect(body).To(MatchRegexp("(?i)module_(Zend[+ ])?%s", "sqlsrv"))
			Expect(body).To(MatchRegexp("(?i)module_(Zend[+ ])?%s", "pdo_sqlsrv"))
			Expect(body).To(MatchRegexp("(?i)module_(Zend[+ ])?%s", "maxminddb"))
		})
	})

	Context("extensions are specified in composer.json", func() {
		It("deploying a basic PHP7.2 app that loads all prepackaged extensions", func() {
			app = cutlass.New(Fixtures("php_72_all_modules_composer"))
			PushAppAndConfirm(app)

			ItLoadsAllTheModules(app, "7.2")

			By("does not warn about deprecated PHP_EXTENSIONS", func() {
				Expect(app.Stdout.String()).ToNot(ContainSubstring("Warning: PHP_EXTENSIONS in options.json is deprecated."))
			})
		})
	})
})
| [
"\"COMPOSER_GITHUB_OAUTH_TOKEN\"",
"\"COMPOSER_GITHUB_OAUTH_TOKEN\""
]
| []
| [
"COMPOSER_GITHUB_OAUTH_TOKEN"
]
| [] | ["COMPOSER_GITHUB_OAUTH_TOKEN"] | go | 1 | 0 | |
pkg/customers/approval_ofac.go | // Copyright 2020 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package customers
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"strconv"
"time"
moovhttp "github.com/moov-io/base/http"
watchmanClient "github.com/moov-io/watchman/client"
"github.com/moov-io/customers/pkg/client"
"github.com/moov-io/customers/pkg/route"
"github.com/moov-io/customers/pkg/watchman"
"github.com/gorilla/mux"
"github.com/moov-io/base/log"
)
var (
	// ofacMatchThreshold is the minimum Watchman match score above which a
	// customer is considered blocked. It can be overridden with the
	// OFAC_MATCH_THRESHOLD environment variable; unparsable or non-positive
	// values fall back to the default of 0.99 (99%).
	ofacMatchThreshold float32 = func() float32 {
		if v := os.Getenv("OFAC_MATCH_THRESHOLD"); v != "" {
			f, err := strconv.ParseFloat(v, 32)
			if err == nil && f > 0.00 {
				return float32(f)
			}
		}
		return 0.99 // default, 99%
	}()
)
// OFACSearcher runs OFAC watch-list searches through a Watchman client and
// persists the results via the customer repository.
type OFACSearcher struct {
	repo           CustomerRepository // storage for search results
	watchmanClient watchman.Client    // performs the actual SDN searches
}
// NewOFACSearcher builds an OFACSearcher around the given customer
// repository and Watchman client.
func NewOFACSearcher(repo CustomerRepository, client watchman.Client) *OFACSearcher {
	return &OFACSearcher{repo: repo, watchmanClient: client}
}
// storeCustomerOFACSearch performs OFAC searches against the Customer's name
// and nickname (if populated). The higher-matching search result is stored
// via s.repo for use later (in approvals); the result is marked Blocked when
// its match score exceeds ofacMatchThreshold.
func (s *OFACSearcher) storeCustomerOFACSearch(cust *client.Customer, requestID string) error {
	ctx, cancelFn := context.WithTimeout(context.TODO(), 10*time.Second)
	defer cancelFn()

	if cust == nil {
		return errors.New("nil Customer")
	}

	sdn, err := s.watchmanClient.Search(ctx, formatCustomerName(cust), requestID)
	if err != nil {
		return fmt.Errorf("OFACSearcher.storeCustomerOFACSearch: name search for customer=%s: %v", cust.CustomerID, err)
	}

	var nickSDN *watchmanClient.OfacSdn
	if cust.NickName != "" {
		nickSDN, err = s.watchmanClient.Search(ctx, cust.NickName, requestID)
		if err != nil {
			return fmt.Errorf("OFACSearcher.storeCustomerOFACSearch: nickname search for customer=%s: %v", cust.CustomerID, err)
		}
	}

	// Save the higher matching SDN (from name search or nick name).
	// FIX: guard against a nil name-search result before dereferencing
	// sdn.Match — the original compared nickSDN.Match > sdn.Match
	// unconditionally, which panics when the name search returns no SDN but
	// the nickname search does.
	switch {
	case nickSDN != nil && (sdn == nil || nickSDN.Match > sdn.Match):
		err = s.repo.saveCustomerOFACSearch(cust.CustomerID, client.OfacSearch{
			EntityID:  nickSDN.EntityID,
			Blocked:   nickSDN.Match > ofacMatchThreshold,
			SdnName:   nickSDN.SdnName,
			SdnType:   nickSDN.SdnType,
			Match:     nickSDN.Match,
			CreatedAt: time.Now(),
		})
	case sdn != nil:
		err = s.repo.saveCustomerOFACSearch(cust.CustomerID, client.OfacSearch{
			EntityID:  sdn.EntityID,
			Blocked:   sdn.Match > ofacMatchThreshold,
			SdnName:   sdn.SdnName,
			SdnType:   sdn.SdnType,
			Match:     sdn.Match,
			CreatedAt: time.Now(),
		})
	}
	if err != nil {
		return fmt.Errorf("OFACSearcher.storeCustomerOFACSearch: saveCustomerOFACSearch customer=%s: %v", cust.CustomerID, err)
	}
	return nil
}
// AddOFACRoutes registers the customer OFAC endpoints on the router:
//
//	GET /customers/{customerID}/ofac         - latest stored search result
//	PUT /customers/{customerID}/refresh/ofac - re-run the search live
func AddOFACRoutes(logger log.Logger, r *mux.Router, repo CustomerRepository, ofac *OFACSearcher) {
	logger = logger.Set("package", log.String("customers"))

	r.Methods("GET").Path("/customers/{customerID}/ofac").HandlerFunc(getLatestCustomerOFACSearch(logger, repo))
	r.Methods("PUT").Path("/customers/{customerID}/refresh/ofac").HandlerFunc(refreshOFACSearch(logger, repo, ofac))
}
// getLatestCustomerOFACSearch returns an http.HandlerFunc that writes the
// most recent stored OFAC search result for the requested customer as JSON.
// Missing customer/organization identifiers are handled (and responded to)
// by the route helpers themselves.
func getLatestCustomerOFACSearch(logger log.Logger, repo CustomerRepository) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w = route.Responder(logger, w, r)
		w.Header().Set("Content-Type", "application/json; charset=utf-8")

		custID := route.GetCustomerID(w, r)
		if custID == "" {
			return
		}
		org := route.GetOrganization(w, r)
		if org == "" {
			return
		}

		searchResult, err := repo.getLatestCustomerOFACSearch(custID, org)
		if err != nil {
			moovhttp.Problem(w, err)
			return
		}

		w.WriteHeader(http.StatusOK)
		json.NewEncoder(w).Encode(searchResult)
	}
}
func refreshOFACSearch(logger log.Logger, repo CustomerRepository, ofac *OFACSearcher) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w = route.Responder(logger, w, r)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
requestID := moovhttp.GetRequestID(r)
customerID := route.GetCustomerID(w, r)
if customerID == "" {
return
}
organization := route.GetOrganization(w, r)
if organization == "" {
return
}
cust, err := repo.GetCustomer(customerID, organization)
if err != nil {
moovhttp.Problem(w, err)
return
}
logger.Logf("running live OFAC search for customer=%s", customerID)
if err := ofac.storeCustomerOFACSearch(cust, requestID); err != nil {
logger.LogErrorf("error refreshing ofac search: %v", err)
moovhttp.Problem(w, err)
return
}
result, err := repo.getLatestCustomerOFACSearch(customerID, organization)
if err != nil {
logger.LogErrorf("error getting latest ofac search: %v", err)
moovhttp.Problem(w, err)
return
}
if result.Blocked {
logger.LogErrorf("customer=%s matched against OFAC entity=%s with a score of %.2f - rejecting customer", cust.CustomerID, result.EntityID, result.Match)
if err := repo.updateCustomerStatus(cust.CustomerID, client.CUSTOMERSTATUS_REJECTED, "manual OFAC refresh"); err != nil {
logger.LogErrorf("error updating customer=%s error=%v", cust.CustomerID, err)
moovhttp.Problem(w, err)
return
}
}
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(result)
}
}
| [
"\"OFAC_MATCH_THRESHOLD\""
]
| []
| [
"OFAC_MATCH_THRESHOLD"
]
| [] | ["OFAC_MATCH_THRESHOLD"] | go | 1 | 0 | |
bin/sentinel.py | #!/usr/bin/env python
import sys
import os
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../lib')))
import init
import config
import misc
# from anond import anondaemon
from anond import AnonDaemon
# from models import Superblock, Proposal, GovernanceObject, Watchdog
from models import Proposal, GovernanceObject, Watchdog
from models import VoteSignals, VoteOutcomes, Transient
import socket
from misc import printdbg
import time
from bitcoinrpc.authproxy import JSONRPCException
import signal
import atexit
import random
from scheduler import Scheduler
import argparse
# sync anond gobject list with our local relational DB backend
def perform_anond_object_sync(anond):
GovernanceObject.sync(anond)
# delete old watchdog objects, create new when necessary
def watchdog_check(anond):
printdbg("in watchdog_check")
# delete expired watchdogs
for wd in Watchdog.expired(anond):
printdbg("\tFound expired watchdog [%s], voting to delete" % wd.object_hash)
wd.vote(anond, VoteSignals.delete, VoteOutcomes.yes)
# now, get all the active ones...
active_wd = Watchdog.active(anond)
active_count = active_wd.count()
# none exist, submit a new one to the network
if 0 == active_count:
# create/submit one
printdbg("\tNo watchdogs exist... submitting new one.")
wd = Watchdog(created_at=int(time.time()))
wd.submit(anond)
else:
wd_list = sorted(active_wd, key=lambda wd: wd.object_hash)
# highest hash wins
winner = wd_list.pop()
printdbg("\tFound winning watchdog [%s], voting VALID" % winner.object_hash)
winner.vote(anond, VoteSignals.valid, VoteOutcomes.yes)
# if remaining Watchdogs exist in the list, vote delete
for wd in wd_list:
printdbg("\tFound losing watchdog [%s], voting DELETE" % wd.object_hash)
wd.vote(anond, VoteSignals.delete, VoteOutcomes.yes)
printdbg("leaving watchdog_check")
def prune_expired_proposals(anond):
# vote delete for old proposals
for proposal in Proposal.expired(anond.superblockcycle()):
proposal.vote(anond, VoteSignals.delete, VoteOutcomes.yes)
# ping anond
# def sentinel_ping(anond):
def sentinel_ping(anond):
printdbg("in sentinel_ping")
# anond.ping()
anond.ping()
printdbg("leaving sentinel_ping")
# def attempt_superblock_creation(anond):
# import dashlib
# if not anond.is_masternode():
# print("We are not a Masternode... can't submit superblocks!")
# return
# # query votes for this specific ebh... if we have voted for this specific
# # ebh, then it's voted on. since we track votes this is all done using joins
# # against the votes table
# #
# # has this masternode voted on *any* superblocks at the given event_block_height?
# # have we voted FUNDING=YES for a superblock for this specific event_block_height?
# event_block_height = anond.next_superblock_height()
# if Superblock.is_voted_funding(event_block_height):
# # printdbg("ALREADY VOTED! 'til next time!")
# # vote down any new SBs because we've already chosen a winner
# for sb in Superblock.at_height(event_block_height):
# if not sb.voted_on(signal=VoteSignals.funding):
# sb.vote(anond, VoteSignals.funding, VoteOutcomes.no)
# # now return, we're done
# return
# if not anond.is_govobj_maturity_phase():
# printdbg("Not in maturity phase yet -- will not attempt Superblock")
# return
# proposals = Proposal.approved_and_ranked(proposal_quorum=anond.governance_quorum(), next_superblock_max_budget=anond.next_superblock_max_budget())
# budget_max = anond.get_superblock_budget_allocation(event_block_height)
# sb_epoch_time = anond.block_height_to_epoch(event_block_height)
# sb = dashlib.create_superblock(proposals, event_block_height, budget_max, sb_epoch_time)
# if not sb:
# printdbg("No superblock created, sorry. Returning.")
# return
# # find the deterministic SB w/highest object_hash in the DB
# dbrec = Superblock.find_highest_deterministic(sb.hex_hash())
# if dbrec:
# dbrec.vote(anond, VoteSignals.funding, VoteOutcomes.yes)
# # any other blocks which match the sb_hash are duplicates, delete them
# for sb in Superblock.select().where(Superblock.sb_hash == sb.hex_hash()):
# if not sb.voted_on(signal=VoteSignals.funding):
# sb.vote(anond, VoteSignals.delete, VoteOutcomes.yes)
# printdbg("VOTED FUNDING FOR SB! We're done here 'til next superblock cycle.")
# return
# else:
# printdbg("The correct superblock wasn't found on the network...")
# # if we are the elected masternode...
# if (anond.we_are_the_winner()):
# printdbg("we are the winner! Submit SB to network")
# sb.submit(anond)
def check_object_validity(anond):
# vote (in)valid objects
# for gov_class in [Proposal, Superblock]:
for gov_class in [Proposal]:
for obj in gov_class.select():
obj.vote_validity(anond)
# def is_anond_port_open(anond):
def is_anond_port_open(anond):
# test socket open before beginning, display instructive message to MN
# operators if it's not
port_open = False
try:
# info = anond.rpc_command('getgovernanceinfo')
info = anond.rpc_command('getnetworkinfo')
port_open = True
except (socket.error, JSONRPCException) as e:
print("%s" % e)
return port_open
def main():
# anond = anondaemon.from_dash_conf(config.dash_conf)
anond = AnonDaemon.from_anon_conf(config.anon_conf)
options = process_args()
# check anond connectivity
# if not is_anond_port_open(anond):
if not is_anond_port_open(anond):
print("Cannot connect to anond. Please ensure anond is running and the JSONRPC port is open to Sentinel.")
return
# check anond sync
# if not anond.is_synced():
if not anond.is_synced():
print("anond not synced with network! Awaiting full sync before running Sentinel.")
return
# ensure valid masternode
# if not anond.is_masternode():
if not anond.is_masternode():
print("Invalid Masternode Status, cannot continue.")
return
# register a handler if SENTINEL_DEBUG is set
if os.environ.get('SENTINEL_DEBUG', None):
import logging
logger = logging.getLogger('peewee')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
if options.bypass:
# bypassing scheduler, remove the scheduled event
printdbg("--bypass-schedule option used, clearing schedule")
Scheduler.clear_schedule()
if not Scheduler.is_run_time():
printdbg("Not yet time for an object sync/vote, moving on.")
return
if not options.bypass:
# delay to account for cron minute sync
Scheduler.delay()
# running now, so remove the scheduled event
Scheduler.clear_schedule()
# ========================================================================
# general flow:
# ========================================================================
#
# load "gobject list" rpc command data, sync objects into internal database
# perform_anond_object_sync(anond)
perform_anond_object_sync(anond)
# if anond.has_sentinel_ping:
# sentinel_ping(anond)
if anond.has_sentinel_ping:
sentinel_ping(anond)
else:
# delete old watchdog objects, create a new if necessary
# watchdog_check(anond)
watchdog_check(anond)
# auto vote network objects as valid/invalid
# check_object_validity(anond)
# vote to delete expired proposals
prune_expired_proposals(anond)
prune_expired_proposals(anond)
# create a Superblock if necessary
# attempt_superblock_creation(anond)
# attempt_superblock_creation(anond)
# schedule the next run
Scheduler.schedule_next_run()
def signal_handler(signum, frame):
print("Got a signal [%d], cleaning up..." % (signum))
Transient.delete('SENTINEL_RUNNING')
sys.exit(1)
def cleanup():
Transient.delete(mutex_key)
def process_args():
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--bypass-scheduler',
action='store_true',
help='Bypass scheduler and sync/vote immediately',
dest='bypass')
args = parser.parse_args()
return args
if __name__ == '__main__':
atexit.register(cleanup)
signal.signal(signal.SIGINT, signal_handler)
# ensure another instance of Sentinel is not currently running
mutex_key = 'SENTINEL_RUNNING'
# assume that all processes expire after 'timeout_seconds' seconds
timeout_seconds = 90
is_running = Transient.get(mutex_key)
if is_running:
printdbg("An instance of Sentinel is already running -- aborting.")
sys.exit(1)
else:
Transient.set(mutex_key, misc.now(), timeout_seconds)
# locked to this instance -- perform main logic here
main()
Transient.delete(mutex_key)
| []
| []
| [
"SENTINEL_DEBUG"
]
| [] | ["SENTINEL_DEBUG"] | python | 1 | 0 | |
src/borg/testsuite/upgrader.py | import os
import tarfile
import pytest
from ..constants import * # NOQA
from ..crypto.key import KeyfileKey
from ..upgrader import AtticRepositoryUpgrader, AtticKeyfileKey
from ..helpers import get_keys_dir
from ..repository import Repository
from . import are_hardlinks_supported
# tar with a repo and repo keyfile from attic
ATTIC_TAR = os.path.join(os.path.dirname(__file__), 'attic.tar.gz')
def untar(tarfname, path, what):
"""
extract <tarfname> tar archive to <path>, all stuff starting with <what>.
return path to <what>.
"""
def files(members):
for tarinfo in members:
if tarinfo.name.startswith(what):
yield tarinfo
with tarfile.open(tarfname, 'r') as tf:
tf.extractall(path, members=files(tf))
return os.path.join(path, what)
def repo_valid(path):
"""
utility function to check if borg can open a repository
:param path: the path to the repository
:returns: if borg can check the repository
"""
with Repository(str(path), exclusive=True, create=False) as repository:
# can't check raises() because check() handles the error
return repository.check()
def key_valid(path):
"""
check that the new keyfile is alright
:param path: the path to the key file
:returns: if the file starts with the borg magic string
"""
keyfile = os.path.join(get_keys_dir(),
os.path.basename(path))
with open(keyfile) as f:
return f.read().startswith(KeyfileKey.FILE_ID)
def make_attic_repo(dir):
"""
create an attic repo with some stuff in it
:param dir: path to the repository to be created
:returns: path to attic repository
"""
# there is some stuff in that repo, copied from `RepositoryTestCase.test1`
return untar(ATTIC_TAR, str(dir), 'repo')
@pytest.fixture()
def attic_repo(tmpdir):
return make_attic_repo(tmpdir)
@pytest.fixture(params=[True, False])
def inplace(request):
return request.param
def test_convert_segments(attic_repo, inplace):
"""test segment conversion
this will load the given attic repository, list all the segments
then convert them one at a time. we need to close the repo before
conversion otherwise we have errors from borg
:param attic_repo: a populated attic repository (fixture)
"""
repo_path = attic_repo
with pytest.raises(Repository.AtticRepository):
repo_valid(repo_path)
repository = AtticRepositoryUpgrader(repo_path, create=False)
with repository:
segments = [filename for i, filename in repository.io.segment_iterator()]
repository.convert_segments(segments, dryrun=False, inplace=inplace)
repository.convert_cache(dryrun=False)
assert repo_valid(repo_path)
@pytest.fixture()
def attic_key_file(tmpdir, monkeypatch):
"""
create an attic key file from the given repo, in the keys
subdirectory of the given tmpdir
:param tmpdir: a temporary directory (a builtin fixture)
:returns: path to key file
"""
keys_dir = untar(ATTIC_TAR, str(tmpdir), 'keys')
# we use the repo dir for the created keyfile, because we do
# not want to clutter existing keyfiles
monkeypatch.setenv('ATTIC_KEYS_DIR', keys_dir)
# we use the same directory for the converted files, which
# will clutter the previously created one, which we don't care
# about anyways. in real runs, the original key will be retained.
monkeypatch.setenv('BORG_KEYS_DIR', keys_dir)
monkeypatch.setenv('ATTIC_PASSPHRASE', 'test')
return os.path.join(keys_dir, 'repo')
def test_keys(attic_repo, attic_key_file):
"""test key conversion
test that we can convert the given key to a properly formatted
borg key. assumes that the ATTIC_KEYS_DIR and BORG_KEYS_DIR have
been properly populated by the attic_key_file fixture.
:param attic_repo: path to an attic repository (fixture defined above)
:param attic_key_file: path to an attic key file (fixture defined above)
"""
keyfile_path = attic_key_file
assert not key_valid(keyfile_path) # not upgraded yet
with AtticRepositoryUpgrader(attic_repo, create=False) as repository:
keyfile = AtticKeyfileKey.find_key_file(repository)
AtticRepositoryUpgrader.convert_keyfiles(keyfile, dryrun=False)
assert key_valid(keyfile_path)
@pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
def test_convert_all(attic_repo, attic_key_file, inplace):
"""test all conversion steps
this runs everything. mostly redundant test, since everything is
done above. yet we expect a NotImplementedError because we do not
convert caches yet.
:param attic_repo: path to an attic repository (fixture defined above)
:param attic_key_file: path to an attic key file (fixture defined above)
"""
repo_path = attic_repo
with pytest.raises(Repository.AtticRepository):
repo_valid(repo_path)
def stat_segment(path):
return os.stat(os.path.join(path, 'data', '0', '0'))
def first_inode(path):
return stat_segment(path).st_ino
orig_inode = first_inode(repo_path)
with AtticRepositoryUpgrader(repo_path, create=False) as repository:
# replicate command dispatch, partly
os.umask(UMASK_DEFAULT)
backup = repository.upgrade(dryrun=False, inplace=inplace) # note: uses hardlinks internally
if inplace:
assert backup is None
assert first_inode(repository.path) == orig_inode
else:
assert backup
assert first_inode(repository.path) != first_inode(backup)
# i have seen cases where the copied tree has world-readable
# permissions, which is wrong
if 'BORG_TESTS_IGNORE_MODES' not in os.environ:
assert stat_segment(backup).st_mode & UMASK_DEFAULT == 0
assert key_valid(attic_key_file)
assert repo_valid(repo_path)
@pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
def test_hardlink(tmpdir, inplace):
"""test that we handle hard links properly
that is, if we are in "inplace" mode, hardlinks should *not*
change (ie. we write to the file directly, so we do not rewrite the
whole file, and we do not re-create the file).
if we are *not* in inplace mode, then the inode should change, as
we are supposed to leave the original inode alone."""
a = str(tmpdir.join('a'))
with open(a, 'wb') as tmp:
tmp.write(b'aXXX')
b = str(tmpdir.join('b'))
os.link(a, b)
AtticRepositoryUpgrader.header_replace(b, b'a', b'b', inplace=inplace)
if not inplace:
assert os.stat(a).st_ino != os.stat(b).st_ino
else:
assert os.stat(a).st_ino == os.stat(b).st_ino
with open(b, 'rb') as tmp:
assert tmp.read() == b'bXXX'
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tsdb/engine/tsm1/engine.go | // Package tsm1 provides a TSDB in the Time Structured Merge tree format.
package tsm1 // import "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1"
import (
"archive/tar"
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/influxdata/influxdb/v2/influxql/query"
"github.com/influxdata/influxdb/v2/logger"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxdb/v2/pkg/bytesutil"
"github.com/influxdata/influxdb/v2/pkg/estimator"
"github.com/influxdata/influxdb/v2/pkg/file"
"github.com/influxdata/influxdb/v2/pkg/limiter"
"github.com/influxdata/influxdb/v2/pkg/metrics"
"github.com/influxdata/influxdb/v2/pkg/radix"
intar "github.com/influxdata/influxdb/v2/pkg/tar"
"github.com/influxdata/influxdb/v2/pkg/tracing"
"github.com/influxdata/influxdb/v2/tsdb"
_ "github.com/influxdata/influxdb/v2/tsdb/index"
"github.com/influxdata/influxdb/v2/tsdb/index/inmem"
"github.com/influxdata/influxdb/v2/tsdb/index/tsi1"
"github.com/influxdata/influxql"
"go.uber.org/zap"
)
//go:generate -command tmpl go run github.com/benbjohnson/tmpl
//go:generate tmpl [email protected] iterator.gen.go.tmpl engine.gen.go.tmpl array_cursor.gen.go.tmpl array_cursor_iterator.gen.go.tmpl
// The file store generate uses a custom modified tmpl
// to support adding templated data from the command line.
// This can probably be worked into the upstream tmpl
// but isn't at the moment.
//go:generate go run ../../../tools/tmpl -i -data=file_store.gen.go.tmpldata file_store.gen.go.tmpl=file_store.gen.go
//go:generate go run ../../../tools/tmpl -i -d isArray=y -data=file_store.gen.go.tmpldata file_store.gen.go.tmpl=file_store_array.gen.go
//go:generate tmpl [email protected] encoding.gen.go.tmpl
//go:generate tmpl [email protected] compact.gen.go.tmpl
//go:generate tmpl [email protected] reader.gen.go.tmpl
func init() {
tsdb.RegisterEngine("tsm1", NewEngine)
}
var (
// Ensure Engine implements the interface.
_ tsdb.Engine = &Engine{}
// Static objects to prevent small allocs.
timeBytes = []byte("time")
keyFieldSeparatorBytes = []byte(keyFieldSeparator)
emptyBytes = []byte{}
)
var (
tsmGroup = metrics.MustRegisterGroup("tsm1")
numberOfRefCursorsCounter = metrics.MustRegisterCounter("cursors_ref", metrics.WithGroup(tsmGroup))
numberOfAuxCursorsCounter = metrics.MustRegisterCounter("cursors_aux", metrics.WithGroup(tsmGroup))
numberOfCondCursorsCounter = metrics.MustRegisterCounter("cursors_cond", metrics.WithGroup(tsmGroup))
planningTimer = metrics.MustRegisterTimer("planning_time", metrics.WithGroup(tsmGroup))
)
// NewContextWithMetricsGroup creates a new context with a tsm1 metrics.Group for tracking
// various metrics when accessing TSM data.
func NewContextWithMetricsGroup(ctx context.Context) context.Context {
group := metrics.NewGroup(tsmGroup)
return metrics.NewContextWithGroup(ctx, group)
}
// MetricsGroupFromContext returns the tsm1 metrics.Group associated with the context
// or nil if no group has been assigned.
func MetricsGroupFromContext(ctx context.Context) *metrics.Group {
return metrics.GroupFromContext(ctx)
}
const (
// keyFieldSeparator separates the series key from the field name in the composite key
// that identifies a specific field in series
keyFieldSeparator = "#!~#"
// deleteFlushThreshold is the size in bytes of a batch of series keys to delete.
deleteFlushThreshold = 50 * 1024 * 1024
)
// Statistics gathered by the engine.
const (
statCacheCompactions = "cacheCompactions"
statCacheCompactionsActive = "cacheCompactionsActive"
statCacheCompactionError = "cacheCompactionErr"
statCacheCompactionDuration = "cacheCompactionDuration"
statTSMLevel1Compactions = "tsmLevel1Compactions"
statTSMLevel1CompactionsActive = "tsmLevel1CompactionsActive"
statTSMLevel1CompactionError = "tsmLevel1CompactionErr"
statTSMLevel1CompactionDuration = "tsmLevel1CompactionDuration"
statTSMLevel1CompactionQueue = "tsmLevel1CompactionQueue"
statTSMLevel2Compactions = "tsmLevel2Compactions"
statTSMLevel2CompactionsActive = "tsmLevel2CompactionsActive"
statTSMLevel2CompactionError = "tsmLevel2CompactionErr"
statTSMLevel2CompactionDuration = "tsmLevel2CompactionDuration"
statTSMLevel2CompactionQueue = "tsmLevel2CompactionQueue"
statTSMLevel3Compactions = "tsmLevel3Compactions"
statTSMLevel3CompactionsActive = "tsmLevel3CompactionsActive"
statTSMLevel3CompactionError = "tsmLevel3CompactionErr"
statTSMLevel3CompactionDuration = "tsmLevel3CompactionDuration"
statTSMLevel3CompactionQueue = "tsmLevel3CompactionQueue"
statTSMOptimizeCompactions = "tsmOptimizeCompactions"
statTSMOptimizeCompactionsActive = "tsmOptimizeCompactionsActive"
statTSMOptimizeCompactionError = "tsmOptimizeCompactionErr"
statTSMOptimizeCompactionDuration = "tsmOptimizeCompactionDuration"
statTSMOptimizeCompactionQueue = "tsmOptimizeCompactionQueue"
statTSMFullCompactions = "tsmFullCompactions"
statTSMFullCompactionsActive = "tsmFullCompactionsActive"
statTSMFullCompactionError = "tsmFullCompactionErr"
statTSMFullCompactionDuration = "tsmFullCompactionDuration"
statTSMFullCompactionQueue = "tsmFullCompactionQueue"
)
// Engine represents a storage engine with compressed blocks.
type Engine struct {
mu sync.RWMutex
index tsdb.Index
// The following group of fields is used to track the state of level compactions within the
// Engine. The WaitGroup is used to monitor the compaction goroutines, the 'done' channel is
// used to signal those goroutines to shutdown. Every request to disable level compactions will
// call 'Wait' on 'wg', with the first goroutine to arrive (levelWorkers == 0 while holding the
// lock) will close the done channel and re-assign 'nil' to the variable. Re-enabling will
// decrease 'levelWorkers', and when it decreases to zero, level compactions will be started
// back up again.
wg *sync.WaitGroup // waitgroup for active level compaction goroutines
done chan struct{} // channel to signal level compactions to stop
levelWorkers int // Number of "workers" that expect compactions to be in a disabled state
snapDone chan struct{} // channel to signal snapshot compactions to stop
snapWG *sync.WaitGroup // waitgroup for running snapshot compactions
id uint64
path string
sfile *tsdb.SeriesFile
logger *zap.Logger // Logger to be used for important messages
traceLogger *zap.Logger // Logger to be used when trace-logging is on.
traceLogging bool
fieldset *tsdb.MeasurementFieldSet
WAL *WAL
Cache *Cache
Compactor *Compactor
CompactionPlan CompactionPlanner
FileStore *FileStore
MaxPointsPerBlock int
// CacheFlushMemorySizeThreshold specifies the minimum size threshold for
// the cache when the engine should write a snapshot to a TSM file
CacheFlushMemorySizeThreshold uint64
// CacheFlushWriteColdDuration specifies the length of time after which if
// no writes have been committed to the WAL, the engine will write
// a snapshot of the cache to a TSM file
CacheFlushWriteColdDuration time.Duration
// WALEnabled determines whether writes to the WAL are enabled. If this is false,
// writes will only exist in the cache and can be lost if a snapshot has not occurred.
WALEnabled bool
// Invoked when creating a backup file "as new".
formatFileName FormatFileNameFunc
// Controls whether to enabled compactions when the engine is open
enableCompactionsOnOpen bool
stats *EngineStatistics
// Limiter for concurrent compactions.
compactionLimiter limiter.Fixed
scheduler *scheduler
// provides access to the total set of series IDs
seriesIDSets tsdb.SeriesIDSets
// seriesTypeMap maps a series key to field type
seriesTypeMap *radix.Tree
// muDigest ensures only one goroutine can generate a digest at a time.
muDigest sync.RWMutex
}
// NewEngine returns a new instance of Engine.
func NewEngine(id uint64, idx tsdb.Index, path string, walPath string, sfile *tsdb.SeriesFile, opt tsdb.EngineOptions) tsdb.Engine {
var wal *WAL
if opt.WALEnabled {
wal = NewWAL(walPath)
wal.syncDelay = time.Duration(opt.Config.WALFsyncDelay)
}
fs := NewFileStore(path)
fs.openLimiter = opt.OpenLimiter
if opt.FileStoreObserver != nil {
fs.WithObserver(opt.FileStoreObserver)
}
fs.tsmMMAPWillNeed = opt.Config.TSMWillNeed
cache := NewCache(uint64(opt.Config.CacheMaxMemorySize))
c := NewCompactor()
c.Dir = path
c.FileStore = fs
c.RateLimit = opt.CompactionThroughputLimiter
var planner CompactionPlanner = NewDefaultPlanner(fs, time.Duration(opt.Config.CompactFullWriteColdDuration))
if opt.CompactionPlannerCreator != nil {
planner = opt.CompactionPlannerCreator(opt.Config).(CompactionPlanner)
planner.SetFileStore(fs)
}
logger := zap.NewNop()
stats := &EngineStatistics{}
e := &Engine{
id: id,
path: path,
index: idx,
sfile: sfile,
logger: logger,
traceLogger: logger,
traceLogging: opt.Config.TraceLoggingEnabled,
WAL: wal,
Cache: cache,
FileStore: fs,
Compactor: c,
CompactionPlan: planner,
CacheFlushMemorySizeThreshold: uint64(opt.Config.CacheSnapshotMemorySize),
CacheFlushWriteColdDuration: time.Duration(opt.Config.CacheSnapshotWriteColdDuration),
enableCompactionsOnOpen: true,
WALEnabled: opt.WALEnabled,
formatFileName: DefaultFormatFileName,
stats: stats,
compactionLimiter: opt.CompactionLimiter,
scheduler: newScheduler(stats, opt.CompactionLimiter.Capacity()),
seriesIDSets: opt.SeriesIDSets,
}
// Feature flag to enable per-series type checking, by default this is off and
// e.seriesTypeMap will be nil.
if os.Getenv("INFLUXDB_SERIES_TYPE_CHECK_ENABLED") != "" {
e.seriesTypeMap = radix.New()
}
if e.traceLogging {
fs.enableTraceLogging(true)
if e.WALEnabled {
e.WAL.enableTraceLogging(true)
}
}
return e
}
func (e *Engine) WithFormatFileNameFunc(formatFileNameFunc FormatFileNameFunc) {
e.Compactor.WithFormatFileNameFunc(formatFileNameFunc)
e.formatFileName = formatFileNameFunc
}
func (e *Engine) WithParseFileNameFunc(parseFileNameFunc ParseFileNameFunc) {
e.FileStore.WithParseFileNameFunc(parseFileNameFunc)
e.Compactor.WithParseFileNameFunc(parseFileNameFunc)
}
// Digest returns a reader for the shard's digest.
func (e *Engine) Digest() (io.ReadCloser, int64, error) {
e.muDigest.Lock()
defer e.muDigest.Unlock()
log, logEnd := logger.NewOperation(context.TODO(), e.logger, "Engine digest", "tsm1_digest")
defer logEnd()
log.Info("Starting digest", zap.String("tsm1_path", e.path))
digestPath := filepath.Join(e.path, DigestFilename)
// Get a list of tsm file paths from the FileStore.
files := e.FileStore.Files()
tsmfiles := make([]string, 0, len(files))
for _, f := range files {
tsmfiles = append(tsmfiles, f.Path())
}
// See if there's a fresh digest cached on disk.
fresh, reason := DigestFresh(e.path, tsmfiles, e.LastModified())
if fresh {
f, err := os.Open(digestPath)
if err == nil {
fi, err := f.Stat()
if err != nil {
log.Info("Digest aborted, couldn't stat digest file", logger.Shard(e.id), zap.Error(err))
return nil, 0, err
}
log.Info("Digest is fresh", logger.Shard(e.id), zap.String("path", digestPath))
// Return the cached digest.
return f, fi.Size(), nil
}
}
log.Info("Digest stale", logger.Shard(e.id), zap.String("reason", reason))
// Either no digest existed or the existing one was stale
// so generate a new digest.
// Make sure the directory exists, in case it was deleted for some reason.
if err := os.MkdirAll(e.path, 0777); err != nil {
log.Info("Digest aborted, problem creating shard directory path", zap.Error(err))
return nil, 0, err
}
// Create a tmp file to write the digest to.
tf, err := os.Create(digestPath + ".tmp")
if err != nil {
log.Info("Digest aborted, problem creating tmp digest", zap.Error(err))
return nil, 0, err
}
// Write the new digest to the tmp file.
if err := Digest(e.path, tsmfiles, tf); err != nil {
log.Info("Digest aborted, problem writing tmp digest", zap.Error(err))
tf.Close()
os.Remove(tf.Name())
return nil, 0, err
}
// Rename the temporary digest file to the actual digest file.
if err := file.RenameFile(tf.Name(), digestPath); err != nil {
log.Info("Digest aborted, problem renaming tmp digest", zap.Error(err))
return nil, 0, err
}
// Create and return a reader for the new digest file.
f, err := os.Open(digestPath)
if err != nil {
log.Info("Digest aborted, opening new digest", zap.Error(err))
return nil, 0, err
}
fi, err := f.Stat()
if err != nil {
log.Info("Digest aborted, can't stat new digest", zap.Error(err))
f.Close()
return nil, 0, err
}
log.Info("Digest written", zap.String("tsm1_digest_path", digestPath), zap.Int64("size", fi.Size()))
return f, fi.Size(), nil
}
// SetEnabled sets whether the engine is enabled.
func (e *Engine) SetEnabled(enabled bool) {
e.enableCompactionsOnOpen = enabled
e.SetCompactionsEnabled(enabled)
}
// SetCompactionsEnabled enables compactions on the engine. When disabled
// all running compactions are aborted and new compactions stop running.
func (e *Engine) SetCompactionsEnabled(enabled bool) {
if enabled {
e.enableSnapshotCompactions()
e.enableLevelCompactions(false)
} else {
e.disableSnapshotCompactions()
e.disableLevelCompactions(false)
}
}
// enableLevelCompactions will request that level compactions start back up again
//
// 'wait' signifies that a corresponding call to disableLevelCompactions(true) was made at some
// point, and the associated task that required disabled compactions is now complete
func (e *Engine) enableLevelCompactions(wait bool) {
// If we don't need to wait, see if we're already enabled
if !wait {
e.mu.RLock()
if e.done != nil {
e.mu.RUnlock()
return
}
e.mu.RUnlock()
}
e.mu.Lock()
if wait {
e.levelWorkers -= 1
}
if e.levelWorkers != 0 || e.done != nil {
// still waiting on more workers or already enabled
e.mu.Unlock()
return
}
// last one to enable, start things back up
e.Compactor.EnableCompactions()
e.done = make(chan struct{})
wg := new(sync.WaitGroup)
wg.Add(1)
e.wg = wg
e.mu.Unlock()
go func() { defer wg.Done(); e.compact(wg) }()
}
// disableLevelCompactions will stop level compactions before returning.
//
// If 'wait' is set to true, then a corresponding call to enableLevelCompactions(true) will be
// required before level compactions will start back up again.
func (e *Engine) disableLevelCompactions(wait bool) {
e.mu.Lock()
old := e.levelWorkers
if wait {
e.levelWorkers += 1
}
// Hold onto the current done channel so we can wait on it if necessary
waitCh := e.done
wg := e.wg
if old == 0 && e.done != nil {
// It's possible we have closed the done channel and released the lock and another
// goroutine has attempted to disable compactions. We're current in the process of
// disabling them so check for this and wait until the original completes.
select {
case <-e.done:
e.mu.Unlock()
return
default:
}
// Prevent new compactions from starting
e.Compactor.DisableCompactions()
// Stop all background compaction goroutines
close(e.done)
e.mu.Unlock()
wg.Wait()
// Signal that all goroutines have exited.
e.mu.Lock()
e.done = nil
e.mu.Unlock()
return
}
e.mu.Unlock()
// Compaction were already disabled.
if waitCh == nil {
return
}
// We were not the first caller to disable compactions and they were in the process
// of being disabled. Wait for them to complete before returning.
<-waitCh
wg.Wait()
}
func (e *Engine) enableSnapshotCompactions() {
// Check if already enabled under read lock
e.mu.RLock()
if e.snapDone != nil {
e.mu.RUnlock()
return
}
e.mu.RUnlock()
// Check again under write lock
e.mu.Lock()
if e.snapDone != nil {
e.mu.Unlock()
return
}
e.Compactor.EnableSnapshots()
e.snapDone = make(chan struct{})
wg := new(sync.WaitGroup)
wg.Add(1)
e.snapWG = wg
e.mu.Unlock()
go func() { defer wg.Done(); e.compactCache() }()
}
// disableSnapshotCompactions stops the background cache-snapshot goroutine
// and waits for it to exit. It is a no-op if snapshots are already disabled
// or another caller is already in the process of disabling them.
func (e *Engine) disableSnapshotCompactions() {
	e.mu.Lock()
	if e.snapDone == nil {
		// Already fully disabled.
		e.mu.Unlock()
		return
	}

	// We may be in the process of stopping snapshots. See if the channel
	// was closed.
	select {
	case <-e.snapDone:
		e.mu.Unlock()
		return
	default:
	}

	// first one here, disable and wait for completion
	close(e.snapDone)
	e.Compactor.DisableSnapshots()
	wg := e.snapWG
	e.mu.Unlock()

	// Wait for the snapshot goroutine to exit.
	wg.Wait()

	// Signal that the goroutine has exited and everything is stopped by setting
	// snapDone to nil.
	e.mu.Lock()
	e.snapDone = nil
	e.mu.Unlock()

	// If the cache is empty, free up its resources as well.
	if e.Cache.Size() == 0 {
		e.Cache.Free()
	}
}
// ScheduleFullCompaction will force the engine to fully compact all data stored.
// This will cancel any running compactions and snapshot any data in the cache to
// TSM files. This is an expensive operation.
func (e *Engine) ScheduleFullCompaction() error {
	// Snapshot any data in the cache
	if err := e.WriteSnapshot(); err != nil {
		return err
	}

	// Cancel running compactions
	e.SetCompactionsEnabled(false)

	// Ensure compactions are restarted; the full plan below takes effect
	// once compactions come back up.
	defer e.SetCompactionsEnabled(true)

	// Force the planner to only create a full plan.
	e.CompactionPlan.ForceFull()
	return nil
}
// Path returns the path the engine was opened with.
func (e *Engine) Path() string { return e.path }
// SetFieldName delegates registration of a field name for a measurement to
// the underlying index.
func (e *Engine) SetFieldName(measurement []byte, name string) {
	e.index.SetFieldName(measurement, name)
}
// MeasurementExists reports, via the underlying index, whether a measurement
// with the given name exists.
func (e *Engine) MeasurementExists(name []byte) (bool, error) {
	return e.index.MeasurementExists(name)
}
// MeasurementNamesByRegex returns, via the underlying index, the names of all
// measurements matching re.
func (e *Engine) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
	return e.index.MeasurementNamesByRegex(re)
}
// MeasurementFieldSet returns the measurement field set.
func (e *Engine) MeasurementFieldSet() *tsdb.MeasurementFieldSet {
	return e.fieldset
}
// MeasurementFields returns the measurement fields for a measurement,
// creating an empty entry in the field set if one does not exist yet.
func (e *Engine) MeasurementFields(measurement []byte) *tsdb.MeasurementFields {
	return e.fieldset.CreateFieldsIfNotExists(measurement)
}
// HasTagKey reports, via the underlying index, whether the measurement name
// has the tag key.
func (e *Engine) HasTagKey(name, key []byte) (bool, error) {
	return e.index.HasTagKey(name, key)
}
// MeasurementTagKeysByExpr returns, via the underlying index, the set of tag
// keys for the measurement that match expr.
func (e *Engine) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {
	return e.index.MeasurementTagKeysByExpr(name, expr)
}
// TagKeyCardinality returns, via the underlying index, the cardinality of the
// given tag key for the measurement.
func (e *Engine) TagKeyCardinality(name, key []byte) int {
	return e.index.TagKeyCardinality(name, key)
}
// SeriesN returns the unique number of series in the index.
func (e *Engine) SeriesN() int64 {
	return e.index.SeriesN()
}
// MeasurementsSketches returns sketches that describe the cardinality of the
// measurements in this shard and measurements that were in this shard, but have
// been tombstoned.
func (e *Engine) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
	return e.index.MeasurementsSketches()
}
// SeriesSketches returns sketches that describe the cardinality of the
// series in this shard and series that were in this shard, but have
// been tombstoned.
func (e *Engine) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
	return e.index.SeriesSketches()
}
// LastModified returns the time when this shard was last modified: the later
// of the file store's last-modified time and (when the WAL is enabled) the
// WAL's last write time.
func (e *Engine) LastModified() time.Time {
	latest := e.FileStore.LastModified()
	if e.WALEnabled {
		if walTime := e.WAL.LastWriteTime(); walTime.After(latest) {
			latest = walTime
		}
	}
	return latest
}
// EngineStatistics maintains statistics for the engine.
// All fields are updated atomically by the compaction goroutines; the
// three-element arrays are indexed by compaction level (levels 1-3 at
// indices 0-2).
type EngineStatistics struct {
	CacheCompactions        int64 // Counter of cache compactions that have ever run.
	CacheCompactionsActive  int64 // Gauge of cache compactions currently running.
	CacheCompactionErrors   int64 // Counter of cache compactions that have failed due to error.
	CacheCompactionDuration int64 // Counter of number of wall nanoseconds spent in cache compactions.

	TSMCompactions        [3]int64 // Counter of TSM compactions (by level) that have ever run.
	TSMCompactionsActive  [3]int64 // Gauge of TSM compactions (by level) currently running.
	TSMCompactionErrors   [3]int64 // Counter of TSM compactions (by level) that have failed due to error.
	TSMCompactionDuration [3]int64 // Counter of number of wall nanoseconds spent in TSM compactions (by level).
	TSMCompactionsQueue   [3]int64 // Gauge of TSM compactions queues (by level).

	TSMOptimizeCompactions        int64 // Counter of optimize compactions that have ever run.
	TSMOptimizeCompactionsActive  int64 // Gauge of optimize compactions currently running.
	TSMOptimizeCompactionErrors   int64 // Counter of optimize compactions that have failed due to error.
	TSMOptimizeCompactionDuration int64 // Counter of number of wall nanoseconds spent in optimize compactions.
	TSMOptimizeCompactionsQueue   int64 // Gauge of optimize compactions queue.

	TSMFullCompactions        int64 // Counter of full compactions that have ever run.
	TSMFullCompactionsActive  int64 // Gauge of full compactions currently running.
	TSMFullCompactionErrors   int64 // Counter of full compactions that have failed due to error.
	TSMFullCompactionDuration int64 // Counter of number of wall nanoseconds spent in full compactions.
	TSMFullCompactionsQueue   int64 // Gauge of full compactions queue.
}
// Statistics returns statistics for periodic monitoring.
// Engine counters/gauges are read with atomic loads because they are updated
// concurrently by the compaction goroutines; cache, file store and (when
// enabled) WAL statistics are appended afterwards.
func (e *Engine) Statistics(tags map[string]string) []models.Statistic {
	statistics := make([]models.Statistic, 0, 4)
	statistics = append(statistics, models.Statistic{
		Name: "tsm1_engine",
		Tags: tags,
		Values: map[string]interface{}{
			statCacheCompactions:        atomic.LoadInt64(&e.stats.CacheCompactions),
			statCacheCompactionsActive:  atomic.LoadInt64(&e.stats.CacheCompactionsActive),
			statCacheCompactionError:    atomic.LoadInt64(&e.stats.CacheCompactionErrors),
			statCacheCompactionDuration: atomic.LoadInt64(&e.stats.CacheCompactionDuration),

			statTSMLevel1Compactions:        atomic.LoadInt64(&e.stats.TSMCompactions[0]),
			statTSMLevel1CompactionsActive:  atomic.LoadInt64(&e.stats.TSMCompactionsActive[0]),
			statTSMLevel1CompactionError:    atomic.LoadInt64(&e.stats.TSMCompactionErrors[0]),
			statTSMLevel1CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[0]),
			statTSMLevel1CompactionQueue:    atomic.LoadInt64(&e.stats.TSMCompactionsQueue[0]),

			statTSMLevel2Compactions:        atomic.LoadInt64(&e.stats.TSMCompactions[1]),
			statTSMLevel2CompactionsActive:  atomic.LoadInt64(&e.stats.TSMCompactionsActive[1]),
			statTSMLevel2CompactionError:    atomic.LoadInt64(&e.stats.TSMCompactionErrors[1]),
			statTSMLevel2CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[1]),
			statTSMLevel2CompactionQueue:    atomic.LoadInt64(&e.stats.TSMCompactionsQueue[1]),

			statTSMLevel3Compactions:        atomic.LoadInt64(&e.stats.TSMCompactions[2]),
			statTSMLevel3CompactionsActive:  atomic.LoadInt64(&e.stats.TSMCompactionsActive[2]),
			statTSMLevel3CompactionError:    atomic.LoadInt64(&e.stats.TSMCompactionErrors[2]),
			statTSMLevel3CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[2]),
			statTSMLevel3CompactionQueue:    atomic.LoadInt64(&e.stats.TSMCompactionsQueue[2]),

			statTSMOptimizeCompactions:        atomic.LoadInt64(&e.stats.TSMOptimizeCompactions),
			statTSMOptimizeCompactionsActive:  atomic.LoadInt64(&e.stats.TSMOptimizeCompactionsActive),
			statTSMOptimizeCompactionError:    atomic.LoadInt64(&e.stats.TSMOptimizeCompactionErrors),
			statTSMOptimizeCompactionDuration: atomic.LoadInt64(&e.stats.TSMOptimizeCompactionDuration),
			statTSMOptimizeCompactionQueue:    atomic.LoadInt64(&e.stats.TSMOptimizeCompactionsQueue),

			statTSMFullCompactions:        atomic.LoadInt64(&e.stats.TSMFullCompactions),
			statTSMFullCompactionsActive:  atomic.LoadInt64(&e.stats.TSMFullCompactionsActive),
			statTSMFullCompactionError:    atomic.LoadInt64(&e.stats.TSMFullCompactionErrors),
			statTSMFullCompactionDuration: atomic.LoadInt64(&e.stats.TSMFullCompactionDuration),
			statTSMFullCompactionQueue:    atomic.LoadInt64(&e.stats.TSMFullCompactionsQueue),
		},
	})

	statistics = append(statistics, e.Cache.Statistics(tags)...)
	statistics = append(statistics, e.FileStore.Statistics(tags)...)
	if e.WALEnabled {
		statistics = append(statistics, e.WAL.Statistics(tags)...)
	}
	return statistics
}
// DiskSize returns the total size in bytes of all TSM and WAL segments on disk.
// The WAL contribution is only included when the WAL is enabled.
func (e *Engine) DiskSize() int64 {
	size := e.FileStore.DiskSizeBytes()
	if e.WALEnabled {
		size += e.WAL.DiskSizeBytes()
	}
	return size
}
// Open opens and initializes the engine: it creates the data directory,
// cleans up leftover temporary files, loads the cached field-type index,
// opens the WAL (if enabled), the file store and the compactor, replays the
// WAL into the cache, and optionally enables compactions.
// TODO(edd): plumb context
func (e *Engine) Open() error {
	if err := os.MkdirAll(e.path, 0777); err != nil {
		return err
	}

	if err := e.cleanup(); err != nil {
		return err
	}

	// A failure to load fields.idx is not fatal: log and fall back to
	// rebuilding the field set from the TSM files.
	fields, err := tsdb.NewMeasurementFieldSet(filepath.Join(e.path, "fields.idx"))
	if err != nil {
		e.logger.Warn(fmt.Sprintf("error opening fields.idx: %v. Rebuilding.", err))
	}

	e.mu.Lock()
	e.fieldset = fields
	e.mu.Unlock()

	e.index.SetFieldSet(fields)

	if e.WALEnabled {
		if err := e.WAL.Open(); err != nil {
			return err
		}
	}

	if err := e.FileStore.Open(); err != nil {
		return err
	}

	if e.WALEnabled {
		// Replay WAL segments into the in-memory cache.
		if err := e.reloadCache(); err != nil {
			return err
		}
	}

	e.Compactor.Open()

	if e.enableCompactionsOnOpen {
		e.SetCompactionsEnabled(true)
	}

	return nil
}
// Close closes the engine. Subsequent calls to Close are a nop.
func (e *Engine) Close() error {
	// Stop and wait for all background compaction goroutines first, before
	// taking the lock, since they take the lock themselves.
	e.SetCompactionsEnabled(false)

	// Lock now and close everything else down.
	e.mu.Lock()
	defer e.mu.Unlock()
	e.done = nil // Ensures that the channel will not be closed again.

	if err := e.FileStore.Close(); err != nil {
		return err
	}
	if e.WALEnabled {
		return e.WAL.Close()
	}
	return nil
}
// WithLogger sets the logger for the engine, tagging it with the engine name,
// and propagates it to the WAL (when enabled) and the file store. When trace
// logging is on, the trace logger is pointed at the same logger.
func (e *Engine) WithLogger(log *zap.Logger) {
	e.logger = log.With(zap.String("engine", "tsm1"))

	if e.traceLogging {
		e.traceLogger = e.logger
	}

	if e.WALEnabled {
		e.WAL.WithLogger(e.logger)
	}
	e.FileStore.WithLogger(e.logger)
}
// LoadMetadataIndex loads the shard metadata into memory.
//
// It walks the keys in the file store and the entries in the cache, batching
// series keys and their field types into the index, then persists the field
// set so the scan can be skipped next time (for non-inmem indexes).
//
// Note, it not safe to call LoadMetadataIndex concurrently. LoadMetadataIndex
// should only be called when initialising a new Engine.
func (e *Engine) LoadMetadataIndex(shardID uint64, index tsdb.Index) error {
	now := time.Now()

	// Save reference to index for iterator creation.
	e.index = index

	// If we have the cached fields index on disk and we're using TSI, we
	// can skip scanning all the TSM files.
	if e.index.Type() != inmem.IndexName && !e.fieldset.IsEmpty() {
		return nil
	}

	// Batch buffers: keys are flushed to the index whenever the buffer
	// reaches capacity, to limit lock contention on the index.
	keys := make([][]byte, 0, 10000)
	fieldTypes := make([]influxql.DataType, 0, 10000)

	if err := e.FileStore.WalkKeys(nil, func(key []byte, typ byte) error {
		fieldType := BlockTypeToInfluxQLDataType(typ)
		if fieldType == influxql.Unknown {
			return fmt.Errorf("unknown block type: %v", typ)
		}

		keys = append(keys, key)
		fieldTypes = append(fieldTypes, fieldType)
		if len(keys) == cap(keys) {
			// Send batch of keys to the index.
			if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
				return err
			}

			// Reset buffers.
			keys, fieldTypes = keys[:0], fieldTypes[:0]
		}

		return nil
	}); err != nil {
		return err
	}

	if len(keys) > 0 {
		// Add remaining partial batch from FileStore.
		if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
			return err
		}
		// Buffers are reused below for the cache scan.
		keys, fieldTypes = keys[:0], fieldTypes[:0]
	}

	// load metadata from the Cache
	if err := e.Cache.ApplyEntryFn(func(key []byte, entry *entry) error {
		fieldType, err := entry.values.InfluxQLType()
		if err != nil {
			// Non-fatal: log and continue with whatever type was returned.
			e.logger.Info("Error getting the data type of values for key", zap.ByteString("key", key), zap.Error(err))
		}

		keys = append(keys, key)
		fieldTypes = append(fieldTypes, fieldType)
		if len(keys) == cap(keys) {
			// Send batch of keys to the index.
			if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
				return err
			}

			// Reset buffers.
			keys, fieldTypes = keys[:0], fieldTypes[:0]
		}

		return nil
	}); err != nil {
		return err
	}

	if len(keys) > 0 {
		// Add remaining partial batch from the cache.
		if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
			return err
		}
	}

	// Save the field set index so we don't have to rebuild it next time
	if err := e.fieldset.Save(); err != nil {
		return err
	}

	e.traceLogger.Info("Meta data index for shard loaded", zap.Uint64("id", shardID), zap.Duration("duration", time.Since(now)))
	return nil
}
// IsIdle returns true if the cache is empty, there are no running compactions
// and the shard is fully compacted.
func (e *Engine) IsIdle() bool {
	if e.Cache.Size() != 0 {
		return false
	}

	// Sum the "active" gauges for every compaction type; any non-zero total
	// means a compaction is still running.
	gauges := []*int64{
		&e.stats.CacheCompactionsActive,
		&e.stats.TSMCompactionsActive[0],
		&e.stats.TSMCompactionsActive[1],
		&e.stats.TSMCompactionsActive[2],
		&e.stats.TSMFullCompactionsActive,
		&e.stats.TSMOptimizeCompactionsActive,
	}
	var running int64
	for _, g := range gauges {
		running += atomic.LoadInt64(g)
	}

	return running == 0 && e.CompactionPlan.FullyCompacted()
}
// Free releases any resources held by the engine to free up memory or CPU.
// It frees the cache first, then the file store.
func (e *Engine) Free() error {
	e.Cache.Free()
	return e.FileStore.Free()
}
// Backup writes a tar archive of any TSM files modified since the passed
// in time to the passed in writer. The basePath will be prepended to the names
// of the files in the archive. It will force a snapshot of the WAL first
// then perform the backup with a read lock against the file store. This means
// that new TSM files will not be able to be created in this shard while the
// backup is running. For shards that are still actively getting writes, this
// could cause the WAL to back up, increasing memory usage and eventually rejecting writes.
func (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error {
	var err error
	var path string

	// Retry the snapshot a few times: another snapshot may already be in
	// progress, in which case we back off exponentially and try again.
	for i := 0; i < 3; i++ {
		path, err = e.CreateSnapshot()
		if err == nil {
			// Snapshot succeeded. Stop retrying: without this break the
			// loop would create additional snapshots whose temporary
			// directories would never be removed (only the last path is
			// cleaned up below).
			break
		}

		switch err {
		case ErrSnapshotInProgress:
			backoff := time.Duration(math.Pow(32, float64(i))) * time.Millisecond
			time.Sleep(backoff)
		default:
			return err
		}
	}

	if err == ErrSnapshotInProgress {
		// The snapshotter stayed busy for all retries; proceed with whatever
		// the snapshot directory already contains.
		e.logger.Warn("Snapshotter busy: Backup proceeding without snapshot contents.")
	}

	// Remove the temporary snapshot dir
	defer os.RemoveAll(path)

	return intar.Stream(w, path, basePath, intar.SinceFilterTarFile(since))
}
// timeStampFilterTarFile returns a tar-stream filter that only copies data in
// [start, end]. Non-TSM files are streamed unmodified. TSM files fully inside
// the range are streamed whole; TSM files that only partially overlap the
// range are rewritten block-by-block via filterFileToBackup so the archive
// contains just the overlapping blocks.
func (e *Engine) timeStampFilterTarFile(start, end time.Time) func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
	return func(fi os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
		if !strings.HasSuffix(fi.Name(), ".tsm") {
			return intar.StreamFile(fi, shardRelativePath, fullPath, tw)
		}

		var tombstonePath string
		f, err := os.Open(fullPath)
		if err != nil {
			return err
		}
		r, err := NewTSMReader(f)
		if err != nil {
			// The reader never took ownership of the handle; close it here
			// so it does not leak. (Ignore the close error: the reader may
			// already have closed it on its failure path — TODO confirm.)
			f.Close()
			return err
		}

		// Grab the tombstone file if one exists.
		if r.HasTombstones() {
			// Close the reader before returning so the underlying file
			// handle is not leaked on this early-return path.
			tombstonePath = filepath.Base(r.TombstoneFiles()[0].Path)
			if err := r.Close(); err != nil {
				return err
			}
			// NOTE(review): this streams the tombstone's base name with the
			// TSM file's FileInfo — confirm this is the intended archive
			// entry for tombstoned files.
			return intar.StreamFile(fi, shardRelativePath, tombstonePath, tw)
		}

		min, max := r.TimeRange()
		stun := start.UnixNano()
		eun := end.UnixNano()

		// We overlap time ranges, we need to filter the file
		if min >= stun && min <= eun && max > eun || // overlap to the right
			max >= stun && max <= eun && min < stun || // overlap to the left
			min <= stun && max >= eun { // TSM file has a range LARGER than the boundary
			err := e.filterFileToBackup(r, fi, shardRelativePath, fullPath, start.UnixNano(), end.UnixNano(), tw)
			if err != nil {
				if err := r.Close(); err != nil {
					return err
				}
				return err
			}
		}

		// above is the only case where we need to keep the reader open.
		if err := r.Close(); err != nil {
			return err
		}

		// the TSM file is 100% inside the range, so we can just write it without scanning each block
		if min >= start.UnixNano() && max <= end.UnixNano() {
			if err := intar.StreamFile(fi, shardRelativePath, fullPath, tw); err != nil {
				return err
			}
		}
		return nil
	}
}
// Export streams a tar archive of the shard's data restricted to the
// [start, end] time range to w, prefixing archived file names with basePath.
// It snapshots the shard first and removes the snapshot directory when done.
func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.Time) error {
	snapDir, err := e.CreateSnapshot()
	if err != nil {
		return err
	}
	// Remove the temporary snapshot dir once the stream completes.
	defer os.RemoveAll(snapDir)

	return intar.Stream(w, snapDir, basePath, e.timeStampFilterTarFile(start, end))
}
// filterFileToBackup rewrites the TSM file behind r into a temporary file
// containing only the blocks whose time range overlaps [start, end], then
// streams that temporary file into the tar archive under the original file
// name. The temporary file is removed when the function returns.
func (e *Engine) filterFileToBackup(r *TSMReader, fi os.FileInfo, shardRelativePath, fullPath string, start, end int64, tw *tar.Writer) error {
	path := fullPath + ".tmp"
	out, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		return err
	}
	defer os.Remove(path)

	w, err := NewTSMWriter(out)
	if err != nil {
		return err
	}
	defer w.Close()

	// implicit else: here we iterate over the blocks and only keep the ones we really want.
	bi := r.BlockIterator()

	for bi.Next() {
		// not concerned with typ or checksum since we are just blindly writing back, with no decoding
		key, minTime, maxTime, _, _, buf, err := bi.Read()
		if err != nil {
			return err
		}
		// Keep any block whose [minTime, maxTime] overlaps [start, end].
		if minTime >= start && minTime <= end ||
			maxTime >= start && maxTime <= end ||
			minTime <= start && maxTime >= end {
			err := w.WriteBlock(key, minTime, maxTime, buf)
			if err != nil {
				return err
			}
		}
	}

	if err := bi.Err(); err != nil {
		return err
	}

	err = w.WriteIndex()
	if err != nil {
		return err
	}

	// make sure the whole file is out to disk
	if err := w.Flush(); err != nil {
		return err
	}

	tmpFi, err := os.Stat(path)
	if err != nil {
		return err
	}

	// Stream the temp file but record it in the archive under the original name.
	return intar.StreamRenameFile(tmpFi, fi.Name(), shardRelativePath, path, tw)
}
// Restore reads a tar archive generated by Backup().
// Only files that match basePath will be copied into the directory. This obtains
// a write lock so no operations can be performed while restoring.
// Files keep their original names (asNew=false in the overlay call).
func (e *Engine) Restore(r io.Reader, basePath string) error {
	return e.overlay(r, basePath, false)
}
// Import reads a tar archive generated by Backup() and adds each
// file matching basePath as a new TSM file. This obtains
// a write lock so no operations can be performed while Importing.
// If the import is successful, a full compaction is scheduled.
func (e *Engine) Import(r io.Reader, basePath string) error {
	err := e.overlay(r, basePath, true)
	if err == nil {
		err = e.ScheduleFullCompaction()
	}
	return err
}
// overlay reads a tar archive generated by Backup() and adds each file
// from the archive matching basePath to the shard.
// If asNew is true, each file will be installed as a new TSM file even if an
// existing file with the same name in the backup exists.
// After installing the files it opens each new TSM file and feeds its series
// keys and field types to the index in batches.
func (e *Engine) overlay(r io.Reader, basePath string, asNew bool) error {
	// Copy files from archive while under lock to prevent reopening.
	newFiles, err := func() ([]string, error) {
		e.mu.Lock()
		defer e.mu.Unlock()

		var newFiles []string
		tr := tar.NewReader(r)
		for {
			// io.EOF from the tar reader marks the end of the archive.
			if fileName, err := e.readFileFromBackup(tr, basePath, asNew); err == io.EOF {
				break
			} else if err != nil {
				return nil, err
			} else if fileName != "" {
				newFiles = append(newFiles, fileName)
			}
		}

		// Make the new files durable before telling the file store about them.
		if err := file.SyncDir(e.path); err != nil {
			return nil, err
		}

		// The filestore will only handle tsm files. Other file types will be ignored.
		if err := e.FileStore.Replace(nil, newFiles); err != nil {
			return nil, err
		}
		return newFiles, nil
	}()

	if err != nil {
		return err
	}

	// Load any new series keys to the index
	tsmFiles := make([]TSMFile, 0, len(newFiles))
	defer func() {
		for _, r := range tsmFiles {
			r.Close()
		}
	}()

	ext := fmt.Sprintf(".%s", TmpTSMFileExtension)
	for _, f := range newFiles {
		// If asNew is true, the files created from readFileFromBackup will be new ones
		// having a temp extension.
		f = strings.TrimSuffix(f, ext)
		if !strings.HasSuffix(f, TSMFileExtension) {
			// This isn't a .tsm file.
			continue
		}

		fd, err := os.Open(f)
		if err != nil {
			return err
		}

		r, err := NewTSMReader(fd)
		if err != nil {
			return err
		}
		tsmFiles = append(tsmFiles, r)
	}

	// Merge and dedup all the series keys across each reader to reduce
	// lock contention on the index.
	keys := make([][]byte, 0, 10000)
	fieldTypes := make([]influxql.DataType, 0, 10000)

	ki := newMergeKeyIterator(tsmFiles, nil)
	for ki.Next() {
		key, typ := ki.Read()
		fieldType := BlockTypeToInfluxQLDataType(typ)
		if fieldType == influxql.Unknown {
			return fmt.Errorf("unknown block type: %v", typ)
		}

		keys = append(keys, key)
		fieldTypes = append(fieldTypes, fieldType)

		if len(keys) == cap(keys) {
			// Send batch of keys to the index.
			if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
				return err
			}

			// Reset buffers.
			keys, fieldTypes = keys[:0], fieldTypes[:0]
		}
	}

	if len(keys) > 0 {
		// Add remaining partial batch.
		if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
			return err
		}
	}
	return nil
}
// readFileFromBackup copies the next file from the archive into the shard.
// The file is skipped if it does not have a matching shardRelativePath prefix.
// If asNew is true, each file will be installed as a new TSM file even if an
// existing file with the same name in the backup exists.
// Returns the path of the temporary file written (empty string for skipped
// entries), or io.EOF from tr.Next() at the end of the archive.
func (e *Engine) readFileFromBackup(tr *tar.Reader, shardRelativePath string, asNew bool) (string, error) {
	// Read next archive file.
	hdr, err := tr.Next()
	if err != nil {
		return "", err
	}

	if !strings.HasSuffix(hdr.Name, TSMFileExtension) {
		// This isn't a .tsm file.
		return "", nil
	}

	filename := filepath.Base(filepath.FromSlash(hdr.Name))

	// If this is a directory entry (usually just `index` for tsi), create it an move on.
	if hdr.Typeflag == tar.TypeDir {
		if err := os.MkdirAll(filepath.Join(e.path, filename), os.FileMode(hdr.Mode).Perm()); err != nil {
			return "", err
		}
		return "", nil
	}

	if asNew {
		// Install under a freshly generated name so an existing file with
		// the same name is not clobbered.
		filename = e.formatFileName(e.FileStore.NextGeneration(), 1) + "." + TSMFileExtension
	}

	// Write to a .tmp file; the caller promotes it via FileStore.Replace.
	tmp := fmt.Sprintf("%s.%s", filepath.Join(e.path, filename), TmpTSMFileExtension)
	// Create new file on disk.
	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		return "", err
	}
	defer f.Close()

	// Copy from archive to the file.
	if _, err := io.CopyN(f, tr, hdr.Size); err != nil {
		return "", err
	}

	// Sync to disk & close.
	if err := f.Sync(); err != nil {
		return "", err
	}

	return tmp, nil
}
// addToIndexFromKey will pull the measurement names, series keys, and field
// names from composite keys, and add them to the database index and measurement
// fields. The keys slice is rewritten in place from TSM key format to index
// key format.
func (e *Engine) addToIndexFromKey(keys [][]byte, fieldTypes []influxql.DataType) error {
	names := make([][]byte, 0, len(keys))
	tagSets := make([]models.Tags, 0, len(keys))

	for i := range keys {
		// Replace tsm key format with index key format.
		seriesKey, field := SeriesAndFieldFromCompositeKey(keys[i])
		keys[i] = seriesKey

		name := models.ParseName(seriesKey)
		mf := e.fieldset.CreateFieldsIfNotExists(name)
		if err := mf.CreateFieldIfNotExists(field, fieldTypes[i]); err != nil {
			return err
		}

		names = append(names, name)
		tagSets = append(tagSets, models.ParseTags(seriesKey))
	}

	// Build in-memory index, if necessary.
	if e.index.Type() == inmem.IndexName {
		return e.index.InitializeSeries(keys, names, tagSets)
	}
	return e.index.CreateSeriesListIfNotExists(keys, names, tagSets)
}
// WritePoints writes metadata and point data into the engine.
// It returns an error if new points are added to an existing key.
// Points are exploded into one value per field keyed by the composite
// series+field key, type-checked against the series type map (when present),
// then written to the cache and (if enabled) the WAL. Type conflicts drop
// the offending field and are reported via the returned seriesErr rather
// than aborting the whole batch.
func (e *Engine) WritePoints(points []models.Point) error {
	values := make(map[string][]Value, len(points))
	var (
		keyBuf    []byte // reused composite-key buffer: series key + separator + field key
		baseLen   int    // length of keyBuf up to and including the separator
		seriesErr error  // sticky: first type-conflict error encountered
	)

	for _, p := range points {
		keyBuf = append(keyBuf[:0], p.Key()...)
		keyBuf = append(keyBuf, keyFieldSeparator...)
		baseLen = len(keyBuf)
		iter := p.FieldIterator()
		t := p.Time().UnixNano()
		for iter.Next() {
			// Skip fields name "time", they are illegal
			if bytes.Equal(iter.FieldKey(), timeBytes) {
				continue
			}

			keyBuf = append(keyBuf[:baseLen], iter.FieldKey()...)

			if e.seriesTypeMap != nil {
				// Fast-path check to see if the field for the series already exists.
				if v, ok := e.seriesTypeMap.Get(keyBuf); !ok {
					if typ, err := e.Type(keyBuf); err != nil {
						// Field type is unknown, we can try to add it.
					} else if typ != iter.Type() {
						// Existing type is different from what was passed in, we need to drop
						// this write and refresh the series type map.
						seriesErr = tsdb.ErrFieldTypeConflict
						e.seriesTypeMap.Insert(keyBuf, int(typ))
						continue
					}

					// Doesn't exist, so try to insert
					vv, ok := e.seriesTypeMap.Insert(keyBuf, int(iter.Type()))

					// We didn't insert and the type that exists isn't what we tried to insert, so
					// we have a conflict and must drop this field/series.
					if !ok || vv != int(iter.Type()) {
						seriesErr = tsdb.ErrFieldTypeConflict
						continue
					}
				} else if v != int(iter.Type()) {
					// The series already exists, but with a different type.  This is also a type conflict
					// and we need to drop this field/series.
					seriesErr = tsdb.ErrFieldTypeConflict
					continue
				}
			}

			// Convert the field value to the engine's Value type for its type.
			var v Value
			switch iter.Type() {
			case models.Float:
				fv, err := iter.FloatValue()
				if err != nil {
					return err
				}
				v = NewFloatValue(t, fv)
			case models.Integer:
				iv, err := iter.IntegerValue()
				if err != nil {
					return err
				}
				v = NewIntegerValue(t, iv)
			case models.Unsigned:
				iv, err := iter.UnsignedValue()
				if err != nil {
					return err
				}
				v = NewUnsignedValue(t, iv)
			case models.String:
				v = NewStringValue(t, iter.StringValue())
			case models.Boolean:
				bv, err := iter.BooleanValue()
				if err != nil {
					return err
				}
				v = NewBooleanValue(t, bv)
			default:
				return fmt.Errorf("unknown field type for %s: %s", string(iter.FieldKey()), p.String())
			}
			values[string(keyBuf)] = append(values[string(keyBuf)], v)
		}
	}

	e.mu.RLock()
	defer e.mu.RUnlock()

	// first try to write to the cache
	if err := e.Cache.WriteMulti(values); err != nil {
		return err
	}

	if e.WALEnabled {
		if _, err := e.WAL.WriteMulti(values); err != nil {
			return err
		}
	}
	return seriesErr
}
// DeleteSeriesRange removes the values between min and max (inclusive) from all series
func (e *Engine) DeleteSeriesRange(itr tsdb.SeriesIterator, min, max int64) error {
	// Delegate to the predicate form with a predicate that matches every
	// series and always uses the caller's [min, max] range.
	deleteAll := func(name []byte, tags models.Tags) (int64, int64, bool) {
		return min, max, true
	}
	return e.DeleteSeriesRangeWithPredicate(itr, deleteAll)
}
// DeleteSeriesRangeWithPredicate removes the values between min and max (inclusive) from all series
// for which predicate() returns true. If predicate() is nil, then all values in range are removed.
// Series keys are accumulated into batches (flushed by size, or whenever the
// predicate changes the min/max for the batch) and deleted via deleteSeriesRange.
func (e *Engine) DeleteSeriesRangeWithPredicate(itr tsdb.SeriesIterator, predicate func(name []byte, tags models.Tags) (int64, int64, bool)) error {
	var disableOnce bool

	// Ensure that the index does not compact away the measurement or series we're
	// going to delete before we're done with them.
	if tsiIndex, ok := e.index.(*tsi1.Index); ok {
		tsiIndex.DisableCompactions()
		defer tsiIndex.EnableCompactions()
		tsiIndex.Wait()

		fs, err := tsiIndex.RetainFileSet()
		if err != nil {
			return err
		}
		defer fs.Release()
	}

	var (
		sz       int // accumulated byte size of the current batch of keys
		min, max int64 = math.MinInt64, math.MaxInt64

		// Indicator that the min/max time for the current batch has changed and
		// we need to flush the current batch before appending to it.
		flushBatch bool
	)

	// These are reversed from min/max to ensure they are different the first time through.
	newMin, newMax := int64(math.MaxInt64), int64(math.MinInt64)

	// There is no predicate, so setup newMin/newMax to delete the full time range.
	if predicate == nil {
		newMin = min
		newMax = max
	}

	batch := make([][]byte, 0, 10000)
	for {
		elem, err := itr.Next()
		if err != nil {
			return err
		} else if elem == nil {
			break
		}

		// See if the series should be deleted and if so, what range of time.
		if predicate != nil {
			var shouldDelete bool
			newMin, newMax, shouldDelete = predicate(elem.Name(), elem.Tags())
			if !shouldDelete {
				continue
			}

			// If the min/max happens to change for the batch, we need to flush
			// the current batch and start a new one.
			flushBatch = (min != newMin || max != newMax) && len(batch) > 0
		}

		if elem.Expr() != nil {
			if v, ok := elem.Expr().(*influxql.BooleanLiteral); !ok || !v.Val {
				return errors.New("fields not supported in WHERE clause during deletion")
			}
		}

		if !disableOnce {
			// Disable and abort running compactions so that tombstones added existing tsm
			// files don't get removed. This would cause deleted measurements/series to
			// re-appear once the compaction completed. We only disable the level compactions
			// so that snapshotting does not stop while writing out tombstones. If it is stopped,
			// and writing tombstones takes a long time, writes can get rejected due to the cache
			// filling up.
			e.disableLevelCompactions(true)
			defer e.enableLevelCompactions(true)

			e.sfile.DisableCompactions()
			defer e.sfile.EnableCompactions()
			e.sfile.Wait()

			disableOnce = true
		}

		if sz >= deleteFlushThreshold || flushBatch {
			// Delete all matching batch.
			if err := e.deleteSeriesRange(batch, min, max); err != nil {
				return err
			}
			batch = batch[:0]
			sz = 0
			flushBatch = false
		}

		// Use the new min/max time for the next iteration
		min = newMin
		max = newMax

		key := models.MakeKey(elem.Name(), elem.Tags())
		sz += len(key)
		batch = append(batch, key)
	}

	if len(batch) > 0 {
		// Delete all matching batch.
		if err := e.deleteSeriesRange(batch, min, max); err != nil {
			return err
		}
	}

	e.index.Rebuild()
	return nil
}
// deleteSeriesRange removes the values between min and max (inclusive) from all series. This
// does not update the index or disable compactions. This should mainly be called by DeleteSeriesRange
// and not directly.
func (e *Engine) deleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
if len(seriesKeys) == 0 {
return nil
}
// Min and max time in the engine are slightly different from the query language values.
if min == influxql.MinTime {
min = math.MinInt64
}
if max == influxql.MaxTime {
max = math.MaxInt64
}
var overlapsTimeRangeMinMax bool
var overlapsTimeRangeMinMaxLock sync.Mutex
e.FileStore.Apply(func(r TSMFile) error {
if r.OverlapsTimeRange(min, max) {
overlapsTimeRangeMinMaxLock.Lock()
overlapsTimeRangeMinMax = true
overlapsTimeRangeMinMaxLock.Unlock()
}
return nil
})
if !overlapsTimeRangeMinMax && e.Cache.store.count() > 0 {
overlapsTimeRangeMinMax = true
}
if !overlapsTimeRangeMinMax {
return nil
}
// Ensure keys are sorted since lower layers require them to be.
if !bytesutil.IsSorted(seriesKeys) {
bytesutil.Sort(seriesKeys)
}
// Run the delete on each TSM file in parallel
if err := e.FileStore.Apply(func(r TSMFile) error {
// See if this TSM file contains the keys and time range
minKey, maxKey := seriesKeys[0], seriesKeys[len(seriesKeys)-1]
tsmMin, tsmMax := r.KeyRange()
tsmMin, _ = SeriesAndFieldFromCompositeKey(tsmMin)
tsmMax, _ = SeriesAndFieldFromCompositeKey(tsmMax)
overlaps := bytes.Compare(tsmMin, maxKey) <= 0 && bytes.Compare(tsmMax, minKey) >= 0
if !overlaps || !r.OverlapsTimeRange(min, max) {
return nil
}
// Delete each key we find in the file. We seek to the min key and walk from there.
batch := r.BatchDelete()
n := r.KeyCount()
var j int
for i := r.Seek(minKey); i < n; i++ {
indexKey, _ := r.KeyAt(i)
seriesKey, _ := SeriesAndFieldFromCompositeKey(indexKey)
for j < len(seriesKeys) && bytes.Compare(seriesKeys[j], seriesKey) < 0 {
j++
}
if j >= len(seriesKeys) {
break
}
if bytes.Equal(seriesKeys[j], seriesKey) {
if err := batch.DeleteRange([][]byte{indexKey}, min, max); err != nil {
batch.Rollback()
return err
}
}
}
return batch.Commit()
}); err != nil {
return err
}
// find the keys in the cache and remove them
deleteKeys := make([][]byte, 0, len(seriesKeys))
// ApplySerialEntryFn cannot return an error in this invocation.
_ = e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error {
seriesKey, _ := SeriesAndFieldFromCompositeKey([]byte(k))
// Cache does not walk keys in sorted order, so search the sorted
// series we need to delete to see if any of the cache keys match.
i := bytesutil.SearchBytes(seriesKeys, seriesKey)
if i < len(seriesKeys) && bytes.Equal(seriesKey, seriesKeys[i]) {
// k is the measurement + tags + sep + field
deleteKeys = append(deleteKeys, k)
}
return nil
})
// Sort the series keys because ApplyEntryFn iterates over the keys randomly.
bytesutil.Sort(deleteKeys)
e.Cache.DeleteRange(deleteKeys, min, max)
// delete from the WAL
if e.WALEnabled {
if _, err := e.WAL.DeleteRange(deleteKeys, min, max); err != nil {
return err
}
}
// The series are deleted on disk, but the index may still say they exist.
// Depending on the the min,max time passed in, the series may or not actually
// exists now. To reconcile the index, we walk the series keys that still exists
// on disk and cross out any keys that match the passed in series. Any series
// left in the slice at the end do not exist and can be deleted from the index.
// Note: this is inherently racy if writes are occurring to the same measurement/series are
// being removed. A write could occur and exist in the cache at this point, but we
// would delete it from the index.
minKey := seriesKeys[0]
// Apply runs this func concurrently. The seriesKeys slice is mutated concurrently
// by different goroutines setting positions to nil.
if err := e.FileStore.Apply(func(r TSMFile) error {
n := r.KeyCount()
var j int
// Start from the min deleted key that exists in this file.
for i := r.Seek(minKey); i < n; i++ {
if j >= len(seriesKeys) {
return nil
}
indexKey, _ := r.KeyAt(i)
seriesKey, _ := SeriesAndFieldFromCompositeKey(indexKey)
// Skip over any deleted keys that are less than our tsm key
cmp := bytes.Compare(seriesKeys[j], seriesKey)
for j < len(seriesKeys) && cmp < 0 {
j++
if j >= len(seriesKeys) {
return nil
}
cmp = bytes.Compare(seriesKeys[j], seriesKey)
}
// We've found a matching key, cross it out so we do not remove it from the index.
if j < len(seriesKeys) && cmp == 0 {
seriesKeys[j] = emptyBytes
j++
}
}
return nil
}); err != nil {
return err
}
// The seriesKeys slice is mutated if they are still found in the cache.
cacheKeys := e.Cache.Keys()
for i := 0; i < len(seriesKeys); i++ {
seriesKey := seriesKeys[i]
// Already crossed out
if len(seriesKey) == 0 {
continue
}
j := bytesutil.SearchBytes(cacheKeys, seriesKey)
if j < len(cacheKeys) {
cacheSeriesKey, _ := SeriesAndFieldFromCompositeKey(cacheKeys[j])
if bytes.Equal(seriesKey, cacheSeriesKey) {
seriesKeys[i] = emptyBytes
}
}
}
// Have we deleted all values for the series? If so, we need to remove
// the series from the index.
hasDeleted := false
for _, k := range seriesKeys {
if len(k) > 0 {
hasDeleted = true
break
}
}
if hasDeleted {
buf := make([]byte, 1024) // For use when accessing series file.
ids := tsdb.NewSeriesIDSet()
measurements := make(map[string]struct{}, 1)
for _, k := range seriesKeys {
if len(k) == 0 {
continue // This key was wiped because it shouldn't be removed from index.
}
name, tags := models.ParseKeyBytes(k)
sid := e.sfile.SeriesID(name, tags, buf)
if sid == 0 {
continue
}
// See if this series was found in the cache earlier
i := bytesutil.SearchBytes(deleteKeys, k)
var hasCacheValues bool
// If there are multiple fields, they will have the same prefix. If any field
// has values, then we can't delete it from the index.
for i < len(deleteKeys) && bytes.HasPrefix(deleteKeys[i], k) {
if e.Cache.Values(deleteKeys[i]).Len() > 0 {
hasCacheValues = true
break
}
i++
}
if hasCacheValues {
continue
}
measurements[string(name)] = struct{}{}
// Remove the series from the local index.
if err := e.index.DropSeries(sid, k, false); err != nil {
return err
}
// Add the id to the set of delete ids.
ids.Add(sid)
}
fielsetChanged := false
for k := range measurements {
if dropped, err := e.index.DropMeasurementIfSeriesNotExist([]byte(k)); err != nil {
return err
} else if dropped {
if err := e.cleanupMeasurement([]byte(k)); err != nil {
return err
}
fielsetChanged = true
}
}
if fielsetChanged {
if err := e.fieldset.Save(); err != nil {
return err
}
}
// Remove any series IDs for our set that still exist in other shards.
// We cannot remove these from the series file yet.
if err := e.seriesIDSets.ForEach(func(s *tsdb.SeriesIDSet) {
ids = ids.AndNot(s)
}); err != nil {
return err
}
// Remove the remaining ids from the series file as they no longer exist
// in any shard.
var err error
ids.ForEach(func(id uint64) {
name, tags := e.sfile.Series(id)
if err1 := e.sfile.DeleteSeriesID(id); err1 != nil {
err = err1
return
}
// In the case of the inmem index the series can be removed across
// the global index (all shards).
if index, ok := e.index.(*inmem.ShardIndex); ok {
key := models.MakeKey(name, tags)
if e := index.Index.DropSeriesGlobal(key); e != nil {
err = e
}
}
})
if err != nil {
return err
}
}
return nil
}
// cleanupMeasurement removes the field set entry for a measurement once no
// series remain for it. Under the field set's write lock it re-checks both
// the cache and the file store and aborts the deletion if any key for the
// measurement still exists (e.g. due to a concurrent write).
func (e *Engine) cleanupMeasurement(name []byte) error {
	// A sentinel error message to cause DeleteWithLock to not delete the measurement
	abortErr := fmt.Errorf("measurements still exist")

	encodedName := models.EscapeMeasurement(name)
	sep := len(encodedName)

	// matchesMeasurement reports whether key k belongs to the measurement: it
	// must start with the escaped measurement name followed by either a tag
	// separator or the field separator. The length guard prevents an
	// out-of-range index when k is exactly the measurement name.
	matchesMeasurement := func(k []byte) bool {
		return len(k) > sep && bytes.HasPrefix(k, encodedName) &&
			(k[sep] == ',' || k[sep] == keyFieldSeparator[0])
	}

	// Under write lock, delete the measurement if we no longer have any data stored for
	// the measurement. If data exists, we can't delete the field set yet as there
	// were writes to the measurement while we are deleting it.
	if err := e.fieldset.DeleteWithLock(string(name), func() error {
		// First scan the cache to see if any series exists for this measurement.
		if err := e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error {
			if matchesMeasurement(k) {
				return abortErr
			}
			return nil
		}); err != nil {
			return err
		}

		// Check the filestore.
		return e.FileStore.WalkKeys(name, func(k []byte, _ byte) error {
			if matchesMeasurement(k) {
				return abortErr
			}
			return nil
		})
	}); err != nil && err != abortErr {
		// Something else failed, return it
		return err
	}

	return nil
}
// DeleteMeasurement deletes a measurement and all related series.
func (e *Engine) DeleteMeasurement(name []byte) error {
	// Look up every series belonging to the measurement via the index.
	is := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
	sitr, err := is.MeasurementSeriesByExprIterator(name, nil)
	switch {
	case err != nil:
		return err
	case sitr == nil:
		// No series for this measurement; nothing to delete.
		return nil
	}
	defer sitr.Close()

	// Deleting over the full time range removes every point for each series.
	return e.DeleteSeriesRange(tsdb.NewSeriesIteratorAdapter(e.sfile, sitr), math.MinInt64, math.MaxInt64)
}
// ForEachMeasurementName iterates over each measurement name in the engine.
// It delegates directly to the underlying index.
func (e *Engine) ForEachMeasurementName(fn func(name []byte) error) error {
	return e.index.ForEachMeasurementName(fn)
}
// CreateSeriesListIfNotExists adds the series, in bulk, to the index if they
// do not already exist. keys, names, and tagsSlice are parallel slices.
func (e *Engine) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSlice []models.Tags) error {
	return e.index.CreateSeriesListIfNotExists(keys, names, tagsSlice)
}
// CreateSeriesIfNotExists adds a single series to the index if it does not
// already exist.
func (e *Engine) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error {
	return e.index.CreateSeriesIfNotExists(key, name, tags)
}
// WriteTo is not implemented. It exists to satisfy an interface and panics if
// called.
func (e *Engine) WriteTo(w io.Writer) (n int64, err error) { panic("not implemented") }
// WriteSnapshot will snapshot the cache and write a new TSM file with its contents, releasing the snapshot when done.
func (e *Engine) WriteSnapshot() (err error) {
	// Lock and grab the cache snapshot along with all the closed WAL
	// filenames associated with the snapshot
	started := time.Now()

	log, logEnd := logger.NewOperation(context.TODO(), e.logger, "Cache snapshot", "tsm1_cache_snapshot")
	defer func() {
		elapsed := time.Since(started)
		e.Cache.UpdateCompactTime(elapsed)
		if err == nil {
			log.Info("Snapshot for path written", zap.String("path", e.path), zap.Duration("duration", elapsed))
		}
		logEnd()
	}()

	// Take the engine write lock only long enough to roll the WAL segment and
	// snapshot the cache; the expensive work below happens outside the lock.
	closedFiles, snapshot, err := func() (segments []string, snapshot *Cache, err error) {
		e.mu.Lock()
		defer e.mu.Unlock()

		if e.WALEnabled {
			// Close the active segment so all data being snapshotted lives in
			// closed segments, which can be removed once the snapshot commits.
			if err = e.WAL.CloseSegment(); err != nil {
				return
			}

			segments, err = e.WAL.ClosedSegments()
			if err != nil {
				return
			}
		}

		snapshot, err = e.Cache.Snapshot()
		if err != nil {
			return
		}

		return
	}()

	if err != nil {
		return err
	}

	if snapshot.Size() == 0 {
		// Nothing to write; discard the empty snapshot.
		e.Cache.ClearSnapshot(true)
		return nil
	}

	// The snapshotted cache may have duplicate points and unsorted data. We need to deduplicate
	// it before writing the snapshot. This can be very expensive so it's done while we are not
	// holding the engine write lock.
	dedup := time.Now()
	snapshot.Deduplicate()
	e.traceLogger.Info("Snapshot for path deduplicated",
		zap.String("path", e.path),
		zap.Duration("duration", time.Since(dedup)))

	return e.writeSnapshotAndCommit(log, closedFiles, snapshot)
}
// CreateSnapshot creates a temp directory holding hardlinks to the
// underlying shard files and returns its path.
func (e *Engine) CreateSnapshot() (string, error) {
	// Flush the cache first so the snapshot captures all in-memory data.
	if err := e.WriteSnapshot(); err != nil {
		return "", err
	}

	e.mu.RLock()
	defer e.mu.RUnlock()

	dir, err := e.FileStore.CreateSnapshot()
	if err != nil {
		return "", err
	}

	// Generate a snapshot of the index.
	return dir, nil
}
// writeSnapshotAndCommit will write the passed cache to a new TSM file and remove the closed WAL segments.
func (e *Engine) writeSnapshotAndCommit(log *zap.Logger, closedFiles []string, snapshot *Cache) (err error) {
	defer func() {
		// On failure, keep the snapshotted data in the cache (ClearSnapshot(false))
		// so the snapshot can be retried later.
		if err != nil {
			e.Cache.ClearSnapshot(false)
		}
	}()

	// write the new snapshot files
	newFiles, err := e.Compactor.WriteSnapshot(snapshot)
	if err != nil {
		log.Info("Error writing snapshot from compactor", zap.Error(err))
		return err
	}

	e.mu.RLock()
	defer e.mu.RUnlock()

	// update the file store with these new files
	if err := e.FileStore.Replace(nil, newFiles); err != nil {
		log.Info("Error adding new TSM files from snapshot. Removing temp files.", zap.Error(err))

		// Remove the new snapshot files. We will try again.
		for _, file := range newFiles {
			if err := os.Remove(file); err != nil {
				log.Info("Unable to remove file", zap.String("path", file), zap.Error(err))
			}
		}
		return err
	}

	// clear the snapshot from the in-memory cache, then the old WAL files
	e.Cache.ClearSnapshot(true)

	if e.WALEnabled {
		// WAL removal failures are logged but not returned: the data is already
		// durable in the new TSM files at this point.
		if err := e.WAL.Remove(closedFiles); err != nil {
			log.Info("Error removing closed WAL segments", zap.Error(err))
		}
	}

	return nil
}
// compactCache continually checks if the WAL cache should be written to disk.
// It runs until the engine's snapshot-done channel is closed.
func (e *Engine) compactCache() {
	t := time.NewTicker(time.Second)
	defer t.Stop()
	for {
		// Re-read the done channel each pass; presumably it can be replaced
		// when compactions are disabled/enabled — TODO confirm against the
		// engine lifecycle code.
		e.mu.RLock()
		quit := e.snapDone
		e.mu.RUnlock()

		select {
		case <-quit:
			return

		case <-t.C:
			e.Cache.UpdateAge()
			if e.ShouldCompactCache(time.Now()) {
				start := time.Now()
				e.traceLogger.Info("Compacting cache", zap.String("path", e.path))
				err := e.WriteSnapshot()
				// errCompactionsDisabled is expected while compactions are
				// paused and is not counted as an error.
				if err != nil && err != errCompactionsDisabled {
					e.logger.Info("Error writing snapshot", zap.Error(err))
					atomic.AddInt64(&e.stats.CacheCompactionErrors, 1)
				} else {
					atomic.AddInt64(&e.stats.CacheCompactions, 1)
				}
				atomic.AddInt64(&e.stats.CacheCompactionDuration, time.Since(start).Nanoseconds())
			}
		}
	}
}
// ShouldCompactCache returns true if the Cache is over its flush threshold
// or if the passed in lastWriteTime is older than the write cold threshold.
func (e *Engine) ShouldCompactCache(t time.Time) bool {
	size := e.Cache.Size()
	switch {
	case size == 0:
		// An empty cache never needs flushing.
		return false
	case size > e.CacheFlushMemorySizeThreshold:
		return true
	default:
		// Flush a cold cache that has not been written to recently.
		return t.Sub(e.Cache.LastWriteTime()) > e.CacheFlushWriteColdDuration
	}
}
// compact runs on a one-second ticker, planning level 1-3, full, and optimize
// compactions, publishing queue-depth stats, and asking the scheduler for the
// next runnable compaction. It exits when e.done is closed.
func (e *Engine) compact(wg *sync.WaitGroup) {
	t := time.NewTicker(time.Second)
	defer t.Stop()

	for {
		// Re-read the done channel each pass under the read lock.
		e.mu.RLock()
		quit := e.done
		e.mu.RUnlock()

		select {
		case <-quit:
			return

		case <-t.C:

			// Find our compaction plans
			level1Groups := e.CompactionPlan.PlanLevel(1)
			level2Groups := e.CompactionPlan.PlanLevel(2)
			level3Groups := e.CompactionPlan.PlanLevel(3)
			level4Groups := e.CompactionPlan.Plan(e.LastModified())
			atomic.StoreInt64(&e.stats.TSMOptimizeCompactionsQueue, int64(len(level4Groups)))

			// If no full compactions are need, see if an optimize is needed
			if len(level4Groups) == 0 {
				level4Groups = e.CompactionPlan.PlanOptimize()
				atomic.StoreInt64(&e.stats.TSMOptimizeCompactionsQueue, int64(len(level4Groups)))
			}

			// Update the level plan queue stats
			atomic.StoreInt64(&e.stats.TSMCompactionsQueue[0], int64(len(level1Groups)))
			atomic.StoreInt64(&e.stats.TSMCompactionsQueue[1], int64(len(level2Groups)))
			atomic.StoreInt64(&e.stats.TSMCompactionsQueue[2], int64(len(level3Groups)))

			// Set the queue depths on the scheduler
			e.scheduler.setDepth(1, len(level1Groups))
			e.scheduler.setDepth(2, len(level2Groups))
			e.scheduler.setDepth(3, len(level3Groups))
			e.scheduler.setDepth(4, len(level4Groups))

			// Find the next compaction that can run and try to kick it off
			if level, runnable := e.scheduler.next(); runnable {
				switch level {
				case 1:
					// A started group is popped so it is not released below.
					if e.compactHiPriorityLevel(level1Groups[0], 1, false, wg) {
						level1Groups = level1Groups[1:]
					}
				case 2:
					if e.compactHiPriorityLevel(level2Groups[0], 2, false, wg) {
						level2Groups = level2Groups[1:]
					}
				case 3:
					if e.compactLoPriorityLevel(level3Groups[0], 3, true, wg) {
						level3Groups = level3Groups[1:]
					}
				case 4:
					if e.compactFull(level4Groups[0], wg) {
						level4Groups = level4Groups[1:]
					}
				}
			}

			// Release all the plans we didn't start.
			e.CompactionPlan.Release(level1Groups)
			e.CompactionPlan.Release(level2Groups)
			e.CompactionPlan.Release(level3Groups)
			e.CompactionPlan.Release(level4Groups)
		}
	}
}
// startLevelCompaction tries to take a compaction slot and, if successful,
// runs strategy s for the given level in a new goroutine, tracking the
// active-compaction stat and releasing the plan when the work completes.
// It returns true if the compaction was started.
//
// Both the high- and low-priority level paths had byte-identical bodies;
// this helper is the single shared implementation.
func (e *Engine) startLevelCompaction(s *compactionStrategy, level int, wg *sync.WaitGroup) bool {
	// If no slot is available the caller keeps (and later releases) the plan.
	if !e.compactionLimiter.TryTake() {
		return false
	}

	atomic.AddInt64(&e.stats.TSMCompactionsActive[level-1], 1)
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer atomic.AddInt64(&e.stats.TSMCompactionsActive[level-1], -1)
		defer e.compactionLimiter.Release()
		s.Apply()
		// Release the files in the compaction plan
		e.CompactionPlan.Release([]CompactionGroup{s.group})
	}()
	return true
}

// compactHiPriorityLevel kicks off compactions using the high priority policy. It returns
// true if the compaction was started
func (e *Engine) compactHiPriorityLevel(grp CompactionGroup, level int, fast bool, wg *sync.WaitGroup) bool {
	s := e.levelCompactionStrategy(grp, fast, level)
	if s == nil {
		return false
	}

	// Try hi priority limiter, otherwise steal a little from the low priority if we can.
	return e.startLevelCompaction(s, level, wg)
}

// compactLoPriorityLevel kicks off compactions using the lo priority policy. It returns
// true if the compaction was started.
func (e *Engine) compactLoPriorityLevel(grp CompactionGroup, level int, fast bool, wg *sync.WaitGroup) bool {
	s := e.levelCompactionStrategy(grp, fast, level)
	if s == nil {
		return false
	}

	// Try the lo priority limiter, otherwise steal a little from the high priority if we can.
	return e.startLevelCompaction(s, level, wg)
}
// compactFull kicks off a full or optimize compaction using the low priority
// policy. It returns true if the compaction was started.
func (e *Engine) compactFull(grp CompactionGroup, wg *sync.WaitGroup) bool {
	strategy := e.fullCompactionStrategy(grp, false)
	if strategy == nil {
		return false
	}

	// Try the lo priority limiter, otherwise steal a little from the high priority if we can.
	if !e.compactionLimiter.TryTake() {
		return false
	}

	atomic.AddInt64(&e.stats.TSMFullCompactionsActive, 1)
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer atomic.AddInt64(&e.stats.TSMFullCompactionsActive, -1)
		defer e.compactionLimiter.Release()
		strategy.Apply()
		// Release the files in the compaction plan
		e.CompactionPlan.Release([]CompactionGroup{strategy.group})
	}()
	return true
}
// compactionStrategy holds the details of what to do in a compaction.
type compactionStrategy struct {
	group CompactionGroup // TSM files to compact together

	fast  bool // use the fast (CompactFast) path instead of a full compaction
	level int  // compaction level this strategy was planned for

	// Stat counters shared with the engine's stats; updated atomically
	// while the compaction runs.
	durationStat *int64
	activeStat   *int64
	successStat  *int64
	errorStat    *int64

	logger    *zap.Logger
	compactor *Compactor
	fileStore *FileStore
	engine    *Engine
}
// Apply executes the strategy against its compaction group and records the
// elapsed wall-clock time in the duration stat.
func (s *compactionStrategy) Apply() {
	start := time.Now()
	s.compactGroup()
	atomic.AddInt64(s.durationStat, time.Since(start).Nanoseconds())
}
// compactGroup executes the compaction strategy against a single CompactionGroup.
//
// On success the compacted files replace the group in the file store. On
// failure the error stat is incremented and, for a corrupt source file
// (errBlockRead), the file is renamed out of the way so the next compaction
// attempt can proceed.
func (s *compactionStrategy) compactGroup() {
	group := s.group
	log, logEnd := logger.NewOperation(context.TODO(), s.logger, "TSM compaction", "tsm1_compact_group")
	defer logEnd()

	log.Info("Beginning compaction", zap.Int("tsm1_files_n", len(group)))
	for i, f := range group {
		log.Info("Compacting file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f))
	}

	var (
		err   error
		files []string
	)

	if s.fast {
		files, err = s.compactor.CompactFast(group)
	} else {
		files, err = s.compactor.CompactFull(group)
	}

	if err != nil {
		_, inProgress := err.(errCompactionInProgress)
		if err == errCompactionsDisabled || inProgress {
			log.Info("Aborted compaction", zap.Error(err))

			if inProgress {
				// Back off briefly before the caller retries planning.
				time.Sleep(time.Second)
			}
			return
		}

		log.Warn("Error compacting TSM files", zap.Error(err))

		// We hit a bad TSM file - rename so the next compaction can proceed.
		if blockErr, ok := err.(errBlockRead); ok {
			path := blockErr.file
			log.Info("Renaming a corrupt TSM file due to compaction error", zap.Error(err))
			if err := s.fileStore.ReplaceWithCallback([]string{path}, nil, nil); err != nil {
				log.Info("Error removing bad TSM file", zap.Error(err))
			} else if e := os.Rename(path, path+"."+BadTSMFileExtension); e != nil {
				// Log the rename failure itself (previously this logged the
				// unrelated compaction error).
				log.Info("Error renaming corrupt TSM file", zap.Error(e))
			}
		}

		atomic.AddInt64(s.errorStat, 1)
		time.Sleep(time.Second)
		return
	}

	if err := s.fileStore.ReplaceWithCallback(group, files, nil); err != nil {
		log.Info("Error replacing new TSM files", zap.Error(err))
		atomic.AddInt64(s.errorStat, 1)
		time.Sleep(time.Second)

		// Remove the new snapshot files. We will try again.
		for _, file := range files {
			if err := os.Remove(file); err != nil {
				log.Error("Unable to remove file", zap.String("path", file), zap.Error(err))
			}
		}
		return
	}

	for i, f := range files {
		log.Info("Compacted file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f))
	}
	log.Info("Finished compacting files",
		zap.Int("tsm1_files_n", len(files)))
	atomic.AddInt64(s.successStat, 1)
}
// levelCompactionStrategy returns a compactionStrategy for the given level.
// NOTE(review): the original comment claimed this returns nil when there are
// no TSM files to compact, but this implementation always returns a non-nil
// strategy; callers' nil checks are currently dead code.
func (e *Engine) levelCompactionStrategy(group CompactionGroup, fast bool, level int) *compactionStrategy {
	return &compactionStrategy{
		group:        group,
		logger:       e.logger.With(zap.Int("tsm1_level", level), zap.String("tsm1_strategy", "level")),
		fileStore:    e.FileStore,
		compactor:    e.Compactor,
		fast:         fast,
		engine:       e,
		level:        level,
		activeStat:   &e.stats.TSMCompactionsActive[level-1],
		successStat:  &e.stats.TSMCompactions[level-1],
		errorStat:    &e.stats.TSMCompactionErrors[level-1],
		durationStat: &e.stats.TSMCompactionDuration[level-1],
	}
}
// fullCompactionStrategy returns a compactionStrategy for higher level generations of TSM files.
// When optimize is true the fast path is used and the optimize stat counters
// are wired in; otherwise the full-compaction counters are used.
// NOTE(review): the original comment claimed this returns nil when there are
// no TSM files to compact, but this implementation always returns a non-nil
// strategy.
func (e *Engine) fullCompactionStrategy(group CompactionGroup, optimize bool) *compactionStrategy {
	s := &compactionStrategy{
		group:     group,
		logger:    e.logger.With(zap.String("tsm1_strategy", "full"), zap.Bool("tsm1_optimize", optimize)),
		fileStore: e.FileStore,
		compactor: e.Compactor,
		fast:      optimize,
		engine:    e,
		level:     4,
	}

	if optimize {
		s.activeStat = &e.stats.TSMOptimizeCompactionsActive
		s.successStat = &e.stats.TSMOptimizeCompactions
		s.errorStat = &e.stats.TSMOptimizeCompactionErrors
		s.durationStat = &e.stats.TSMOptimizeCompactionDuration
	} else {
		s.activeStat = &e.stats.TSMFullCompactionsActive
		s.successStat = &e.stats.TSMFullCompactions
		s.errorStat = &e.stats.TSMFullCompactionErrors
		s.durationStat = &e.stats.TSMFullCompactionDuration
	}

	return s
}
// reloadCache reads the WAL segment files and loads them into the cache.
func (e *Engine) reloadCache() error {
	start := time.Now()

	segments, err := segmentFileNames(e.WAL.Path())
	if err != nil {
		return err
	}

	// Temporarily lift the cache size limit so a large WAL can be replayed,
	// restoring the configured limit once loading finishes.
	maxSize := e.Cache.MaxSize()
	defer e.Cache.SetMaxSize(maxSize)
	e.Cache.SetMaxSize(0)

	loader := NewCacheLoader(segments)
	loader.WithLogger(e.logger)
	if err := loader.Load(e.Cache); err != nil {
		return err
	}

	e.traceLogger.Info("Reloaded WAL cache",
		zap.String("path", e.WAL.Path()), zap.Duration("duration", time.Since(start)))
	return nil
}
// cleanup removes all temp files and dirs that exist on disk. This should
// only be run at startup to avoid removing tmp files that are still in use.
func (e *Engine) cleanup() error {
	entries, err := ioutil.ReadDir(e.path)
	if os.IsNotExist(err) {
		return nil
	} else if err != nil {
		return err
	}

	tmpExt := fmt.Sprintf(".%s", TmpTSMFileExtension)
	for _, entry := range entries {
		// Check to see if there are any `.tmp` directories that were left over from failed shard snapshots
		if !entry.IsDir() || !strings.HasSuffix(entry.Name(), tmpExt) {
			continue
		}
		if err := os.RemoveAll(filepath.Join(e.path, entry.Name())); err != nil {
			return fmt.Errorf("error removing tmp snapshot directory %q: %s", entry.Name(), err)
		}
	}

	return e.cleanupTempTSMFiles()
}
// cleanupTempTSMFiles removes any lingering compaction temp files from the
// engine's data directory.
func (e *Engine) cleanupTempTSMFiles() error {
	pattern := filepath.Join(e.path, fmt.Sprintf("*.%s", CompactionTempExtension))
	matches, err := filepath.Glob(pattern)
	if err != nil {
		return fmt.Errorf("error getting compaction temp files: %s", err.Error())
	}

	for _, match := range matches {
		if err := os.Remove(match); err != nil {
			return fmt.Errorf("error removing temp compaction files: %v", err)
		}
	}
	return nil
}
// KeyCursor returns a KeyCursor for the given key starting at time t.
// It delegates directly to the file store.
func (e *Engine) KeyCursor(ctx context.Context, key []byte, t int64, ascending bool) *KeyCursor {
	return e.FileStore.KeyCursor(ctx, key, t, ascending)
}
// CreateIterator returns an iterator for the measurement based on opt.
func (e *Engine) CreateIterator(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error) {
	// If tracing is enabled, attach a span plus a metrics group that records
	// iterator-planning time.
	if span := tracing.SpanFromContext(ctx); span != nil {
		labels := []string{"shard_id", strconv.Itoa(int(e.id)), "measurement", measurement}
		if opt.Condition != nil {
			labels = append(labels, "cond", opt.Condition.String())
		}

		span = span.StartSpan("create_iterator")
		span.SetLabels(labels...)
		ctx = tracing.NewContextWithSpan(ctx, span)

		group := metrics.NewGroup(tsmGroup)
		ctx = metrics.NewContextWithGroup(ctx, group)
		start := time.Now()

		defer group.GetTimer(planningTimer).UpdateSince(start)
	}

	if call, ok := opt.Expr.(*influxql.Call); ok {
		if opt.Interval.IsZero() {
			// Optimize un-grouped first()/last(): fetch a single ordered
			// point per series instead of building a full call iterator.
			if call.Name == "first" || call.Name == "last" {
				refOpt := opt
				refOpt.Limit = 1
				refOpt.Ascending = call.Name == "first"
				refOpt.Ordered = true
				refOpt.Expr = call.Args[0]

				itrs, err := e.createVarRefIterator(ctx, measurement, refOpt)
				if err != nil {
					return nil, err
				}
				return newMergeFinalizerIterator(ctx, itrs, opt, e.logger)
			}
		}

		inputs, err := e.createCallIterator(ctx, measurement, call, opt)
		if err != nil {
			return nil, err
		} else if len(inputs) == 0 {
			return nil, nil
		}
		return newMergeFinalizerIterator(ctx, inputs, opt, e.logger)
	}

	// Non-aggregate query: iterate the raw variable reference.
	itrs, err := e.createVarRefIterator(ctx, measurement, opt)
	if err != nil {
		return nil, err
	}
	return newMergeFinalizerIterator(ctx, itrs, opt, e.logger)
}
// indexTagSets is implemented by indexes (e.g. the inmem index) that can
// compute tag sets directly, without going through a tsdb.IndexSet.
type indexTagSets interface {
	TagSets(name []byte, options query.IteratorOptions) ([]*query.TagSet, error)
}
// createCallIterator builds one merged iterator per tag set for the given
// aggregate call, wrapping each series iterator in a call iterator. It
// returns nil, nil if the measurement does not exist.
func (e *Engine) createCallIterator(ctx context.Context, measurement string, call *influxql.Call, opt query.IteratorOptions) ([]query.Iterator, error) {
	ref, _ := call.Args[0].(*influxql.VarRef)

	if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {
		return nil, err
	} else if !exists {
		return nil, nil
	}

	// Determine tagsets for this measurement based on dimensions and filters.
	var (
		tagSets []*query.TagSet
		err     error
	)
	if e.index.Type() == tsdb.InmemIndexName {
		// The inmem index can compute tag sets directly.
		ts := e.index.(indexTagSets)
		tagSets, err = ts.TagSets([]byte(measurement), opt)
	} else {
		indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
		tagSets, err = indexSet.TagSets(e.sfile, []byte(measurement), opt)
	}

	if err != nil {
		return nil, err
	}

	// Reverse the tag sets if we are ordering by descending.
	if !opt.Ascending {
		for _, t := range tagSets {
			t.Reverse()
		}
	}

	// Calculate tag sets and apply SLIMIT/SOFFSET.
	tagSets = query.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)

	itrs := make([]query.Iterator, 0, len(tagSets))
	if err := func() error {
		for _, t := range tagSets {
			// Abort if the query was killed
			select {
			case <-opt.InterruptCh:
				query.Iterators(itrs).Close()
				return query.ErrQueryInterrupted
			default:
			}

			inputs, err := e.createTagSetIterators(ctx, ref, measurement, t, opt)
			if err != nil {
				return err
			} else if len(inputs) == 0 {
				continue
			}

			// Wrap each series in a call iterator.
			for i, input := range inputs {
				if opt.InterruptCh != nil {
					input = query.NewInterruptIterator(input, opt.InterruptCh)
				}

				itr, err := query.NewCallIterator(input, opt)
				if err != nil {
					query.Iterators(inputs).Close()
					return err
				}
				inputs[i] = itr
			}

			itr := query.NewParallelMergeIterator(inputs, opt, runtime.GOMAXPROCS(0))
			itrs = append(itrs, itr)
		}
		return nil
	}(); err != nil {
		// Ensure every iterator created so far is closed on error.
		query.Iterators(itrs).Close()
		return nil, err
	}

	return itrs, nil
}
// createVarRefIterator creates an iterator for a variable reference.
// It returns one merged (and optionally limited) iterator per tag set, or
// nil, nil if the measurement does not exist.
func (e *Engine) createVarRefIterator(ctx context.Context, measurement string, opt query.IteratorOptions) ([]query.Iterator, error) {
	ref, _ := opt.Expr.(*influxql.VarRef)

	if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {
		return nil, err
	} else if !exists {
		return nil, nil
	}

	// Determine tagsets for this measurement based on dimensions and filters.
	var (
		tagSets []*query.TagSet
		err     error
	)
	if e.index.Type() == tsdb.InmemIndexName {
		// The inmem index can compute tag sets directly.
		ts := e.index.(indexTagSets)
		tagSets, err = ts.TagSets([]byte(measurement), opt)
	} else {
		indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
		tagSets, err = indexSet.TagSets(e.sfile, []byte(measurement), opt)
	}

	if err != nil {
		return nil, err
	}

	// Reverse the tag sets if we are ordering by descending.
	if !opt.Ascending {
		for _, t := range tagSets {
			t.Reverse()
		}
	}

	// Calculate tag sets and apply SLIMIT/SOFFSET.
	tagSets = query.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)

	itrs := make([]query.Iterator, 0, len(tagSets))
	if err := func() error {
		for _, t := range tagSets {
			inputs, err := e.createTagSetIterators(ctx, ref, measurement, t, opt)
			if err != nil {
				return err
			} else if len(inputs) == 0 {
				continue
			}

			// If we have a LIMIT or OFFSET and the grouping of the outer query
			// is different than the current grouping, we need to perform the
			// limit on each of the individual series keys instead to improve
			// performance.
			if (opt.Limit > 0 || opt.Offset > 0) && len(opt.Dimensions) != len(opt.GroupBy) {
				for i, input := range inputs {
					inputs[i] = newLimitIterator(input, opt)
				}
			}

			itr, err := query.Iterators(inputs).Merge(opt)
			if err != nil {
				query.Iterators(inputs).Close()
				return err
			}

			// Apply a limit on the merged iterator.
			if opt.Limit > 0 || opt.Offset > 0 {
				if len(opt.Dimensions) == len(opt.GroupBy) {
					// When the final dimensions and the current grouping are
					// the same, we will only produce one series so we can use
					// the faster limit iterator.
					itr = newLimitIterator(itr, opt)
				} else {
					// When the dimensions are different than the current
					// grouping, we need to account for the possibility there
					// will be multiple series. The limit iterator in the
					// influxql package handles that scenario.
					itr = query.NewLimitIterator(itr, opt)
				}
			}
			itrs = append(itrs, itr)
		}
		return nil
	}(); err != nil {
		// Ensure every iterator created so far is closed on error.
		query.Iterators(itrs).Close()
		return nil, err
	}

	return itrs, nil
}
// createTagSetIterators creates a set of iterators for a tagset.
//
// The tagset's series keys are split into roughly equal groups that are read
// in parallel (bounded by GOMAXPROCS); the per-group iterators are then
// concatenated. If any group fails, every created iterator is closed.
func (e *Engine) createTagSetIterators(ctx context.Context, ref *influxql.VarRef, name string, t *query.TagSet, opt query.IteratorOptions) ([]query.Iterator, error) {
	// No series keys: nothing to build. This also guards the group-size
	// division below against a zero parallelism value (divide-by-zero panic).
	if len(t.SeriesKeys) == 0 {
		return nil, nil
	}

	// Set parallelism by number of logical cpus.
	parallelism := runtime.GOMAXPROCS(0)
	if parallelism > len(t.SeriesKeys) {
		parallelism = len(t.SeriesKeys)
	}

	// Create series key groupings w/ return error.
	groups := make([]struct {
		keys    []string
		filters []influxql.Expr
		itrs    []query.Iterator
		err     error
	}, parallelism)

	// Group series keys.
	n := len(t.SeriesKeys) / parallelism
	for i := 0; i < parallelism; i++ {
		group := &groups[i]

		if i < parallelism-1 {
			group.keys = t.SeriesKeys[i*n : (i+1)*n]
			group.filters = t.Filters[i*n : (i+1)*n]
		} else {
			// The last group absorbs the remainder.
			group.keys = t.SeriesKeys[i*n:]
			group.filters = t.Filters[i*n:]
		}
	}

	// Read series groups in parallel.
	var wg sync.WaitGroup
	for i := range groups {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			groups[i].itrs, groups[i].err = e.createTagSetGroupIterators(ctx, ref, name, groups[i].keys, t, groups[i].filters, opt)
		}(i)
	}
	wg.Wait()

	// Determine total number of iterators so we can allocate only once.
	var itrN int
	for _, group := range groups {
		itrN += len(group.itrs)
	}

	// Combine all iterators together and check for errors.
	var err error
	itrs := make([]query.Iterator, 0, itrN)
	for _, group := range groups {
		if group.err != nil {
			err = group.err
		}
		itrs = append(itrs, group.itrs...)
	}

	// If an error occurred, make sure we close all created iterators.
	if err != nil {
		query.Iterators(itrs).Close()
		return nil, err
	}

	return itrs, nil
}
// createTagSetGroupIterators creates a set of iterators for a subset of a tagset's series.
// seriesKeys and filters are parallel slices. On error, iterators created so
// far are returned alongside the error; the caller is responsible for closing
// them.
func (e *Engine) createTagSetGroupIterators(ctx context.Context, ref *influxql.VarRef, name string, seriesKeys []string, t *query.TagSet, filters []influxql.Expr, opt query.IteratorOptions) ([]query.Iterator, error) {
	itrs := make([]query.Iterator, 0, len(seriesKeys))
	for i, seriesKey := range seriesKeys {
		var conditionFields []influxql.VarRef
		if filters[i] != nil {
			// Retrieve non-time fields from this series filter and filter out tags.
			conditionFields = influxql.ExprNames(filters[i])
		}

		itr, err := e.createVarRefSeriesIterator(ctx, ref, name, seriesKey, t, filters[i], conditionFields, opt)
		if err != nil {
			return itrs, err
		} else if itr == nil {
			continue
		}
		itrs = append(itrs, itr)

		// Abort if the query was killed
		select {
		case <-opt.InterruptCh:
			query.Iterators(itrs).Close()
			return nil, query.ErrQueryInterrupted
		default:
		}

		// Enforce series limit at creation time.
		if opt.MaxSeriesN > 0 && len(itrs) > opt.MaxSeriesN {
			query.Iterators(itrs).Close()
			return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", len(itrs), opt.MaxSeriesN)
		}
	}
	return itrs, nil
}
// createVarRefSeriesIterator creates an iterator for a variable reference for a series.
// It builds the main cursor for ref (returning nil, nil if the field does not
// exist), auxiliary cursors for opt.Aux, and conditional-field cursors for
// conditionFields, then wraps them in a typed iterator matching the main
// cursor's type. When ref is nil only auxiliary fields are requested and a
// float iterator with a nil main cursor is used.
func (e *Engine) createVarRefSeriesIterator(ctx context.Context, ref *influxql.VarRef, name string, seriesKey string, t *query.TagSet, filter influxql.Expr, conditionFields []influxql.VarRef, opt query.IteratorOptions) (query.Iterator, error) {
	_, tfs := models.ParseKey([]byte(seriesKey))
	tags := query.NewTags(tfs.Map())

	// Create options specific for this series.
	itrOpt := opt
	itrOpt.Condition = filter

	// Optional cursor counters, present only when a metrics group is attached
	// to the context (e.g. while tracing).
	var curCounter, auxCounter, condCounter *metrics.Counter
	if col := metrics.GroupFromContext(ctx); col != nil {
		curCounter = col.GetCounter(numberOfRefCursorsCounter)
		auxCounter = col.GetCounter(numberOfAuxCursorsCounter)
		condCounter = col.GetCounter(numberOfCondCursorsCounter)
	}

	// Build main cursor.
	var cur cursor
	if ref != nil {
		cur = e.buildCursor(ctx, name, seriesKey, tfs, ref, opt)
		// If the field doesn't exist then don't build an iterator.
		if cur == nil {
			return nil, nil
		}
		if curCounter != nil {
			curCounter.Add(1)
		}
	}

	// Build auxiliary cursors.
	// Tag values should be returned if the field doesn't exist.
	var aux []cursorAt
	if len(opt.Aux) > 0 {
		aux = make([]cursorAt, len(opt.Aux))
		for i, ref := range opt.Aux {
			// Create cursor from field if a tag wasn't requested.
			if ref.Type != influxql.Tag {
				cur := e.buildCursor(ctx, name, seriesKey, tfs, &ref, opt)
				if cur != nil {
					if auxCounter != nil {
						auxCounter.Add(1)
					}
					aux[i] = newBufCursor(cur, opt.Ascending)
					continue
				}

				// If a field was requested, use a nil cursor of the requested type.
				switch ref.Type {
				case influxql.Float, influxql.AnyField:
					aux[i] = nilFloatLiteralValueCursor
					continue
				case influxql.Integer:
					aux[i] = nilIntegerLiteralValueCursor
					continue
				case influxql.Unsigned:
					aux[i] = nilUnsignedLiteralValueCursor
					continue
				case influxql.String:
					aux[i] = nilStringLiteralValueCursor
					continue
				case influxql.Boolean:
					aux[i] = nilBooleanLiteralValueCursor
					continue
				}
			}

			// If field doesn't exist, use the tag value.
			if v := tags.Value(ref.Val); v == "" {
				// However, if the tag value is blank then return a null.
				aux[i] = nilStringLiteralValueCursor
			} else {
				aux[i] = &literalValueCursor{value: v}
			}
		}
	}

	// Remove _tagKey condition field.
	// We can't search on it because we can't join it to _tagValue based on time.
	if varRefSliceContains(conditionFields, "_tagKey") {
		conditionFields = varRefSliceRemove(conditionFields, "_tagKey")

		// Remove _tagKey conditional references from iterator.
		itrOpt.Condition = influxql.RewriteExpr(influxql.CloneExpr(itrOpt.Condition), func(expr influxql.Expr) influxql.Expr {
			switch expr := expr.(type) {
			case *influxql.BinaryExpr:
				// Replace comparisons involving _tagKey with a constant true.
				if ref, ok := expr.LHS.(*influxql.VarRef); ok && ref.Val == "_tagKey" {
					return &influxql.BooleanLiteral{Val: true}
				}
				if ref, ok := expr.RHS.(*influxql.VarRef); ok && ref.Val == "_tagKey" {
					return &influxql.BooleanLiteral{Val: true}
				}
			}
			return expr
		})
	}

	// Build conditional field cursors.
	// If a conditional field doesn't exist then ignore the series.
	var conds []cursorAt
	if len(conditionFields) > 0 {
		conds = make([]cursorAt, len(conditionFields))
		for i, ref := range conditionFields {
			// Create cursor from field if a tag wasn't requested.
			if ref.Type != influxql.Tag {
				cur := e.buildCursor(ctx, name, seriesKey, tfs, &ref, opt)
				if cur != nil {
					if condCounter != nil {
						condCounter.Add(1)
					}
					conds[i] = newBufCursor(cur, opt.Ascending)
					continue
				}

				// If a field was requested, use a nil cursor of the requested type.
				switch ref.Type {
				case influxql.Float, influxql.AnyField:
					conds[i] = nilFloatLiteralValueCursor
					continue
				case influxql.Integer:
					conds[i] = nilIntegerLiteralValueCursor
					continue
				case influxql.Unsigned:
					conds[i] = nilUnsignedLiteralValueCursor
					continue
				case influxql.String:
					conds[i] = nilStringLiteralValueCursor
					continue
				case influxql.Boolean:
					conds[i] = nilBooleanLiteralValueCursor
					continue
				}
			}

			// If field doesn't exist, use the tag value.
			if v := tags.Value(ref.Val); v == "" {
				// However, if the tag value is blank then return a null.
				conds[i] = nilStringLiteralValueCursor
			} else {
				conds[i] = &literalValueCursor{value: v}
			}
		}
	}
	condNames := influxql.VarRefs(conditionFields).Strings()

	// Limit tags to only the dimensions selected.
	dimensions := opt.GetDimensions()
	tags = tags.Subset(dimensions)

	// If it's only auxiliary fields then it doesn't matter what type of iterator we use.
	if ref == nil {
		if opt.StripName {
			name = ""
		}
		return newFloatIterator(name, tags, itrOpt, nil, aux, conds, condNames), nil
	}

	// Remove name if requested.
	if opt.StripName {
		name = ""
	}

	// Dispatch on the concrete cursor type to build a matching typed iterator.
	switch cur := cur.(type) {
	case floatCursor:
		return newFloatIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	case integerCursor:
		return newIntegerIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	case unsignedCursor:
		return newUnsignedIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	case stringCursor:
		return newStringIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	case booleanCursor:
		return newBooleanIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	default:
		panic("unreachable")
	}
}
// buildCursor creates an untyped cursor for a field.
//
// System fields (_name, _tagKey, _tagValue, _seriesKey, _fieldKey) are
// answered from in-memory metadata without touching TSM data. Regular fields
// are read through a typed cursor; when the caller requests a different
// numeric type than the one stored, a casting wrapper cursor is returned
// instead. A nil return means the measurement or field does not exist, or
// the requested cast is unsupported.
func (e *Engine) buildCursor(ctx context.Context, measurement, seriesKey string, tags models.Tags, ref *influxql.VarRef, opt query.IteratorOptions) cursor {
	// Check if this is a system field cursor.
	switch ref.Val {
	case "_name":
		return &stringSliceCursor{values: []string{measurement}}
	case "_tagKey":
		return &stringSliceCursor{values: tags.Keys()}
	case "_tagValue":
		return &stringSliceCursor{values: matchTagValues(tags, opt.Condition)}
	case "_seriesKey":
		return &stringSliceCursor{values: []string{seriesKey}}
	}

	// Look up fields for measurement.
	mf := e.fieldset.FieldsByString(measurement)
	if mf == nil {
		return nil
	}

	// Check for system field for field keys.
	if ref.Val == "_fieldKey" {
		return &stringSliceCursor{values: mf.FieldKeys()}
	}

	// Find individual field.
	f := mf.Field(ref.Val)
	if f == nil {
		return nil
	}

	// Check if we need to perform a cast. Performing a cast in the
	// engine (if it is possible) is much more efficient than an automatic cast.
	if ref.Type != influxql.Unknown && ref.Type != influxql.AnyField && ref.Type != f.Type {
		switch ref.Type {
		case influxql.Float:
			switch f.Type {
			case influxql.Integer:
				cur := e.buildIntegerCursor(ctx, measurement, seriesKey, ref.Val, opt)
				return &floatCastIntegerCursor{cursor: cur}
			case influxql.Unsigned:
				cur := e.buildUnsignedCursor(ctx, measurement, seriesKey, ref.Val, opt)
				return &floatCastUnsignedCursor{cursor: cur}
			}
		case influxql.Integer:
			switch f.Type {
			case influxql.Float:
				cur := e.buildFloatCursor(ctx, measurement, seriesKey, ref.Val, opt)
				return &integerCastFloatCursor{cursor: cur}
			case influxql.Unsigned:
				cur := e.buildUnsignedCursor(ctx, measurement, seriesKey, ref.Val, opt)
				return &integerCastUnsignedCursor{cursor: cur}
			}
		case influxql.Unsigned:
			switch f.Type {
			case influxql.Float:
				cur := e.buildFloatCursor(ctx, measurement, seriesKey, ref.Val, opt)
				return &unsignedCastFloatCursor{cursor: cur}
			case influxql.Integer:
				cur := e.buildIntegerCursor(ctx, measurement, seriesKey, ref.Val, opt)
				return &unsignedCastIntegerCursor{cursor: cur}
			}
		}
		// Unsupported cast combination (e.g. string <-> numeric): no cursor.
		return nil
	}

	// Return appropriate cursor based on type.
	switch f.Type {
	case influxql.Float:
		return e.buildFloatCursor(ctx, measurement, seriesKey, ref.Val, opt)
	case influxql.Integer:
		return e.buildIntegerCursor(ctx, measurement, seriesKey, ref.Val, opt)
	case influxql.Unsigned:
		return e.buildUnsignedCursor(ctx, measurement, seriesKey, ref.Val, opt)
	case influxql.String:
		return e.buildStringCursor(ctx, measurement, seriesKey, ref.Val, opt)
	case influxql.Boolean:
		return e.buildBooleanCursor(ctx, measurement, seriesKey, ref.Val, opt)
	default:
		panic("unreachable")
	}
}
// matchTagValues returns the values of every tag whose key satisfies
// condition. With no condition, all tag values are returned.
func matchTagValues(tags models.Tags, condition influxql.Expr) []string {
	if condition == nil {
		return tags.Values()
	}

	// Seed the evaluation environment with every tag's key/value pair.
	env := make(map[string]interface{}, len(tags)+1)
	for _, t := range tags {
		env[string(t.Key)] = string(t.Value)
	}

	// Evaluate the condition once per tag, binding _tagKey to that tag's key.
	var matched []string
	for _, t := range tags {
		env["_tagKey"] = string(t.Key)
		if influxql.EvalBool(condition, env) {
			matched = append(matched, string(t.Value))
		}
	}
	return matched
}
// IteratorCost produces the cost of an iterator over measurement with the
// given options. The cost aggregates, per matching series: the main
// expression's field (if any), every auxiliary field, and every field
// referenced by the tag set's per-series filter conditions.
func (e *Engine) IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error) {
	// Determine if this measurement exists. If it does not, then no shards are
	// accessed to begin with.
	if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {
		return query.IteratorCost{}, err
	} else if !exists {
		return query.IteratorCost{}, nil
	}

	// Determine all of the tag sets for this query.
	indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
	tagSets, err := indexSet.TagSets(e.sfile, []byte(measurement), opt)
	if err != nil {
		return query.IteratorCost{}, err
	}

	// Attempt to retrieve the ref from the main expression (if it exists).
	// A bare VarRef is used directly; for a call like mean(field), the first
	// argument is the field reference.
	var ref *influxql.VarRef
	if opt.Expr != nil {
		if v, ok := opt.Expr.(*influxql.VarRef); ok {
			ref = v
		} else if call, ok := opt.Expr.(*influxql.Call); ok {
			if len(call.Args) > 0 {
				ref, _ = call.Args[0].(*influxql.VarRef)
			}
		}
	}

	// Count the number of series concatenated from the tag set.
	cost := query.IteratorCost{NumShards: 1}
	for _, t := range tagSets {
		cost.NumSeries += int64(len(t.SeriesKeys))
		for i, key := range t.SeriesKeys {
			// Retrieve the cost for the main expression (if it exists).
			if ref != nil {
				c := e.seriesCost(key, ref.Val, opt.StartTime, opt.EndTime)
				cost = cost.Combine(c)
			}

			// Retrieve the cost for every auxiliary field since these are also
			// iterators that we may have to look through.
			// We may want to separate these though as we are unlikely to incur
			// anywhere close to the full costs of the auxiliary iterators because
			// many of the selected values are usually skipped.
			for _, ref := range opt.Aux {
				c := e.seriesCost(key, ref.Val, opt.StartTime, opt.EndTime)
				cost = cost.Combine(c)
			}

			// Retrieve the expression names in the condition (if there is a condition).
			// We will also create cursors for these too.
			if t.Filters[i] != nil {
				refs := influxql.ExprNames(t.Filters[i])
				for _, ref := range refs {
					c := e.seriesCost(key, ref.Val, opt.StartTime, opt.EndTime)
					cost = cost.Combine(c)
				}
			}
		}
	}
	return cost, nil
}
// Type returns the FieldType for a series, consulting the in-memory cache
// first and falling back to the TSM file store. ErrUnknownFieldType is
// returned when the stored block type is not a recognized value type.
func (e *Engine) Type(series []byte) (models.FieldType, error) {
	// Fast path: the cache already knows the type.
	if fieldType, err := e.Cache.Type(series); err == nil {
		return fieldType, nil
	}

	blockType, err := e.FileStore.Type(series)
	if err != nil {
		return 0, err
	}

	if blockType == BlockFloat64 {
		return models.Float, nil
	}
	if blockType == BlockInteger {
		return models.Integer, nil
	}
	if blockType == BlockUnsigned {
		return models.Unsigned, nil
	}
	if blockType == BlockString {
		return models.String, nil
	}
	if blockType == BlockBoolean {
		return models.Boolean, nil
	}
	return 0, tsdb.ErrUnknownFieldType
}
// seriesCost computes the iterator cost for a single series field over the
// time range [tmin, tmax], combining the TSM file store's cost with the
// number of matching values currently held in the cache.
func (e *Engine) seriesCost(seriesKey, field string, tmin, tmax int64) query.IteratorCost {
	key := SeriesFieldKeyBytes(seriesKey, field)
	c := e.FileStore.Cost(key, tmin, tmax)

	// Retrieve the range of values within the cache.
	cacheValues := e.Cache.Values(key)
	c.CachedValues = int64(len(cacheValues.Include(tmin, tmax)))
	return c
}
// SeriesFieldKey combines a series key and field name into a unique string
// suitable for hashing to a numeric ID.
func SeriesFieldKey(seriesKey, field string) string {
	return seriesKey + keyFieldSeparator + field
}
// SeriesFieldKeyBytes returns the composite series+field key as a byte
// slice, allocated once at its exact final size.
func SeriesFieldKeyBytes(seriesKey, field string) []byte {
	b := make([]byte, 0, len(seriesKey)+len(keyFieldSeparator)+len(field))
	b = append(b, seriesKey...)
	b = append(b, keyFieldSeparatorBytes...)
	b = append(b, field...)
	return b
}
var (
	// blockToFieldType maps a TSM block type code (low 3 bits) to the
	// corresponding influxql data type; codes 5-7 are unused and map to
	// Unknown.
	blockToFieldType = [8]influxql.DataType{
		BlockFloat64:  influxql.Float,
		BlockInteger:  influxql.Integer,
		BlockBoolean:  influxql.Boolean,
		BlockString:   influxql.String,
		BlockUnsigned: influxql.Unsigned,
		5:             influxql.Unknown,
		6:             influxql.Unknown,
		7:             influxql.Unknown,
	}
)
// BlockTypeToInfluxQLDataType converts a TSM block type byte to an influxql
// data type. The &7 mask keeps the index within the lookup table's bounds.
func BlockTypeToInfluxQLDataType(typ byte) influxql.DataType { return blockToFieldType[typ&7] }
// SeriesAndFieldFromCompositeKey splits a composite key into its series key
// and field key components. When no separator is present, the whole key is
// treated as the series key and a nil field is returned.
func SeriesAndFieldFromCompositeKey(key []byte) ([]byte, []byte) {
	i := bytes.Index(key, keyFieldSeparatorBytes)
	if i < 0 {
		// No field???
		return key, nil
	}
	series := key[:i]
	field := key[i+len(keyFieldSeparator):]
	return series, field
}
// varRefSliceContains reports whether any VarRef in a has the value v.
func varRefSliceContains(a []influxql.VarRef, v string) bool {
	for i := range a {
		if a[i].Val == v {
			return true
		}
	}
	return false
}
// varRefSliceRemove returns a copy of a with every VarRef whose value is v
// removed. When a contains no such element, a itself is returned unchanged
// to avoid an allocation.
func varRefSliceRemove(a []influxql.VarRef, v string) []influxql.VarRef {
	if !varRefSliceContains(a, v) {
		return a
	}
	kept := make([]influxql.VarRef, 0, len(a))
	for _, r := range a {
		if r.Val == v {
			continue
		}
		kept = append(kept, r)
	}
	return kept
}
| [
"\"INFLUXDB_SERIES_TYPE_CHECK_ENABLED\""
]
| []
| [
"INFLUXDB_SERIES_TYPE_CHECK_ENABLED"
]
| [] | ["INFLUXDB_SERIES_TYPE_CHECK_ENABLED"] | go | 1 | 0 | |
tomo_encoders/neural_nets/autoencoders.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
class implementations for real-time 3D feature extraction
"""
import pandas as pd
import os
import glob
import numpy as np
from tomo_encoders import *
from tensorflow import keras
from tomo_encoders import Patches
import tensorflow as tf
from tensorflow.keras.models import load_model
from multiprocessing import Pool, cpu_count
import functools
import h5py
import abc
import time
# from tensorflow import RunOptions
from tensorflow.keras.backend import random_normal
from tensorflow import map_fn, constant, reduce_max, reduce_min
from tensorflow.keras import layers as L
# tensorflow configs
# Silence TensorFlow's C++ log output (level 3 = errors only); must be set
# before TensorFlow initializes its backend.
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
def insert_activation(tensor_in, activation):
    """Apply the requested activation to a tensor.

    Parameters
    ----------
    tensor_in : tensor
        input tensor of rank 2 (FC layer), 4 (image) or 5 (volume)
    activation : str or tf.keras.layers.Activation or None
        ``'lrelu'`` for LeakyReLU(alpha=0.2), any Keras activation name for
        a standard Activation layer, or None to return the input unchanged.

    Returns
    -------
    tensor
        activated tensor with the same rank as ``tensor_in``
    """
    if activation is None:
        return tensor_in
    if activation == 'lrelu':
        return L.LeakyReLU(alpha=0.2)(tensor_in)
    return L.Activation(activation)(tensor_in)
def hidden_layer(tensor_in, n_hidden, activation=None, batch_norm=False):
    """Fully-connected layer with optional batch normalization and activation.

    Order of operations: Dense -> (BatchNorm) -> activation.

    Parameters
    ----------
    tensor_in : tensor
        input tensor
    n_hidden : int
        number of units in the dense layer (output width)
    activation : str or tf.keras.layers.Activation
        name of custom activation or Keras activation layer
    batch_norm : bool
        True to insert a BatchNormalization layer after the dense layer

    Returns
    -------
    tensor
        of rank 2 (batch_size, n_hidden)
    """
    out = L.Dense(n_hidden, activation=None)(tensor_in)
    if batch_norm:
        out = L.BatchNormalization(momentum=0.9, epsilon=1e-5)(out)
    return insert_activation(out, activation)
def stdize_vol(vol):
    """Min-max normalize a single volume to [0, 1].

    A small epsilon in the denominator avoids division by zero for
    constant-valued volumes.
    """
    eps = constant(1e-12, dtype='float32')
    lo = reduce_min(vol)
    hi = reduce_max(vol)
    return (vol - lo) / (hi - lo + eps)
def standardize(vols):
    # Apply per-volume min-max normalization independently across a batch.
    return map_fn(stdize_vol, vols)
def custom_Conv3D(tensor_in, n_filters, kern_size, activation=None, batch_norm=False):
    """3D convolution followed by optional batch norm and a custom activation.

    The layer order is CONV -> BatchNorm -> activation, following the
    ordering used in deep-image-prior style architectures
    (https://dmitryulyanov.github.io/deep_image_prior).

    Parameters
    ----------
    tensor_in : tensor
        input tensor of rank 5 (batch_size, nz, ny, nx, n_channels)
    n_filters : int
        number of convolutional filters
    kern_size : tuple
        kernel size, e.g. (3,3,3)
    activation : str or tf.keras.layers.Activation
        name of custom activation or Keras activation layer
    batch_norm : bool
        True to insert a BatchNormalization layer

    Returns
    -------
    tensor
        of rank 5 (batch_size, nz, ny, nx, n_filters)
    """
    out = L.Conv3D(n_filters, kern_size, activation=None, padding="same")(tensor_in)
    if batch_norm:
        out = L.BatchNormalization(momentum=0.9, epsilon=1e-5)(out)
    out = insert_activation(out, activation)
    return out
##############
# Contributed by Audrey Bartlett (Berkeley)
def analysis_block_small(tensor_in, n_filters, pool_size,
                         kern_size=None,
                         activation=None,
                         batch_norm=False):
    """Downsampling block: two stacked 3D conv layers, then 3D max pooling.

    The second convolution doubles the filter count before the spatial
    resolution is reduced by max pooling.

    Parameters
    ----------
    tensor_in : tensor
        input tensor of rank 5 (batch_size, nz, ny, nx, n_channels)
    n_filters : int
        number of filters in the first convolutional layer (second uses 2x)
    pool_size : tuple
        max pooling window, e.g. (2,2,2)
    kern_size : tuple
        convolution kernel size, e.g. (3,3,3)
    activation : str or tf.keras.layers.Activation
        name of custom activation or Keras activation layer
    batch_norm : bool
        True to insert a BatchNormalization layer after each convolution

    Returns
    -------
    tensor
        downsampled tensor of rank 5
    """
    conv1 = custom_Conv3D(tensor_in, n_filters, kern_size,
                          activation=activation,
                          batch_norm=batch_norm)
    conv2 = custom_Conv3D(conv1, 2 * n_filters, kern_size,
                          activation=activation,
                          batch_norm=batch_norm)
    return L.MaxPool3D(pool_size=pool_size, padding="same")(conv2)
def synthesis_block_small(tensor_in, n_filters, pool_size,
                          activation=None,
                          kern_size=3,
                          kern_size_upconv=2,
                          batch_norm=False):
    """Upsampling block with no skip connections.

    A transpose convolution reverses the pooling stride, then two 3D conv
    layers refine the upsampled feature maps.

    Parameters
    ----------
    tensor_in : tensor
        input tensor of rank 5 (batch_size, nz, ny, nx, n_channels)
    n_filters : int
        number of filters in each conv layer after the transpose conv
    pool_size : tuple
        stride of the transpose convolution (reverses the max pooling)
    kern_size : int
        kernel size for the conv layers, e.g. 3
    kern_size_upconv : int
        kernel size for the transpose conv, e.g. 2
    activation : str or tf.keras.layers.Activation
        name of custom activation or Keras activation layer
    batch_norm : bool
        True to insert a BatchNormalization layer after each convolution

    Returns
    -------
    tensor
        upsampled tensor of rank 5
    """
    # The transpose conv keeps the incoming channel count.
    n_up = tensor_in.shape[-1]
    up = L.Conv3DTranspose(n_up, kern_size_upconv, padding="same",
                           activation=None, strides=pool_size)(tensor_in)
    up = insert_activation(up, activation)

    out = custom_Conv3D(up, n_filters, kern_size,
                        activation=activation,
                        batch_norm=batch_norm)
    out = custom_Conv3D(out, n_filters, kern_size,
                        activation=activation,
                        batch_norm=batch_norm)
    return out
def build_encoder_r(input_shape, n_filters = [32, 64], \
                    n_blocks = 2, activation = 'lrelu',\
                    batch_norm = True, kern_size = 3, kern_size_upconv = 2,\
                    hidden_units = [128,32,2], pool_size = 2, POOL_FLAG = True):
    """
    @arshadzahangirchowdhury
    Define the encoder of a 3D convolutional autoencoder: n_blocks conv/pool
    stages, an optional extra pooling, then a stack of dense layers ending in
    the latent code of width hidden_units[-1].

    Returns
    -------
    tf.keras.Model
        the encoder model
    flatten_shape : int
        width of the flattened conv feature vector (needed by the decoder)
    preflatten_shape : tuple
        conv feature-map shape just before flattening (needed by the decoder)

    Parameters
    ----------
    input_shape : tuple
        input volume shape (nz,ny,nx,1)
    n_filters : list
        number of filters per conv block; length must equal n_blocks
    n_blocks : int
        number of repeating conv/pool blocks
    activation : str or tf.keras.layers.Activation
        name of custom activation or Keras activation layer
    batch_norm : bool
        True to insert BN layers after the convolutional layers
    kern_size : int or tuple
        kernel size for conv layers in the downsampling blocks
    kern_size_upconv : int or tuple
        unused here; accepted so encoder/decoder share one kwargs dict
    hidden_units : list
        dense layer widths; the last value is the latent code size
    pool_size : int or list
        pooling per block; an int is broadcast to all blocks, a list must
        have length n_blocks
    POOL_FLAG : bool
        True to max-pool once more before flattening
    """
    inp = L.Input(input_shape)

    # Broadcast a scalar pool_size to one entry per block.
    if type(pool_size) is int:
        pool_size = [pool_size]*n_blocks
    elif len(pool_size) != n_blocks:
        raise ValueError("list length must be equal to number of blocks")

    # downsampling path. e.g. n_blocks = 3, n_filters = [16,32,64], input volume is 64^3
    for ii in range(n_blocks): #iterations
        if ii == 0:
            code = inp
        code = analysis_block_small(code, \
                                    n_filters[ii], \
                                    pool_size[ii], \
                                    kern_size = kern_size, \
                                    activation = activation, \
                                    batch_norm = batch_norm)

    if POOL_FLAG:
        # pool a second time before flattening
        code = L.MaxPool3D(pool_size = 2, padding = "same")(code)

    # Dense stack: all hidden_units except the last (the code itself,
    # appended after the loop).
    for ic, n_hidden in enumerate(hidden_units):
        if ic == len(hidden_units) - 1: # ic = 2 (last unit is the code)
            break
        elif ic == 0:
            # ic = 0 --> n_hidden = 128;
            # first hidden layer takes flattened vector as input; record the
            # shapes the decoder needs to invert the flatten.
            preflatten_shape = tuple(code.shape[1:])
            code = L.Flatten()(code)
            flatten_shape = code.shape[-1]
            code = hidden_layer(code, n_hidden, \
                                activation = activation, \
                                batch_norm = batch_norm)
        else:
            # ic = 1 --> n_hidden = 32;
            code = hidden_layer(code, n_hidden, \
                                activation = activation, \
                                batch_norm = batch_norm)

    # Latent code layer.
    z = hidden_layer(code, hidden_units[-1], \
                     activation = activation, \
                     batch_norm = True)

    # NOTE(review): debug prints left in place; consider switching to logging.
    print('inp:',inp)
    encoder = keras.models.Model(inp, z, name = "encoder")
    print('encoder:',encoder)
    return encoder, flatten_shape, preflatten_shape
def build_decoder_r(flatten_shape, preflatten_shape, n_filters = [32, 64], \
                    n_blocks = 2, activation = 'lrelu',\
                    batch_norm = True, kern_size = 3, kern_size_upconv = 2,\
                    hidden_units = [128,32,2], pool_size = 2, POOL_FLAG = True):
    """
    @arshadzahangirchowdhury
    Define the decoder of a 3D convolutional autoencoder (2 conv layers per
    block, no skip connections). Mirrors build_encoder_r: a dense stack maps
    the latent code back to the flattened conv feature vector, which is
    reshaped and upsampled through n_blocks synthesis blocks.

    Returns
    -------
    tf.keras.Model
        the decoder model (input: latent code of width hidden_units[-1];
        output: reconstructed volume with a sigmoid final activation)

    Parameters
    ----------
    flatten_shape : int
        width of the flattened conv feature vector (from build_encoder_r)
    preflatten_shape : tuple
        conv feature-map shape before flattening (from build_encoder_r)
    n_filters : list
        number of filters per conv block; length must equal n_blocks
    n_blocks : int
        number of repeating upsampling blocks
    activation : str or tf.keras.layers.Activation
        name of custom activation or Keras activation layer
    batch_norm : bool
        True to insert BN layers after the convolutional layers
    kern_size : int or tuple
        kernel size for conv layers in the upsampling blocks
    kern_size_upconv : int or tuple
        kernel size for the transpose conv layers
    hidden_units : list
        dense layer widths; the last value is the latent code size
    pool_size : int or list
        upsampling stride per block; an int is broadcast to all blocks, a
        list must have length n_blocks
    POOL_FLAG : bool
        True to upsample once before the synthesis blocks (mirrors the
        encoder's extra pooling)
    """
    # FIX: build_encoder_r normalizes an int pool_size into a per-block list,
    # but this function previously indexed pool_size[ii] directly, so the
    # default pool_size = 2 raised a TypeError. Apply the same normalization
    # and validation here for consistency.
    if type(pool_size) is int:
        pool_size = [pool_size]*n_blocks
    elif len(pool_size) != n_blocks:
        raise ValueError("list length must be equal to number of blocks")

    decoder_input=L.Input((hidden_units[-1],), name = "decoder_input")

    # Dense stack in reverse order of the encoder.
    for ic, n_hidden in enumerate(hidden_units[::-1]): # iterate as e.g. [16,32,128]
        if ic == 0:
            # The code width itself is the model input; skip it.
            decoded = decoder_input
        else:
            # ic = 1 --> n_hidden = 32
            # ic = 2 --> n_hidden = 128
            decoded = hidden_layer(decoded, n_hidden, activation = activation, batch_norm = batch_norm)

    # Map back to the flattened conv feature width, then reshape to
    # convolutional feature maps.
    decoded = hidden_layer(decoded, flatten_shape, activation = activation, batch_norm = batch_norm)
    decoded = L.Reshape(preflatten_shape)(decoded)

    if POOL_FLAG:
        # upsample once before the synthesis blocks (inverse of the
        # encoder's extra pooling)
        n_filters_upconv = decoded.shape[-1]
        decoded = L.Conv3DTranspose(n_filters_upconv, \
                                    kern_size_upconv, \
                                    padding = "same", \
                                    activation = None, \
                                    strides = 2) (decoded)
        decoded = insert_activation(decoded, activation)

    # upsampling path, mirroring the encoder's blocks in reverse
    for ii in range(n_blocks-1, -1, -1):
        decoded = synthesis_block_small(decoded, \
                                        n_filters[ii], \
                                        pool_size[ii], \
                                        activation = activation, \
                                        kern_size = kern_size, \
                                        kern_size_upconv = kern_size_upconv, \
                                        batch_norm = batch_norm)

    # Final 1x1x1 conv maps to a single channel in [0, 1].
    decoded = L.Conv3D(1, (1,1,1), activation = 'sigmoid', padding = "same")(decoded)
    decoder = keras.models.Model(decoder_input, decoded, name = "decoder")
    decoder.summary()
    return decoder
class RegularizedAutoencoder(keras.Model):
    """
    Autoencoder with a regularized training objective.

    Extends keras.Model with a custom train_step whose loss is the pixel-wise
    MSE plus a weighted regularization term (L1 on the latent code, or a
    KL-divergence term between input and reconstruction).

    Parameters
    ----------
    encoder : tf.keras.Model
        the encoder model.
    decoder : tf.keras.Model
        the decoder model.
    weight : float
        strength of the regularization loss (L1 or KL).
    regularization_type : str
        'kl' for Kullback-Leibler divergence loss, 'L1' for L1 loss on the
        latent code.
    """
    def __init__(self, encoder, decoder, weight=1/250.0,regularization_type='kl', **kwargs):
        super(RegularizedAutoencoder, self).__init__(**kwargs)

        # Sanity checks: the latent code should be a flat vector, and the
        # decoder must reconstruct exactly the encoder's input shape.
        if len(encoder.output_shape[1:]) !=1:
            print('WARNING: Encoder output is not a vector.')

        assert encoder.input_shape == decoder.output_shape, 'Encoder input shape and decoder output shape must match.'

        self.encoder = encoder
        self.decoder = decoder
        self.weight=float(weight)
        self.regularization_type=regularization_type

        # Running means reported by fit() each epoch.
        self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
        self.pixel_mse_loss_tracker = keras.metrics.Mean(
            name="pixel_mse_loss"
        )
        self.regularization_loss_tracker = keras.metrics.Mean(name="regularization_loss")

    @property
    def metrics(self):
        # Listing the trackers here makes Keras reset them between epochs.
        return [
            self.total_loss_tracker,
            self.pixel_mse_loss_tracker,
            self.regularization_loss_tracker,
        ]

    def train_step(self, data):
        """One optimization step: encode, decode, combine MSE with the
        weighted regularization term, and apply gradients."""
        with tf.GradientTape() as tape:
            z = self.encoder(data)
            decoded = self.decoder(z)
            pixel_mse_loss = tf.reduce_mean(keras.losses.mean_squared_error(data, decoded))

            #to-do: Try lambda function or tensorflow map
            if self.regularization_type=='L1':
                # Sparsity penalty on the latent code.
                regularization_loss=tf.reduce_mean(tf.abs(z))
            elif self.regularization_type=='kl':
                # KL divergence between input and reconstruction.
                regularization_loss = tf.reduce_mean(keras.losses.kl_divergence(data, decoded))
            else:
                raise ValueError("Regularization loss must be either 'L1' or 'kl' " )

            total_loss = pixel_mse_loss + self.weight*regularization_loss

        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        self.total_loss_tracker.update_state(total_loss)
        self.pixel_mse_loss_tracker.update_state(pixel_mse_loss)
        self.regularization_loss_tracker.update_state(regularization_loss)

        return {
            "loss": self.total_loss_tracker.result(),
            "pixel_mse_loss": self.pixel_mse_loss_tracker.result(),
            "regularization_loss": self.regularization_loss_tracker.result()
        }
from tomo_encoders.neural_nets.keras_processor import EmbeddingLearner
class SelfSupervisedCAE(EmbeddingLearner):
    """Self-supervised 3D convolutional autoencoder for feature extraction.

    Wraps an encoder/decoder pair (see build_encoder_r / build_decoder_r)
    behind the EmbeddingLearner interface: building or loading the models,
    training on randomly sampled sub-volumes, and predicting latent
    embeddings for batches of patches.
    """
    def __init__(self, **kwargs):
        '''
        models : dict of tf.keras.Models.model
            dict contains some models with model keys as string descriptors of them.
        '''
        # "autoencoder" is a training-time composite and is rebuilt in
        # train(); only encoder/decoder are persisted.
        self.model_keys = ["encoder", "decoder", "autoencoder"]
        super().__init__(**kwargs)
        return

    def save_models(self, model_path):
        # Persist encoder and decoder only; the autoencoder wrapper is
        # reconstructed from them at training time.
        for model_key in self.models.keys():
            if model_key == "autoencoder":
                continue
            filepath = os.path.join(model_path, "%s_%s.hdf5"%(model_key, self.model_tag))
            self.models[model_key].save(filepath, include_optimizer = False)
        return

    def _build_models(self, model_size = (64,64,64), descriptor_tag = "misc", **model_params):
        '''
        Build fresh encoder/decoder models.

        Parameters
        ----------
        model_size : tuple
            input patch shape (nz, ny, nx); a channel axis is appended.
        descriptor_tag : str
            string tag used in saved-model filenames.
        model_params : dict
            for passing any number of model hyperparameters necessary to define the model(s).
        '''
        if model_params is None:
            raise ValueError("Need model hyperparameters or instance of model. Neither were provided")
        else:
            self.models = {}

        # insert your model building code here. The models variable must be a dictionary of models with str descriptors as keys
        self.model_size = model_size
        self.model_tag = "%s"%descriptor_tag

        for key in self.model_keys:
            self.models.update({key : None})
        # The encoder also reports the flatten/pre-flatten shapes the decoder
        # needs to mirror it.
        self.models["encoder"], _flatten_shape, _preflatten_shape = build_encoder_r(self.model_size + (1,), **model_params)
        self.models["decoder"] = build_decoder_r(_flatten_shape, _preflatten_shape, **model_params)
        self.models["autoencoder"] = None

        return

    def _load_models(self, model_tag = None, model_size = (64,64,64), model_path = 'some-path'):
        '''
        Load previously saved encoder/decoder weights from disk.

        Parameters
        ----------
        model_tag : str
            tag used when the models were saved (required).
        model_size : tuple
            ignored; the actual size is read back from the encoder input.
        model_path : str
            directory containing the saved .hdf5 files.
        '''
        assert model_tag is not None, "need model_tag"
        self.models = {} # clears any existing models linked to this class!!!!
        for model_key in self.model_keys:
            if model_key == "autoencoder":
                self.models.update({model_key : None})
            else:
                filepath = os.path.join(model_path, "%s_%s.hdf5"%(model_key, model_tag))
                self.models.update({model_key : load_model(filepath)})
        # Recover the patch size from the loaded encoder (strip batch and
        # channel axes).
        self.model_size = self.models["encoder"].input_shape[1:-1]
        self.model_tag = model_tag
        return

    def data_generator(self, Xs, batch_size, sampling_method, \
                       max_stride = 1, \
                       random_rotate = False, add_noise = 0.1):
        '''
        Infinite generator of training batches sampled from a list of volumes.

        Parameters
        ----------
        Xs : list of np.array
            Volumes from which patches are extracted.
        batch_size : int
            Size of the batch generated at every iteration.
        sampling_method : str
            Possible methods include "random", "random-fixed-width", "grid"
        max_stride : int
            If method is "random" or "multiple-grids", then max_stride is required.
        random_rotate : bool
            True to randomly rotate each extracted patch by 90-degree steps.
        add_noise : float
            passed through to extract_training_sub_volumes.
        '''
        while True:
            n_vols = len(Xs)
            # Spread the batch as evenly as possible across the volumes.
            idx_vols = np.repeat(np.arange(0, n_vols), int(np.ceil(batch_size/n_vols)))
            idx_vols = idx_vols[:batch_size]

            x = []
            for ivol in range(n_vols):
                patches = self.get_patches(Xs[ivol].shape, sampling_method, np.sum(idx_vols == ivol), max_stride = max_stride)
                x.append(self.extract_training_sub_volumes(Xs[ivol], patches, add_noise, random_rotate))
            yield np.concatenate(x, axis = 0, dtype = 'float32')

    def extract_training_sub_volumes(self, X, patches, add_noise, random_rotate):
        '''
        Extract training sub-volumes for the given patches from volume X,
        with optional random 90-degree rotations.

        NOTE(review): add_noise is accepted but currently unused here —
        confirm whether noise injection was intended.
        '''
        batch_size = len(patches)
        x = patches.extract(X, self.model_size)[...,np.newaxis]

        if random_rotate:
            nrots = np.random.randint(0, 4, batch_size)
            for ii in range(batch_size):
                # Random rotation plane and multiple of 90 degrees per patch.
                axes = tuple(np.random.choice([0, 1, 2], size=2, replace=False))
                x[ii, ..., 0] = np.rot90(x[ii, ..., 0], k=nrots[ii], axes=axes)

        return x

    def get_patches(self, vol_shape, sampling_method, batch_size, max_stride = None):
        # Construct a Patches sampler appropriate for the sampling method;
        # "random" additionally varies the patch stride up to max_stride.
        if sampling_method in ["grid", 'regular-grid', "random-fixed-width"]:
            patches = Patches(vol_shape, initialize_by = sampling_method, \
                              patch_size = self.model_size, \
                              n_points = batch_size)
        elif sampling_method in ["random"]:
            patches = Patches(vol_shape, initialize_by = sampling_method, \
                              min_patch_size = self.model_size, \
                              max_stride = max_stride, \
                              n_points = batch_size)
        else:
            raise ValueError("sampling method not supported")
        return patches

    def train(self, vols, batch_size = 10, \
              sampling_method = 'random-fixed-width', \
              n_epochs = 10, \
              random_rotate = True, \
              add_noise = 0.1,\
              max_stride = 1, \
              normalize_sampling_factor = 2):
        '''
        Train the autoencoder on patches sampled from the given volumes.

        NOTE(review): normalize_sampling_factor is currently unused;
        validation_steps is passed to fit() without validation data —
        confirm intended behavior.
        '''
        # to-do: IMPORTANT! Go make data_loader.py, make sure normalize volume is done there.
        # instantiate data generator for use in training.
        dg = self.data_generator(vols, batch_size, sampling_method, \
                                 max_stride = max_stride, \
                                 random_rotate = random_rotate, \
                                 add_noise = add_noise)

        tot_steps = 500
        val_split = 0.2
        steps_per_epoch = int((1-val_split)*tot_steps//batch_size)
        validation_steps = int(val_split*tot_steps//batch_size)

        t0 = time.time()
        # Wrap encoder/decoder into the regularized training model.
        self.models["autoencoder"] = RegularizedAutoencoder(self.models['encoder'],\
                                                            self.models['decoder'],\
                                                            weight=1/250.0,\
                                                            regularization_type='kl')
        self.models["autoencoder"].compile(optimizer='adam')
        self.models["autoencoder"].fit(x = dg, epochs = n_epochs , \
                                       steps_per_epoch=steps_per_epoch, \
                                       validation_steps=validation_steps, verbose = 1)
        # Keep the (now trained) encoder/decoder references in sync.
        self.models["encoder"] = self.models["autoencoder"].encoder
        self.models["decoder"] = self.models["autoencoder"].decoder
        t1 = time.time()
        training_time = (t1 - t0)
        print("training time = %.2f seconds"%training_time)
        return

    def random_data_generator(self, batch_size):
        # Infinite generator of uniform-random volumes (for smoke testing).
        # NOTE(review): reads self.input_size, which is not set in this class
        # — presumably provided by EmbeddingLearner; confirm.
        while True:
            x_shape = tuple([batch_size] + list(self.input_size) + [1])
            x = np.random.uniform(0, 1, x_shape)#.astype(np.float32)
            x[x == 0] = 1.0e-12
            yield x

    def predict_embeddings(self, x, chunk_size, min_max = None, TIMEIT = False):
        '''
        Predicts on sub_vols. This is a wrapper around keras.model.predict() that speeds up inference on inputs lengths that are not factors of 2. Use this function to do multiprocessing if necessary.

        Parameters
        ----------
        x : np.array
            5D input (batch_size, nz, ny, nx, 1).
        chunk_size : int
            number of sub-volumes per predict() call; the last chunk is
            edge-padded so every call sees a full chunk.
        min_max : tuple or None
            (min, max) used to rescale the inputs before inference.
        TIMEIT : bool
            True to also return the per-patch inference time in ms.
        '''
        assert x.ndim == 5, "x must be 5-dimensional (batch_size, nz, ny, nx, 1)."
        t0 = time.time()
        print("call to keras predict, len(x) = %i, shape = %s, chunk_size = %i"%(len(x), str(x.shape[1:-1]), chunk_size))
        nb = len(x)
        nchunks = int(np.ceil(nb/chunk_size))
        nb_padded = nchunks*chunk_size
        padding = nb_padded - nb

        out_arr = np.zeros((nb, self.models["encoder"].output_shape[-1]), dtype = np.float32) # use numpy since return from predict is numpy

        for k in range(nchunks):
            sb = slice(k*chunk_size , min((k+1)*chunk_size, nb))
            x_in = x[sb,...]

            if min_max is not None:
                min_val, max_val = min_max
                # NOTE(review): _rescale_data is not defined in this module —
                # confirm it is imported via the star-import above.
                x_in = _rescale_data(x_in, float(min_val), float(max_val))

            if padding != 0:
                if k == nchunks - 1:
                    # Edge-pad the final short chunk up to chunk_size.
                    x_in = np.pad(x_in, \
                                  ((0,padding), (0,0), \
                                   (0,0), (0,0), (0,0)), mode = 'edge')
                x_out = self.models["encoder"].predict(x_in)

                if k == nchunks -1:
                    # Drop the embeddings produced for the padded rows.
                    x_out = x_out[:-padding,...]
            else:
                x_out = self.models["encoder"].predict(x_in)

            out_arr[sb,...] = x_out

        print("shape of output array: ", out_arr.shape)
        t_unit = (time.time() - t0)*1000.0/nb

        if TIMEIT:
            print("inf. time p. input patch size %s = %.2f ms, nb = %i"%(str(x[0,...,0].shape), t_unit, nb))
            print("\n")
            return out_arr, t_unit
        else:
            return out_arr

    # def detect_changes(self, vol_prev, vol_curr, patches):
    #     '''
    #     '''
    #     t0 = time.time()
    #     sub_vols_prev = patches.extract(self._normalize_volume(vol_prev), self.model_size)
    #     sub_vols_curr = patches.extract(self._normalize_volume(vol_curr), self.model_size)
    #     h_prev = self.models["encoder"].predict(sub_vols_prev[...,np.newaxis])
    #     h_curr = self.models["encoder"].predict(sub_vols_curr[...,np.newaxis])
    #     h_delta = (h_curr - h_prev)**2
    #     h_delta = np.mean(h_delta, axis = 1)
    #     h_delta = np.sqrt(h_delta)
    #     patches.add_features(h_delta.reshape(-1,1), names = ["h_delta"])
    #     t1 = time.time()
    #     tot_time_fe = t1 - t0
    #     print("total time for change detector = %.2f seconds"%tot_time_fe)
    #     mse = np.mean(np.power(sub_vols_curr - sub_vols_prev, 2), axis = (1,2,3))
    #     patches.add_features(mse.reshape(-1,1), names = ["mse"])
    #     return patches
if __name__ == "__main__":
    # This module is intended to be imported; direct execution does nothing.
    print('just a bunch of functions')
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
src/net/http/serve_test.go | // Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// End-to-end serving tests
package http_test
import (
"bufio"
"bytes"
"compress/gzip"
"compress/zlib"
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"internal/testenv"
"io"
"log"
"math/rand"
"net"
. "net/http"
"net/http/httptest"
"net/http/httptrace"
"net/http/httputil"
"net/http/internal"
"net/http/internal/testcert"
"net/url"
"os"
"os/exec"
"path/filepath"
"reflect"
"regexp"
"runtime"
"runtime/debug"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"testing"
"time"
)
// dummyAddr is a net.Addr whose network and string forms are both the
// underlying string value.
type dummyAddr string

// oneConnListener is a net.Listener that yields a single pre-made
// connection from Accept and then reports io.EOF.
type oneConnListener struct {
	conn net.Conn
}
// Accept returns the stored connection on the first call; every subsequent
// call reports io.EOF.
func (l *oneConnListener) Accept() (net.Conn, error) {
	c := l.conn
	if c == nil {
		return nil, io.EOF
	}
	l.conn = nil
	return c, nil
}
// Close is a no-op; the listener owns no real resources.
func (l *oneConnListener) Close() error {
	return nil
}

// Addr returns a fixed placeholder address.
func (l *oneConnListener) Addr() net.Addr {
	return dummyAddr("test-address")
}

// Network returns the address string itself as the network name.
func (a dummyAddr) Network() string {
	return string(a)
}

// String returns the address string itself.
func (a dummyAddr) String() string {
	return string(a)
}
type noopConn struct{}
func (noopConn) LocalAddr() net.Addr { return dummyAddr("local-addr") }
func (noopConn) RemoteAddr() net.Addr { return dummyAddr("remote-addr") }
func (noopConn) SetDeadline(t time.Time) error { return nil }
func (noopConn) SetReadDeadline(t time.Time) error { return nil }
func (noopConn) SetWriteDeadline(t time.Time) error { return nil }
// rwTestConn is a net.Conn assembled from independent Reader and Writer
// halves, with configurable close behavior for tests.
type rwTestConn struct {
	io.Reader
	io.Writer
	noopConn
	closeFunc func() error // called if non-nil
	closec chan bool // else, if non-nil, send value to it on close
}

// Close runs closeFunc when set; otherwise it performs a non-blocking
// send on closec so a waiting test can observe the close.
func (c *rwTestConn) Close() error {
	if c.closeFunc != nil {
		return c.closeFunc()
	}
	select {
	case c.closec <- true:
	default:
	}
	return nil
}

// testConn is an in-memory net.Conn: reads come from readBuf, writes go
// to writeBuf, and closes are signaled on closec.
type testConn struct {
	readMu sync.Mutex // for TestHandlerBodyClose
	readBuf bytes.Buffer
	writeBuf bytes.Buffer
	closec chan bool // if non-nil, send value to it on close
	noopConn
}

func (c *testConn) Read(b []byte) (int, error) {
	c.readMu.Lock()
	defer c.readMu.Unlock()
	return c.readBuf.Read(b)
}

func (c *testConn) Write(b []byte) (int, error) {
	return c.writeBuf.Write(b)
}

// Close signals closec without blocking if a receiver is not ready.
func (c *testConn) Close() error {
	select {
	case c.closec <- true:
	default:
	}
	return nil
}
// reqBytes converts a request written with plain \n separators into HTTP
// wire format: surrounding whitespace is trimmed, each line break becomes
// \r\n, and the result is terminated by the blank line ending the headers.
func reqBytes(req string) []byte {
	lines := strings.Split(strings.TrimSpace(req), "\n")
	return []byte(strings.Join(lines, "\r\n") + "\r\n\r\n")
}
// handlerTest runs a Handler against a raw in-memory connection,
// capturing the server's error log and the raw response bytes.
type handlerTest struct {
	logbuf bytes.Buffer
	handler Handler
}

func newHandlerTest(h Handler) handlerTest {
	return handlerTest{handler: h}
}

// rawResponse serves the single request described by req (with \n line
// breaks) through ht.handler and returns the raw bytes the server wrote.
// It returns only after the server has closed the one-shot connection.
func (ht *handlerTest) rawResponse(req string) string {
	reqb := reqBytes(req)
	var output bytes.Buffer
	conn := &rwTestConn{
		Reader: bytes.NewReader(reqb),
		Writer: &output,
		closec: make(chan bool, 1),
	}
	ln := &oneConnListener{conn: conn}
	srv := &Server{
		ErrorLog: log.New(&ht.logbuf, "", 0),
		Handler:  ht.handler,
	}
	go srv.Serve(ln)
	// Wait for the server to close the connection before reading output.
	<-conn.closec
	return output.String()
}
// TestConsumingBodyOnNextConn verifies that when two pipelined POST
// requests arrive on one connection, the server consumes the first
// request's body before parsing the second request, and that Serve
// returns io.EOF once the test connection is exhausted.
func TestConsumingBodyOnNextConn(t *testing.T) {
	t.Parallel()
	defer afterTest(t)
	conn := new(testConn)
	// Queue two identical pipelined POST requests in the read buffer.
	for i := 0; i < 2; i++ {
		conn.readBuf.Write([]byte(
			"POST / HTTP/1.1\r\n" +
				"Host: test\r\n" +
				"Content-Length: 11\r\n" +
				"\r\n" +
				"foo=1&bar=1"))
	}

	reqNum := 0
	ch := make(chan *Request)
	servech := make(chan error)
	listener := &oneConnListener{conn}
	handler := func(res ResponseWriter, req *Request) {
		reqNum++
		ch <- req
	}

	go func() {
		servech <- Serve(listener, HandlerFunc(handler))
	}()

	var req *Request
	req = <-ch
	if req == nil {
		t.Fatal("Got nil first request.")
	}
	if req.Method != "POST" {
		t.Errorf("For request #1's method, got %q; expected %q",
			req.Method, "POST")
	}

	req = <-ch
	if req == nil {
		// Fixed: this message previously said "first request" even though
		// it reports on the second request.
		t.Fatal("Got nil second request.")
	}
	if req.Method != "POST" {
		t.Errorf("For request #2's method, got %q; expected %q",
			req.Method, "POST")
	}

	if serveerr := <-servech; serveerr != io.EOF {
		t.Errorf("Serve returned %q; expected EOF", serveerr)
	}
}
// stringHandler is a Handler that identifies itself by setting its own
// string value in the "Result" response header.
type stringHandler string

func (s stringHandler) ServeHTTP(w ResponseWriter, r *Request) {
	w.Header().Set("Result", string(s))
}
// handlers lists the mux registrations used by TestHostHandlers: each
// pattern maps to a stringHandler echoing msg in the Result header.
var handlers = []struct {
	pattern string
	msg     string
}{
	{"/", "Default"},
	{"/someDir/", "someDir"},
	{"/#/", "hash"},
	{"someHost.com/someDir/", "someHost.com/someDir"},
}

// vtests pairs request URLs with either the expected Result header (for
// 200 responses) or the expected Location header (for 301 redirects).
var vtests = []struct {
	url      string
	expected string
}{
	{"http://localhost/someDir/apage", "someDir"},
	{"http://localhost/%23/apage", "hash"},
	{"http://localhost/otherDir/apage", "Default"},
	{"http://someHost.com/someDir/apage", "someHost.com/someDir"},
	{"http://otherHost.com/someDir/apage", "someDir"},
	{"http://otherHost.com/aDir/apage", "Default"},
	// redirections for trees
	{"http://localhost/someDir", "/someDir/"},
	{"http://localhost/%23", "/%23/"},
	{"http://someHost.com/someDir", "/someDir/"},
}
// TestHostHandlers checks host-qualified vs path-only mux patterns by
// replaying the vtests table over one raw client connection.
// NOTE(review): httputil.NewClientConn is deprecated; presumably kept here
// to control the wire exchange precisely — confirm before modernizing.
func TestHostHandlers(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	mux := NewServeMux()
	for _, h := range handlers {
		mux.Handle(h.pattern, stringHandler(h.msg))
	}
	ts := httptest.NewServer(mux)
	defer ts.Close()
	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	cc := httputil.NewClientConn(conn, nil)
	for _, vt := range vtests {
		var r *Response
		var req Request
		if req.URL, err = url.Parse(vt.url); err != nil {
			t.Errorf("cannot parse url: %v", err)
			continue
		}
		if err := cc.Write(&req); err != nil {
			t.Errorf("writing request: %v", err)
			continue
		}
		r, err := cc.Read(&req)
		if err != nil {
			t.Errorf("reading response: %v", err)
			continue
		}
		switch r.StatusCode {
		case StatusOK:
			// Served directly: the handler stamps its identity in Result.
			s := r.Header.Get("Result")
			if s != vt.expected {
				t.Errorf("Get(%q) = %q, want %q", vt.url, s, vt.expected)
			}
		case StatusMovedPermanently:
			// Redirected: the mux's implicit tree redirect sets Location.
			s := r.Header.Get("Location")
			if s != vt.expected {
				t.Errorf("Get(%q) = %q, want %q", vt.url, s, vt.expected)
			}
		default:
			t.Errorf("Get(%q) unhandled status code %d", vt.url, r.StatusCode)
		}
	}
}
// serveMuxRegister is the pattern table shared by the ServeMux tests:
// each registered handler replies with a distinct status code so tests
// can tell which pattern matched.
var serveMuxRegister = []struct {
	pattern string
	h       Handler
}{
	{"/dir/", serve(200)},
	{"/search", serve(201)},
	{"codesearch.google.com/search", serve(202)},
	{"codesearch.google.com/", serve(203)},
	{"example.com/", HandlerFunc(checkQueryStringHandler)},
}
// serve returns a handler that sends a response with the given code.
func serve(code int) HandlerFunc {
return func(w ResponseWriter, r *Request) {
w.WriteHeader(code)
}
}
// checkQueryStringHandler checks if r.URL.RawQuery has the same value
// as the URL excluding the scheme and the query string and sends 200
// response code if it is, 500 otherwise.
func checkQueryStringHandler(w ResponseWriter, r *Request) {
u := *r.URL
u.Scheme = "http"
u.Host = r.Host
u.RawQuery = ""
if "http://"+r.URL.RawQuery == u.String() {
w.WriteHeader(200)
} else {
w.WriteHeader(500)
}
}
// serveMuxTests maps (method, host, path) requests to the status code and
// matched pattern expected from a mux loaded with serveMuxRegister.
var serveMuxTests = []struct {
	method  string
	host    string
	path    string
	code    int
	pattern string
}{
	{"GET", "google.com", "/", 404, ""},
	{"GET", "google.com", "/dir", 301, "/dir/"},
	{"GET", "google.com", "/dir/", 200, "/dir/"},
	{"GET", "google.com", "/dir/file", 200, "/dir/"},
	{"GET", "google.com", "/search", 201, "/search"},
	{"GET", "google.com", "/search/", 404, ""},
	{"GET", "google.com", "/search/foo", 404, ""},
	{"GET", "codesearch.google.com", "/search", 202, "codesearch.google.com/search"},
	{"GET", "codesearch.google.com", "/search/", 203, "codesearch.google.com/"},
	{"GET", "codesearch.google.com", "/search/foo", 203, "codesearch.google.com/"},
	{"GET", "codesearch.google.com", "/", 203, "codesearch.google.com/"},
	{"GET", "codesearch.google.com:443", "/", 203, "codesearch.google.com/"},
	{"GET", "images.google.com", "/search", 201, "/search"},
	{"GET", "images.google.com", "/search/", 404, ""},
	{"GET", "images.google.com", "/search/foo", 404, ""},
	{"GET", "google.com", "/../search", 301, "/search"},
	{"GET", "google.com", "/dir/..", 301, ""},
	{"GET", "google.com", "/dir/..", 301, ""},
	{"GET", "google.com", "/dir/./file", 301, "/dir/"},

	// The /foo -> /foo/ redirect applies to CONNECT requests
	// but the path canonicalization does not.
	{"CONNECT", "google.com", "/dir", 301, "/dir/"},
	{"CONNECT", "google.com", "/../search", 404, ""},
	{"CONNECT", "google.com", "/dir/..", 200, "/dir/"},
	{"CONNECT", "google.com", "/dir/..", 200, "/dir/"},
	{"CONNECT", "google.com", "/dir/./file", 200, "/dir/"},
}
// TestServeMuxHandler drives ServeMux.Handler directly over the
// serveMuxTests table, checking both the returned pattern and the status
// code the chosen handler produces.
func TestServeMuxHandler(t *testing.T) {
	setParallel(t)
	mux := NewServeMux()
	for _, e := range serveMuxRegister {
		mux.Handle(e.pattern, e.h)
	}

	for _, tt := range serveMuxTests {
		r := &Request{
			Method: tt.method,
			Host:   tt.host,
			URL: &url.URL{
				Path: tt.path,
			},
		}
		h, pattern := mux.Handler(r)
		rr := httptest.NewRecorder()
		h.ServeHTTP(rr, r)
		if pattern != tt.pattern || rr.Code != tt.code {
			t.Errorf("%s %s %s = %d, %q, want %d, %q", tt.method, tt.host, tt.path, rr.Code, pattern, tt.code, tt.pattern)
		}
	}
}
// Issue 24297
// Registering a nil handler func must panic rather than install a
// handler that would crash at serve time.
func TestServeMuxHandleFuncWithNilHandler(t *testing.T) {
	setParallel(t)
	defer func() {
		if err := recover(); err == nil {
			t.Error("expected call to mux.HandleFunc to panic")
		}
	}()
	mux := NewServeMux()
	mux.HandleFunc("/", nil)
}
// serveMuxTests2 lists requests for TestServeMuxHandlerRedirects; redirOk
// marks cases where one automatic redirect is expected before the final
// status code.
var serveMuxTests2 = []struct {
	method  string
	host    string
	url     string
	code    int
	redirOk bool
}{
	{"GET", "google.com", "/", 404, false},
	{"GET", "example.com", "/test/?example.com/test/", 200, false},
	{"GET", "example.com", "test/?example.com/test/", 200, true},
}
// TestServeMuxHandlerRedirects tests that automatic redirects generated by
// mux.Handler() shouldn't clear the request's query string.
func TestServeMuxHandlerRedirects(t *testing.T) {
	setParallel(t)
	mux := NewServeMux()
	for _, e := range serveMuxRegister {
		mux.Handle(e.pattern, e.h)
	}

	for _, tt := range serveMuxTests2 {
		tries := 1 // expect at most 1 redirection if redirOk is true.
		turl := tt.url
		for {
			u, e := url.Parse(turl)
			if e != nil {
				t.Fatal(e)
			}
			r := &Request{
				Method: tt.method,
				Host:   tt.host,
				URL:    u,
			}
			h, _ := mux.Handler(r)
			rr := httptest.NewRecorder()
			h.ServeHTTP(rr, r)
			if rr.Code != 301 {
				// Terminal response: check it and stop following.
				if rr.Code != tt.code {
					t.Errorf("%s %s %s = %d, want %d", tt.method, tt.host, tt.url, rr.Code, tt.code)
				}
				break
			}
			if !tt.redirOk {
				t.Errorf("%s %s %s, unexpected redirect", tt.method, tt.host, tt.url)
				break
			}
			// Follow the redirect (query string must survive in Location).
			turl = rr.HeaderMap.Get("Location")
			tries--
		}
		if tries < 0 {
			t.Errorf("%s %s %s, too many redirects", tt.method, tt.host, tt.url)
		}
	}
}
// Tests for https://golang.org/issue/900
// Paths with duplicated leading slashes or upward traversal must be
// cleaned and answered with a 301 to the canonical path.
func TestMuxRedirectLeadingSlashes(t *testing.T) {
	setParallel(t)
	paths := []string{"//foo.txt", "///foo.txt", "/../../foo.txt"}
	for _, path := range paths {
		req, err := ReadRequest(bufio.NewReader(strings.NewReader("GET " + path + " HTTP/1.1\r\nHost: test\r\n\r\n")))
		if err != nil {
			t.Errorf("%s", err)
		}
		mux := NewServeMux()
		resp := httptest.NewRecorder()

		mux.ServeHTTP(resp, req)

		if loc, expected := resp.Header().Get("Location"), "/foo.txt"; loc != expected {
			t.Errorf("Expected Location header set to %q; got %q", expected, loc)
			return
		}

		if code, expected := resp.Code, StatusMovedPermanently; code != expected {
			t.Errorf("Expected response code of StatusMovedPermanently; got %d", code)
			return
		}
	}
}
// Test that the special cased "/route" redirect
// implicitly created by a registered "/route/"
// properly sets the query string in the redirect URL.
// See Issue 17841.
func TestServeWithSlashRedirectKeepsQueryString(t *testing.T) {
	setParallel(t)
	defer afterTest(t)

	writeBackQuery := func(w ResponseWriter, r *Request) {
		fmt.Fprintf(w, "%s", r.URL.RawQuery)
	}

	mux := NewServeMux()
	mux.HandleFunc("/testOne", writeBackQuery)
	mux.HandleFunc("/testTwo/", writeBackQuery)
	mux.HandleFunc("/testThree", writeBackQuery)
	mux.HandleFunc("/testThree/", func(w ResponseWriter, r *Request) {
		fmt.Fprintf(w, "%s:bar", r.URL.RawQuery)
	})

	ts := httptest.NewServer(mux)
	defer ts.Close()

	tests := [...]struct {
		path     string
		method   string
		want     string
		statusOk bool
	}{
		0: {"/testOne?this=that", "GET", "this=that", true},
		1: {"/testTwo?foo=bar", "GET", "foo=bar", true},
		2: {"/testTwo?a=1&b=2&a=3", "GET", "a=1&b=2&a=3", true},
		3: {"/testTwo?", "GET", "", true},
		4: {"/testThree?foo", "GET", "foo", true},
		5: {"/testThree/?foo", "GET", "foo:bar", true},
		6: {"/testThree?foo", "CONNECT", "foo", true},
		7: {"/testThree/?foo", "CONNECT", "foo:bar", true},

		// canonicalization or not
		8: {"/testOne/foo/..?foo", "GET", "foo", true},
		9: {"/testOne/foo/..?foo", "CONNECT", "404 page not found\n", false},
	}

	for i, tt := range tests {
		req, _ := NewRequest(tt.method, ts.URL+tt.path, nil)
		res, err := ts.Client().Do(req)
		if err != nil {
			// Previously this swallowed the error and skipped the case
			// silently; report it so transport failures are visible.
			t.Errorf("#%d: request failed: %v", i, err)
			continue
		}
		slurp, _ := io.ReadAll(res.Body)
		res.Body.Close()
		if !tt.statusOk {
			if got, want := res.StatusCode, 404; got != want {
				t.Errorf("#%d: Status = %d; want = %d", i, got, want)
			}
		}
		if got, want := string(slurp), tt.want; got != want {
			t.Errorf("#%d: Body = %q; want = %q", i, got, want)
		}
	}
}
// TestServeWithSlashRedirectForHostPatterns checks the implicit "/path"
// -> "/path/" redirect when patterns are qualified by host (and port),
// including CONNECT requests, which skip path canonicalization.
func TestServeWithSlashRedirectForHostPatterns(t *testing.T) {
	setParallel(t)
	defer afterTest(t)

	mux := NewServeMux()
	mux.Handle("example.com/pkg/foo/", stringHandler("example.com/pkg/foo/"))
	mux.Handle("example.com/pkg/bar", stringHandler("example.com/pkg/bar"))
	mux.Handle("example.com/pkg/bar/", stringHandler("example.com/pkg/bar/"))
	mux.Handle("example.com:3000/pkg/connect/", stringHandler("example.com:3000/pkg/connect/"))
	mux.Handle("example.com:9000/", stringHandler("example.com:9000/"))
	mux.Handle("/pkg/baz/", stringHandler("/pkg/baz/"))

	tests := []struct {
		method string
		url    string
		code   int
		loc    string
		want   string
	}{
		{"GET", "http://example.com/", 404, "", ""},
		{"GET", "http://example.com/pkg/foo", 301, "/pkg/foo/", ""},
		{"GET", "http://example.com/pkg/bar", 200, "", "example.com/pkg/bar"},
		{"GET", "http://example.com/pkg/bar/", 200, "", "example.com/pkg/bar/"},
		{"GET", "http://example.com/pkg/baz", 301, "/pkg/baz/", ""},
		{"GET", "http://example.com:3000/pkg/foo", 301, "/pkg/foo/", ""},
		{"CONNECT", "http://example.com/", 404, "", ""},
		{"CONNECT", "http://example.com:3000/", 404, "", ""},
		{"CONNECT", "http://example.com:9000/", 200, "", "example.com:9000/"},
		{"CONNECT", "http://example.com/pkg/foo", 301, "/pkg/foo/", ""},
		{"CONNECT", "http://example.com:3000/pkg/foo", 404, "", ""},
		{"CONNECT", "http://example.com:3000/pkg/baz", 301, "/pkg/baz/", ""},
		{"CONNECT", "http://example.com:3000/pkg/connect", 301, "/pkg/connect/", ""},
	}

	ts := httptest.NewServer(mux)
	defer ts.Close()

	for i, tt := range tests {
		req, _ := NewRequest(tt.method, tt.url, nil)
		w := httptest.NewRecorder()
		mux.ServeHTTP(w, req)

		if got, want := w.Code, tt.code; got != want {
			t.Errorf("#%d: Status = %d; want = %d", i, got, want)
		}

		if tt.code == 301 {
			if got, want := w.HeaderMap.Get("Location"), tt.loc; got != want {
				t.Errorf("#%d: Location = %q; want = %q", i, got, want)
			}
		} else {
			if got, want := w.HeaderMap.Get("Result"), tt.want; got != want {
				t.Errorf("#%d: Result = %q; want = %q", i, got, want)
			}
		}
	}
}
// TestShouldRedirectConcurrency registers a pattern on a mux that is
// already serving, exercising mux mutation concurrent with serving.
func TestShouldRedirectConcurrency(t *testing.T) {
	setParallel(t)
	defer afterTest(t)

	mux := NewServeMux()
	ts := httptest.NewServer(mux)
	defer ts.Close()
	mux.HandleFunc("/", func(w ResponseWriter, r *Request) {})
}
func BenchmarkServeMux(b *testing.B)           { benchmarkServeMux(b, true) }
func BenchmarkServeMux_SkipServe(b *testing.B) { benchmarkServeMux(b, false) }

// benchmarkServeMux measures mux lookup (and optionally handler dispatch,
// when runHandler is true) over 6x30 registered patterns.
func benchmarkServeMux(b *testing.B, runHandler bool) {
	type test struct {
		path string
		code int
		req  *Request
	}

	// Build example handlers and requests
	var tests []test
	endpoints := []string{"search", "dir", "file", "change", "count", "s"}
	for _, e := range endpoints {
		for i := 200; i < 230; i++ {
			p := fmt.Sprintf("/%s/%d/", e, i)
			tests = append(tests, test{
				path: p,
				code: i,
				req:  &Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: p}},
			})
		}
	}
	mux := NewServeMux()
	for _, tt := range tests {
		mux.Handle(tt.path, serve(tt.code))
	}

	rw := httptest.NewRecorder()
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for _, tt := range tests {
			// Reset the recorder in place instead of allocating a new one.
			*rw = httptest.ResponseRecorder{}
			h, pattern := mux.Handler(tt.req)
			if runHandler {
				h.ServeHTTP(rw, tt.req)
				if pattern != tt.path || rw.Code != tt.code {
					b.Fatalf("got %d, %q, want %d, %q", rw.Code, pattern, tt.code, tt.path)
				}
			}
		}
	}
}
// TestServerTimeouts exercises Server.ReadTimeout/WriteTimeout at
// increasing timeout values, passing on the first success.
func TestServerTimeouts(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	// Delegate to the shared retry helper instead of duplicating its
	// escalating-timeout loop inline (kept consistent with the HTTP/2
	// deadline tests, which already use tryTimeouts).
	tryTimeouts(t, testServerTimeouts)
}
// testServerTimeouts runs one attempt of the server-timeout scenario at
// the given timeout: a normal request must succeed, a silent client must
// be disconnected (EOF) no sooner than ~80% of the timeout, the next
// request must see reqNum=2 (the timed-out conn never ran the handler),
// and a slow-writing client must be tolerated while it keeps sending.
func testServerTimeouts(timeout time.Duration) error {
	reqNum := 0
	ts := httptest.NewUnstartedServer(HandlerFunc(func(res ResponseWriter, req *Request) {
		reqNum++
		fmt.Fprintf(res, "req=%d", reqNum)
	}))
	ts.Config.ReadTimeout = timeout
	ts.Config.WriteTimeout = timeout
	ts.Start()
	defer ts.Close()

	// Hit the HTTP server successfully.
	c := ts.Client()
	r, err := c.Get(ts.URL)
	if err != nil {
		return fmt.Errorf("http Get #1: %v", err)
	}
	got, err := io.ReadAll(r.Body)
	r.Body.Close() // was leaked before; close the first response body too
	expected := "req=1"
	if string(got) != expected || err != nil {
		return fmt.Errorf("Unexpected response for request #1; got %q ,%v; expected %q, nil",
			string(got), err, expected)
	}

	// Slow client that should timeout.
	t1 := time.Now()
	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		return fmt.Errorf("Dial: %v", err)
	}
	buf := make([]byte, 1)
	n, err := conn.Read(buf)
	conn.Close()
	latency := time.Since(t1)
	if n != 0 || err != io.EOF {
		return fmt.Errorf("Read = %v, %v, wanted %v, %v", n, err, 0, io.EOF)
	}
	minLatency := timeout / 5 * 4
	if latency < minLatency {
		return fmt.Errorf("got EOF after %s, want >= %s", latency, minLatency)
	}

	// Hit the HTTP server successfully again, verifying that the
	// previous slow connection didn't run our handler. (that we
	// get "req=2", not "req=3")
	r, err = c.Get(ts.URL)
	if err != nil {
		return fmt.Errorf("http Get #2: %v", err)
	}
	got, err = io.ReadAll(r.Body)
	r.Body.Close()
	expected = "req=2"
	if string(got) != expected || err != nil {
		return fmt.Errorf("Get #2 got %q, %v, want %q, nil", string(got), err, expected)
	}

	if !testing.Short() {
		conn, err := net.Dial("tcp", ts.Listener.Addr().String())
		if err != nil {
			return fmt.Errorf("long Dial: %v", err)
		}
		defer conn.Close()
		go io.Copy(io.Discard, conn)
		for i := 0; i < 5; i++ {
			_, err := conn.Write([]byte("GET / HTTP/1.1\r\nHost: foo\r\n\r\n"))
			if err != nil {
				return fmt.Errorf("on write %d: %v", i, err)
			}
			// Each write resets the server's read deadline, so the
			// connection must survive as long as we keep talking.
			time.Sleep(timeout / 2)
		}
	}
	return nil
}
// Test that the HTTP/2 server handles Server.WriteTimeout (Issue 18437)
// Each of three sequential requests must complete even though each
// arrives after the previous write deadline would have fired, i.e. the
// deadline is extended per request.
func TestHTTP2WriteDeadlineExtendedOnNewRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	setParallel(t)
	defer afterTest(t)
	ts := httptest.NewUnstartedServer(HandlerFunc(func(res ResponseWriter, req *Request) {}))
	ts.Config.WriteTimeout = 250 * time.Millisecond
	ts.TLS = &tls.Config{NextProtos: []string{"h2"}}
	ts.StartTLS()
	defer ts.Close()

	c := ts.Client()
	if err := ExportHttp2ConfigureTransport(c.Transport.(*Transport)); err != nil {
		t.Fatal(err)
	}

	for i := 1; i <= 3; i++ {
		req, err := NewRequest("GET", ts.URL, nil)
		if err != nil {
			t.Fatal(err)
		}

		// fail test if no response after 1 second
		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
		defer cancel() // NOTE(review): defers accumulate until test end; harmless for 3 iterations
		req = req.WithContext(ctx)

		r, err := c.Do(req)
		if ctx.Err() == context.DeadlineExceeded {
			t.Fatalf("http2 Get #%d response timed out", i)
		}
		if err != nil {
			t.Fatalf("http2 Get #%d: %v", i, err)
		}
		r.Body.Close()
		if r.ProtoMajor != 2 {
			t.Fatalf("http2 Get expected HTTP/2.0, got %q", r.Proto)
		}
		// Sleep past half the write timeout before the next request.
		time.Sleep(ts.Config.WriteTimeout / 2)
	}
}
// tryTimeouts runs testFunc with increasing timeouts. Test passes on first success,
// and fails if all timeouts fail.
func tryTimeouts(t *testing.T, testFunc func(timeout time.Duration) error) {
	tries := []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second}
	for i := 0; i < len(tries); i++ {
		timeout := tries[i]
		err := testFunc(timeout)
		if err == nil {
			return
		}
		t.Logf("failed at %v: %v", timeout, err)
		if i < len(tries)-1 {
			t.Logf("retrying at %v ...", tries[i+1])
		}
	}
	t.Fatal("all attempts failed")
}
// Test that the HTTP/2 server RSTs stream on slow write.
func TestHTTP2WriteDeadlineEnforcedPerStream(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	setParallel(t)
	defer afterTest(t)
	// Retried at escalating timeouts because timing-sensitive.
	tryTimeouts(t, testHTTP2WriteDeadlineEnforcedPerStream)
}
// testHTTP2WriteDeadlineEnforcedPerStream runs one attempt: the first
// request completes quickly, the second blocks past WriteTimeout and must
// be reset with an INTERNAL_ERROR on its own stream (ID 3).
func testHTTP2WriteDeadlineEnforcedPerStream(timeout time.Duration) error {
	reqNum := 0
	ts := httptest.NewUnstartedServer(HandlerFunc(func(res ResponseWriter, req *Request) {
		reqNum++
		if reqNum == 1 {
			return // first request succeeds
		}
		time.Sleep(timeout) // second request times out
	}))
	ts.Config.WriteTimeout = timeout / 2
	ts.TLS = &tls.Config{NextProtos: []string{"h2"}}
	ts.StartTLS()
	defer ts.Close()

	c := ts.Client()
	if err := ExportHttp2ConfigureTransport(c.Transport.(*Transport)); err != nil {
		return fmt.Errorf("ExportHttp2ConfigureTransport: %v", err)
	}

	req, err := NewRequest("GET", ts.URL, nil)
	if err != nil {
		return fmt.Errorf("NewRequest: %v", err)
	}
	r, err := c.Do(req)
	if err != nil {
		return fmt.Errorf("http2 Get #1: %v", err)
	}
	r.Body.Close()
	if r.ProtoMajor != 2 {
		return fmt.Errorf("http2 Get expected HTTP/2.0, got %q", r.Proto)
	}

	req, err = NewRequest("GET", ts.URL, nil)
	if err != nil {
		return fmt.Errorf("NewRequest: %v", err)
	}
	r, err = c.Do(req)
	if err == nil {
		r.Body.Close()
		if r.ProtoMajor != 2 {
			return fmt.Errorf("http2 Get expected HTTP/2.0, got %q", r.Proto)
		}
		return fmt.Errorf("http2 Get #2 expected error, got nil")
	}
	expected := "stream ID 3; INTERNAL_ERROR" // client IDs are odd, second stream should be 3
	if !strings.Contains(err.Error(), expected) {
		return fmt.Errorf("http2 Get #2: expected error to contain %q, got %q", expected, err)
	}
	return nil
}
// Test that the HTTP/2 server does not send RST when WriteDeadline not set.
func TestHTTP2NoWriteDeadline(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	setParallel(t)
	defer afterTest(t)
	tryTimeouts(t, testHTTP2NoWriteDeadline)
}

// testHTTP2NoWriteDeadline runs one attempt: with no WriteTimeout set,
// both the fast first request and the slow second one must succeed.
func testHTTP2NoWriteDeadline(timeout time.Duration) error {
	reqNum := 0
	ts := httptest.NewUnstartedServer(HandlerFunc(func(res ResponseWriter, req *Request) {
		reqNum++
		if reqNum == 1 {
			return // first request succeeds
		}
		time.Sleep(timeout) // second request times out
	}))
	ts.TLS = &tls.Config{NextProtos: []string{"h2"}}
	ts.StartTLS()
	defer ts.Close()

	c := ts.Client()
	if err := ExportHttp2ConfigureTransport(c.Transport.(*Transport)); err != nil {
		return fmt.Errorf("ExportHttp2ConfigureTransport: %v", err)
	}

	for i := 0; i < 2; i++ {
		req, err := NewRequest("GET", ts.URL, nil)
		if err != nil {
			return fmt.Errorf("NewRequest: %v", err)
		}
		r, err := c.Do(req)
		if err != nil {
			return fmt.Errorf("http2 Get #%d: %v", i, err)
		}
		r.Body.Close()
		if r.ProtoMajor != 2 {
			return fmt.Errorf("http2 Get expected HTTP/2.0, got %q", r.Proto)
		}
	}
	return nil
}
// golang.org/issue/4741 -- setting only a write timeout that triggers
// shouldn't cause a handler to block forever on reads (next HTTP
// request) that will never happen.
func TestOnlyWriteTimeout(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	var (
		mu   sync.RWMutex
		conn net.Conn
	)
	var afterTimeoutErrc = make(chan error, 1)
	ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, req *Request) {
		buf := make([]byte, 512<<10)
		_, err := w.Write(buf)
		if err != nil {
			t.Errorf("handler Write error: %v", err)
			return
		}
		mu.RLock()
		defer mu.RUnlock()
		if conn == nil {
			t.Error("no established connection found")
			return
		}
		// Force the write deadline into the past so the next write fails.
		conn.SetWriteDeadline(time.Now().Add(-30 * time.Second))
		_, err = w.Write(buf)
		afterTimeoutErrc <- err
	}))
	ts.Listener = trackLastConnListener{ts.Listener, &mu, &conn}
	ts.Start()
	defer ts.Close()

	c := ts.Client()

	errc := make(chan error, 1)
	go func() {
		res, err := c.Get(ts.URL)
		if err != nil {
			errc <- err
			return
		}
		_, err = io.Copy(io.Discard, res.Body)
		res.Body.Close()
		errc <- err
	}()
	select {
	case err := <-errc:
		if err == nil {
			t.Errorf("expected an error from Get request")
		}
	case <-time.After(10 * time.Second):
		t.Fatal("timeout waiting for Get error")
	}
	if err := <-afterTimeoutErrc; err == nil {
		t.Error("expected write error after timeout")
	}
}
// trackLastConnListener tracks the last net.Conn that was accepted.
type trackLastConnListener struct {
	net.Listener

	mu   *sync.RWMutex
	last *net.Conn // destination
}

// Accept records each accepted connection in *last under mu.
func (l trackLastConnListener) Accept() (c net.Conn, err error) {
	c, err = l.Listener.Accept()
	if err == nil {
		l.mu.Lock()
		*l.last = c
		l.mu.Unlock()
	}
	return
}
// TestIdentityResponse verifies that a handler can unset
// chunked transfer encoding by setting Content-Length, that overwriting
// a declared Content-Length fails with ErrContentLength, and that the
// server closes the connection after an under-written body.
func TestIdentityResponse(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	handler := HandlerFunc(func(rw ResponseWriter, req *Request) {
		rw.Header().Set("Content-Length", "3")
		rw.Header().Set("Transfer-Encoding", req.FormValue("te"))
		switch {
		case req.FormValue("overwrite") == "1":
			_, err := rw.Write([]byte("foo TOO LONG"))
			if err != ErrContentLength {
				t.Errorf("expected ErrContentLength; got %v", err)
			}
		case req.FormValue("underwrite") == "1":
			rw.Header().Set("Content-Length", "500")
			rw.Write([]byte("too short"))
		default:
			rw.Write([]byte("foo"))
		}
	})

	ts := httptest.NewServer(handler)
	defer ts.Close()

	c := ts.Client()

	// Note: this relies on the assumption (which is true) that
	// Get sends HTTP/1.1 or greater requests. Otherwise the
	// server wouldn't have the choice to send back chunked
	// responses.
	for _, te := range []string{"", "identity"} {
		url := ts.URL + "/?te=" + te
		res, err := c.Get(url)
		if err != nil {
			t.Fatalf("error with Get of %s: %v", url, err)
		}
		if cl, expected := res.ContentLength, int64(3); cl != expected {
			t.Errorf("for %s expected res.ContentLength of %d; got %d", url, expected, cl)
		}
		if cl, expected := res.Header.Get("Content-Length"), "3"; cl != expected {
			t.Errorf("for %s expected Content-Length header of %q; got %q", url, expected, cl)
		}
		if tl, expected := len(res.TransferEncoding), 0; tl != expected {
			t.Errorf("for %s expected len(res.TransferEncoding) of %d; got %d (%v)",
				url, expected, tl, res.TransferEncoding)
		}
		res.Body.Close()
	}

	// Verify that ErrContentLength is returned
	url := ts.URL + "/?overwrite=1"
	res, err := c.Get(url)
	if err != nil {
		t.Fatalf("error with Get of %s: %v", url, err)
	}
	res.Body.Close()

	// Verify that the connection is closed when the declared Content-Length
	// is larger than what the handler wrote.
	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatalf("error dialing: %v", err)
	}
	_, err = conn.Write([]byte("GET /?underwrite=1 HTTP/1.1\r\nHost: foo\r\n\r\n"))
	if err != nil {
		t.Fatalf("error writing: %v", err)
	}

	// The ReadAll will hang for a failing test.
	got, _ := io.ReadAll(conn)
	expectedSuffix := "\r\n\r\ntoo short"
	if !strings.HasSuffix(string(got), expectedSuffix) {
		t.Errorf("Expected output to end with %q; got response body %q",
			expectedSuffix, string(got))
	}
}
// testTCPConnectionCloses sends the raw request req to a server running h
// and asserts that the server marks the response Close and actually ends
// the connection (the body read must finish within 5s).
func testTCPConnectionCloses(t *testing.T, req string, h Handler) {
	setParallel(t)
	defer afterTest(t)
	s := httptest.NewServer(h)
	defer s.Close()

	conn, err := net.Dial("tcp", s.Listener.Addr().String())
	if err != nil {
		t.Fatal("dial error:", err)
	}
	defer conn.Close()

	_, err = fmt.Fprint(conn, req)
	if err != nil {
		t.Fatal("print error:", err)
	}

	r := bufio.NewReader(conn)
	res, err := ReadResponse(r, &Request{Method: "GET"})
	if err != nil {
		t.Fatal("ReadResponse error:", err)
	}

	// Watchdog: if the server never closes, the ReadAll below hangs.
	didReadAll := make(chan bool, 1)
	go func() {
		select {
		case <-time.After(5 * time.Second):
			t.Error("body not closed after 5s")
			return
		case <-didReadAll:
		}
	}()

	_, err = io.ReadAll(r)
	if err != nil {
		t.Fatal("read error:", err)
	}
	didReadAll <- true

	if !res.Close {
		t.Errorf("Response.Close = false; want true")
	}
}
// testTCPConnectionStaysOpen sends req twice over one TCP connection and
// asserts both responses can be read, i.e. the server kept the
// connection alive.
func testTCPConnectionStaysOpen(t *testing.T, req string, handler Handler) {
	setParallel(t)
	defer afterTest(t)
	ts := httptest.NewServer(handler)
	defer ts.Close()
	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	br := bufio.NewReader(conn)
	for i := 0; i < 2; i++ {
		if _, err := io.WriteString(conn, req); err != nil {
			t.Fatal(err)
		}
		res, err := ReadResponse(br, nil)
		if err != nil {
			t.Fatalf("res %d: %v", i+1, err)
		}
		if _, err := io.Copy(io.Discard, res.Body); err != nil {
			t.Fatalf("res %d body copy: %v", i+1, err)
		}
		res.Body.Close()
	}
}
// TestServeHTTP10Close verifies that HTTP/1.0 requests won't be kept alive.
func TestServeHTTP10Close(t *testing.T) {
	testTCPConnectionCloses(t, "GET / HTTP/1.0\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
		ServeFile(w, r, "testdata/file")
	}))
}

// TestClientCanClose verifies that clients can also force a connection to close.
func TestClientCanClose(t *testing.T) {
	testTCPConnectionCloses(t, "GET / HTTP/1.1\r\nHost: foo\r\nConnection: close\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
		// Nothing.
	}))
}

// TestHandlersCanSetConnectionClose verifies that handlers can force a connection to close,
// even for HTTP/1.1 requests.
func TestHandlersCanSetConnectionClose11(t *testing.T) {
	testTCPConnectionCloses(t, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Connection", "close")
	}))
}

// Same as above, but the request is HTTP/1.0 with explicit keep-alive.
func TestHandlersCanSetConnectionClose10(t *testing.T) {
	testTCPConnectionCloses(t, "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Connection", "close")
	}))
}

// An HTTP/2 upgrade preface on a plain server must result in the server
// closing the connection (unless the handler hijacks it).
func TestHTTP2UpgradeClosesConnection(t *testing.T) {
	testTCPConnectionCloses(t, "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
		// Nothing. (if not hijacked, the server should close the connection
		// afterwards)
	}))
}
func send204(w ResponseWriter, r *Request) { w.WriteHeader(204) }
func send304(w ResponseWriter, r *Request) { w.WriteHeader(304) }
// Issue 15647: 204 responses can't have bodies, so HTTP/1.0 keep-alive conns should stay open.
func TestHTTP10KeepAlive204Response(t *testing.T) {
	testTCPConnectionStaysOpen(t, "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n", HandlerFunc(send204))
}

func TestHTTP11KeepAlive204Response(t *testing.T) {
	testTCPConnectionStaysOpen(t, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n", HandlerFunc(send204))
}

// 304 responses also have no body, so keep-alive must survive them too.
func TestHTTP10KeepAlive304Response(t *testing.T) {
	testTCPConnectionStaysOpen(t,
		"GET / HTTP/1.0\r\nConnection: keep-alive\r\nIf-Modified-Since: Mon, 02 Jan 2006 15:04:05 GMT\r\n\r\n",
		HandlerFunc(send304))
}
// Issue 15703
// A chunked response whose final chunk coincides with EOF must still
// allow the connection to be reused: both requests must report the same
// client RemoteAddr.
func TestKeepAliveFinalChunkWithEOF(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	cst := newClientServerTest(t, false /* h1 */, HandlerFunc(func(w ResponseWriter, r *Request) {
		w.(Flusher).Flush() // force chunked encoding
		w.Write([]byte("{\"Addr\": \"" + r.RemoteAddr + "\"}"))
	}))
	defer cst.close()
	type data struct {
		Addr string
	}
	var addrs [2]data
	for i := range addrs {
		res, err := cst.c.Get(cst.ts.URL)
		if err != nil {
			t.Fatal(err)
		}
		if err := json.NewDecoder(res.Body).Decode(&addrs[i]); err != nil {
			t.Fatal(err)
		}
		if addrs[i].Addr == "" {
			t.Fatal("no address")
		}
		res.Body.Close()
	}
	if addrs[0] != addrs[1] {
		t.Fatalf("connection not reused")
	}
}
func TestSetsRemoteAddr_h1(t *testing.T) { testSetsRemoteAddr(t, h1Mode) }
func TestSetsRemoteAddr_h2(t *testing.T) { testSetsRemoteAddr(t, h2Mode) }

// testSetsRemoteAddr asserts the handler sees a loopback RemoteAddr in
// both HTTP/1 and HTTP/2 modes.
func testSetsRemoteAddr(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)
	cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
		fmt.Fprintf(w, "%s", r.RemoteAddr)
	}))
	defer cst.close()

	res, err := cst.c.Get(cst.ts.URL)
	if err != nil {
		t.Fatalf("Get error: %v", err)
	}
	body, err := io.ReadAll(res.Body)
	if err != nil {
		t.Fatalf("ReadAll error: %v", err)
	}
	ip := string(body)
	// Accept both IPv4 and IPv6 loopback forms.
	if !strings.HasPrefix(ip, "127.0.0.1:") && !strings.HasPrefix(ip, "[::1]:") {
		t.Fatalf("Expected local addr; got %q", ip)
	}
}
// blockingRemoteAddrListener wraps a listener so each accepted conn's
// RemoteAddr blocks until the test supplies an address, and publishes the
// wrapped conns on a channel for the test to control.
type blockingRemoteAddrListener struct {
	net.Listener
	conns chan<- net.Conn
}

func (l *blockingRemoteAddrListener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	brac := &blockingRemoteAddrConn{
		Conn:  c,
		addrs: make(chan net.Addr, 1),
	}
	l.conns <- brac
	return brac, nil
}

// blockingRemoteAddrConn defers RemoteAddr until a value is sent on addrs.
type blockingRemoteAddrConn struct {
	net.Conn
	addrs chan net.Addr
}

func (c *blockingRemoteAddrConn) RemoteAddr() net.Addr {
	return <-c.addrs
}
// Issue 12943
// A connection whose RemoteAddr blocks must not stall the server's accept
// loop: a second connection must still be accepted and served first.
func TestServerAllowsBlockingRemoteAddr(t *testing.T) {
	defer afterTest(t)
	ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		fmt.Fprintf(w, "RA:%s", r.RemoteAddr)
	}))
	conns := make(chan net.Conn)
	ts.Listener = &blockingRemoteAddrListener{
		Listener: ts.Listener,
		conns:    conns,
	}
	ts.Start()
	defer ts.Close()

	c := ts.Client()
	c.Timeout = time.Second
	// Force separate connection for each:
	c.Transport.(*Transport).DisableKeepAlives = true

	fetch := func(num int, response chan<- string) {
		resp, err := c.Get(ts.URL)
		if err != nil {
			t.Errorf("Request %d: %v", num, err)
			response <- ""
			return
		}
		defer resp.Body.Close()
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			t.Errorf("Request %d: %v", num, err)
			response <- ""
			return
		}
		response <- string(body)
	}

	// Start a request. The server will block on getting conn.RemoteAddr.
	response1c := make(chan string, 1)
	go fetch(1, response1c)

	// Wait for the server to accept it; grab the connection.
	conn1 := <-conns

	// Start another request and grab its connection
	response2c := make(chan string, 1)
	go fetch(2, response2c)
	var conn2 net.Conn

	select {
	case conn2 = <-conns:
	case <-time.After(time.Second):
		t.Fatal("Second Accept didn't happen")
	}

	// Send a response on connection 2.
	conn2.(*blockingRemoteAddrConn).addrs <- &net.TCPAddr{
		IP: net.ParseIP("12.12.12.12"), Port: 12}

	// ... and see it
	response2 := <-response2c
	if g, e := response2, "RA:12.12.12.12:12"; g != e {
		t.Fatalf("response 2 addr = %q; want %q", g, e)
	}

	// Finish the first response.
	conn1.(*blockingRemoteAddrConn).addrs <- &net.TCPAddr{
		IP: net.ParseIP("21.21.21.21"), Port: 21}

	// ... and see it
	response1 := <-response1c
	if g, e := response1, "RA:21.21.21.21:21"; g != e {
		t.Fatalf("response 1 addr = %q; want %q", g, e)
	}
}
// TestHeadResponses verifies that all MIME type sniffing and Content-Length
// counting of GET requests also happens on HEAD requests.
func TestHeadResponses_h1(t *testing.T) {
	testHeadResponses(t, h1Mode)
}

func TestHeadResponses_h2(t *testing.T) {
	testHeadResponses(t, h2Mode)
}
// testHeadResponses issues a HEAD request against a handler that writes a
// sniffable body and checks that the response carries the sniffed
// Content-Type and the computed Content-Length, but no body bytes.
func testHeadResponses(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)
	cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
		_, err := w.Write([]byte("<html>"))
		if err != nil {
			t.Errorf("ResponseWriter.Write: %v", err)
		}
		// Also exercise the ReaderFrom path
		_, err = io.Copy(w, strings.NewReader("789a"))
		if err != nil {
			t.Errorf("Copy(ResponseWriter, ...): %v", err)
		}
	}))
	defer cst.close()
	res, err := cst.c.Head(cst.ts.URL)
	if err != nil {
		t.Error(err)
	}
	if len(res.TransferEncoding) > 0 {
		t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding)
	}
	// "<html>" should have been sniffed as HTML.
	if ct := res.Header.Get("Content-Type"); ct != "text/html; charset=utf-8" {
		t.Errorf("Content-Type: %q; want text/html; charset=utf-8", ct)
	}
	// 6 bytes from Write plus 4 from the io.Copy.
	if v := res.ContentLength; v != 10 {
		t.Errorf("Content-Length: %d; want 10", v)
	}
	body, err := io.ReadAll(res.Body)
	if err != nil {
		t.Error(err)
	}
	if len(body) > 0 {
		t.Errorf("got unexpected body %q", string(body))
	}
}
// TestTLSHandshakeTimeout verifies that a client that connects to a TLS
// server but never starts the handshake is timed out (via ReadTimeout)
// and that the server logs a handshake timeout error.
func TestTLSHandshakeTimeout(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
	errc := make(chanWriter, 10) // but only expecting 1
	ts.Config.ReadTimeout = 250 * time.Millisecond
	ts.Config.ErrorLog = log.New(errc, "", 0)
	ts.StartTLS()
	defer ts.Close()
	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatalf("Dial: %v", err)
	}
	defer conn.Close()
	// Send nothing; just wait for the server to give up on us.
	var buf [1]byte
	n, err := conn.Read(buf[:])
	if err == nil || n != 0 {
		t.Errorf("Read = %d, %v; want an error and no bytes", n, err)
	}
	select {
	case v := <-errc:
		if !strings.Contains(v, "timeout") && !strings.Contains(v, "TLS handshake") {
			t.Errorf("expected a TLS handshake timeout error; got %q", v)
		}
	case <-time.After(5 * time.Second):
		t.Errorf("timeout waiting for logged error")
	}
}
// TestTLSServer verifies that handlers on a TLS server see a populated
// r.TLS with HandshakeComplete set, and that an idle (never-handshaking)
// TCP connection does not block the server from accepting others.
func TestTLSServer(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		if r.TLS != nil {
			w.Header().Set("X-TLS-Set", "true")
			if r.TLS.HandshakeComplete {
				w.Header().Set("X-TLS-HandshakeComplete", "true")
			}
		}
	}))
	ts.Config.ErrorLog = log.New(io.Discard, "", 0)
	defer ts.Close()
	// Connect an idle TCP connection to this server before we run
	// our real tests. This idle connection used to block forever
	// in the TLS handshake, preventing future connections from
	// being accepted. It may prevent future accidental blocking
	// in newConn.
	idleConn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatalf("Dial: %v", err)
	}
	defer idleConn.Close()
	if !strings.HasPrefix(ts.URL, "https://") {
		t.Errorf("expected test TLS server to start with https://, got %q", ts.URL)
		return
	}
	client := ts.Client()
	res, err := client.Get(ts.URL)
	if err != nil {
		t.Error(err)
		return
	}
	if res == nil {
		t.Errorf("got nil Response")
		return
	}
	defer res.Body.Close()
	if res.Header.Get("X-TLS-Set") != "true" {
		t.Errorf("expected X-TLS-Set response header")
		return
	}
	if res.Header.Get("X-TLS-HandshakeComplete") != "true" {
		t.Errorf("expected X-TLS-HandshakeComplete header")
	}
}
// TestServeTLS verifies that Server.ServeTLS with an explicit TLSConfig
// serves TLS and automatically enables HTTP/2 ("h2" is negotiated via ALPN).
func TestServeTLS(t *testing.T) {
	CondSkipHTTP2(t)
	// Not parallel: uses global test hooks.
	defer afterTest(t)
	defer SetTestHookServerServe(nil)
	cert, err := tls.X509KeyPair(testcert.LocalhostCert, testcert.LocalhostKey)
	if err != nil {
		t.Fatal(err)
	}
	tlsConf := &tls.Config{
		Certificates: []tls.Certificate{cert},
	}
	ln := newLocalListener(t)
	defer ln.Close()
	addr := ln.Addr().String()
	serving := make(chan bool, 1)
	// The hook fires once the server is actually serving.
	SetTestHookServerServe(func(s *Server, ln net.Listener) {
		serving <- true
	})
	handler := HandlerFunc(func(w ResponseWriter, r *Request) {})
	s := &Server{
		Addr:      addr,
		TLSConfig: tlsConf,
		Handler:   handler,
	}
	errc := make(chan error, 1)
	go func() { errc <- s.ServeTLS(ln, "", "") }()
	select {
	case err := <-errc:
		t.Fatalf("ServeTLS: %v", err)
	case <-serving:
	case <-time.After(5 * time.Second):
		t.Fatal("timeout")
	}
	c, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{
		InsecureSkipVerify: true,
		NextProtos:         []string{"h2", "http/1.1"},
	})
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	if got, want := c.ConnectionState().NegotiatedProtocol, "h2"; got != want {
		t.Errorf("NegotiatedProtocol = %q; want %q", got, want)
	}
	if got, want := c.ConnectionState().NegotiatedProtocolIsMutual, true; got != want {
		t.Errorf("NegotiatedProtocolIsMutual = %v; want %v", got, want)
	}
}
// Test that the HTTPS server nicely rejects plaintext HTTP/1.x requests
// with a "400 Bad Request" instead of dropping the connection silently.
func TestTLSServerRejectHTTPRequests(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		t.Error("unexpected HTTPS request")
	}))
	var errBuf bytes.Buffer
	ts.Config.ErrorLog = log.New(&errBuf, "", 0)
	defer ts.Close()
	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	// Speak plaintext HTTP on a port expecting a TLS ClientHello.
	io.WriteString(conn, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n")
	slurp, err := io.ReadAll(conn)
	if err != nil {
		t.Fatal(err)
	}
	const wantPrefix = "HTTP/1.0 400 Bad Request\r\n"
	if !strings.HasPrefix(string(slurp), wantPrefix) {
		t.Errorf("response = %q; wanted prefix %q", slurp, wantPrefix)
	}
}
// Issue 15908
// The three cases: no TLSConfig, a TLSConfig without "h2", and a TLSConfig
// that explicitly advertises "h2" in NextProtos.
func TestAutomaticHTTP2_Serve_NoTLSConfig(t *testing.T) {
	testAutomaticHTTP2_Serve(t, nil, true)
}

func TestAutomaticHTTP2_Serve_NonH2TLSConfig(t *testing.T) {
	testAutomaticHTTP2_Serve(t, &tls.Config{}, false)
}

func TestAutomaticHTTP2_Serve_H2TLSConfig(t *testing.T) {
	conf := &tls.Config{NextProtos: []string{"h2"}}
	testAutomaticHTTP2_Serve(t, conf, true)
}
// testAutomaticHTTP2_Serve checks whether Server.Serve auto-registers the
// "h2" TLSNextProto handler, depending on the server's TLSConfig.
// Serve is expected to fail immediately because the listener is closed;
// the interesting side effect is the TLSNextProto map.
func testAutomaticHTTP2_Serve(t *testing.T, tlsConf *tls.Config, wantH2 bool) {
	setParallel(t)
	defer afterTest(t)
	ln := newLocalListener(t)
	ln.Close() // immediately (not a defer!)
	var srv Server
	srv.TLSConfig = tlsConf
	if err := srv.Serve(ln); err == nil {
		t.Fatal("expected an error")
	}
	if gotH2 := srv.TLSNextProto["h2"] != nil; gotH2 != wantH2 {
		t.Errorf("http2 configured = %v; want %v", gotH2, wantH2)
	}
}
// TestAutomaticHTTP2_Serve_WithTLSConfig verifies that plain Serve (no TLS
// listener) still auto-enables HTTP/2 when the server's TLSConfig
// advertises "h2", as it would when handed to tls.NewListener.
func TestAutomaticHTTP2_Serve_WithTLSConfig(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	ln := newLocalListener(t)
	ln.Close() // immediately (not a defer!)
	var srv Server
	// Set the TLSConfig. In reality, this would be the
	// *tls.Config given to tls.NewListener.
	srv.TLSConfig = &tls.Config{NextProtos: []string{"h2"}}
	if err := srv.Serve(ln); err == nil {
		t.Fatal("expected an error")
	}
	if srv.TLSNextProto["h2"] == nil {
		t.Errorf("http2 wasn't automatically enabled")
	}
}
// The two certificate-provision styles: a static Certificates slice, and a
// GetCertificate callback. Both should trigger automatic HTTP/2.
func TestAutomaticHTTP2_ListenAndServe(t *testing.T) {
	cert, err := tls.X509KeyPair(testcert.LocalhostCert, testcert.LocalhostKey)
	if err != nil {
		t.Fatal(err)
	}
	conf := &tls.Config{Certificates: []tls.Certificate{cert}}
	testAutomaticHTTP2_ListenAndServe(t, conf)
}

func TestAutomaticHTTP2_ListenAndServe_GetCertificate(t *testing.T) {
	cert, err := tls.X509KeyPair(testcert.LocalhostCert, testcert.LocalhostKey)
	if err != nil {
		t.Fatal(err)
	}
	getCert := func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
		return &cert, nil
	}
	testAutomaticHTTP2_ListenAndServe(t, &tls.Config{GetCertificate: getCert})
}
// testAutomaticHTTP2_ListenAndServe starts ListenAndServeTLS on a port that
// was just freed (retrying a few times, since another process may grab the
// port in between) and verifies that ALPN negotiates "h2" automatically.
func testAutomaticHTTP2_ListenAndServe(t *testing.T, tlsConf *tls.Config) {
	CondSkipHTTP2(t)
	// Not parallel: uses global test hooks.
	defer afterTest(t)
	defer SetTestHookServerServe(nil)
	var ok bool
	var s *Server
	const maxTries = 5
	var ln net.Listener
Try:
	for try := 0; try < maxTries; try++ {
		// Grab a free port, then close it so ListenAndServeTLS can bind it.
		ln = newLocalListener(t)
		addr := ln.Addr().String()
		ln.Close()
		t.Logf("Got %v", addr)
		lnc := make(chan net.Listener, 1)
		SetTestHookServerServe(func(s *Server, ln net.Listener) {
			lnc <- ln
		})
		s = &Server{
			Addr:      addr,
			TLSConfig: tlsConf,
		}
		errc := make(chan error, 1)
		go func() { errc <- s.ListenAndServeTLS("", "") }()
		select {
		case err := <-errc:
			// Port was likely reused by someone else; retry.
			t.Logf("On try #%v: %v", try+1, err)
			continue
		case ln = <-lnc:
			ok = true
			t.Logf("Listening on %v", ln.Addr().String())
			break Try
		}
	}
	if !ok {
		t.Fatalf("Failed to start up after %d tries", maxTries)
	}
	defer ln.Close()
	c, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{
		InsecureSkipVerify: true,
		NextProtos:         []string{"h2", "http/1.1"},
	})
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	if got, want := c.ConnectionState().NegotiatedProtocol, "h2"; got != want {
		t.Errorf("NegotiatedProtocol = %q; want %q", got, want)
	}
	if got, want := c.ConnectionState().NegotiatedProtocolIsMutual, true; got != want {
		t.Errorf("NegotiatedProtocolIsMutual = %v; want %v", got, want)
	}
}
// serverExpectTest describes one case for TestServerExpect: the request
// body framing, the Expect header value, and the expected first status line.
type serverExpectTest struct {
	contentLength    int // of request body
	chunked          bool
	expectation      string // e.g. "100-continue"
	readBody         bool   // whether handler should read the body (if false, sends StatusUnauthorized)
	expectedResponse string // expected substring in first line of http response
}
// expectTest is a convenience constructor for the common (non-chunked)
// serverExpectTest cases.
func expectTest(contentLength int, expectation string, readBody bool, expectedResponse string) serverExpectTest {
	tc := serverExpectTest{}
	tc.contentLength = contentLength
	tc.expectation = expectation
	tc.readBody = readBody
	tc.expectedResponse = expectedResponse
	return tc
}
// serverExpectTests enumerates the Expect-header scenarios run by
// TestServerExpect.
var serverExpectTests = []serverExpectTest{
	// Normal 100-continues, case-insensitive.
	expectTest(100, "100-continue", true, "100 Continue"),
	expectTest(100, "100-cOntInUE", true, "100 Continue"),
	// No 100-continue.
	expectTest(100, "", true, "200 OK"),
	// 100-continue but requesting client to deny us,
	// so it never reads the body.
	expectTest(100, "100-continue", false, "401 Unauthorized"),
	// Likewise without 100-continue:
	expectTest(100, "", false, "401 Unauthorized"),
	// Non-standard expectations are failures
	expectTest(0, "a-pony", false, "417 Expectation Failed"),
	// Expect-100 requested but no body (is apparently okay: Issue 7625)
	expectTest(0, "100-continue", true, "200 OK"),
	// Expect-100 requested but handler doesn't read the body
	expectTest(0, "100-continue", false, "401 Unauthorized"),
	// Expect-100 continue with no body, but a chunked body.
	{
		expectation:      "100-continue",
		readBody:         true,
		chunked:          true,
		expectedResponse: "100 Continue",
	},
}
// Tests that the server responds to the "Expect" request header
// correctly.
// http2 test: TestServer_Response_Automatic100Continue
func TestServerExpect(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		// Note using r.FormValue("readbody") because for POST
		// requests that would read from r.Body, which we only
		// conditionally want to do.
		if strings.Contains(r.URL.RawQuery, "readbody=true") {
			io.ReadAll(r.Body)
			w.Write([]byte("Hi"))
		} else {
			w.WriteHeader(StatusUnauthorized)
		}
	}))
	defer ts.Close()
	// runTest speaks raw HTTP over a TCP connection so the test controls
	// exactly when (and whether) the body is sent relative to the headers.
	runTest := func(test serverExpectTest) {
		conn, err := net.Dial("tcp", ts.Listener.Addr().String())
		if err != nil {
			t.Fatalf("Dial: %v", err)
		}
		defer conn.Close()
		// Only send the body immediately if we're acting like an HTTP client
		// that doesn't send 100-continue expectations.
		writeBody := test.contentLength != 0 && strings.ToLower(test.expectation) != "100-continue"
		go func() {
			contentLen := fmt.Sprintf("Content-Length: %d", test.contentLength)
			if test.chunked {
				contentLen = "Transfer-Encoding: chunked"
			}
			_, err := fmt.Fprintf(conn, "POST /?readbody=%v HTTP/1.1\r\n"+
				"Connection: close\r\n"+
				"%s\r\n"+
				"Expect: %s\r\nHost: foo\r\n\r\n",
				test.readBody, contentLen, test.expectation)
			if err != nil {
				t.Errorf("On test %#v, error writing request headers: %v", test, err)
				return
			}
			if writeBody {
				var targ io.WriteCloser = struct {
					io.Writer
					io.Closer
				}{
					conn,
					io.NopCloser(nil),
				}
				if test.chunked {
					targ = httputil.NewChunkedWriter(conn)
				}
				body := strings.Repeat("A", test.contentLength)
				_, err = fmt.Fprint(targ, body)
				if err == nil {
					err = targ.Close()
				}
				if err != nil {
					if !test.readBody {
						// Server likely already hung up on us.
						// See larger comment below.
						t.Logf("On test %#v, acceptable error writing request body: %v", test, err)
						return
					}
					t.Errorf("On test %#v, error writing request body: %v", test, err)
				}
			}
		}()
		bufr := bufio.NewReader(conn)
		line, err := bufr.ReadString('\n')
		if err != nil {
			if writeBody && !test.readBody {
				// This is an acceptable failure due to a possible TCP race:
				// We were still writing data and the server hung up on us. A TCP
				// implementation may send a RST if our request body data was known
				// to be lost, which may trigger our reads to fail.
				// See RFC 1122 page 88.
				t.Logf("On test %#v, acceptable error from ReadString: %v", test, err)
				return
			}
			t.Fatalf("On test %#v, ReadString: %v", test, err)
		}
		if !strings.Contains(line, test.expectedResponse) {
			t.Errorf("On test %#v, got first line = %q; want %q", test, line, test.expectedResponse)
		}
	}
	for _, test := range serverExpectTests {
		runTest(test)
	}
}
// Under a ~256KB (maxPostHandlerReadBytes) threshold, the server
// should consume client request bodies that a handler didn't read.
func TestServerUnreadRequestBodyLittle(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	conn := new(testConn)
	body := strings.Repeat("x", 100<<10) // 100 KB, below the threshold
	conn.readBuf.Write([]byte(fmt.Sprintf(
		"POST / HTTP/1.1\r\n"+
			"Host: test\r\n"+
			"Content-Length: %d\r\n"+
			"\r\n", len(body))))
	conn.readBuf.Write([]byte(body))
	done := make(chan bool)
	readBufLen := func() int {
		conn.readMu.Lock()
		defer conn.readMu.Unlock()
		return conn.readBuf.Len()
	}
	ls := &oneConnListener{conn}
	go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
		defer close(done)
		// Body should still be unread when the handler starts...
		if bufLen := readBufLen(); bufLen < len(body)/2 {
			t.Errorf("on request, read buffer length is %d; expected about 100 KB", bufLen)
		}
		rw.WriteHeader(200)
		rw.(Flusher).Flush()
		// ...and fully drained by the server once the response is flushed.
		if g, e := readBufLen(), 0; g != e {
			t.Errorf("after WriteHeader, read buffer length is %d; want %d", g, e)
		}
		if c := rw.Header().Get("Connection"); c != "" {
			t.Errorf(`Connection header = %q; want ""`, c)
		}
	}))
	<-done
}
// Over a ~256KB (maxPostHandlerReadBytes) threshold, the server
// should ignore client request bodies that a handler didn't read
// and close the connection.
func TestServerUnreadRequestBodyLarge(t *testing.T) {
	setParallel(t)
	if testing.Short() && testenv.Builder() == "" {
		t.Log("skipping in short mode")
	}
	conn := new(testConn)
	body := strings.Repeat("x", 1<<20) // 1 MB, above the threshold
	conn.readBuf.Write([]byte(fmt.Sprintf(
		"POST / HTTP/1.1\r\n"+
			"Host: test\r\n"+
			"Content-Length: %d\r\n"+
			"\r\n", len(body))))
	conn.readBuf.Write([]byte(body))
	conn.closec = make(chan bool, 1)
	ls := &oneConnListener{conn}
	go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
		if conn.readBuf.Len() < len(body)/2 {
			t.Errorf("on request, read buffer length is %d; expected about 1MB", conn.readBuf.Len())
		}
		rw.WriteHeader(200)
		rw.(Flusher).Flush()
		// The server must NOT have slurped the oversized body.
		if conn.readBuf.Len() < len(body)/2 {
			t.Errorf("post-WriteHeader, read buffer length is %d; expected about 1MB", conn.readBuf.Len())
		}
	}))
	<-conn.closec
	if res := conn.writeBuf.String(); !strings.Contains(res, "Connection: close") {
		t.Errorf("Expected a Connection: close header; got response: %s", res)
	}
}
// handlerBodyCloseTest describes one case for TestHandlerBodyClose: how
// the request body is framed and what Body.Close is expected to do.
type handlerBodyCloseTest struct {
	bodySize      int
	bodyChunked   bool
	reqConnClose  bool
	wantEOFSearch bool // should Handler's Body.Close do Reads, looking for EOF?
	wantNextReq   bool // should it find the next request on the same conn?
}
// connectionHeader returns the "Connection: close" request header line for
// cases that ask for it, and the empty string otherwise.
func (t handlerBodyCloseTest) connectionHeader() string {
	if !t.reqConnClose {
		return ""
	}
	return "Connection: close\r\n"
}
// handlerBodyCloseTests covers the matrix of small/large bodies,
// Content-Length vs chunked framing, and Connection: close.
var handlerBodyCloseTests = [...]handlerBodyCloseTest{
	// Small enough to slurp past to the next request +
	// has Content-Length.
	0: {
		bodySize:      20 << 10,
		bodyChunked:   false,
		reqConnClose:  false,
		wantEOFSearch: true,
		wantNextReq:   true,
	},
	// Small enough to slurp past to the next request +
	// is chunked.
	1: {
		bodySize:      20 << 10,
		bodyChunked:   true,
		reqConnClose:  false,
		wantEOFSearch: true,
		wantNextReq:   true,
	},
	// Small enough to slurp past to the next request +
	// has Content-Length +
	// declares Connection: close (so pointless to read more).
	2: {
		bodySize:      20 << 10,
		bodyChunked:   false,
		reqConnClose:  true,
		wantEOFSearch: false,
		wantNextReq:   false,
	},
	// Small enough to slurp past to the next request +
	// declares Connection: close,
	// but chunked, so it might have trailers.
	// TODO: maybe skip this search if no trailers were declared
	// in the headers.
	3: {
		bodySize:      20 << 10,
		bodyChunked:   true,
		reqConnClose:  true,
		wantEOFSearch: true,
		wantNextReq:   false,
	},
	// Big with Content-Length, so give up immediately if we know it's too big.
	4: {
		bodySize:      1 << 20,
		bodyChunked:   false, // has a Content-Length
		reqConnClose:  false,
		wantEOFSearch: false,
		wantNextReq:   false,
	},
	// Big chunked, so read a bit before giving up.
	5: {
		bodySize:      1 << 20,
		bodyChunked:   true,
		reqConnClose:  false,
		wantEOFSearch: true,
		wantNextReq:   false,
	},
	// Big with Connection: close, but chunked, so search for trailers.
	// TODO: maybe skip this search if no trailers were declared
	// in the headers.
	6: {
		bodySize:      1 << 20,
		bodyChunked:   true,
		reqConnClose:  true,
		wantEOFSearch: true,
		wantNextReq:   false,
	},
	// Big with Connection: close, so don't do any reads on Close.
	// With Content-Length.
	7: {
		bodySize:      1 << 20,
		bodyChunked:   false,
		reqConnClose:  true,
		wantEOFSearch: false,
		wantNextReq:   false,
	},
}
// TestHandlerBodyClose runs each handlerBodyCloseTests case through
// testHandlerBodyClose.
func TestHandlerBodyClose(t *testing.T) {
	setParallel(t)
	if testing.Short() && testenv.Builder() == "" {
		t.Skip("skipping in -short mode")
	}
	for i, tc := range handlerBodyCloseTests {
		testHandlerBodyClose(t, i, tc)
	}
}
// testHandlerBodyClose feeds one framed request (optionally followed by a
// second GET) into a testConn, has the handler Close the body without
// reading it, and infers from the read-buffer size whether Close searched
// for EOF and whether a second request was served.
func testHandlerBodyClose(t *testing.T, i int, tt handlerBodyCloseTest) {
	conn := new(testConn)
	body := strings.Repeat("x", tt.bodySize)
	if tt.bodyChunked {
		conn.readBuf.WriteString("POST / HTTP/1.1\r\n" +
			"Host: test\r\n" +
			tt.connectionHeader() +
			"Transfer-Encoding: chunked\r\n" +
			"\r\n")
		cw := internal.NewChunkedWriter(&conn.readBuf)
		io.WriteString(cw, body)
		cw.Close()
		conn.readBuf.WriteString("\r\n")
	} else {
		conn.readBuf.Write([]byte(fmt.Sprintf(
			"POST / HTTP/1.1\r\n"+
				"Host: test\r\n"+
				tt.connectionHeader()+
				"Content-Length: %d\r\n"+
				"\r\n", len(body))))
		conn.readBuf.Write([]byte(body))
	}
	if !tt.reqConnClose {
		conn.readBuf.WriteString("GET / HTTP/1.1\r\nHost: test\r\n\r\n")
	}
	conn.closec = make(chan bool, 1)
	readBufLen := func() int {
		conn.readMu.Lock()
		defer conn.readMu.Unlock()
		return conn.readBuf.Len()
	}
	ls := &oneConnListener{conn}
	var numReqs int
	var size0, size1 int // read-buffer size before/after Body.Close
	go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
		numReqs++
		if numReqs == 1 {
			size0 = readBufLen()
			req.Body.Close()
			size1 = readBufLen()
		}
	}))
	<-conn.closec
	if numReqs < 1 || numReqs > 2 {
		t.Fatalf("%d. bug in test. unexpected number of requests = %d", i, numReqs)
	}
	// If Close consumed bytes, it must have searched for EOF/trailers.
	didSearch := size0 != size1
	if didSearch != tt.wantEOFSearch {
		t.Errorf("%d. did EOF search = %v; want %v (size went from %d to %d)", i, didSearch, !didSearch, size0, size1)
	}
	if tt.wantNextReq && numReqs != 2 {
		t.Errorf("%d. numReq = %d; want 2", i, numReqs)
	}
}
// testHandlerBodyConsumer represents a function injected into a test handler to
// vary work done on a request Body.
type testHandlerBodyConsumer struct {
	name string              // label used in test failure messages
	f    func(io.ReadCloser) // operation applied to the request body
}
// testHandlerBodyConsumers: ignore the body, close it, or drain it fully.
var testHandlerBodyConsumers = []testHandlerBodyConsumer{
	{"nil", func(io.ReadCloser) {}},
	{"close", func(r io.ReadCloser) { r.Close() }},
	{"discard", func(r io.ReadCloser) { io.Copy(io.Discard, r) }},
}
// TestRequestBodyReadErrorClosesConnection verifies that a malformed
// chunked request body closes the connection, so the garbage that follows
// is never parsed as a second ("/secret") request.
func TestRequestBodyReadErrorClosesConnection(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	for _, handler := range testHandlerBodyConsumers {
		conn := new(testConn)
		conn.readBuf.WriteString("POST /public HTTP/1.1\r\n" +
			"Host: test\r\n" +
			"Transfer-Encoding: chunked\r\n" +
			"\r\n" +
			"hax\r\n" + // Invalid chunked encoding
			"GET /secret HTTP/1.1\r\n" +
			"Host: test\r\n" +
			"\r\n")
		conn.closec = make(chan bool, 1)
		ls := &oneConnListener{conn}
		var numReqs int
		go Serve(ls, HandlerFunc(func(_ ResponseWriter, req *Request) {
			numReqs++
			if strings.Contains(req.URL.Path, "secret") {
				t.Error("Request for /secret encountered, should not have happened.")
			}
			handler.f(req.Body)
		}))
		<-conn.closec
		if numReqs != 1 {
			t.Errorf("Handler %v: got %d reqs; want 1", handler.name, numReqs)
		}
	}
}
// TestInvalidTrailerClosesConnection verifies that an invalid chunked
// trailer closes the connection, so the following bytes are never parsed
// as a second ("/secret") request.
func TestInvalidTrailerClosesConnection(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	for _, handler := range testHandlerBodyConsumers {
		conn := new(testConn)
		conn.readBuf.WriteString("POST /public HTTP/1.1\r\n" +
			"Host: test\r\n" +
			"Trailer: hack\r\n" +
			"Transfer-Encoding: chunked\r\n" +
			"\r\n" +
			"3\r\n" +
			"hax\r\n" +
			"0\r\n" +
			"I'm not a valid trailer\r\n" +
			"GET /secret HTTP/1.1\r\n" +
			"Host: test\r\n" +
			"\r\n")
		conn.closec = make(chan bool, 1)
		ln := &oneConnListener{conn}
		var numReqs int
		go Serve(ln, HandlerFunc(func(_ ResponseWriter, req *Request) {
			numReqs++
			if strings.Contains(req.URL.Path, "secret") {
				t.Errorf("Handler %s, Request for /secret encountered, should not have happened.", handler.name)
			}
			handler.f(req.Body)
		}))
		<-conn.closec
		if numReqs != 1 {
			t.Errorf("Handler %s: got %d reqs; want 1", handler.name, numReqs)
		}
	}
}
// slowTestConn is a net.Conn that provides a means to simulate parts of a
// request being received piecemeal. Deadlines can be set and enforced in both
// Read and Write.
type slowTestConn struct {
	// over multiple calls to Read, time.Durations are slept, strings are read.
	script []any
	closec chan bool // receives true (non-blocking send) when Close is called
	mu     sync.Mutex // guards rd/wd
	rd, wd time.Time  // read, write deadline
	noopConn
}
// SetDeadline sets both the read and write deadlines. It never fails,
// since the individual setters always return nil.
func (c *slowTestConn) SetDeadline(t time.Time) error {
	if err := c.SetReadDeadline(t); err != nil {
		return err
	}
	return c.SetWriteDeadline(t)
}
// SetReadDeadline records the read deadline under the mutex; always nil.
func (c *slowTestConn) SetReadDeadline(t time.Time) error {
	c.mu.Lock()
	c.rd = t
	c.mu.Unlock()
	return nil
}
// SetWriteDeadline records the write deadline under the mutex; always nil.
func (c *slowTestConn) SetWriteDeadline(t time.Time) error {
	c.mu.Lock()
	c.wd = t
	c.mu.Unlock()
	return nil
}
// Read plays back the script: time.Duration entries are slept (respecting
// the read deadline), string entries are copied into b. Returns
// syscall.ETIMEDOUT once the deadline has passed and io.EOF when the
// script is exhausted.
func (c *slowTestConn) Read(b []byte) (n int, err error) {
	c.mu.Lock()
	defer c.mu.Unlock()
restart:
	if !c.rd.IsZero() && time.Now().After(c.rd) {
		return 0, syscall.ETIMEDOUT
	}
	if len(c.script) == 0 {
		return 0, io.EOF
	}
	switch cue := c.script[0].(type) {
	case time.Duration:
		if !c.rd.IsZero() {
			// If the deadline falls in the middle of our sleep window, deduct
			// part of the sleep, then return a timeout.
			if remaining := time.Until(c.rd); remaining < cue {
				c.script[0] = cue - remaining
				time.Sleep(remaining)
				return 0, syscall.ETIMEDOUT
			}
		}
		c.script = c.script[1:]
		time.Sleep(cue)
		goto restart
	case string:
		n = copy(b, cue)
		// If cue is too big for the buffer, leave the end for the next Read.
		if len(cue) > n {
			c.script[0] = cue[n:]
		} else {
			c.script = c.script[1:]
		}
	default:
		panic("unknown cue in slowTestConn script")
	}
	return
}
// Close signals on closec without blocking (the channel may be full or
// unbuffered with no receiver); there are no real resources to release.
func (c *slowTestConn) Close() error {
	select {
	case c.closec <- true:
	default:
	}
	return nil
}
// Write discards b, reporting success unless the write deadline has
// already passed, in which case it reports syscall.ETIMEDOUT.
func (c *slowTestConn) Write(b []byte) (int, error) {
	deadlinePassed := !c.wd.IsZero() && time.Now().After(c.wd)
	if deadlinePassed {
		return 0, syscall.ETIMEDOUT
	}
	return len(b), nil
}
// TestRequestBodyTimeoutClosesConnection verifies that when the server's
// ReadTimeout fires mid-body, the connection is closed and the bytes that
// follow are never parsed as a second ("/secret") request.
func TestRequestBodyTimeoutClosesConnection(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	defer afterTest(t)
	for _, handler := range testHandlerBodyConsumers {
		conn := &slowTestConn{
			script: []any{
				"POST /public HTTP/1.1\r\n" +
					"Host: test\r\n" +
					"Content-Length: 10000\r\n" +
					"\r\n",
				"foo bar baz",
				600 * time.Millisecond, // Request deadline should hit here
				"GET /secret HTTP/1.1\r\n" +
					"Host: test\r\n" +
					"\r\n",
			},
			closec: make(chan bool, 1),
		}
		ls := &oneConnListener{conn}
		var numReqs int
		s := Server{
			Handler: HandlerFunc(func(_ ResponseWriter, req *Request) {
				numReqs++
				if strings.Contains(req.URL.Path, "secret") {
					t.Error("Request for /secret encountered, should not have happened.")
				}
				handler.f(req.Body)
			}),
			ReadTimeout: 400 * time.Millisecond, // shorter than the scripted sleep
		}
		go s.Serve(ls)
		<-conn.closec
		if numReqs != 1 {
			t.Errorf("Handler %v: got %d reqs; want 1", handler.name, numReqs)
		}
	}
}
// cancelableTimeoutContext overwrites the error message to DeadlineExceeded
// so that cancellation (driven by the test) looks like a timeout to the
// TimeoutHandler under test.
type cancelableTimeoutContext struct {
	context.Context
}
// Err reports context.DeadlineExceeded whenever the wrapped context has
// any error, and nil otherwise.
func (c cancelableTimeoutContext) Err() error {
	if c.Context.Err() == nil {
		return nil
	}
	return context.DeadlineExceeded
}
func TestTimeoutHandler_h1(t *testing.T) { testTimeoutHandler(t, h1Mode) }
func TestTimeoutHandler_h2(t *testing.T) { testTimeoutHandler(t, h2Mode) }

// testTimeoutHandler checks both sides of TimeoutHandler: a request that
// completes in time succeeds, and after the (test-controlled) timeout
// fires the client gets 503 with the timeout body while a late Write in
// the handler returns ErrHandlerTimeout instead of panicking.
func testTimeoutHandler(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)
	sendHi := make(chan bool, 1)
	writeErrors := make(chan error, 1)
	sayHi := HandlerFunc(func(w ResponseWriter, r *Request) {
		<-sendHi
		_, werr := w.Write([]byte("hi"))
		writeErrors <- werr
	})
	ctx, cancel := context.WithCancel(context.Background())
	h := NewTestTimeoutHandler(sayHi, cancelableTimeoutContext{ctx})
	cst := newClientServerTest(t, h2, h)
	defer cst.close()
	// Succeed without timing out:
	sendHi <- true
	res, err := cst.c.Get(cst.ts.URL)
	if err != nil {
		t.Error(err)
	}
	if g, e := res.StatusCode, StatusOK; g != e {
		t.Errorf("got res.StatusCode %d; expected %d", g, e)
	}
	body, _ := io.ReadAll(res.Body)
	if g, e := string(body), "hi"; g != e {
		t.Errorf("got body %q; expected %q", g, e)
	}
	if g := <-writeErrors; g != nil {
		t.Errorf("got unexpected Write error on first request: %v", g)
	}
	// Times out:
	cancel()
	res, err = cst.c.Get(cst.ts.URL)
	if err != nil {
		t.Error(err)
	}
	if g, e := res.StatusCode, StatusServiceUnavailable; g != e {
		t.Errorf("got res.StatusCode %d; expected %d", g, e)
	}
	body, _ = io.ReadAll(res.Body)
	if !strings.Contains(string(body), "<title>Timeout</title>") {
		t.Errorf("expected timeout body; got %q", string(body))
	}
	if g, w := res.Header.Get("Content-Type"), "text/html; charset=utf-8"; g != w {
		t.Errorf("response content-type = %q; want %q", g, w)
	}
	// Now make the previously-timed out handler speak again,
	// which verifies the panic is handled:
	sendHi <- true
	if g, e := <-writeErrors, ErrHandlerTimeout; g != e {
		t.Errorf("expected Write error of %v; got %v", e, g)
	}
}
// See issues 8209 and 8414.
// TestTimeoutHandlerRace hammers a TimeoutHandler with handlers of varying
// duration so responses race the timeout; the server must not panic.
func TestTimeoutHandlerRace(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	delayHi := HandlerFunc(func(w ResponseWriter, r *Request) {
		// The request path encodes how many 1ms write/sleep rounds to do.
		ms, _ := strconv.Atoi(r.URL.Path[1:])
		if ms == 0 {
			ms = 1
		}
		for i := 0; i < ms; i++ {
			w.Write([]byte("hi"))
			time.Sleep(time.Millisecond)
		}
	})
	ts := httptest.NewServer(TimeoutHandler(delayHi, 20*time.Millisecond, ""))
	defer ts.Close()
	c := ts.Client()
	var wg sync.WaitGroup
	gate := make(chan bool, 10) // bounds concurrent in-flight requests
	n := 50
	if testing.Short() {
		n = 10
		gate = make(chan bool, 3)
	}
	for i := 0; i < n; i++ {
		gate <- true
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-gate }()
			res, err := c.Get(fmt.Sprintf("%s/%d", ts.URL, rand.Intn(50)))
			if err == nil {
				io.Copy(io.Discard, res.Body)
				res.Body.Close()
			}
		}()
	}
	wg.Wait()
}
// See issues 8209 and 8414.
// Both issues involved panics in the implementation of TimeoutHandler.
// Here WriteHeader races a nanosecond timeout; the server must not panic.
func TestTimeoutHandlerRaceHeader(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	delay204 := HandlerFunc(func(w ResponseWriter, r *Request) {
		w.WriteHeader(204)
	})
	ts := httptest.NewServer(TimeoutHandler(delay204, time.Nanosecond, ""))
	defer ts.Close()
	var wg sync.WaitGroup
	gate := make(chan bool, 50) // bounds concurrent in-flight requests
	n := 500
	if testing.Short() {
		n = 10
	}
	c := ts.Client()
	for i := 0; i < n; i++ {
		gate <- true
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-gate }()
			res, err := c.Get(ts.URL)
			if err != nil {
				// We see ECONNRESET from the connection occasionally,
				// and that's OK: this test is checking that the server does not panic.
				t.Log(err)
				return
			}
			defer res.Body.Close()
			io.Copy(io.Discard, res.Body)
		}()
	}
	wg.Wait()
}
// Issue 9162
// Like testTimeoutHandler, but the handler sets a response header before
// blocking, exercising the header-copy path of TimeoutHandler on timeout.
func TestTimeoutHandlerRaceHeaderTimeout(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	sendHi := make(chan bool, 1)
	writeErrors := make(chan error, 1)
	sayHi := HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Content-Type", "text/plain")
		<-sendHi
		_, werr := w.Write([]byte("hi"))
		writeErrors <- werr
	})
	ctx, cancel := context.WithCancel(context.Background())
	h := NewTestTimeoutHandler(sayHi, cancelableTimeoutContext{ctx})
	cst := newClientServerTest(t, h1Mode, h)
	defer cst.close()
	// Succeed without timing out:
	sendHi <- true
	res, err := cst.c.Get(cst.ts.URL)
	if err != nil {
		t.Error(err)
	}
	if g, e := res.StatusCode, StatusOK; g != e {
		t.Errorf("got res.StatusCode %d; expected %d", g, e)
	}
	body, _ := io.ReadAll(res.Body)
	if g, e := string(body), "hi"; g != e {
		t.Errorf("got body %q; expected %q", g, e)
	}
	if g := <-writeErrors; g != nil {
		t.Errorf("got unexpected Write error on first request: %v", g)
	}
	// Times out:
	cancel()
	res, err = cst.c.Get(cst.ts.URL)
	if err != nil {
		t.Error(err)
	}
	if g, e := res.StatusCode, StatusServiceUnavailable; g != e {
		t.Errorf("got res.StatusCode %d; expected %d", g, e)
	}
	body, _ = io.ReadAll(res.Body)
	if !strings.Contains(string(body), "<title>Timeout</title>") {
		t.Errorf("expected timeout body; got %q", string(body))
	}
	// Now make the previously-timed out handler speak again,
	// which verifies the panic is handled:
	sendHi <- true
	if g, e := <-writeErrors, ErrHandlerTimeout; g != e {
		t.Errorf("expected Write error of %v; got %v", e, g)
	}
}
// Issue 14568.
// The timeout must be measured from when the request starts being served,
// not from when the TimeoutHandler was constructed.
func TestTimeoutHandlerStartTimerWhenServing(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping sleeping test in -short mode")
	}
	defer afterTest(t)
	var handler HandlerFunc = func(w ResponseWriter, _ *Request) {
		w.WriteHeader(StatusNoContent)
	}
	timeout := 300 * time.Millisecond
	ts := httptest.NewServer(TimeoutHandler(handler, timeout, ""))
	defer ts.Close()
	c := ts.Client()
	// Issue was caused by the timeout handler starting the timer when
	// was created, not when the request. So wait for more than the timeout
	// to ensure that's not the case.
	time.Sleep(2 * timeout)
	res, err := c.Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	defer res.Body.Close()
	if res.StatusCode != StatusNoContent {
		t.Errorf("got res.StatusCode %d, want %v", res.StatusCode, StatusNoContent)
	}
}
// TestTimeoutHandlerContextCanceled verifies that when the TimeoutHandler's
// context is canceled before the request is served, the client gets 503
// with an empty body and the handler's Write eventually fails with
// context.Canceled.
//
// Fix: the final t.Errorf used the float verb %g on an error value, which
// renders as %!g(...); use %v for errors.
func TestTimeoutHandlerContextCanceled(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	writeErrors := make(chan error, 1)
	sayHi := HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Content-Type", "text/plain")
		var err error
		// The request context has already been canceled, but
		// retry the write for a while to give the timeout handler
		// a chance to notice.
		for i := 0; i < 100; i++ {
			_, err = w.Write([]byte("a"))
			if err != nil {
				break
			}
			time.Sleep(1 * time.Millisecond)
		}
		writeErrors <- err
	})
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // canceled before the request is ever served
	h := NewTestTimeoutHandler(sayHi, ctx)
	cst := newClientServerTest(t, h1Mode, h)
	defer cst.close()
	res, err := cst.c.Get(cst.ts.URL)
	if err != nil {
		t.Error(err)
	}
	if g, e := res.StatusCode, StatusServiceUnavailable; g != e {
		t.Errorf("got res.StatusCode %d; expected %d", g, e)
	}
	body, _ := io.ReadAll(res.Body)
	if g, e := string(body), ""; g != e {
		t.Errorf("got body %q; expected %q", g, e)
	}
	if g, e := <-writeErrors, context.Canceled; g != e {
		t.Errorf("got unexpected Write in handler: %v, want %v", g, e)
	}
}
// https://golang.org/issue/15948
// A handler that writes nothing and returns within the timeout must yield
// a 200 OK, not a timeout response.
func TestTimeoutHandlerEmptyResponse(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	noop := HandlerFunc(func(w ResponseWriter, _ *Request) {
		// No response.
	})
	const timeout = 300 * time.Millisecond
	ts := httptest.NewServer(TimeoutHandler(noop, timeout, ""))
	defer ts.Close()
	res, err := ts.Client().Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	defer res.Body.Close()
	if got := res.StatusCode; got != StatusOK {
		t.Errorf("got res.StatusCode %d, want %v", got, StatusOK)
	}
}
// https://golang.org/issues/22084
// A panic inside a handler wrapped by TimeoutHandler must still be
// recovered like any other handler panic.
func TestTimeoutHandlerPanicRecovery(t *testing.T) {
	wrap := func(h Handler) Handler {
		return TimeoutHandler(h, time.Second, "")
	}
	testHandlerPanic(t, false, false, wrap, "intentional death for testing")
}
// TestRedirectBadPath feeds Redirect a request whose URL path lacks a
// leading slash. This used to crash; it's not valid input, but it
// shouldn't panic — the recorder should simply see the 304.
func TestRedirectBadPath(t *testing.T) {
	rec := httptest.NewRecorder()
	badURL := &url.URL{
		Scheme: "http",
		Path:   "not-empty-but-no-leading-slash", // bogus
	}
	req := &Request{Method: "GET", URL: badURL}
	Redirect(rec, req, "", 304)
	if got := rec.Code; got != 304 {
		t.Errorf("Code = %d; want 304", got)
	}
}
// Test different URL formats and schemes
//
// Each case redirects a request for http://example.com/qux/ to the given
// target and checks the resulting Location header. Relative targets are
// resolved against the request path; paths are cleaned, but query strings
// must be left untouched; non-ASCII path bytes are percent-encoded.
func TestRedirect(t *testing.T) {
	req, _ := NewRequest("GET", "http://example.com/qux/", nil)
	var tests = []struct {
		in   string // redirect target passed to Redirect
		want string // expected Location header
	}{
		// normal http
		{"http://foobar.com/baz", "http://foobar.com/baz"},
		// normal https
		{"https://foobar.com/baz", "https://foobar.com/baz"},
		// custom scheme
		{"test://foobar.com/baz", "test://foobar.com/baz"},
		// schemeless
		{"//foobar.com/baz", "//foobar.com/baz"},
		// relative to the root
		{"/foobar.com/baz", "/foobar.com/baz"},
		// relative to the current path
		{"foobar.com/baz", "/qux/foobar.com/baz"},
		// relative to the current path (+ going upwards)
		{"../quux/foobar.com/baz", "/quux/foobar.com/baz"},
		// incorrect number of slashes
		{"///foobar.com/baz", "/foobar.com/baz"},
		// Verifies we don't path.Clean() on the wrong parts in redirects:
		{"/foo?next=http://bar.com/", "/foo?next=http://bar.com/"},
		{"http://localhost:8080/_ah/login?continue=http://localhost:8080/",
			"http://localhost:8080/_ah/login?continue=http://localhost:8080/"},
		{"/фубар", "/%d1%84%d1%83%d0%b1%d0%b0%d1%80"},
		{"http://foo.com/фубар", "http://foo.com/%d1%84%d1%83%d0%b1%d0%b0%d1%80"},
	}
	for _, tt := range tests {
		rec := httptest.NewRecorder()
		Redirect(rec, req, tt.in, 302)
		if got, want := rec.Code, 302; got != want {
			t.Errorf("Redirect(%q) generated status code %v; want %v", tt.in, got, want)
		}
		if got := rec.Header().Get("Location"); got != tt.want {
			t.Errorf("Redirect(%q) generated Location header %q; want %q", tt.in, got, tt.want)
		}
	}
}
// Test that Redirect sets Content-Type header for GET and HEAD requests
// and writes a short HTML body, unless the request already has a Content-Type header.
func TestRedirectContentTypeAndBody(t *testing.T) {
	// ctHeader wraps a Values slice so the table can distinguish between
	// "no Content-Type pre-set" (nil *ctHeader) and "pre-set, possibly
	// empty or nil slice" (non-nil *ctHeader).
	type ctHeader struct {
		Values []string
	}
	var tests = []struct {
		method   string
		ct       *ctHeader // Optional Content-Type header to set.
		wantCT   string
		wantBody string
	}{
		{MethodGet, nil, "text/html; charset=utf-8", "<a href=\"/foo\">Found</a>.\n\n"},
		{MethodHead, nil, "text/html; charset=utf-8", ""},
		{MethodPost, nil, "", ""},
		{MethodDelete, nil, "", ""},
		{"foo", nil, "", ""},
		{MethodGet, &ctHeader{[]string{"application/test"}}, "application/test", ""},
		{MethodGet, &ctHeader{[]string{}}, "", ""},
		{MethodGet, &ctHeader{nil}, "", ""},
	}
	for _, tt := range tests {
		req := httptest.NewRequest(tt.method, "http://example.com/qux/", nil)
		rec := httptest.NewRecorder()
		if tt.ct != nil {
			// Pre-populate the response header; Redirect must then leave
			// Content-Type alone and skip writing the HTML body.
			rec.Header()["Content-Type"] = tt.ct.Values
		}
		Redirect(rec, req, "/foo", 302)
		if got, want := rec.Code, 302; got != want {
			t.Errorf("Redirect(%q, %#v) generated status code %v; want %v", tt.method, tt.ct, got, want)
		}
		if got, want := rec.Header().Get("Content-Type"), tt.wantCT; got != want {
			t.Errorf("Redirect(%q, %#v) generated Content-Type header %q; want %q", tt.method, tt.ct, got, want)
		}
		resp := rec.Result()
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			t.Fatal(err)
		}
		if got, want := string(body), tt.wantBody; got != want {
			t.Errorf("Redirect(%q, %#v) generated Body %q; want %q", tt.method, tt.ct, got, want)
		}
	}
}
// TestZeroLengthPostAndResponse exercises an optimization done by the Transport:
// when there is no body (either because the method doesn't permit a body, or an
// explicit Content-Length of zero is present), then the transport can re-use the
// connection immediately. But when it re-uses the connection, it typically closes
// the previous request's body, which is not optimal for zero-lengthed bodies,
// as the client would then see http.ErrBodyReadAfterClose and not 0, io.EOF.
//
// HTTP/1 and HTTP/2 variants of the same scenario.
func TestZeroLengthPostAndResponse_h1(t *testing.T) {
	testZeroLengthPostAndResponse(t, h1Mode)
}
func TestZeroLengthPostAndResponse_h2(t *testing.T) {
	testZeroLengthPostAndResponse(t, h2Mode)
}
// testZeroLengthPostAndResponse POSTs an explicitly zero-length body five
// times on the same client and verifies both the handler and the client
// observe exactly zero body bytes each time.
func testZeroLengthPostAndResponse(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)
	cst := newClientServerTest(t, h2, HandlerFunc(func(rw ResponseWriter, r *Request) {
		all, err := io.ReadAll(r.Body)
		if err != nil {
			t.Fatalf("handler ReadAll: %v", err)
		}
		if len(all) != 0 {
			t.Errorf("handler got %d bytes; expected 0", len(all))
		}
		rw.Header().Set("Content-Length", "0")
	}))
	defer cst.close()
	req, err := NewRequest("POST", cst.ts.URL, strings.NewReader(""))
	if err != nil {
		t.Fatal(err)
	}
	// Explicit zero so the transport knows up front there is no body.
	req.ContentLength = 0
	var resp [5]*Response
	for i := range resp {
		resp[i], err = cst.c.Do(req)
		if err != nil {
			t.Fatalf("client post #%d: %v", i, err)
		}
	}
	for i := range resp {
		all, err := io.ReadAll(resp[i].Body)
		if err != nil {
			t.Fatalf("req #%d: client ReadAll: %v", i, err)
		}
		if len(all) != 0 {
			t.Errorf("req #%d: client got %d bytes; expected 0", i, len(all))
		}
	}
}
// Wrappers driving testHandlerPanic across protocol versions, panic values
// (nil vs. a string), and with/without a hijacked connection.
func TestHandlerPanicNil_h1(t *testing.T) { testHandlerPanic(t, false, h1Mode, nil, nil) }
func TestHandlerPanicNil_h2(t *testing.T) { testHandlerPanic(t, false, h2Mode, nil, nil) }
func TestHandlerPanic_h1(t *testing.T) {
	testHandlerPanic(t, false, h1Mode, nil, "intentional death for testing")
}
func TestHandlerPanic_h2(t *testing.T) {
	testHandlerPanic(t, false, h2Mode, nil, "intentional death for testing")
}
func TestHandlerPanicWithHijack(t *testing.T) {
	// Only testing HTTP/1, and our http2 server doesn't support hijacking.
	testHandlerPanic(t, true, h1Mode, nil, "intentional death for testing")
}
// testHandlerPanic asserts that a panicking handler is caught by the server,
// that something about the panic is logged (except for panic(nil), which is
// intentionally silent), and that the client sees an error instead of a
// response.
func testHandlerPanic(t *testing.T, withHijack, h2 bool, wrapper func(Handler) Handler, panicValue any) {
	defer afterTest(t)
	// Unlike the other tests that set the log output to io.Discard
	// to quiet the output, this test uses a pipe. The pipe serves three
	// purposes:
	//
	//   1) The log.Print from the http server (generated by the caught
	//      panic) will go to the pipe instead of stderr, making the
	//      output quiet.
	//
	//   2) We read from the pipe to verify that the handler
	//      actually caught the panic and logged something.
	//
	//   3) The blocking Read call prevents this TestHandlerPanic
	//      function from exiting before the HTTP server handler
	//      finishes crashing. If this text function exited too
	//      early (and its defer log.SetOutput(os.Stderr) ran),
	//      then the crash output could spill into the next test.
	pr, pw := io.Pipe()
	log.SetOutput(pw)
	defer log.SetOutput(os.Stderr)
	defer pw.Close()
	var handler Handler = HandlerFunc(func(w ResponseWriter, r *Request) {
		if withHijack {
			rwc, _, err := w.(Hijacker).Hijack()
			if err != nil {
				t.Logf("unexpected error: %v", err)
			}
			defer rwc.Close()
		}
		panic(panicValue)
	})
	if wrapper != nil {
		// Allows callers (e.g. TestTimeoutHandlerPanicRecovery) to
		// interpose another handler around the panicking one.
		handler = wrapper(handler)
	}
	cst := newClientServerTest(t, h2, handler)
	defer cst.close()
	// Do a blocking read on the log output pipe so its logging
	// doesn't bleed into the next test. But wait only 5 seconds
	// for it.
	done := make(chan bool, 1)
	go func() {
		buf := make([]byte, 4<<10)
		_, err := pr.Read(buf)
		pr.Close()
		if err != nil && err != io.EOF {
			t.Error(err)
		}
		done <- true
	}()
	_, err := cst.c.Get(cst.ts.URL)
	if err == nil {
		t.Logf("expected an error")
	}
	if panicValue == nil {
		// panic(nil) is treated as a quiet way to exit the handler;
		// no log output is expected, so don't wait for any.
		return
	}
	select {
	case <-done:
		return
	case <-time.After(5 * time.Second):
		t.Fatal("expected server handler to log an error")
	}
}
// terrorWriter is an io.Writer that reports anything written to it as a
// test error. It is used as a server ErrorLog sink to fail tests on
// unexpected log output.
type terrorWriter struct{ t *testing.T }

func (w terrorWriter) Write(p []byte) (int, error) {
	w.t.Errorf("%s", p)
	return len(p), nil
}
// Issue 16456: allow writing 0 bytes on hijacked conn to test hijack
// without any log spam.
func TestServerWriteHijackZeroBytes(t *testing.T) {
	defer afterTest(t)
	done := make(chan struct{})
	ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		defer close(done)
		w.(Flusher).Flush()
		conn, _, err := w.(Hijacker).Hijack()
		if err != nil {
			t.Errorf("Hijack: %v", err)
			return
		}
		defer conn.Close()
		// A zero-byte Write after Hijack must return ErrHijacked
		// without logging anything (terrorWriter below would fail
		// the test if it did).
		_, err = w.Write(nil)
		if err != ErrHijacked {
			t.Errorf("Write error = %v; want ErrHijacked", err)
		}
	}))
	ts.Config.ErrorLog = log.New(terrorWriter{t}, "Unexpected write: ", 0)
	ts.Start()
	defer ts.Close()
	c := ts.Client()
	res, err := c.Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()
	select {
	case <-done:
	case <-time.After(5 * time.Second):
		t.Fatal("timeout")
	}
}
// Wrappers checking that handlers can suppress the implicit Date and
// Content-Type response headers on both HTTP/1 and HTTP/2.
func TestServerNoDate_h1(t *testing.T)        { testServerNoHeader(t, h1Mode, "Date") }
func TestServerNoDate_h2(t *testing.T)        { testServerNoHeader(t, h2Mode, "Date") }
func TestServerNoContentType_h1(t *testing.T) { testServerNoHeader(t, h1Mode, "Content-Type") }
func TestServerNoContentType_h2(t *testing.T) { testServerNoHeader(t, h2Mode, "Content-Type") }
// testServerNoHeader verifies that assigning a nil value slice for a header
// key in the handler suppresses that header in the response, even ones the
// server would otherwise add implicitly (Date, sniffed Content-Type).
func testServerNoHeader(t *testing.T, h2 bool, header string) {
	setParallel(t)
	defer afterTest(t)
	suppress := HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header()[header] = nil
		io.WriteString(w, "<html>foo</html>") // non-empty
	})
	cst := newClientServerTest(t, h2, suppress)
	defer cst.close()
	res, err := cst.c.Get(cst.ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()
	if got, ok := res.Header[header]; ok {
		t.Fatalf("Expected no %s header; got %q", header, got)
	}
}
// TestStripPrefix checks that StripPrefix removes the prefix from both
// URL.Path and URL.RawPath, and 404s when the (unescaped) prefix doesn't
// match.
func TestStripPrefix(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	h := HandlerFunc(func(w ResponseWriter, r *Request) {
		// Echo the rewritten paths back in headers so the client can
		// inspect what the inner handler saw.
		w.Header().Set("X-Path", r.URL.Path)
		w.Header().Set("X-RawPath", r.URL.RawPath)
	})
	ts := httptest.NewServer(StripPrefix("/foo/bar", h))
	defer ts.Close()
	c := ts.Client()
	cases := []struct {
		reqPath string
		path    string // If empty we want a 404.
		rawPath string
	}{
		{"/foo/bar/qux", "/qux", ""},
		{"/foo/bar%2Fqux", "/qux", "%2Fqux"},
		{"/foo%2Fbar/qux", "", ""}, // Escaped prefix does not match.
		{"/bar", "", ""},           // No prefix match.
	}
	for _, tc := range cases {
		t.Run(tc.reqPath, func(t *testing.T) {
			res, err := c.Get(ts.URL + tc.reqPath)
			if err != nil {
				t.Fatal(err)
			}
			res.Body.Close()
			if tc.path == "" {
				if res.StatusCode != StatusNotFound {
					t.Errorf("got %q, want 404 Not Found", res.Status)
				}
				return
			}
			if res.StatusCode != StatusOK {
				t.Fatalf("got %q, want 200 OK", res.Status)
			}
			if g, w := res.Header.Get("X-Path"), tc.path; g != w {
				t.Errorf("got Path %q, want %q", g, w)
			}
			if g, w := res.Header.Get("X-RawPath"), tc.rawPath; g != w {
				t.Errorf("got RawPath %q, want %q", g, w)
			}
		})
	}
}
// https://golang.org/issue/18952.
// StripPrefix must rewrite a copy of the request, never the caller's
// *Request in place.
func TestStripPrefixNotModifyRequest(t *testing.T) {
	req := httptest.NewRequest("GET", "/foo/bar", nil)
	handler := StripPrefix("/foo", NotFoundHandler())
	handler.ServeHTTP(httptest.NewRecorder(), req)
	if got, want := req.URL.Path, "/foo/bar"; got != want {
		t.Errorf("StripPrefix should not modify the provided Request, but it did")
	}
}
// Wrappers running the oversized-request-header test on HTTP/1 and HTTP/2.
func TestRequestLimit_h1(t *testing.T) { testRequestLimit(t, h1Mode) }
func TestRequestLimit_h2(t *testing.T) { testRequestLimit(t, h2Mode) }
// testRequestLimit sends a request whose headers exceed DefaultMaxHeaderBytes
// and expects the server to reject it with 431 (Request Header Fields Too
// Large) before the handler ever runs.
func testRequestLimit(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)
	cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
		t.Fatalf("didn't expect to get request in Handler")
	}), optQuietLog)
	defer cst.close()
	req, _ := NewRequest("GET", cst.ts.URL, nil)
	// Add enough headers to overflow the server's limit with margin.
	var bytesPerHeader = len("header12345: val12345\r\n")
	for i := 0; i < ((DefaultMaxHeaderBytes+4096)/bytesPerHeader)+1; i++ {
		req.Header.Set(fmt.Sprintf("header%05d", i), fmt.Sprintf("val%05d", i))
	}
	res, err := cst.c.Do(req)
	if res != nil {
		defer res.Body.Close()
	}
	if h2 {
		// In HTTP/2, the result depends on a race. If the client has received the
		// server's SETTINGS before RoundTrip starts sending the request, then RoundTrip
		// will fail with an error. Otherwise, the client should receive a 431 from the
		// server.
		if err == nil && res.StatusCode != 431 {
			t.Fatalf("expected 431 response status; got: %d %s", res.StatusCode, res.Status)
		}
	} else {
		// In HTTP/1, we expect a 431 from the server.
		// Some HTTP clients may fail on this undefined behavior (server replying and
		// closing the connection while the request is still being written), but
		// we do support it (at least currently), so we expect a response below.
		if err != nil {
			t.Fatalf("Do: %v", err)
		}
		if res.StatusCode != 431 {
			t.Fatalf("expected 431 response status; got: %d %s", res.StatusCode, res.Status)
		}
	}
}
// neverEnding is an io.Reader that endlessly yields copies of a single
// byte; it never returns io.EOF.
type neverEnding byte

// Read fills p entirely with the receiver byte and reports success.
func (b neverEnding) Read(p []byte) (n int, err error) {
	c := byte(b)
	for i := 0; i < len(p); i++ {
		p[i] = c
	}
	return len(p), nil
}
// countReader wraps an io.Reader and atomically accumulates the total
// number of bytes read into *n, so another goroutine can observe progress.
type countReader struct {
	r io.Reader
	n *int64 // running byte count, updated atomically
}

func (cr countReader) Read(p []byte) (n int, err error) {
	n, err = cr.r.Read(p)
	atomic.AddInt64(cr.n, int64(n))
	return
}
// Wrappers running the MaxBytesReader body-limit test on HTTP/1 and HTTP/2.
func TestRequestBodyLimit_h1(t *testing.T) { testRequestBodyLimit(t, h1Mode) }
func TestRequestBodyLimit_h2(t *testing.T) { testRequestBodyLimit(t, h2Mode) }
// testRequestBodyLimit verifies that a handler wrapping the request body in
// MaxBytesReader causes the server to stop the client from uploading much
// more than the limit, even though the client tries to send 200x as much.
func testRequestBodyLimit(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)
	const limit = 1 << 20
	cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
		r.Body = MaxBytesReader(w, r.Body, limit)
		n, err := io.Copy(io.Discard, r.Body)
		if err == nil {
			t.Errorf("expected error from io.Copy")
		}
		if n != limit {
			t.Errorf("io.Copy = %d, want %d", n, limit)
		}
	}))
	defer cst.close()
	nWritten := new(int64)
	req, _ := NewRequest("POST", cst.ts.URL, io.LimitReader(countReader{neverEnding('a'), nWritten}, limit*200))
	// Send the POST, but don't care it succeeds or not. The
	// remote side is going to reply and then close the TCP
	// connection, and HTTP doesn't really define if that's
	// allowed or not. Some HTTP clients will get the response
	// and some (like ours, currently) will complain that the
	// request write failed, without reading the response.
	//
	// But that's okay, since what we're really testing is that
	// the remote side hung up on us before we wrote too much.
	_, _ = cst.c.Do(req)
	// Fix: load the counter once (atomically) and pass the loaded value
	// to Errorf. The old code passed the *int64 pointer to %d, which
	// printed the pointer's address rather than the byte count.
	if n := atomic.LoadInt64(nWritten); n > limit*100 {
		t.Errorf("handler restricted the request body to %d bytes, but client managed to write %d",
			limit, n)
	}
}
// TestClientWriteShutdown tests that if the client shuts down the write
// side of their TCP connection, the server doesn't send a 400 Bad Request.
func TestClientWriteShutdown(t *testing.T) {
	if runtime.GOOS == "plan9" {
		t.Skip("skipping test; see https://golang.org/issue/17906")
	}
	defer afterTest(t)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
	defer ts.Close()
	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatalf("Dial: %v", err)
	}
	// Half-close: no request bytes are ever sent, only FIN.
	err = conn.(*net.TCPConn).CloseWrite()
	if err != nil {
		t.Fatalf("CloseWrite: %v", err)
	}
	// The server should silently close the connection without writing
	// any response (such as a 400) back to us.
	bs, err := io.ReadAll(conn)
	if err != nil {
		t.Errorf("ReadAll: %v", err)
	}
	got := string(bs)
	if got != "" {
		t.Errorf("read %q from server; want nothing", got)
	}
}
// Tests that chunked server responses that write 1 byte at a time are
// buffered before chunk headers are added, not after chunk headers.
func TestServerBufferedChunking(t *testing.T) {
	conn := new(testConn)
	conn.readBuf.Write([]byte("GET / HTTP/1.1\r\nHost: foo\r\n\r\n"))
	conn.closec = make(chan bool, 1)
	ls := &oneConnListener{conn}
	go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
		rw.(Flusher).Flush() // force the Header to be sent, in chunking mode, not counting the length
		rw.Write([]byte{'x'})
		rw.Write([]byte{'y'})
		rw.Write([]byte{'z'})
	}))
	<-conn.closec
	// The three 1-byte writes must coalesce into a single "3\r\nxyz\r\n"
	// chunk, not three separate 1-byte chunks.
	if !bytes.HasSuffix(conn.writeBuf.Bytes(), []byte("\r\n\r\n3\r\nxyz\r\n0\r\n\r\n")) {
		t.Errorf("response didn't end with a single 3 byte 'xyz' chunk; got:\n%q",
			conn.writeBuf.Bytes())
	}
}
// Tests that the server flushes its response headers out when it's
// ignoring the response body and waits a bit before forcefully
// closing the TCP connection, causing the client to get a RST.
// See https://golang.org/issue/3595
func TestServerGracefulClose(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		Error(w, "bye", StatusUnauthorized)
	}))
	defer ts.Close()
	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	// Announce a 5 MB body the handler never reads; the server must
	// still get the 401 response out before it kills the connection.
	const bodySize = 5 << 20
	req := []byte(fmt.Sprintf("POST / HTTP/1.1\r\nHost: foo.com\r\nContent-Length: %d\r\n\r\n", bodySize))
	for i := 0; i < bodySize; i++ {
		req = append(req, 'x')
	}
	writeErr := make(chan error)
	go func() {
		_, err := conn.Write(req)
		writeErr <- err
	}()
	br := bufio.NewReader(conn)
	lineNum := 0
	for {
		line, err := br.ReadString('\n')
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatalf("ReadLine: %v", err)
		}
		lineNum++
		if lineNum == 1 && !strings.Contains(line, "401 Unauthorized") {
			t.Errorf("Response line = %q; want a 401", line)
		}
	}
	// Wait for write to finish. This is a broken pipe on both
	// Darwin and Linux, but checking this isn't the point of
	// the test.
	<-writeErr
}
// Wrappers running the case-sensitive-method test on HTTP/1 and HTTP/2.
func TestCaseSensitiveMethod_h1(t *testing.T) { testCaseSensitiveMethod(t, h1Mode) }
func TestCaseSensitiveMethod_h2(t *testing.T) { testCaseSensitiveMethod(t, h2Mode) }
// testCaseSensitiveMethod verifies that HTTP methods are treated as
// case-sensitive end to end: a lowercase "get" must reach the handler
// exactly as sent, not canonicalized to "GET".
func testCaseSensitiveMethod(t *testing.T, h2 bool) {
	defer afterTest(t)
	checkMethod := HandlerFunc(func(w ResponseWriter, r *Request) {
		if r.Method != "get" {
			t.Errorf(`Got method %q; want "get"`, r.Method)
		}
	})
	cst := newClientServerTest(t, h2, checkMethod)
	defer cst.close()
	req, _ := NewRequest("get", cst.ts.URL, nil)
	res, err := cst.c.Do(req)
	if err != nil {
		t.Error(err)
		return
	}
	res.Body.Close()
}
// TestContentLengthZero tests that for both an HTTP/1.0 and HTTP/1.1
// request (both keep-alive), when a Handler never writes any
// response, the net/http package adds a "Content-Length: 0" response
// header.
func TestContentLengthZero(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {}))
	defer ts.Close()
	for _, version := range []string{"HTTP/1.0", "HTTP/1.1"} {
		conn, err := net.Dial("tcp", ts.Listener.Addr().String())
		if err != nil {
			t.Fatalf("error dialing: %v", err)
		}
		// Write the request by hand to control the exact HTTP version.
		_, err = fmt.Fprintf(conn, "GET / %v\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n", version)
		if err != nil {
			t.Fatalf("error writing: %v", err)
		}
		req, _ := NewRequest("GET", "/", nil)
		res, err := ReadResponse(bufio.NewReader(conn), req)
		if err != nil {
			t.Fatalf("error reading response: %v", err)
		}
		// Expect Content-Length: 0 and no chunking for an empty body.
		if te := res.TransferEncoding; len(te) > 0 {
			t.Errorf("For version %q, Transfer-Encoding = %q; want none", version, te)
		}
		if cl := res.ContentLength; cl != 0 {
			t.Errorf("For version %q, Content-Length = %v; want 0", version, cl)
		}
		conn.Close()
	}
}
// TestCloseNotifier verifies that a handler blocked on CloseNotify is
// unblocked when the client closes its TCP connection.
func TestCloseNotifier(t *testing.T) {
	defer afterTest(t)
	gotReq := make(chan bool, 1)
	sawClose := make(chan bool, 1)
	ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
		gotReq <- true
		cc := rw.(CloseNotifier).CloseNotify()
		<-cc
		sawClose <- true
	}))
	// Fix: defer the Close so the server is shut down even when the
	// timeout branch below calls t.Fatal (which runs deferred calls but
	// skips the rest of the function body).
	defer ts.Close()
	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatalf("error dialing: %v", err)
	}
	diec := make(chan bool)
	go func() {
		// Fix: use a goroutine-local err instead of assigning to the
		// enclosing function's err variable.
		_, err := fmt.Fprintf(conn, "GET / HTTP/1.1\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n")
		if err != nil {
			t.Error(err)
			return
		}
		<-diec
		conn.Close()
	}()
For:
	for {
		select {
		case <-gotReq:
			// Handler has the request; tell the writer to hang up.
			diec <- true
		case <-sawClose:
			break For
		case <-time.After(5 * time.Second):
			t.Fatal("timeout")
		}
	}
}
// Tests that a pipelined request does not cause the first request's
// Handler's CloseNotify channel to fire.
//
// Issue 13165 (where it used to deadlock), but behavior changed in Issue 23921.
func TestCloseNotifierPipelined(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	gotReq := make(chan bool, 2)
	sawClose := make(chan bool, 2)
	ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
		gotReq <- true
		cc := rw.(CloseNotifier).CloseNotify()
		// CloseNotify must NOT fire just because a second request was
		// pipelined behind this one; give it 100ms to misbehave.
		select {
		case <-cc:
			t.Error("unexpected CloseNotify")
		case <-time.After(100 * time.Millisecond):
		}
		sawClose <- true
	}))
	defer ts.Close()
	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatalf("error dialing: %v", err)
	}
	diec := make(chan bool, 1)
	defer close(diec)
	go func() {
		const req = "GET / HTTP/1.1\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n"
		_, err = io.WriteString(conn, req+req) // two requests
		if err != nil {
			t.Error(err)
			return
		}
		<-diec
		conn.Close()
	}()
	reqs := 0
	closes := 0
	// Expect exactly two requests and two non-firing CloseNotify rounds.
	for {
		select {
		case <-gotReq:
			reqs++
			if reqs > 2 {
				t.Fatal("too many requests")
			}
		case <-sawClose:
			closes++
			if closes > 1 {
				return
			}
		case <-time.After(5 * time.Second):
			ts.CloseClientConnections()
			t.Fatal("timeout")
		}
	}
}
// TestCloseNotifierChanLeak checks that requesting a CloseNotify channel
// and never reading from it does not leak the sending goroutine. Run 20
// times so a leak would be visible to the goroutine-leak check in afterTest.
func TestCloseNotifierChanLeak(t *testing.T) {
	defer afterTest(t)
	req := reqBytes("GET / HTTP/1.0\nHost: golang.org")
	for i := 0; i < 20; i++ {
		var output bytes.Buffer
		conn := &rwTestConn{
			Reader: bytes.NewReader(req),
			Writer: &output,
			closec: make(chan bool, 1),
		}
		ln := &oneConnListener{conn: conn}
		handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
			// Ignore the return value and never read from
			// it, testing that we don't leak goroutines
			// on the sending side:
			_ = rw.(CloseNotifier).CloseNotify()
		})
		go Serve(ln, handler)
		<-conn.closec
	}
}
// Tests that we can use CloseNotifier in one request, and later call Hijack
// on a second request on the same connection.
//
// It also tests that the connReader stitches together its background
// 1-byte read for CloseNotifier when CloseNotifier doesn't fire with
// the rest of the second HTTP later.
//
// Issue 9763.
// HTTP/1-only test. (http2 doesn't have Hijack)
func TestHijackAfterCloseNotifier(t *testing.T) {
	defer afterTest(t)
	script := make(chan string, 2)
	script <- "closenotify"
	script <- "hijack"
	close(script)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		plan := <-script
		switch plan {
		default:
			panic("bogus plan; too many requests")
		case "closenotify":
			w.(CloseNotifier).CloseNotify() // discard result
			w.Header().Set("X-Addr", r.RemoteAddr)
		case "hijack":
			c, _, err := w.(Hijacker).Hijack()
			if err != nil {
				t.Errorf("Hijack in Handler: %v", err)
				return
			}
			if _, ok := c.(*net.TCPConn); !ok {
				// Verify it's not wrapped in some type.
				// Not strictly a go1 compat issue, but in practice it probably is.
				t.Errorf("type of hijacked conn is %T; want *net.TCPConn", c)
			}
			fmt.Fprintf(c, "HTTP/1.0 200 OK\r\nX-Addr: %v\r\nContent-Length: 0\r\n\r\n", r.RemoteAddr)
			c.Close()
			return
		}
	}))
	defer ts.Close()
	res1, err := Get(ts.URL)
	if err != nil {
		// Fix: use t.Fatal, not log.Fatal. log.Fatal calls os.Exit,
		// which aborts the entire test binary and skips the deferred
		// ts.Close/afterTest cleanup.
		t.Fatal(err)
	}
	res2, err := Get(ts.URL)
	if err != nil {
		t.Fatal(err) // Fix: was log.Fatal; see above.
	}
	// Both requests must have ridden the same keep-alive connection.
	addr1 := res1.Header.Get("X-Addr")
	addr2 := res2.Header.Get("X-Addr")
	if addr1 == "" || addr1 != addr2 {
		t.Errorf("addr1, addr2 = %q, %q; want same", addr1, addr2)
	}
}
// TestHijackBeforeRequestBodyRead verifies that a handler can grab the
// CloseNotify channel before reading the request body, still read the whole
// body afterwards, and then see CloseNotify fire when the client hangs up.
func TestHijackBeforeRequestBodyRead(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	var requestBody = bytes.Repeat([]byte("a"), 1<<20)
	bodyOkay := make(chan bool, 1)
	gotCloseNotify := make(chan bool, 1)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		defer close(bodyOkay) // caller will read false if nothing else
		reqBody := r.Body
		r.Body = nil // to test that server.go doesn't use this value.
		gone := w.(CloseNotifier).CloseNotify()
		slurp, err := io.ReadAll(reqBody)
		if err != nil {
			t.Errorf("Body read: %v", err)
			return
		}
		if len(slurp) != len(requestBody) {
			t.Errorf("Backend read %d request body bytes; want %d", len(slurp), len(requestBody))
			return
		}
		if !bytes.Equal(slurp, requestBody) {
			t.Error("Backend read wrong request body.") // 1MB; omitting details
			return
		}
		bodyOkay <- true
		select {
		case <-gone:
			gotCloseNotify <- true
		case <-time.After(5 * time.Second):
			gotCloseNotify <- false
		}
	}))
	defer ts.Close()
	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	// Send a 1MB POST by hand so we control exactly when the connection
	// is torn down below.
	fmt.Fprintf(conn, "POST / HTTP/1.1\r\nHost: foo\r\nContent-Length: %d\r\n\r\n%s",
		len(requestBody), requestBody)
	if !<-bodyOkay {
		// already failed.
		return
	}
	conn.Close()
	if !<-gotCloseNotify {
		t.Error("timeout waiting for CloseNotify")
	}
}
// TestOptions checks ServeMux handling of the special "*" request-URI:
// "OPTIONS *" must succeed, "GET *" must be rejected with 400, and a
// normal request must still reach the handler with its RequestURI intact.
func TestOptions(t *testing.T) {
	uric := make(chan string, 2) // only expect 1, but leave space for 2
	mux := NewServeMux()
	mux.HandleFunc("/", func(w ResponseWriter, r *Request) {
		uric <- r.RequestURI
	})
	ts := httptest.NewServer(mux)
	defer ts.Close()
	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	// An OPTIONS * request should succeed.
	_, err = conn.Write([]byte("OPTIONS * HTTP/1.1\r\nHost: foo.com\r\n\r\n"))
	if err != nil {
		t.Fatal(err)
	}
	br := bufio.NewReader(conn)
	res, err := ReadResponse(br, &Request{Method: "OPTIONS"})
	if err != nil {
		t.Fatal(err)
	}
	if res.StatusCode != 200 {
		t.Errorf("Got non-200 response to OPTIONS *: %#v", res)
	}
	// A GET * request on a ServeMux should fail.
	_, err = conn.Write([]byte("GET * HTTP/1.1\r\nHost: foo.com\r\n\r\n"))
	if err != nil {
		t.Fatal(err)
	}
	res, err = ReadResponse(br, &Request{Method: "GET"})
	if err != nil {
		t.Fatal(err)
	}
	if res.StatusCode != 400 {
		t.Errorf("Got non-400 response to GET *: %#v", res)
	}
	res, err = Get(ts.URL + "/second")
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()
	if got := <-uric; got != "/second" {
		t.Errorf("Handler saw request for %q; want /second", got)
	}
}
// Tests regarding the ordering of Write, WriteHeader, Header, and
// Flush calls. In Go 1.0, rw.WriteHeader immediately flushed the
// (*response).header to the wire. In Go 1.1, the actual wire flush is
// delayed, so we could maybe tack on a Content-Length and better
// Content-Type after we see more (or all) of the output. To preserve
// compatibility with Go 1, we need to be careful to track which
// headers were live at the time of WriteHeader, so we write the same
// ones, even if the handler modifies them (~erroneously) after the
// first Write.
func TestHeaderToWire(t *testing.T) {
	tests := []struct {
		name    string
		handler func(ResponseWriter, *Request)
		// check inspects the raw wire response (and any server logs)
		// and returns a non-nil error describing any mismatch.
		check func(got, logs string) error
	}{
		{
			name: "write without Header",
			handler: func(rw ResponseWriter, r *Request) {
				rw.Write([]byte("hello world"))
			},
			check: func(got, logs string) error {
				if !strings.Contains(got, "Content-Length:") {
					return errors.New("no content-length")
				}
				if !strings.Contains(got, "Content-Type: text/plain") {
					return errors.New("no content-type")
				}
				return nil
			},
		},
		{
			name: "Header mutation before write",
			handler: func(rw ResponseWriter, r *Request) {
				h := rw.Header()
				h.Set("Content-Type", "some/type")
				rw.Write([]byte("hello world"))
				h.Set("Too-Late", "bogus")
			},
			check: func(got, logs string) error {
				if !strings.Contains(got, "Content-Length:") {
					return errors.New("no content-length")
				}
				if !strings.Contains(got, "Content-Type: some/type") {
					return errors.New("wrong content-type")
				}
				if strings.Contains(got, "Too-Late") {
					return errors.New("don't want too-late header")
				}
				return nil
			},
		},
		{
			name: "write then useless Header mutation",
			handler: func(rw ResponseWriter, r *Request) {
				rw.Write([]byte("hello world"))
				rw.Header().Set("Too-Late", "Write already wrote headers")
			},
			check: func(got, logs string) error {
				if strings.Contains(got, "Too-Late") {
					return errors.New("header appeared from after WriteHeader")
				}
				return nil
			},
		},
		{
			name: "flush then write",
			handler: func(rw ResponseWriter, r *Request) {
				rw.(Flusher).Flush()
				rw.Write([]byte("post-flush"))
				rw.Header().Set("Too-Late", "Write already wrote headers")
			},
			check: func(got, logs string) error {
				if !strings.Contains(got, "Transfer-Encoding: chunked") {
					return errors.New("not chunked")
				}
				if strings.Contains(got, "Too-Late") {
					return errors.New("header appeared from after WriteHeader")
				}
				return nil
			},
		},
		{
			name: "header then flush",
			handler: func(rw ResponseWriter, r *Request) {
				rw.Header().Set("Content-Type", "some/type")
				rw.(Flusher).Flush()
				rw.Write([]byte("post-flush"))
				rw.Header().Set("Too-Late", "Write already wrote headers")
			},
			check: func(got, logs string) error {
				if !strings.Contains(got, "Transfer-Encoding: chunked") {
					return errors.New("not chunked")
				}
				if strings.Contains(got, "Too-Late") {
					return errors.New("header appeared from after WriteHeader")
				}
				if !strings.Contains(got, "Content-Type: some/type") {
					return errors.New("wrong content-type")
				}
				return nil
			},
		},
		{
			name: "sniff-on-first-write content-type",
			handler: func(rw ResponseWriter, r *Request) {
				rw.Write([]byte("<html><head></head><body>some html</body></html>"))
				rw.Header().Set("Content-Type", "x/wrong")
			},
			check: func(got, logs string) error {
				if !strings.Contains(got, "Content-Type: text/html") {
					return errors.New("wrong content-type; want html")
				}
				return nil
			},
		},
		{
			name: "explicit content-type wins",
			handler: func(rw ResponseWriter, r *Request) {
				rw.Header().Set("Content-Type", "some/type")
				rw.Write([]byte("<html><head></head><body>some html</body></html>"))
			},
			check: func(got, logs string) error {
				if !strings.Contains(got, "Content-Type: some/type") {
					return errors.New("wrong content-type; want html")
				}
				return nil
			},
		},
		{
			name: "empty handler",
			handler: func(rw ResponseWriter, r *Request) {
			},
			check: func(got, logs string) error {
				if !strings.Contains(got, "Content-Length: 0") {
					return errors.New("want 0 content-length")
				}
				return nil
			},
		},
		{
			name: "only Header, no write",
			handler: func(rw ResponseWriter, r *Request) {
				rw.Header().Set("Some-Header", "some-value")
			},
			check: func(got, logs string) error {
				if !strings.Contains(got, "Some-Header") {
					return errors.New("didn't get header")
				}
				return nil
			},
		},
		{
			name: "WriteHeader call",
			handler: func(rw ResponseWriter, r *Request) {
				rw.WriteHeader(404)
				rw.Header().Set("Too-Late", "some-value")
			},
			check: func(got, logs string) error {
				if !strings.Contains(got, "404") {
					return errors.New("wrong status")
				}
				if strings.Contains(got, "Too-Late") {
					return errors.New("shouldn't have seen Too-Late")
				}
				return nil
			},
		},
	}
	for _, tc := range tests {
		ht := newHandlerTest(HandlerFunc(tc.handler))
		got := ht.rawResponse("GET / HTTP/1.1\nHost: golang.org")
		logs := ht.logbuf.String()
		if err := tc.check(got, logs); err != nil {
			t.Errorf("%s: %v\nGot response:\n%s\n\n%s", tc.name, err, got, logs)
		}
	}
}
// errorListener is a net.Listener whose Accept returns each queued error
// in turn, then io.EOF once the queue is drained (which makes Serve exit).
type errorListener struct {
	errs []error
}

// Accept pops and returns the next queued error, or io.EOF when empty.
func (l *errorListener) Accept() (net.Conn, error) {
	if len(l.errs) == 0 {
		return nil, io.EOF
	}
	next := l.errs[0]
	l.errs = l.errs[1:]
	return nil, next
}

// Close is a no-op.
func (l *errorListener) Close() error { return nil }

// Addr returns a fixed placeholder address.
func (l *errorListener) Addr() net.Addr { return dummyAddr("test-address") }
// TestAcceptMaxFds verifies that Serve treats a temporary EMFILE
// ("too many open files") accept error as retryable and keeps serving,
// ultimately returning the listener's terminal error (io.EOF here).
func TestAcceptMaxFds(t *testing.T) {
	setParallel(t)
	ln := &errorListener{[]error{
		&net.OpError{
			Op:  "accept",
			Err: syscall.EMFILE,
		}}}
	server := &Server{
		// Fix: drop the redundant double HandlerFunc conversion; a
		// single HandlerFunc wrap already satisfies Handler.
		Handler:  HandlerFunc(func(ResponseWriter, *Request) {}),
		ErrorLog: log.New(io.Discard, "", 0), // noisy otherwise
	}
	err := server.Serve(ln)
	if err != io.EOF {
		t.Errorf("got error %v, want EOF", err)
	}
}
// TestWriteAfterHijack checks that after Hijack, writes through both the
// returned buffered writer and the raw connection reach the wire in order.
func TestWriteAfterHijack(t *testing.T) {
	req := reqBytes("GET / HTTP/1.1\nHost: golang.org")
	var buf bytes.Buffer
	wrotec := make(chan bool, 1)
	conn := &rwTestConn{
		Reader: bytes.NewReader(req),
		Writer: &buf,
		closec: make(chan bool, 1),
	}
	handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
		conn, bufrw, err := rw.(Hijacker).Hijack()
		if err != nil {
			t.Error(err)
			return
		}
		// Write from another goroutine to show the hijacked conn is
		// fully owned by the handler, independent of the server loop.
		go func() {
			bufrw.Write([]byte("[hijack-to-bufw]"))
			bufrw.Flush()
			conn.Write([]byte("[hijack-to-conn]"))
			conn.Close()
			wrotec <- true
		}()
	})
	ln := &oneConnListener{conn: conn}
	go Serve(ln, handler)
	<-conn.closec
	<-wrotec
	if g, w := buf.String(), "[hijack-to-bufw][hijack-to-conn]"; g != w {
		t.Errorf("wrote %q; want %q", g, w)
	}
}
// TestDoubleHijack checks that calling Hijack a second time on the same
// ResponseWriter fails.
func TestDoubleHijack(t *testing.T) {
	req := reqBytes("GET / HTTP/1.1\nHost: golang.org")
	var buf bytes.Buffer
	conn := &rwTestConn{
		Reader: bytes.NewReader(req),
		Writer: &buf,
		closec: make(chan bool, 1),
	}
	handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
		conn, _, err := rw.(Hijacker).Hijack()
		if err != nil {
			t.Error(err)
			return
		}
		// Second Hijack on an already-hijacked connection must error.
		_, _, err = rw.(Hijacker).Hijack()
		if err == nil {
			t.Errorf("got err = nil; want err != nil")
		}
		conn.Close()
	})
	ln := &oneConnListener{conn: conn}
	go Serve(ln, handler)
	<-conn.closec
}
// https://golang.org/issue/5955
// Note that this does not test the "request too large"
// exit path from the http server. This is intentional;
// not sending Connection: close is just a minor wire
// optimization and is pointless if dealing with a
// badly behaved client.
func TestHTTP10ConnectionHeader(t *testing.T) {
	defer afterTest(t)
	mux := NewServeMux()
	mux.Handle("/", HandlerFunc(func(ResponseWriter, *Request) {}))
	ts := httptest.NewServer(mux)
	defer ts.Close()
	// net/http uses HTTP/1.1 for requests, so write requests manually
	tests := []struct {
		req    string   // raw http request
		expect []string // expected Connection header(s)
	}{
		{
			req:    "GET / HTTP/1.0\r\n\r\n",
			expect: nil,
		},
		{
			req:    "OPTIONS * HTTP/1.0\r\n\r\n",
			expect: nil,
		},
		{
			req:    "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n",
			expect: []string{"keep-alive"},
		},
	}
	for _, tt := range tests {
		conn, err := net.Dial("tcp", ts.Listener.Addr().String())
		if err != nil {
			t.Fatal("dial err:", err)
		}
		_, err = fmt.Fprint(conn, tt.req)
		if err != nil {
			t.Fatal("conn write err:", err)
		}
		resp, err := ReadResponse(bufio.NewReader(conn), &Request{Method: "GET"})
		if err != nil {
			t.Fatal("ReadResponse err:", err)
		}
		conn.Close()
		resp.Body.Close()
		// The server should echo keep-alive back only when the HTTP/1.0
		// client explicitly asked for it.
		got := resp.Header["Connection"]
		if !reflect.DeepEqual(got, tt.expect) {
			t.Errorf("wrong Connection headers for request %q. Got %q expect %q", tt.req, got, tt.expect)
		}
	}
}
// See golang.org/issue/5660
// HTTP/1 and HTTP/2 entry points for testServerReaderFromOrder below.
func TestServerReaderFromOrder_h1(t *testing.T) { testServerReaderFromOrder(t, h1Mode) }
func TestServerReaderFromOrder_h2(t *testing.T) { testServerReaderFromOrder(t, h2Mode) }
// testServerReaderFromOrder checks that a handler writing the response from a
// concurrent goroutine (via io.Copy from a pipe) does not interfere with the
// handler draining the request body: all `size` request bytes must be read,
// and the two-byte response must arrive intact.
func testServerReaderFromOrder(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)
	pr, pw := io.Pipe()
	const size = 3 << 20
	cst := newClientServerTest(t, h2, HandlerFunc(func(rw ResponseWriter, req *Request) {
		rw.Header().Set("Content-Type", "text/plain") // prevent sniffing path
		done := make(chan bool)
		go func() {
			io.Copy(rw, pr)
			close(done)
		}()
		time.Sleep(25 * time.Millisecond) // give Copy a chance to break things
		n, err := io.Copy(io.Discard, req.Body)
		if err != nil {
			t.Errorf("handler Copy: %v", err)
			return
		}
		if n != size {
			t.Errorf("handler Copy = %d; want %d", n, size)
		}
		pw.Write([]byte("hi"))
		pw.Close()
		<-done
	}))
	defer cst.close()
	req, err := NewRequest("POST", cst.ts.URL, io.LimitReader(neverEnding('a'), size))
	if err != nil {
		t.Fatal(err)
	}
	res, err := cst.c.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	all, err := io.ReadAll(res.Body)
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()
	if string(all) != "hi" {
		t.Errorf("Body = %q; want hi", all)
	}
}
// Issue 6157, Issue 6685
// TestCodesPreventingContentTypeAndBody verifies that for status codes that
// forbid a body (304, 204, 100), the server strips Content-Length and
// suppresses any body the handler attempts to write.
func TestCodesPreventingContentTypeAndBody(t *testing.T) {
	for _, code := range []int{StatusNotModified, StatusNoContent, StatusContinue} {
		ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) {
			if r.URL.Path == "/header" {
				w.Header().Set("Content-Length", "123")
			}
			w.WriteHeader(code)
			if r.URL.Path == "/more" {
				w.Write([]byte("stuff"))
			}
		}))
		// Exercise each path over both HTTP/1.0 and HTTP/1.1.
		for _, req := range []string{
			"GET / HTTP/1.0",
			"GET /header HTTP/1.0",
			"GET /more HTTP/1.0",
			"GET / HTTP/1.1\nHost: foo",
			"GET /header HTTP/1.1\nHost: foo",
			"GET /more HTTP/1.1\nHost: foo",
		} {
			got := ht.rawResponse(req)
			wantStatus := fmt.Sprintf("%d %s", code, StatusText(code))
			if !strings.Contains(got, wantStatus) {
				t.Errorf("Code %d: Wanted %q Modified for %q: %s", code, wantStatus, req, got)
			} else if strings.Contains(got, "Content-Length") {
				t.Errorf("Code %d: Got a Content-Length from %q: %s", code, req, got)
			} else if strings.Contains(got, "stuff") {
				t.Errorf("Code %d: Response contains a body from %q: %s", code, req, got)
			}
		}
	}
}
func TestContentTypeOkayOn204(t *testing.T) {
ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Content-Length", "123") // suppressed
w.Header().Set("Content-Type", "foo/bar")
w.WriteHeader(204)
}))
got := ht.rawResponse("GET / HTTP/1.1\nHost: foo")
if !strings.Contains(got, "Content-Type: foo/bar") {
t.Errorf("Response = %q; want Content-Type: foo/bar", got)
}
if strings.Contains(got, "Content-Length: 123") {
t.Errorf("Response = %q; don't want a Content-Length", got)
}
}
// Issue 6995
// A server Handler can receive a Request, and then turn around and
// give a copy of that Request.Body out to the Transport (e.g. any
// proxy). So then two people own that Request.Body (both the server
// and the http client), and both think they can close it on failure.
// Therefore, all incoming server requests Bodies need to be thread-safe.
// HTTP/1 and HTTP/2 entry points for testTransportAndServerSharedBodyRace below.
func TestTransportAndServerSharedBodyRace_h1(t *testing.T) {
	testTransportAndServerSharedBodyRace(t, h1Mode)
}
func TestTransportAndServerSharedBodyRace_h2(t *testing.T) {
	testTransportAndServerSharedBodyRace(t, h2Mode)
}
// testTransportAndServerSharedBodyRace sets up a proxy handler that forwards
// its incoming Request.Body as the outbound body of a Transport request, then
// cancels that outbound request mid-transfer, so that both the server and the
// Transport race to read/close the same body. The test passes if nothing
// deadlocks or panics under the race detector.
func testTransportAndServerSharedBodyRace(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)
	const bodySize = 1 << 20
	// errorf is like t.Errorf, but also writes to println. When
	// this test fails, it hangs. This helps debugging and I've
	// added this enough times "temporarily". It now gets added
	// full time.
	errorf := func(format string, args ...any) {
		v := fmt.Sprintf(format, args...)
		println(v)
		t.Error(v)
	}
	unblockBackend := make(chan bool)
	backend := newClientServerTest(t, h2, HandlerFunc(func(rw ResponseWriter, req *Request) {
		gone := rw.(CloseNotifier).CloseNotify()
		didCopy := make(chan any)
		go func() {
			n, err := io.CopyN(rw, req.Body, bodySize)
			didCopy <- []any{n, err}
		}()
		isGone := false
	Loop:
		for {
			select {
			case <-didCopy:
				break Loop
			case <-gone:
				isGone = true
			case <-time.After(time.Second):
				println("1 second passes in backend, proxygone=", isGone)
			}
		}
		<-unblockBackend
	}))
	var quitTimer *time.Timer
	defer func() { quitTimer.Stop() }()
	defer backend.close()
	backendRespc := make(chan *Response, 1)
	var proxy *clientServerTest
	proxy = newClientServerTest(t, h2, HandlerFunc(func(rw ResponseWriter, req *Request) {
		req2, _ := NewRequest("POST", backend.ts.URL, req.Body)
		req2.ContentLength = bodySize
		cancel := make(chan struct{})
		req2.Cancel = cancel
		bresp, err := proxy.c.Do(req2)
		if err != nil {
			errorf("Proxy outbound request: %v", err)
			return
		}
		_, err = io.CopyN(io.Discard, bresp.Body, bodySize/2)
		if err != nil {
			errorf("Proxy copy error: %v", err)
			return
		}
		backendRespc <- bresp // to close later
		// Try to cause a race: Both the Transport and the proxy handler's Server
		// will try to read/close req.Body (aka req2.Body)
		if h2 {
			close(cancel)
		} else {
			proxy.c.Transport.(*Transport).CancelRequest(req2)
		}
		rw.Write([]byte("OK"))
	}))
	defer proxy.close()
	defer func() {
		// Before we shut down our two httptest.Servers, start a timer.
		// We choose 7 seconds because httptest.Server starts logging
		// warnings to stderr at 5 seconds. If we don't disarm this bomb
		// in 7 seconds (after the two httptest.Server.Close calls above),
		// then we explode with stacks.
		quitTimer = time.AfterFunc(7*time.Second, func() {
			debug.SetTraceback("ALL")
			stacks := make([]byte, 1<<20)
			stacks = stacks[:runtime.Stack(stacks, true)]
			fmt.Fprintf(os.Stderr, "%s", stacks)
			log.Fatalf("Timeout.")
		})
	}()
	defer close(unblockBackend)
	req, _ := NewRequest("POST", proxy.ts.URL, io.LimitReader(neverEnding('a'), bodySize))
	res, err := proxy.c.Do(req)
	if err != nil {
		t.Fatalf("Original request: %v", err)
	}
	// Cleanup, so we don't leak goroutines.
	res.Body.Close()
	select {
	case res := <-backendRespc:
		res.Body.Close()
	default:
		// We failed earlier. (e.g. on proxy.c.Do(req2))
	}
}
// Test that a hanging Request.Body.Read from another goroutine can't
// cause the Handler goroutine's Request.Body.Close to block.
// See issue 7121.
// TestRequestBodyCloseDoesntBlock checks that a hanging Request.Body.Read in
// one goroutine does not block the Handler goroutine's implicit Body.Close.
// The client sends headers promising a 100000-byte body that never arrives,
// so the in-handler Read blocks until the server gives up; that Read must
// return an error rather than hang.
func TestRequestBodyCloseDoesntBlock(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	defer afterTest(t)
	readErrCh := make(chan error, 1)
	errCh := make(chan error, 2)
	server := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
		go func(body io.Reader) {
			_, err := body.Read(make([]byte, 100))
			readErrCh <- err
		}(req.Body)
		time.Sleep(500 * time.Millisecond)
	}))
	defer server.Close()
	closeConn := make(chan bool)
	defer close(closeConn)
	go func() {
		conn, err := net.Dial("tcp", server.Listener.Addr().String())
		if err != nil {
			errCh <- err
			return
		}
		defer conn.Close()
		_, err = conn.Write([]byte("POST / HTTP/1.1\r\nConnection: close\r\nHost: foo\r\nContent-Length: 100000\r\n\r\n"))
		if err != nil {
			errCh <- err
			return
		}
		// And now just block, making the server block on our
		// 100000 bytes of body that will never arrive.
		<-closeConn
	}()
	select {
	case err := <-readErrCh:
		if err == nil {
			t.Error("Read was nil. Expected error.")
		}
	case err := <-errCh:
		t.Error(err)
	case <-time.After(5 * time.Second):
		t.Error("timeout")
	}
}
// test that ResponseWriter implements io.StringWriter.
// TestResponseWriterWriteString checks that the ResponseWriter handed to a
// handler also implements io.StringWriter.
func TestResponseWriterWriteString(t *testing.T) {
	okc := make(chan bool, 1)
	ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) {
		_, ok := w.(io.StringWriter)
		okc <- ok
	}))
	ht.rawResponse("GET / HTTP/1.0")
	// rawResponse runs the handler synchronously, so the buffered channel
	// already holds the result (or the handler never ran).
	select {
	case ok := <-okc:
		if !ok {
			t.Error("ResponseWriter did not implement io.StringWriter")
		}
	default:
		t.Error("handler was never called")
	}
}
// TestAppendTime round-trips a fixed-zone time through appendTime and
// ParseTime and checks the instants are equal.
func TestAppendTime(t *testing.T) {
	want := time.Date(2013, 9, 21, 15, 41, 0, 0, time.FixedZone("CEST", 2*60*60))
	var scratch [len(TimeFormat)]byte
	formatted := ExportAppendTime(scratch[:0], want)
	got, err := ParseTime(string(formatted))
	if err != nil {
		t.Fatalf("Error parsing time: %s", err)
	}
	if !want.Equal(got) {
		t.Fatalf("Times differ; expected: %v, got %v (%s)", want, got, string(formatted))
	}
}
// TestServerConnState verifies the sequence of Server.ConnState callbacks
// (StateNew, StateActive, StateIdle, StateHijacked, StateClosed) for a
// variety of request/connection patterns: keep-alive, Connection: close,
// hijacking, panics after hijack, immediate client close, and bogus requests.
func TestServerConnState(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	handler := map[string]func(w ResponseWriter, r *Request){
		"/": func(w ResponseWriter, r *Request) {
			fmt.Fprintf(w, "Hello.")
		},
		"/close": func(w ResponseWriter, r *Request) {
			w.Header().Set("Connection", "close")
			fmt.Fprintf(w, "Hello.")
		},
		"/hijack": func(w ResponseWriter, r *Request) {
			c, _, _ := w.(Hijacker).Hijack()
			c.Write([]byte("HTTP/1.0 200 OK\r\nConnection: close\r\n\r\nHello."))
			c.Close()
		},
		"/hijack-panic": func(w ResponseWriter, r *Request) {
			c, _, _ := w.(Hijacker).Hijack()
			c.Write([]byte("HTTP/1.0 200 OK\r\nConnection: close\r\n\r\nHello."))
			c.Close()
			panic("intentional panic")
		},
	}
	// A stateLog is a log of states over the lifetime of a connection.
	type stateLog struct {
		active   net.Conn // The connection for which the log is recorded; set to the first connection seen in StateNew.
		got      []ConnState
		want     []ConnState
		complete chan<- struct{} // If non-nil, closed when either 'got' is equal to 'want', or 'got' is no longer a prefix of 'want'.
	}
	// activeLog acts as a mutex-plus-value: holding the *stateLog means
	// owning the right to mutate it.
	activeLog := make(chan *stateLog, 1)
	// wantLog invokes doRequests, then waits for the resulting connection to
	// either pass through the sequence of states in want or enter a state outside
	// of that sequence.
	wantLog := func(doRequests func(), want ...ConnState) {
		t.Helper()
		complete := make(chan struct{})
		activeLog <- &stateLog{want: want, complete: complete}
		doRequests()
		stateDelay := 5 * time.Second
		if deadline, ok := t.Deadline(); ok {
			// Allow an arbitrarily long delay.
			// This test was observed to be flaky on the darwin-arm64-corellium builder,
			// so we're increasing the deadline to see if it starts passing.
			// See https://golang.org/issue/37322.
			const arbitraryCleanupMargin = 1 * time.Second
			stateDelay = time.Until(deadline) - arbitraryCleanupMargin
		}
		timer := time.NewTimer(stateDelay)
		select {
		case <-timer.C:
			t.Errorf("Timed out after %v waiting for connection to change state.", stateDelay)
		case <-complete:
			timer.Stop()
		}
		sl := <-activeLog
		if !reflect.DeepEqual(sl.got, sl.want) {
			t.Errorf("Request(s) produced unexpected state sequence.\nGot:  %v\nWant: %v", sl.got, sl.want)
		}
		// Don't return sl to activeLog: we don't expect any further states after
		// this point, and want to keep the ConnState callback blocked until the
		// next call to wantLog.
	}
	ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		handler[r.URL.Path](w, r)
	}))
	defer func() {
		activeLog <- &stateLog{} // If the test failed, allow any remaining ConnState callbacks to complete.
		ts.Close()
	}()
	ts.Config.ErrorLog = log.New(io.Discard, "", 0)
	ts.Config.ConnState = func(c net.Conn, state ConnState) {
		if c == nil {
			t.Errorf("nil conn seen in state %s", state)
			return
		}
		sl := <-activeLog
		if sl.active == nil && state == StateNew {
			sl.active = c
		} else if sl.active != c {
			// A different connection (e.g. a client retry) — ignore it but
			// report, keeping the log owned by the original connection.
			t.Errorf("unexpected conn in state %s", state)
			activeLog <- sl
			return
		}
		sl.got = append(sl.got, state)
		if sl.complete != nil && (len(sl.got) >= len(sl.want) || !reflect.DeepEqual(sl.got, sl.want[:len(sl.got)])) {
			close(sl.complete)
			sl.complete = nil
		}
		activeLog <- sl
	}
	ts.Start()
	c := ts.Client()
	mustGet := func(url string, headers ...string) {
		t.Helper()
		req, err := NewRequest("GET", url, nil)
		if err != nil {
			t.Fatal(err)
		}
		// headers is a flat list of alternating key/value pairs.
		for len(headers) > 0 {
			req.Header.Add(headers[0], headers[1])
			headers = headers[2:]
		}
		res, err := c.Do(req)
		if err != nil {
			t.Errorf("Error fetching %s: %v", url, err)
			return
		}
		_, err = io.ReadAll(res.Body)
		defer res.Body.Close()
		if err != nil {
			t.Errorf("Error reading %s: %v", url, err)
		}
	}
	wantLog(func() {
		mustGet(ts.URL + "/")
		mustGet(ts.URL + "/close")
	}, StateNew, StateActive, StateIdle, StateActive, StateClosed)
	wantLog(func() {
		mustGet(ts.URL + "/")
		mustGet(ts.URL+"/", "Connection", "close")
	}, StateNew, StateActive, StateIdle, StateActive, StateClosed)
	wantLog(func() {
		mustGet(ts.URL + "/hijack")
	}, StateNew, StateActive, StateHijacked)
	wantLog(func() {
		mustGet(ts.URL + "/hijack-panic")
	}, StateNew, StateActive, StateHijacked)
	wantLog(func() {
		c, err := net.Dial("tcp", ts.Listener.Addr().String())
		if err != nil {
			t.Fatal(err)
		}
		c.Close()
	}, StateNew, StateClosed)
	wantLog(func() {
		c, err := net.Dial("tcp", ts.Listener.Addr().String())
		if err != nil {
			t.Fatal(err)
		}
		if _, err := io.WriteString(c, "BOGUS REQUEST\r\n\r\n"); err != nil {
			t.Fatal(err)
		}
		c.Read(make([]byte, 1)) // block until server hangs up on us
		c.Close()
	}, StateNew, StateActive, StateClosed)
	wantLog(func() {
		c, err := net.Dial("tcp", ts.Listener.Addr().String())
		if err != nil {
			t.Fatal(err)
		}
		if _, err := io.WriteString(c, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n"); err != nil {
			t.Fatal(err)
		}
		res, err := ReadResponse(bufio.NewReader(c), nil)
		if err != nil {
			t.Fatal(err)
		}
		if _, err := io.Copy(io.Discard, res.Body); err != nil {
			t.Fatal(err)
		}
		c.Close()
	}, StateNew, StateActive, StateIdle, StateClosed)
}
// TestServerKeepAlivesEnabled checks that when keep-alives are disabled on
// the Server, responses indicate connection close (Response.Close == true).
func TestServerKeepAlivesEnabled(t *testing.T) {
	defer afterTest(t)
	srv := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
	srv.Config.SetKeepAlivesEnabled(false)
	srv.Start()
	defer srv.Close()
	resp, err := Get(srv.URL)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if !resp.Close {
		t.Errorf("Body.Close == false; want true")
	}
}
// golang.org/issue/7856
// HTTP/1 and HTTP/2 entry points for testServerEmptyBodyRace below.
func TestServerEmptyBodyRace_h1(t *testing.T) { testServerEmptyBodyRace(t, h1Mode) }
func TestServerEmptyBodyRace_h2(t *testing.T) { testServerEmptyBodyRace(t, h2Mode) }
// testServerEmptyBodyRace fires 20 concurrent GETs at a handler that writes
// no body and verifies, under the race detector, that every request reaches
// the handler exactly once.
func testServerEmptyBodyRace(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)
	var n int32
	cst := newClientServerTest(t, h2, HandlerFunc(func(rw ResponseWriter, req *Request) {
		atomic.AddInt32(&n, 1)
	}), optQuietLog)
	defer cst.close()
	var wg sync.WaitGroup
	const reqs = 20
	for i := 0; i < reqs; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			res, err := cst.c.Get(cst.ts.URL)
			if err != nil {
				// Try to deflake spurious "connection reset by peer" under load.
				// See golang.org/issue/22540.
				time.Sleep(10 * time.Millisecond)
				res, err = cst.c.Get(cst.ts.URL)
				if err != nil {
					t.Error(err)
					return
				}
			}
			defer res.Body.Close()
			_, err = io.Copy(io.Discard, res.Body)
			if err != nil {
				t.Error(err)
				return
			}
		}()
	}
	wg.Wait()
	if got := atomic.LoadInt32(&n); got != reqs {
		t.Errorf("handler ran %d times; want %d", got, reqs)
	}
}
// TestServerConnStateNew verifies that the ConnState hook fires with StateNew
// before Serve returns, so the unsynchronized write/read of sawNew below is
// not a data race.
func TestServerConnStateNew(t *testing.T) {
	sawNew := false // if the test is buggy, we'll race on this variable.
	srv := &Server{
		ConnState: func(c net.Conn, state ConnState) {
			if state == StateNew {
				sawNew = true // testing that this write isn't racy
			}
		},
		Handler: HandlerFunc(func(w ResponseWriter, r *Request) {}), // irrelevant
	}
	srv.Serve(&oneConnListener{
		conn: &rwTestConn{
			Reader: strings.NewReader("GET / HTTP/1.1\r\nHost: foo\r\n\r\n"),
			Writer: io.Discard,
		},
	})
	if !sawNew { // testing that this read isn't racy
		t.Error("StateNew not seen")
	}
}
// closeWriteTestConn is an rwTestConn that records whether CloseWrite was
// called, for TestCloseWrite below.
type closeWriteTestConn struct {
	rwTestConn
	didCloseWrite bool
}
// CloseWrite records the call and always succeeds.
func (c *closeWriteTestConn) CloseWrite() error {
	c.didCloseWrite = true
	return nil
}
// TestCloseWrite verifies that the server's closeWriteAndWait path calls
// CloseWrite on connections that support it.
func TestCloseWrite(t *testing.T) {
	setParallel(t)
	var srv Server
	var testConn closeWriteTestConn
	c := ExportServerNewConn(&srv, &testConn)
	ExportCloseWriteAndWait(c)
	if !testConn.didCloseWrite {
		t.Error("didn't see CloseWrite call")
	}
}
// This verifies that a handler can Flush and then Hijack.
//
// A similar test crashed once during development, but it was only
// testing this tangentially and temporarily until another TODO was
// fixed.
//
// So add an explicit test for this.
// TestServerFlushAndHijack verifies a handler can Flush part of a chunked
// response and then Hijack the connection to write the remaining chunks by
// hand; the client must see the concatenated body.
func TestServerFlushAndHijack(t *testing.T) {
	defer afterTest(t)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		io.WriteString(w, "Hello, ")
		w.(Flusher).Flush()
		conn, buf, _ := w.(Hijacker).Hijack()
		// Hand-written chunked encoding: one 6-byte chunk then terminator.
		buf.WriteString("6\r\nworld!\r\n0\r\n\r\n")
		if err := buf.Flush(); err != nil {
			t.Error(err)
		}
		if err := conn.Close(); err != nil {
			t.Error(err)
		}
	}))
	defer ts.Close()
	res, err := Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	defer res.Body.Close()
	all, err := io.ReadAll(res.Body)
	if err != nil {
		t.Fatal(err)
	}
	if want := "Hello, world!"; string(all) != want {
		t.Errorf("Got %q; want %q", all, want)
	}
}
// golang.org/issue/8534 -- the Server shouldn't reuse a connection
// for keep-alive after it's seen any Write error (e.g. a timeout) on
// that net.Conn.
//
// To test, verify we don't timeout or see fewer unique client
// addresses (== unique connections) than requests.
// TestServerKeepAliveAfterWriteError verifies the server does not reuse a
// connection after a write error (here induced by WriteTimeout shorter than
// the handler's sleep): each of the 3 requests must arrive on a distinct
// connection and all client requests must fail.
func TestServerKeepAliveAfterWriteError(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	defer afterTest(t)
	const numReq = 3
	addrc := make(chan string, numReq)
	ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		addrc <- r.RemoteAddr
		time.Sleep(500 * time.Millisecond) // longer than WriteTimeout below
		w.(Flusher).Flush()
	}))
	ts.Config.WriteTimeout = 250 * time.Millisecond
	ts.Start()
	defer ts.Close()
	errc := make(chan error, numReq)
	go func() {
		defer close(errc)
		for i := 0; i < numReq; i++ {
			res, err := Get(ts.URL)
			if res != nil {
				res.Body.Close()
			}
			errc <- err
		}
	}()
	timeout := time.NewTimer(numReq * 2 * time.Second) // 4x overkill
	defer timeout.Stop()
	addrSeen := map[string]bool{}
	numOkay := 0
	for {
		select {
		case v := <-addrc:
			addrSeen[v] = true
		case err, ok := <-errc:
			if !ok {
				// errc closed: all client requests are done; check totals.
				if len(addrSeen) != numReq {
					t.Errorf("saw %d unique client addresses; want %d", len(addrSeen), numReq)
				}
				if numOkay != 0 {
					t.Errorf("got %d successful client requests; want 0", numOkay)
				}
				return
			}
			if err == nil {
				numOkay++
			}
		case <-timeout.C:
			t.Fatal("timeout waiting for requests to complete")
		}
	}
}
// Issue 9987: shouldn't add automatic Content-Length (or
// Content-Type) if a Transfer-Encoding was set by the handler.
// TestNoContentLengthIfTransferEncoding verifies the server does not add an
// automatic Content-Length (or sniffed Content-Type) header when the handler
// set a Transfer-Encoding itself. Issue 9987.
func TestNoContentLengthIfTransferEncoding(t *testing.T) {
	defer afterTest(t)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Transfer-Encoding", "foo")
		io.WriteString(w, "<html>")
	}))
	defer ts.Close()
	c, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatalf("Dial: %v", err)
	}
	defer c.Close()
	if _, err := io.WriteString(c, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n"); err != nil {
		t.Fatal(err)
	}
	bs := bufio.NewScanner(c)
	var got bytes.Buffer
	// Collect only the header block: stop at the blank line.
	for bs.Scan() {
		if strings.TrimSpace(bs.Text()) == "" {
			break
		}
		got.WriteString(bs.Text())
		got.WriteByte('\n')
	}
	if err := bs.Err(); err != nil {
		t.Fatal(err)
	}
	if strings.Contains(got.String(), "Content-Length") {
		t.Errorf("Unexpected Content-Length in response headers: %s", got.String())
	}
	if strings.Contains(got.String(), "Content-Type") {
		t.Errorf("Unexpected Content-Type in response headers: %s", got.String())
	}
}
// tolerate extra CRLF(s) before Request-Line on subsequent requests on a conn
// Issue 10876.
// TestTolerateCRLFBeforeRequestLine verifies the server skips stray CRLFs
// between pipelined requests on the same connection and still serves both.
func TestTolerateCRLFBeforeRequestLine(t *testing.T) {
	req := []byte("POST / HTTP/1.1\r\nHost: golang.org\r\nContent-Length: 3\r\n\r\nABC" +
		"\r\n\r\n" + // <-- this stuff is bogus, but we'll ignore it
		"GET / HTTP/1.1\r\nHost: golang.org\r\n\r\n")
	var buf bytes.Buffer
	conn := &rwTestConn{
		Reader: bytes.NewReader(req),
		Writer: &buf,
		closec: make(chan bool, 1),
	}
	ln := &oneConnListener{conn: conn}
	numReq := 0
	go Serve(ln, HandlerFunc(func(rw ResponseWriter, r *Request) {
		numReq++
	}))
	<-conn.closec // channel receive orders numReq++ before the read below
	if numReq != 2 {
		t.Errorf("num requests = %d; want 2", numReq)
		t.Logf("Res: %s", buf.Bytes())
	}
}
func TestIssue13893_Expect100(t *testing.T) {
	// test that the Server doesn't filter out Expect headers.
	// NOTE(review): the raw request below appears to be missing the blank
	// line separating headers from the "HelloWorld" body — confirm against
	// the original source; a blank line may have been lost in transit.
	req := reqBytes(`PUT /readbody HTTP/1.1
User-Agent: PycURL/7.22.0
Host: 127.0.0.1:9000
Accept: */*
Expect: 100-continue
Content-Length: 10
HelloWorld
`)
	var buf bytes.Buffer
	conn := &rwTestConn{
		Reader: bytes.NewReader(req),
		Writer: &buf,
		closec: make(chan bool, 1),
	}
	ln := &oneConnListener{conn: conn}
	go Serve(ln, HandlerFunc(func(w ResponseWriter, r *Request) {
		if _, ok := r.Header["Expect"]; !ok {
			t.Error("Expect header should not be filtered out")
		}
	}))
	<-conn.closec
}
// TestIssue11549_Expect100 sends two pipelined requests with
// "Expect: 100-continue" (only the first body is read by the handler) plus a
// third request, and verifies the server serves exactly two requests and
// then closes the connection.
// NOTE(review): blank separator lines inside the raw request literal appear
// to be missing — confirm against the original source.
func TestIssue11549_Expect100(t *testing.T) {
	req := reqBytes(`PUT /readbody HTTP/1.1
User-Agent: PycURL/7.22.0
Host: 127.0.0.1:9000
Accept: */*
Expect: 100-continue
Content-Length: 10
HelloWorldPUT /noreadbody HTTP/1.1
User-Agent: PycURL/7.22.0
Host: 127.0.0.1:9000
Accept: */*
Expect: 100-continue
Content-Length: 10
GET /should-be-ignored HTTP/1.1
Host: foo
`)
	var buf bytes.Buffer
	conn := &rwTestConn{
		Reader: bytes.NewReader(req),
		Writer: &buf,
		closec: make(chan bool, 1),
	}
	ln := &oneConnListener{conn: conn}
	numReq := 0
	go Serve(ln, HandlerFunc(func(w ResponseWriter, r *Request) {
		numReq++
		if r.URL.Path == "/readbody" {
			io.ReadAll(r.Body)
		}
		io.WriteString(w, "Hello world!")
	}))
	<-conn.closec
	if numReq != 2 {
		t.Errorf("num requests = %d; want 2", numReq)
	}
	if !strings.Contains(buf.String(), "Connection: close\r\n") {
		t.Errorf("expected 'Connection: close' in response; got: %s", buf.String())
	}
}
// If a Handler finishes and there's an unread request body, the server
// normally drains it before replying — but it must skip that implicit read
// when the remaining body is too large, as verified here.
// TestHandlerFinishSkipBigContentLengthRead verifies that when a handler
// finishes with a huge unread request body (Content-Length far beyond what
// the server will drain), the server does NOT attempt an implicit read of
// the remainder: the connection read buffer must be the same size after the
// handler as inside it.
func TestHandlerFinishSkipBigContentLengthRead(t *testing.T) {
	setParallel(t)
	conn := &testConn{closec: make(chan bool)}
	// Note: plain string concatenation; the previous fmt.Sprintf wrapper had
	// no format verbs or arguments and is flagged by `go vet`'s printf check.
	conn.readBuf.Write([]byte(
		"POST / HTTP/1.1\r\n" +
			"Host: test\r\n" +
			"Content-Length: 9999999999\r\n" +
			"\r\n" + strings.Repeat("a", 1<<20)))
	ls := &oneConnListener{conn}
	var inHandlerLen int
	go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
		inHandlerLen = conn.readBuf.Len()
		rw.WriteHeader(404)
	}))
	<-conn.closec // channel receive orders inHandlerLen's write before the read below
	afterHandlerLen := conn.readBuf.Len()
	if afterHandlerLen != inHandlerLen {
		t.Errorf("unexpected implicit read. Read buffer went from %d -> %d", inHandlerLen, afterHandlerLen)
	}
}
// HTTP/1 and HTTP/2 entry points for testHandlerSetsBodyNil below.
func TestHandlerSetsBodyNil_h1(t *testing.T) { testHandlerSetsBodyNil(t, h1Mode) }
func TestHandlerSetsBodyNil_h2(t *testing.T) { testHandlerSetsBodyNil(t, h2Mode) }
// testHandlerSetsBodyNil verifies that a handler setting r.Body = nil does
// not break connection reuse: two sequential requests must report the same
// RemoteAddr, i.e. arrive over the same reused connection.
func testHandlerSetsBodyNil(t *testing.T, h2 bool) {
	defer afterTest(t)
	cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
		r.Body = nil
		fmt.Fprintf(w, "%v", r.RemoteAddr)
	}))
	defer cst.close()
	get := func() string {
		res, err := cst.c.Get(cst.ts.URL)
		if err != nil {
			t.Fatal(err)
		}
		defer res.Body.Close()
		slurp, err := io.ReadAll(res.Body)
		if err != nil {
			t.Fatal(err)
		}
		return string(slurp)
	}
	a, b := get(), get()
	if a != b {
		t.Errorf("Failed to reuse connections between requests: %v vs %v", a, b)
	}
}
// Test that we validate the Host header.
// Issue 11206 (invalid bytes in Host) and 13624 (Host present in HTTP/1.1)
// TestServerValidatesHostHeader table-tests the server's Host-header
// validation across protocol versions: missing/duplicate/binary hosts must
// yield 400, unsupported protocol versions 505, and the HTTP/2 preface and
// CONNECT forms are exempt from the Host requirement.
func TestServerValidatesHostHeader(t *testing.T) {
	tests := []struct {
		proto string
		host  string
		want  int
	}{
		{"HTTP/0.9", "", 505},
		{"HTTP/1.1", "", 400},
		{"HTTP/1.1", "Host: \r\n", 200},
		{"HTTP/1.1", "Host: 1.2.3.4\r\n", 200},
		{"HTTP/1.1", "Host: foo.com\r\n", 200},
		{"HTTP/1.1", "Host: foo-bar_baz.com\r\n", 200},
		{"HTTP/1.1", "Host: foo.com:80\r\n", 200},
		{"HTTP/1.1", "Host: ::1\r\n", 200},
		{"HTTP/1.1", "Host: [::1]\r\n", 200}, // questionable without port, but accept it
		{"HTTP/1.1", "Host: [::1]:80\r\n", 200},
		{"HTTP/1.1", "Host: [::1%25en0]:80\r\n", 200},
		{"HTTP/1.1", "Host: 1.2.3.4\r\n", 200},
		{"HTTP/1.1", "Host: \x06\r\n", 400},
		{"HTTP/1.1", "Host: \xff\r\n", 400},
		{"HTTP/1.1", "Host: {\r\n", 400},
		{"HTTP/1.1", "Host: }\r\n", 400},
		{"HTTP/1.1", "Host: first\r\nHost: second\r\n", 400},
		// HTTP/1.0 can lack a host header, but if present
		// must play by the rules too:
		{"HTTP/1.0", "", 200},
		{"HTTP/1.0", "Host: first\r\nHost: second\r\n", 400},
		{"HTTP/1.0", "Host: \xff\r\n", 400},
		// Make an exception for HTTP upgrade requests:
		{"PRI * HTTP/2.0", "", 200},
		// Also an exception for CONNECT requests: (Issue 18215)
		{"CONNECT golang.org:443 HTTP/1.1", "", 200},
		// But not other HTTP/2 stuff:
		{"PRI / HTTP/2.0", "", 505},
		{"GET / HTTP/2.0", "", 505},
		{"GET / HTTP/3.0", "", 505},
	}
	for _, tt := range tests {
		conn := &testConn{closec: make(chan bool, 1)}
		methodTarget := "GET / "
		if !strings.HasPrefix(tt.proto, "HTTP/") {
			// The proto field already contains the full request line.
			methodTarget = ""
		}
		io.WriteString(&conn.readBuf, methodTarget+tt.proto+"\r\n"+tt.host+"\r\n")
		ln := &oneConnListener{conn}
		srv := Server{
			ErrorLog: quietLog,
			Handler:  HandlerFunc(func(ResponseWriter, *Request) {}),
		}
		go srv.Serve(ln)
		<-conn.closec
		res, err := ReadResponse(bufio.NewReader(&conn.writeBuf), nil)
		if err != nil {
			// Report the error itself, not res (which is nil on error).
			t.Errorf("For %s %q, ReadResponse: %v", tt.proto, tt.host, err)
			continue
		}
		if res.StatusCode != tt.want {
			t.Errorf("For %s %q, Status = %d; want %d", tt.proto, tt.host, res.StatusCode, tt.want)
		}
	}
}
// TestServerHandlersCanHandleH2PRI verifies a handler can hijack an HTTP/2
// "PRI * HTTP/2.0" preface request, read the remaining "SM\r\n\r\n" preface
// bytes from the buffered reader, and take over the connection.
func TestServerHandlersCanHandleH2PRI(t *testing.T) {
	const upgradeResponse = "upgrade here"
	defer afterTest(t)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		conn, br, err := w.(Hijacker).Hijack()
		if err != nil {
			t.Error(err)
			return
		}
		defer conn.Close()
		if r.Method != "PRI" || r.RequestURI != "*" {
			t.Errorf("Got method/target %q %q; want PRI *", r.Method, r.RequestURI)
			return
		}
		if !r.Close {
			t.Errorf("Request.Close = true; want false")
		}
		const want = "SM\r\n\r\n"
		buf := make([]byte, len(want))
		n, err := io.ReadFull(br, buf)
		if err != nil || string(buf[:n]) != want {
			t.Errorf("Read = %v, %v (%q), want %q", n, err, buf[:n], want)
			return
		}
		io.WriteString(conn, upgradeResponse)
	}))
	defer ts.Close()
	c, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatalf("Dial: %v", err)
	}
	defer c.Close()
	io.WriteString(c, "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n")
	slurp, err := io.ReadAll(c)
	if err != nil {
		t.Fatal(err)
	}
	if string(slurp) != upgradeResponse {
		t.Errorf("Handler response = %q; want %q", slurp, upgradeResponse)
	}
}
// Test that we validate the valid bytes in HTTP/1 headers.
// Issue 11207.
// TestServerValidatesHeaders table-tests validation of HTTP/1 header bytes:
// spaces before the colon, control bytes, and over-long headers must be
// rejected (400/431) while LWS and high-bit octets in values are accepted.
func TestServerValidatesHeaders(t *testing.T) {
	setParallel(t)
	tests := []struct {
		header string
		want   int
	}{
		{"", 200},
		{"Foo: bar\r\n", 200},
		{"X-Foo: bar\r\n", 200},
		{"Foo: a space\r\n", 200},
		{"A space: foo\r\n", 400},    // space in header
		{"foo\xffbar: foo\r\n", 400}, // binary in header
		{"foo\x00bar: foo\r\n", 400}, // binary in header
		{"Foo: " + strings.Repeat("x", 1<<21) + "\r\n", 431}, // header too large
		// Spaces between the header key and colon are not allowed.
		// See RFC 7230, Section 3.2.4.
		{"Foo : bar\r\n", 400},
		{"Foo\t: bar\r\n", 400},
		{"foo: foo foo\r\n", 200},    // LWS space is okay
		{"foo: foo\tfoo\r\n", 200},   // LWS tab is okay
		{"foo: foo\x00foo\r\n", 400}, // CTL 0x00 in value is bad
		{"foo: foo\x7ffoo\r\n", 400}, // CTL 0x7f in value is bad
		{"foo: foo\xfffoo\r\n", 200}, // non-ASCII high octets in value are fine
	}
	for _, tt := range tests {
		conn := &testConn{closec: make(chan bool, 1)}
		io.WriteString(&conn.readBuf, "GET / HTTP/1.1\r\nHost: foo\r\n"+tt.header+"\r\n")
		ln := &oneConnListener{conn}
		srv := Server{
			ErrorLog: quietLog,
			Handler:  HandlerFunc(func(ResponseWriter, *Request) {}),
		}
		go srv.Serve(ln)
		<-conn.closec
		res, err := ReadResponse(bufio.NewReader(&conn.writeBuf), nil)
		if err != nil {
			// Report the error itself, not res (which is nil on error).
			t.Errorf("For %q, ReadResponse: %v", tt.header, err)
			continue
		}
		if res.StatusCode != tt.want {
			t.Errorf("For %q, Status = %d; want %d", tt.header, res.StatusCode, tt.want)
		}
	}
}
// HTTP/1 and HTTP/2 entry points for testServerRequestContextCancel_ServeHTTPDone below.
func TestServerRequestContextCancel_ServeHTTPDone_h1(t *testing.T) {
	testServerRequestContextCancel_ServeHTTPDone(t, h1Mode)
}
func TestServerRequestContextCancel_ServeHTTPDone_h2(t *testing.T) {
	testServerRequestContextCancel_ServeHTTPDone(t, h2Mode)
}
// testServerRequestContextCancel_ServeHTTPDone verifies the Request.Context
// is not done while ServeHTTP runs, and becomes done once ServeHTTP returns.
func testServerRequestContextCancel_ServeHTTPDone(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)
	ctxc := make(chan context.Context, 1)
	cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
		ctx := r.Context()
		select {
		case <-ctx.Done():
			t.Error("should not be Done in ServeHTTP")
		default:
		}
		ctxc <- ctx
	}))
	defer cst.close()
	res, err := cst.c.Get(cst.ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()
	ctx := <-ctxc
	select {
	case <-ctx.Done():
	default:
		t.Error("context should be done after ServeHTTP completes")
	}
}
// Tests that the Request.Context available to the Handler is canceled
// if the peer closes their TCP connection. This requires that the server
// is always blocked in a Read call so it notices the EOF from the client.
// See issues 15927 and 15224.
// TestServerRequestContextCancel_ConnClose verifies the Request.Context is
// canceled when the client closes its TCP connection while the handler is
// still running.
func TestServerRequestContextCancel_ConnClose(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	inHandler := make(chan struct{})
	handlerDone := make(chan struct{})
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		close(inHandler)
		<-r.Context().Done()
		close(handlerDone)
	}))
	defer ts.Close()
	c, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	io.WriteString(c, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n")
	<-inHandler
	c.Close() // this should trigger the context being done
	<-handlerDone
}
// HTTP/1 and HTTP/2 entry points for testServerContext_ServerContextKey below.
func TestServerContext_ServerContextKey_h1(t *testing.T) {
	testServerContext_ServerContextKey(t, h1Mode)
}
func TestServerContext_ServerContextKey_h2(t *testing.T) {
	testServerContext_ServerContextKey(t, h2Mode)
}
// testServerContext_ServerContextKey verifies the Request.Context carries the
// owning *Server under ServerContextKey.
func testServerContext_ServerContextKey(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)
	cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
		ctx := r.Context()
		got := ctx.Value(ServerContextKey)
		if _, ok := got.(*Server); !ok {
			t.Errorf("context value = %T; want *http.Server", got)
		}
	}))
	defer cst.close()
	res, err := cst.c.Get(cst.ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()
}
// HTTP/1 and HTTP/2 entry points for testServerContext_LocalAddrContextKey below.
func TestServerContext_LocalAddrContextKey_h1(t *testing.T) {
	testServerContext_LocalAddrContextKey(t, h1Mode)
}
func TestServerContext_LocalAddrContextKey_h2(t *testing.T) {
	testServerContext_LocalAddrContextKey(t, h2Mode)
}
// testServerContext_LocalAddrContextKey verifies the Request.Context carries
// the server's local net.Addr under LocalAddrContextKey, matching the
// listener address.
func testServerContext_LocalAddrContextKey(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)
	ch := make(chan any, 1)
	cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
		ch <- r.Context().Value(LocalAddrContextKey)
	}))
	defer cst.close()
	if _, err := cst.c.Head(cst.ts.URL); err != nil {
		t.Fatal(err)
	}
	host := cst.ts.Listener.Addr().String()
	select {
	case got := <-ch:
		if addr, ok := got.(net.Addr); !ok {
			t.Errorf("local addr value = %T; want net.Addr", got)
		} else if fmt.Sprint(addr) != host {
			t.Errorf("local addr = %v; want %v", addr, host)
		}
	case <-time.After(5 * time.Second):
		t.Error("timed out")
	}
}
// https://golang.org/issue/15960
// TestHandlerSetTransferEncodingChunked checks that when the handler sets
// Transfer-Encoding: chunked itself, the server does not emit the header a
// second time.
func TestHandlerSetTransferEncodingChunked(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	handler := HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Transfer-Encoding", "chunked")
		w.Write([]byte("hello"))
	})
	raw := newHandlerTest(handler).rawResponse("GET / HTTP/1.1\nHost: foo")
	const wantHdr = "Transfer-Encoding: chunked"
	count := strings.Count(raw, wantHdr)
	if count != 1 {
		t.Errorf("want 1 occurrence of %q in response, got %v\nresponse: %v", wantHdr, count, raw)
	}
}
// https://golang.org/issue/16063
// TestHandlerSetTransferEncodingGzip checks that a handler-set
// "Transfer-Encoding: gzip" is preserved and that the server adds "chunked"
// exactly once alongside it.
func TestHandlerSetTransferEncodingGzip(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	handler := HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Transfer-Encoding", "gzip")
		zw := gzip.NewWriter(w)
		zw.Write([]byte("hello"))
		zw.Close()
	})
	raw := newHandlerTest(handler).rawResponse("GET / HTTP/1.1\nHost: foo")
	for _, enc := range []string{"gzip", "chunked"} {
		wantHdr := "Transfer-Encoding: " + enc
		if count := strings.Count(raw, wantHdr); count != 1 {
			t.Errorf("want 1 occurrence of %q in response, got %v\nresponse: %v", wantHdr, count, raw)
		}
	}
}
// BenchmarkClientServer measures a full in-process client/server
// request-response round trip per iteration.
func BenchmarkClientServer(b *testing.B) {
	b.ReportAllocs()
	b.StopTimer() // exclude server startup from the measurement
	ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
		fmt.Fprintf(rw, "Hello world.\n")
	}))
	defer ts.Close()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		res, err := Get(ts.URL)
		if err != nil {
			b.Fatal("Get:", err)
		}
		all, err := io.ReadAll(res.Body)
		res.Body.Close()
		if err != nil {
			b.Fatal("ReadAll:", err)
		}
		body := string(all)
		if body != "Hello world.\n" {
			b.Fatal("Got body:", body)
		}
	}
	b.StopTimer()
}
// BenchmarkClientServerParallel4 benchmarks 4-way parallel plaintext round trips.
func BenchmarkClientServerParallel4(b *testing.B) {
	benchmarkClientServerParallel(b, 4, false)
}
// BenchmarkClientServerParallel64 benchmarks 64-way parallel plaintext round trips.
func BenchmarkClientServerParallel64(b *testing.B) {
	benchmarkClientServerParallel(b, 64, false)
}
// BenchmarkClientServerParallelTLS4 benchmarks 4-way parallel TLS round trips.
func BenchmarkClientServerParallelTLS4(b *testing.B) {
	benchmarkClientServerParallel(b, 4, true)
}
// BenchmarkClientServerParallelTLS64 benchmarks 64-way parallel TLS round trips.
func BenchmarkClientServerParallelTLS64(b *testing.B) {
	benchmarkClientServerParallel(b, 64, true)
}
// benchmarkClientServerParallel measures concurrent request-response round
// trips with the given client parallelism, optionally over TLS.
func benchmarkClientServerParallel(b *testing.B, parallelism int, useTLS bool) {
	b.ReportAllocs()
	ts := httptest.NewUnstartedServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
		fmt.Fprintf(rw, "Hello world.\n")
	}))
	if useTLS {
		ts.StartTLS()
	} else {
		ts.Start()
	}
	defer ts.Close()
	b.ResetTimer()
	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
		c := ts.Client()
		for pb.Next() {
			res, err := c.Get(ts.URL)
			if err != nil {
				// Log and continue rather than failing the whole benchmark
				// on a single transient error.
				b.Logf("Get: %v", err)
				continue
			}
			all, err := io.ReadAll(res.Body)
			res.Body.Close()
			if err != nil {
				b.Logf("ReadAll: %v", err)
				continue
			}
			body := string(all)
			if body != "Hello world.\n" {
				panic("Got body: " + body)
			}
		}
	})
}
// A benchmark for profiling the server without the HTTP client code.
// The client code runs in a subprocess.
//
// For use like:
//
//	$ go test -c
//	$ ./http.test -test.run=XX -test.bench=BenchmarkServer -test.benchtime=15s -test.cpuprofile=http.prof
//	$ go tool pprof http.test http.prof
//	(pprof) web
func BenchmarkServer(b *testing.B) {
	b.ReportAllocs()
	// Child process mode; detected via the env var set by the parent below.
	// The child acts as the HTTP client and exits when done.
	if url := os.Getenv("TEST_BENCH_SERVER_URL"); url != "" {
		n, err := strconv.Atoi(os.Getenv("TEST_BENCH_CLIENT_N"))
		if err != nil {
			panic(err)
		}
		for i := 0; i < n; i++ {
			res, err := Get(url)
			if err != nil {
				log.Panicf("Get: %v", err)
			}
			all, err := io.ReadAll(res.Body)
			res.Body.Close()
			if err != nil {
				log.Panicf("ReadAll: %v", err)
			}
			body := string(all)
			if body != "Hello world.\n" {
				log.Panicf("Got body: %q", body)
			}
		}
		os.Exit(0)
		return
	}
	var res = []byte("Hello world.\n")
	b.StopTimer()
	ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
		rw.Header().Set("Content-Type", "text/html; charset=utf-8")
		rw.Write(res)
	}))
	defer ts.Close()
	b.StartTimer()
	// Re-exec this test binary as the client; it takes the child branch
	// above because TEST_BENCH_SERVER_URL is set in its environment.
	cmd := exec.Command(os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkServer$")
	cmd.Env = append([]string{
		fmt.Sprintf("TEST_BENCH_CLIENT_N=%d", b.N),
		fmt.Sprintf("TEST_BENCH_SERVER_URL=%s", ts.URL),
	}, os.Environ()...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		b.Errorf("Test failure: %v, with output: %s", err, out)
	}
}
// getNoBody wraps Get but closes any Response.Body before returning the response.
func getNoBody(urlStr string) (*Response, error) {
	resp, err := Get(urlStr)
	if err == nil {
		resp.Body.Close()
	}
	return resp, err
}
// A benchmark for profiling the client without the HTTP server code.
// The server code runs in a subprocess.
func BenchmarkClient(b *testing.B) {
	b.ReportAllocs()
	b.StopTimer()
	defer afterTest(b)
	var data = []byte("Hello world.\n")
	if server := os.Getenv("TEST_BENCH_SERVER"); server != "" {
		// Server process mode. Serve until a "?stop" request arrives,
		// then exit; never returns to the benchmark harness.
		port := os.Getenv("TEST_BENCH_SERVER_PORT") // can be set by user
		if port == "" {
			port = "0"
		}
		ln, err := net.Listen("tcp", "localhost:"+port)
		if err != nil {
			fmt.Fprintln(os.Stderr, err.Error())
			os.Exit(1)
		}
		// Print the chosen address on stdout so the parent can find us.
		fmt.Println(ln.Addr().String())
		HandleFunc("/", func(w ResponseWriter, r *Request) {
			r.ParseForm()
			if r.Form.Get("stop") != "" {
				os.Exit(0)
			}
			w.Header().Set("Content-Type", "text/html; charset=utf-8")
			w.Write(data)
		})
		var srv Server
		log.Fatal(srv.Serve(ln))
	}
	// Start server process.
	cmd := exec.Command(os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkClient$")
	cmd.Env = append(os.Environ(), "TEST_BENCH_SERVER=yes")
	cmd.Stderr = os.Stderr
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		b.Fatal(err)
	}
	if err := cmd.Start(); err != nil {
		b.Fatalf("subprocess failed to start: %v", err)
	}
	defer cmd.Process.Kill()

	// Wait for the server in the child process to respond and tell us
	// its listening address, once it's started listening:
	timer := time.AfterFunc(10*time.Second, func() {
		cmd.Process.Kill()
	})
	defer timer.Stop()
	bs := bufio.NewScanner(stdout)
	if !bs.Scan() {
		b.Fatalf("failed to read listening URL from child: %v", bs.Err())
	}
	url := "http://" + strings.TrimSpace(bs.Text()) + "/"
	timer.Stop()
	if _, err := getNoBody(url); err != nil {
		b.Fatalf("initial probe of child process failed: %v", err)
	}

	done := make(chan error)
	stop := make(chan struct{})
	defer close(stop)
	go func() {
		// The stop case lets this goroutine exit if the benchmark
		// returns before the child process does.
		select {
		case <-stop:
			return
		case done <- cmd.Wait():
		}
	}()

	// Do b.N requests to the server.
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		res, err := Get(url)
		if err != nil {
			b.Fatalf("Get: %v", err)
		}
		body, err := io.ReadAll(res.Body)
		res.Body.Close()
		if err != nil {
			b.Fatalf("ReadAll: %v", err)
		}
		if !bytes.Equal(body, data) {
			b.Fatalf("Got body: %q", body)
		}
	}
	b.StopTimer()

	// Instruct server process to stop.
	getNoBody(url + "?stop=yes")
	select {
	case err := <-done:
		if err != nil {
			b.Fatalf("subprocess failed: %v", err)
		}
	case <-time.After(5 * time.Second):
		b.Fatalf("subprocess did not stop")
	}
}
// BenchmarkServerFakeConnNoKeepAlive measures serving one HTTP/1.0 request
// per iteration over an in-memory connection (no real network, no keep-alive).
func BenchmarkServerFakeConnNoKeepAlive(b *testing.B) {
	b.ReportAllocs()
	req := reqBytes(`GET / HTTP/1.0
Host: golang.org
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17
Accept-Encoding: gzip,deflate,sdch
Accept-Language: en-US,en;q=0.8
Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3
`)
	res := []byte("Hello world!\n")

	conn := &testConn{
		// testConn.Close will not push into the channel
		// if it's full.
		closec: make(chan bool, 1),
	}
	handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
		rw.Header().Set("Content-Type", "text/html; charset=utf-8")
		rw.Write(res)
	})
	ln := new(oneConnListener)
	for i := 0; i < b.N; i++ {
		// Reuse the same conn across iterations; reset its buffers
		// and reload the request bytes each time.
		conn.readBuf.Reset()
		conn.writeBuf.Reset()
		conn.readBuf.Write(req)
		ln.conn = conn
		Serve(ln, handler)
		<-conn.closec
	}
}
// repeatReader reads content count times, then EOFs.
type repeatReader struct {
content []byte
count int
off int
}
func (r *repeatReader) Read(p []byte) (n int, err error) {
if r.count <= 0 {
return 0, io.EOF
}
n = copy(p, r.content[r.off:])
r.off += n
if r.off == len(r.content) {
r.count--
r.off = 0
}
return
}
// BenchmarkServerFakeConnWithKeepAlive measures serving b.N pipelined
// HTTP/1.1 keep-alive requests over a single in-memory connection.
func BenchmarkServerFakeConnWithKeepAlive(b *testing.B) {
	b.ReportAllocs()
	req := reqBytes(`GET / HTTP/1.1
Host: golang.org
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17
Accept-Encoding: gzip,deflate,sdch
Accept-Language: en-US,en;q=0.8
Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3
`)
	res := []byte("Hello world!\n")

	conn := &rwTestConn{
		// repeatReader feeds the same request b.N times, then EOFs,
		// so one Serve loop handles all iterations.
		Reader: &repeatReader{content: req, count: b.N},
		Writer: io.Discard,
		closec: make(chan bool, 1),
	}
	handled := 0
	handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
		handled++
		rw.Header().Set("Content-Type", "text/html; charset=utf-8")
		rw.Write(res)
	})
	ln := &oneConnListener{conn: conn}
	go Serve(ln, handler)
	<-conn.closec
	if b.N != handled {
		b.Errorf("b.N=%d but handled %d", b.N, handled)
	}
}
// same as above, but representing the most simple possible request
// and handler. Notably: the handler does not call rw.Header().
func BenchmarkServerFakeConnWithKeepAliveLite(b *testing.B) {
	b.ReportAllocs()
	req := reqBytes(`GET / HTTP/1.1
Host: golang.org
`)
	res := []byte("Hello world!\n")

	conn := &rwTestConn{
		Reader: &repeatReader{content: req, count: b.N},
		Writer: io.Discard,
		closec: make(chan bool, 1),
	}
	handled := 0
	handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
		handled++
		rw.Write(res)
	})
	ln := &oneConnListener{conn: conn}
	go Serve(ln, handler)
	<-conn.closec
	if b.N != handled {
		b.Errorf("b.N=%d but handled %d", b.N, handled)
	}
}
// someResponse is the repeated unit used to build the shared benchmark body.
const someResponse = "<html>some response</html>"

// A Response that's just no bigger than 2KB, the buffer-before-chunking threshold.
var response = bytes.Repeat([]byte(someResponse), 2<<10/len(someResponse))
// Both Content-Type and Content-Length set. Should be no buffering.
func BenchmarkServerHandlerTypeLen(b *testing.B) {
	benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Content-Type", "text/html")
		w.Header().Set("Content-Length", strconv.Itoa(len(response)))
		w.Write(response)
	}))
}
// A Content-Type is set, but no length. No sniffing, but will count the Content-Length.
func BenchmarkServerHandlerNoLen(b *testing.B) {
	benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Content-Type", "text/html")
		w.Write(response)
	}))
}
// A Content-Length is set, but the Content-Type will be sniffed.
func BenchmarkServerHandlerNoType(b *testing.B) {
	benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Content-Length", strconv.Itoa(len(response)))
		w.Write(response)
	}))
}
// Neither a Content-Type or Content-Length, so sniffed and counted.
func BenchmarkServerHandlerNoHeader(b *testing.B) {
	benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Write(response)
	}))
}
// benchmarkHandler serves b.N keep-alive requests through h over an
// in-memory connection and verifies every request was handled.
func benchmarkHandler(b *testing.B, h Handler) {
	b.ReportAllocs()
	req := reqBytes(`GET / HTTP/1.1
Host: golang.org
`)
	conn := &rwTestConn{
		Reader: &repeatReader{content: req, count: b.N},
		Writer: io.Discard,
		closec: make(chan bool, 1),
	}
	handled := 0
	handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
		handled++
		h.ServeHTTP(rw, r)
	})
	ln := &oneConnListener{conn: conn}
	go Serve(ln, handler)
	<-conn.closec
	if b.N != handled {
		b.Errorf("b.N=%d but handled %d", b.N, handled)
	}
}
// BenchmarkServerHijack measures the cost of a handler hijacking and
// immediately closing the connection, once per iteration.
func BenchmarkServerHijack(b *testing.B) {
	b.ReportAllocs()
	req := reqBytes(`GET / HTTP/1.1
Host: golang.org
`)
	h := HandlerFunc(func(w ResponseWriter, r *Request) {
		conn, _, err := w.(Hijacker).Hijack()
		if err != nil {
			panic(err)
		}
		conn.Close()
	})
	conn := &rwTestConn{
		Writer: io.Discard,
		closec: make(chan bool, 1),
	}
	ln := &oneConnListener{conn: conn}
	for i := 0; i < b.N; i++ {
		// Fresh Reader per iteration; the hijack closes the conn each time.
		conn.Reader = bytes.NewReader(req)
		ln.conn = conn
		Serve(ln, h)
		<-conn.closec
	}
}
// BenchmarkCloseNotifier measures how quickly a handler blocked on
// CloseNotify observes the client closing the connection.
func BenchmarkCloseNotifier(b *testing.B) {
	b.ReportAllocs()
	b.StopTimer()
	sawClose := make(chan bool)
	ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
		<-rw.(CloseNotifier).CloseNotify()
		sawClose <- true
	}))
	defer ts.Close()
	tot := time.NewTimer(5 * time.Second)
	defer tot.Stop()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		conn, err := net.Dial("tcp", ts.Listener.Addr().String())
		if err != nil {
			b.Fatalf("error dialing: %v", err)
		}
		_, err = fmt.Fprintf(conn, "GET / HTTP/1.1\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n")
		if err != nil {
			b.Fatal(err)
		}
		conn.Close()
		// Per-iteration deadline: the handler must see the close
		// within 5 seconds or the benchmark fails.
		tot.Reset(5 * time.Second)
		select {
		case <-sawClose:
		case <-tot.C:
			b.Fatal("timeout")
		}
	}
	b.StopTimer()
}
// Verify this doesn't race (Issue 16505)
func TestConcurrentServerServe(t *testing.T) {
	setParallel(t)
	for i := 0; i < 100; i++ {
		var srv Server
		// Two Serve calls race on the same Server; the test only
		// checks that this does not trip the race detector.
		listeners := []*oneConnListener{{conn: nil}, {conn: nil}}
		for _, ln := range listeners {
			ln := ln
			go func() { srv.Serve(ln) }()
		}
	}
}
// TestServerIdleTimeout verifies that IdleTimeout closes a keep-alive
// connection between requests, and that ReadHeaderTimeout still closes a
// connection whose headers never finish arriving.
func TestServerIdleTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	setParallel(t)
	defer afterTest(t)
	ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		io.Copy(io.Discard, r.Body)
		// Echo the remote address so the test can tell whether two
		// requests reused the same connection.
		io.WriteString(w, r.RemoteAddr)
	}))
	ts.Config.ReadHeaderTimeout = 1 * time.Second
	ts.Config.IdleTimeout = 2 * time.Second
	ts.Start()
	defer ts.Close()
	c := ts.Client()

	get := func() string {
		res, err := c.Get(ts.URL)
		if err != nil {
			t.Fatal(err)
		}
		defer res.Body.Close()
		slurp, err := io.ReadAll(res.Body)
		if err != nil {
			t.Fatal(err)
		}
		return string(slurp)
	}

	a1, a2 := get(), get()
	if a1 != a2 {
		t.Fatalf("did requests on different connections")
	}
	// Sleep past IdleTimeout so the server closes the idle connection.
	time.Sleep(3 * time.Second)
	a3 := get()
	if a2 == a3 {
		t.Fatal("request three unexpectedly on same connection")
	}

	// And test that ReadHeaderTimeout still works:
	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	// Deliberately incomplete request: headers are never terminated.
	conn.Write([]byte("GET / HTTP/1.1\r\nHost: foo.com\r\n"))
	time.Sleep(2 * time.Second)
	if _, err := io.CopyN(io.Discard, conn, 1); err == nil {
		t.Fatal("copy byte succeeded; want err")
	}
}
// get performs a GET to url via c, failing t on any error, and returns
// the full response body as a string.
func get(t *testing.T, c *Client, url string) string {
	res, err := c.Get(url)
	if err != nil {
		t.Fatal(err)
	}
	body, readErr := io.ReadAll(res.Body)
	res.Body.Close()
	if readErr != nil {
		t.Fatal(readErr)
	}
	return string(body)
}
// Tests that calls to Server.SetKeepAlivesEnabled(false) closes any
// currently-open connections.
func TestServerSetKeepAlivesEnabledClosesConns(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		io.WriteString(w, r.RemoteAddr)
	}))
	defer ts.Close()

	c := ts.Client()
	tr := c.Transport.(*Transport)

	get := func() string { return get(t, c, ts.URL) }

	a1, a2 := get(), get()
	if a1 != a2 {
		t.Fatal("expected first two requests on same connection")
	}
	addr := strings.TrimPrefix(ts.URL, "http://")

	// The two requests should have used the same connection,
	// and there should not have been a second connection that
	// was created by racing dial against reuse.
	// (The first get was completed when the second get started.)
	n := tr.IdleConnCountForTesting("http", addr)
	if n != 1 {
		t.Fatalf("idle count for %q after 2 gets = %d, want 1", addr, n)
	}

	// SetKeepAlivesEnabled should discard idle conns.
	ts.Config.SetKeepAlivesEnabled(false)

	// Poll until the transport's idle pool drains (closing is async).
	var idle1 int
	if !waitCondition(2*time.Second, 10*time.Millisecond, func() bool {
		idle1 = tr.IdleConnCountForTesting("http", addr)
		return idle1 == 0
	}) {
		t.Fatalf("idle count after SetKeepAlivesEnabled called = %v; want 0", idle1)
	}

	a3 := get()
	if a3 == a2 {
		t.Fatal("expected third request on new connection")
	}
}
// TestServerShutdown_h1 runs the graceful-shutdown test over HTTP/1.
func TestServerShutdown_h1(t *testing.T) {
	testServerShutdown(t, h1Mode)
}
// TestServerShutdown_h2 runs the graceful-shutdown test over HTTP/2.
func TestServerShutdown_h2(t *testing.T) {
	testServerShutdown(t, h2Mode)
}
// testServerShutdown verifies that Server.Shutdown lets an in-flight
// request complete, fires RegisterOnShutdown callbacks, and rejects new
// requests afterwards.
func testServerShutdown(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)
	var doShutdown func() // set later
	var doStateCount func()
	var shutdownRes = make(chan error, 1)
	var statesRes = make(chan map[ConnState]int, 1)
	var gotOnShutdown = make(chan struct{}, 1)
	handler := HandlerFunc(func(w ResponseWriter, r *Request) {
		doStateCount()
		go doShutdown()
		// Shutdown is graceful, so it should not interrupt
		// this in-flight response. Add a tiny sleep here to
		// increase the odds of a failure if shutdown has
		// bugs.
		time.Sleep(20 * time.Millisecond)
		io.WriteString(w, r.RemoteAddr)
	})
	cst := newClientServerTest(t, h2, handler, func(srv *httptest.Server) {
		srv.Config.RegisterOnShutdown(func() { gotOnShutdown <- struct{}{} })
	})
	defer cst.close()

	// doShutdown/doStateCount are assigned after cst exists because
	// they need cst.ts.Config; the handler only runs later.
	doShutdown = func() {
		shutdownRes <- cst.ts.Config.Shutdown(context.Background())
	}
	doStateCount = func() {
		statesRes <- cst.ts.Config.ExportAllConnsByState()
	}
	get(t, cst.c, cst.ts.URL) // calls t.Fail on failure

	if err := <-shutdownRes; err != nil {
		t.Fatalf("Shutdown: %v", err)
	}
	select {
	case <-gotOnShutdown:
	case <-time.After(5 * time.Second):
		t.Errorf("onShutdown callback not called, RegisterOnShutdown broken?")
	}

	if states := <-statesRes; states[StateActive] != 1 {
		t.Errorf("connection in wrong state, %v", states)
	}

	res, err := cst.c.Get(cst.ts.URL)
	if err == nil {
		res.Body.Close()
		t.Fatal("second request should fail. server should be shut down")
	}
}
// TestServerShutdownStateNew verifies that Shutdown waits for a connection
// that is still in StateNew (accepted but with no request written yet)
// rather than returning immediately, and that the connection is eventually
// closed so a blocked Read on it errors out.
func TestServerShutdownStateNew(t *testing.T) {
	if testing.Short() {
		t.Skip("test takes 5-6 seconds; skipping in short mode")
	}
	setParallel(t)
	defer afterTest(t)

	ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		// nothing.
	}))
	var connAccepted sync.WaitGroup
	ts.Config.ConnState = func(conn net.Conn, state ConnState) {
		if state == StateNew {
			connAccepted.Done()
		}
	}
	ts.Start()
	defer ts.Close()

	// Start a connection but never write to it.
	connAccepted.Add(1)
	c, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	// Wait for the connection to be accepted by the server. Otherwise, if
	// Shutdown happens to run first, the server will be closed when
	// encountering the connection, in which case it will be rejected
	// immediately.
	connAccepted.Wait()

	shutdownRes := make(chan error, 1)
	go func() {
		shutdownRes <- ts.Config.Shutdown(context.Background())
	}()
	readRes := make(chan error, 1)
	go func() {
		_, err := c.Read([]byte{0})
		readRes <- err
	}()

	const expectTimeout = 5 * time.Second
	t0 := time.Now()
	select {
	case got := <-shutdownRes:
		d := time.Since(t0)
		if got != nil {
			// Report the Shutdown result itself. (The outer err from
			// net.Dial is necessarily nil here, so printing it would
			// hide the actual failure.)
			t.Fatalf("shutdown error after %v: %v", d, got)
		}
		if d < expectTimeout/2 {
			t.Errorf("shutdown too soon after %v", d)
		}
	case <-time.After(expectTimeout * 3 / 2):
		t.Fatalf("timeout waiting for shutdown")
	}

	// Wait for c.Read to unblock; should be already done at this point,
	// or within a few milliseconds.
	select {
	case err := <-readRes:
		if err == nil {
			t.Error("expected error from Read")
		}
	case <-time.After(2 * time.Second):
		t.Errorf("timeout waiting for Read to unblock")
	}
}
// Issue 17878: tests that we can call Close twice.
func TestServerCloseDeadlock(t *testing.T) {
	var srv Server
	for i := 0; i < 2; i++ {
		srv.Close()
	}
}
// Issue 17717: tests that Server.SetKeepAlivesEnabled is respected by
// both HTTP/1 and HTTP/2.
func TestServerKeepAlivesEnabled_h1(t *testing.T) { testServerKeepAlivesEnabled(t, h1Mode) }
func TestServerKeepAlivesEnabled_h2(t *testing.T) { testServerKeepAlivesEnabled(t, h2Mode) }
// testServerKeepAlivesEnabled checks that after SetKeepAlivesEnabled(false),
// every request is served on a brand-new (non-reused, non-idle) connection.
func testServerKeepAlivesEnabled(t *testing.T, h2 bool) {
	if h2 {
		restore := ExportSetH2GoawayTimeout(10 * time.Millisecond)
		defer restore()
	}
	// Not parallel: messes with global variable. (http2goAwayTimeout)
	defer afterTest(t)
	cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {}))
	defer cst.close()
	srv := cst.ts.Config
	srv.SetKeepAlivesEnabled(false)
	for try := 0; try < 2; try++ {
		// Wait for the previous request's connection to fully retire.
		if !waitCondition(2*time.Second, 10*time.Millisecond, srv.ExportAllConnsIdle) {
			t.Fatalf("request %v: test server has active conns", try)
		}
		conns := 0
		var info httptrace.GotConnInfo
		ctx := httptrace.WithClientTrace(context.Background(), &httptrace.ClientTrace{
			GotConn: func(v httptrace.GotConnInfo) {
				conns++
				info = v
			},
		})
		req, err := NewRequestWithContext(ctx, "GET", cst.ts.URL, nil)
		if err != nil {
			t.Fatal(err)
		}
		res, err := cst.c.Do(req)
		if err != nil {
			t.Fatal(err)
		}
		res.Body.Close()
		if conns != 1 {
			t.Fatalf("request %v: got %v conns, want 1", try, conns)
		}
		if info.Reused || info.WasIdle {
			t.Fatalf("request %v: Reused=%v (want false), WasIdle=%v (want false)", try, info.Reused, info.WasIdle)
		}
	}
}
// Issue 18447: test that the Server's ReadTimeout is stopped while
// the server's doing its 1-byte background read between requests,
// waiting for the connection to maybe close.
func TestServerCancelsReadTimeoutWhenIdle(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	// Retry with progressively longer timeouts so a slow machine
	// doesn't cause spurious failures.
	runTimeSensitiveTest(t, []time.Duration{
		10 * time.Millisecond,
		50 * time.Millisecond,
		250 * time.Millisecond,
		time.Second,
		2 * time.Second,
	}, func(t *testing.T, timeout time.Duration) error {
		ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
			// The handler deliberately outlives ReadTimeout; if the
			// timeout incorrectly fires, the request context is
			// canceled and the error is written instead of "ok".
			select {
			case <-time.After(2 * timeout):
				fmt.Fprint(w, "ok")
			case <-r.Context().Done():
				fmt.Fprint(w, r.Context().Err())
			}
		}))
		ts.Config.ReadTimeout = timeout
		ts.Start()
		defer ts.Close()

		c := ts.Client()

		res, err := c.Get(ts.URL)
		if err != nil {
			return fmt.Errorf("Get: %v", err)
		}
		slurp, err := io.ReadAll(res.Body)
		res.Body.Close()
		if err != nil {
			return fmt.Errorf("Body ReadAll: %v", err)
		}
		if string(slurp) != "ok" {
			return fmt.Errorf("got: %q, want ok", slurp)
		}
		return nil
	})
}
// runTimeSensitiveTest runs test with the provided durations until one passes.
// If they all fail, t.Fatal is called with the last one's duration and error value.
func runTimeSensitiveTest(t *testing.T, durations []time.Duration, test func(t *testing.T, d time.Duration) error) {
	var lastErr error
	var lastD time.Duration
	for _, d := range durations {
		if lastErr = test(t, d); lastErr == nil {
			return
		}
		lastD = d
	}
	if lastErr != nil {
		t.Fatalf("failed with duration %v: %v", lastD, lastErr)
	}
}
// Issue 18535: test that the Server doesn't try to do a background
// read if it's already done one.
func TestServerDuplicateBackgroundRead(t *testing.T) {
	if runtime.GOOS == "netbsd" && runtime.GOARCH == "arm" {
		testenv.SkipFlaky(t, 24826)
	}
	setParallel(t)
	defer afterTest(t)

	goroutines := 5
	requests := 2000
	if testing.Short() {
		goroutines = 3
		requests = 100
	}

	hts := httptest.NewServer(HandlerFunc(NotFound))
	defer hts.Close()

	reqBytes := []byte("GET / HTTP/1.1\r\nHost: e.com\r\n\r\n")

	var wg sync.WaitGroup
	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			cn, err := net.Dial("tcp", hts.Listener.Addr().String())
			if err != nil {
				t.Error(err)
				return
			}
			defer cn.Close()

			// Drain all responses so the server's writes never block.
			wg.Add(1)
			go func() {
				defer wg.Done()
				io.Copy(io.Discard, cn)
			}()

			// Pipeline many requests on the same connection.
			for j := 0; j < requests; j++ {
				if t.Failed() {
					return
				}
				_, err := cn.Write(reqBytes)
				if err != nil {
					t.Error(err)
					return
				}
			}
		}()
	}
	wg.Wait()
}
// Test that the bufio.Reader returned by Hijack includes any buffered
// byte (from the Server's backgroundRead) in its buffer. We want the
// Handler code to be able to tell that a byte is available via
// bufio.Reader.Buffered(), without resorting to Reading it
// (potentially blocking) to get at it.
func TestServerHijackGetsBackgroundByte(t *testing.T) {
	if runtime.GOOS == "plan9" {
		t.Skip("skipping test; see https://golang.org/issue/18657")
	}
	setParallel(t)
	defer afterTest(t)
	done := make(chan struct{})
	inHandler := make(chan bool, 1)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		defer close(done)

		// Tell the client to send more data after the GET request.
		inHandler <- true

		conn, buf, err := w.(Hijacker).Hijack()
		if err != nil {
			t.Error(err)
			return
		}
		defer conn.Close()

		// The "foo" bytes written by the client after the request
		// should already be in the hijacked reader's buffer.
		peek, err := buf.Reader.Peek(3)
		if string(peek) != "foo" || err != nil {
			t.Errorf("Peek = %q, %v; want foo, nil", peek, err)
		}

		select {
		case <-r.Context().Done():
			t.Error("context unexpectedly canceled")
		default:
		}
	}))
	defer ts.Close()
	cn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer cn.Close()
	if _, err := cn.Write([]byte("GET / HTTP/1.1\r\nHost: e.com\r\n\r\n")); err != nil {
		t.Fatal(err)
	}
	<-inHandler
	if _, err := cn.Write([]byte("foo")); err != nil {
		t.Fatal(err)
	}

	// CloseWrite signals EOF to the server without closing the read side,
	// so the handler can still respond/peek.
	if err := cn.(*net.TCPConn).CloseWrite(); err != nil {
		t.Fatal(err)
	}
	select {
	case <-done:
	case <-time.After(2 * time.Second):
		t.Error("timeout")
	}
}
// Like TestServerHijackGetsBackgroundByte above but sending an
// immediate 8KB of data to the server to overflow the server's 4KB
// buffer (size below is 8 << 10 bytes).
func TestServerHijackGetsBackgroundByte_big(t *testing.T) {
	if runtime.GOOS == "plan9" {
		t.Skip("skipping test; see https://golang.org/issue/18657")
	}
	setParallel(t)
	defer afterTest(t)
	done := make(chan struct{})
	const size = 8 << 10
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		defer close(done)

		conn, buf, err := w.(Hijacker).Hijack()
		if err != nil {
			t.Error(err)
			return
		}
		defer conn.Close()
		slurp, err := io.ReadAll(buf.Reader)
		if err != nil {
			t.Errorf("Copy: %v", err)
		}
		allX := true
		for _, v := range slurp {
			if v != 'x' {
				allX = false
			}
		}
		if len(slurp) != size {
			t.Errorf("read %d; want %d", len(slurp), size)
		} else if !allX {
			t.Errorf("read %q; want %d 'x'", slurp, size)
		}
	}))
	defer ts.Close()
	cn, err := net.Dial("tcp", ts.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer cn.Close()
	// Send the request immediately followed by the payload, then
	// half-close so the handler's ReadAll terminates.
	if _, err := fmt.Fprintf(cn, "GET / HTTP/1.1\r\nHost: e.com\r\n\r\n%s",
		strings.Repeat("x", size)); err != nil {
		t.Fatal(err)
	}
	if err := cn.(*net.TCPConn).CloseWrite(); err != nil {
		t.Fatal(err)
	}

	<-done
}
// Issue 18319: test that the Server validates the request method.
// A well-formed method gets the handler's 200; a method containing an
// invalid character ('(') must be rejected with a 400.
func TestServerValidatesMethod(t *testing.T) {
	tests := []struct {
		method string
		want   int
	}{
		{"GET", 200},
		{"GE(T", 400},
	}
	for _, tt := range tests {
		conn := &testConn{closec: make(chan bool, 1)}
		io.WriteString(&conn.readBuf, tt.method+" / HTTP/1.1\r\nHost: foo.example\r\n\r\n")

		ln := &oneConnListener{conn}
		go Serve(ln, serve(200))
		<-conn.closec
		res, err := ReadResponse(bufio.NewReader(&conn.writeBuf), nil)
		if err != nil {
			// Report the parse error itself; res is nil whenever
			// err is non-nil, so printing res here was useless.
			t.Errorf("For %s, ReadResponse: %v", tt.method, err)
			continue
		}
		if res.StatusCode != tt.want {
			t.Errorf("For %s, Status = %d; want %d", tt.method, res.StatusCode, tt.want)
		}
	}
}
// Listener for TestServerListenNotComparableListener.
// The underlying slice type makes values of this type non-comparable.
type eofListenerNotComparable []int

func (eofListenerNotComparable) Accept() (net.Conn, error) { return nil, io.EOF }
func (eofListenerNotComparable) Addr() net.Addr            { return nil }
func (eofListenerNotComparable) Close() error              { return nil }
// Issue 24812: don't crash on non-comparable Listener
func TestServerListenNotComparableListener(t *testing.T) {
	var s Server
	s.Serve(make(eofListenerNotComparable, 1)) // used to panic
}
// countCloseListener is a Listener wrapper that counts the number of Close calls.
type countCloseListener struct {
net.Listener
closes int32 // atomic
}
func (p *countCloseListener) Close() error {
var err error
if n := atomic.AddInt32(&p.closes, 1); n == 1 && p.Listener != nil {
err = p.Listener.Close()
}
return err
}
// Issue 24803: don't call Listener.Close on Server.Shutdown.
// The listener must be closed exactly once: here by the explicit
// ln.Close(), not again by Shutdown or Serve's cleanup.
func TestServerCloseListenerOnce(t *testing.T) {
	setParallel(t)
	defer afterTest(t)

	ln := newLocalListener(t)
	defer ln.Close()

	cl := &countCloseListener{Listener: ln}
	server := &Server{}
	sdone := make(chan bool, 1)

	go func() {
		server.Serve(cl)
		sdone <- true
	}()
	// Give Serve a moment to start accepting before shutting down.
	time.Sleep(10 * time.Millisecond)
	server.Shutdown(context.Background())
	ln.Close()
	<-sdone

	nclose := atomic.LoadInt32(&cl.closes)
	if nclose != 1 {
		t.Errorf("Close calls = %v; want 1", nclose)
	}
}
// Issue 20239: don't block in Serve if Shutdown is called first.
// Serve on an already-shut-down Server must return ErrServerClosed
// and close the listener exactly once.
func TestServerShutdownThenServe(t *testing.T) {
	var srv Server
	cl := &countCloseListener{Listener: nil}
	srv.Shutdown(context.Background())
	got := srv.Serve(cl)
	if got != ErrServerClosed {
		t.Errorf("Serve err = %v; want ErrServerClosed", got)
	}
	nclose := atomic.LoadInt32(&cl.closes)
	if nclose != 1 {
		t.Errorf("Close calls = %v; want 1", nclose)
	}
}
// Issue 23351: document and test behavior of ServeMux with ports
func TestStripPortFromHost(t *testing.T) {
	mux := NewServeMux()

	// Register both a port-less and a port-qualified pattern; the
	// port-less one must win because ServeMux strips the port.
	mux.HandleFunc("example.com/", func(w ResponseWriter, r *Request) {
		fmt.Fprintf(w, "OK")
	})
	mux.HandleFunc("example.com:9000/", func(w ResponseWriter, r *Request) {
		fmt.Fprintf(w, "uh-oh!")
	})

	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest("GET", "http://example.com:9000/", nil))

	if got := rec.Body.String(); got != "OK" {
		t.Errorf("Response gotten was %q", got)
	}
}
// TestServerContexts verifies that BaseContext's value is visible in
// ConnContext and that both BaseContext and ConnContext values reach the
// handler's request context.
func TestServerContexts(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	type baseKey struct{}
	type connKey struct{}
	ch := make(chan context.Context, 1)
	ts := httptest.NewUnstartedServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
		ch <- r.Context()
	}))
	ts.Config.BaseContext = func(ln net.Listener) context.Context {
		// BaseContext must receive the user's original listener, not
		// the server's internal wrapper.
		if strings.Contains(reflect.TypeOf(ln).String(), "onceClose") {
			t.Errorf("unexpected onceClose listener type %T", ln)
		}
		return context.WithValue(context.Background(), baseKey{}, "base")
	}
	ts.Config.ConnContext = func(ctx context.Context, c net.Conn) context.Context {
		if got, want := ctx.Value(baseKey{}), "base"; got != want {
			t.Errorf("in ConnContext, base context key = %#v; want %q", got, want)
		}
		return context.WithValue(ctx, connKey{}, "conn")
	}
	ts.Start()
	defer ts.Close()
	res, err := ts.Client().Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()
	ctx := <-ch
	if got, want := ctx.Value(baseKey{}), "base"; got != want {
		t.Errorf("base context key = %#v; want %q", got, want)
	}
	if got, want := ctx.Value(connKey{}), "conn"; got != want {
		t.Errorf("conn context key = %#v; want %q", got, want)
	}
}
// TestServerContextsHTTP2 is like TestServerContexts but negotiates HTTP/2
// over TLS and additionally asserts the request arrives as HTTP/2.
func TestServerContextsHTTP2(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	type baseKey struct{}
	type connKey struct{}
	ch := make(chan context.Context, 1)
	ts := httptest.NewUnstartedServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
		if r.ProtoMajor != 2 {
			t.Errorf("unexpected HTTP/1.x request")
		}
		ch <- r.Context()
	}))
	ts.Config.BaseContext = func(ln net.Listener) context.Context {
		if strings.Contains(reflect.TypeOf(ln).String(), "onceClose") {
			t.Errorf("unexpected onceClose listener type %T", ln)
		}
		return context.WithValue(context.Background(), baseKey{}, "base")
	}
	ts.Config.ConnContext = func(ctx context.Context, c net.Conn) context.Context {
		if got, want := ctx.Value(baseKey{}), "base"; got != want {
			t.Errorf("in ConnContext, base context key = %#v; want %q", got, want)
		}
		return context.WithValue(ctx, connKey{}, "conn")
	}
	// Advertise h2 via ALPN so the client can negotiate HTTP/2.
	ts.TLS = &tls.Config{
		NextProtos: []string{"h2", "http/1.1"},
	}
	ts.StartTLS()
	defer ts.Close()
	ts.Client().Transport.(*Transport).ForceAttemptHTTP2 = true
	res, err := ts.Client().Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()
	ctx := <-ch
	if got, want := ctx.Value(baseKey{}), "base"; got != want {
		t.Errorf("base context key = %#v; want %q", got, want)
	}
	if got, want := ctx.Value(connKey{}), "conn"; got != want {
		t.Errorf("conn context key = %#v; want %q", got, want)
	}
}
// Issue 35750: check ConnContext not modifying context for other connections
func TestConnContextNotModifyingAllContexts(t *testing.T) {
	setParallel(t)
	defer afterTest(t)
	type connKey struct{}
	ts := httptest.NewUnstartedServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
		// Force a fresh connection for the next request.
		rw.Header().Set("Connection", "close")
	}))
	ts.Config.ConnContext = func(ctx context.Context, c net.Conn) context.Context {
		// Each new connection must start from a context without the
		// key; seeing one means a previous connection's value leaked.
		if got := ctx.Value(connKey{}); got != nil {
			t.Errorf("in ConnContext, unexpected context key = %#v", got)
		}
		return context.WithValue(ctx, connKey{}, "conn")
	}
	ts.Start()
	defer ts.Close()

	// Two sequential requests, each on its own connection.
	for i := 0; i < 2; i++ {
		res, err := ts.Client().Get(ts.URL)
		if err != nil {
			t.Fatal(err)
		}
		res.Body.Close()
	}
}
// Issue 30710: ensure that as per the spec, a server responds
// with 501 Not Implemented for unsupported transfer-encodings.
func TestUnsupportedTransferEncodingsReturn501(t *testing.T) {
	cst := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Write([]byte("Hello, World!"))
	}))
	defer cst.Close()

	serverURL, err := url.Parse(cst.URL)
	if err != nil {
		t.Fatalf("Failed to parse server URL: %v", err)
	}

	unsupportedTEs := []string{
		"fugazi",
		"foo-bar",
		"unknown",
	}

	for _, badTE := range unsupportedTEs {
		// Raw wire request so the bogus Transfer-Encoding reaches the
		// server unmodified (a Client would reject or rewrite it).
		http1ReqBody := fmt.Sprintf(""+
			"POST / HTTP/1.1\r\nConnection: close\r\n"+
			"Host: localhost\r\nTransfer-Encoding: %s\r\n\r\n", badTE)
		gotBody, err := fetchWireResponse(serverURL.Host, []byte(http1ReqBody))
		if err != nil {
			t.Errorf("%q. unexpected error: %v", badTE, err)
			continue
		}
		wantBody := fmt.Sprintf("" +
			"HTTP/1.1 501 Not Implemented\r\nContent-Type: text/plain; charset=utf-8\r\n" +
			"Connection: close\r\n\r\nUnsupported transfer encoding")
		if string(gotBody) != wantBody {
			t.Errorf("%q. body\ngot\n%q\nwant\n%q", badTE, gotBody, wantBody)
		}
	}
}
// TestContentEncodingNoSniffing_h1 runs the Content-Encoding sniffing test over HTTP/1.
func TestContentEncodingNoSniffing_h1(t *testing.T) {
	testContentEncodingNoSniffing(t, h1Mode)
}

// TestContentEncodingNoSniffing_h2 runs the Content-Encoding sniffing test over HTTP/2.
func TestContentEncodingNoSniffing_h2(t *testing.T) {
	testContentEncodingNoSniffing(t, h2Mode)
}
// Issue 31753: don't sniff when Content-Encoding is set
func testContentEncodingNoSniffing(t *testing.T, h2 bool) {
	setParallel(t)
	defer afterTest(t)

	type setting struct {
		name string
		body []byte

		// setting contentEncoding as an interface instead of a string
		// directly, so as to differentiate between 3 states:
		// unset, empty string "" and set string "foo/bar".
		contentEncoding any

		// wantContentType is the Content-Type the client must observe;
		// "" means no sniffed type is expected.
		wantContentType string
	}

	settings := []*setting{
		{
			name:            "gzip content-encoding, gzipped", // don't sniff.
			contentEncoding: "application/gzip",
			wantContentType: "",
			body: func() []byte {
				buf := new(bytes.Buffer)
				gzw := gzip.NewWriter(buf)
				gzw.Write([]byte("doctype html><p>Hello</p>"))
				gzw.Close()
				return buf.Bytes()
			}(),
		},
		{
			name:            "zlib content-encoding, zlibbed", // don't sniff.
			contentEncoding: "application/zlib",
			wantContentType: "",
			body: func() []byte {
				buf := new(bytes.Buffer)
				zw := zlib.NewWriter(buf)
				zw.Write([]byte("doctype html><p>Hello</p>"))
				zw.Close()
				return buf.Bytes()
			}(),
		},
		{
			name:            "no content-encoding", // must sniff.
			wantContentType: "application/x-gzip",
			body: func() []byte {
				buf := new(bytes.Buffer)
				gzw := gzip.NewWriter(buf)
				gzw.Write([]byte("doctype html><p>Hello</p>"))
				gzw.Close()
				return buf.Bytes()
			}(),
		},
		{
			name:            "phony content-encoding", // don't sniff.
			contentEncoding: "foo/bar",
			body:            []byte("doctype html><p>Hello</p>"),
		},
		{
			name:            "empty but set content-encoding",
			contentEncoding: "",
			wantContentType: "audio/mpeg",
			body:            []byte("ID3"),
		},
	}

	for _, tt := range settings {
		t.Run(tt.name, func(t *testing.T) {
			cst := newClientServerTest(t, h2, HandlerFunc(func(rw ResponseWriter, r *Request) {
				if tt.contentEncoding != nil {
					rw.Header().Set("Content-Encoding", tt.contentEncoding.(string))
				}
				rw.Write(tt.body)
			}))
			defer cst.close()

			res, err := cst.c.Get(cst.ts.URL)
			if err != nil {
				t.Fatalf("Failed to fetch URL: %v", err)
			}
			defer res.Body.Close()

			if g, w := res.Header.Get("Content-Encoding"), tt.contentEncoding; g != w {
				if w != nil { // The case where contentEncoding was set explicitly.
					t.Errorf("Content-Encoding mismatch\n\tgot: %q\n\twant: %q", g, w)
				} else if g != "" { // "" should be the equivalent when the contentEncoding is unset.
					t.Errorf("Unexpected Content-Encoding %q", g)
				}
			}

			if g, w := res.Header.Get("Content-Type"), tt.wantContentType; g != w {
				t.Errorf("Content-Type mismatch\n\tgot: %q\n\twant: %q", g, w)
			}
		})
	}
}
// Issue 30803: ensure that TimeoutHandler logs spurious
// WriteHeader calls, for consistency with other Handlers.
func TestTimeoutHandlerSuperfluousLogs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}

	setParallel(t)
	defer afterTest(t)

	// Capture this test's own file/function so the expected log lines
	// (which embed file:line of the spurious WriteHeader calls) can be built.
	pc, curFile, _, _ := runtime.Caller(0)
	curFileBaseName := filepath.Base(curFile)
	testFuncName := runtime.FuncForPC(pc).Name()

	timeoutMsg := "timed out here!"

	tests := []struct {
		name        string
		mustTimeout bool
		wantResp    string
	}{
		{
			name:     "return before timeout",
			wantResp: "HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n",
		},
		{
			name:        "return after timeout",
			mustTimeout: true,
			wantResp: fmt.Sprintf("HTTP/1.1 503 Service Unavailable\r\nContent-Length: %d\r\n\r\n%s",
				len(timeoutMsg), timeoutMsg),
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			exitHandler := make(chan bool, 1)
			defer close(exitHandler)
			lastLine := make(chan int, 1)

			sh := HandlerFunc(func(w ResponseWriter, r *Request) {
				w.WriteHeader(404)
				w.WriteHeader(404)
				w.WriteHeader(404)
				w.WriteHeader(404)
				_, _, line, _ := runtime.Caller(0)
				lastLine <- line
				<-exitHandler
			})

			if !tt.mustTimeout {
				exitHandler <- true
			}

			logBuf := new(bytes.Buffer)
			srvLog := log.New(logBuf, "", 0)
			// When expecting to timeout, we'll keep the duration short.
			dur := 20 * time.Millisecond
			if !tt.mustTimeout {
				// Otherwise, make it arbitrarily long to reduce the risk of flakes.
				dur = 10 * time.Second
			}
			th := TimeoutHandler(sh, dur, timeoutMsg)
			cst := newClientServerTest(t, h1Mode /* the test is protocol-agnostic */, th, optWithServerLog(srvLog))
			defer cst.close()

			res, err := cst.c.Get(cst.ts.URL)
			if err != nil {
				t.Fatalf("Unexpected error: %v", err)
			}

			// Deliberately removing the "Date" header since it is highly ephemeral
			// and will cause failure if we try to match it exactly.
			res.Header.Del("Date")
			res.Header.Del("Content-Type")

			// Match the response.
			blob, _ := httputil.DumpResponse(res, true)
			if g, w := string(blob), tt.wantResp; g != w {
				t.Errorf("Response mismatch\nGot\n%q\n\nWant\n%q", g, w)
			}

			// Given 4 w.WriteHeader calls, only the first one is valid
			// and the rest should be reported as the 3 spurious logs.
			logEntries := strings.Split(strings.TrimSpace(logBuf.String()), "\n")
			if g, w := len(logEntries), 3; g != w {
				blob, _ := json.MarshalIndent(logEntries, "", " ")
				t.Fatalf("Server logs count mismatch\ngot %d, want %d\n\nGot\n%s\n", g, w, blob)
			}

			lastSpuriousLine := <-lastLine
			firstSpuriousLine := lastSpuriousLine - 3
			// Now ensure that the regexes match exactly.
			// "http: superfluous response.WriteHeader call from <fn>.func\d.\d (<curFile>:lastSpuriousLine-[1, 3]"
			for i, logEntry := range logEntries {
				wantLine := firstSpuriousLine + i
				pat := fmt.Sprintf("^http: superfluous response.WriteHeader call from %s.func\\d+.\\d+ \\(%s:%d\\)$",
					testFuncName, curFileBaseName, wantLine)
				re := regexp.MustCompile(pat)
				if !re.MatchString(logEntry) {
					t.Errorf("Log entry mismatch\n\t%s\ndoes not match\n\t%s", logEntry, pat)
				}
			}
		})
	}
}
// fetchWireResponse dials host over TCP, writes http1ReqBody to the
// connection verbatim, and returns the raw bytes the server sends back,
// exactly as they appeared on the wire.
func fetchWireResponse(host string, http1ReqBody []byte) ([]byte, error) {
	conn, dialErr := net.Dial("tcp", host)
	if dialErr != nil {
		return nil, dialErr
	}
	defer conn.Close()

	if _, writeErr := conn.Write(http1ReqBody); writeErr != nil {
		return nil, writeErr
	}
	return io.ReadAll(conn)
}
// BenchmarkResponseStatusLine measures writing an HTTP/1.1 200 status line
// to a discarded buffered writer, in parallel.
func BenchmarkResponseStatusLine(b *testing.B) {
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		bw := bufio.NewWriter(io.Discard)
		var buf3 [3]byte // scratch buffer reused by writeStatusLine
		for pb.Next() {
			Export_writeStatusLine(bw, true, 200, buf3[:])
		}
	})
}
// TestDisableKeepAliveUpgrade verifies that a 101 Switching Protocols
// upgrade still yields a usable read/write body even when keep-alives are
// disabled on both the server and the client transport.
func TestDisableKeepAliveUpgrade(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	setParallel(t)
	defer afterTest(t)

	s := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Connection", "Upgrade")
		w.Header().Set("Upgrade", "someProto")
		w.WriteHeader(StatusSwitchingProtocols)
		c, buf, err := w.(Hijacker).Hijack()
		if err != nil {
			return
		}
		defer c.Close()

		// Copy from the *bufio.ReadWriter, which may contain buffered data.
		// Copy to the net.Conn, to avoid buffering the output.
		io.Copy(c, buf)
	}))
	s.Config.SetKeepAlivesEnabled(false)
	s.Start()
	defer s.Close()

	cl := s.Client()
	cl.Transport.(*Transport).DisableKeepAlives = true

	resp, err := cl.Get(s.URL)
	if err != nil {
		t.Fatalf("failed to perform request: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != StatusSwitchingProtocols {
		t.Fatalf("unexpected status code: %v", resp.StatusCode)
	}

	// After a 101, the response body doubles as the upgraded connection.
	rwc, ok := resp.Body.(io.ReadWriteCloser)
	if !ok {
		t.Fatalf("Response.Body is not an io.ReadWriteCloser: %T", resp.Body)
	}

	_, err = rwc.Write([]byte("hello"))
	if err != nil {
		t.Fatalf("failed to write to body: %v", err)
	}

	b := make([]byte, 5)
	_, err = io.ReadFull(rwc, b)
	if err != nil {
		t.Fatalf("failed to read from body: %v", err)
	}

	if string(b) != "hello" {
		t.Fatalf("unexpected value read from body:\ngot: %q\nwant: %q", b, "hello")
	}
}
// TestMuxRedirectRelative checks that ServeMux answers a request whose
// request-URI is an absolute URL with a 301 redirect to the relative path "/".
func TestMuxRedirectRelative(t *testing.T) {
	setParallel(t)
	req, err := ReadRequest(bufio.NewReader(strings.NewReader("GET http://example.com HTTP/1.1\r\nHost: test\r\n\r\n")))
	if err != nil {
		t.Errorf("%s", err)
	}
	mux := NewServeMux()
	resp := httptest.NewRecorder()
	mux.ServeHTTP(resp, req)
	if got, want := resp.Header().Get("Location"), "/"; got != want {
		t.Errorf("Location header expected %q; got %q", want, got)
	}
	if got, want := resp.Code, StatusMovedPermanently; got != want {
		t.Errorf("Expected response code %d; got %d", want, got)
	}
}
// TestQuerySemicolon tests the behavior of semicolons in queries. See Issue 25192.
func TestQuerySemicolon(t *testing.T) {
	t.Cleanup(func() { afterTest(t) })

	tests := []struct {
		query           string
		xNoSemicolons   string // expected "x" value when semicolons are rejected
		xWithSemicolons string // expected "x" value under AllowQuerySemicolons
		warning         bool   // whether a semicolon warning is expected in the server log
	}{
		{"?a=1;x=bad&x=good", "good", "bad", true},
		{"?a=1;b=bad&x=good", "good", "good", true},
		{"?a=1%3Bx=bad&x=good%3B", "good;", "good;", false},
		{"?a=1;x=good;x=bad", "", "good", true},
	}

	for _, tt := range tests {
		t.Run(tt.query+"/allow=false", func(t *testing.T) {
			allowSemicolons := false
			testQuerySemicolon(t, tt.query, tt.xNoSemicolons, allowSemicolons, tt.warning)
		})
		t.Run(tt.query+"/allow=true", func(t *testing.T) {
			allowSemicolons, expectWarning := true, false
			testQuerySemicolon(t, tt.query, tt.xWithSemicolons, allowSemicolons, expectWarning)
		})
	}
}
// testQuerySemicolon issues one GET with the given query string and checks
// the "x" value echoed by the handler, the ParseForm error behavior, and
// whether the server's ErrorLog mentions semicolons.
func testQuerySemicolon(t *testing.T, query string, wantX string, allowSemicolons, expectWarning bool) {
	setParallel(t)

	writeBackX := func(w ResponseWriter, r *Request) {
		x := r.URL.Query().Get("x")
		if expectWarning {
			if err := r.ParseForm(); err == nil || !strings.Contains(err.Error(), "semicolon") {
				t.Errorf("expected error mentioning semicolons from ParseForm, got %v", err)
			}
		} else {
			if err := r.ParseForm(); err != nil {
				t.Errorf("expected no error from ParseForm, got %v", err)
			}
		}
		// Query() and FormValue must agree on "x".
		if got := r.FormValue("x"); x != got {
			t.Errorf("got %q from FormValue, want %q", got, x)
		}
		fmt.Fprintf(w, "%s", x)
	}

	h := Handler(HandlerFunc(writeBackX))
	if allowSemicolons {
		h = AllowQuerySemicolons(h)
	}
	ts := httptest.NewUnstartedServer(h)
	logBuf := &bytes.Buffer{}
	ts.Config.ErrorLog = log.New(logBuf, "", 0)
	ts.Start()
	defer ts.Close()

	req, _ := NewRequest("GET", ts.URL+query, nil)
	res, err := ts.Client().Do(req)
	if err != nil {
		t.Fatal(err)
	}
	slurp, _ := io.ReadAll(res.Body)
	res.Body.Close()
	if got, want := res.StatusCode, 200; got != want {
		t.Errorf("Status = %d; want = %d", got, want)
	}
	if got, want := string(slurp), wantX; got != want {
		t.Errorf("Body = %q; want = %q", got, want)
	}

	if expectWarning {
		if !strings.Contains(logBuf.String(), "semicolon") {
			t.Errorf("got %q from ErrorLog, expected a mention of semicolons", logBuf.String())
		}
	} else {
		if strings.Contains(logBuf.String(), "semicolon") {
			t.Errorf("got %q from ErrorLog, expected no mention of semicolons", logBuf.String())
		}
	}
}
// TestMaxBytesHandler exercises MaxBytesHandler over a grid of limit and
// request-body sizes (both below, at, and above the limit).
func TestMaxBytesHandler(t *testing.T) {
	setParallel(t)
	defer afterTest(t)

	for _, maxSize := range []int64{100, 1_000, 1_000_000} {
		for _, requestSize := range []int64{100, 1_000, 1_000_000} {
			t.Run(fmt.Sprintf("max size %d request size %d", maxSize, requestSize),
				func(t *testing.T) {
					testMaxBytesHandler(t, maxSize, requestSize)
				})
		}
	}
}
// testMaxBytesHandler posts requestSize bytes through MaxBytesHandler(maxSize)
// to an echo handler, and checks that the handler never sees more than
// maxSize bytes, errors when over the limit, and echoes exactly what it read.
func testMaxBytesHandler(t *testing.T, maxSize, requestSize int64) {
	var (
		handlerN   int64 // bytes the handler actually read from r.Body
		handlerErr error // error the handler saw while reading
	)
	echo := HandlerFunc(func(w ResponseWriter, r *Request) {
		var buf bytes.Buffer
		handlerN, handlerErr = io.Copy(&buf, r.Body)
		io.Copy(w, &buf)
	})

	ts := httptest.NewServer(MaxBytesHandler(echo, maxSize))
	defer ts.Close()

	c := ts.Client()
	var buf strings.Builder
	body := strings.NewReader(strings.Repeat("a", int(requestSize)))
	res, err := c.Post(ts.URL, "text/plain", body)
	if err != nil {
		t.Errorf("unexpected connection error: %v", err)
	} else {
		_, err = io.Copy(&buf, res.Body)
		res.Body.Close()
		if err != nil {
			t.Errorf("unexpected read error: %v", err)
		}
	}
	if handlerN > maxSize {
		t.Errorf("expected max request body %d; got %d", maxSize, handlerN)
	}
	if requestSize > maxSize && handlerErr == nil {
		t.Error("expected error on handler side; got nil")
	}
	if requestSize <= maxSize {
		if handlerErr != nil {
			t.Errorf("%d expected nil error on handler side; got %v", requestSize, handlerErr)
		}
		if handlerN != requestSize {
			t.Errorf("expected request of size %d; got %d", requestSize, handlerN)
		}
	}
	if buf.Len() != int(handlerN) {
		t.Errorf("expected echo of size %d; got %d", handlerN, buf.Len())
	}
}
| [
"\"TEST_BENCH_SERVER_URL\"",
"\"TEST_BENCH_CLIENT_N\"",
"\"TEST_BENCH_SERVER\"",
"\"TEST_BENCH_SERVER_PORT\""
]
| []
| [
"TEST_BENCH_SERVER",
"TEST_BENCH_SERVER_URL",
"TEST_BENCH_SERVER_PORT",
"TEST_BENCH_CLIENT_N"
]
| [] | ["TEST_BENCH_SERVER", "TEST_BENCH_SERVER_URL", "TEST_BENCH_SERVER_PORT", "TEST_BENCH_CLIENT_N"] | go | 4 | 0 | |
kowalski/alert_watcher_zuds.py | import argparse
import os
import sys
import io
import time
import json
from bson.json_util import dumps
import traceback
import confluent_kafka
from ast import literal_eval
import avro.schema
import fastavro
import subprocess
import datetime
import multiprocessing
# import threading
import pymongo
import pytz
from numba import jit
import numpy as np
from tensorflow.keras.models import load_model
import gzip
import io
from astropy.io import fits
from copy import deepcopy
''' load config and secrets '''
# Load the main service configuration baked into the container image.
with open('/app/config.json') as cjson:
    config = json.load(cjson)

# Load secrets kept out of the main config file.
with open('/app/secrets.json') as sjson:
    secrets = json.load(sjson)

# Merge secrets into config section-by-section: every top-level section of
# secrets must also exist in config (config[k] is updated in place).
for k in secrets:
    config[k].update(secrets.get(k, {}))
def utc_now():
    """Return the current time as a timezone-aware UTC datetime."""
    return datetime.datetime.now(pytz.utc)
def time_stamps():
    """
    :return: local time, UTC time
    """
    fmt = '%Y%m%d_%H:%M:%S'
    local_ts = datetime.datetime.now().strftime(fmt)
    utc_ts = datetime.datetime.utcnow().strftime(fmt)
    return local_ts, utc_ts
@jit
def deg2hms(x):
    """Transform degrees to *hours:minutes:seconds* strings.

    Parameters
    ----------
    x : float
        The degree value c [0, 360) to be written as a sexagesimal string.

    Returns
    -------
    out : str
        The input angle written as a sexagesimal string, in the
        form, hours:minutes:seconds.
    """
    assert 0.0 <= x < 360.0, 'Bad RA value in degrees'
    # 15 degrees of RA == 1 hour, hence hours = x / 15 (written as x * 12 / 180)
    _h = np.floor(x * 12.0 / 180.)
    _m = np.floor((x * 12.0 / 180. - _h) * 60.0)
    _s = ((x * 12.0 / 180. - _h) * 60.0 - _m) * 60.0
    # seconds are kept to 4 decimal places, zero-padded to width 7
    hms = '{:02.0f}:{:02.0f}:{:07.4f}'.format(_h, _m, _s)
    return hms
@jit
def deg2dms(x):
    """Transform degrees to *degrees:arcminutes:arcseconds* strings.

    Parameters
    ----------
    x : float
        The degree value c [-90, 90] to be converted.

    Returns
    -------
    out : str
        The input angle as a string, written as degrees:minutes:seconds.
    """
    assert -90.0 <= x <= 90.0, 'Bad Dec value in degrees'
    # integer degrees, carrying the sign of x (note: for -1 < x < 0 this
    # yields -0.0, which still formats with a leading minus sign)
    _d = np.floor(abs(x)) * np.sign(x)
    _m = np.floor(np.abs(x - _d) * 60.0)
    _s = np.abs(np.abs(x - _d) * 60.0 - _m) * 60.0
    dms = '{:02.0f}:{:02.0f}:{:06.3f}'.format(_d, _m, _s)
    return dms
@jit
def great_circle_distance(ra1_deg, dec1_deg, ra2_deg, dec2_deg):
    """
    Distance between two points on the sphere

    :param ra1_deg:
    :param dec1_deg:
    :param ra2_deg:
    :param dec2_deg:
    :return: distance in degrees
    """
    # this is orders of magnitude faster than astropy.coordinates.Skycoord.separation
    DEGRA = np.pi / 180.0
    ra1, dec1, ra2, dec2 = ra1_deg * DEGRA, dec1_deg * DEGRA, ra2_deg * DEGRA, dec2_deg * DEGRA
    delta_ra = np.abs(ra2 - ra1)
    # arctan2-based great-circle formula (well-conditioned for small separations)
    distance = np.arctan2(np.sqrt((np.cos(dec2) * np.sin(delta_ra)) ** 2
                                  + (np.cos(dec1) * np.sin(dec2) - np.sin(dec1) * np.cos(dec2) * np.cos(
                                      delta_ra)) ** 2),
                          np.sin(dec1) * np.sin(dec2) + np.cos(dec1) * np.cos(dec2) * np.cos(delta_ra))
    return distance * 180.0 / np.pi
@jit
def in_ellipse(alpha, delta0, alpha1, delta01, d0, axis_ratio, PA0):
    """
    Check if a given point (alpha, delta0)
    is within an ellipse specified by
    center (alpha1, delta01), maj_ax (d0), axis ratio and positional angle
    All angles are in decimal degrees
    Adapted from q3c: https://github.com/segasai/q3c/blob/master/q3cube.c

    :param alpha:
    :param delta0:
    :param alpha1:
    :param delta01:
    :param d0:
    :param axis_ratio:
    :param PA0:
    :return:
    """
    DEGRA = np.pi / 180.0

    # convert degrees to radians
    d_alpha = (alpha1 - alpha) * DEGRA
    delta1 = delta01 * DEGRA
    delta = delta0 * DEGRA
    PA = PA0 * DEGRA
    d = d0 * DEGRA
    # eccentricity derived from the axis ratio
    e = np.sqrt(1.0 - axis_ratio * axis_ratio)

    t1 = np.cos(d_alpha)
    t22 = np.sin(d_alpha)
    t3 = np.cos(delta1)
    t32 = np.sin(delta1)
    t6 = np.cos(delta)
    t26 = np.sin(delta)
    t9 = np.cos(d)
    t55 = np.sin(d)

    # point on the far hemisphere relative to the ellipse center -> outside
    if (t3 * t6 * t1 + t32 * t26) < 0:
        return False

    # The t-variables below are the machine-generated intermediate products
    # from q3c's symbolic expansion; kept verbatim on purpose.
    t2 = t1 * t1
    t4 = t3 * t3
    t5 = t2 * t4
    t7 = t6 * t6
    t8 = t5 * t7
    t10 = t9 * t9
    t11 = t7 * t10
    t13 = np.cos(PA)
    t14 = t13 * t13
    t15 = t14 * t10
    t18 = t7 * t14
    t19 = t18 * t10
    t24 = np.sin(PA)
    t31 = t1 * t3
    t36 = 2.0 * t31 * t32 * t26 * t6
    t37 = t31 * t32
    t38 = t26 * t6
    t45 = t4 * t10
    t56 = t55 * t55
    t57 = t4 * t7
    t60 = -t8 + t5 * t11 + 2.0 * t5 * t15 - t5 * t19 - \
          2.0 * t1 * t4 * t22 * t10 * t24 * t13 * t26 - t36 + \
          2.0 * t37 * t38 * t10 - 2.0 * t37 * t38 * t15 - t45 * t14 - t45 * t2 + \
          2.0 * t22 * t3 * t32 * t6 * t24 * t10 * t13 - t56 + t7 - t11 + t4 - t57 + t57 * t10 + t19 - t18 * t45
    t61 = e * e
    t63 = t60 * t61 + t8 + t57 - t4 - t7 + t56 + t36

    return t63 > 0
"""Utilities for manipulating Avro data and schemas.
"""
def _loadSingleAvsc(file_path, names):
    """Parse the avsc file at *file_path* into an Avro schema.

    Named types are registered in (and resolved against) *names*.
    """
    with open(file_path) as fh:
        parsed = json.load(fh)
    return avro.schema.SchemaFromJSONData(parsed, names)
def combineSchemas(schema_files):
    """Combine multiple nested schemas into a single schema.

    Parameters
    ----------
    schema_files : `list`
        List of files containing schemas.
        If nested, most internal schema must be first.

    Returns
    -------
    `dict`
        Avro schema
    """
    known_schemas = avro.schema.Names()

    # Each file registers its named types into known_schemas; only the last
    # (outermost) schema is returned, with earlier ones resolved inside it.
    for s in schema_files:
        schema = _loadSingleAvsc(s, known_schemas)
    return schema.to_json()
def writeAvroData(json_data, json_schema):
    """Encode json into Avro format given a schema.

    Parameters
    ----------
    json_data : `dict`
        The JSON data containing message content.
    json_schema : `dict`
        The writer Avro schema for encoding data.

    Returns
    -------
    `_io.BytesIO`
        Encoded data.  Note: the stream position is left at the end;
        callers must seek(0) before reading.
    """
    bytes_io = io.BytesIO()
    fastavro.schemaless_writer(bytes_io, json_schema, json_data)
    return bytes_io
def readAvroData(bytes_io, json_schema):
    """Read data and decode with a given Avro schema.

    Parameters
    ----------
    bytes_io : `_io.BytesIO`
        Data to be decoded.
    json_schema : `dict`
        The reader Avro schema for decoding data.

    Returns
    -------
    `dict`
        Decoded data.
    """
    # rewind first so decoding always starts at the beginning of the buffer
    bytes_io.seek(0)
    message = fastavro.schemaless_reader(bytes_io, json_schema)
    return message
def readSchemaData(bytes_io):
    """Read data that already has an Avro schema.

    Parameters
    ----------
    bytes_io : `_io.BytesIO`
        Data to be decoded.

    Returns
    -------
    `dict`
        Decoded data (a fastavro reader, iterable over the records).
    """
    bytes_io.seek(0)
    message = fastavro.reader(bytes_io)
    return message
class AlertError(Exception):
    """Base class for exceptions in this module.
    """
    pass
class EopError(AlertError):
    """Exception raised when reaching end of partition.

    Parameters
    ----------
    msg : Kafka message
        The Kafka message result from consumer.poll().
    """

    def __init__(self, msg):
        self.message = (
            'topic:%s, partition:%d, status:end, '
            'offset:%d, key:%s, time:%.3f\n'
            % (msg.topic(), msg.partition(), msg.offset(), str(msg.key()), time.time())
        )

    def __str__(self):
        return self.message
class AlertConsumer(object):
    """Creates an alert stream Kafka consumer for a given topic.

    Parameters
    ----------
    topic : `str`
        Name of the topic to subscribe to.
    schema_files : Avro schema files
        The reader Avro schema files for decoding data. Optional.
    **kwargs
        Keyword arguments for configuring confluent_kafka.Consumer().
    """

    def __init__(self, topic, schema_files=None, **kwargs):
        # keep track of disconnected partitions
        self.num_disconnected_partitions = 0
        self.topic = topic

        def error_cb(err, _self=self):
            # Kafka error callback: exit the worker once every assigned
            # partition has disconnected (error code -195).
            print(*time_stamps(), 'error_cb -------->', err)
            if err.code() == -195:
                _self.num_disconnected_partitions += 1
                if _self.num_disconnected_partitions == _self.num_partitions:
                    print(*time_stamps(), 'all partitions got disconnected, killing thread')
                    sys.exit()
                else:
                    print(*time_stamps(), '{:s}: disconnected from partition.'.format(_self.topic),
                          'total:', self.num_disconnected_partitions)

        kwargs['error_cb'] = error_cb

        self.consumer = confluent_kafka.Consumer(**kwargs)
        self.num_partitions = 0

        def on_assign(consumer, partitions, _self=self):
            # force-reset offsets when subscribing to a topic:
            for part in partitions:
                # -2 stands for beginning and -1 for end
                part.offset = -2
                # keep number of partitions. when reaching end of last partition, kill thread and start from beginning
                _self.num_partitions += 1
                print(consumer.get_watermark_offsets(part))

        self.consumer.subscribe([topic], on_assign=on_assign)

        # fixme?
        # if schema_files is not None:
        #     self.alert_schema = combineSchemas(schema_files)

        # MongoDB:
        self.config = config
        self.collection_alerts = 'ZUDS_alerts'
        self.collection_alerts_aux = 'ZUDS_alerts_aux'

        self.db = None
        self.connect_to_db()

        # indexes used by the alert queries below and by downstream services
        self.db['db'][self.collection_alerts].create_index([('coordinates.radec_geojson', '2dsphere'),
                                                            ('candid', pymongo.DESCENDING)], background=True)
        self.db['db'][self.collection_alerts].create_index([('coordinates.radec_geojson', '2dsphere'),
                                                            ('objectId', pymongo.DESCENDING)], background=True)
        self.db['db'][self.collection_alerts].create_index([('objectId', pymongo.ASCENDING)], background=True)
        self.db['db'][self.collection_alerts].create_index([('candid', pymongo.ASCENDING)], background=True)
        self.db['db'][self.collection_alerts].create_index([('candidate.ztfname', pymongo.ASCENDING)], background=True)
        self.db['db'][self.collection_alerts].create_index([('candidate.jdstartstack', pymongo.DESCENDING),
                                                            ('candidate.jdendstack', pymongo.ASCENDING)],
                                                           background=True, sparse=True)
        self.db['db'][self.collection_alerts].create_index([('candidate.jd', pymongo.DESCENDING),
                                                            ('candidate.drb', pymongo.DESCENDING),
                                                            ('candid', pymongo.DESCENDING)],
                                                           background=True, sparse=True)
        self.db['db'][self.collection_alerts].create_index([('candidate.jd', 1),
                                                            ('candidate.drb', 1),
                                                            ('candidate.isdiffpos', 1),
                                                            ('candidate.ndethist', 1)],
                                                           name='jd__braai__magpsf__isdiffpos__ndethist',
                                                           background=True, sparse=True)

        # ML models: load each configured Keras model from /app/models;
        # a model that fails to load is skipped rather than fatal.
        self.ml_models = dict()
        for m in config['ml_models']:
            try:
                m_v = config["ml_models"][m]["version"]
                self.ml_models[m] = {'model': load_model(f'/app/models/{m}_{m_v}.h5'),
                                     'version': m_v}
            except Exception as e:
                print(*time_stamps(), f'Error loading ML model {m}')
                traceback.print_exc()
                print(e)
                continue

    def connect_to_db(self):
        """
        Connect to mongo and authenticate; stores handles in self.db.

        :return:
        """
        _config = self.config

        try:
            # there's only one instance of DB, it's too big to be replicated
            _client = pymongo.MongoClient(host=_config['database']['host'],
                                          port=_config['database']['port'], connect=False)
            # grab main database:
            _db = _client[_config['database']['db']]
        except Exception as _e:
            raise ConnectionRefusedError

        try:
            # authenticate
            _db.authenticate(_config['database']['user'], _config['database']['pwd'])
        except Exception as _e:
            raise ConnectionRefusedError

        self.db = dict()
        self.db['client'] = _client
        self.db['db'] = _db

    def insert_db_entry(self, _collection=None, _db_entry=None):
        """
        Insert a document _doc to collection _collection in DB.
        It is monitored for timeout in case DB connection hangs for some reason

        :param _collection:
        :param _db_entry:
        :return:
        """
        assert _collection is not None, 'Must specify collection'
        assert _db_entry is not None, 'Must specify document'
        try:
            self.db['db'][_collection].insert_one(_db_entry)
        except Exception as _e:
            # failures are logged but deliberately not re-raised
            print(*time_stamps(), 'Error inserting {:s} into {:s}'.format(str(_db_entry['_id']), _collection))
            traceback.print_exc()
            print(_e)

    def insert_multiple_db_entries(self, _collection=None, _db_entries=None):
        """
        Insert a document _doc to collection _collection in DB.
        It is monitored for timeout in case DB connection hangs for some reason

        :param _db:
        :param _collection:
        :param _db_entries:
        :return:
        """
        assert _collection is not None, 'Must specify collection'
        assert _db_entries is not None, 'Must specify documents'
        try:
            # ordered=False ensures that every insert operation will be attempted
            # so that if, e.g., a document already exists, it will be simply skipped
            self.db['db'][_collection].insert_many(_db_entries, ordered=False)
        except pymongo.errors.BulkWriteError as bwe:
            print(*time_stamps(), bwe.details)
        except Exception as _e:
            traceback.print_exc()
            print(_e)

    def replace_db_entry(self, _collection=None, _filter=None, _db_entry=None):
        """
        Insert a document _doc to collection _collection in DB.
        It is monitored for timeout in case DB connection hangs for some reason

        :param _collection:
        :param _filter:
        :param _db_entry:
        :return:
        """
        assert _collection is not None, 'Must specify collection'
        assert _db_entry is not None, 'Must specify document'
        try:
            self.db['db'][_collection].replace_one(_filter, _db_entry, upsert=True)
        except Exception as _e:
            print(*time_stamps(), 'Error replacing {:s} in {:s}'.format(str(_db_entry['_id']), _collection))
            traceback.print_exc()
            print(_e)

    @staticmethod
    def alert_mongify(alert):
        # Convert a decoded alert packet into (document, light_curve):
        # the document gets GeoJSON coordinates for 2dsphere indexing and
        # the light curve is split out for separate aux-collection storage.
        doc = dict(alert)

        # let mongo create a unique id
        # candid+objectId is a unique combination:
        # doc['_id'] = f"{alert['candid']}_{alert['objectId']}"

        # placeholders for cross-matches and classifications
        doc['classifications'] = dict()

        # GeoJSON for 2D indexing
        doc['coordinates'] = {}
        _ra = doc['candidate']['ra']
        _dec = doc['candidate']['dec']
        _radec = [_ra, _dec]
        # string format: H:M:S, D:M:S
        _radec_str = [deg2hms(_ra), deg2dms(_dec)]
        doc['coordinates']['radec_str'] = _radec_str
        # for GeoJSON, must be lon:[-180, 180], lat:[-90, 90] (i.e. in deg)
        _radec_geojson = [_ra - 180.0, _dec]
        doc['coordinates']['radec_geojson'] = {'type': 'Point',
                                               'coordinates': _radec_geojson}

        light_curve = deepcopy(doc['light_curve'])
        doc.pop('light_curve', None)
        if light_curve is None:
            light_curve = []
        # derive magnitudes from positive fluxes only
        for lc in light_curve:
            if lc['flux'] > 0:
                lc['mag'] = -2.5 * np.log10(lc['flux']) + lc['zp']

        return doc, light_curve

    def poll(self, path_alerts=None, path_tess=None, datestr=None, save_packets=True):
        """
        Polls Kafka broker to consume topic.

        :param path_alerts: directory to save raw avro packets under <datestr>/
        :param path_tess: directory to dump TESS-sector alerts as JSON
        :param datestr: date subdirectory name for saved packets
        :param save_packets: when True, persist raw packets to disk
        :return:
        """
        msg = self.consumer.poll()

        if msg is None:
            print(*time_stamps(), 'Caught error: msg is None')

        if msg.error():
            print('Caught error:', msg.error())
            raise EopError(msg)

        elif msg is not None:
            # decode avro packet
            msg_decoded = self.decodeMessage(msg)
            for record in msg_decoded:
                candid = record['candid']
                objectId = record['objectId']

                print(*time_stamps(), self.topic, objectId, candid)

                # check that candid not in collection_alerts
                if self.db['db'][self.collection_alerts].count_documents({'candid': candid}, limit=1) == 0:
                    # candid not in db, ingest
                    if save_packets:
                        # save avro packet to disk
                        path_alert_dir = os.path.join(path_alerts, datestr)
                        # mkdir if does not exist
                        if not os.path.exists(path_alert_dir):
                            os.makedirs(path_alert_dir)
                        path_avro = os.path.join(path_alert_dir, f'{candid}.avro')
                        print(*time_stamps(), f'saving {candid} to disk')
                        with open(path_avro, 'wb') as f:
                            f.write(msg.value())

                    # ingest decoded avro packet into db
                    alert, light_curve = self.alert_mongify(record)

                    # alert filters:

                    # ML models:
                    scores = alert_filter__ml(record, ml_models=self.ml_models)
                    alert['classifications'] = scores

                    print(*time_stamps(), f'ingesting {alert["candid"]} into db')
                    self.insert_db_entry(_collection=self.collection_alerts, _db_entry=alert)

                    # light_curve: pop nulls - save space
                    light_curve = [{kk: vv for kk, vv in lc.items() if vv is not None} for lc in light_curve]

                    # cross-match with external catalogs if objectId not in collection_alerts_aux:
                    if self.db['db'][self.collection_alerts_aux].count_documents({'_id': objectId}, limit=1) == 0:
                        xmatches = alert_filter__xmatch(self.db['db'], alert)
                        # CLU cross-match:
                        xmatches = {**xmatches, **alert_filter__xmatch_clu(self.db['db'], alert)}
                        alert_aux = {'_id': objectId,
                                     'cross_matches': xmatches,
                                     'light_curve': light_curve}
                        self.insert_db_entry(_collection=self.collection_alerts_aux, _db_entry=alert_aux)
                    else:
                        # known object: only append new light-curve points
                        self.db['db'][self.collection_alerts_aux].update_one({'_id': objectId},
                                                                             {'$addToSet':
                                                                                  {'light_curve':
                                                                                       {'$each': light_curve}}},
                                                                             upsert=True)

                    # dump packet as json to disk if in a public TESS sector
                    if 'TESS' in alert['candidate']['programpi']:
                        # put light_curve back
                        alert['light_curve'] = light_curve
                        # get cross-matches
                        xmatches = self.db['db'][self.collection_alerts_aux].find({'_id': objectId},
                                                                                  {'cross_matches': 1},
                                                                                  limit=1)
                        xmatches = list(xmatches)[0]
                        alert['cross_matches'] = xmatches['cross_matches']

                        if save_packets:
                            path_tess_dir = os.path.join(path_tess, datestr)
                            # mkdir if does not exist
                            if not os.path.exists(path_tess_dir):
                                os.makedirs(path_tess_dir)

                            print(*time_stamps(), f'saving {alert["candid"]} to disk')
                            try:
                                with open(os.path.join(path_tess_dir, f"{alert['candid']}.json"), 'w') as f:
                                    f.write(dumps(alert))
                            except Exception as e:
                                print(time_stamps(), str(e))
                                _err = traceback.format_exc()
                                print(*time_stamps(), str(_err))

    def decodeMessage(self, msg):
        """Decode Avro message according to a schema.

        Parameters
        ----------
        msg : Kafka message
            The Kafka message result from consumer.poll().

        Returns
        -------
        `dict`
            Decoded message.
        """
        message = msg.value()
        try:
            bytes_io = io.BytesIO(message)
            decoded_msg = readSchemaData(bytes_io)
        except AssertionError:
            # FIXME this exception is raised but not sure if it matters yet
            bytes_io = io.BytesIO(message)
            decoded_msg = None
        except IndexError:
            literal_msg = literal_eval(str(message, encoding='utf-8'))  # works to give bytes
            bytes_io = io.BytesIO(literal_msg)  # works to give <class '_io.BytesIO'>
            decoded_msg = readSchemaData(bytes_io)  # yields reader
        except Exception:
            # last resort: hand back the raw payload
            decoded_msg = message
        finally:
            return decoded_msg
def msg_text(message):
    """Return a copy of *message* with the postage-stamp cutout fields removed."""
    cutout_keys = ('cutoutDifference', 'cutoutTemplate', 'cutoutScience')
    return {key: value for key, value in message.items() if key not in cutout_keys}
def write_stamp_file(stamp_dict, output_dir):
    """Given a stamp dict that follows the cutout schema,
    write data to a file in a given directory.

    Parameters
    ----------
    stamp_dict : dict
        Cutout packet with 'fileName' and 'stampData' (bytes) keys.
    output_dir : str
        Directory to write into; created (with parents) if missing.

    Notes
    -----
    Malformed input (e.g. stamp_dict is None) is reported to stderr and
    otherwise ignored, preserving the original best-effort behavior.
    """
    try:
        filename = stamp_dict['fileName']
        # exist_ok replaces the old try/except-OSError-pass idiom and
        # no longer masks unrelated OSErrors (e.g. permission problems)
        os.makedirs(output_dir, exist_ok=True)
        out_path = os.path.join(output_dir, filename)
        with open(out_path, 'wb') as f:
            f.write(stamp_dict['stampData'])
    except TypeError:
        sys.stderr.write('%% Cannot get stamp\n')
    return
def alert_filter(alert, stampdir=None):
    """Filter to apply to each alert.

    Prints the non-cutout alert contents and, when *stampdir* is given,
    saves the three postage-stamp cutouts into it.

    See schemas: https://github.com/ZwickyTransientFacility/ztf-avro-alert
    """
    payload = msg_text(alert)
    if not payload:
        return
    print(payload)  # Print all main alert data to screen
    if stampdir is None:
        return
    # Collect all postage stamps
    for cutout_key in ('cutoutDifference', 'cutoutTemplate', 'cutoutScience'):
        write_stamp_file(alert.get(cutout_key), stampdir)
    return
def make_triplet(alert, to_tpu: bool = False):
    """
    Feed in alert packet

    Builds a 63x63x3 (science, template, difference) cutout triplet from the
    gzipped FITS stamps in *alert*.  When to_tpu is True the triplet is
    quantized to uint8 and flattened for Edge-TPU inference.
    """
    cutout_dict = dict()

    for cutout in ('science', 'template', 'difference'):
        cutout_data = alert[f'cutout{cutout.capitalize()}']

        # unzip
        with gzip.open(io.BytesIO(cutout_data), 'rb') as f:
            with fits.open(io.BytesIO(f.read())) as hdu:
                data = hdu[0].data
                # replace nans with zeros
                cutout_dict[cutout] = np.nan_to_num(data)
                # L2-normalize
                # NOTE(review): an all-zero stamp would give norm == 0 and a
                # division-by-zero here -- presumably never happens for real
                # cutouts; confirm upstream.
                cutout_dict[cutout] /= np.linalg.norm(cutout_dict[cutout])

        # pad to 63x63 if smaller
        shape = cutout_dict[cutout].shape
        if shape != (63, 63):
            cutout_dict[cutout] = np.pad(cutout_dict[cutout], [(0, 63 - shape[0]), (0, 63 - shape[1])],
                                         mode='constant', constant_values=1e-9)

    triplet = np.zeros((63, 63, 3))
    triplet[:, :, 0] = cutout_dict['science']
    triplet[:, :, 1] = cutout_dict['template']
    triplet[:, :, 2] = cutout_dict['difference']

    if to_tpu:
        # Edge TPUs require additional processing
        triplet = np.rint(triplet * 128 + 128).astype(np.uint8).flatten()

    return triplet
def alert_filter__ml(alert, ml_models: dict = None):
    """Score an alert with the loaded ML models (currently only braai).

    :param alert: alert packet dict, passed to make_triplet
    :param ml_models: mapping of model name -> {'model': ..., 'version': ...}
    :return: dict of scores; empty if scoring failed (errors are printed, not raised)
    """
    scores = dict()
    try:
        ''' braai '''
        triplet = make_triplet(alert)
        # model expects a batch dimension
        triplets = np.expand_dims(triplet, axis=0)
        braai = ml_models['braai']['model'].predict(x=triplets)[0]
        # braai = 1.0
        scores['braai'] = float(braai)
        scores['braai_version'] = ml_models['braai']['version']
    except Exception as e:
        # deliberate best-effort: a scoring failure must not kill the consumer
        print(*time_stamps(), str(e))
    return scores
# cone search radius:
cone_search_radius = float(config['xmatch']['cone_search_radius'])
# convert to rad:
if config['xmatch']['cone_search_unit'] == 'arcsec':
    cone_search_radius *= np.pi / 180.0 / 3600.
elif config['xmatch']['cone_search_unit'] == 'arcmin':
    cone_search_radius *= np.pi / 180.0 / 60.
elif config['xmatch']['cone_search_unit'] == 'deg':
    cone_search_radius *= np.pi / 180.0
elif config['xmatch']['cone_search_unit'] == 'rad':
    # already in radians; the `*= 1` is a deliberate no-op kept for symmetry
    cone_search_radius *= 1
else:
    raise Exception('Unknown cone search unit. Must be in [deg, rad, arcsec, arcmin]')
def alert_filter__xmatch(db, alert):
    """Cross-match an alert position against all configured catalogs.

    :param db: Mongo-style database handle (db[catalog].find(...))
    :param alert: alert packet dict with candidate.ra / candidate.dec in degrees
    :return: dict mapping catalog name -> list of matched documents;
             empty on failure (errors are printed, not raised)
    """
    xmatches = dict()
    try:
        ra_geojson = float(alert['candidate']['ra'])
        # geojson-friendly ra:
        ra_geojson -= 180.0
        dec_geojson = float(alert['candidate']['dec'])
        ''' catalogs '''
        for catalog in config['xmatch']['catalogs']:
            catalog_filter = config['xmatch']['catalogs'][catalog]['filter']
            catalog_projection = config['xmatch']['catalogs'][catalog]['projection']
            # spherical cap around the alert position (module-level radius, radians)
            object_position_query = dict()
            object_position_query['coordinates.radec_geojson'] = {
                '$geoWithin': {'$centerSphere': [[ra_geojson, dec_geojson], cone_search_radius]}}
            s = db[catalog].find({**object_position_query, **catalog_filter},
                                 {**catalog_projection})
            xmatches[catalog] = list(s)
    except Exception as e:
        # deliberate best-effort: a failed cross-match must not kill the consumer
        print(*time_stamps(), str(e))
    return xmatches
# coarse-search radius for the CLU galaxy catalog, used by alert_filter__xmatch_clu
# cone search radius in deg:
cone_search_radius_clu = 3.0
# convert deg to rad:
cone_search_radius_clu *= np.pi / 180.0
def alert_filter__xmatch_clu(database, alert, size_margin=3, clu_version='CLU_20190625'):
    """
    Cross-match an alert against CLU galaxies using elliptical apertures.
    :param size_margin: multiply galaxy size by this much before looking for a match
    :param clu_version: CLU catalog version
    :return: dict {clu_version: [matched galaxy docs]}; empty on failure
             (errors are printed, not raised)
    """
    xmatches = dict()
    try:
        ra = float(alert['candidate']['ra'])
        dec = float(alert['candidate']['dec'])
        # geojson-friendly ra:
        ra_geojson = float(alert['candidate']['ra']) - 180.0
        dec_geojson = dec
        catalog_filter = {}
        catalog_projection = {"_id": 1, "name": 1, "ra": 1, "dec": 1,
                              "a": 1, "b2a": 1, "pa": 1, "z": 1,
                              "sfr_fuv": 1, "mstar": 1, "sfr_ha": 1,
                              "coordinates.radec_str": 1}
        # first do a coarse search of everything that is around
        object_position_query = dict()
        object_position_query['coordinates.radec_geojson'] = {
            '$geoWithin': {'$centerSphere': [[ra_geojson, dec_geojson], cone_search_radius_clu]}}
        s = database[clu_version].find({**object_position_query, **catalog_filter},
                                       {**catalog_projection})
        galaxies = list(s)
        # these guys are very big, so check them separately
        # NOTE(review): the hard-coded M31/M33 dicts lack the 'z' key projected
        # above and store sexagesimal strings under 'radec_geojson' - confirm
        # consumers tolerate that
        M31 = {'_id': 596900, 'name': 'PGC2557',
               'ra': 10.6847, 'dec': 41.26901, 'a': 6.35156, 'b2a': 0.32, 'pa': 35.0,
               'sfr_fuv': None, 'mstar': 253816876.412914, 'sfr_ha': 0,
               'coordinates': {'radec_geojson': ["00:42:44.3503", "41:16:08.634"]}
               }
        M33 = {'_id': 597543, 'name': 'PGC5818',
               'ra': 23.46204, 'dec': 30.66022, 'a': 2.35983, 'b2a': 0.59, 'pa': 23.0,
               'sfr_fuv': None, 'mstar': 4502777.420493, 'sfr_ha': 0,
               'coordinates': {'radec_geojson': ["01:33:50.8900", "30:39:36.800"]}
               }
        # do elliptical matches
        matches = []
        for galaxy in galaxies + [M31, M33]:
            alpha1, delta01 = galaxy['ra'], galaxy['dec']
            d0, axis_ratio, PA0 = galaxy['a'], galaxy['b2a'], galaxy['pa']
            # no shape info for galaxy? replace with median values
            if d0 < -990:
                d0 = 0.0265889
            if axis_ratio < -990:
                axis_ratio = 0.61
            if PA0 < -990:
                PA0 = 86.0
            in_galaxy = in_ellipse(ra, dec, alpha1, delta01, size_margin * d0, axis_ratio, PA0)
            if in_galaxy:
                match = galaxy
                # angular separation in arcsec, recorded on the match document
                distance_arcsec = round(great_circle_distance(ra, dec, alpha1, delta01) * 3600, 2)
                match['coordinates']['distance_arcsec'] = distance_arcsec
                matches.append(match)
        xmatches[clu_version] = matches
    except Exception as e:
        # deliberate best-effort: a failed cross-match must not kill the consumer
        print(*time_stamps(), str(e))
    return xmatches
def listener(topic, bootstrap_servers='', offset_reset='earliest',
             group=None, path_alerts=None, path_tess=None, save_packets=True):
    """
    Listen to a topic and poll it forever; exits the process on fatal errors.
    :param topic: Kafka topic name (expected form ztf_YYYYMMDD_..., see datestr below)
    :param bootstrap_servers: Kafka bootstrap servers string
    :param offset_reset: Kafka auto.offset.reset policy
    :param group: consumer group id; defaults to $HOSTNAME (made unique with a timestamp)
    :param path_alerts: directory for saved alert packets
    :param path_tess: directory for saved TESS-related data
    :param save_packets: whether poll() should persist packets to disk
    :return:
    """
    # def error_cb(err):
    #     print(*time_stamps(), 'error_cb -------->', err)
    #     # print(err.code())
    #     if err.code() == -195:
    #         print(*time_stamps(), 'got disconnected, killing thread')
    #         sys.exit()
    # Configure consumer connection to Kafka broker
    conf = {'bootstrap.servers': bootstrap_servers,
            # 'error_cb': error_cb,
            'default.topic.config': {'auto.offset.reset': offset_reset}}
    if group is not None:
        conf['group.id'] = group
    else:
        conf['group.id'] = os.environ['HOSTNAME'] if 'HOSTNAME' in os.environ else 'kowalski.caltech.edu'
    # make it unique:
    conf['group.id'] = '{:s}_{:s}'.format(conf['group.id'], datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f'))
    # Configure Avro reader schema
    schema_files = ["ztf-avro-alert/schema/candidate.avsc",
                    "ztf-avro-alert/schema/cutout.avsc",
                    "ztf-avro-alert/schema/light_curve.avsc",
                    "ztf-avro-alert/schema/alert.avsc"]
    # date string: second underscore-separated field of the topic name
    datestr = topic.split('_')[1]
    # Start alert stream consumer
    stream_reader = AlertConsumer(topic, schema_files, **conf)
    # todo: Subscribe alert filters to stream_readers
    # todo: they will be notified when an alert arrived/got x-matched
    while True:
        try:
            # poll!
            # print(*time_stamps(), 'Polling')
            stream_reader.poll(path_alerts=path_alerts, path_tess=path_tess,
                               datestr=datestr, save_packets=save_packets)
        except EopError as e:
            # Write when reaching end of partition
            # sys.stderr.write(e.message)
            print(*time_stamps(), e.message)
        except IndexError:
            # sys.stderr.write('%% Data cannot be decoded\n')
            print(*time_stamps(), '%% Data cannot be decoded\n')
        except UnicodeDecodeError:
            # sys.stderr.write('%% Unexpected data format received\n')
            print(*time_stamps(), '%% Unexpected data format received\n')
        except KeyboardInterrupt:
            # sys.stderr.write('%% Aborted by user\n')
            print(*time_stamps(), '%% Aborted by user\n')
            sys.exit()
        except Exception as e:
            # unexpected failure: log the traceback and kill this worker process
            print(*time_stamps(), str(e))
            _err = traceback.format_exc()
            print(*time_stamps(), str(_err))
            sys.exit()
def main(_obs_date=None, _save_packets=True):
    """Discover tonight's Kafka topics and keep one listener process per topic.

    :param _obs_date: observing date string YYYYMMDD; defaults to today's UTC date
    :param _save_packets: forwarded to listener() as save_packets
    NOTE(review): when _obs_date is given, the outer while-loop never sleeps
    (see the final `if _obs_date is None` guard) and busy-spins - confirm intended.
    """
    # topic name -> running listener Process
    topics_on_watch = dict()
    while True:
        try:
            # NOTE(review): `if True:` / `if False:` below are leftover debug
            # scaffolding toggles
            if True:
                # get kafka topic names with kafka-topics command
                kafka_cmd = [config['kafka-topics']['cmd'],
                             '--zookeeper', config['kafka-topics']['zookeeper'], '-list']
                # print(kafka_cmd)
                topics = subprocess.run(kafka_cmd, stdout=subprocess.PIPE).stdout.decode('utf-8').split('\n')[:-1]
                # print(topics)
                if _obs_date is None:
                    datestr = datetime.datetime.utcnow().strftime('%Y%m%d')
                else:
                    datestr = _obs_date
                # as of 20180403 naming convention is ztf_%Y%m%d_programidN
                # topics_tonight = [t for t in topics if (datestr in t) and ('programid' in t)]
                # ZUDS only
                topics_tonight = [t for t in topics if (datestr in t) and ('programid' in t) and ('zuds' in t)]
                print(*time_stamps(), topics_tonight)
            if False:
                # for testing
                topics_tonight = ['ztf_20180604_programid3']
            for t in topics_tonight:
                if t not in topics_on_watch:
                    print(*time_stamps(), f'starting listener thread for {t}')
                    offset_reset = config['kafka']['default.topic.config']['auto.offset.reset']
                    bootstrap_servers = config['kafka']['bootstrap.servers']
                    group = '{:s}'.format(config['kafka']['group'])
                    # print(group)
                    path_alerts = config['path']['path_alerts']
                    path_tess = config['path']['path_tess']
                    save_packets = _save_packets
                    # topics_on_watch[t] = threading.Thread(target=listener,
                    #                                       args=(t, bootstrap_servers,
                    #                                             offset_reset, group, path_alerts))
                    topics_on_watch[t] = multiprocessing.Process(target=listener,
                                                                 args=(t, bootstrap_servers,
                                                                       offset_reset, group,
                                                                       path_alerts, path_tess,
                                                                       save_packets))
                    topics_on_watch[t].daemon = True
                    topics_on_watch[t].start()
                else:
                    print(*time_stamps(), f'performing thread health check for {t}')
                    try:
                        # if not topics_on_watch[t].isAlive():
                        if not topics_on_watch[t].is_alive():
                            # dead process is dropped; it will be restarted on the
                            # next pass through the loop
                            print(*time_stamps(), f'{t} died, removing')
                            # topics_on_watch[t].terminate()
                            topics_on_watch.pop(t, None)
                        else:
                            print(*time_stamps(), f'{t} appears normal')
                    except Exception as _e:
                        print(*time_stamps(), 'Failed to perform health check', str(_e))
                        pass
        except Exception as e:
            print(*time_stamps(), str(e))
            _err = traceback.format_exc()
            print(*time_stamps(), str(_err))
        if _obs_date is None:
            time.sleep(300)
if __name__ == '__main__':
    # Script entry point: parse CLI flags and hand off to main().
    parser = argparse.ArgumentParser(description='Fetch AVRO packets from Kafka streams and ingest them into DB')
    parser.add_argument('--obsdate', help='observing date')
    parser.add_argument('--noio', help='reduce i/o - do not save packets', action='store_true')
    args = parser.parse_args()
    obs_date = args.obsdate
    # --noio is a store_true flag, so "save packets" is simply its negation
    save = not args.noio
    # print(obs_date)
    main(_obs_date=obs_date, _save_packets=save)
| []
| []
| [
"HOSTNAME"
]
| [] | ["HOSTNAME"] | python | 1 | 0 | |
venv/lib/python3.6/site-packages/werkzeug/debug/__init__.py | # -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import uuid
import json
import time
import getpass
import hashlib
import mimetypes
from itertools import chain
from os.path import join, dirname, basename, isfile
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.http import parse_cookie
from werkzeug.debug.tbtools import get_current_traceback, render_console_html
from werkzeug.debug.console import Console
from werkzeug.security import gen_salt
from werkzeug._internal import _log
from werkzeug._compat import text_type
# DEPRECATED
#: import this here because it once was documented as being available
#: from this module. In case there are users left ...
from werkzeug.debug.repr import debug_repr # noqa
# A week
PIN_TIME = 60 * 60 * 24 * 7
def hash_pin(pin):
    """Return a short, salted MD5 digest of the debugger pin."""
    raw = pin.encode("utf-8", "replace") if isinstance(pin, text_type) else pin
    digest = hashlib.md5(raw + b"shittysalt").hexdigest()
    return digest[:12]
_machine_id = None
def get_machine_id():
    """Return a stable, machine-specific identifier (bytes or str), cached
    in the module-level ``_machine_id`` after the first call.  May return
    ``None`` if no source of machine identity is available.
    """
    global _machine_id
    rv = _machine_id
    if rv is not None:
        return rv

    def _generate():
        # Potential sources of secret information on linux. The machine-id
        # is stable across boots, the boot id is not
        for filename in "/etc/machine-id", "/proc/sys/kernel/random/boot_id":
            try:
                with open(filename, "rb") as f:
                    return f.readline().strip()
            except IOError:
                continue

        # On OS X we can use the computer's serial number assuming that
        # ioreg exists and can spit out that information.
        try:
            # Also catch import errors: subprocess may not be available, e.g.
            # Google App Engine
            # See https://github.com/pallets/werkzeug/issues/925
            from subprocess import Popen, PIPE

            dump = Popen(
                ["ioreg", "-c", "IOPlatformExpertDevice", "-d", "2"], stdout=PIPE
            ).communicate()[0]
            match = re.search(b'"serial-number" = <([^>]+)', dump)
            if match is not None:
                return match.group(1)
        except (OSError, ImportError):
            pass

        # On Windows we can use winreg to get the machine guid
        wr = None
        try:
            import winreg as wr
        except ImportError:
            try:
                import _winreg as wr
            except ImportError:
                pass
        if wr is not None:
            try:
                with wr.OpenKey(
                    wr.HKEY_LOCAL_MACHINE,
                    "SOFTWARE\\Microsoft\\Cryptography",
                    0,
                    wr.KEY_READ | wr.KEY_WOW64_64KEY,
                ) as rk:
                    machineGuid, wrType = wr.QueryValueEx(rk, "MachineGuid")
                    if wrType == wr.REG_SZ:
                        return machineGuid.encode("utf-8")
                    else:
                        return machineGuid
            except WindowsError:
                pass

    # memoize whatever _generate produced (possibly None is recomputed next call,
    # since only a non-None value short-circuits above)
    _machine_id = rv = _generate()
    return rv
class _ConsoleFrame(object):
    """Helper class so that we can reuse the frame console code for the
    standalone console.
    """

    def __init__(self, namespace):
        # interactive console evaluating inside *namespace*
        self.console = Console(namespace)
        # frame id 0 is used for the standalone console (see display_console)
        self.id = 0
def get_pin_and_cookie_name(app):
    """Given an application object this returns a semi-stable 9 digit pin
    code and a random key.  The hope is that this is stable between
    restarts to not make debugging particularly frustrating.  If the pin
    was forcefully disabled this returns `None`.

    Second item in the resulting tuple is the cookie name for remembering.
    """
    # WERKZEUG_DEBUG_PIN: "off" disables pins, a digit string pins explicitly
    pin = os.environ.get("WERKZEUG_DEBUG_PIN")
    rv = None
    num = None

    # Pin was explicitly disabled
    if pin == "off":
        return None, None

    # Pin was provided explicitly
    if pin is not None and pin.replace("-", "").isdigit():
        # If there are separators in the pin, return it directly
        if "-" in pin:
            rv = pin
        else:
            num = pin

    modname = getattr(app, "__module__", getattr(app.__class__, "__module__"))

    try:
        # `getpass.getuser()` imports the `pwd` module,
        # which does not exist in the Google App Engine sandbox.
        username = getpass.getuser()
    except ImportError:
        username = None

    mod = sys.modules.get(modname)

    # This information only exists to make the cookie unique on the
    # computer, not as a security feature.
    probably_public_bits = [
        username,
        modname,
        getattr(app, "__name__", getattr(app.__class__, "__name__")),
        getattr(mod, "__file__", None),
    ]

    # This information is here to make it harder for an attacker to
    # guess the cookie name.  They are unlikely to be contained anywhere
    # within the unauthenticated debug page.
    private_bits = [str(uuid.getnode()), get_machine_id()]

    h = hashlib.md5()
    for bit in chain(probably_public_bits, private_bits):
        if not bit:
            continue
        if isinstance(bit, text_type):
            bit = bit.encode("utf-8")
        h.update(bit)
    h.update(b"cookiesalt")

    cookie_name = "__wzd" + h.hexdigest()[:20]

    # If we need to generate a pin we salt it a bit more so that we don't
    # end up with the same value and generate out 9 digits
    if num is None:
        h.update(b"pinsalt")
        num = ("%09d" % int(h.hexdigest(), 16))[:9]

    # Format the pincode in groups of digits for easier remembering if
    # we don't have a result yet.
    if rv is None:
        for group_size in 5, 4, 3:
            if len(num) % group_size == 0:
                rv = "-".join(
                    num[x : x + group_size].rjust(group_size, "0")
                    for x in range(0, len(num), group_size)
                )
                break
        else:
            rv = num

    return rv, cookie_name
class DebuggedApplication(object):
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
.. versionadded:: 0.9
The `lodgeit_url` parameter was deprecated.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
:param request_key: The key that points to the request object in ths
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
:param pin_security: can be used to disable the pin based security system.
:param pin_logging: enables the logging of the pin system.
"""
def __init__(
self,
app,
evalex=False,
request_key="werkzeug.request",
console_path="/console",
console_init_func=None,
show_hidden_frames=False,
lodgeit_url=None,
pin_security=True,
pin_logging=True,
):
if lodgeit_url is not None:
from warnings import warn
warn(DeprecationWarning("Werkzeug now pastes into gists."))
if not console_init_func:
console_init_func = None
self.app = app
self.evalex = evalex
self.frames = {}
self.tracebacks = {}
self.request_key = request_key
self.console_path = console_path
self.console_init_func = console_init_func
self.show_hidden_frames = show_hidden_frames
self.secret = gen_salt(20)
self._failed_pin_auth = 0
self.pin_logging = pin_logging
if pin_security:
# Print out the pin for the debugger on standard out.
if os.environ.get("WERKZEUG_RUN_MAIN") == "true" and pin_logging:
_log("warning", " * Debugger is active!")
if self.pin is None:
_log("warning", " * Debugger PIN disabled. " "DEBUGGER UNSECURED!")
else:
_log("info", " * Debugger PIN: %s" % self.pin)
else:
self.pin = None
def _get_pin(self):
if not hasattr(self, "_pin"):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin
def _set_pin(self, value):
self._pin = value
pin = property(_get_pin, _set_pin)
del _get_pin, _set_pin
@property
def pin_cookie_name(self):
"""The name of the pin cookie."""
if not hasattr(self, "_pin_cookie"):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin_cookie
def debug_application(self, environ, start_response):
"""Run the application and conserve the traceback frames."""
app_iter = None
try:
app_iter = self.app(environ, start_response)
for item in app_iter:
yield item
if hasattr(app_iter, "close"):
app_iter.close()
except Exception:
if hasattr(app_iter, "close"):
app_iter.close()
traceback = get_current_traceback(
skip=1,
show_hidden_frames=self.show_hidden_frames,
ignore_system_exceptions=True,
)
for frame in traceback.frames:
self.frames[frame.id] = frame
self.tracebacks[traceback.id] = traceback
try:
start_response(
"500 INTERNAL SERVER ERROR",
[
("Content-Type", "text/html; charset=utf-8"),
# Disable Chrome's XSS protection, the debug
# output can cause false-positives.
("X-XSS-Protection", "0"),
],
)
except Exception:
# if we end up here there has been output but an error
# occurred. in that situation we can do nothing fancy any
# more, better log something into the error log and fall
# back gracefully.
environ["wsgi.errors"].write(
"Debugging middleware caught exception in streamed "
"response at a point where response headers were already "
"sent.\n"
)
else:
is_trusted = bool(self.check_pin_trust(environ))
yield traceback.render_full(
evalex=self.evalex, evalex_trusted=is_trusted, secret=self.secret
).encode("utf-8", "replace")
traceback.log(environ["wsgi.errors"])
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype="text/html")
def display_console(self, request):
"""Display a standalone shell."""
if 0 not in self.frames:
if self.console_init_func is None:
ns = {}
else:
ns = dict(self.console_init_func())
ns.setdefault("app", self.app)
self.frames[0] = _ConsoleFrame(ns)
is_trusted = bool(self.check_pin_trust(request.environ))
return Response(
render_console_html(secret=self.secret, evalex_trusted=is_trusted),
mimetype="text/html",
)
def paste_traceback(self, request, traceback):
"""Paste the traceback and return a JSON response."""
rv = traceback.paste()
return Response(json.dumps(rv), mimetype="application/json")
def get_resource(self, request, filename):
"""Return a static resource from the shared folder."""
filename = join(dirname(__file__), "shared", basename(filename))
if isfile(filename):
mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
f = open(filename, "rb")
try:
return Response(f.read(), mimetype=mimetype)
finally:
f.close()
return Response("Not Found", status=404)
    def check_pin_trust(self, environ):
        """Checks if the request passed the pin test.  This returns `True` if the
        request is trusted on a pin/cookie basis and returns `False` if not.
        Additionally if the cookie's stored pin hash is wrong it will return
        `None` so that appropriate action can be taken.
        """
        if self.pin is None:
            # pin security disabled entirely
            return True
        val = parse_cookie(environ).get(self.pin_cookie_name)
        if not val or "|" not in val:
            return False
        # cookie value format: "<unix timestamp>|<hashed pin>"
        ts, pin_hash = val.split("|", 1)
        if not ts.isdigit():
            return False
        if pin_hash != hash_pin(self.pin):
            # stored hash does not match the current pin -> pin was changed
            return None
        # trust the cookie only within the PIN_TIME window (one week)
        return (time.time() - PIN_TIME) < int(ts)
def _fail_pin_auth(self):
time.sleep(self._failed_pin_auth > 5 and 5.0 or 0.5)
self._failed_pin_auth += 1
def pin_auth(self, request):
"""Authenticates with the pin."""
exhausted = False
auth = False
trust = self.check_pin_trust(request.environ)
# If the trust return value is `None` it means that the cookie is
# set but the stored pin hash value is bad. This means that the
# pin was changed. In this case we count a bad auth and unset the
# cookie. This way it becomes harder to guess the cookie name
# instead of the pin as we still count up failures.
bad_cookie = False
if trust is None:
self._fail_pin_auth()
bad_cookie = True
# If we're trusted, we're authenticated.
elif trust:
auth = True
# If we failed too many times, then we're locked out.
elif self._failed_pin_auth > 10:
exhausted = True
# Otherwise go through pin based authentication
else:
entered_pin = request.args.get("pin")
if entered_pin.strip().replace("-", "") == self.pin.replace("-", ""):
self._failed_pin_auth = 0
auth = True
else:
self._fail_pin_auth()
rv = Response(
json.dumps({"auth": auth, "exhausted": exhausted}),
mimetype="application/json",
)
if auth:
rv.set_cookie(
self.pin_cookie_name,
"%s|%s" % (int(time.time()), hash_pin(self.pin)),
httponly=True,
)
elif bad_cookie:
rv.delete_cookie(self.pin_cookie_name)
return rv
def log_pin_request(self):
"""Log the pin if needed."""
if self.pin_logging and self.pin is not None:
_log(
"info",
" * To enable the debugger you need to " "enter the security pin:",
)
_log("info", " * Debugger pin code: %s" % self.pin)
return Response("")
def __call__(self, environ, start_response):
"""Dispatch the requests."""
# important: don't ever access a function here that reads the incoming
# form data! Otherwise the application won't have access to that data
# any more!
request = Request(environ)
response = self.debug_application
if request.args.get("__debugger__") == "yes":
cmd = request.args.get("cmd")
arg = request.args.get("f")
secret = request.args.get("s")
traceback = self.tracebacks.get(request.args.get("tb", type=int))
frame = self.frames.get(request.args.get("frm", type=int))
if cmd == "resource" and arg:
response = self.get_resource(request, arg)
elif cmd == "paste" and traceback is not None and secret == self.secret:
response = self.paste_traceback(request, traceback)
elif cmd == "pinauth" and secret == self.secret:
response = self.pin_auth(request)
elif cmd == "printpin" and secret == self.secret:
response = self.log_pin_request()
elif (
self.evalex
and cmd is not None
and frame is not None
and self.secret == secret
and self.check_pin_trust(environ)
):
response = self.execute_command(request, cmd, frame)
elif (
self.evalex
and self.console_path is not None
and request.path == self.console_path
):
response = self.display_console(request)
return response(environ, start_response)
| []
| []
| [
"WERKZEUG_RUN_MAIN",
"WERKZEUG_DEBUG_PIN"
]
| [] | ["WERKZEUG_RUN_MAIN", "WERKZEUG_DEBUG_PIN"] | python | 2 | 0 | |
controllers/suite_test.go | /*
Copyright 2020 Humio https://humio.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/humio/humio-operator/pkg/kubernetes"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
humioapi "github.com/humio/cli/api"
cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1"
openshiftsecurityv1 "github.com/openshift/api/security/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/humio/humio-operator/pkg/helpers"
"github.com/humio/humio-operator/pkg/humio"
"github.com/humio/humio-operator/pkg/openshift"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
corev1alpha1 "github.com/humio/humio-operator/api/v1alpha1"
//+kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var k8sManager ctrl.Manager
var humioClient humio.Client
var testTimeout time.Duration
var testProcessID string
var testNamespace corev1.Namespace
const testInterval = time.Second * 1
// TestAPIs is the Go test entry point: it wires Gomega failures into Ginkgo
// and runs the whole controller suite.
func TestAPIs(t *testing.T) {
	RegisterFailHandler(Fail)

	RunSpecs(t, "Controller Suite")
}
var _ = BeforeSuite(func() {
var log logr.Logger
zapLog, _ := helpers.NewLogger()
defer zapLog.Sync()
log = zapr.NewLogger(zapLog)
logf.SetLogger(log)
Expect(os.Getenv("HUMIO_E2E_LICENSE")).NotTo(BeEmpty())
By("bootstrapping test environment")
useExistingCluster := true
testProcessID = fmt.Sprintf("e2e-%s", kubernetes.RandomString())
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
testTimeout = time.Second * 300
testEnv = &envtest.Environment{
UseExistingCluster: &useExistingCluster,
}
humioClient = humio.NewClient(log, &humioapi.Config{}, "")
} else {
testTimeout = time.Second * 30
testEnv = &envtest.Environment{
// TODO: If we want to add support for TLS-functionality, we need to install cert-manager's CRD's
CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: true,
}
humioClient = humio.NewMockClient(
humioapi.Cluster{},
nil,
nil,
nil,
"",
)
}
cfg, err := testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
if helpers.IsOpenShift() {
err = openshiftsecurityv1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
}
if helpers.UseCertManager() {
err = cmapi.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
}
err = corev1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
//+kubebuilder:scaffold:scheme
watchNamespace, _ := getWatchNamespace()
options := ctrl.Options{
Scheme: scheme.Scheme,
MetricsBindAddress: "0",
Namespace: watchNamespace,
Logger: log,
}
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
if strings.Contains(watchNamespace, ",") {
log.Info(fmt.Sprintf("manager will be watching namespace %q", watchNamespace))
// configure cluster-scoped with MultiNamespacedCacheBuilder
options.Namespace = ""
options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(watchNamespace, ","))
// TODO: Get rid of Namespace property on Reconciler objects and instead use a custom cache implementation as this cache doesn't support watching a subset of namespace while still allowing to watch cluster-scoped resources. https://github.com/kubernetes-sigs/controller-runtime/issues/934
}
k8sManager, err = ctrl.NewManager(cfg, options)
Expect(err).NotTo(HaveOccurred())
err = (&HumioExternalClusterReconciler{
Client: k8sManager.GetClient(),
HumioClient: humioClient,
BaseLogger: log,
Namespace: testProcessID,
}).SetupWithManager(k8sManager)
Expect(err).NotTo(HaveOccurred())
err = (&HumioClusterReconciler{
Client: k8sManager.GetClient(),
HumioClient: humioClient,
BaseLogger: log,
Namespace: testProcessID,
}).SetupWithManager(k8sManager)
Expect(err).NotTo(HaveOccurred())
err = (&HumioIngestTokenReconciler{
Client: k8sManager.GetClient(),
HumioClient: humioClient,
BaseLogger: log,
Namespace: testProcessID,
}).SetupWithManager(k8sManager)
Expect(err).NotTo(HaveOccurred())
err = (&HumioParserReconciler{
Client: k8sManager.GetClient(),
HumioClient: humioClient,
BaseLogger: log,
Namespace: testProcessID,
}).SetupWithManager(k8sManager)
Expect(err).NotTo(HaveOccurred())
err = (&HumioRepositoryReconciler{
Client: k8sManager.GetClient(),
HumioClient: humioClient,
BaseLogger: log,
Namespace: testProcessID,
}).SetupWithManager(k8sManager)
Expect(err).NotTo(HaveOccurred())
err = (&HumioViewReconciler{
Client: k8sManager.GetClient(),
HumioClient: humioClient,
BaseLogger: log,
Namespace: testProcessID,
}).SetupWithManager(k8sManager)
Expect(err).NotTo(HaveOccurred())
err = (&HumioActionReconciler{
Client: k8sManager.GetClient(),
HumioClient: humioClient,
BaseLogger: log,
Namespace: testProcessID,
}).SetupWithManager(k8sManager)
Expect(err).NotTo(HaveOccurred())
err = (&HumioAlertReconciler{
Client: k8sManager.GetClient(),
HumioClient: humioClient,
BaseLogger: log,
Namespace: testProcessID,
}).SetupWithManager(k8sManager)
Expect(err).NotTo(HaveOccurred())
go func() {
err = k8sManager.Start(ctrl.SetupSignalHandler())
Expect(err).NotTo(HaveOccurred())
}()
k8sClient = k8sManager.GetClient()
Expect(k8sClient).NotTo(BeNil())
By(fmt.Sprintf("Creating test namespace: %s", testProcessID))
testNamespace = corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testProcessID,
},
}
err = k8sClient.Create(context.TODO(), &testNamespace)
Expect(err).ToNot(HaveOccurred())
if helpers.IsOpenShift() {
var err error
ctx := context.Background()
Eventually(func() bool {
_, err = openshift.GetSecurityContextConstraints(ctx, k8sClient)
if errors.IsNotFound(err) {
// Object has not been created yet
return true
}
if err != nil {
// Some other error happened. Typically:
// <*cache.ErrCacheNotStarted | 0x31fc738>: {}
// the cache is not started, can not read objects occurred
return false
}
// At this point we know the object already exists.
return true
}, testTimeout, testInterval).Should(BeTrue())
if errors.IsNotFound(err) {
By("Simulating helm chart installation of the SecurityContextConstraints object")
sccName := os.Getenv("OPENSHIFT_SCC_NAME")
priority := int32(0)
scc := openshiftsecurityv1.SecurityContextConstraints{
ObjectMeta: metav1.ObjectMeta{
Name: sccName,
Namespace: testProcessID,
},
Priority: &priority,
AllowPrivilegedContainer: true,
DefaultAddCapabilities: []corev1.Capability{},
RequiredDropCapabilities: []corev1.Capability{
"KILL",
"MKNOD",
"SETUID",
"SETGID",
},
AllowedCapabilities: []corev1.Capability{
"NET_BIND_SERVICE",
"SYS_NICE",
},
AllowHostDirVolumePlugin: true,
Volumes: []openshiftsecurityv1.FSType{
openshiftsecurityv1.FSTypeConfigMap,
openshiftsecurityv1.FSTypeDownwardAPI,
openshiftsecurityv1.FSTypeEmptyDir,
openshiftsecurityv1.FSTypeHostPath,
openshiftsecurityv1.FSTypePersistentVolumeClaim,
openshiftsecurityv1.FSProjected,
openshiftsecurityv1.FSTypeSecret,
},
AllowedFlexVolumes: nil,
AllowHostNetwork: false,
AllowHostPorts: false,
AllowHostPID: false,
AllowHostIPC: false,
SELinuxContext: openshiftsecurityv1.SELinuxContextStrategyOptions{
Type: openshiftsecurityv1.SELinuxStrategyMustRunAs,
},
RunAsUser: openshiftsecurityv1.RunAsUserStrategyOptions{
Type: openshiftsecurityv1.RunAsUserStrategyRunAsAny,
},
SupplementalGroups: openshiftsecurityv1.SupplementalGroupsStrategyOptions{
Type: openshiftsecurityv1.SupplementalGroupsStrategyRunAsAny,
},
FSGroup: openshiftsecurityv1.FSGroupStrategyOptions{
Type: openshiftsecurityv1.FSGroupStrategyRunAsAny,
},
ReadOnlyRootFilesystem: false,
Users: []string{},
Groups: nil,
SeccompProfiles: nil,
}
Expect(k8sClient.Create(ctx, &scc)).To(Succeed())
}
}
}, 120)
// AfterSuite deletes the per-run test namespace and stops the envtest environment.
var _ = AfterSuite(func() {
	By(fmt.Sprintf("Removing test namespace: %s", testProcessID))
	err := k8sClient.Delete(context.TODO(), &testNamespace)
	Expect(err).ToNot(HaveOccurred())

	By("Tearing down the test environment")
	err = testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})
// getWatchNamespace returns the Namespace the operator should be watching for changes.
// The value may be a comma-separated list of namespaces; splitting is handled by the
// caller (see the MultiNamespacedCacheBuilder setup in BeforeSuite).
func getWatchNamespace() (string, error) {
	// WatchNamespaceEnvVar is the constant for env variable WATCH_NAMESPACE
	// which specifies the Namespace to watch.
	// An empty value means the operator is running with cluster scope.
	var watchNamespaceEnvVar = "WATCH_NAMESPACE"

	ns, found := os.LookupEnv(watchNamespaceEnvVar)
	if !found {
		return "", fmt.Errorf("%s must be set", watchNamespaceEnvVar)
	}
	return ns, nil
}
// usingClusterBy writes a Ginkgo-style "STEP" log line prefixed with an
// RFC3339Nano timestamp and the name of the cluster the step applies to.
// At most one optional callback may be supplied and is invoked after the
// line is written; more than one callback panics.
func usingClusterBy(cluster, text string, callbacks ...func()) {
	// Fix: the original bound this to a local named "time", shadowing the
	// "time" package for the rest of the function body.
	timestamp := time.Now().Format(time.RFC3339Nano)
	fmt.Fprintln(GinkgoWriter, "STEP | "+timestamp+" | "+cluster+": "+text)
	if len(callbacks) == 1 {
		callbacks[0]()
	}
	if len(callbacks) > 1 {
		panic("just one callback per By, please")
	}
}
| [
"\"HUMIO_E2E_LICENSE\"",
"\"TEST_USE_EXISTING_CLUSTER\"",
"\"OPENSHIFT_SCC_NAME\""
]
| []
| [
"TEST_USE_EXISTING_CLUSTER",
"OPENSHIFT_SCC_NAME",
"HUMIO_E2E_LICENSE"
]
| [] | ["TEST_USE_EXISTING_CLUSTER", "OPENSHIFT_SCC_NAME", "HUMIO_E2E_LICENSE"] | go | 3 | 0 | |
localstack/utils/analytics/metadata.py | import dataclasses
import functools
import json
import logging
import os
import platform
from localstack import config, constants
from localstack.utils import common
LOG = logging.getLogger(__name__)
@dataclasses.dataclass
class ClientMetadata:
    """Snapshot of client/environment attributes reported by analytics."""

    session_id: str
    machine_id: str
    api_key: str
    system: str
    version: str
    is_ci: bool
    is_docker: bool
    is_testing: bool

    def __repr__(self):
        fields = dataclasses.asdict(self)
        # mask the api key so it never appears verbatim in logs/repr output
        api_key = fields.get("api_key")
        if api_key:
            fields["api_key"] = "*" * len(api_key)
        return "ClientMetadata(%s)" % fields
def get_version_string() -> str:
    """Return the localstack version, suffixed with the build git hash if set."""
    git_hash = config.LOCALSTACK_BUILD_GIT_HASH
    if not git_hash:
        return constants.VERSION
    return f"{constants.VERSION}:{git_hash}"
def read_client_metadata() -> ClientMetadata:
    """Collect a fresh ClientMetadata snapshot from config, env vars and host info."""
    return ClientMetadata(
        session_id=get_session_id(),
        machine_id=get_machine_id(),
        api_key=read_api_key_safe(),
        system=get_system(),
        version=get_version_string(),
        # any value of the CI env var (even empty) marks a CI run
        is_ci=os.getenv("CI") is not None,
        is_docker=config.is_in_docker,
        is_testing=config.is_env_true(constants.ENV_INTERNAL_TEST_RUN),
    )
@functools.lru_cache()
def get_session_id() -> str:
    """Return the session id for this process (generated once, then cached)."""
    return _generate_session_id()
@functools.lru_cache()
def get_client_metadata() -> ClientMetadata:
    """Return the cached client metadata, logging it when analytics debugging is on."""
    metadata = read_client_metadata()
    if config.DEBUG_ANALYTICS:
        LOG.info("resolved client metadata: %s", metadata)
    return metadata
@functools.lru_cache()
def get_machine_id() -> str:
    """Return a stable machine identifier, persisted across runs.

    The id is looked up in the localstack config files (home dir first, then
    temp dir). If no file contains one, a new short uid is generated and
    written back to every reachable config file so later runs report the
    same id. If no config file can be read or created at all, the (per
    process) session id is returned instead.
    """
    machine_id = None
    # determine machine_id from config files
    configs_map = {}
    # TODO check if this distinction is needed - config.CONFIG_FILE_PATH already handles tmp vs home folder
    config_file_tmp = get_config_file_tempdir()
    config_file_home = get_config_file_homedir()
    # home dir takes precedence over temp dir; stop at the first file with an id
    for config_file in (config_file_home, config_file_tmp):
        if config_file:
            local_configs = configs_map[config_file] = config.load_config_file(
                config_file=config_file
            )
            if "machine_id" in local_configs:
                machine_id = local_configs["machine_id"]
                break
    # if we can neither find NOR create the config files, fall back to process id
    if not configs_map:
        return get_session_id()
    # assign default id if empty
    if not machine_id:
        machine_id = common.short_uid()
    # update machine_id in all config files
    for config_file, configs in configs_map.items():
        configs["machine_id"] = machine_id
        common.save_file(config_file, json.dumps(configs))
    return machine_id
def _generate_session_id() -> str:
    """Generate a fresh random session id (long uid)."""
    return common.long_uid()
def _get_config_file(path):
    # Ensure the config file exists (created empty if missing), then return its path.
    common.get_or_create_file(path)
    return path
def get_config_file_homedir():
    """Return the path of the config file in the user's home directory."""
    return _get_config_file(config.CONFIG_FILE_PATH)
def get_config_file_tempdir():
    """Return the path of the ``.localstack`` config file in the temp directory."""
    return _get_config_file(os.path.join(config.dirs.tmp, ".localstack"))
def read_api_key_safe():
    """Read the Pro API key, returning None if unavailable (e.g. OSS install
    without the localstack_ext package, or no key configured)."""
    try:
        # imported lazily: localstack_ext is an optional (Pro) dependency
        from localstack_ext.bootstrap.licensing import read_api_key

        return read_api_key(raise_if_missing=False)
    except Exception:
        return None
def get_system() -> str:
    """Return the host operating system name as reported by ``platform.system()``."""
    return platform.system()
| []
| []
| [
"CI"
]
| [] | ["CI"] | python | 1 | 0 | |
server/server_test.go | package server_test
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/mail"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/dgrijalva/jwt-go"
"github.com/google/go-cmp/cmp"
iclient "github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/influxdb/influxql"
imodels "github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/toml"
"github.com/influxdata/kapacitor/alert"
"github.com/influxdata/kapacitor/client/v1"
"github.com/influxdata/kapacitor/command"
"github.com/influxdata/kapacitor/command/commandtest"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/server"
"github.com/influxdata/kapacitor/services/alert/alerttest"
"github.com/influxdata/kapacitor/services/alertmanager"
"github.com/influxdata/kapacitor/services/alertmanager/alertmanagertest"
"github.com/influxdata/kapacitor/services/hipchat/hipchattest"
"github.com/influxdata/kapacitor/services/httppost"
"github.com/influxdata/kapacitor/services/httppost/httpposttest"
"github.com/influxdata/kapacitor/services/k8s"
"github.com/influxdata/kapacitor/services/kafka"
"github.com/influxdata/kapacitor/services/kafka/kafkatest"
"github.com/influxdata/kapacitor/services/mqtt"
"github.com/influxdata/kapacitor/services/mqtt/mqtttest"
"github.com/influxdata/kapacitor/services/opsgenie"
"github.com/influxdata/kapacitor/services/opsgenie/opsgenietest"
"github.com/influxdata/kapacitor/services/opsgenie2/opsgenie2test"
"github.com/influxdata/kapacitor/services/pagerduty"
"github.com/influxdata/kapacitor/services/pagerduty/pagerdutytest"
"github.com/influxdata/kapacitor/services/pagerduty2"
"github.com/influxdata/kapacitor/services/pagerduty2/pagerduty2test"
"github.com/influxdata/kapacitor/services/pushover/pushovertest"
"github.com/influxdata/kapacitor/services/sensu/sensutest"
"github.com/influxdata/kapacitor/services/slack"
"github.com/influxdata/kapacitor/services/slack/slacktest"
"github.com/influxdata/kapacitor/services/smtp/smtptest"
"github.com/influxdata/kapacitor/services/snmptrap/snmptraptest"
"github.com/influxdata/kapacitor/services/swarm"
"github.com/influxdata/kapacitor/services/talk/talktest"
"github.com/influxdata/kapacitor/services/telegram"
"github.com/influxdata/kapacitor/services/telegram/telegramtest"
"github.com/influxdata/kapacitor/services/udf"
"github.com/influxdata/kapacitor/services/victorops"
"github.com/influxdata/kapacitor/services/victorops/victoropstest"
"github.com/k-sone/snmpgo"
"github.com/pkg/errors"
)
// udfDir is the path to the repository's udf directory, resolved relative
// to the working directory at package init time.
var udfDir string

func init() {
	dir, _ := os.Getwd()
	udfDir = filepath.Clean(filepath.Join(dir, "../udf"))
}
// TestServer_Ping verifies a freshly started server answers ping with the
// expected test build version string.
func TestServer_Ping(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	_, version, err := cli.Ping()
	if err != nil {
		t.Fatal(err)
	}
	if version != "testServer" {
		t.Fatal("unexpected version", version)
	}
}
// TestServer_Pprof_Index checks that the pprof debug endpoints are exposed
// over HTTP and respond with the expected status code and content type.
func TestServer_Pprof_Index(t *testing.T) {
	s, _ := OpenDefaultServer()
	defer s.Close()
	testCases := []struct {
		path        string
		code        int
		contentType string
	}{
		{
			path:        "/debug/pprof/",
			code:        http.StatusOK,
			contentType: "text/html; charset=utf-8",
		},
		{
			path:        "/debug/pprof/block",
			code:        http.StatusOK,
			contentType: "application/octet-stream",
		},
		{
			path:        "/debug/pprof/goroutine",
			code:        http.StatusOK,
			contentType: "application/octet-stream",
		},
		{
			path:        "/debug/pprof/heap",
			code:        http.StatusOK,
			contentType: "application/octet-stream",
		},
		{
			path:        "/debug/pprof/threadcreate",
			code:        http.StatusOK,
			contentType: "application/octet-stream",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.path, func(t *testing.T) {
			r, err := http.Get(s.URL() + tc.path)
			if err != nil {
				t.Fatal(err)
			}
			if got, exp := r.StatusCode, tc.code; got != exp {
				t.Errorf("unexpected status code got %d exp %d", got, exp)
			}
			if got, exp := r.Header.Get("Content-Type"), tc.contentType; got != exp {
				t.Errorf("unexpected content type got %s exp %s", got, exp)
			}
		})
	}
}
// TestServer_Authenticate_Fail ensures that, with auth enabled, a client
// that supplies no credentials is rejected with a parse error.
func TestServer_Authenticate_Fail(t *testing.T) {
	conf := NewConfig()
	conf.HTTP.AuthEnabled = true
	s := OpenServer(conf)
	cli, err := client.New(client.Config{
		URL: s.URL(),
	})
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	_, _, err = cli.Ping()
	if err == nil {
		t.Error("expected authentication error")
	} else if exp, got := "unable to parse authentication credentials", err.Error(); got != exp {
		t.Errorf("unexpected error message: got %q exp %q", got, exp)
	}
}
// TestServer_Authenticate_User ensures username/password credentials are
// accepted when auth is enabled.
func TestServer_Authenticate_User(t *testing.T) {
	conf := NewConfig()
	conf.HTTP.AuthEnabled = true
	s := OpenServer(conf)
	cli, err := client.New(client.Config{
		URL: s.URL(),
		Credentials: &client.Credentials{
			Method:   client.UserAuthentication,
			Username: "bob",
			Password: "bob's secure password",
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	_, version, err := cli.Ping()
	if err != nil {
		t.Fatal(err)
	}
	if version != "testServer" {
		t.Fatal("unexpected version", version)
	}
}
// TestServer_Authenticate_Bearer_Fail ensures a JWT signed with a secret
// different from the server's shared secret is rejected as invalid.
func TestServer_Authenticate_Bearer_Fail(t *testing.T) {
	secret := "secret"
	// Create a new token object, specifying signing method and the claims
	// you would like it to contain.
	token := jwt.NewWithClaims(jwt.SigningMethodHS512, jwt.MapClaims{
		"username": "bob",
		"exp":      time.Now().Add(10 * time.Second).Unix(),
	})
	// Sign and get the complete encoded token as a string using the secret
	tokenString, err := token.SignedString([]byte(secret))
	if err != nil {
		t.Fatal(err)
	}
	conf := NewConfig()
	conf.HTTP.AuthEnabled = true
	// Use a different secret so the token is invalid
	conf.HTTP.SharedSecret = secret + "extra secret"
	s := OpenServer(conf)
	cli, err := client.New(client.Config{
		URL: s.URL(),
		Credentials: &client.Credentials{
			Method: client.BearerAuthentication,
			Token:  tokenString,
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	_, _, err = cli.Ping()
	if err == nil {
		t.Error("expected authentication error")
	} else if exp, got := "invalid token: signature is invalid", err.Error(); got != exp {
		t.Errorf("unexpected error message: got %q exp %q", got, exp)
	}
}
// TestServer_Authenticate_Bearer_Expired ensures a correctly signed but
// already expired JWT (exp in the past) is rejected.
func TestServer_Authenticate_Bearer_Expired(t *testing.T) {
	secret := "secret"
	// Create a new token object, specifying signing method and the claims
	// you would like it to contain.
	token := jwt.NewWithClaims(jwt.SigningMethodHS512, jwt.MapClaims{
		"username": "bob",
		"exp":      time.Now().Add(-10 * time.Second).Unix(),
	})
	// Sign and get the complete encoded token as a string using the secret
	tokenString, err := token.SignedString([]byte(secret))
	if err != nil {
		t.Fatal(err)
	}
	conf := NewConfig()
	conf.HTTP.AuthEnabled = true
	conf.HTTP.SharedSecret = secret
	s := OpenServer(conf)
	cli, err := client.New(client.Config{
		URL: s.URL(),
		Credentials: &client.Credentials{
			Method: client.BearerAuthentication,
			Token:  tokenString,
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	_, _, err = cli.Ping()
	if err == nil {
		t.Error("expected authentication error")
	} else if exp, got := "invalid token: Token is expired", err.Error(); got != exp {
		t.Errorf("unexpected error message: got %q exp %q", got, exp)
	}
}
// TestServer_Authenticate_Bearer ensures a valid, unexpired JWT signed with
// the server's shared secret is accepted for bearer authentication.
func TestServer_Authenticate_Bearer(t *testing.T) {
	secret := "secret"
	// Create a new token object, specifying signing method and the claims
	// you would like it to contain.
	token := jwt.NewWithClaims(jwt.SigningMethodHS512, jwt.MapClaims{
		"username": "bob",
		"exp":      time.Now().Add(10 * time.Second).Unix(),
	})
	// Sign and get the complete encoded token as a string using the secret
	tokenString, err := token.SignedString([]byte(secret))
	if err != nil {
		t.Fatal(err)
	}
	conf := NewConfig()
	conf.HTTP.AuthEnabled = true
	conf.HTTP.SharedSecret = secret
	s := OpenServer(conf)
	cli, err := client.New(client.Config{
		URL: s.URL(),
		Credentials: &client.Credentials{
			Method: client.BearerAuthentication,
			Token:  tokenString,
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	_, version, err := cli.Ping()
	if err != nil {
		t.Fatal(err)
	}
	if version != "testServer" {
		t.Fatal("unexpected version", version)
	}
}
// TestServer_CreateTask creates a disabled stream task with explicit DBRPs
// and verifies every field round-trips through the API, including the
// rendered DOT graph.
func TestServer_CreateTask(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testTaskID"
	ttype := client.StreamTask
	dbrps := []client.DBRP{
		{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		},
		{
			Database:        "otherdb",
			RetentionPolicy: "default",
		},
	}
	tick := `stream
    |from()
        .measurement('test')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	ti, err := cli.Task(task.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != id {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
	}
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.Status != client.Disabled {
		t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
	}
	if !reflect.DeepEqual(ti.DBRPs, dbrps) {
		t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
	}
	if ti.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
	}
	dot := "digraph testTaskID {\nstream0 -> from1;\n}"
	if ti.Dot != dot {
		t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
	}
}
func TestServer_CreateTask_Quiet(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `stream
|from()
.measurement('test')
.quiet()
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := "digraph testTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestServer_CreateTaskImplicitStream(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `dbrp "mydb"."myrp"
dbrp "otherdb"."default"
stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := "digraph testTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestServer_CreateTaskBatch(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
}
tick := `dbrp "mydb"."myrp"
batch
|query('SELECT * from mydb.myrp.mymeas')
|log()
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.BatchTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.BatchTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := "digraph testTaskID {\nquery1 -> log2;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
// TestServer_CreateTaskImplicitAndExplicit verifies that supplying DBRPs
// both explicitly (request options) and implicitly (dbrp statements in the
// TICKscript) is rejected as a conflict.
func TestServer_CreateTaskImplicitAndExplicit(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testTaskID"
	dbrps := []client.DBRP{
		{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		},
	}
	tick := `dbrp "mydb"."myrp"

dbrp "otherdb"."default"

stream
    |from()
        .measurement('test')
`
	_, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	// It is expected that error should be non nil
	if err == nil {
		t.Fatal("expected task to fail to be created")
	}
}
func TestServer_CreateTaskExplicitUpdateImplicit(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
createDBRPs := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
createTick := `stream
|from()
.measurement('test')
`
updateDBRPs := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
}
updateTick := `dbrp "mydb"."myrp"
stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
DBRPs: createDBRPs,
TICKscript: createTick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, createDBRPs) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, createDBRPs)
}
if ti.TICKscript != createTick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, createTick)
}
dot := "digraph testTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
TICKscript: updateTick,
})
if err != nil {
t.Fatal(err)
}
ti, err = cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, updateDBRPs) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, updateDBRPs)
}
if ti.TICKscript != updateTick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, updateTick)
}
dot = "digraph testTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestServer_EnableTask(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Enabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Enabled)
}
if ti.Executing != true {
t.Fatalf("unexpected executing got %v exp %v", ti.Executing, true)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := `digraph testTaskID {
graph [throughput="0.00 points/s"];
stream0 [avg_exec_time_ns="0s" errors="0" working_cardinality="0" ];
stream0 -> from1 [processed="0"];
from1 [avg_exec_time_ns="0s" errors="0" working_cardinality="0" ];
}`
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestServer_EnableTaskOnCreate(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Enabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Enabled)
}
if ti.Executing != true {
t.Fatalf("unexpected executing got %v exp %v", ti.Executing, true)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := `digraph testTaskID {
graph [throughput="0.00 points/s"];
stream0 [avg_exec_time_ns="0s" errors="0" working_cardinality="0" ];
stream0 -> from1 [processed="0"];
from1 [avg_exec_time_ns="0s" errors="0" working_cardinality="0" ];
}`
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestServer_DisableTask(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := "digraph testTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
// TestServer_DeleteTask creates a task, deletes it, and verifies a
// subsequent lookup by the old link fails.
func TestServer_DeleteTask(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testTaskID"
	ttype := client.StreamTask
	dbrps := []client.DBRP{
		{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		},
		{
			Database:        "otherdb",
			RetentionPolicy: "default",
		},
	}
	tick := `stream
    |from()
        .measurement('test')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	err = cli.DeleteTask(task.Link)
	if err != nil {
		t.Fatal(err)
	}
	ti, err := cli.Task(task.Link, nil)
	if err == nil {
		t.Fatal("unexpected task:", ti)
	}
}
func TestServer_TaskNums(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
}
tick := `stream
|from()
.measurement('test')
`
// Create a bunch of tasks with every 3rd task enabled
count := 100
enabled := 0
tasks := make([]client.Task, count)
for i := 0; i < count; i++ {
status := client.Disabled
if i%3 == 0 {
enabled++
status = client.Enabled
}
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: fmt.Sprintf("%s-%d", id, i),
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: status,
})
if err != nil {
t.Fatal(err)
}
tasks[i] = task
}
if stats, err := s.Stats(); err != nil {
t.Fatal(err)
} else {
if got, exp := stats.NumTasks, count; got != exp {
t.Errorf("unexpected num_tasks got %d exp %d", got, exp)
}
if got, exp := stats.NumEnabledTasks, enabled; got != exp {
t.Errorf("unexpected num_enabled_tasks got %d exp %d", got, exp)
}
}
// Enable a bunch of tasks
for i, task := range tasks {
if i%2 == 0 && task.Status != client.Enabled {
enabled++
tasks[i].Status = client.Enabled
if _, err := cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
}); err != nil {
t.Fatal(err)
}
}
}
if stats, err := s.Stats(); err != nil {
t.Fatal(err)
} else {
if got, exp := stats.NumTasks, count; got != exp {
t.Errorf("unexpected num_tasks got %d exp %d", got, exp)
}
if got, exp := stats.NumEnabledTasks, enabled; got != exp {
t.Errorf("unexpected num_enabled_tasks got %d exp %d", got, exp)
}
}
// Disable a bunch of tasks
for i, task := range tasks {
if i%5 == 0 && task.Status != client.Disabled {
enabled--
tasks[i].Status = client.Disabled
if _, err := cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Disabled,
}); err != nil {
t.Fatal(err)
}
}
}
if stats, err := s.Stats(); err != nil {
t.Fatal(err)
} else {
if got, exp := stats.NumTasks, count; got != exp {
t.Errorf("unexpected num_tasks got %d exp %d", got, exp)
}
if got, exp := stats.NumEnabledTasks, enabled; got != exp {
t.Errorf("unexpected num_enabled_tasks got %d exp %d", got, exp)
}
}
// Delete a bunch of tasks
for i, task := range tasks {
if i%6 == 0 {
count--
if task.Status == client.Enabled {
enabled--
}
if err := cli.DeleteTask(task.Link); err != nil {
t.Fatal(err)
}
}
}
if stats, err := s.Stats(); err != nil {
t.Fatal(err)
} else {
if got, exp := stats.NumTasks, count; got != exp {
t.Errorf("unexpected num_tasks got %d exp %d", got, exp)
}
if got, exp := stats.NumEnabledTasks, enabled; got != exp {
t.Errorf("unexpected num_enabled_tasks got %d exp %d", got, exp)
}
}
}
func TestServer_ListTasks(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
count := 10
ttype := client.StreamTask
tick := `stream
|from()
.measurement('test')
`
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
for i := 0; i < count; i++ {
id := fmt.Sprintf("testTaskID%d", i)
status := client.Disabled
if i%2 == 0 {
status = client.Enabled
}
_, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: status,
})
if err != nil {
t.Fatal(err)
}
}
tasks, err := cli.ListTasks(nil)
if err != nil {
t.Fatal(err)
}
if exp, got := count, len(tasks); exp != got {
t.Fatalf("unexpected number of tasks: exp:%d got:%d", exp, got)
}
for i, task := range tasks {
if exp, got := fmt.Sprintf("testTaskID%d", i), task.ID; exp != got {
t.Errorf("unexpected task.ID i:%d exp:%s got:%s", i, exp, got)
}
if exp, got := client.StreamTask, task.Type; exp != got {
t.Errorf("unexpected task.Type i:%d exp:%v got:%v", i, exp, got)
}
if !reflect.DeepEqual(task.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps i:%d exp:%s got:%s", i, dbrps, task.DBRPs)
}
exp := client.Disabled
if i%2 == 0 {
exp = client.Enabled
}
if got := task.Status; exp != got {
t.Errorf("unexpected task.Status i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := i%2 == 0, task.Executing; exp != got {
t.Errorf("unexpected task.Executing i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := true, len(task.Dot) != 0; exp != got {
t.Errorf("unexpected task.Dot i:%d exp:\n%v\ngot:\n%v\n", i, exp, got)
}
if exp, got := tick, task.TICKscript; exp != got {
t.Errorf("unexpected task.TICKscript i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := "", task.Error; exp != got {
t.Errorf("unexpected task.Error i:%d exp:%v got:%v", i, exp, got)
}
}
}
func TestServer_ListTasks_Fields(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
count := 100
ttype := client.StreamTask
tick := `stream
|from()
.measurement('test')
`
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
for i := 0; i < count; i++ {
id := fmt.Sprintf("testTaskID%d", i)
_, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
}
tasks, err := cli.ListTasks(&client.ListTasksOptions{
Pattern: "testTaskID1*",
Fields: []string{"type", "status"},
Offset: 1,
Limit: 5,
})
if err != nil {
t.Fatal(err)
}
if exp, got := 5, len(tasks); exp != got {
t.Fatalf("unexpected number of tasks: exp:%d got:%d", exp, got)
}
for i, task := range tasks {
if exp, got := fmt.Sprintf("testTaskID1%d", i), task.ID; exp != got {
t.Errorf("unexpected task.ID i:%d exp:%s got:%s", i, exp, got)
}
if exp, got := client.StreamTask, task.Type; exp != got {
t.Errorf("unexpected task.Type i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := client.Enabled, task.Status; exp != got {
t.Errorf("unexpected task.Status i:%d exp:%v got:%v", i, exp, got)
}
// We didn't request these fields so they should be default zero values
if exp, got := 0, len(task.DBRPs); exp != got {
t.Fatalf("unexpected dbrps i:%d exp:%d got:%d", i, exp, got)
}
if exp, got := false, task.Executing; exp != got {
t.Errorf("unexpected task.Executing i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := "", task.Dot; exp != got {
t.Errorf("unexpected task.Dot i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := "", task.TICKscript; exp != got {
t.Errorf("unexpected task.TICKscript i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := "", task.Error; exp != got {
t.Errorf("unexpected task.Error i:%d exp:%v got:%v", i, exp, got)
}
}
}
// TestServer_CreateTemplate creates a stream template and verifies the
// stored script, rendered DOT graph, and the extracted template vars.
func TestServer_CreateTemplate(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testTemplateID"
	ttype := client.StreamTask
	tick := `var x = 5

stream
    |from()
        .measurement('test')
`
	template, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         id,
		Type:       ttype,
		TICKscript: tick,
	})
	if err != nil {
		t.Fatal(err)
	}
	ti, err := cli.Template(template.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != id {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
	}
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got\n%s\nexp\n%s\n", ti.TICKscript, tick)
	}
	dot := "digraph testTemplateID {\nstream0 -> from1;\n}"
	if ti.Dot != dot {
		t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
	}
	// the declared var x should surface as an int template var
	vars := client.Vars{"x": {Value: int64(5), Type: client.VarInt}}
	if !reflect.DeepEqual(vars, ti.Vars) {
		t.Fatalf("unexpected vars\ngot\n%s\nexp\n%s\n", ti.Vars, vars)
	}
}
// TestServer_UpdateTemplateID creates a template, renames it via
// UpdateTemplate, and verifies that the link and ID change, the DOT output
// is regenerated from the new ID, and the TICKscript and declared vars are
// preserved.
func TestServer_UpdateTemplateID(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testTemplateID"
	ttype := client.StreamTask
	tick := `var x = 5
stream
	|from()
		.measurement('test')
`
	template, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         id,
		Type:       ttype,
		TICKscript: tick,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Sanity-check the template as created before renaming it.
	ti, err := cli.Template(template.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != id {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
	}
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got\n%s\nexp\n%s\n", ti.TICKscript, tick)
	}
	dot := "digraph testTemplateID {\nstream0 -> from1;\n}"
	if ti.Dot != dot {
		t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
	}
	vars := client.Vars{"x": {Value: int64(5), Type: client.VarInt}}
	if !reflect.DeepEqual(vars, ti.Vars) {
		t.Fatalf("unexpected vars\ngot\n%s\nexp\n%s\n", ti.Vars, vars)
	}
	// Rename the template; the returned link must point at the new ID.
	newID := "newTemplateID"
	template, err = cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
		ID: newID,
	})
	if err != nil {
		t.Fatal(err)
	}
	if got, exp := template.Link.Href, "/kapacitor/v1/templates/newTemplateID"; got != exp {
		t.Fatalf("unexpected template link got %s exp %s", got, exp)
	}
	ti, err = cli.Template(template.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != newID {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, newID)
	}
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got\n%s\nexp\n%s\n", ti.TICKscript, tick)
	}
	// The DOT graph name follows the template ID, so it must change too.
	dot = "digraph newTemplateID {\nstream0 -> from1;\n}"
	if ti.Dot != dot {
		t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
	}
	if !reflect.DeepEqual(vars, ti.Vars) {
		t.Fatalf("unexpected vars\ngot\n%s\nexp\n%s\n", ti.Vars, vars)
	}
}
// TestServer_CreateTemplateImplicitAndUpdateExplicitWithTasks creates a
// template whose TICKscript declares its own dbrp, creates a task from it
// with no explicit DBRPs (inheriting the implicit ones), and then verifies:
//   - removing the dbrp statement from the template fails while tasks
//     depend on it, and
//   - adding a second dbrp statement succeeds and propagates to the tasks.
func TestServer_CreateTemplateImplicitAndUpdateExplicitWithTasks(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testTemplateID"
	implicitTick := `dbrp "telegraf"."autogen"
var x = 5
stream
	|from()
		.measurement('test')
`
	template, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         id,
		TICKscript: implicitTick,
	})
	if err != nil {
		t.Fatal(err)
	}
	ti, err := cli.Template(template.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != id {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
	}
	// Type was not set on creation; it must be inferred from the script.
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.TICKscript != implicitTick {
		t.Fatalf("unexpected TICKscript got\n%s\nexp\n%s\n", ti.TICKscript, implicitTick)
	}
	dot := "digraph testTemplateID {\nstream0 -> from1;\n}"
	if ti.Dot != dot {
		t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
	}
	vars := client.Vars{"x": {Value: int64(5), Type: client.VarInt}}
	if !reflect.DeepEqual(vars, ti.Vars) {
		t.Fatalf("unexpected vars\ngot\n%s\nexp\n%s\n", ti.Vars, vars)
	}
	implicitDBRPs := []client.DBRP{
		{
			Database:        "telegraf",
			RetentionPolicy: "autogen",
		},
	}
	// Tasks created without explicit DBRPs must inherit the template's dbrp.
	count := 1
	tasks := make([]client.Task, count)
	for i := 0; i < count; i++ {
		task, err := cli.CreateTask(client.CreateTaskOptions{
			TemplateID: template.ID,
			Status:     client.Enabled,
		})
		if err != nil {
			t.Fatal(err)
		}
		tasks[i] = task
		ti, err := cli.Task(task.Link, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(ti.DBRPs, implicitDBRPs) {
			t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, implicitDBRPs)
		}
	}
	// Removing the dbrp statement must be rejected: the dependent tasks
	// would be left without any DBRP.
	updateTick := `var x = 5
stream
	|from()
		.measurement('test')
`
	_, err = cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
		ID:         id,
		TICKscript: updateTick,
	})
	if err == nil {
		// Fatal with an explicit message; t.Fatal(err) here would print
		// the useless "<nil>" since err is nil on this path.
		t.Fatal("expected error updating template to remove implicit dbrp while tasks depend on it")
	}
	// Adding a dbrp statement is allowed and must propagate to the tasks.
	finalTick := `dbrp "telegraf"."autogen"
dbrp "telegraf"."not_autogen"
var x = 5
stream
	|from()
		.measurement('test')
`
	finalDBRPs := []client.DBRP{
		{
			Database:        "telegraf",
			RetentionPolicy: "autogen",
		},
		{
			Database:        "telegraf",
			RetentionPolicy: "not_autogen",
		},
	}
	template, err = cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
		ID:         id,
		TICKscript: finalTick,
	})
	if err != nil {
		t.Fatal(err)
	}
	for _, task := range tasks {
		ti, err := cli.Task(task.Link, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(ti.DBRPs, finalDBRPs) {
			t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, finalDBRPs)
		}
	}
}
// TestServer_UpdateTemplateID_WithTasks renames a template that has many
// tasks created from it and verifies every task is re-pointed at the new
// template ID while keeping its TICKscript.
func TestServer_UpdateTemplateID_WithTasks(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testTemplateID"
	ttype := client.StreamTask
	tick := `var x = 5
stream
	|from()
		.measurement('test')
`
	dbrps := []client.DBRP{
		{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		},
		{
			Database:        "otherdb",
			RetentionPolicy: "default",
		},
	}
	template, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         id,
		Type:       ttype,
		TICKscript: tick,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Create many tasks so the rename has to update a non-trivial set.
	count := 100
	tasks := make([]client.Task, count)
	for i := 0; i < count; i++ {
		task, err := cli.CreateTask(client.CreateTaskOptions{
			TemplateID: template.ID,
			DBRPs:      dbrps,
			Status:     client.Enabled,
		})
		if err != nil {
			t.Fatal(err)
		}
		tasks[i] = task
	}
	newID := "newTemplateID"
	template, err = cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
		ID: newID,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Every task must now reference the renamed template.
	for _, task := range tasks {
		got, err := cli.Task(task.Link, nil)
		if err != nil {
			t.Fatal(err)
		}
		if got.TemplateID != newID {
			t.Errorf("unexpected task TemplateID got %s exp %s", got.TemplateID, newID)
		}
		if got.TICKscript != tick {
			t.Errorf("unexpected task TICKscript got %s exp %s", got.TICKscript, tick)
		}
	}
}
// TestServer_UpdateTemplateID_Fail verifies that renaming a template to an
// ID that already exists fails, and that the original template remains
// intact and retrievable afterwards.
func TestServer_UpdateTemplateID_Fail(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testTemplateID"
	newID := "anotherTemplateID"
	ttype := client.StreamTask
	tick := `var x = 5
stream
	|from()
		.measurement('test')
`
	template, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         id,
		Type:       ttype,
		TICKscript: tick,
	})
	if err != nil {
		t.Fatal(err)
	}
	ti, err := cli.Template(template.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != id {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
	}
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got\n%s\nexp\n%s\n", ti.TICKscript, tick)
	}
	dot := "digraph testTemplateID {\nstream0 -> from1;\n}"
	if ti.Dot != dot {
		t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
	}
	vars := client.Vars{"x": {Value: int64(5), Type: client.VarInt}}
	if !reflect.DeepEqual(vars, ti.Vars) {
		t.Fatalf("unexpected vars\ngot\n%s\nexp\n%s\n", ti.Vars, vars)
	}
	// Create conflicting template
	if _, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         newID,
		Type:       ttype,
		TICKscript: tick,
	}); err != nil {
		t.Fatal(err)
	}
	// Renaming onto the existing ID must be rejected.
	if _, err = cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
		ID: newID,
	}); err == nil {
		t.Fatal("expected update template to fail on name conflict")
	}
	// Can still get old template
	ti, err = cli.Template(template.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != id {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
	}
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got\n%s\nexp\n%s\n", ti.TICKscript, tick)
	}
	if ti.Dot != dot {
		t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
	}
	if !reflect.DeepEqual(vars, ti.Vars) {
		t.Fatalf("unexpected vars\ngot\n%s\nexp\n%s\n", ti.Vars, vars)
	}
}
// TestServer_UpdateTemplateID_WithTasks_Fail verifies that a failed template
// rename (due to an ID conflict) leaves all tasks created from the template
// untouched: they keep the old template ID and TICKscript.
func TestServer_UpdateTemplateID_WithTasks_Fail(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testTemplateID"
	ttype := client.StreamTask
	tick := `var x = 5
stream
	|from()
		.measurement('test')
`
	dbrps := []client.DBRP{
		{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		},
		{
			Database:        "otherdb",
			RetentionPolicy: "default",
		},
	}
	template, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         id,
		Type:       ttype,
		TICKscript: tick,
	})
	if err != nil {
		t.Fatal(err)
	}
	count := 100
	tasks := make([]client.Task, count)
	for i := 0; i < count; i++ {
		task, err := cli.CreateTask(client.CreateTaskOptions{
			TemplateID: template.ID,
			DBRPs:      dbrps,
			Status:     client.Enabled,
		})
		if err != nil {
			t.Fatal(err)
		}
		tasks[i] = task
	}
	// Create conflicting template
	newID := "newTemplateID"
	if _, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         newID,
		Type:       ttype,
		TICKscript: tick,
	}); err != nil {
		t.Fatal(err)
	}
	// Attempt a rename plus script change at once; it must fail atomically.
	if _, err = cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
		ID:         newID,
		TICKscript: "stream",
	}); err == nil {
		t.Fatal("expected update template to fail on conflicting name")
	}
	// None of the tasks may have picked up the attempted changes.
	for _, task := range tasks {
		got, err := cli.Task(task.Link, nil)
		if err != nil {
			t.Fatal(err)
		}
		if got.TemplateID != id {
			t.Errorf("unexpected task TemplateID got %s exp %s", got.TemplateID, id)
		}
		if got.TICKscript != tick {
			t.Errorf("unexpected task TICKscript got %s exp %s", got.TICKscript, tick)
		}
	}
}
// TestServer_DeleteTemplate verifies that once a template is deleted it can
// no longer be fetched through its link.
func TestServer_DeleteTemplate(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	script := `stream
	|from()
		.measurement('test')
`
	tmpl, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         "testTemplateID",
		Type:       client.StreamTask,
		TICKscript: script,
	})
	if err != nil {
		t.Fatal(err)
	}
	if err := cli.DeleteTemplate(tmpl.Link); err != nil {
		t.Fatal(err)
	}
	// Fetching the deleted template must now fail.
	if got, err := cli.Template(tmpl.Link, nil); err == nil {
		t.Fatal("unexpected template:", got)
	}
}
// TestServer_CreateTaskFromTemplate creates a template with a documented,
// overridable var and then instantiates a task from it, verifying that the
// task gets its own ID/DOT, the supplied DBRPs, and the overridden var value.
func TestServer_CreateTaskFromTemplate(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testTemplateID"
	ttype := client.StreamTask
	tick := `// Configurable measurement
var measurement = 'test'
stream
	|from()
		.measurement(measurement)
`
	template, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         id,
		Type:       ttype,
		TICKscript: tick,
	})
	if err != nil {
		t.Fatal(err)
	}
	templateInfo, err := cli.Template(template.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if templateInfo.Error != "" {
		t.Fatal(templateInfo.Error)
	}
	if templateInfo.ID != id {
		t.Fatalf("unexpected template.id got %s exp %s", templateInfo.ID, id)
	}
	if templateInfo.Type != client.StreamTask {
		t.Fatalf("unexpected template.type got %v exp %v", templateInfo.Type, client.StreamTask)
	}
	if templateInfo.TICKscript != tick {
		t.Fatalf("unexpected template.TICKscript got %s exp %s", templateInfo.TICKscript, tick)
	}
	dot := "digraph testTemplateID {\nstream0 -> from1;\n}"
	if templateInfo.Dot != dot {
		t.Fatalf("unexpected template.dot\ngot\n%s\nexp\n%s\n", templateInfo.Dot, dot)
	}
	// The var's Description is lifted from the comment preceding its
	// declaration in the TICKscript.
	expVars := client.Vars{
		"measurement": {
			Value:       "test",
			Type:        client.VarString,
			Description: "Configurable measurement",
		},
	}
	if got, exp := templateInfo.Vars, expVars; !reflect.DeepEqual(exp, got) {
		t.Errorf("unexpected template vars: got %v exp %v", got, exp)
	}
	dbrps := []client.DBRP{
		{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		},
		{
			Database:        "otherdb",
			RetentionPolicy: "default",
		},
	}
	// Override the template's default var value for this task.
	vars := client.Vars{
		"measurement": {
			Value: "another_measurement",
			Type:  client.VarString,
		},
	}
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         "taskid",
		TemplateID: id,
		DBRPs:      dbrps,
		Vars:       vars,
	})
	if err != nil {
		t.Fatal(err)
	}
	taskInfo, err := cli.Task(task.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if taskInfo.Error != "" {
		t.Fatal(taskInfo.Error)
	}
	if taskInfo.ID != "taskid" {
		t.Fatalf("unexpected task.id got %s exp %s", taskInfo.ID, "taskid")
	}
	if taskInfo.Type != client.StreamTask {
		t.Fatalf("unexpected task.type got %v exp %v", taskInfo.Type, client.StreamTask)
	}
	if taskInfo.TICKscript != tick {
		t.Fatalf("unexpected task.TICKscript got %s exp %s", taskInfo.TICKscript, tick)
	}
	// The task's DOT graph is named after the task, not the template.
	dot = "digraph taskid {\nstream0 -> from1;\n}"
	if taskInfo.Dot != dot {
		t.Fatalf("unexpected task.dot\ngot\n%s\nexp\n%s\n", taskInfo.Dot, dot)
	}
	if taskInfo.Status != client.Disabled {
		t.Fatalf("unexpected task.status got %v exp %v", taskInfo.Status, client.Disabled)
	}
	if !reflect.DeepEqual(taskInfo.DBRPs, dbrps) {
		t.Fatalf("unexpected task.dbrps got %s exp %s", taskInfo.DBRPs, dbrps)
	}
	if !reflect.DeepEqual(taskInfo.Vars, vars) {
		t.Fatalf("unexpected task.vars got %s exp %s", taskInfo.Vars, vars)
	}
}
// TestServer_DynamicStreamTask verifies that when a task is created without
// an explicit Type, the server infers stream vs batch from the TICKscript,
// including when the stream/batch source is reached through a var.
func TestServer_DynamicStreamTask(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	testCases := []struct {
		name string
		tick string
		want client.TaskType
	}{
		{
			name: "stream",
			tick: `
dbrp "db"."rp"
stream
	|from()
		.measurement('test')
`,
			want: client.StreamTask,
		},
		{
			name: "stream_through_var",
			tick: `
dbrp "db"."rp"
var s = stream
s
	|from()
		.measurement('test')
`,
			want: client.StreamTask,
		},
		{
			name: "batch",
			tick: `
dbrp "db"."rp"
batch
	|query('select * from db.rp.m')
`,
			want: client.BatchTask,
		},
		{
			name: "batch_through_var",
			tick: `
dbrp "db"."rp"
var b = batch
b
	|query('select * from db.rp.m')
`,
			want: client.BatchTask,
		},
	}
	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		t.Run(tc.name, func(t *testing.T) {
			// Note: no Type is set; the server must infer it.
			task, err := cli.CreateTask(client.CreateTaskOptions{
				ID:         tc.name,
				TICKscript: tc.tick,
				Status:     client.Disabled,
			})
			if err != nil {
				t.Fatal(err)
			}
			if task.Type != tc.want {
				t.Fatalf("unexpected task type: got: %v want: %v", task.Type, tc.want)
			}
		})
	}
}
// TestServer_StreamTask runs a basic stream task end to end: create it
// disabled, enable it, write points, and poll the task's httpOut endpoint
// until the windowed count appears.
func TestServer_StreamTask(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testStreamTask"
	ttype := client.StreamTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	tick := `stream
	|from()
		.measurement('test')
	|window()
		.period(10s)
		.every(10s)
	|count('value')
	|httpOut('count')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		Status: client.Enabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id)
	// Request data before any writes and expect null responses
	nullResponse := `{"series":null}`
	err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5)
	if err != nil {
		t.Error(err)
	}
	// 15 points fall inside the first 10s window [0s,10s).
	points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
`
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", points, v)
	exp := `{"series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
	err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
	if err != nil {
		t.Error(err)
	}
}
// TestServer_StreamTask_NoRP verifies that writes made without a retention
// policy are routed to the task via the server's configured
// DefaultRetentionPolicy.
func TestServer_StreamTask_NoRP(t *testing.T) {
	conf := NewConfig()
	conf.DefaultRetentionPolicy = "myrp"
	s := OpenServer(conf)
	defer s.Close()
	cli := Client(s)
	id := "testStreamTask"
	ttype := client.StreamTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	tick := `stream
	|from()
		.measurement('test')
	|window()
		.period(10s)
		.every(10s)
	|count('value')
	|httpOut('count')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		Status: client.Enabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id)
	// Request data before any writes and expect null responses
	nullResponse := `{"series":null}`
	err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5)
	if err != nil {
		t.Error(err)
	}
	points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
`
	v := url.Values{}
	v.Add("precision", "s")
	// Write with an empty RP; the default RP must be applied server-side.
	s.MustWrite("mydb", "", points, v)
	exp := `{"series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
	err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
	if err != nil {
		t.Error(err)
	}
}
// TestServer_StreamTemplateTask creates a task from a template, overriding
// the template's placeholder var ("nonexistent") with the real field name,
// and verifies the task counts written points correctly.
func TestServer_StreamTemplateTask(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	templateId := "testStreamTemplate"
	taskId := "testStreamTask"
	ttype := client.StreamTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	tick := `
var field = 'nonexistent'
stream
	|from()
		.measurement('test')
	|window()
		.period(10s)
		.every(10s)
	|count(field)
	|httpOut('count')
`
	if _, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         templateId,
		Type:       ttype,
		TICKscript: tick,
	}); err != nil {
		t.Fatal(err)
	}
	// Override 'field' so the task counts the actual written field.
	if _, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         taskId,
		TemplateID: templateId,
		DBRPs:      dbrps,
		Status:     client.Enabled,
		Vars: client.Vars{
			"field": {
				Value: "value",
				Type:  client.VarString,
			},
		},
	}); err != nil {
		t.Fatal(err)
	}
	endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), taskId)
	// Request data before any writes and expect null responses
	nullResponse := `{"series":null}`
	if err := s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5); err != nil {
		t.Error(err)
	}
	points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
`
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", points, v)
	exp := `{"series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
	if err := s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5); err != nil {
		t.Error(err)
	}
}
// TestServer_StreamTemplateTask_MissingVar verifies that creating a task
// from a template that declares a var without a default fails with a clear
// error when the task does not supply a value for that var.
func TestServer_StreamTemplateTask_MissingVar(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	templateId := "testStreamTemplate"
	taskId := "testStreamTask"
	ttype := client.StreamTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	// 'field' is declared but has no default value.
	tick := `
var field string
stream
	|from()
		.measurement('test')
	|window()
		.period(10s)
		.every(10s)
	|count(field)
	|httpOut('count')
`
	if _, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         templateId,
		Type:       ttype,
		TICKscript: tick,
	}); err != nil {
		t.Fatal(err)
	}
	// No Vars are provided, so task creation must fail.
	if _, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         taskId,
		TemplateID: templateId,
		DBRPs:      dbrps,
		Status:     client.Enabled,
	}); err == nil {
		t.Error("expected error for missing task vars")
	} else if exp, got := "invalid TICKscript: missing value for var \"field\".", err.Error(); got != exp {
		t.Errorf("unexpected error message: got %s exp %s", got, exp)
	}
}
// TestServer_StreamTemplateTask_AllTypes exercises every template var type
// (bool, int, float, duration, string, regex, lambda, list/star) by creating
// a task that filters, groups, windows, counts, re-groups, sums, thresholds,
// and evals written points, then checks the final httpOut result.
func TestServer_StreamTemplateTask_AllTypes(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	templateId := "testStreamTemplate"
	taskId := "testStreamTask"
	ttype := client.StreamTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	tick := `
var bool bool
var count_threshold int
var value_threshold float
var window duration
var field string
var tagMatch regex
var match lambda
var eval lambda
var groups list
var secondGroup list
stream
	|from()
		.measurement('test')
		.where(lambda: match AND "tag" =~ tagMatch AND bool AND "value" >= value_threshold)
		.groupBy(groups)
	|log().prefix('FROM')
	|window()
		.period(window)
		.every(window)
	|log().prefix('WINDOW')
	|count(field)
	|log().prefix('COUNT')
	|groupBy(secondGroup)
	|sum('count')
		.as('count')
	|log().prefix('SUM')
	|where(lambda: "count" >= count_threshold)
	|log().prefix('WHERE')
	|eval(eval)
		.as('count')
	|httpOut('count')
`
	if _, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         templateId,
		Type:       ttype,
		TICKscript: tick,
	}); err != nil {
		t.Fatal(err)
	}
	if _, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         taskId,
		TemplateID: templateId,
		DBRPs:      dbrps,
		Status:     client.Enabled,
		Vars: client.Vars{
			"bool": {
				Value: true,
				Type:  client.VarBool,
			},
			"count_threshold": {
				Value: int64(1),
				Type:  client.VarInt,
			},
			"value_threshold": {
				Value: float64(1.0),
				Type:  client.VarFloat,
			},
			"window": {
				Value: 10 * time.Second,
				Type:  client.VarDuration,
			},
			"field": {
				Value: "value",
				Type:  client.VarString,
			},
			"tagMatch": {
				Value: "^a.*",
				Type:  client.VarRegex,
			},
			"match": {
				Value: `"value" == 1.0`,
				Type:  client.VarLambda,
			},
			"eval": {
				Value: `"count" * 2`,
				Type:  client.VarLambda,
			},
			// Composite literals simplified per gofmt -s: the element type
			// []client.Var is implied by the slice literal.
			"groups": {
				Value: []client.Var{{Type: client.VarStar}},
				Type:  client.VarList,
			},
			"secondGroup": {
				Value: []client.Var{{Value: "tag", Type: client.VarString}},
				Type:  client.VarList,
			},
		},
	}); err != nil {
		t.Fatal(err)
	}
	endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), taskId)
	// Request data before any writes and expect null responses
	nullResponse := `{"series":null}`
	if err := s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5); err != nil {
		t.Error(err)
	}
	// Points with tag=bbc, value!=1.0, or t>=10s are filtered out by the
	// where/regex/window conditions; 12 survivors * eval("count" * 2) = 24.
	points := `test,tag=abc,other=a value=1 0000000000
test,tag=abc,other=b value=1 0000000000
test,tag=abc,other=a value=1 0000000001
test,tag=bbc,other=b value=1 0000000001
test,tag=abc,other=a value=1 0000000002
test,tag=abc,other=a value=0 0000000002
test,tag=abc,other=b value=1 0000000003
test,tag=abc,other=a value=1 0000000003
test,tag=abc,other=a value=1 0000000004
test,tag=abc,other=b value=1 0000000005
test,tag=abc,other=a value=1 0000000005
test,tag=bbc,other=a value=1 0000000005
test,tag=abc,other=b value=1 0000000006
test,tag=abc,other=a value=1 0000000007
test,tag=abc,other=b value=0 0000000008
test,tag=abc,other=a value=1 0000000009
test,tag=abc,other=a value=1 0000000010
test,tag=abc,other=a value=1 0000000011
test,tag=abc,other=b value=1 0000000011
test,tag=bbc,other=a value=1 0000000011
test,tag=bbc,other=b value=1 0000000011
test,tag=abc,other=a value=1 0000000021
`
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", points, v)
	exp := `{"series":[{"name":"test","tags":{"tag":"abc"},"columns":["time","count"],"values":[["1970-01-01T00:00:10Z",24]]}]}`
	if err := s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5); err != nil {
		t.Error(err)
	}
}
// TestServer_StreamTemplateTaskFromUpdate creates a templated task in the
// Disabled state and enables it afterwards via UpdateTask, verifying that
// enabling-by-update works the same as creating enabled.
func TestServer_StreamTemplateTaskFromUpdate(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	templateId := "testStreamTemplate"
	taskId := "testStreamTask"
	ttype := client.StreamTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	tick := `
var field = 'nonexistent'
stream
	|from()
		.measurement('test')
	|window()
		.period(10s)
		.every(10s)
	|count(field)
	|httpOut('count')
`
	if _, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         templateId,
		Type:       ttype,
		TICKscript: tick,
	}); err != nil {
		t.Fatal(err)
	}
	// Created disabled; enabled below through UpdateTask.
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         taskId,
		TemplateID: templateId,
		DBRPs:      dbrps,
		Status:     client.Disabled,
		Vars: client.Vars{
			"field": {
				Value: "value",
				Type:  client.VarString,
			},
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	if _, err := cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		Status: client.Enabled,
	}); err != nil {
		t.Fatal(err)
	}
	endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), taskId)
	// Request data before any writes and expect null responses
	nullResponse := `{"series":null}`
	if err := s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5); err != nil {
		t.Error(err)
	}
	points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
`
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", points, v)
	exp := `{"series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
	if err := s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5); err != nil {
		t.Error(err)
	}
}
// TestServer_StreamTemplateTask_UpdateTemplate verifies that updating a
// template's TICKscript reloads the enabled tasks created from it: the task
// is first created from a script counting the wrong field, then the template
// is fixed and the task must produce correct results.
func TestServer_StreamTemplateTask_UpdateTemplate(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	templateId := "testStreamTemplate"
	taskId := "testStreamTask"
	ttype := client.StreamTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	// Counts a field that never exists in the data.
	tickWrong := `
stream
	|from()
		.measurement('test')
	|window()
		.period(10s)
		.every(10s)
	|count('wrong')
	|httpOut('count')
`
	// Counts the var-selected field instead.
	tickCorrect := `
var field string
stream
	|from()
		.measurement('test')
	|window()
		.period(10s)
		.every(10s)
	|count(field)
	|httpOut('count')
`
	template, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         templateId,
		Type:       ttype,
		TICKscript: tickWrong,
	})
	if err != nil {
		t.Fatal(err)
	}
	// The task already supplies 'field', unused by tickWrong but required
	// once the template is updated to tickCorrect.
	if _, err = cli.CreateTask(client.CreateTaskOptions{
		ID:         taskId,
		TemplateID: templateId,
		DBRPs:      dbrps,
		Status:     client.Enabled,
		Vars: client.Vars{
			"field": {
				Value: "value",
				Type:  client.VarString,
			},
		},
	}); err != nil {
		t.Fatal(err)
	}
	if _, err := cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
		TICKscript: tickCorrect,
	}); err != nil {
		t.Fatal(err)
	}
	endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), taskId)
	// Request data before any writes and expect null responses
	nullResponse := `{"series":null}`
	if err := s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5); err != nil {
		t.Error(err)
	}
	points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
`
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", points, v)
	exp := `{"series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
	if err := s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5); err != nil {
		t.Error(err)
	}
}
// TestServer_StreamTemplateTask_UpdateTemplate_Rollback verifies that a
// template update introducing a var that existing tasks do not define is
// rejected and rolled back (tasks keep the old script), and that the same
// update succeeds once all tasks have been given a value for the new var.
func TestServer_StreamTemplateTask_UpdateTemplate_Rollback(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	templateId := "testStreamTemplate"
	taskId := "testStreamTask"
	ttype := client.StreamTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	tickCorrect := `
var field string
stream
	|from()
		.measurement('test')
	|window()
		.period(10s)
		.every(10s)
	|count(field)
	|httpOut('count')
`
	// Adds a 'period' var without a default — breaking for existing tasks.
	tickNewVar := `
var field string
var period duration
stream
	|from()
		.measurement('test')
	|window()
		.period(period)
		.every(period)
	|count(field)
	|httpOut('count')
`
	template, err := cli.CreateTemplate(client.CreateTemplateOptions{
		ID:         templateId,
		Type:       ttype,
		TICKscript: tickCorrect,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Create several tasks
	count := 5
	tasks := make([]client.Task, count)
	for i := 0; i < count; i++ {
		if task, err := cli.CreateTask(client.CreateTaskOptions{
			ID:         fmt.Sprintf("%s-%d", taskId, i),
			TemplateID: templateId,
			DBRPs:      dbrps,
			Status:     client.Enabled,
			Vars: client.Vars{
				"field": {
					Value: "value",
					Type:  client.VarString,
				},
			},
		}); err != nil {
			t.Fatal(err)
		} else {
			tasks[i] = task
		}
	}
	// The breaking update must fail, naming the first task that could not
	// be reloaded.
	if _, err := cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
		TICKscript: tickNewVar,
	}); err == nil {
		t.Error("expected error for breaking template update, got nil")
	} else if got, exp := err.Error(), `error reloading associated task testStreamTask-0: missing value for var "period".`; exp != got {
		t.Errorf("unexpected error for breaking template update, got %s exp %s", got, exp)
	}
	// Get all tasks and make sure their TICKscript has the original value
	for _, task := range tasks {
		if gotTask, err := cli.Task(task.Link, &client.TaskOptions{ScriptFormat: "raw"}); err != nil {
			t.Fatal(err)
		} else if got, exp := gotTask.TICKscript, tickCorrect; got != exp {
			t.Errorf("unexpected task TICKscript:\ngot\n%s\nexp\n%s\n", got, exp)
		}
	}
	// Update all tasks with new var
	for _, task := range tasks {
		if _, err := cli.UpdateTask(task.Link, client.UpdateTaskOptions{
			Vars: client.Vars{
				"field": {
					Value: "value",
					Type:  client.VarString,
				},
				"period": {
					Value: 10 * time.Second,
					Type:  client.VarDuration,
				},
			},
		}); err != nil {
			t.Fatal(err)
		}
	}
	// Now update template should succeed since the tasks are updated too.
	if _, err := cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
		TICKscript: tickNewVar,
	}); err != nil {
		t.Fatal(err)
	}
	for _, task := range tasks {
		taskId := task.ID
		endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), taskId)
		// Request data before any writes and expect null responses
		nullResponse := `{"series":null}`
		if err := s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5); err != nil {
			t.Error(err)
		}
	}
	points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
`
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", points, v)
	// Every reloaded task must now produce the windowed count.
	for _, task := range tasks {
		taskId := task.ID
		endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), taskId)
		exp := `{"series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
		if err := s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5); err != nil {
			t.Error(err)
		}
	}
}
// TestServer_UpdateTaskID renames a disabled task via UpdateTask and
// verifies the link and ID change, the DOT graph is regenerated from the
// new ID, and status/DBRPs/TICKscript are preserved.
func TestServer_UpdateTaskID(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testTaskID"
	ttype := client.StreamTask
	dbrps := []client.DBRP{
		{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		},
		{
			Database:        "otherdb",
			RetentionPolicy: "default",
		},
	}
	tick := `stream
	|from()
		.measurement('test')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Sanity-check the task as created before renaming it.
	ti, err := cli.Task(task.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != id {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
	}
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.Status != client.Disabled {
		t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
	}
	if !reflect.DeepEqual(ti.DBRPs, dbrps) {
		t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
	}
	if ti.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
	}
	dot := "digraph testTaskID {\nstream0 -> from1;\n}"
	if ti.Dot != dot {
		t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
	}
	// Rename the task; the returned link must point at the new ID.
	newID := "newTaskID"
	task, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		ID: newID,
	})
	if err != nil {
		t.Fatal(err)
	}
	if got, exp := task.Link.Href, "/kapacitor/v1/tasks/newTaskID"; got != exp {
		t.Fatalf("unexpected task link got %s exp %s", got, exp)
	}
	ti, err = cli.Task(task.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != newID {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, newID)
	}
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.Status != client.Disabled {
		t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
	}
	if !reflect.DeepEqual(ti.DBRPs, dbrps) {
		t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
	}
	if ti.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
	}
	// The DOT graph name follows the task ID, so it must change too.
	dot = "digraph newTaskID {\nstream0 -> from1;\n}"
	if ti.Dot != dot {
		t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
	}
}
// TestServer_UpdateTaskID_Fail verifies that renaming a task to an ID that
// already exists fails, and that the original task remains intact and
// retrievable afterwards.
func TestServer_UpdateTaskID_Fail(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testTaskID"
	newID := "anotherTaskID"
	ttype := client.StreamTask
	dbrps := []client.DBRP{
		{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		},
		{
			Database:        "otherdb",
			RetentionPolicy: "default",
		},
	}
	tick := `stream
	|from()
		.measurement('test')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	ti, err := cli.Task(task.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != id {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
	}
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.Status != client.Disabled {
		t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
	}
	if !reflect.DeepEqual(ti.DBRPs, dbrps) {
		t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
	}
	if ti.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
	}
	dot := "digraph testTaskID {\nstream0 -> from1;\n}"
	if ti.Dot != dot {
		t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
	}
	// Create conflicting task
	if _, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         newID,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	}); err != nil {
		t.Fatal(err)
	}
	// Renaming onto the existing ID must be rejected.
	if _, err := cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		ID: newID,
	}); err == nil {
		t.Fatal("expected error on name conflict")
	}
	// Can still get old task
	ti, err = cli.Task(task.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != id {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
	}
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.Status != client.Disabled {
		t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
	}
	if !reflect.DeepEqual(ti.DBRPs, dbrps) {
		t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
	}
	if ti.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
	}
	if ti.Dot != dot {
		t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
	}
}
// TestServer_UpdateTaskID_Enabled verifies that a task can be renamed while
// it is enabled and executing: after the rename the task is served under the
// new link, keeps all of its fields, and is still executing.
func TestServer_UpdateTaskID_Enabled(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testTaskID"
	ttype := client.StreamTask
	dbrps := []client.DBRP{
		{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		},
		{
			Database:        "otherdb",
			RetentionPolicy: "default",
		},
	}
	tick := `stream
    |from()
        .measurement('test')
`
	// Create the task already enabled so it starts executing immediately.
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Enabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Verify the stored task state before the rename.
	ti, err := cli.Task(task.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != id {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
	}
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.Status != client.Enabled {
		t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Enabled)
	}
	if !reflect.DeepEqual(ti.DBRPs, dbrps) {
		t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
	}
	if ti.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
	}
	if !ti.Executing {
		t.Fatal("expected task to be executing")
	}
	// Rename the running task; the returned link must point at the new ID.
	newID := "newTaskID"
	task, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		ID: newID,
	})
	if err != nil {
		t.Fatal(err)
	}
	if got, exp := task.Link.Href, "/kapacitor/v1/tasks/newTaskID"; got != exp {
		t.Fatalf("unexpected task link got %s exp %s", got, exp)
	}
	// Verify the renamed task retained its fields and is still executing.
	ti, err = cli.Task(task.Link, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != newID {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, newID)
	}
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.Status != client.Enabled {
		t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Enabled)
	}
	if !reflect.DeepEqual(ti.DBRPs, dbrps) {
		t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
	}
	if ti.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
	}
	if !ti.Executing {
		t.Fatal("expected task to be executing")
	}
}
// TestServer_StreamTask_AllMeasurements verifies that a stream task whose
// from() node has no measurement filter receives points from every
// measurement: points written to both test0 and test1 are all counted.
func TestServer_StreamTask_AllMeasurements(t *testing.T) {
	srv, kcli := OpenDefaultServer()
	defer srv.Close()

	const taskID = "testStreamTask"
	script := `stream
    |from()
    |window()
        .period(10s)
        .every(10s)
    |count('value')
    |httpOut('count')
`
	// Create the task disabled, then enable it, mirroring the usual flow.
	created, err := kcli.CreateTask(client.CreateTaskOptions{
		ID:   taskID,
		Type: client.StreamTask,
		DBRPs: []client.DBRP{{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		}},
		TICKscript: script,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	if _, err := kcli.UpdateTask(created.Link, client.UpdateTaskOptions{
		Status: client.Enabled,
	}); err != nil {
		t.Fatal(err)
	}

	endpoint := fmt.Sprintf("%s/tasks/%s/count", srv.URL(), taskID)
	// Request data before any writes and expect null responses.
	if err := srv.HTTPGetRetry(endpoint, `{"series":null}`, 100, 5*time.Millisecond); err != nil {
		t.Error(err)
	}

	points := `test0 value=1 0000000000
test1 value=1 0000000001
test0 value=1 0000000001
test1 value=1 0000000002
test0 value=1 0000000002
test1 value=1 0000000003
test0 value=1 0000000003
test1 value=1 0000000004
test0 value=1 0000000005
test1 value=1 0000000005
test0 value=1 0000000005
test1 value=1 0000000006
test0 value=1 0000000007
test1 value=1 0000000008
test0 value=1 0000000009
test1 value=1 0000000010
test0 value=1 0000000011
`
	params := url.Values{}
	params.Add("precision", "s")
	srv.MustWrite("mydb", "myrp", points, params)

	// All 15 points inside the first 10s window are counted, regardless of
	// measurement name.
	exp := `{"series":[{"name":"test0","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
	if err := srv.HTTPGetRetry(endpoint, exp, 100, 5*time.Millisecond); err != nil {
		t.Error(err)
	}
}
// TestServer_BatchTask runs a batch task against a fake InfluxDB backend and
// verifies that the two points returned by the first query are counted and
// exposed via the task's httpOut endpoint.
func TestServer_BatchTask(t *testing.T) {
	c := NewConfig()
	c.InfluxDB[0].Enabled = true
	count := 0
	stopTimeC := make(chan time.Time, 1)
	// Fake InfluxDB: the first SELECT returns two points just before the
	// query's stop time (and publishes that stop time on stopTimeC); every
	// subsequent query returns an empty series.
	db := NewInfluxDB(func(q string) *iclient.Response {
		stmt, err := influxql.ParseStatement(q)
		if err != nil {
			return &iclient.Response{Err: err.Error()}
		}
		slct, ok := stmt.(*influxql.SelectStatement)
		if !ok {
			return nil
		}
		// Dig the query's stop time out of the WHERE clause:
		// time >= start AND time < 'stop'.
		cond, ok := slct.Condition.(*influxql.BinaryExpr)
		if !ok {
			return &iclient.Response{Err: "expected select condition to be binary expression"}
		}
		stopTimeExpr, ok := cond.RHS.(*influxql.BinaryExpr)
		if !ok {
			return &iclient.Response{Err: "expected select condition rhs to be binary expression"}
		}
		stopTL, ok := stopTimeExpr.RHS.(*influxql.StringLiteral)
		if !ok {
			return &iclient.Response{Err: "expected select condition rhs to be string literal"}
		}
		count++
		switch count {
		case 1:
			stopTime, err := time.Parse(time.RFC3339Nano, stopTL.Val)
			if err != nil {
				return &iclient.Response{Err: err.Error()}
			}
			stopTimeC <- stopTime
			return &iclient.Response{
				Results: []iclient.Result{{
					Series: []imodels.Row{{
						Name:    "cpu",
						Columns: []string{"time", "value"},
						Values: [][]interface{}{
							{
								stopTime.Add(-2 * time.Millisecond).Format(time.RFC3339Nano),
								1.0,
							},
							{
								stopTime.Add(-1 * time.Millisecond).Format(time.RFC3339Nano),
								1.0,
							},
						},
					}},
				}},
			}
		default:
			return &iclient.Response{
				Results: []iclient.Result{{
					Series: []imodels.Row{{
						Name:    "cpu",
						Columns: []string{"time", "value"},
						Values:  [][]interface{}{},
					}},
				}},
			}
		}
	})
	c.InfluxDB[0].URLs = []string{db.URL()}
	s := OpenServer(c)
	defer s.Close()
	cli := Client(s)
	id := "testBatchTask"
	ttype := client.BatchTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	tick := `batch
    |query('SELECT value from mydb.myrp.cpu')
        .period(5ms)
        .every(5ms)
        .align()
    |count('value')
    |where(lambda: "count" == 2)
    |httpOut('count')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		Status: client.Enabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id)
	// Fix: use a one-shot Timer for the deadline. The original used
	// time.NewTicker, which is a repeating ticker and the wrong tool for a
	// single timeout.
	timeout := time.NewTimer(100 * time.Millisecond)
	defer timeout.Stop()
	select {
	case <-timeout.C:
		t.Fatal("timedout waiting for query")
	case stopTime := <-stopTimeC:
		exp := fmt.Sprintf(`{"series":[{"name":"cpu","columns":["time","count"],"values":[["%s",2]]}]}`, stopTime.Local().Format(time.RFC3339Nano))
		err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
		if err != nil {
			t.Error(err)
		}
		_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
			Status: client.Disabled,
		})
		if err != nil {
			t.Fatal(err)
		}
	}
}
// TestServer_BatchTask_InfluxDBConfigUpdate verifies that updating the
// InfluxDB connection URLs through the config API while a batch task is
// running redirects the task's queries to the new backend: the task starts
// against a backend that returns no results, the config is switched to a
// good backend, and the task's httpOut endpoint then serves the expected
// count. It also checks that the bad backend was actually queried first.
func TestServer_BatchTask_InfluxDBConfigUpdate(t *testing.T) {
	c := NewConfig()
	c.InfluxDB[0].Enabled = true
	count := 0
	stopTimeC := make(chan time.Time, 1)
	badCount := 0
	// Backend used initially; only counts queries and returns no data.
	dbBad := NewInfluxDB(func(q string) *iclient.Response {
		badCount++
		// Return empty results
		return &iclient.Response{
			Results: []iclient.Result{},
		}
	})
	defer dbBad.Close()
	// Good backend: first SELECT returns two points just before the query's
	// stop time (publishing the stop time on stopTimeC); later queries
	// return an empty series.
	db := NewInfluxDB(func(q string) *iclient.Response {
		stmt, err := influxql.ParseStatement(q)
		if err != nil {
			return &iclient.Response{Err: err.Error()}
		}
		slct, ok := stmt.(*influxql.SelectStatement)
		if !ok {
			return nil
		}
		// Extract the stop time from the WHERE clause of the query.
		cond, ok := slct.Condition.(*influxql.BinaryExpr)
		if !ok {
			return &iclient.Response{Err: "expected select condition to be binary expression"}
		}
		stopTimeExpr, ok := cond.RHS.(*influxql.BinaryExpr)
		if !ok {
			return &iclient.Response{Err: "expected select condition rhs to be binary expression"}
		}
		stopTL, ok := stopTimeExpr.RHS.(*influxql.StringLiteral)
		if !ok {
			return &iclient.Response{Err: "expected select condition rhs to be string literal"}
		}
		count++
		switch count {
		case 1:
			stopTime, err := time.Parse(time.RFC3339Nano, stopTL.Val)
			if err != nil {
				return &iclient.Response{Err: err.Error()}
			}
			stopTimeC <- stopTime
			return &iclient.Response{
				Results: []iclient.Result{{
					Series: []imodels.Row{{
						Name:    "cpu",
						Columns: []string{"time", "value"},
						Values: [][]interface{}{
							{
								stopTime.Add(-2 * time.Millisecond).Format(time.RFC3339Nano),
								1.0,
							},
							{
								stopTime.Add(-1 * time.Millisecond).Format(time.RFC3339Nano),
								1.0,
							},
						},
					}},
				}},
			}
		default:
			return &iclient.Response{
				Results: []iclient.Result{{
					Series: []imodels.Row{{
						Name:    "cpu",
						Columns: []string{"time", "value"},
						Values:  [][]interface{}{},
					}},
				}},
			}
		}
	})
	defer db.Close()
	// Set bad URL first
	c.InfluxDB[0].URLs = []string{dbBad.URL()}
	s := OpenServer(c)
	defer s.Close()
	cli := Client(s)
	id := "testBatchTask"
	ttype := client.BatchTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	tick := `batch
    |query('SELECT value from mydb.myrp.cpu')
        .period(5ms)
        .every(5ms)
        .align()
    |count('value')
    |where(lambda: "count" == 2)
    |httpOut('count')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		Status: client.Enabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Update InfluxDB config, while task is running
	influxdbDefault := cli.ConfigElementLink("influxdb", "default")
	if err := cli.ConfigUpdate(influxdbDefault, client.ConfigUpdateAction{
		Set: map[string]interface{}{
			"urls": []string{db.URL()},
		},
	}); err != nil {
		t.Fatal(err)
	}
	endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id)
	// NOTE(review): a repeating Ticker is used here as a one-shot timeout;
	// time.NewTimer would be the conventional choice — confirm and align.
	timeout := time.NewTicker(100 * time.Millisecond)
	defer timeout.Stop()
	select {
	case <-timeout.C:
		t.Fatal("timedout waiting for query")
	case stopTime := <-stopTimeC:
		exp := fmt.Sprintf(`{"series":[{"name":"cpu","columns":["time","count"],"values":[["%s",2]]}]}`, stopTime.Local().Format(time.RFC3339Nano))
		err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
		if err != nil {
			t.Error(err)
		}
		_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
			Status: client.Disabled,
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	// Prove the task really started against the bad backend.
	if badCount == 0 {
		t.Error("expected bad influxdb to be queried at least once")
	}
}
// TestServer_InvalidBatchTask ensures that enabling a batch task whose query
// targets a database/retention policy outside the task's declared DBRPs is
// rejected with a descriptive error, and that the task can still be deleted
// afterwards.
func TestServer_InvalidBatchTask(t *testing.T) {
	c := NewConfig()
	c.InfluxDB[0].Enabled = true
	db := NewInfluxDB(func(q string) *iclient.Response {
		return nil
	})
	c.InfluxDB[0].URLs = []string{db.URL()}
	s := OpenServer(c)
	defer s.Close()
	cli := Client(s)
	id := "testInvalidBatchTask"
	ttype := client.BatchTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	// The query references unknowndb.unknownrp, which is not in dbrps.
	tick := `batch
    |query(' SELECT value from unknowndb.unknownrp.cpu ')
        .period(5ms)
        .every(5ms)
    |count('value')
    |httpOut('count')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		Status: client.Enabled,
	})
	expErr := `batch query is not allowed to request data from "unknowndb"."unknownrp"`
	// Fix: the original check (err != nil && err.Error() != expErr) passed
	// silently when no error was returned at all; enabling this task must
	// fail, so require a non-nil error with the expected message.
	if err == nil {
		t.Fatalf("expected error enabling invalid batch task, exp %s", expErr)
	} else if err.Error() != expErr {
		t.Fatalf("unexpected err: got %v exp %s", err, expErr)
	}
	err = cli.DeleteTask(task.Link)
	if err != nil {
		t.Fatal(err)
	}
}
// TestServer_RecordReplayStream exercises the full record/replay lifecycle
// for a stream task: record live writes, replay the recording against the
// task with the recording's timestamps, verify the alert the replay produces
// in the log file, and finally delete the recording and replay, checking the
// list endpoints before and after each delete.
func TestServer_RecordReplayStream(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testStreamTask"
	ttype := client.StreamTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	// Alerts are written to a log file in this temp dir for inspection.
	tmpDir, err := ioutil.TempDir("", "testStreamTaskRecording")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)
	tick := `stream
    |from()
        .measurement('test')
    |window()
        .period(10s)
        .every(10s)
    |count('value')
    |alert()
        .id('test-count')
        .message('{{ .ID }} got: {{ index .Fields "count" }}')
        .crit(lambda: TRUE)
        .log('` + tmpDir + `/alert.log')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Start recording; it stops at t=10s (in recording time).
	recording, err := cli.RecordStream(client.RecordStreamOptions{
		ID:   "recordingid",
		Task: task.ID,
		Stop: time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC),
	})
	if err != nil {
		t.Fatal(err)
	}
	if exp, got := "/kapacitor/v1/recordings/recordingid", recording.Link.Href; exp != got {
		t.Errorf("unexpected recording.Link.Href got %s exp %s", got, exp)
	}
	// 15 points fall inside the first 10s window (t=0..9); later points are
	// past the recording's stop time.
	points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
test value=1 0000000012
`
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", points, v)
	// Poll until the recording finishes (bounded retries).
	retry := 0
	for recording.Status == client.Running {
		time.Sleep(100 * time.Millisecond)
		recording, err = cli.Recording(recording.Link)
		if err != nil {
			t.Fatal(err)
		}
		retry++
		if retry > 100 {
			t.Fatal("failed to finish recording")
		}
	}
	if recording.Status != client.Finished || recording.Error != "" {
		t.Errorf("recording failed: %s", recording.Error)
	}
	// Replay the recording with the recording's own timestamps, as fast as
	// possible.
	replay, err := cli.CreateReplay(client.CreateReplayOptions{
		ID:            "replayid",
		Task:          id,
		Recording:     recording.ID,
		Clock:         client.Fast,
		RecordingTime: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	if exp, got := "/kapacitor/v1/replays/replayid", replay.Link.Href; exp != got {
		t.Errorf("unexpected replay.Link.Href got %s exp %s", got, exp)
	}
	if exp, got := id, replay.Task; exp != got {
		t.Errorf("unexpected replay.Task got %s exp %s", got, exp)
	}
	// Poll until the replay finishes (bounded retries).
	retry = 0
	for replay.Status == client.Running {
		time.Sleep(100 * time.Millisecond)
		replay, err = cli.Replay(replay.Link)
		if err != nil {
			t.Fatal(err)
		}
		retry++
		if retry > 10 {
			t.Fatal("failed to finish replay")
		}
	}
	if replay.Status != client.Finished || replay.Error != "" {
		t.Errorf("replay failed: %s", replay.Error)
	}
	// The replay should have produced exactly one CRITICAL alert in the log.
	f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	// Mirrors the alert JSON payload written to the log file.
	type response struct {
		ID      string          `json:"id"`
		Message string          `json:"message"`
		Time    time.Time       `json:"time"`
		Level   string          `json:"level"`
		Data    influxql.Result `json:"data"`
	}
	exp := response{
		ID:      "test-count",
		Message: "test-count got: 15",
		Time:    time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC),
		Level:   "CRITICAL",
		Data: influxql.Result{
			Series: imodels.Rows{
				{
					Name:    "test",
					Columns: []string{"time", "count"},
					Values: [][]interface{}{
						{
							time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC).Format(time.RFC3339Nano),
							15.0,
						},
					},
				},
			},
		},
	}
	got := response{}
	d := json.NewDecoder(f)
	// NOTE(review): the Decode error is discarded; a decode failure would
	// surface only as a DeepEqual mismatch below.
	d.Decode(&got)
	if !reflect.DeepEqual(exp, got) {
		t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp)
	}
	// Recording cleanup: list, delete, list again.
	recordings, err := cli.ListRecordings(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 1, len(recordings); exp != got {
		t.Fatalf("unexpected recordings list:\ngot %v\nexp %v\nrecordings %v", got, exp, recordings)
	}
	err = cli.DeleteRecording(recordings[0].Link)
	if err != nil {
		t.Error(err)
	}
	recordings, err = cli.ListRecordings(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 0, len(recordings); exp != got {
		t.Errorf("unexpected recordings list after delete:\ngot %v\nexp %v\nrecordings %v", got, exp, recordings)
	}
	// Replay cleanup: list, delete, list again.
	replays, err := cli.ListReplays(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 1, len(replays); exp != got {
		t.Fatalf("unexpected replays list:\ngot %v\nexp %v\nreplays %v", got, exp, replays)
	}
	err = cli.DeleteReplay(replays[0].Link)
	if err != nil {
		t.Error(err)
	}
	replays, err = cli.ListReplays(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 0, len(replays); exp != got {
		t.Errorf("unexpected replays list after delete:\ngot %v\nexp %v\nreplays %v", got, exp, replays)
	}
}
// TestServer_RecordReplayStreamWithPost is the same record/replay lifecycle
// as TestServer_RecordReplayStream, with the alert node additionally
// configured with a .post() handler; the assertions are still made against
// the alert log file.
func TestServer_RecordReplayStreamWithPost(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	id := "testStreamTask"
	ttype := client.StreamTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	// Alerts are written to a log file in this temp dir for inspection.
	tmpDir, err := ioutil.TempDir("", "testStreamTaskRecording")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)
	tick := `stream
    |from()
        .measurement('test')
    |window()
        .period(10s)
        .every(10s)
    |count('value')
    |alert()
        .id('test-count')
        .message('{{ .ID }} got: {{ index .Fields "count" }}')
        .crit(lambda: TRUE)
        .post('http://localhost:8080')
        .log('` + tmpDir + `/alert.log')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Start recording; it stops at t=10s (in recording time).
	recording, err := cli.RecordStream(client.RecordStreamOptions{
		ID:   "recordingid",
		Task: task.ID,
		Stop: time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC),
	})
	if err != nil {
		t.Fatal(err)
	}
	if exp, got := "/kapacitor/v1/recordings/recordingid", recording.Link.Href; exp != got {
		t.Errorf("unexpected recording.Link.Href got %s exp %s", got, exp)
	}
	// 15 points fall inside the first 10s window (t=0..9).
	points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
test value=1 0000000012
`
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", points, v)
	// Poll until the recording finishes (bounded retries).
	retry := 0
	for recording.Status == client.Running {
		time.Sleep(100 * time.Millisecond)
		recording, err = cli.Recording(recording.Link)
		if err != nil {
			t.Fatal(err)
		}
		retry++
		if retry > 100 {
			t.Fatal("failed to finish recording")
		}
	}
	if recording.Status != client.Finished || recording.Error != "" {
		t.Errorf("recording failed: %s", recording.Error)
	}
	// Replay the recording with the recording's own timestamps.
	replay, err := cli.CreateReplay(client.CreateReplayOptions{
		ID:            "replayid",
		Task:          id,
		Recording:     recording.ID,
		Clock:         client.Fast,
		RecordingTime: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	if exp, got := "/kapacitor/v1/replays/replayid", replay.Link.Href; exp != got {
		t.Errorf("unexpected replay.Link.Href got %s exp %s", got, exp)
	}
	if exp, got := id, replay.Task; exp != got {
		t.Errorf("unexpected replay.Task got %s exp %s", got, exp)
	}
	// Poll until the replay finishes (bounded retries).
	retry = 0
	for replay.Status == client.Running {
		time.Sleep(100 * time.Millisecond)
		replay, err = cli.Replay(replay.Link)
		if err != nil {
			t.Fatal(err)
		}
		retry++
		if retry > 10 {
			t.Fatal("failed to finish replay")
		}
	}
	if replay.Status != client.Finished || replay.Error != "" {
		t.Errorf("replay failed: %s", replay.Error)
	}
	// Verify the single CRITICAL alert written to the log file.
	f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	// Mirrors the alert JSON payload written to the log file.
	type response struct {
		ID      string          `json:"id"`
		Message string          `json:"message"`
		Time    time.Time       `json:"time"`
		Level   string          `json:"level"`
		Data    influxql.Result `json:"data"`
	}
	exp := response{
		ID:      "test-count",
		Message: "test-count got: 15",
		Time:    time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC),
		Level:   "CRITICAL",
		Data: influxql.Result{
			Series: imodels.Rows{
				{
					Name:    "test",
					Columns: []string{"time", "count"},
					Values: [][]interface{}{
						{
							time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC).Format(time.RFC3339Nano),
							15.0,
						},
					},
				},
			},
		},
	}
	got := response{}
	d := json.NewDecoder(f)
	// NOTE(review): the Decode error is discarded; a decode failure would
	// surface only as a DeepEqual mismatch below.
	d.Decode(&got)
	if !reflect.DeepEqual(exp, got) {
		t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp)
	}
	// Recording cleanup: list, delete, list again.
	recordings, err := cli.ListRecordings(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 1, len(recordings); exp != got {
		t.Fatalf("unexpected recordings list:\ngot %v\nexp %v\nrecordings %v", got, exp, recordings)
	}
	err = cli.DeleteRecording(recordings[0].Link)
	if err != nil {
		t.Error(err)
	}
	recordings, err = cli.ListRecordings(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 0, len(recordings); exp != got {
		t.Errorf("unexpected recordings list after delete:\ngot %v\nexp %v\nrecordings %v", got, exp, recordings)
	}
	// Replay cleanup: list, delete, list again.
	replays, err := cli.ListReplays(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 1, len(replays); exp != got {
		t.Fatalf("unexpected replays list:\ngot %v\nexp %v\nreplays %v", got, exp, replays)
	}
	err = cli.DeleteReplay(replays[0].Link)
	if err != nil {
		t.Error(err)
	}
	replays, err = cli.ListReplays(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 0, len(replays); exp != got {
		t.Errorf("unexpected replays list after delete:\ngot %v\nexp %v\nreplays %v", got, exp, replays)
	}
}
// TestServer_RecordReplayBatch exercises record/replay for a batch task: a
// fake InfluxDB serves two points per SELECT with monotonically increasing
// values, three query intervals are recorded, the recording is replayed with
// recording time, and the two expected CRITICAL alerts (values > 2.0) are
// verified in the alert log. Recording and replay are then deleted.
func TestServer_RecordReplayBatch(t *testing.T) {
	c := NewConfig()
	c.InfluxDB[0].Enabled = true
	value := 0
	// Each SELECT returns points at t=value and t=value+1 with matching
	// values, then advances value by 2 for the next query.
	db := NewInfluxDB(func(q string) *iclient.Response {
		if len(q) > 6 && q[:6] == "SELECT" {
			r := &iclient.Response{
				Results: []iclient.Result{{
					Series: []imodels.Row{{
						Name:    "cpu",
						Columns: []string{"time", "value"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, value, 0, time.UTC).Format(time.RFC3339Nano),
								float64(value),
							},
							{
								time.Date(1971, 1, 1, 0, 0, value+1, 0, time.UTC).Format(time.RFC3339Nano),
								float64(value + 1),
							},
						},
					}},
				}},
			}
			value += 2
			return r
		}
		return nil
	})
	c.InfluxDB[0].URLs = []string{db.URL()}
	s := OpenServer(c)
	defer s.Close()
	cli := Client(s)
	id := "testBatchTask"
	ttype := client.BatchTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	// Alerts are written to a log file in this temp dir for inspection.
	tmpDir, err := ioutil.TempDir("", "testBatchTaskRecording")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)
	tick := `batch
    |query('SELECT value from mydb.myrp.cpu')
        .period(2s)
        .every(2s)
    |alert()
        .id('test-batch')
        .message('{{ .ID }} got: {{ index .Fields "value" }}')
        .crit(lambda: "value" > 2.0)
        .log('` + tmpDir + `/alert.log')
`
	_, err = cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Record three 2s query intervals: [0,2), [2,4), [4,6).
	recording, err := cli.RecordBatch(client.RecordBatchOptions{
		ID:    "recordingid",
		Task:  id,
		Start: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC),
		Stop:  time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC),
	})
	if err != nil {
		t.Fatal(err)
	}
	if exp, got := "/kapacitor/v1/recordings/recordingid", recording.Link.Href; exp != got {
		t.Errorf("unexpected recording.Link.Href got %s exp %s", got, exp)
	}
	// Wait for recording to finish.
	retry := 0
	for recording.Status == client.Running {
		time.Sleep(100 * time.Millisecond)
		recording, err = cli.Recording(recording.Link)
		if err != nil {
			t.Fatal(err)
		}
		retry++
		if retry > 10 {
			t.Fatal("failed to perfom recording")
		}
	}
	// Replay the recording against the task using recording time.
	replay, err := cli.CreateReplay(client.CreateReplayOptions{
		Task:          id,
		Recording:     recording.ID,
		Clock:         client.Fast,
		RecordingTime: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	if exp, got := id, replay.Task; exp != got {
		t.Errorf("unexpected replay.Task got %s exp %s", got, exp)
	}
	// Wait for replay to finish.
	retry = 0
	for replay.Status == client.Running {
		time.Sleep(100 * time.Millisecond)
		replay, err = cli.Replay(replay.Link)
		if err != nil {
			t.Fatal(err)
		}
		retry++
		if retry > 10 {
			t.Fatal("failed to perform replay")
		}
	}
	// Two alert entries are expected: batches [2,3] and [4,5] both contain
	// values above the 2.0 threshold; batch [0,1] does not.
	f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	// Mirrors the alert JSON payload written to the log file.
	type response struct {
		ID      string          `json:"id"`
		Message string          `json:"message"`
		Time    time.Time       `json:"time"`
		Level   string          `json:"level"`
		Data    influxql.Result `json:"data"`
	}
	exp := []response{
		{
			ID:      "test-batch",
			Message: "test-batch got: 3",
			Time:    time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC),
			Level:   "CRITICAL",
			Data: influxql.Result{
				Series: imodels.Rows{
					{
						Name:    "cpu",
						Columns: []string{"time", "value"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
								2.0,
							},
							{
								time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
								3.0,
							},
						},
					},
				},
			},
		},
		{
			ID:      "test-batch",
			Message: "test-batch got: 4",
			Time:    time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC),
			Level:   "CRITICAL",
			Data: influxql.Result{
				Series: imodels.Rows{
					{
						Name:    "cpu",
						Columns: []string{"time", "value"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
								4.0,
							},
							{
								time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
								5.0,
							},
						},
					},
				},
			},
		},
	}
	dec := json.NewDecoder(f)
	got := make([]response, 0)
	for dec.More() {
		g := response{}
		// NOTE(review): Decode error is discarded; a decode failure would
		// surface only as a DeepEqual mismatch below.
		dec.Decode(&g)
		got = append(got, g)
	}
	if !reflect.DeepEqual(exp, got) {
		t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp)
		// NOTE(review): got[0]/got[1] index out of range (panic) if fewer
		// than two entries were decoded — guard before indexing.
		t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[0].Data.Series[0], exp[0].Data.Series[0])
		t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[1].Data.Series[0], exp[1].Data.Series[0])
	}
	// Recording cleanup: list, delete, list again.
	recordings, err := cli.ListRecordings(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 1, len(recordings); exp != got {
		t.Fatalf("unexpected recordings list:\ngot %v\nexp %v", got, exp)
	}
	err = cli.DeleteRecording(recordings[0].Link)
	if err != nil {
		t.Error(err)
	}
	recordings, err = cli.ListRecordings(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 0, len(recordings); exp != got {
		t.Errorf("unexpected recordings list:\ngot %v\nexp %v", got, exp)
	}
	// Replay cleanup: list, delete, list again.
	replays, err := cli.ListReplays(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 1, len(replays); exp != got {
		t.Fatalf("unexpected replays list:\ngot %v\nexp %v", got, exp)
	}
	err = cli.DeleteReplay(replays[0].Link)
	if err != nil {
		t.Error(err)
	}
	replays, err = cli.ListReplays(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 0, len(replays); exp != got {
		t.Errorf("unexpected replays list:\ngot %v\nexp %v", got, exp)
	}
}
// TestServer_ReplayBatch exercises direct batch replay (ReplayBatch) without
// a prior recording: the server queries the fake InfluxDB for the given time
// range and replays the results straight into the task. The two expected
// CRITICAL alerts are verified in the alert log, no recording is created,
// and the replay is deleted afterwards.
func TestServer_ReplayBatch(t *testing.T) {
	c := NewConfig()
	c.InfluxDB[0].Enabled = true
	value := 0
	// Each SELECT returns points at t=value and t=value+1 with matching
	// values, then advances value by 2 for the next query.
	db := NewInfluxDB(func(q string) *iclient.Response {
		if len(q) > 6 && q[:6] == "SELECT" {
			r := &iclient.Response{
				Results: []iclient.Result{{
					Series: []imodels.Row{{
						Name:    "cpu",
						Columns: []string{"time", "value"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, value, 0, time.UTC).Format(time.RFC3339Nano),
								float64(value),
							},
							{
								time.Date(1971, 1, 1, 0, 0, value+1, 0, time.UTC).Format(time.RFC3339Nano),
								float64(value + 1),
							},
						},
					}},
				}},
			}
			value += 2
			return r
		}
		return nil
	})
	c.InfluxDB[0].URLs = []string{db.URL()}
	s := OpenServer(c)
	defer s.Close()
	cli := Client(s)
	id := "testBatchTask"
	ttype := client.BatchTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	// Alerts are written to a log file in this temp dir for inspection.
	tmpDir, err := ioutil.TempDir("", "testBatchTaskRecording")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)
	tick := `batch
    |query('SELECT value from mydb.myrp.cpu')
        .period(2s)
        .every(2s)
    |alert()
        .id('test-batch')
        .message('{{ .ID }} got: {{ index .Fields "value" }}')
        .crit(lambda: "value" > 2.0)
        .log('` + tmpDir + `/alert.log')
`
	_, err = cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Replay the range [0s,6s) directly, using recording time.
	replay, err := cli.ReplayBatch(client.ReplayBatchOptions{
		ID:            "replayid",
		Task:          id,
		Start:         time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC),
		Stop:          time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC),
		Clock:         client.Fast,
		RecordingTime: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	if exp, got := "/kapacitor/v1/replays/replayid", replay.Link.Href; exp != got {
		t.Errorf("unexpected replay.Link.Href got %s exp %s", got, exp)
	}
	// Wait for replay to finish.
	retry := 0
	for replay.Status == client.Running {
		time.Sleep(100 * time.Millisecond)
		replay, err = cli.Replay(replay.Link)
		if err != nil {
			t.Fatal(err)
		}
		retry++
		if retry > 10 {
			t.Fatal("failed to perfom replay")
		}
	}
	// Two alert entries are expected: batches [2,3] and [4,5] both contain
	// values above the 2.0 threshold; batch [0,1] does not.
	f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	// Mirrors the alert JSON payload written to the log file.
	type response struct {
		ID      string          `json:"id"`
		Message string          `json:"message"`
		Time    time.Time       `json:"time"`
		Level   string          `json:"level"`
		Data    influxql.Result `json:"data"`
	}
	exp := []response{
		{
			ID:      "test-batch",
			Message: "test-batch got: 3",
			Time:    time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC),
			Level:   "CRITICAL",
			Data: influxql.Result{
				Series: imodels.Rows{
					{
						Name:    "cpu",
						Columns: []string{"time", "value"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
								2.0,
							},
							{
								time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
								3.0,
							},
						},
					},
				},
			},
		},
		{
			ID:      "test-batch",
			Message: "test-batch got: 4",
			Time:    time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC),
			Level:   "CRITICAL",
			Data: influxql.Result{
				Series: imodels.Rows{
					{
						Name:    "cpu",
						Columns: []string{"time", "value"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
								4.0,
							},
							{
								time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
								5.0,
							},
						},
					},
				},
			},
		},
	}
	dec := json.NewDecoder(f)
	got := make([]response, 0)
	for dec.More() {
		g := response{}
		// NOTE(review): Decode error is discarded; a decode failure would
		// surface only as a DeepEqual mismatch below.
		dec.Decode(&g)
		got = append(got, g)
	}
	if !reflect.DeepEqual(exp, got) {
		t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp)
		// NOTE(review): got[0]/got[1] index out of range (panic) if fewer
		// than two entries were decoded — guard before indexing.
		t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[0].Data.Series[0], exp[0].Data.Series[0])
		t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[1].Data.Series[0], exp[1].Data.Series[0])
	}
	// Direct replay must not create any recording.
	recordings, err := cli.ListRecordings(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 0, len(recordings); exp != got {
		t.Fatalf("unexpected recordings list:\ngot %v\nexp %v", got, exp)
	}
	// Replay cleanup: list, delete, list again.
	replays, err := cli.ListReplays(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 1, len(replays); exp != got {
		t.Fatalf("unexpected replays list:\ngot %v\nexp %v", got, exp)
	}
	err = cli.DeleteReplay(replays[0].Link)
	if err != nil {
		t.Error(err)
	}
	replays, err = cli.ListReplays(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 0, len(replays); exp != got {
		t.Errorf("unexpected replays list:\ngot %v\nexp %v", got, exp)
	}
}
func TestServer_RecordReplayQuery(t *testing.T) {
c := NewConfig()
c.InfluxDB[0].Enabled = true
db := NewInfluxDB(func(q string) *iclient.Response {
if len(q) > 6 && q[:6] == "SELECT" {
r := &iclient.Response{
Results: []iclient.Result{{
Series: []imodels.Row{
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC).Format(time.RFC3339Nano),
0.0,
},
{
time.Date(1971, 1, 1, 0, 0, 1, 0, time.UTC).Format(time.RFC3339Nano),
1.0,
},
},
},
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
2.0,
},
{
time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
3.0,
},
},
},
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
4.0,
},
{
time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
5.0,
},
},
},
},
}},
}
return r
}
return nil
})
c.InfluxDB[0].URLs = []string{db.URL()}
s := OpenServer(c)
defer s.Close()
cli := Client(s)
id := "testBatchTask"
ttype := client.BatchTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tmpDir, err := ioutil.TempDir("", "testBatchTaskRecording")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tick := `batch
|query('SELECT value from mydb.myrp.cpu')
.period(2s)
.every(2s)
|alert()
.id('test-batch')
.message('{{ .ID }} got: {{ index .Fields "value" }}')
.crit(lambda: "value" > 2.0)
.log('` + tmpDir + `/alert.log')
`
_, err = cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
recording, err := cli.RecordQuery(client.RecordQueryOptions{
ID: "recordingid",
Query: "SELECT value from mydb.myrp.cpu",
Type: client.BatchTask,
})
if err != nil {
t.Fatal(err)
}
if exp, got := "/kapacitor/v1/recordings/recordingid", recording.Link.Href; exp != got {
t.Errorf("unexpected recording.Link.Href got %s exp %s", got, exp)
}
// Wait for recording to finish.
retry := 0
for recording.Status == client.Running {
time.Sleep(100 * time.Millisecond)
recording, err = cli.Recording(recording.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to perfom recording")
}
}
replay, err := cli.CreateReplay(client.CreateReplayOptions{
Task: id,
Recording: recording.ID,
Clock: client.Fast,
RecordingTime: true,
})
if err != nil {
t.Fatal(err)
}
if exp, got := id, replay.Task; exp != got {
t.Errorf("unexpected replay.Task got %s exp %s", got, exp)
}
// Wait for replay to finish.
retry = 0
for replay.Status == client.Running {
time.Sleep(100 * time.Millisecond)
replay, err = cli.Replay(replay.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to perfom replay")
}
}
f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
if err != nil {
t.Fatal(err)
}
defer f.Close()
type response struct {
ID string `json:"id"`
Message string `json:"message"`
Time time.Time `json:"time"`
Level string `json:"level"`
Data influxql.Result `json:"data"`
}
exp := []response{
{
ID: "test-batch",
Message: "test-batch got: 3",
Time: time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC),
Level: "CRITICAL",
Data: influxql.Result{
Series: imodels.Rows{
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
2.0,
},
{
time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
3.0,
},
},
},
},
},
},
{
ID: "test-batch",
Message: "test-batch got: 4",
Time: time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC),
Level: "CRITICAL",
Data: influxql.Result{
Series: imodels.Rows{
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
4.0,
},
{
time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
5.0,
},
},
},
},
},
},
}
dec := json.NewDecoder(f)
got := make([]response, 0)
for dec.More() {
g := response{}
dec.Decode(&g)
got = append(got, g)
}
if !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp)
t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[0].Data.Series[0], exp[0].Data.Series[0])
t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[1].Data.Series[0], exp[1].Data.Series[0])
}
// ------------
// Test List/Delete Recordings/Replays
recordings, err := cli.ListRecordings(nil)
if err != nil {
t.Error(err)
}
if exp, got := 1, len(recordings); exp != got {
t.Fatalf("unexpected recordings list:\ngot %v\nexp %v", got, exp)
}
// Test List Recordings via direct default URL
resp, err := http.Get(s.URL() + "/recordings")
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if exp, got := http.StatusOK, resp.StatusCode; exp != got {
t.Errorf("unexpected status code, got %d exp %d", got, exp)
}
// Response type
type recResponse struct {
Recordings []client.Recording `json:"recordings"`
}
dec = json.NewDecoder(resp.Body)
recR := recResponse{}
dec.Decode(&recR)
if exp, got := 1, len(recR.Recordings); exp != got {
t.Fatalf("unexpected recordings count, got %d exp %d", got, exp)
}
err = cli.DeleteRecording(recordings[0].Link)
if err != nil {
t.Error(err)
}
recordings, err = cli.ListRecordings(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(recordings); exp != got {
t.Errorf("unexpected recordings list:\ngot %v\nexp %v", got, exp)
}
replays, err := cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 1, len(replays); exp != got {
t.Fatalf("unexpected replays list:\ngot %v\nexp %v", got, exp)
}
// Test List Replays via direct default URL
resp, err = http.Get(s.URL() + "/replays")
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if exp, got := http.StatusOK, resp.StatusCode; exp != got {
t.Errorf("unexpected status code, got %d exp %d", got, exp)
}
// Response type
type repResponse struct {
Replays []client.Replay `json:"replays"`
}
dec = json.NewDecoder(resp.Body)
repR := repResponse{}
dec.Decode(&repR)
if exp, got := 1, len(repR.Replays); exp != got {
t.Fatalf("unexpected replays count, got %d exp %d", got, exp)
}
err = cli.DeleteReplay(replays[0].Link)
if err != nil {
t.Error(err)
}
replays, err = cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(replays); exp != got {
t.Errorf("unexpected replays list:\ngot %v\nexp %v", got, exp)
}
}
// TestServer_ReplayQuery exercises the ReplayQuery API: an ad-hoc batch query
// is replayed directly against a task (no separate recording step), the
// resulting alert log is verified, and then replay listing/deletion is checked.
func TestServer_ReplayQuery(t *testing.T) {
	c := NewConfig()
	c.InfluxDB[0].Enabled = true
	// Fake InfluxDB: any SELECT returns three 2-point batches of "cpu"
	// values 0..5 at seconds 0..5 of 1971-01-01.
	db := NewInfluxDB(func(q string) *iclient.Response {
		if len(q) > 6 && q[:6] == "SELECT" {
			r := &iclient.Response{
				Results: []iclient.Result{{
					Series: []imodels.Row{
						{
							Name:    "cpu",
							Columns: []string{"time", "value"},
							Values: [][]interface{}{
								{
									time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC).Format(time.RFC3339Nano),
									0.0,
								},
								{
									time.Date(1971, 1, 1, 0, 0, 1, 0, time.UTC).Format(time.RFC3339Nano),
									1.0,
								},
							},
						},
						{
							Name:    "cpu",
							Columns: []string{"time", "value"},
							Values: [][]interface{}{
								{
									time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
									2.0,
								},
								{
									time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
									3.0,
								},
							},
						},
						{
							Name:    "cpu",
							Columns: []string{"time", "value"},
							Values: [][]interface{}{
								{
									time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
									4.0,
								},
								{
									time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
									5.0,
								},
							},
						},
					},
				}},
			}
			return r
		}
		return nil
	})
	c.InfluxDB[0].URLs = []string{db.URL()}
	s := OpenServer(c)
	defer s.Close()
	cli := Client(s)
	id := "testBatchTask"
	ttype := client.BatchTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	// Temp dir holds the alert.log the task writes; removed on exit.
	tmpDir, err := ioutil.TempDir("", "testBatchTaskRecording")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)
	// Task alerts CRITICAL for value > 2.0, so points 3, 4 and 5 trigger.
	tick := `batch
    |query('SELECT value from mydb.myrp.cpu')
        .period(2s)
        .every(2s)
    |alert()
        .id('test-batch')
        .message('{{ .ID }} got: {{ index .Fields "value" }}')
        .crit(lambda: "value" > 2.0)
        .log('` + tmpDir + `/alert.log')
`
	_, err = cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Replay the query straight into the task with recording-time semantics.
	replay, err := cli.ReplayQuery(client.ReplayQueryOptions{
		ID:            "replayid",
		Query:         "SELECT value from mydb.myrp.cpu",
		Task:          id,
		Clock:         client.Fast,
		RecordingTime: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	if exp, got := "/kapacitor/v1/replays/replayid", replay.Link.Href; exp != got {
		t.Errorf("unexpected replay.Link.Href got %s exp %s", got, exp)
	}
	// Wait for replay to finish.
	retry := 0
	for replay.Status == client.Running {
		time.Sleep(100 * time.Millisecond)
		replay, err = cli.Replay(replay.Link)
		if err != nil {
			t.Fatal(err)
		}
		retry++
		if retry > 10 {
			t.Fatal("failed to perfom replay")
		}
	}
	f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	// response mirrors the JSON alert entries written to alert.log.
	type response struct {
		ID      string          `json:"id"`
		Message string          `json:"message"`
		Time    time.Time       `json:"time"`
		Level   string          `json:"level"`
		Data    influxql.Result `json:"data"`
	}
	// Only the last two 2s batches cross the crit threshold; the alert
	// time is the last point of each batch.
	exp := []response{
		{
			ID:      "test-batch",
			Message: "test-batch got: 3",
			Time:    time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC),
			Level:   "CRITICAL",
			Data: influxql.Result{
				Series: imodels.Rows{
					{
						Name:    "cpu",
						Columns: []string{"time", "value"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
								2.0,
							},
							{
								time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
								3.0,
							},
						},
					},
				},
			},
		},
		{
			ID:      "test-batch",
			Message: "test-batch got: 4",
			Time:    time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC),
			Level:   "CRITICAL",
			Data: influxql.Result{
				Series: imodels.Rows{
					{
						Name:    "cpu",
						Columns: []string{"time", "value"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
								4.0,
							},
							{
								time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
								5.0,
							},
						},
					},
				},
			},
		},
	}
	dec := json.NewDecoder(f)
	got := make([]response, 0)
	for dec.More() {
		g := response{}
		dec.Decode(&g)
		got = append(got, g)
	}
	if !reflect.DeepEqual(exp, got) {
		t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp)
		t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[0].Data.Series[0], exp[0].Data.Series[0])
		t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[1].Data.Series[0], exp[1].Data.Series[0])
	}
	// ReplayQuery should not create any recording, only a replay.
	recordings, err := cli.ListRecordings(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 0, len(recordings); exp != got {
		t.Fatalf("unexpected recordings list:\ngot %v\nexp %v", got, exp)
	}
	replays, err := cli.ListReplays(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 1, len(replays); exp != got {
		t.Fatalf("unexpected replays list:\ngot %v\nexp %v", got, exp)
	}
	err = cli.DeleteReplay(replays[0].Link)
	if err != nil {
		t.Error(err)
	}
	replays, err = cli.ListReplays(nil)
	if err != nil {
		t.Error(err)
	}
	if exp, got := 0, len(replays); exp != got {
		t.Errorf("unexpected replays list:\ngot %v\nexp %v", got, exp)
	}
}
// Test for recording and replaying a stream query where data has missing fields and tags.
// The fake InfluxDB below returns rows where either field "a" or "b" is nil
// and tag values may be empty; the expected alert entries verify that nil
// fields and empty tags are dropped on replay.
func TestServer_RecordReplayQuery_Missing(t *testing.T) {
	c := NewConfig()
	c.InfluxDB[0].Enabled = true
	db := NewInfluxDB(func(q string) *iclient.Response {
		if len(q) > 6 && q[:6] == "SELECT" {
			r := &iclient.Response{
				Results: []iclient.Result{{
					Series: []imodels.Row{
						{
							Name:    "m",
							Tags:    map[string]string{"t1": "", "t2": ""},
							Columns: []string{"time", "a", "b"},
							Values: [][]interface{}{
								{
									time.Date(1971, 1, 1, 0, 0, 1, 0, time.UTC).Format(time.RFC3339Nano),
									1.0,
									nil,
								},
								{
									time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
									nil,
									2.0,
								},
								{
									time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC).Format(time.RFC3339Nano),
									nil,
									10.0,
								},
								{
									time.Date(1971, 1, 1, 0, 0, 11, 0, time.UTC).Format(time.RFC3339Nano),
									11.0,
									nil,
								},
							},
						},
						{
							Name:    "m",
							Tags:    map[string]string{"t1": "", "t2": "4"},
							Columns: []string{"time", "a", "b"},
							Values: [][]interface{}{
								{
									time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
									4.0,
									4.0,
								},
							},
						},
						{
							Name:    "m",
							Tags:    map[string]string{"t1": "", "t2": "7"},
							Columns: []string{"time", "a", "b"},
							Values: [][]interface{}{
								{
									time.Date(1971, 1, 1, 0, 0, 7, 0, time.UTC).Format(time.RFC3339Nano),
									nil,
									7.0,
								},
							},
						},
						{
							Name:    "m",
							Tags:    map[string]string{"t1": "3", "t2": ""},
							Columns: []string{"time", "a", "b"},
							Values: [][]interface{}{
								{
									time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
									3.0,
									3.0,
								},
							},
						},
						{
							Name:    "m",
							Tags:    map[string]string{"t1": "5", "t2": ""},
							Columns: []string{"time", "a", "b"},
							Values: [][]interface{}{
								{
									time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
									5.0,
									5.0,
								},
							},
						},
						{
							Name:    "m",
							Tags:    map[string]string{"t1": "6", "t2": ""},
							Columns: []string{"time", "a", "b"},
							Values: [][]interface{}{
								{
									time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC).Format(time.RFC3339Nano),
									nil,
									6.0,
								},
							},
						},
						{
							Name:    "m",
							Tags:    map[string]string{"t1": "8", "t2": ""},
							Columns: []string{"time", "a", "b"},
							Values: [][]interface{}{
								{
									time.Date(1971, 1, 1, 0, 0, 8, 0, time.UTC).Format(time.RFC3339Nano),
									nil,
									8.0,
								},
							},
						},
						{
							Name:    "m",
							Tags:    map[string]string{"t1": "9", "t2": ""},
							Columns: []string{"time", "a", "b"},
							Values: [][]interface{}{
								{
									time.Date(1971, 1, 1, 0, 0, 9, 0, time.UTC).Format(time.RFC3339Nano),
									nil,
									9.0,
								},
							},
						},
					},
				}},
			}
			return r
		}
		return nil
	})
	c.InfluxDB[0].URLs = []string{db.URL()}
	s := OpenServer(c)
	defer s.Close()
	cli := Client(s)
	id := "testStreamQueryRecordReplay"
	ttype := client.StreamTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	// setup temp dir for alert.log
	tmpDir, err := ioutil.TempDir("", "testStreamTaskRecordingReplay")
	if err != nil {
		t.Fatal(err)
	}
	// NOTE(review): cleanup is disabled, leaking the temp dir after the test;
	// re-enable `defer os.RemoveAll(tmpDir)` unless it was left off
	// deliberately for debugging — confirm.
	tick := `stream
    |from()
        .measurement('m')
    |log()
    |alert()
        .id('test-stream-query')
        .crit(lambda: TRUE)
        .details('')
        .log('` + tmpDir + `/alert.log')
`
	if _, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	}); err != nil {
		t.Fatal(err)
	}
	recording, err := cli.RecordQuery(client.RecordQueryOptions{
		ID:    "recordingid",
		Query: "SELECT * FROM mydb.myrp.m",
		Type:  client.StreamTask,
	})
	if err != nil {
		t.Fatal(err)
	}
	if exp, got := "/kapacitor/v1/recordings/recordingid", recording.Link.Href; exp != got {
		t.Errorf("unexpected recording.Link.Href got %s exp %s", got, exp)
	}
	// Wait for recording to finish.
	retry := 0
	for recording.Status == client.Running {
		time.Sleep(100 * time.Millisecond)
		recording, err = cli.Recording(recording.Link)
		if err != nil {
			t.Fatal(err)
		}
		retry++
		if retry > 10 {
			t.Fatal("failed to perfom recording")
		}
	}
	replay, err := cli.CreateReplay(client.CreateReplayOptions{
		Task:          id,
		Recording:     recording.ID,
		Clock:         client.Fast,
		RecordingTime: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	if exp, got := id, replay.Task; exp != got {
		t.Errorf("unexpected replay.Task got %s exp %s", got, exp)
	}
	// Wait for replay to finish.
	retry = 0
	for replay.Status == client.Running {
		time.Sleep(100 * time.Millisecond)
		replay, err = cli.Replay(replay.Link)
		if err != nil {
			t.Fatal(err)
		}
		retry++
		if retry > 10 {
			t.Fatal("failed to perfom replay")
		}
	}
	// Validate we got the data in the alert.log
	f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	// One alert per point in time order; nil fields and empty tag values
	// are absent from each entry's Columns/Tags.
	exp := []alert.Data{
		{
			ID:            "test-stream-query",
			Message:       "test-stream-query is CRITICAL",
			Time:          time.Date(1971, 1, 1, 0, 0, 1, 0, time.UTC),
			Level:         alert.Critical,
			PreviousLevel: alert.OK,
			Duration:      0 * time.Second,
			Recoverable:   true,
			Data: models.Result{
				Series: models.Rows{
					{
						Name:    "m",
						Columns: []string{"time", "a"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 1, 0, time.UTC),
								1.0,
							},
						},
					},
				},
			},
		},
		{
			ID:            "test-stream-query",
			Message:       "test-stream-query is CRITICAL",
			Time:          time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC),
			Level:         alert.Critical,
			PreviousLevel: alert.Critical,
			Duration:      1 * time.Second,
			Recoverable:   true,
			Data: models.Result{
				Series: models.Rows{
					{
						Name:    "m",
						Columns: []string{"time", "b"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC),
								2.0,
							},
						},
					},
				},
			},
		},
		{
			ID:            "test-stream-query",
			Message:       "test-stream-query is CRITICAL",
			Time:          time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC),
			Level:         alert.Critical,
			PreviousLevel: alert.Critical,
			Duration:      2 * time.Second,
			Recoverable:   true,
			Data: models.Result{
				Series: models.Rows{
					{
						Name:    "m",
						Tags:    map[string]string{"t1": "3"},
						Columns: []string{"time", "a", "b"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC),
								3.0,
								3.0,
							},
						},
					},
				},
			},
		},
		{
			ID:            "test-stream-query",
			Message:       "test-stream-query is CRITICAL",
			Time:          time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC),
			Level:         alert.Critical,
			PreviousLevel: alert.Critical,
			Duration:      3 * time.Second,
			Recoverable:   true,
			Data: models.Result{
				Series: models.Rows{
					{
						Name:    "m",
						Tags:    map[string]string{"t2": "4"},
						Columns: []string{"time", "a", "b"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC),
								4.0,
								4.0,
							},
						},
					},
				},
			},
		},
		{
			ID:            "test-stream-query",
			Message:       "test-stream-query is CRITICAL",
			Time:          time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC),
			Level:         alert.Critical,
			PreviousLevel: alert.Critical,
			Duration:      4 * time.Second,
			Recoverable:   true,
			Data: models.Result{
				Series: models.Rows{
					{
						Name:    "m",
						Tags:    map[string]string{"t1": "5"},
						Columns: []string{"time", "a", "b"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC),
								5.0,
								5.0,
							},
						},
					},
				},
			},
		},
		{
			ID:            "test-stream-query",
			Message:       "test-stream-query is CRITICAL",
			Time:          time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC),
			Level:         alert.Critical,
			PreviousLevel: alert.Critical,
			Duration:      5 * time.Second,
			Recoverable:   true,
			Data: models.Result{
				Series: models.Rows{
					{
						Name:    "m",
						Tags:    map[string]string{"t1": "6"},
						Columns: []string{"time", "b"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC),
								6.0,
							},
						},
					},
				},
			},
		},
		{
			ID:            "test-stream-query",
			Message:       "test-stream-query is CRITICAL",
			Time:          time.Date(1971, 1, 1, 0, 0, 7, 0, time.UTC),
			Level:         alert.Critical,
			PreviousLevel: alert.Critical,
			Duration:      6 * time.Second,
			Recoverable:   true,
			Data: models.Result{
				Series: models.Rows{
					{
						Name:    "m",
						Tags:    map[string]string{"t2": "7"},
						Columns: []string{"time", "b"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 7, 0, time.UTC),
								7.0,
							},
						},
					},
				},
			},
		},
		{
			ID:            "test-stream-query",
			Message:       "test-stream-query is CRITICAL",
			Time:          time.Date(1971, 1, 1, 0, 0, 8, 0, time.UTC),
			Level:         alert.Critical,
			PreviousLevel: alert.Critical,
			Duration:      7 * time.Second,
			Recoverable:   true,
			Data: models.Result{
				Series: models.Rows{
					{
						Name:    "m",
						Tags:    map[string]string{"t1": "8"},
						Columns: []string{"time", "b"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 8, 0, time.UTC),
								8.0,
							},
						},
					},
				},
			},
		},
		{
			ID:            "test-stream-query",
			Message:       "test-stream-query is CRITICAL",
			Time:          time.Date(1971, 1, 1, 0, 0, 9, 0, time.UTC),
			Level:         alert.Critical,
			PreviousLevel: alert.Critical,
			Duration:      8 * time.Second,
			Recoverable:   true,
			Data: models.Result{
				Series: models.Rows{
					{
						Name:    "m",
						Tags:    map[string]string{"t1": "9"},
						Columns: []string{"time", "b"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 9, 0, time.UTC),
								9.0,
							},
						},
					},
				},
			},
		},
		{
			ID:            "test-stream-query",
			Message:       "test-stream-query is CRITICAL",
			Time:          time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC),
			Level:         alert.Critical,
			PreviousLevel: alert.Critical,
			Duration:      9 * time.Second,
			Recoverable:   true,
			Data: models.Result{
				Series: models.Rows{
					{
						Name:    "m",
						Columns: []string{"time", "b"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC),
								10.0,
							},
						},
					},
				},
			},
		},
		{
			ID:            "test-stream-query",
			Message:       "test-stream-query is CRITICAL",
			Time:          time.Date(1971, 1, 1, 0, 0, 11, 0, time.UTC),
			Level:         alert.Critical,
			PreviousLevel: alert.Critical,
			Duration:      10 * time.Second,
			Recoverable:   true,
			Data: models.Result{
				Series: models.Rows{
					{
						Name:    "m",
						Columns: []string{"time", "a"},
						Values: [][]interface{}{
							{
								time.Date(1971, 1, 1, 0, 0, 11, 0, time.UTC),
								11.0,
							},
						},
					},
				},
			},
		},
	}
	dec := json.NewDecoder(f)
	var got []alert.Data
	for dec.More() {
		g := alert.Data{}
		dec.Decode(&g)
		got = append(got, g)
	}
	if !reflect.DeepEqual(exp, got) {
		t.Errorf("unexpected alert log:\ngot %+v\nexp %+v", got, exp)
	}
}
// If this test fails due to missing python dependencies, run 'INSTALL_PREFIX=/usr/local ./install-deps.sh' from the root directory of the
// kapacitor project.
//
// TestServer_UDFStreamAgents runs the moving_avg example UDF as a stream
// agent in both its Go and Python implementations and verifies each via
// testStreamAgent.
func TestServer_UDFStreamAgents(t *testing.T) {
	tdir, err := ioutil.TempDir("", "kapacitor_server_test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tdir)
	// One entry per agent implementation: a build step plus the UDF
	// process configuration Kapacitor should launch.
	agents := []struct {
		buildFunc func() error
		config    udf.FunctionConfig
	}{
		// Go
		{
			buildFunc: func() error {
				// Explicitly compile the binary.
				// We could just use 'go run' but I ran into race conditions
				// where 'go run' was not handing off to the compiled process in time
				// and I didn't care to dig into 'go run's specific behavior.
				cmd := exec.Command(
					"go",
					"build",
					"-o",
					filepath.Join(tdir, "movavg"+ExecutableSuffix),
					filepath.Join(udfDir, "agent/examples/moving_avg/moving_avg.go"),
				)
				out, err := cmd.CombinedOutput()
				if err != nil {
					t.Log(string(out))
					return err
				}
				return nil
			},
			config: udf.FunctionConfig{
				Prog:    filepath.Join(tdir, "movavg"),
				Timeout: toml.Duration(time.Minute),
			},
		},
		// Python
		{
			buildFunc: func() error { return nil },
			config: udf.FunctionConfig{
				Prog:    PythonExecutable,
				Args:    []string{"-u", filepath.Join(udfDir, "agent/examples/moving_avg/moving_avg.py")},
				Timeout: toml.Duration(time.Minute),
				// Prepend the agent's py package so the example can import it.
				Env: map[string]string{
					"PYTHONPATH": strings.Join(
						[]string{filepath.Join(udfDir, "agent/py"), os.Getenv("PYTHONPATH")},
						string(filepath.ListSeparator),
					),
				},
			},
		},
	}
	for _, agent := range agents {
		err := agent.buildFunc()
		if err != nil {
			t.Fatal(err)
		}
		c := NewConfig()
		c.UDF.Functions = map[string]udf.FunctionConfig{
			"movingAvg": agent.config,
		}
		testStreamAgent(t, c)
	}
}
// testStreamAgent drives a server configured with a 'movingAvg' UDF: it
// creates and enables a stream task that pipes points through @movingAvg,
// writes two groups of points, and asserts the httpOut result.
func testStreamAgent(t *testing.T, c *server.Config) {
	s := NewServer(c)
	err := s.Open()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	cli := Client(s)
	id := "testUDFTask"
	ttype := client.StreamTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	tick := `stream
    |from()
        .measurement('test')
        .groupBy('group')
    @movingAvg()
        .field('value')
        .size(10)
        .as('mean')
    |window()
        .period(11s)
        .every(11s)
    |last('mean').as('mean')
    |httpOut('moving_avg')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		Status: client.Enabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	endpoint := fmt.Sprintf("%s/tasks/%s/moving_avg", s.URL(), id)

	// Request data before any writes and expect null responses
	nullResponse := `{"series":null}`
	err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5)
	if err != nil {
		t.Error(err)
	}

	// 12s of second-precision points for groups a and b; the last values
	// drop so the 10-point moving averages become 0.9 and 1.9.
	points := `test,group=a value=1 0000000000
test,group=b value=2 0000000000
test,group=a value=1 0000000001
test,group=b value=2 0000000001
test,group=a value=1 0000000002
test,group=b value=2 0000000002
test,group=a value=1 0000000003
test,group=b value=2 0000000003
test,group=a value=1 0000000004
test,group=b value=2 0000000004
test,group=a value=1 0000000005
test,group=b value=2 0000000005
test,group=a value=1 0000000006
test,group=b value=2 0000000006
test,group=a value=1 0000000007
test,group=b value=2 0000000007
test,group=a value=1 0000000008
test,group=b value=2 0000000008
test,group=a value=1 0000000009
test,group=b value=2 0000000009
test,group=a value=0 0000000010
test,group=b value=1 0000000010
test,group=a value=0 0000000011
test,group=b value=0 0000000011
`
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", points, v)

	exp := `{"series":[{"name":"test","tags":{"group":"a"},"columns":["time","mean"],"values":[["1970-01-01T00:00:11Z",0.9]]},{"name":"test","tags":{"group":"b"},"columns":["time","mean"],"values":[["1970-01-01T00:00:11Z",1.9]]}]}`
	err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
	if err != nil {
		t.Error(err)
	}
}
// If this test fails due to missing python dependencies, run 'INSTALL_PREFIX=/usr/local ./install-deps.sh' from the root directory of the
// kapacitor project.
//
// TestServer_UDFStreamAgentsSocket runs the mirror example UDF over a unix
// socket in both its Go and Python implementations and verifies each via
// testStreamAgentSocket. Skipped on Windows (no unix sockets).
func TestServer_UDFStreamAgentsSocket(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("Skipping on windows as unix sockets are not available")
	}
	tdir, err := ioutil.TempDir("", "kapacitor_server_test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tdir)
	// One entry per agent implementation: startFunc prepares (and for Go,
	// compiles) the agent command; config points Kapacitor at its socket.
	agents := []struct {
		startFunc func() *exec.Cmd
		config    udf.FunctionConfig
	}{
		// Go
		{
			startFunc: func() *exec.Cmd {
				// Compile explicitly rather than 'go run' so the agent
				// process is ready as soon as it starts.
				cmd := exec.Command(
					"go",
					"build",
					"-o",
					filepath.Join(tdir, "mirror"+ExecutableSuffix),
					filepath.Join(udfDir, "agent/examples/mirror/mirror.go"),
				)
				out, err := cmd.CombinedOutput()
				if err != nil {
					t.Fatal(string(out))
				}
				cmd = exec.Command(
					filepath.Join(tdir, "mirror"),
					"-socket",
					filepath.Join(tdir, "mirror.go.sock"),
				)
				cmd.Stderr = os.Stderr
				return cmd
			},
			config: udf.FunctionConfig{
				Socket:  filepath.Join(tdir, "mirror.go.sock"),
				Timeout: toml.Duration(time.Minute),
			},
		},
		// Python
		{
			startFunc: func() *exec.Cmd {
				cmd := exec.Command(
					PythonExecutable,
					"-u",
					filepath.Join(udfDir, "agent/examples/mirror/mirror.py"),
					filepath.Join(tdir, "mirror.py.sock"),
				)
				cmd.Stderr = os.Stderr
				// Prepend the agent's py package so the example can import it.
				env := os.Environ()
				env = append(env, fmt.Sprintf(
					"%s=%s",
					"PYTHONPATH",
					strings.Join(
						[]string{filepath.Join(udfDir, "agent/py"), os.Getenv("PYTHONPATH")},
						string(filepath.ListSeparator),
					),
				))
				cmd.Env = env
				return cmd
			},
			config: udf.FunctionConfig{
				Socket:  filepath.Join(tdir, "mirror.py.sock"),
				Timeout: toml.Duration(time.Minute),
			},
		},
	}
	for _, agent := range agents {
		cmd := agent.startFunc()
		// BUG FIX: the original discarded cmd.Start()'s error and re-checked
		// the stale err from ioutil.TempDir above. Check Start's own error,
		// and only defer the Interrupt once the process actually exists
		// (cmd.Process is nil when Start fails).
		if err := cmd.Start(); err != nil {
			t.Fatal(err)
		}
		defer cmd.Process.Signal(os.Interrupt)
		c := NewConfig()
		c.UDF.Functions = map[string]udf.FunctionConfig{
			"mirror": agent.config,
		}
		testStreamAgentSocket(t, c)
	}
}
// testStreamAgentSocket drives a server configured with a socket-based
// 'mirror' UDF: it creates and enables a stream task that pipes points
// through @mirror, writes one group of points, and asserts the count.
func testStreamAgentSocket(t *testing.T, c *server.Config) {
	s := NewServer(c)
	err := s.Open()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	cli := Client(s)
	id := "testUDFTask"
	ttype := client.StreamTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	tick := `stream
    |from()
        .measurement('test')
        .groupBy('group')
    @mirror()
    |window()
        .period(10s)
        .every(10s)
    |count('value')
    |httpOut('count')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		Status: client.Enabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id)

	// Request data before any writes and expect null responses
	nullResponse := `{"series":null}`
	err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5)
	if err != nil {
		t.Error(err)
	}

	// Ten points land in the first 10s window; the last two fall outside it.
	points := `test,group=a value=1 0000000000
test,group=a value=1 0000000001
test,group=a value=1 0000000002
test,group=a value=1 0000000003
test,group=a value=1 0000000004
test,group=a value=1 0000000005
test,group=a value=1 0000000006
test,group=a value=1 0000000007
test,group=a value=1 0000000008
test,group=a value=1 0000000009
test,group=a value=0 0000000010
test,group=a value=0 0000000011
`
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", points, v)

	exp := `{"series":[{"name":"test","tags":{"group":"a"},"columns":["time","count"],"values":[["1970-01-01T00:00:10Z",10]]}]}`
	err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
	if err != nil {
		t.Error(err)
	}
}
// If this test fails due to missing python dependencies, run 'INSTALL_PREFIX=/usr/local ./install-deps.sh' from the root directory of the
// kapacitor project.
//
// TestServer_UDFBatchAgents runs the outliers example UDF as a batch agent
// in both its Go and Python implementations and verifies each via
// testBatchAgent.
func TestServer_UDFBatchAgents(t *testing.T) {
	tdir, err := ioutil.TempDir("", "kapacitor_server_test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tdir)
	// One entry per agent implementation: a build step plus the UDF
	// process configuration Kapacitor should launch.
	agents := []struct {
		buildFunc func() error
		config    udf.FunctionConfig
	}{
		// Go
		{
			buildFunc: func() error {
				// Explicitly compile the binary.
				// We could just use 'go run' but I ran into race conditions
				// where 'go run' was not handing off to the compiled process in time
				// and I didn't care to dig into 'go run's specific behavior.
				cmd := exec.Command(
					"go",
					"build",
					"-o",
					filepath.Join(tdir, "outliers"+ExecutableSuffix),
					filepath.Join(udfDir, "agent/examples/outliers/outliers.go"),
				)
				out, err := cmd.CombinedOutput()
				if err != nil {
					t.Log(string(out))
					return err
				}
				return nil
			},
			config: udf.FunctionConfig{
				Prog:    filepath.Join(tdir, "outliers"),
				Timeout: toml.Duration(time.Minute),
			},
		},
		// Python
		{
			buildFunc: func() error { return nil },
			config: udf.FunctionConfig{
				Prog:    PythonExecutable,
				Args:    []string{"-u", filepath.Join(udfDir, "agent/examples/outliers/outliers.py")},
				Timeout: toml.Duration(time.Minute),
				// Prepend the agent's py package so the example can import it.
				Env: map[string]string{
					"PYTHONPATH": strings.Join(
						[]string{filepath.Join(udfDir, "agent/py"), os.Getenv("PYTHONPATH")},
						string(filepath.ListSeparator),
					),
				},
			},
		},
	}
	for _, agent := range agents {
		err := agent.buildFunc()
		if err != nil {
			t.Fatal(err)
		}
		c := NewConfig()
		c.UDF.Functions = map[string]udf.FunctionConfig{
			"outliers": agent.config,
		}
		testBatchAgent(t, c)
	}
}
// testBatchAgent drives a server configured with an 'outliers' UDF batch
// task. The fake InfluxDB answers the first two batch queries with shuffled
// fixed data (outliers included) and pushes each query's stop time onto
// stopTimeC so the expected httpOut payload can be built; the UDF should
// strip the outliers, leaving 5 points per group.
func testBatchAgent(t *testing.T, c *server.Config) {
	count := 0
	stopTimeC := make(chan time.Time, 2)
	db := NewInfluxDB(func(q string) *iclient.Response {
		// Parse the query to extract the stop time from the WHERE clause.
		stmt, err := influxql.ParseStatement(q)
		if err != nil {
			return &iclient.Response{Err: err.Error()}
		}
		slct, ok := stmt.(*influxql.SelectStatement)
		if !ok {
			return nil
		}
		cond, ok := slct.Condition.(*influxql.BinaryExpr)
		if !ok {
			return &iclient.Response{Err: "expected select condition to be binary expression"}
		}
		stopTimeExpr, ok := cond.RHS.(*influxql.BinaryExpr)
		if !ok {
			return &iclient.Response{Err: "expected select condition rhs to be binary expression"}
		}
		stopTL, ok := stopTimeExpr.RHS.(*influxql.StringLiteral)
		if !ok {
			return &iclient.Response{Err: "expected select condition rhs to be string literal"}
		}
		count++
		switch count {
		case 1, 2:
			stopTime, err := time.Parse(time.RFC3339Nano, stopTL.Val)
			if err != nil {
				return &iclient.Response{Err: err.Error()}
			}
			stopTimeC <- stopTime
			// 20 values: a tight cluster plus clear outliers the UDF
			// is expected to remove.
			data := []float64{
				5,
				6,
				7,
				13,
				33,
				35,
				36,
				45,
				46,
				47,
				48,
				50,
				51,
				52,
				53,
				54,
				80,
				85,
				90,
				100,
			}
			// Shuffle data using count as seed.
			// Data order should not effect the result.
			r := rand.New(rand.NewSource(int64(count)))
			for i := range data {
				j := r.Intn(i + 1)
				data[i], data[j] = data[j], data[i]
			}
			// Create set values with time from shuffled data.
			values := make([][]interface{}, len(data))
			for i, value := range data {
				values[i] = []interface{}{
					stopTime.Add(time.Duration(i-len(data)) * time.Millisecond).Format(time.RFC3339Nano),
					value,
				}
			}
			return &iclient.Response{
				Results: []iclient.Result{{
					Series: []imodels.Row{{
						Name:    "cpu",
						Columns: []string{"time", "value"},
						Tags: map[string]string{
							// Alternate group tag per query.
							"count": strconv.FormatInt(int64(count%2), 10),
						},
						Values: values,
					}},
				}},
			}
		default:
			return nil
		}
	})
	c.InfluxDB[0].URLs = []string{db.URL()}
	c.InfluxDB[0].Enabled = true
	s := NewServer(c)
	err := s.Open()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	cli := Client(s)
	id := "testUDFTask"
	ttype := client.BatchTask
	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	tick := `batch
    |query(' SELECT value from mydb.myrp.cpu ')
        .period(5ms)
        .every(5ms)
        .groupBy('count')
    @outliers()
        .field('value')
        .scale(1.5)
    |count('value')
    |httpOut('count')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		Status: client.Enabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Collect the two query stop times, each within a 100ms deadline.
	// FIX: the original allocated a time.NewTicker per iteration with a
	// defer-ed Stop inside the loop — the defers only run at function exit
	// and a repeating Ticker is overkill for a one-shot deadline. Use a
	// one-shot Timer stopped within the iteration instead.
	stopTimes := make([]time.Time, 2)
	for i := range stopTimes {
		timeout := time.NewTimer(100 * time.Millisecond)
		select {
		case <-timeout.C:
			t.Fatal("timed out waiting for query")
		case stopTime := <-stopTimeC:
			stopTimes[i] = stopTime
		}
		timeout.Stop()
	}
	endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id)
	// After outlier removal each group should retain exactly 5 points.
	exp := fmt.Sprintf(
		`{"series":[{"name":"cpu","tags":{"count":"1"},"columns":["time","count"],"values":[["%s",5]]},{"name":"cpu","tags":{"count":"0"},"columns":["time","count"],"values":[["%s",5]]}]}`,
		stopTimes[0].Format(time.RFC3339Nano),
		stopTimes[1].Format(time.RFC3339Nano),
	)
	err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*50)
	if err != nil {
		t.Error(err)
	}
	_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		Status: client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
}
// TestServer_CreateTask_Defaults POSTs a raw JSON task definition to the
// /tasks endpoint (bypassing the client's option defaults) and verifies the
// server fills in the expected defaults (stream type, disabled status) and
// compiles the TICKscript to the expected DOT graph.
func TestServer_CreateTask_Defaults(t *testing.T) {
	s, cli := OpenDefaultServer()
	// FIX: the original leaked the server; sibling tests (e.g.
	// TestServer_CreateTask_ValidIDs) close it.
	defer s.Close()
	baseURL := s.URL()

	body := `
{
    "id" : "TASK_ID",
    "type" : "stream",
    "dbrps": [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}],
    "script": "stream\n    |from()\n        .measurement('cpu')\n"
}`
	resp, err := http.Post(baseURL+"/tasks", "application/json", strings.NewReader(body))
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if exp, got := http.StatusOK, resp.StatusCode; exp != got {
		t.Errorf("unexpected status code, got %d exp %d", got, exp)
	}

	// Fetch the task back through the client and verify every field.
	id := "TASK_ID"
	tick := "stream\n    |from()\n        .measurement('cpu')\n"
	dbrps := []client.DBRP{
		{
			Database:        "DATABASE_NAME",
			RetentionPolicy: "RP_NAME",
		},
	}
	ti, err := cli.Task(cli.TaskLink(id), nil)
	if err != nil {
		t.Fatal(err)
	}

	if ti.Error != "" {
		t.Fatal(ti.Error)
	}
	if ti.ID != id {
		t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
	}
	if ti.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
	}
	if ti.Status != client.Disabled {
		t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
	}
	if !reflect.DeepEqual(ti.DBRPs, dbrps) {
		t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
	}
	if ti.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
	}
	dot := "digraph TASK_ID {\nstream0 -> from1;\n}"
	if ti.Dot != dot {
		t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
	}
}
// TestServer_ListTask_Defaults creates one task via the client and verifies
// that a plain GET /tasks (no query parameters) lists it with all of its
// fields intact.
func TestServer_ListTask_Defaults(t *testing.T) {
	s, cli := OpenDefaultServer()
	// The server was previously leaked; shut it down when the test ends.
	defer s.Close()
	baseURL := s.URL()

	dbrps := []client.DBRP{{
		Database:        "mydb",
		RetentionPolicy: "myrp",
	}}
	id := "task_id"
	tick := "stream\n    |from()\n"
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         id,
		Type:       client.StreamTask,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}

	resp, err := http.Get(baseURL + "/tasks")
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if exp, got := http.StatusOK, resp.StatusCode; exp != got {
		t.Errorf("unexpected status code, got %d exp %d", got, exp)
	}

	// Response type
	type response struct {
		Tasks []client.Task `json:"tasks"`
	}
	tasks := response{}
	// Fail loudly on malformed JSON instead of silently checking a
	// zero-value response (the Decode error was previously ignored).
	if err := json.NewDecoder(resp.Body).Decode(&tasks); err != nil {
		t.Fatal(err)
	}
	if exp, got := 1, len(tasks.Tasks); exp != got {
		t.Fatalf("unexpected tasks count, got %d exp %d", got, exp)
	}
	task = tasks.Tasks[0]
	if task.ID != id {
		t.Fatalf("unexpected id got %s exp %s", task.ID, id)
	}
	if task.Type != client.StreamTask {
		t.Fatalf("unexpected type got %v exp %v", task.Type, client.StreamTask)
	}
	if task.Status != client.Disabled {
		t.Fatalf("unexpected status got %v exp %v", task.Status, client.Disabled)
	}
	if !reflect.DeepEqual(task.DBRPs, dbrps) {
		t.Fatalf("unexpected dbrps got %s exp %s", task.DBRPs, dbrps)
	}
	if task.TICKscript != tick {
		t.Fatalf("unexpected TICKscript got %s exp %s", task.TICKscript, tick)
	}
	dot := "digraph task_id {\nstream0 -> from1;\n}"
	if task.Dot != dot {
		t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", task.Dot, dot)
	}
}
// TestServer_CreateTask_ValidIDs checks task ID validation: IDs made of
// letters (including non-ASCII), digits, '-', '.' and '_' are accepted,
// while IDs containing spaces, '*' or '/' are rejected with the exact
// validation message. Accepted tasks are read back and verified in full.
func TestServer_CreateTask_ValidIDs(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()

	testCases := []struct {
		id    string
		valid bool
	}{
		{id: "task_id", valid: true},
		{id: "task_id7", valid: true},
		{id: "task.id7", valid: true},
		{id: "task-id7", valid: true},
		{id: "tásk7", valid: true},
		{id: "invalid id", valid: false},
		{id: "invalid*id", valid: false},
		{id: "task/id7", valid: false},
	}
	for _, tc := range testCases {
		id := tc.id
		ttype := client.StreamTask
		dbrps := []client.DBRP{
			{
				Database:        "mydb",
				RetentionPolicy: "myrp",
			},
		}
		tick := `stream
    |from()
        .measurement('test')
`
		task, err := cli.CreateTask(client.CreateTaskOptions{
			ID:         id,
			Type:       ttype,
			DBRPs:      dbrps,
			TICKscript: tick,
			Status:     client.Disabled,
		})
		if !tc.valid {
			exp := fmt.Sprintf("task ID must contain only letters, numbers, '-', '.' and '_'. %q", id)
			// Guard against a nil error: if the server wrongly accepts an
			// invalid ID, report a failure instead of panicking on
			// err.Error().
			if err == nil {
				t.Errorf("expected error for invalid task ID %q", id)
			} else if err.Error() != exp {
				t.Errorf("unexpected error: got %s exp %s", err.Error(), exp)
			}
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		ti, err := cli.Task(task.Link, nil)
		if err != nil {
			t.Fatal(err)
		}
		if ti.Error != "" {
			t.Fatal(ti.Error)
		}
		if ti.ID != id {
			t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
		}
		if ti.Type != client.StreamTask {
			t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
		}
		if ti.Status != client.Disabled {
			t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
		}
		if !reflect.DeepEqual(ti.DBRPs, dbrps) {
			t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
		}
		if ti.TICKscript != tick {
			t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
		}
		// The DOT graph name follows the task ID; edge layout is fixed by
		// the two-node pipeline above.
		dot := "digraph " + id + " {\nstream0 -> from1;\n}"
		if ti.Dot != dot {
			t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
		}
	}
}
// TestServer_CreateRecording_ValidIDs checks recording ID validation against
// a pre-created task: IDs made of letters (including non-ASCII), digits,
// '-', '.' and '_' are accepted; IDs containing spaces, '*' or '/' are
// rejected with the exact validation message.
func TestServer_CreateRecording_ValidIDs(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()

	// A recording needs an existing task to record against.
	ttype := client.StreamTask
	dbrps := []client.DBRP{
		{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		},
	}
	tick := `stream
    |from()
        .measurement('test')
`
	_, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         "task_id",
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}

	testCases := []struct {
		id    string
		valid bool
	}{
		{id: "recording_id", valid: true},
		{id: "recording_id7", valid: true},
		{id: "recording.id7", valid: true},
		{id: "recording-id7", valid: true},
		{id: "récording7", valid: true},
		{id: "invalid id", valid: false},
		{id: "invalid*id", valid: false},
		{id: "recording/id7", valid: false},
	}
	for _, tc := range testCases {
		id := tc.id
		recording, err := cli.RecordStream(client.RecordStreamOptions{
			ID:   id,
			Task: "task_id",
			Stop: time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC),
		})
		if !tc.valid {
			exp := fmt.Sprintf("recording ID must contain only letters, numbers, '-', '.' and '_'. %q", id)
			// Guard against a nil error: if the server wrongly accepts an
			// invalid ID, report a failure instead of panicking on
			// err.Error().
			if err == nil {
				t.Errorf("expected error for invalid recording ID %q", id)
			} else if err.Error() != exp {
				t.Errorf("unexpected error: got %s exp %s", err.Error(), exp)
			}
			continue
		}
		if err != nil {
			t.Fatal(err)
		}
		// Round-trip the recording to confirm it was stored under its ID.
		recording, err = cli.Recording(recording.Link)
		if err != nil {
			t.Fatal(err)
		}
		if exp, got := id, recording.ID; got != exp {
			t.Errorf("unexpected recording ID got %s exp %s", got, exp)
		}
	}
}
// TestServer_CreateReplay_ValidIDs checks replay ID validation against a
// pre-created task and recording: IDs made of letters (including non-ASCII),
// digits, '-', '.' and '_' are accepted; IDs containing spaces, '*' or '/'
// are rejected with the exact validation message.
func TestServer_CreateReplay_ValidIDs(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()

	// A replay needs both an existing task and an existing recording.
	ttype := client.StreamTask
	dbrps := []client.DBRP{
		{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		},
	}
	tick := `stream
    |from()
        .measurement('test')
`
	_, err := cli.CreateTask(client.CreateTaskOptions{
		ID:         "task_id",
		Type:       ttype,
		DBRPs:      dbrps,
		TICKscript: tick,
		Status:     client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	_, err = cli.RecordStream(client.RecordStreamOptions{
		ID:   "recording_id",
		Task: "task_id",
		Stop: time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC),
	})
	if err != nil {
		t.Fatal(err)
	}

	testCases := []struct {
		id    string
		valid bool
	}{
		{id: "replay_id", valid: true},
		{id: "replay_id7", valid: true},
		{id: "replay.id7", valid: true},
		{id: "replay-id7", valid: true},
		{id: "réplay7", valid: true},
		{id: "invalid id", valid: false},
		{id: "invalid*id", valid: false},
		{id: "replay/id7", valid: false},
	}
	for _, tc := range testCases {
		id := tc.id
		replay, err := cli.CreateReplay(client.CreateReplayOptions{
			ID:            id,
			Task:          "task_id",
			Recording:     "recording_id",
			Clock:         client.Fast,
			RecordingTime: true,
		})
		if !tc.valid {
			exp := fmt.Sprintf("replay ID must contain only letters, numbers, '-', '.' and '_'. %q", id)
			// Guard against a nil error: if the server wrongly accepts an
			// invalid ID, report a failure instead of panicking on
			// err.Error().
			if err == nil {
				t.Errorf("expected error for invalid replay ID %q", id)
			} else if err.Error() != exp {
				t.Errorf("unexpected error: got %s exp %s", err.Error(), exp)
			}
			continue
		}
		if err != nil {
			t.Fatal(err)
		}
		// Round-trip the replay to confirm it was stored under its ID.
		replay, err = cli.Replay(replay.Link)
		if err != nil {
			t.Fatal(err)
		}
		if exp, got := id, replay.ID; got != exp {
			t.Errorf("unexpected replay ID got %s exp %s", got, exp)
		}
	}
}
func TestServer_UpdateConfig(t *testing.T) {
type updateAction struct {
element string
updateAction client.ConfigUpdateAction
expSection client.ConfigSection
expElement client.ConfigElement
}
db := NewInfluxDB(func(q string) *iclient.Response {
return &iclient.Response{}
})
testCases := []struct {
section string
element string
setDefaults func(*server.Config)
expDefaultSection client.ConfigSection
expDefaultElement client.ConfigElement
updates []updateAction
}{
{
section: "influxdb",
element: "default",
setDefaults: func(c *server.Config) {
c.InfluxDB[0].Enabled = true
c.InfluxDB[0].Username = "bob"
c.InfluxDB[0].Password = "secret"
c.InfluxDB[0].URLs = []string{db.URL()}
// Set really long timeout since we shouldn't hit it
c.InfluxDB[0].StartUpTimeout = toml.Duration(time.Hour)
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": false,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "http",
"subscription-mode": "cluster",
"subscriptions": nil,
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "bob",
},
Redacted: []string{
"password",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": false,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "http",
"subscription-mode": "cluster",
"subscriptions": nil,
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "bob",
},
Redacted: []string{
"password",
},
},
updates: []updateAction{
{
// Set Invalid URL to make sure we can fix it without waiting for connection timeouts
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"urls": []string{"http://192.0.2.0:8086"},
},
},
element: "default",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": false,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "http",
"subscription-mode": "cluster",
"subscriptions": nil,
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{"http://192.0.2.0:8086"},
"username": "bob",
},
Redacted: []string{
"password",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": false,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "http",
"subscription-mode": "cluster",
"subscriptions": nil,
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{"http://192.0.2.0:8086"},
"username": "bob",
},
Redacted: []string{
"password",
},
},
},
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"default": true,
"subscription-protocol": "https",
"subscriptions": map[string][]string{"_internal": []string{"monitor"}},
},
},
element: "default",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": true,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "https",
"subscription-mode": "cluster",
"subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}},
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{"http://192.0.2.0:8086"},
"username": "bob",
},
Redacted: []string{
"password",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": true,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "https",
"subscription-mode": "cluster",
"subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}},
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{"http://192.0.2.0:8086"},
"username": "bob",
},
Redacted: []string{
"password",
},
},
},
{
updateAction: client.ConfigUpdateAction{
Delete: []string{"urls"},
},
element: "default",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": true,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "https",
"subscription-mode": "cluster",
"subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}},
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "bob",
},
Redacted: []string{
"password",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": true,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "https",
"subscription-mode": "cluster",
"subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}},
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "bob",
},
Redacted: []string{
"password",
},
},
},
{
updateAction: client.ConfigUpdateAction{
Add: map[string]interface{}{
"name": "new",
"urls": []string{db.URL()},
},
},
element: "new",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"},
Elements: []client.ConfigElement{
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": true,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "https",
"subscription-mode": "cluster",
"subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}},
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "bob",
},
Redacted: []string{
"password",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/new"},
Options: map[string]interface{}{
"default": false,
"disable-subscriptions": false,
"enabled": false,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "new",
"password": false,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "5m0s",
"subscription-protocol": "http",
"subscription-mode": "cluster",
"subscriptions": nil,
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "",
},
Redacted: []string{
"password",
},
},
},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/new"},
Options: map[string]interface{}{
"default": false,
"disable-subscriptions": false,
"enabled": false,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "new",
"password": false,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "5m0s",
"subscription-protocol": "http",
"subscriptions": nil,
"subscription-mode": "cluster",
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "",
},
Redacted: []string{
"password",
},
},
},
},
},
{
section: "alertmanager",
setDefaults: func(c *server.Config) {
c.Alertmanager.URL = "http://alertmanager.example.com"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alertmanager"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alertmanager/"},
Options: map[string]interface{}{
"enabled": false,
"environment": "",
"origin": "",
"url": "http://alertmanager.example.com",
"insecure-skip-verify": false,
"timeout": "0s",
},
},
},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alertmanager/"},
Options: map[string]interface{}{
"enabled": false,
"environment": "",
"origin": "",
"url": "http://alertmanager.example.com",
"insecure-skip-verify": false,
"timeout": "0s",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"origin": "kapacitor",
"timeout": "3h",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alertmanager"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alertmanager/"},
Options: map[string]interface{}{
"enabled": false,
"environment": "",
"origin": "kapacitor",
"url": "http://alertmanager.example.com",
"insecure-skip-verify": false,
"timeout": "3h0m0s",
},
Redacted: []string{
"token",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alertmanager/"},
Options: map[string]interface{}{
"enabled": false,
"environment": "",
"origin": "kapacitor",
"url": "http://alertmanager.example.com",
"insecure-skip-verify": false,
"timeout": "3h0m0s",
},
},
},
},
},
{
section: "httppost",
element: "test",
setDefaults: func(c *server.Config) {
apc := httppost.Config{
Endpoint: "test",
URL: "http://httppost.example.com",
Headers: map[string]string{
"testing": "works",
},
}
c.HTTPPost = httppost.Configs{apc}
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/httppost"},
Elements: []client.ConfigElement{
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/httppost/test"},
Options: map[string]interface{}{
"endpoint": "test",
"url": "http://httppost.example.com",
"headers": map[string]interface{}{
"testing": "works",
},
"basic-auth": false,
"alert-template": "",
"alert-template-file": "",
"row-template": "",
"row-template-file": "",
},
Redacted: []string{
"basic-auth",
}},
},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/httppost/test"},
Options: map[string]interface{}{
"endpoint": "test",
"url": "http://httppost.example.com",
"headers": map[string]interface{}{
"testing": "works",
},
"basic-auth": false,
"alert-template": "",
"alert-template-file": "",
"row-template": "",
"row-template-file": "",
},
Redacted: []string{
"basic-auth",
},
},
updates: []updateAction{
{
element: "test",
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"headers": map[string]string{
"testing": "more",
},
"basic-auth": httppost.BasicAuth{
Username: "usr",
Password: "pass",
},
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/httppost"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/httppost/test"},
Options: map[string]interface{}{
"endpoint": "test",
"url": "http://httppost.example.com",
"headers": map[string]interface{}{
"testing": "more",
},
"basic-auth": true,
"alert-template": "",
"alert-template-file": "",
"row-template": "",
"row-template-file": "",
},
Redacted: []string{
"basic-auth",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/httppost/test"},
Options: map[string]interface{}{
"endpoint": "test",
"url": "http://httppost.example.com",
"headers": map[string]interface{}{
"testing": "more",
},
"basic-auth": true,
"alert-template": "",
"alert-template-file": "",
"row-template": "",
"row-template-file": "",
},
Redacted: []string{
"basic-auth",
},
},
},
},
},
{
section: "pushover",
setDefaults: func(c *server.Config) {
c.Pushover.URL = "http://pushover.example.com"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pushover"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pushover/"},
Options: map[string]interface{}{
"enabled": false,
"token": false,
"user-key": false,
"url": "http://pushover.example.com",
},
Redacted: []string{
"token",
"user-key",
}},
},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pushover/"},
Options: map[string]interface{}{
"enabled": false,
"token": false,
"user-key": false,
"url": "http://pushover.example.com",
},
Redacted: []string{
"token",
"user-key",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"token": "token",
"user-key": "kapacitor",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pushover"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pushover/"},
Options: map[string]interface{}{
"enabled": false,
"user-key": true,
"token": true,
"url": "http://pushover.example.com",
},
Redacted: []string{
"token",
"user-key",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pushover/"},
Options: map[string]interface{}{
"enabled": false,
"user-key": true,
"token": true,
"url": "http://pushover.example.com",
},
Redacted: []string{
"token",
"user-key",
},
},
},
},
},
{
section: "kubernetes",
setDefaults: func(c *server.Config) {
c.Kubernetes = k8s.Configs{k8s.NewConfig()}
c.Kubernetes[0].APIServers = []string{"http://localhost:80001"}
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes/"},
Options: map[string]interface{}{
"id": "",
"api-servers": []interface{}{"http://localhost:80001"},
"ca-path": "",
"enabled": false,
"in-cluster": false,
"namespace": "",
"token": false,
"resource": "",
},
Redacted: []string{
"token",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes/"},
Options: map[string]interface{}{
"id": "",
"api-servers": []interface{}{"http://localhost:80001"},
"ca-path": "",
"enabled": false,
"in-cluster": false,
"namespace": "",
"token": false,
"resource": "",
},
Redacted: []string{
"token",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"token": "secret",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes/"},
Options: map[string]interface{}{
"id": "",
"api-servers": []interface{}{"http://localhost:80001"},
"ca-path": "",
"enabled": false,
"in-cluster": false,
"namespace": "",
"token": true,
"resource": "",
},
Redacted: []string{
"token",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes/"},
Options: map[string]interface{}{
"id": "",
"api-servers": []interface{}{"http://localhost:80001"},
"ca-path": "",
"enabled": false,
"in-cluster": false,
"namespace": "",
"token": true,
"resource": "",
},
Redacted: []string{
"token",
},
},
},
},
},
{
section: "hipchat",
setDefaults: func(c *server.Config) {
c.HipChat.URL = "http://hipchat.example.com"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"room": "",
"state-changes-only": false,
"token": false,
"url": "http://hipchat.example.com",
},
Redacted: []string{
"token",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"room": "",
"state-changes-only": false,
"token": false,
"url": "http://hipchat.example.com",
},
Redacted: []string{
"token",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"token": "token",
"room": "kapacitor",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"room": "kapacitor",
"state-changes-only": false,
"token": true,
"url": "http://hipchat.example.com",
},
Redacted: []string{
"token",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"room": "kapacitor",
"state-changes-only": false,
"token": true,
"url": "http://hipchat.example.com",
},
Redacted: []string{
"token",
},
},
},
},
},
{
section: "mqtt",
setDefaults: func(c *server.Config) {
cfg := &mqtt.Config{
Name: "default",
URL: "tcp://mqtt.example.com:1883",
}
cfg.SetNewClientF(mqtttest.NewClient)
c.MQTT = mqtt.Configs{
*cfg,
}
},
element: "default",
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/mqtt"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/mqtt/default"},
Options: map[string]interface{}{
"enabled": false,
"name": "default",
"default": false,
"url": "tcp://mqtt.example.com:1883",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
"client-id": "",
"username": "",
"password": false,
},
Redacted: []string{
"password",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/mqtt/default"},
Options: map[string]interface{}{
"enabled": false,
"name": "default",
"default": false,
"url": "tcp://mqtt.example.com:1883",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
"client-id": "",
"username": "",
"password": false,
},
Redacted: []string{
"password",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"client-id": "kapacitor-default",
"password": "super secret",
},
},
element: "default",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/mqtt"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/mqtt/default"},
Options: map[string]interface{}{
"enabled": false,
"name": "default",
"default": false,
"url": "tcp://mqtt.example.com:1883",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
"client-id": "kapacitor-default",
"username": "",
"password": true,
},
Redacted: []string{
"password",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/mqtt/default"},
Options: map[string]interface{}{
"enabled": false,
"name": "default",
"default": false,
"url": "tcp://mqtt.example.com:1883",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
"client-id": "kapacitor-default",
"username": "",
"password": true,
},
Redacted: []string{
"password",
},
},
},
},
},
{
section: "opsgenie",
setDefaults: func(c *server.Config) {
c.OpsGenie.URL = "http://opsgenie.example.com"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie/"},
Options: map[string]interface{}{
"api-key": false,
"enabled": false,
"global": false,
"recipients": nil,
"recovery_url": opsgenie.DefaultOpsGenieRecoveryURL,
"teams": nil,
"url": "http://opsgenie.example.com",
},
Redacted: []string{
"api-key",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie/"},
Options: map[string]interface{}{
"api-key": false,
"enabled": false,
"global": false,
"recipients": nil,
"recovery_url": opsgenie.DefaultOpsGenieRecoveryURL,
"teams": nil,
"url": "http://opsgenie.example.com",
},
Redacted: []string{
"api-key",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"api-key": "token",
"global": true,
"teams": []string{"teamA", "teamB"},
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie/"},
Options: map[string]interface{}{
"api-key": true,
"enabled": false,
"global": true,
"recipients": nil,
"recovery_url": opsgenie.DefaultOpsGenieRecoveryURL,
"teams": []interface{}{"teamA", "teamB"},
"url": "http://opsgenie.example.com",
},
Redacted: []string{
"api-key",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie/"},
Options: map[string]interface{}{
"api-key": true,
"enabled": false,
"global": true,
"recipients": nil,
"recovery_url": opsgenie.DefaultOpsGenieRecoveryURL,
"teams": []interface{}{"teamA", "teamB"},
"url": "http://opsgenie.example.com",
},
Redacted: []string{
"api-key",
},
},
},
},
},
{
section: "opsgenie2",
setDefaults: func(c *server.Config) {
c.OpsGenie2.URL = "http://opsgenie2.example.com"
c.OpsGenie2.RecoveryAction = "notes"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie2"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie2/"},
Options: map[string]interface{}{
"api-key": false,
"enabled": false,
"global": false,
"recipients": nil,
"teams": nil,
"url": "http://opsgenie2.example.com",
"recovery_action": "notes",
},
Redacted: []string{
"api-key",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie2/"},
Options: map[string]interface{}{
"api-key": false,
"enabled": false,
"global": false,
"recipients": nil,
"teams": nil,
"url": "http://opsgenie2.example.com",
"recovery_action": "notes",
},
Redacted: []string{
"api-key",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"api-key": "token",
"global": true,
"teams": []string{"teamA", "teamB"},
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie2"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie2/"},
Options: map[string]interface{}{
"api-key": true,
"enabled": false,
"global": true,
"recipients": nil,
"teams": []interface{}{"teamA", "teamB"},
"url": "http://opsgenie2.example.com",
"recovery_action": "notes",
},
Redacted: []string{
"api-key",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie2/"},
Options: map[string]interface{}{
"api-key": true,
"enabled": false,
"global": true,
"recipients": nil,
"teams": []interface{}{"teamA", "teamB"},
"url": "http://opsgenie2.example.com",
"recovery_action": "notes",
},
Redacted: []string{
"api-key",
},
},
},
},
},
{
section: "pagerduty",
setDefaults: func(c *server.Config) {
c.PagerDuty.ServiceKey = "secret"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"service-key": true,
"url": pagerduty.DefaultPagerDutyAPIURL,
},
Redacted: []string{
"service-key",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"service-key": true,
"url": pagerduty.DefaultPagerDutyAPIURL,
},
Redacted: []string{
"service-key",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"service-key": "",
"enabled": true,
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty/"},
Options: map[string]interface{}{
"enabled": true,
"global": false,
"service-key": false,
"url": pagerduty.DefaultPagerDutyAPIURL,
},
Redacted: []string{
"service-key",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty/"},
Options: map[string]interface{}{
"enabled": true,
"global": false,
"service-key": false,
"url": pagerduty.DefaultPagerDutyAPIURL,
},
Redacted: []string{
"service-key",
},
},
},
},
},
{
section: "pagerduty2",
setDefaults: func(c *server.Config) {
c.PagerDuty2.RoutingKey = "secret"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty2"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty2/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"routing-key": true,
"url": pagerduty2.DefaultPagerDuty2APIURL,
},
Redacted: []string{
"routing-key",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty2/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"routing-key": true,
"url": pagerduty2.DefaultPagerDuty2APIURL,
},
Redacted: []string{
"routing-key",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"routing-key": "",
"enabled": true,
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty2"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty2/"},
Options: map[string]interface{}{
"enabled": true,
"global": false,
"routing-key": false,
"url": pagerduty2.DefaultPagerDuty2APIURL,
},
Redacted: []string{
"routing-key",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty2/"},
Options: map[string]interface{}{
"enabled": true,
"global": false,
"routing-key": false,
"url": pagerduty2.DefaultPagerDuty2APIURL,
},
Redacted: []string{
"routing-key",
},
},
},
},
},
{
section: "smtp",
setDefaults: func(c *server.Config) {
c.SMTP.Host = "smtp.example.com"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp/"},
Options: map[string]interface{}{
"enabled": false,
"from": "",
"global": false,
"host": "smtp.example.com",
"idle-timeout": "30s",
"no-verify": false,
"password": false,
"port": float64(25),
"state-changes-only": false,
"to": nil,
"username": "",
},
Redacted: []string{
"password",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp/"},
Options: map[string]interface{}{
"enabled": false,
"from": "",
"global": false,
"host": "smtp.example.com",
"idle-timeout": "30s",
"no-verify": false,
"password": false,
"port": float64(25),
"state-changes-only": false,
"to": nil,
"username": "",
},
Redacted: []string{
"password",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"idle-timeout": "1m0s",
"global": true,
"password": "secret",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp/"},
Options: map[string]interface{}{
"enabled": false,
"from": "",
"global": true,
"host": "smtp.example.com",
"idle-timeout": "1m0s",
"no-verify": false,
"password": true,
"port": float64(25),
"state-changes-only": false,
"to": nil,
"username": "",
},
Redacted: []string{
"password",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp/"},
Options: map[string]interface{}{
"enabled": false,
"from": "",
"global": true,
"host": "smtp.example.com",
"idle-timeout": "1m0s",
"no-verify": false,
"password": true,
"port": float64(25),
"state-changes-only": false,
"to": nil,
"username": "",
},
Redacted: []string{
"password",
},
},
},
},
},
{
section: "sensu",
setDefaults: func(c *server.Config) {
c.Sensu.Addr = "sensu.example.com:3000"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu/"},
Options: map[string]interface{}{
"addr": "sensu.example.com:3000",
"enabled": false,
"source": "Kapacitor",
"handlers": nil,
},
Redacted: nil,
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu/"},
Options: map[string]interface{}{
"addr": "sensu.example.com:3000",
"enabled": false,
"source": "Kapacitor",
"handlers": nil,
},
Redacted: nil,
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"addr": "sensu.local:3000",
"enabled": true,
"source": "Kapacitor",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu/"},
Options: map[string]interface{}{
"addr": "sensu.local:3000",
"enabled": true,
"source": "Kapacitor",
"handlers": nil,
},
Redacted: nil,
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu/"},
Options: map[string]interface{}{
"addr": "sensu.local:3000",
"enabled": true,
"source": "Kapacitor",
"handlers": nil,
},
Redacted: nil,
},
},
},
},
{
section: "slack",
setDefaults: func(c *server.Config) {
cfg := &slack.Config{
Global: true,
Default: true,
Username: slack.DefaultUsername,
}
c.Slack = slack.Configs{
*cfg,
}
},
element: "",
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"},
Options: map[string]interface{}{
"workspace": "",
"default": true,
"channel": "",
"enabled": false,
"global": true,
"icon-emoji": "",
"state-changes-only": false,
"url": false,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"},
Options: map[string]interface{}{
"workspace": "",
"default": true,
"channel": "",
"enabled": false,
"global": true,
"icon-emoji": "",
"state-changes-only": false,
"url": false,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Add: map[string]interface{}{
"workspace": "company_private",
"enabled": true,
"global": false,
"channel": "#general",
"username": slack.DefaultUsername,
"url": "http://slack.example.com/secret-token",
},
},
element: "company_private",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"},
Options: map[string]interface{}{
"workspace": "",
"default": true,
"channel": "",
"enabled": false,
"global": true,
"icon-emoji": "",
"state-changes-only": false,
"url": false,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/company_private"},
Options: map[string]interface{}{
"workspace": "company_private",
"channel": "#general",
"default": false,
"enabled": true,
"global": false,
"icon-emoji": "",
"state-changes-only": false,
"url": true,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/company_private"},
Options: map[string]interface{}{
"workspace": "company_private",
"channel": "#general",
"default": false,
"enabled": true,
"global": false,
"icon-emoji": "",
"state-changes-only": false,
"url": true,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
},
{
updateAction: client.ConfigUpdateAction{
Add: map[string]interface{}{
"workspace": "company_public",
"enabled": true,
"global": false,
"channel": "#general",
"username": slack.DefaultUsername,
"url": "http://slack.example.com/secret-token",
},
},
element: "company_public",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack"},
Elements: []client.ConfigElement{
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"},
Options: map[string]interface{}{
"workspace": "",
"default": true,
"channel": "",
"enabled": false,
"global": true,
"icon-emoji": "",
"state-changes-only": false,
"url": false,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/company_private"},
Options: map[string]interface{}{
"workspace": "company_private",
"channel": "#general",
"default": false,
"enabled": true,
"global": false,
"icon-emoji": "",
"state-changes-only": false,
"url": true,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/company_public"},
Options: map[string]interface{}{
"workspace": "company_public",
"channel": "#general",
"default": false,
"enabled": true,
"global": false,
"icon-emoji": "",
"state-changes-only": false,
"url": true,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/company_public"},
Options: map[string]interface{}{
"workspace": "company_public",
"channel": "#general",
"default": false,
"enabled": true,
"global": false,
"icon-emoji": "",
"state-changes-only": false,
"url": true,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
},
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"enabled": false,
"username": "testbot",
},
},
element: "company_public",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack"},
Elements: []client.ConfigElement{
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"},
Options: map[string]interface{}{
"workspace": "",
"default": true,
"channel": "",
"enabled": false,
"global": true,
"icon-emoji": "",
"state-changes-only": false,
"url": false,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/company_private"},
Options: map[string]interface{}{
"workspace": "company_private",
"channel": "#general",
"default": false,
"enabled": true,
"global": false,
"icon-emoji": "",
"state-changes-only": false,
"url": true,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/company_public"},
Options: map[string]interface{}{
"workspace": "company_public",
"channel": "#general",
"default": false,
"enabled": false,
"global": false,
"icon-emoji": "",
"state-changes-only": false,
"url": true,
"username": "testbot",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/company_public"},
Options: map[string]interface{}{
"workspace": "company_public",
"channel": "#general",
"default": false,
"enabled": false,
"global": false,
"icon-emoji": "",
"state-changes-only": false,
"url": true,
"username": "testbot",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
},
{
updateAction: client.ConfigUpdateAction{
Delete: []string{"username"},
},
element: "company_public",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack"},
Elements: []client.ConfigElement{
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"},
Options: map[string]interface{}{
"workspace": "",
"default": true,
"channel": "",
"enabled": false,
"global": true,
"icon-emoji": "",
"state-changes-only": false,
"url": false,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/company_private"},
Options: map[string]interface{}{
"workspace": "company_private",
"channel": "#general",
"default": false,
"enabled": true,
"global": false,
"icon-emoji": "",
"state-changes-only": false,
"url": true,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/company_public"},
Options: map[string]interface{}{
"workspace": "company_public",
"channel": "#general",
"default": false,
"enabled": false,
"global": false,
"icon-emoji": "",
"state-changes-only": false,
"url": true,
"username": "",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/company_public"},
Options: map[string]interface{}{
"workspace": "company_public",
"channel": "#general",
"default": false,
"enabled": false,
"global": false,
"icon-emoji": "",
"state-changes-only": false,
"url": true,
"username": "",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
},
},
},
{
section: "snmptrap",
setDefaults: func(c *server.Config) {
c.SNMPTrap.Community = "test"
c.SNMPTrap.Retries = 2.0
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/snmptrap"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/snmptrap/"},
Options: map[string]interface{}{
"addr": "localhost:162",
"enabled": false,
"community": true,
"retries": 2.0,
},
Redacted: []string{
"community",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/snmptrap/"},
Options: map[string]interface{}{
"addr": "localhost:162",
"enabled": false,
"community": true,
"retries": 2.0,
},
Redacted: []string{
"community",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"enabled": true,
"addr": "snmptrap.example.com:162",
"community": "public",
"retries": 1.0,
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/snmptrap"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/snmptrap/"},
Options: map[string]interface{}{
"addr": "snmptrap.example.com:162",
"enabled": true,
"community": true,
"retries": 1.0,
},
Redacted: []string{
"community",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/snmptrap/"},
Options: map[string]interface{}{
"addr": "snmptrap.example.com:162",
"enabled": true,
"community": true,
"retries": 1.0,
},
Redacted: []string{
"community",
},
},
},
},
},
{
section: "swarm",
setDefaults: func(c *server.Config) {
c.Swarm = swarm.Configs{swarm.Config{
Servers: []string{"http://localhost:80001"},
}}
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/swarm"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/swarm/"},
Options: map[string]interface{}{
"id": "",
"enabled": false,
"servers": []interface{}{"http://localhost:80001"},
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: nil,
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/swarm/"},
Options: map[string]interface{}{
"id": "",
"enabled": false,
"servers": []interface{}{"http://localhost:80001"},
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: nil,
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"enabled": true,
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/swarm"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/swarm/"},
Options: map[string]interface{}{
"id": "",
"enabled": true,
"servers": []interface{}{"http://localhost:80001"},
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: nil,
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/swarm/"},
Options: map[string]interface{}{
"id": "",
"enabled": true,
"servers": []interface{}{"http://localhost:80001"},
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: nil,
},
},
},
},
{
section: "talk",
setDefaults: func(c *server.Config) {
c.Talk.AuthorName = "Kapacitor"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk/"},
Options: map[string]interface{}{
"enabled": false,
"url": false,
"author_name": "Kapacitor",
},
Redacted: []string{
"url",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk/"},
Options: map[string]interface{}{
"enabled": false,
"url": false,
"author_name": "Kapacitor",
},
Redacted: []string{
"url",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"enabled": true,
"url": "http://talk.example.com/secret-token",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk/"},
Options: map[string]interface{}{
"enabled": true,
"url": true,
"author_name": "Kapacitor",
},
Redacted: []string{
"url",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk/"},
Options: map[string]interface{}{
"enabled": true,
"url": true,
"author_name": "Kapacitor",
},
Redacted: []string{
"url",
},
},
},
},
},
{
section: "telegram",
setDefaults: func(c *server.Config) {
c.Telegram.ChatId = "kapacitor"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram/"},
Options: map[string]interface{}{
"chat-id": "kapacitor",
"disable-notification": false,
"disable-web-page-preview": false,
"enabled": false,
"global": false,
"parse-mode": "",
"state-changes-only": false,
"token": false,
"url": telegram.DefaultTelegramURL,
},
Redacted: []string{
"token",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram/"},
Options: map[string]interface{}{
"chat-id": "kapacitor",
"disable-notification": false,
"disable-web-page-preview": false,
"enabled": false,
"global": false,
"parse-mode": "",
"state-changes-only": false,
"token": false,
"url": telegram.DefaultTelegramURL,
},
Redacted: []string{
"token",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"enabled": true,
"token": "token",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram/"},
Options: map[string]interface{}{
"chat-id": "kapacitor",
"disable-notification": false,
"disable-web-page-preview": false,
"enabled": true,
"global": false,
"parse-mode": "",
"state-changes-only": false,
"token": true,
"url": telegram.DefaultTelegramURL,
},
Redacted: []string{
"token",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram/"},
Options: map[string]interface{}{
"chat-id": "kapacitor",
"disable-notification": false,
"disable-web-page-preview": false,
"enabled": true,
"global": false,
"parse-mode": "",
"state-changes-only": false,
"token": true,
"url": telegram.DefaultTelegramURL,
},
Redacted: []string{
"token",
},
},
},
},
},
{
section: "victorops",
setDefaults: func(c *server.Config) {
c.VictorOps.RoutingKey = "test"
c.VictorOps.APIKey = "secret"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops/"},
Options: map[string]interface{}{
"api-key": true,
"enabled": false,
"global": false,
"routing-key": "test",
"url": victorops.DefaultVictorOpsAPIURL,
"json-data": false,
},
Redacted: []string{
"api-key",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops/"},
Options: map[string]interface{}{
"api-key": true,
"enabled": false,
"global": false,
"routing-key": "test",
"url": victorops.DefaultVictorOpsAPIURL,
"json-data": false,
},
Redacted: []string{
"api-key",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"api-key": "",
"global": true,
"json-data": true,
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops/"},
Options: map[string]interface{}{
"api-key": false,
"enabled": false,
"global": true,
"routing-key": "test",
"url": victorops.DefaultVictorOpsAPIURL,
"json-data": true,
},
Redacted: []string{
"api-key",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops/"},
Options: map[string]interface{}{
"api-key": false,
"enabled": false,
"global": true,
"routing-key": "test",
"url": victorops.DefaultVictorOpsAPIURL,
"json-data": true,
},
Redacted: []string{
"api-key",
},
},
},
},
},
}
compareElements := func(got, exp client.ConfigElement) error {
if got.Link != exp.Link {
return fmt.Errorf("elements have different links, got %v exp %v", got.Link, exp.Link)
}
for k, v := range exp.Options {
if g, ok := got.Options[k]; !ok {
return fmt.Errorf("missing option %q", k)
} else if !reflect.DeepEqual(g, v) {
return fmt.Errorf("unexpected config option %q got %#v exp %#v types: got %T exp %T", k, g, v, g, v)
}
}
for k := range got.Options {
if v, ok := exp.Options[k]; !ok {
return fmt.Errorf("extra option %q with value %#v", k, v)
}
}
if len(got.Redacted) != len(exp.Redacted) {
return fmt.Errorf("unexpected element redacted lists: got %v exp %v", got.Redacted, exp.Redacted)
}
sort.Strings(got.Redacted)
sort.Strings(exp.Redacted)
for i := range exp.Redacted {
if got.Redacted[i] != exp.Redacted[i] {
return fmt.Errorf("unexpected element redacted lists: got %v exp %v", got.Redacted, exp.Redacted)
}
}
return nil
}
compareSections := func(got, exp client.ConfigSection) error {
if got.Link != exp.Link {
return fmt.Errorf("sections have different links, got %v exp %v", got.Link, exp.Link)
}
if len(got.Elements) != len(exp.Elements) {
return fmt.Errorf("sections are different lengths, got %d exp %d", len(got.Elements), len(exp.Elements))
}
for i := range exp.Elements {
if err := compareElements(got.Elements[i], exp.Elements[i]); err != nil {
return errors.Wrapf(err, "section element %d are not equal", i)
}
}
return nil
}
validate := func(
cli *client.Client,
section,
element string,
expSection client.ConfigSection,
expElement client.ConfigElement,
) error {
// Get all sections
if config, err := cli.ConfigSections(); err != nil {
return errors.Wrap(err, "failed to get sections")
} else {
if err := compareSections(config.Sections[section], expSection); err != nil {
return fmt.Errorf("%s: %v", section, err)
}
}
// Get the specific section
sectionLink := cli.ConfigSectionLink(section)
if got, err := cli.ConfigSection(sectionLink); err != nil {
return err
} else {
if err := compareSections(got, expSection); err != nil {
return fmt.Errorf("%s: %v", section, err)
}
}
elementLink := cli.ConfigElementLink(section, element)
// Get the specific element
if got, err := cli.ConfigElement(elementLink); err != nil {
return err
} else {
if err := compareElements(got, expElement); err != nil {
return fmt.Errorf("%s/%s: %v", section, element, err)
}
}
return nil
}
	// Run every table entry as a subtest: boot a server with the entry's
	// defaults, check the reported config, then apply each update and
	// re-check after every one.
	for i, tc := range testCases {
		t.Run(fmt.Sprintf("%s/%s-%d", tc.section, tc.element, i), func(t *testing.T) {
			// Create default config
			c := NewConfig()
			if tc.setDefaults != nil {
				tc.setDefaults(c)
			}
			s := OpenServer(c)
			cli := Client(s)
			defer s.Close()
			// Verify the defaults the server reports before any update.
			if err := validate(cli, tc.section, tc.element, tc.expDefaultSection, tc.expDefaultElement); err != nil {
				t.Errorf("unexpected defaults for %s/%s: %v", tc.section, tc.element, err)
			}
			// Apply each update in order, validating after every step.
			for i, ua := range tc.updates {
				link := cli.ConfigElementLink(tc.section, ua.element)
				// Add/Remove create or delete whole elements, so they
				// must target the section link rather than an element.
				if len(ua.updateAction.Add) > 0 ||
					len(ua.updateAction.Remove) > 0 {
					link = cli.ConfigSectionLink(tc.section)
				}
				if err := cli.ConfigUpdate(link, ua.updateAction); err != nil {
					t.Fatal(err)
				}
				if err := validate(cli, tc.section, ua.element, ua.expSection, ua.expElement); err != nil {
					t.Errorf("unexpected update result %d for %s/%s: %v", i, tc.section, ua.element, err)
				}
			}
		})
	}
}
// TestServer_ListServiceTests verifies that the /service-tests endpoint
// returns every registered service test together with its default test
// options. The expected entries below must remain in lexical order by
// service name, since the server returns them sorted.
func TestServer_ListServiceTests(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	serviceTests, err := cli.ListServiceTests(nil)
	if err != nil {
		t.Fatal(err)
	}
	// Expected full listing: one entry per service with its default options.
	// NOTE: the exact option keys and defaults are the contract under test;
	// a change here must be matched by a change in the service's test options.
	expServiceTests := client.ServiceTests{
		Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests"},
		Services: []client.ServiceTest{
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/alertmanager"},
				Name: "alertmanager",
				Options: client.ServiceTestOptions{
					"resource":    "testResource",
					"event":       "testEvent",
					"environment": "",
					"severity":    "critical",
					"group":       "testGroup",
					"value":       "testValue",
					"message":     "test alertmanager message",
					"origin":      "",
					"service": []interface{}{
						"testServiceA",
						"testServiceB",
					},
					"timeout": "24h0m0s",
				},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/azure"},
				Name: "azure",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/consul"},
				Name: "consul",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/dns"},
				Name: "dns",
				Options: client.ServiceTestOptions{
					"id": ""},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/ec2"},
				Name: "ec2",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/file-discovery"},
				Name: "file-discovery",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/gce"},
				Name: "gce",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/hipchat"},
				Name: "hipchat",
				Options: client.ServiceTestOptions{
					"room":    "",
					"message": "test hipchat message",
					"level":   "CRITICAL",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/httppost"},
				Name: "httppost",
				Options: client.ServiceTestOptions{
					"endpoint": "example",
					"url":      "http://localhost:3000/",
					"headers":  map[string]interface{}{"Auth": "secret"},
					"timeout":  float64(0),
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/influxdb"},
				Name: "influxdb",
				Options: client.ServiceTestOptions{
					"cluster": "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/kafka"},
				Name: "kafka",
				Options: client.ServiceTestOptions{
					"cluster": "example",
					"topic":   "test",
					"key":     "key",
					"message": "test kafka message",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/kubernetes"},
				Name: "kubernetes",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/marathon"},
				Name: "marathon",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/mqtt"},
				Name: "mqtt",
				Options: client.ServiceTestOptions{
					"broker-name": "",
					"topic":       "",
					"message":     "test MQTT message",
					"qos":         "at-most-once",
					"retained":    false,
				},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/nerve"},
				Name: "nerve",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/opsgenie"},
				Name: "opsgenie",
				Options: client.ServiceTestOptions{
					"teams":        nil,
					"recipients":   nil,
					"message-type": "CRITICAL",
					"message":      "test opsgenie message",
					"entity-id":    "testEntityID",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/opsgenie2"},
				Name: "opsgenie2",
				Options: client.ServiceTestOptions{
					"teams":        nil,
					"recipients":   nil,
					"message-type": "CRITICAL",
					"message":      "test opsgenie message",
					"entity-id":    "testEntityID",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/pagerduty"},
				Name: "pagerduty",
				Options: client.ServiceTestOptions{
					"incident-key": "testIncidentKey",
					"description":  "test pagerduty message",
					"level":        "CRITICAL",
					"details":      "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/pagerduty2"},
				Name: "pagerduty2",
				Options: client.ServiceTestOptions{
					"alert_id":    "testAlertID",
					"description": "test pagerduty2 message",
					"level":       "CRITICAL",
					"event_data": map[string]interface{}{
						"Fields": map[string]interface{}{},
						"Result": map[string]interface{}{
							"series": interface{}(nil),
						},
						"Name":        "testPagerDuty2",
						"TaskName":    "",
						"Group":       "",
						"Tags":        map[string]interface{}{},
						"Recoverable": false,
						"Category":    "",
					},
					"timestamp": "2014-11-12T11:45:26.371Z",
					"links": []interface{}{
						map[string]interface{}{
							"href": "https://example.com/a",
							"text": "a",
						},
						map[string]interface{}{
							"href": "https://example.com/b",
							"text": "b",
						},
					},
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/pushover"},
				Name: "pushover",
				Options: client.ServiceTestOptions{
					"user-key":  "",
					"message":   "test pushover message",
					"device":    "",
					"title":     "",
					"url":       "",
					"url-title": "",
					"sound":     "",
					"level":     "CRITICAL",
				},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/scraper"},
				Name: "scraper",
				Options: client.ServiceTestOptions{
					"name": "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/sensu"},
				Name: "sensu",
				Options: client.ServiceTestOptions{
					"name":     "testName",
					"output":   "testOutput",
					"source":   "Kapacitor",
					"handlers": []interface{}{},
					"metadata": map[string]interface{}{},
					"level":    "CRITICAL",
				},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/serverset"},
				Name: "serverset",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/slack"},
				Name: "slack",
				Options: client.ServiceTestOptions{
					"workspace":  "",
					"channel":    "",
					"icon-emoji": "",
					"level":      "CRITICAL",
					"message":    "test slack message",
					"username":   "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/smtp"},
				Name: "smtp",
				Options: client.ServiceTestOptions{
					"to":      nil,
					"subject": "test subject",
					"body":    "test body",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/snmptrap"},
				Name: "snmptrap",
				Options: client.ServiceTestOptions{
					"trap-oid": "1.1.1.1",
					"data-list": []interface{}{
						map[string]interface{}{
							"oid":   "1.1.1.1.2",
							"type":  "s",
							"value": "test snmptrap message",
						},
					},
				},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/static-discovery"},
				Name: "static-discovery",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/swarm"},
				Name: "swarm",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/talk"},
				Name: "talk",
				Options: client.ServiceTestOptions{
					"title": "testTitle",
					"text":  "test talk text",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/telegram"},
				Name: "telegram",
				Options: client.ServiceTestOptions{
					"chat-id":                  "",
					"parse-mode":               "",
					"message":                  "test telegram message",
					"disable-web-page-preview": false,
					"disable-notification":     false,
				},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/triton"},
				Name: "triton",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/victorops"},
				Name: "victorops",
				Options: client.ServiceTestOptions{
					"routingKey":  "",
					"messageType": "CRITICAL",
					"message":     "test victorops message",
					"entityID":    "testEntityID",
				},
			},
		},
	}
	if got, exp := serviceTests.Link.Href, expServiceTests.Link.Href; got != exp {
		t.Errorf("unexpected service tests link.href: got %s exp %s", got, exp)
	}
	// Check the count first so the element-wise loop below cannot index past
	// the shorter slice.
	if got, exp := len(serviceTests.Services), len(expServiceTests.Services); got != exp {
		t.Fatalf("unexpected length of services: got %d exp %d", got, exp)
	}
	// Compare element-wise so a failure names the offending service and
	// shows a focused diff rather than one huge struct dump.
	for i := range expServiceTests.Services {
		exp := expServiceTests.Services[i]
		got := serviceTests.Services[i]
		if !reflect.DeepEqual(got, exp) {
			t.Errorf("unexpected server test %s:\n%s", exp.Name, cmp.Diff(exp, got))
		}
	}
}
// TestServer_ListServiceTests_WithPattern verifies that the /service-tests
// endpoint filters the listing by a glob pattern: only services whose names
// match "s*" should be returned, in lexical order.
func TestServer_ListServiceTests_WithPattern(t *testing.T) {
	s, cli := OpenDefaultServer()
	defer s.Close()
	serviceTests, err := cli.ListServiceTests(&client.ListServiceTestsOptions{
		Pattern: "s*",
	})
	if err != nil {
		t.Fatal(err)
	}
	// Expected subset: every service beginning with "s", with the same
	// default options as in the unfiltered listing test.
	expServiceTests := client.ServiceTests{
		Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests"},
		Services: []client.ServiceTest{
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/scraper"},
				Name: "scraper",
				Options: client.ServiceTestOptions{
					"name": "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/sensu"},
				Name: "sensu",
				Options: client.ServiceTestOptions{
					"name":     "testName",
					"output":   "testOutput",
					"source":   "Kapacitor",
					"handlers": []interface{}{},
					"metadata": map[string]interface{}{},
					"level":    "CRITICAL",
				},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/serverset"},
				Name: "serverset",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/slack"},
				Name: "slack",
				Options: client.ServiceTestOptions{
					"workspace":  "",
					"channel":    "",
					"icon-emoji": "",
					"level":      "CRITICAL",
					"message":    "test slack message",
					"username":   "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/smtp"},
				Name: "smtp",
				Options: client.ServiceTestOptions{
					"to":      nil,
					"subject": "test subject",
					"body":    "test body",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/snmptrap"},
				Name: "snmptrap",
				Options: client.ServiceTestOptions{
					"trap-oid": "1.1.1.1",
					"data-list": []interface{}{
						map[string]interface{}{
							"oid":   "1.1.1.1.2",
							"type":  "s",
							"value": "test snmptrap message",
						},
					},
				},
			},
			{
				Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/static-discovery"},
				Name: "static-discovery",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
			{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/swarm"},
				Name: "swarm",
				Options: client.ServiceTestOptions{
					"id": "",
				},
			},
		},
	}
	if got, exp := serviceTests.Link.Href, expServiceTests.Link.Href; got != exp {
		t.Errorf("unexpected service tests link.href: got %s exp %s", got, exp)
	}
	// Check the count first so the element-wise loop below cannot index past
	// the shorter slice.
	if got, exp := len(serviceTests.Services), len(expServiceTests.Services); got != exp {
		t.Fatalf("unexpected length of services: got %d exp %d", got, exp)
	}
	// Compare element-wise so a failure names the offending service.
	for i := range expServiceTests.Services {
		exp := expServiceTests.Services[i]
		got := serviceTests.Services[i]
		if !reflect.DeepEqual(got, exp) {
			t.Errorf("unexpected server test %s:\ngot\n%#v\nexp\n%#v\n", exp.Name, got, exp)
		}
	}
}
// TestServer_DoServiceTest exercises the "do service test" endpoint for a
// range of services, checking both the disabled-service error paths and one
// enabled (influxdb) success path.
//
// Each case runs as its own subtest so that its server is closed as soon as
// the case finishes. Previously the loop used `defer s.Close()` directly,
// which accumulated one open server per case until the whole test function
// returned; the subtest form also matches the pattern used by the config
// tests in this file and names the failing service in the test output.
func TestServer_DoServiceTest(t *testing.T) {
	db := NewInfluxDB(func(q string) *iclient.Response {
		return &iclient.Response{}
	})
	testCases := []struct {
		service     string
		setDefaults func(*server.Config)
		options     client.ServiceTestOptions
		exp         client.ServiceTestResult
	}{
		{
			service: "alertmanager",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "service is not enabled",
			},
		},
		{
			service: "hipchat",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "service is not enabled",
			},
		},
		{
			service: "influxdb",
			setDefaults: func(c *server.Config) {
				c.InfluxDB[0].Enabled = true
				c.InfluxDB[0].Name = "default"
				c.InfluxDB[0].URLs = []string{db.URL()}
			},
			options: client.ServiceTestOptions{
				"cluster": "default",
			},
			exp: client.ServiceTestResult{
				Success: true,
				Message: "",
			},
		},
		{
			service: "influxdb",
			options: client.ServiceTestOptions{
				"cluster": "default",
			},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "cluster \"default\" is not enabled or does not exist",
			},
		},
		{
			service: "kubernetes",
			options: client.ServiceTestOptions{
				"id": "default",
			},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "unknown kubernetes cluster \"default\"",
			},
		},
		{
			service: "mqtt",
			options: client.ServiceTestOptions{
				"broker-name": "default",
				"topic":       "test",
			},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "unknown MQTT broker \"default\"",
			},
		},
		{
			service: "opsgenie",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "service is not enabled",
			},
		},
		{
			service: "opsgenie2",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "failed to prepare API request: service is not enabled",
			},
		},
		{
			service: "pagerduty",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "service is not enabled",
			},
		},
		{
			service: "pagerduty2",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "service is not enabled",
			},
		},
		{
			service: "pushover",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "service is not enabled",
			},
		},
		{
			service: "sensu",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "service is not enabled",
			},
		},
		{
			service: "slack",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "service is not enabled",
			},
		},
		{
			service: "smtp",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "service is not enabled",
			},
		},
		{
			service: "snmptrap",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "service is not enabled",
			},
		},
		{
			service: "swarm",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "unknown swarm cluster \"\"",
			},
		},
		{
			service: "talk",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "service is not enabled",
			},
		},
		{
			service: "telegram",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "service is not enabled",
			},
		},
		{
			service: "victorops",
			options: client.ServiceTestOptions{},
			exp: client.ServiceTestResult{
				Success: false,
				Message: "service is not enabled",
			},
		},
	}
	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		// Duplicate service names (e.g. the two influxdb cases) are
		// disambiguated automatically by t.Run with a #NN suffix.
		t.Run(tc.service, func(t *testing.T) {
			// Create default config, applying per-case overrides if any.
			c := NewConfig()
			if tc.setDefaults != nil {
				tc.setDefaults(c)
			}
			s := OpenServer(c)
			cli := Client(s)
			// Closed at the end of this subtest, not at the end of the
			// whole test function.
			defer s.Close()
			tr, err := cli.DoServiceTest(cli.ServiceTestLink(tc.service), tc.options)
			if err != nil {
				t.Fatal(err)
			}
			if !reflect.DeepEqual(tr, tc.exp) {
				t.Log("Options", tc.options)
				t.Errorf("unexpected service test result for %s:\ngot\n%#v\nexp\n%#v\n", tc.service, tr, tc.exp)
			}
		})
	}
}
// TestServer_AlertHandlers_CRUD exercises the full lifecycle of a topic
// handler: create, patch, replace (put), persistence across a server
// restart, and delete. One case uses distinct topic/handler names; the
// other uses the same name for both to guard against ID collisions.
//
// Each case runs as its own subtest so that its server is closed as soon
// as the case finishes. Previously the loop used `defer s.Close()`
// directly, which kept every case's server open until the entire test
// function returned; the subtest form also matches the pattern used by
// the config tests in this file and names the failing topic in output.
func TestServer_AlertHandlers_CRUD(t *testing.T) {
	testCases := []struct {
		topic     string
		create    client.TopicHandlerOptions
		expCreate client.TopicHandler
		patch     client.JSONPatch
		expPatch  client.TopicHandler
		put       client.TopicHandlerOptions
		expPut    client.TopicHandler
	}{
		{
			topic: "system",
			create: client.TopicHandlerOptions{
				ID:   "myhandler",
				Kind: "slack",
				Options: map[string]interface{}{
					"channel": "#test",
				},
			},
			expCreate: client.TopicHandler{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/system/handlers/myhandler"},
				ID:   "myhandler",
				Kind: "slack",
				Options: map[string]interface{}{
					"channel": "#test",
				},
			},
			patch: client.JSONPatch{
				{
					Path:      "/kind",
					Operation: "replace",
					Value:     "log",
				},
				{
					Path:      "/options/channel",
					Operation: "remove",
				},
				{
					Path:      "/options/path",
					Operation: "add",
					Value:     AlertLogPath,
				},
			},
			expPatch: client.TopicHandler{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/system/handlers/myhandler"},
				ID:   "myhandler",
				Kind: "log",
				Options: map[string]interface{}{
					"path": AlertLogPath,
				},
			},
			put: client.TopicHandlerOptions{
				ID:   "newid",
				Kind: "smtp",
				Options: map[string]interface{}{
					"to": []string{"[email protected]"},
				},
			},
			expPut: client.TopicHandler{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/system/handlers/newid"},
				ID:   "newid",
				Kind: "smtp",
				Options: map[string]interface{}{
					"to": []interface{}{"[email protected]"},
				},
			},
		},
		{
			// Topic and handler have same name
			topic: "slack",
			create: client.TopicHandlerOptions{
				ID:   "slack",
				Kind: "slack",
				Options: map[string]interface{}{
					"channel": "#test",
				},
			},
			expCreate: client.TopicHandler{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/slack/handlers/slack"},
				ID:   "slack",
				Kind: "slack",
				Options: map[string]interface{}{
					"channel": "#test",
				},
			},
			patch: client.JSONPatch{
				{
					Path:      "/kind",
					Operation: "replace",
					Value:     "log",
				},
				{
					Path:      "/options/channel",
					Operation: "remove",
				},
				{
					Path:      "/options/path",
					Operation: "add",
					Value:     AlertLogPath,
				},
			},
			expPatch: client.TopicHandler{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/slack/handlers/slack"},
				ID:   "slack",
				Kind: "log",
				Options: map[string]interface{}{
					"path": AlertLogPath,
				},
			},
			put: client.TopicHandlerOptions{
				ID:   "slack",
				Kind: "smtp",
				Options: map[string]interface{}{
					"to": []string{"[email protected]"},
				},
			},
			expPut: client.TopicHandler{
				Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/slack/handlers/slack"},
				ID:   "slack",
				Kind: "smtp",
				Options: map[string]interface{}{
					"to": []interface{}{"[email protected]"},
				},
			},
		},
	}
	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		t.Run(tc.topic, func(t *testing.T) {
			// Create default config; server is closed at the end of this
			// subtest rather than at the end of the whole test function.
			c := NewConfig()
			s := OpenServer(c)
			cli := Client(s)
			defer s.Close()
			h, err := cli.CreateTopicHandler(cli.TopicHandlersLink(tc.topic), tc.create)
			if err != nil {
				t.Fatal(err)
			}
			if !reflect.DeepEqual(h, tc.expCreate) {
				t.Errorf("unexpected handler created:\ngot\n%#v\nexp\n%#v\n", h, tc.expCreate)
			}
			h, err = cli.PatchTopicHandler(h.Link, tc.patch)
			if err != nil {
				t.Fatal(err)
			}
			if !reflect.DeepEqual(h, tc.expPatch) {
				t.Errorf("unexpected handler patched:\ngot\n%#v\nexp\n%#v\n", h, tc.expPatch)
			}
			h, err = cli.ReplaceTopicHandler(h.Link, tc.put)
			if err != nil {
				t.Fatal(err)
			}
			if !reflect.DeepEqual(h, tc.expPut) {
				t.Errorf("unexpected handler put:\ngot\n%#v\nexp\n%#v\n", h, tc.expPut)
			}
			// Restart server to verify the handler was persisted.
			s.Restart()
			rh, err := cli.TopicHandler(h.Link)
			if err != nil {
				t.Fatalf("could not find handler after restart: %v", err)
			}
			if got, exp := rh, h; !reflect.DeepEqual(got, exp) {
				t.Errorf("unexpected handler after restart:\ngot\n%#v\nexp\n%#v\n", got, exp)
			}
			err = cli.DeleteTopicHandler(h.Link)
			if err != nil {
				t.Fatal(err)
			}
			// Direct lookup must fail after deletion...
			_, err = cli.TopicHandler(h.Link)
			if err == nil {
				t.Errorf("expected handler to be deleted")
			}
			// ...and the handler must no longer appear in the topic listing.
			handlers, err := cli.ListTopicHandlers(cli.TopicHandlersLink(tc.topic), nil)
			if err != nil {
				t.Fatal(err)
			}
			for _, handler := range handlers.Handlers {
				if handler.ID == tc.expPut.ID {
					t.Errorf("expected handler to be deleted")
					break
				}
			}
		})
	}
}
func TestServer_AlertHandlers(t *testing.T) {
resultJSON := `{"series":[{"name":"alert","columns":["time","value"],"values":[["1970-01-01T00:00:00Z",1]]}]}`
alertData := alert.Data{
ID: "id",
Message: "message",
Details: "details",
Time: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
Level: alert.Critical,
Recoverable: true,
Data: models.Result{
Series: models.Rows{
{
Name: "alert",
Columns: []string{"time", "value"},
Values: [][]interface{}{[]interface{}{
time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
1.0,
}},
},
},
},
}
adJSON, err := json.Marshal(alertData)
if err != nil {
t.Fatal(err)
}
testCases := []struct {
handler client.TopicHandler
setup func(*server.Config, *client.TopicHandler) (context.Context, error)
result func(context.Context) error
}{
{
handler: client.TopicHandler{
Kind: "alertmanager",
Options: map[string]interface{}{
"origin": "kapacitor",
"group": "test",
"customer": "vna",
"environment": "env",
"timeout": time.Duration(24 * time.Hour),
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := alertmanagertest.NewServer()
ctxt := context.WithValue(nil, "server", ts)
c.Alertmanager.Enabled = true
c.Alertmanager.URL = ts.URL
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*alertmanagertest.Server)
ts.Close()
got := ts.Requests()
ar := alertmanager.AlertmanagerRequest{
Status: "firing",
Labels: alertmanager.AlertmanagerLabels{Instance: "alert", Event: "id", Environment: "env", Origin: "kapacitor", Service: []string{"alert"}, Group: "test", Customer: "vna"},
Annotations: alertmanager.AlertmanagerAnnotations{Value:"",Summary:"message", Severity: "critical"},
};
exp := []alertmanagertest.Request{{
URL: "/alert",
PostData: alertmanagertest.PostData{
ar,
},
}}
//PostData: alertmanagertest.PostData{
// Resource: "alert",
// Event: "id",
// Group: "test",
// Environment: "env",
// Text: "message",
// Origin: "kapacitor",
// Service: []string{"alert"},
// Timeout: 86400,
//},
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected alertmanager request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "exec",
Options: map[string]interface{}{
"prog": "/bin/alert-handler.sh",
"args": []string{"arg1", "arg2", "arg3"},
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
te := alerttest.NewExec()
ctxt := context.WithValue(nil, "exec", te)
c.Commander = te.Commander
return ctxt, nil
},
result: func(ctxt context.Context) error {
te := ctxt.Value("exec").(*alerttest.Exec)
expData := []*commandtest.Command{{
Spec: command.Spec{
Prog: "/bin/alert-handler.sh",
Args: []string{"arg1", "arg2", "arg3"},
},
Started: true,
Waited: true,
Killed: false,
StdinData: append(adJSON, '\n'),
}}
cmds := te.Commands()
if got, exp := len(cmds), len(expData); got != exp {
return fmt.Errorf("unexpected commands length: got %d exp %d", got, exp)
}
for i := range expData {
if err := expData[i].Compare(cmds[i]); err != nil {
return fmt.Errorf("unexpected command %d: %v", i, err)
}
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "hipchat",
Options: map[string]interface{}{
"token": "testtoken1234567",
"room": "1234567",
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := hipchattest.NewServer()
ctxt := context.WithValue(nil, "server", ts)
c.HipChat.Enabled = true
c.HipChat.URL = ts.URL
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*hipchattest.Server)
ts.Close()
got := ts.Requests()
exp := []hipchattest.Request{{
URL: "/1234567/notification?auth_token=testtoken1234567",
PostData: hipchattest.PostData{
From: "kapacitor",
Message: "message",
Color: "red",
Notify: true,
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected hipchat request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "kafka",
Options: map[string]interface{}{
"cluster": "default",
"topic": "test",
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts, err := kafkatest.NewServer()
if err != nil {
return nil, err
}
ctxt := context.WithValue(nil, "server", ts)
c.Kafka = kafka.Configs{{
Enabled: true,
ID: "default",
Brokers: []string{ts.Addr.String()},
}}
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*kafkatest.Server)
time.Sleep(2 * time.Second)
ts.Close()
got, err := ts.Messages()
if err != nil {
return err
}
exp := []kafkatest.Message{{
Topic: "test",
Partition: 1,
Offset: 0,
Key: "id",
Message: string(adJSON) + "\n",
Time: time.Now().UTC(),
}}
cmpopts := []cmp.Option{
cmp.Comparer(func(a, b time.Time) bool {
diff := a.Sub(b)
if diff < 0 {
diff = -diff
}
// It is ok as long as the timestamp is within
// 5 seconds of the current time. If we are that close,
// then it likely means the timestamp was correctly
// written.
return diff < 5*time.Second
}),
}
if !cmp.Equal(exp, got, cmpopts...) {
return fmt.Errorf("unexpected kafka messages -exp/+got:\n%s", cmp.Diff(exp, got))
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "log",
Options: map[string]interface{}{
"mode": 0604,
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
tdir := MustTempDir()
p := filepath.Join(tdir, "alert.log")
ha.Options["path"] = p
l := alerttest.NewLog(p)
ctxt := context.WithValue(nil, "tdir", tdir)
ctxt = context.WithValue(ctxt, "log", l)
return ctxt, nil
},
result: func(ctxt context.Context) error {
tdir := ctxt.Value("tdir").(string)
defer os.RemoveAll(tdir)
l := ctxt.Value("log").(*alerttest.Log)
expData := []alert.Data{alertData}
expMode := os.FileMode(LogFileExpectedMode)
m, err := l.Mode()
if err != nil {
return err
}
if got, exp := m, expMode; exp != got {
return fmt.Errorf("unexpected file mode: got %v exp %v", got, exp)
}
data, err := l.Data()
if err != nil {
return err
}
if got, exp := data, expData; !reflect.DeepEqual(got, exp) {
return fmt.Errorf("unexpected alert data written to log:\ngot\n%+v\nexp\n%+v\n", got, exp)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "mqtt",
Options: map[string]interface{}{
"topic": "test",
"qos": "at-least-once",
"retained": true,
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
cc := new(mqtttest.ClientCreator)
ctxt := context.WithValue(nil, "clientCreator", cc)
cfg := &mqtt.Config{
Enabled: true,
Name: "test",
URL: "tcp://mqtt.example.com:1883",
}
cfg.SetNewClientF(cc.NewClient)
c.MQTT = mqtt.Configs{*cfg}
return ctxt, nil
},
result: func(ctxt context.Context) error {
s := ctxt.Value("clientCreator").(*mqtttest.ClientCreator)
if got, exp := len(s.Clients), 1; got != exp {
return fmt.Errorf("unexpected number of clients created : exp %d got: %d", exp, got)
}
if got, exp := len(s.Configs), 1; got != exp {
return fmt.Errorf("unexpected number of configs received: exp %d got: %d", exp, got)
}
if got, exp := s.Configs[0].URL, "tcp://mqtt.example.com:1883"; exp != got {
return fmt.Errorf("unexpected config URL: exp %q got %q", exp, got)
}
got := s.Clients[0].PublishData
exp := []mqtttest.PublishData{{
Topic: "test",
QoS: mqtt.AtLeastOnce,
Retained: true,
Message: []byte("message"),
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected mqtt publish data:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "mqtt",
Options: map[string]interface{}{
"topic": "test/{{.TaskName}}",
"qos": "at-least-once",
"retained": true,
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
cc := new(mqtttest.ClientCreator)
ctxt := context.WithValue(nil, "clientCreator", cc)
cfg := &mqtt.Config{
Enabled: true,
Name: "test",
URL: "tcp://mqtt.example.com:1883",
}
cfg.SetNewClientF(cc.NewClient)
c.MQTT = mqtt.Configs{*cfg}
return ctxt, nil
},
result: func(ctxt context.Context) error {
s := ctxt.Value("clientCreator").(*mqtttest.ClientCreator)
if got, exp := len(s.Clients), 1; got != exp {
return fmt.Errorf("unexpected number of clients created : exp %d got: %d", exp, got)
}
if got, exp := len(s.Configs), 1; got != exp {
return fmt.Errorf("unexpected number of configs received: exp %d got: %d", exp, got)
}
if got, exp := s.Configs[0].URL, "tcp://mqtt.example.com:1883"; exp != got {
return fmt.Errorf("unexpected config URL: exp %q got %q", exp, got)
}
got := s.Clients[0].PublishData
exp := []mqtttest.PublishData{{
Topic: "test/testAlertHandlers",
QoS: mqtt.AtLeastOnce,
Retained: true,
Message: []byte("message"),
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected mqtt publish data:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "opsgenie",
Options: map[string]interface{}{
"teams-list": []string{"A team", "B team"},
"recipients-list": []string{"test_recipient1", "test_recipient2"},
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := opsgenietest.NewServer()
ctxt := context.WithValue(nil, "server", ts)
c.OpsGenie.Enabled = true
c.OpsGenie.URL = ts.URL
c.OpsGenie.APIKey = "api_key"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*opsgenietest.Server)
ts.Close()
got := ts.Requests()
exp := []opsgenietest.Request{{
URL: "/",
PostData: opsgenietest.PostData{
ApiKey: "api_key",
Message: "message",
Entity: "id",
Alias: "id",
Note: "",
Details: map[string]interface{}{
"Level": "CRITICAL",
"Monitoring Tool": "Kapacitor",
},
Description: resultJSON,
Teams: []string{"A team", "B team"},
Recipients: []string{"test_recipient1", "test_recipient2"},
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected opsgenie request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "opsgenie2",
Options: map[string]interface{}{
"teams-list": []string{"A team", "B team"},
"recipients-list": []string{"test_recipient1", "test_recipient2"},
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := opsgenie2test.NewServer()
ctxt := context.WithValue(nil, "server", ts)
c.OpsGenie2.Enabled = true
c.OpsGenie2.URL = ts.URL
c.OpsGenie2.RecoveryAction = "notes"
c.OpsGenie2.APIKey = "api_key"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*opsgenie2test.Server)
ts.Close()
got := ts.Requests()
exp := []opsgenie2test.Request{{
URL: "/",
Authorization: "GenieKey api_key",
PostData: opsgenie2test.PostData{
Message: "message",
Entity: "id",
Alias: "aWQ=",
Note: "",
Priority: "P1",
Details: map[string]string{
"Level": "CRITICAL",
"Monitoring Tool": "Kapacitor",
"Kapacitor Task Name": "alert",
},
Description: resultJSON,
Responders: []map[string]string{
{"name": "A team", "type": "team"},
{"name": "B team", "type": "team"},
{"username": "test_recipient1", "type": "user"},
{"username": "test_recipient2", "type": "user"},
},
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected opsgenie2 request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "pagerduty",
Options: map[string]interface{}{
"service-key": "service_key",
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := pagerdutytest.NewServer()
ctxt := context.WithValue(nil, "server", ts)
c.PagerDuty.Enabled = true
c.PagerDuty.URL = ts.URL
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*pagerdutytest.Server)
kapacitorURL := ctxt.Value("kapacitorURL").(string)
ts.Close()
got := ts.Requests()
exp := []pagerdutytest.Request{{
URL: "/",
PostData: pagerdutytest.PostData{
ServiceKey: "service_key",
EventType: "trigger",
Description: "message",
Client: "kapacitor",
ClientURL: kapacitorURL,
Details: "details",
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected pagerduty request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "pagerduty2",
Options: map[string]interface{}{
"routing-key": "rkey",
"links": []interface{}{
map[string]string{
"href": "http://example.com",
"text": "t1",
},
map[string]string{
"href": "http://example.com/{{.TaskName}}",
"text": "t2",
},
},
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := pagerduty2test.NewServer()
ctxt := context.WithValue(nil, "server", ts)
c.PagerDuty2.Enabled = true
c.PagerDuty2.URL = ts.URL
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*pagerduty2test.Server)
kapacitorURL := ctxt.Value("kapacitorURL").(string)
ts.Close()
got := ts.Requests()
exp := []pagerduty2test.Request{{
URL: "/",
PostData: pagerduty2test.PostData{
Client: "kapacitor",
ClientURL: kapacitorURL,
EventAction: "trigger",
DedupKey: "id",
Payload: &pagerduty2test.PDCEF{
Summary: "message",
Source: "unknown",
Severity: "critical",
Class: "testAlertHandlers",
CustomDetails: map[string]interface{}{
"result": map[string]interface{}{
"series": []interface{}{
map[string]interface{}{
"name": "alert",
"columns": []interface{}{"time", "value"},
"values": []interface{}{
[]interface{}{"1970-01-01T00:00:00Z", float64(1)},
},
},
},
},
},
Timestamp: "1970-01-01T00:00:00.000000000Z",
},
RoutingKey: "rkey",
Links: []pagerduty2test.Link{
{Href: "http://example.com", Text: "t1"},
{Href: "http://example.com/testAlertHandlers", Text: "t2"},
},
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected pagerduty2 request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "post",
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := alerttest.NewPostServer()
ha.Options = map[string]interface{}{"url": ts.URL}
ctxt := context.WithValue(nil, "server", ts)
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*alerttest.PostServer)
ts.Close()
exp := []alert.Data{alertData}
got := ts.Data()
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected post request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "post",
Options: map[string]interface{}{
"endpoint": "test",
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := httpposttest.NewAlertServer(nil, true)
ctxt := context.WithValue(nil, "server", ts)
c.HTTPPost = httppost.Configs{{
Endpoint: "test",
URL: ts.URL,
AlertTemplate: `{{.Message}}`,
}}
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*httpposttest.AlertServer)
exp := []httpposttest.AlertRequest{{
MatchingHeaders: true,
Raw: []byte("message"),
}}
got := ts.Data()
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected httppost alert request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "pushover",
Options: map[string]interface{}{},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := pushovertest.NewServer()
ctxt := context.WithValue(nil, "server", ts)
c.Pushover.Enabled = true
c.Pushover.URL = ts.URL
c.Pushover.Token = "api_key"
c.Pushover.UserKey = "user"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*pushovertest.Server)
ts.Close()
got := ts.Requests()
exp := []pushovertest.Request{{
PostData: pushovertest.PostData{
Token: "api_key",
UserKey: "user",
Message: "message",
Priority: 1,
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected pushover request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "sensu",
Options: map[string]interface{}{
"source": "Kapacitor",
"metadata": map[string]interface{}{
"k1": "v1",
"k2": 5,
},
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts, err := sensutest.NewServer()
if err != nil {
return nil, err
}
ctxt := context.WithValue(nil, "server", ts)
c.Sensu.Enabled = true
c.Sensu.Addr = ts.Addr
c.Sensu.Source = "Kapacitor"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*sensutest.Server)
ts.Close()
exp := []sensutest.Request{{
Source: "Kapacitor",
Output: "message",
Name: "id",
Status: 2,
Metadata: map[string]interface{}{
"k1": "v1",
"k2": float64(5),
},
}}
got := ts.Requests()
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected sensu request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "slack",
Options: map[string]interface{}{
"channel": "#test",
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := slacktest.NewServer()
ctxt := context.WithValue(nil, "server", ts)
c.Slack[0].Enabled = true
c.Slack[0].URL = ts.URL + "/test/slack/url"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*slacktest.Server)
ts.Close()
got := ts.Requests()
exp := []slacktest.Request{{
URL: "/test/slack/url",
PostData: slacktest.PostData{
Channel: "#test",
Username: "kapacitor",
Text: "",
Attachments: []slacktest.Attachment{
{
Fallback: "message",
Color: "danger",
Text: "message",
Mrkdwn_in: []string{"text"},
},
},
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected slack request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "smtp",
Options: map[string]interface{}{
"to": []string{"[email protected]", "[email protected]"},
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts, err := smtptest.NewServer()
if err != nil {
return nil, err
}
ctxt := context.WithValue(nil, "server", ts)
c.SMTP.Enabled = true
c.SMTP.Host = ts.Host
c.SMTP.Port = ts.Port
c.SMTP.From = "[email protected]"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*smtptest.Server)
ts.Close()
errors := ts.Errors()
if len(errors) != 0 {
return fmt.Errorf("multiple errors %d: %v", len(errors), errors)
}
expMail := []*smtptest.Message{{
Header: mail.Header{
"Mime-Version": []string{"1.0"},
"Content-Type": []string{"text/html; charset=UTF-8"},
"Content-Transfer-Encoding": []string{"quoted-printable"},
"To": []string{"[email protected], [email protected]"},
"From": []string{"[email protected]"},
"Subject": []string{"message"},
},
Body: "details\n",
}}
msgs := ts.SentMessages()
if got, exp := len(msgs), len(expMail); got != exp {
return fmt.Errorf("unexpected number of messages sent: got %d exp %d", got, exp)
}
for i, exp := range expMail {
got := msgs[i]
if err := exp.Compare(got); err != nil {
return fmt.Errorf("unexpected message %d: %v", i, err)
}
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "snmptrap",
Options: map[string]interface{}{
"trap-oid": "1.1.2",
"data-list": []map[string]string{
{
"oid": "1.1.2.1",
"type": "s",
"value": "{{.Message}}",
},
{
"oid": "1.1.2.2",
"type": "s",
"value": "{{.Level}}",
},
},
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts, err := snmptraptest.NewServer()
if err != nil {
return nil, err
}
ctxt := context.WithValue(nil, "server", ts)
c.SNMPTrap.Enabled = true
c.SNMPTrap.Addr = ts.Addr
c.SNMPTrap.Community = ts.Community
c.SNMPTrap.Retries = 3
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*snmptraptest.Server)
ts.Close()
got := ts.Traps()
exp := []snmptraptest.Trap{{
Pdu: snmptraptest.Pdu{
Type: snmpgo.SNMPTrapV2,
ErrorStatus: snmpgo.NoError,
VarBinds: snmptraptest.VarBinds{
{
Oid: "1.3.6.1.2.1.1.3.0",
Value: "1000",
Type: "TimeTicks",
},
{
Oid: "1.3.6.1.6.3.1.1.4.1.0",
Value: "1.1.2",
Type: "Oid",
},
{
Oid: "1.1.2.1",
Value: "message",
Type: "OctetString",
},
{
Oid: "1.1.2.2",
Value: "CRITICAL",
Type: "OctetString",
},
},
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected snmptrap request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "talk",
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := talktest.NewServer()
ctxt := context.WithValue(nil, "server", ts)
c.Talk.Enabled = true
c.Talk.URL = ts.URL
c.Talk.AuthorName = "Kapacitor"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*talktest.Server)
ts.Close()
got := ts.Requests()
exp := []talktest.Request{{
URL: "/",
PostData: talktest.PostData{
AuthorName: "Kapacitor",
Text: "message",
Title: "id",
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected talk request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "tcp",
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts, err := alerttest.NewTCPServer()
if err != nil {
return nil, err
}
ha.Options = map[string]interface{}{"address": ts.Addr}
ctxt := context.WithValue(nil, "server", ts)
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*alerttest.TCPServer)
ts.Close()
exp := []alert.Data{alertData}
got := ts.Data()
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected tcp request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "telegram",
Options: map[string]interface{}{
"chat-id": "chat id",
"disable-web-page-preview": true,
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := telegramtest.NewServer()
ctxt := context.WithValue(nil, "server", ts)
c.Telegram.Enabled = true
c.Telegram.URL = ts.URL + "/bot"
c.Telegram.Token = "TOKEN:AUTH"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*telegramtest.Server)
ts.Close()
got := ts.Requests()
exp := []telegramtest.Request{{
URL: "/botTOKEN:AUTH/sendMessage",
PostData: telegramtest.PostData{
ChatId: "chat id",
Text: "message",
ParseMode: "",
DisableWebPagePreview: true,
DisableNotification: false,
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected telegram request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "victorops",
Options: map[string]interface{}{
"routing-key": "key",
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := victoropstest.NewServer()
ctxt := context.WithValue(nil, "server", ts)
c.VictorOps.Enabled = true
c.VictorOps.URL = ts.URL
c.VictorOps.APIKey = "api_key"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*victoropstest.Server)
ts.Close()
got := ts.Requests()
exp := []victoropstest.Request{{
URL: "/api_key/key",
PostData: victoropstest.PostData{
MessageType: "CRITICAL",
EntityID: "id",
StateMessage: "message",
Timestamp: 0,
MonitoringTool: "kapacitor",
Data: resultJSON,
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected victorops request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
}
for i, tc := range testCases {
t.Run(fmt.Sprintf("%s-%d", tc.handler.Kind, i), func(t *testing.T) {
kind := tc.handler.Kind
// Create default config
c := NewConfig()
var ctxt context.Context
if tc.setup != nil {
var err error
ctxt, err = tc.setup(c, &tc.handler)
if err != nil {
t.Fatal(err)
}
}
s := OpenServer(c)
cli := Client(s)
closed := false
defer func() {
if !closed {
s.Close()
}
}()
ctxt = context.WithValue(ctxt, "kapacitorURL", s.URL())
if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink("test"), client.TopicHandlerOptions{
ID: "testAlertHandlers",
Kind: tc.handler.Kind,
Options: tc.handler.Options,
}); err != nil {
t.Fatalf("%s: %v", kind, err)
}
tick := `
stream
|from()
.measurement('alert')
|alert()
.topic('test')
.id('id')
.message('message')
.details('details')
.crit(lambda: TRUE)
`
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: "testAlertHandlers",
Type: client.StreamTask,
DBRPs: []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}},
TICKscript: tick,
Status: client.Enabled,
}); err != nil {
t.Fatalf("%s: %v", kind, err)
}
point := "alert value=1 0000000000"
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", point, v)
// Close the entire server to ensure all data is processed
s.Close()
closed = true
if err := tc.result(ctxt); err != nil {
t.Errorf("%s: %v", kind, err)
}
})
}
}
// TestServer_Alert_Duration verifies that an alert event's Duration is
// tracked correctly and survives server restarts: the first CRITICAL point
// produces an event with Duration 0, and a second CRITICAL point one second
// later must yield Duration of one second after another restart.
func TestServer_Alert_Duration(t *testing.T) {
	// Setup test TCP server
	ts, err := alerttest.NewTCPServer()
	if err != nil {
		t.Fatal(err)
	}
	defer ts.Close()
	// Create default config
	c := NewConfig()
	s := OpenServer(c)
	cli := Client(s)
	defer s.Close()
	// Task alerts CRITICAL when value > 1.0 and sends events to the test TCP server.
	tick := `
stream
	|from()
		.measurement('alert')
	|alert()
		.id('id')
		.message('message')
		.details('details')
		.crit(lambda: "value" > 1.0)
		.tcp('` + ts.Addr + `')
`
	if _, err := cli.CreateTask(client.CreateTaskOptions{
		ID:   "testAlertHandlers",
		Type: client.StreamTask,
		DBRPs: []client.DBRP{{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		}},
		TICKscript: tick,
		Status:     client.Enabled,
	}); err != nil {
		t.Fatal(err)
	}
	// Write point that triggers the CRITICAL state at t=0.
	point := "alert value=2 0000000000"
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", point, v)
	// Restart the server so the event state must be restored from disk.
	s.Restart()
	// The .tcp() node publishes to an anonymous topic named after the task and alert node.
	topic := "main:testAlertHandlers:alert2"
	l := cli.TopicEventsLink(topic)
	expTopicEvents := client.TopicEvents{
		Link:  l,
		Topic: topic,
		Events: []client.TopicEvent{{
			Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1/alerts/topics/%s/events/id", topic)},
			ID:   "id",
			State: client.EventState{
				Message:  "message",
				Details:  "details",
				Time:     time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
				Duration: 0,
				Level:    "CRITICAL",
			},
		}},
	}
	te, err := cli.ListTopicEvents(l, nil)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(te, expTopicEvents) {
		t.Errorf("unexpected topic events for anonymous topic:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
	}
	event, err := cli.TopicEvent(expTopicEvents.Events[0].Link)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(event, expTopicEvents.Events[0]) {
		t.Errorf("unexpected topic event for anonymous topic:\ngot\n%+v\nexp\n%+v\n", event, expTopicEvents.Events[0])
	}
	// Write a second CRITICAL point one second later; the alert stays
	// CRITICAL so the event duration should now be one second.
	point = "alert value=3 0000000001"
	v = url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", point, v)
	// Restart the server again; the accumulated duration must persist.
	s.Restart()
	expTopicEvents = client.TopicEvents{
		Link:  l,
		Topic: topic,
		Events: []client.TopicEvent{{
			Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1/alerts/topics/%s/events/id", topic)},
			ID:   "id",
			State: client.EventState{
				Message:  "message",
				Details:  "details",
				Time:     time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
				Duration: client.Duration(time.Second),
				Level:    "CRITICAL",
			},
		}},
	}
	te, err = cli.ListTopicEvents(l, nil)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(te, expTopicEvents) {
		t.Errorf("unexpected topic events for anonymous topic after second point:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
	}
}
// TestServer_Alert_Aggregate verifies the "aggregate" topic handler: three
// CRITICAL events published to the "agg" topic within the 100ms aggregation
// interval are collapsed into a single summary event that is forwarded to
// the "tcp" topic and delivered to the TCP handler.
func TestServer_Alert_Aggregate(t *testing.T) {
	// Setup test TCP server
	ts, err := alerttest.NewTCPServer()
	if err != nil {
		t.Fatal(err)
	}
	defer ts.Close()
	// Create default config
	c := NewConfig()
	s := OpenServer(c)
	cli := Client(s)
	defer s.Close()
	aggTopic := "agg"
	// Create task for alert publishing to the aggregate topic.
	tick := `
stream
	|from()
		.measurement('alert')
	|alert()
		.id('id')
		.message('message')
		.details('details')
		.crit(lambda: "value" > 1.0)
		.topic('` + aggTopic + `')
`
	if _, err := cli.CreateTask(client.CreateTaskOptions{
		ID:   "agg_task",
		Type: client.StreamTask,
		DBRPs: []client.DBRP{{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		}},
		TICKscript: tick,
		Status:     client.Enabled,
	}); err != nil {
		t.Fatal(err)
	}
	// Create tcp handler on tcp topic
	tcpTopic := "tcp"
	if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink(tcpTopic), client.TopicHandlerOptions{
		ID:   "tcp_handler",
		Kind: "tcp",
		Options: map[string]interface{}{
			"address": ts.Addr,
		},
	}); err != nil {
		t.Fatal(err)
	}
	// Create aggregate handler on agg topic, forwarding summaries to the tcp topic.
	if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink(aggTopic), client.TopicHandlerOptions{
		ID:   "aggregate_handler",
		Kind: "aggregate",
		Options: map[string]interface{}{
			"id":       "id-agg",
			"interval": 100 * time.Millisecond,
			"topic":    "tcp",
		},
	}); err != nil {
		t.Fatal(err)
	}
	// Write three points (ms precision) that all fire CRITICAL within one interval.
	point := `alert value=3 0000000000000
alert value=4 0000000000001
alert value=2 0000000000002
`
	v := url.Values{}
	v.Add("precision", "ms")
	s.MustWrite("mydb", "myrp", point, v)
	// Sleep just past the aggregation interval so the summary event is emitted.
	time.Sleep(110 * time.Millisecond)
	// Check TCP handler got the single aggregated event covering all three points.
	alertData := alert.Data{
		ID:          "id-agg",
		Message:     "Received 3 events in the last 100ms.",
		Details:     "message\nmessage\nmessage",
		Time:        time.Date(1970, 1, 1, 0, 0, 0, 2000000, time.UTC),
		Level:       alert.Critical,
		Duration:    2 * time.Millisecond,
		Recoverable: false,
		Data: models.Result{
			Series: models.Rows{
				{
					Name:    "alert",
					Columns: []string{"time", "value"},
					Values: [][]interface{}{[]interface{}{
						time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
						3.0,
					}},
				},
				{
					Name:    "alert",
					Columns: []string{"time", "value"},
					Values: [][]interface{}{[]interface{}{
						time.Date(1970, 1, 1, 0, 0, 0, 1000000, time.UTC),
						4.0,
					}},
				},
				{
					Name:    "alert",
					Columns: []string{"time", "value"},
					Values: [][]interface{}{[]interface{}{
						time.Date(1970, 1, 1, 0, 0, 0, 2000000, time.UTC),
						2.0,
					}},
				},
			},
		},
	}
	ts.Close()
	exp := []alert.Data{alertData}
	got := ts.Data()
	if !reflect.DeepEqual(exp, got) {
		t.Errorf("unexpected tcp request:\nexp\n%+v\ngot\n%+v\n", exp, got)
	}
	// Check event on the forwarded-to tcp topic.
	l := cli.TopicEventsLink(tcpTopic)
	expTopicEvents := client.TopicEvents{
		Link:  l,
		Topic: tcpTopic,
		Events: []client.TopicEvent{{
			Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1/alerts/topics/%s/events/id-agg", tcpTopic)},
			ID:   "id-agg",
			State: client.EventState{
				Message:  "Received 3 events in the last 100ms.",
				Details:  "message\nmessage\nmessage",
				Time:     time.Date(1970, 1, 1, 0, 0, 0, 2000000, time.UTC),
				Duration: client.Duration(2 * time.Millisecond),
				Level:    "CRITICAL",
			},
		}},
	}
	te, err := cli.ListTopicEvents(l, nil)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(te, expTopicEvents) {
		t.Errorf("unexpected topic events for aggregate topic:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
	}
}
// TestServer_Alert_Publish verifies the "publish" topic handler: an event
// raised on the "publish" topic is republished to the "tcp" topic, whose TCP
// handler must receive it, and the event must also be queryable on the tcp
// topic after a server restart.
func TestServer_Alert_Publish(t *testing.T) {
	// Setup test TCP server
	ts, err := alerttest.NewTCPServer()
	if err != nil {
		t.Fatal(err)
	}
	defer ts.Close()
	// Create default config
	c := NewConfig()
	s := OpenServer(c)
	cli := Client(s)
	defer s.Close()
	publishTopic := "publish"
	// Create task for alert publishing to the publish topic.
	tick := `
stream
	|from()
		.measurement('alert')
	|alert()
		.id('id')
		.message('message')
		.details('details')
		.crit(lambda: "value" > 1.0)
		.topic('` + publishTopic + `')
`
	if _, err := cli.CreateTask(client.CreateTaskOptions{
		ID:   "publish_task",
		Type: client.StreamTask,
		DBRPs: []client.DBRP{{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		}},
		TICKscript: tick,
		Status:     client.Enabled,
	}); err != nil {
		t.Fatal(err)
	}
	// Create tcp handler on tcp topic
	tcpTopic := "tcp"
	if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink(tcpTopic), client.TopicHandlerOptions{
		ID:   "tcp_handler",
		Kind: "tcp",
		Options: map[string]interface{}{
			"address": ts.Addr,
		},
	}); err != nil {
		t.Fatal(err)
	}
	// Create publish handler on publish topic
	if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink(publishTopic), client.TopicHandlerOptions{
		ID:   "publish_handler",
		Kind: "publish",
		Options: map[string]interface{}{
			// Publish to tcpTopic
			"topics": []string{tcpTopic},
		},
	}); err != nil {
		t.Fatal(err)
	}
	// Write a point that fires the CRITICAL alert.
	point := `alert value=2 0000000000`
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", point, v)
	// Restart so the republished event must survive persistence.
	s.Restart()
	// Check TCP handler got event
	alertData := alert.Data{
		ID:          "id",
		Message:     "message",
		Details:     "details",
		Time:        time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
		Level:       alert.Critical,
		Recoverable: true,
		Data: models.Result{
			Series: models.Rows{
				{
					Name:    "alert",
					Columns: []string{"time", "value"},
					Values: [][]interface{}{[]interface{}{
						time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
						2.0,
					}},
				},
			},
		},
	}
	ts.Close()
	exp := []alert.Data{alertData}
	got := ts.Data()
	if !reflect.DeepEqual(exp, got) {
		t.Errorf("unexpected tcp request:\nexp\n%+v\ngot\n%+v\n", exp, got)
	}
	// Check event on the tcp topic (the publish destination).
	l := cli.TopicEventsLink(tcpTopic)
	expTopicEvents := client.TopicEvents{
		Link:  l,
		Topic: tcpTopic,
		Events: []client.TopicEvent{{
			Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1/alerts/topics/%s/events/id", tcpTopic)},
			ID:   "id",
			State: client.EventState{
				Message:  "message",
				Details:  "details",
				Time:     time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
				Duration: 0,
				Level:    "CRITICAL",
			},
		}},
	}
	te, err := cli.ListTopicEvents(l, nil)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(te, expTopicEvents) {
		t.Errorf("unexpected topic events for publish topic:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
	}
}
// TestServer_Alert_Match verifies a topic handler's Match expression: of the
// five points written, only the CRITICAL event from host=serverA should be
// delivered to the TCP handler, while the topic itself still records the
// most recent event (the final OK from host=serverB) regardless of the match.
func TestServer_Alert_Match(t *testing.T) {
	// Setup test TCP server
	ts, err := alerttest.NewTCPServer()
	if err != nil {
		t.Fatal(err)
	}
	defer ts.Close()
	// Create default config
	c := NewConfig()
	s := OpenServer(c)
	cli := Client(s)
	defer s.Close()
	topic := "test"
	// Create task for alert
	tick := `
stream
	|from()
		.measurement('alert')
	|alert()
		.id('id')
		.message('message')
		.details('details')
		.crit(lambda: "value" > 1.0)
		.topic('` + topic + `')
`
	if _, err := cli.CreateTask(client.CreateTaskOptions{
		ID:   "alert_task",
		Type: client.StreamTask,
		DBRPs: []client.DBRP{{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		}},
		TICKscript: tick,
		Status:     client.Enabled,
	}); err != nil {
		t.Fatal(err)
	}
	// Create tcp handler with match condition: only CRITICAL events from serverA pass.
	if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink(topic), client.TopicHandlerOptions{
		ID:   "tcp_handler",
		Kind: "tcp",
		Options: map[string]interface{}{
			"address": ts.Addr,
		},
		Match: `"host" == 'serverA' AND level() == CRITICAL`,
	}); err != nil {
		t.Fatal(err)
	}
	// Write points: only t=3 (serverA, value=2) satisfies both the alert and the match.
	point := `alert,host=serverA value=0 0000000000
alert,host=serverB value=2 0000000001
alert,host=serverB value=0 0000000002
alert,host=serverA value=2 0000000003
alert,host=serverB value=0 0000000004
`
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", point, v)
	s.Restart()
	alertData := alert.Data{
		ID:          "id",
		Message:     "message",
		Details:     "details",
		Time:        time.Date(1970, 1, 1, 0, 0, 3, 0, time.UTC),
		Level:       alert.Critical,
		Recoverable: true,
		Data: models.Result{
			Series: models.Rows{
				{
					Name:    "alert",
					Tags:    map[string]string{"host": "serverA"},
					Columns: []string{"time", "value"},
					Values: [][]interface{}{[]interface{}{
						time.Date(1970, 1, 1, 0, 0, 3, 0, time.UTC),
						2.0,
					}},
				},
			},
		},
	}
	ts.Close()
	exp := []alert.Data{alertData}
	got := ts.Data()
	if !reflect.DeepEqual(exp, got) {
		t.Errorf("unexpected tcp request:\nexp\n%+v\ngot\n%+v\n", exp, got)
	}
	// Topic should have the most recent event (t=4, OK), unaffected by the handler match.
	l := cli.TopicEventsLink(topic)
	expTopicEvents := client.TopicEvents{
		Link:  l,
		Topic: topic,
		Events: []client.TopicEvent{{
			Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1/alerts/topics/%s/events/id", topic)},
			ID:   "id",
			State: client.EventState{
				Message:  "message",
				Details:  "details",
				Time:     time.Date(1970, 1, 1, 0, 0, 4, 0, time.UTC),
				Duration: client.Duration(time.Second),
				Level:    "OK",
			},
		}},
	}
	te, err := cli.ListTopicEvents(l, nil)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(te, expTopicEvents) {
		t.Errorf("unexpected topic events for publish topic:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
	}
}
// TestServer_AlertAnonTopic verifies the lifecycle of the anonymous topic
// created by a task's .tcp() node: it exists while the task is enabled, is
// removed when the task is disabled or deleted, and its event state is
// restored when the task is re-enabled or the server restarts.
func TestServer_AlertAnonTopic(t *testing.T) {
	// Setup test TCP server
	ts, err := alerttest.NewTCPServer()
	if err != nil {
		t.Fatal(err)
	}
	defer ts.Close()
	// Create default config
	c := NewConfig()
	s := OpenServer(c)
	cli := Client(s)
	defer s.Close()
	// Task warns when value <= 1.0 and goes critical when value > 1.0.
	tick := `
stream
	|from()
		.measurement('alert')
	|alert()
		.id('id')
		.message('message')
		.details('details')
		.warn(lambda: "value" <= 1.0)
		.crit(lambda: "value" > 1.0)
		.tcp('` + ts.Addr + `')
`
	task, err := cli.CreateTask(client.CreateTaskOptions{
		ID:   "testAlertHandlers",
		Type: client.StreamTask,
		DBRPs: []client.DBRP{{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		}},
		TICKscript: tick,
		Status:     client.Enabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Write warning point
	point := "alert value=1 0000000000"
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", point, v)
	// Restart the server so the anonymous topic state must be restored.
	s.Restart()
	// Anonymous topic named after the task and alert node.
	topic := "main:testAlertHandlers:alert2"
	l := cli.TopicEventsLink(topic)
	expTopicEvents := client.TopicEvents{
		Link:  l,
		Topic: topic,
		Events: []client.TopicEvent{{
			Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1/alerts/topics/%s/events/id", topic)},
			ID:   "id",
			State: client.EventState{
				Message:  "message",
				Details:  "details",
				Time:     time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
				Duration: 0,
				Level:    "WARNING",
			},
		}},
	}
	te, err := cli.ListTopicEvents(l, nil)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(te, expTopicEvents) {
		t.Errorf("unexpected topic events for anonymous topic:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
	}
	event, err := cli.TopicEvent(expTopicEvents.Events[0].Link)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(event, expTopicEvents.Events[0]) {
		t.Errorf("unexpected topic event for anonymous topic:\ngot\n%+v\nexp\n%+v\n", event, expTopicEvents.Events[0])
	}
	// Disable task; its anonymous topic should disappear.
	task, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		Status: client.Disabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	if _, err := cli.ListTopicEvents(l, nil); err == nil {
		t.Fatal("expected error listing anonymous topic for disabled task")
	} else if got, exp := err.Error(), fmt.Sprintf("failed to get topic events: unknown topic %q", topic); got != exp {
		t.Errorf("unexpected error message for nonexistent anonymous topic: got %q exp %q", got, exp)
	}
	// Enable task; the anonymous topic and its prior event state should return.
	task, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
		Status: client.Enabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	te, err = cli.ListTopicEvents(l, nil)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(te, expTopicEvents) {
		t.Errorf("unexpected topic events for anonymous topic after re-enable:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
	}
	// Restart the server, again and ensure that the anonymous topic state is restored
	s.Restart()
	te, err = cli.ListTopicEvents(l, nil)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(te, expTopicEvents) {
		t.Errorf("unexpected topic events for anonymous topic after re-enable and restart:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
	}
	// Delete task; the anonymous topic should be gone for good.
	if err := cli.DeleteTask(task.Link); err != nil {
		t.Fatal(err)
	}
	if _, err := cli.ListTopicEvents(l, nil); err == nil {
		t.Fatal("expected error listing anonymous topic for deleted task")
	} else if got, exp := err.Error(), fmt.Sprintf("failed to get topic events: unknown topic %q", topic); got != exp {
		t.Errorf("unexpected error message for nonexistent anonymous topic: got %q exp %q", got, exp)
	}
}
// TestServer_AlertTopic_PersistedState verifies that event state for both a
// named topic ("test") and the task's anonymous log topic survives a server
// restart, that MinLevel filtering excludes a WARNING event when CRITICAL is
// requested, and that deleting a topic makes its events unqueryable.
func TestServer_AlertTopic_PersistedState(t *testing.T) {
	// Setup test TCP server
	ts, err := alerttest.NewTCPServer()
	if err != nil {
		t.Fatal(err)
	}
	defer ts.Close()
	// Temp dir for the .log() handler output.
	tmpDir := MustTempDir()
	defer os.RemoveAll(tmpDir)
	tmpPath := filepath.Join(tmpDir, "alert.log")
	// Create default config
	c := NewConfig()
	s := OpenServer(c)
	cli := Client(s)
	defer s.Close()
	if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink("test"), client.TopicHandlerOptions{
		ID:      "testAlertHandler",
		Kind:    "tcp",
		Options: map[string]interface{}{"address": ts.Addr},
	}); err != nil {
		t.Fatal(err)
	}
	// Task publishes to the named topic and also logs via .log(), which
	// creates an anonymous topic for the alert node.
	tick := `
stream
	|from()
		.measurement('alert')
	|alert()
		.topic('test')
		.id('id')
		.message('message')
		.details('details')
		.warn(lambda: TRUE)
		.log('` + tmpPath + `')
`
	if _, err := cli.CreateTask(client.CreateTaskOptions{
		ID:   "testAlertHandlers",
		Type: client.StreamTask,
		DBRPs: []client.DBRP{{
			Database:        "mydb",
			RetentionPolicy: "myrp",
		}},
		TICKscript: tick,
		Status:     client.Enabled,
	}); err != nil {
		t.Fatal(err)
	}
	point := "alert value=1 0000000000"
	v := url.Values{}
	v.Add("precision", "s")
	s.MustWrite("mydb", "myrp", point, v)
	// Restart the server so topic state must be reloaded from disk.
	s.Restart()
	// Both the named topic and the anonymous log topic should carry the event.
	topics := []string{
		"test",
		"main:testAlertHandlers:alert2",
	}
	for _, topic := range topics {
		l := cli.TopicEventsLink(topic)
		expTopicEvents := client.TopicEvents{
			Link:  l,
			Topic: topic,
			Events: []client.TopicEvent{{
				Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1/alerts/topics/%s/events/id", topic)},
				ID:   "id",
				State: client.EventState{
					Message:  "message",
					Details:  "details",
					Time:     time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
					Duration: 0,
					Level:    "WARNING",
				},
			}},
		}
		te, err := cli.ListTopicEvents(l, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(te, expTopicEvents) {
			t.Errorf("unexpected topic events for topic %q:\ngot\n%+v\nexp\n%+v\n", topic, te, expTopicEvents)
		}
		event, err := cli.TopicEvent(expTopicEvents.Events[0].Link)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(event, expTopicEvents.Events[0]) {
			t.Errorf("unexpected topic event for topic %q:\ngot\n%+v\nexp\n%+v\n", topic, event, expTopicEvents.Events[0])
		}
		// Filtering at CRITICAL must exclude the WARNING event.
		te, err = cli.ListTopicEvents(l, &client.ListTopicEventsOptions{
			MinLevel: "CRITICAL",
		})
		if err != nil {
			t.Fatal(err)
		}
		expTopicEvents.Events = expTopicEvents.Events[0:0]
		if !reflect.DeepEqual(te, expTopicEvents) {
			t.Errorf("unexpected topic events with minLevel for topic %q:\ngot\n%+v\nexp\n%+v\n", topic, te, expTopicEvents)
		}
		// Deleting the topic should make listing its events fail.
		l = cli.TopicLink(topic)
		if err := cli.DeleteTopic(l); err != nil {
			t.Fatal(err)
		}
		te, err = cli.ListTopicEvents(l, nil)
		if err == nil {
			t.Fatalf("expected error for deleted topic %q", topic)
		}
	}
}
func TestServer_Alert_Inhibition(t *testing.T) {
// Test Overview
// Create several alerts:
// * cpu - alert on host cpu usage by region,host,cpu
// * mem - alert on host mem usage by region,host
// * host - alert on host up/down by region,host
// * region - alert on region up/down by region
//
// The host alert will inhibit the cpu and mem alerts by host
// The region alert will inhibit the cpu mem and host alerts by region
//
// Create default config
c := NewConfig()
s := OpenServer(c)
cli := Client(s)
closed := false
defer func() {
if !closed {
s.Close()
}
}()
// Setup test TCP server
ts, err := alerttest.NewTCPServer()
if err != nil {
t.Fatal(err)
}
defer ts.Close()
if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink("inhibition"), client.TopicHandlerOptions{
ID: "tcpHandler",
Kind: "tcp",
Options: map[string]interface{}{"address": ts.Addr},
}); err != nil {
t.Fatal(err)
}
memAlert := `
stream
|from()
.measurement('mem')
.groupBy(*)
|alert()
.category('system')
.topic('inhibition')
.message('mem')
.details('')
.crit(lambda: "v")
`
cpuAlert := `
stream
|from()
.measurement('cpu')
.groupBy(*)
|alert()
.category('system')
.topic('inhibition')
.message('cpu')
.details('')
.crit(lambda: "v")
`
hostAlert := `
stream
|from()
.measurement('host')
.groupBy(*)
|alert()
.category('host_alert')
.topic('inhibition')
.message('host')
.details('')
.crit(lambda: "v")
.inhibit('system', 'region', 'host')
`
regionAlert := `
stream
|from()
.measurement('region')
.groupBy(*)
|alert()
.category('region_alert')
.topic('inhibition')
.message('region')
.details('')
.crit(lambda: "v")
.inhibit('host_alert', 'region')
.inhibit('system', 'region')
`
tasks := map[string]string{
"cpu": cpuAlert,
"mem": memAlert,
"host": hostAlert,
"region": regionAlert,
}
for id, tick := range tasks {
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: client.StreamTask,
DBRPs: []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}},
TICKscript: tick,
Status: client.Enabled,
}); err != nil {
t.Fatal(err)
}
}
batches := []string{
//#0 Send initial batch with all alerts in the green state
`cpu,region=west,host=A,cpu=0 v=false 0
cpu,region=west,host=A,cpu=1 v=false 0
cpu,region=west,host=B,cpu=0 v=false 0
cpu,region=west,host=B,cpu=1 v=false 0
cpu,region=east,host=A,cpu=0 v=false 0
cpu,region=east,host=A,cpu=1 v=false 0
cpu,region=east,host=B,cpu=0 v=false 0
cpu,region=east,host=B,cpu=1 v=false 0
mem,region=west,host=A v=false 0
mem,region=west,host=B v=false 0
mem,region=east,host=A v=false 0
mem,region=east,host=B v=false 0
host,region=west,host=A v=false 0
host,region=west,host=B v=false 0
host,region=east,host=A v=false 0
host,region=east,host=B v=false 0
region,region=west v=false 0
region,region=east v=false 0
`,
//#1 Send batch where some mem and cpu alerts fire
`cpu,region=west,host=B,cpu=0 v=true 1
cpu,region=east,host=A,cpu=1 v=true 1
mem,region=west,host=B v=true 1
mem,region=east,host=A v=true 1
`,
//#2 Send batch where some host alerts fire
`host,region=west,host=B v=true 2
host,region=east,host=B v=true 2
`,
//#3 Send batch where some mem and cpu alerts fire
`cpu,region=west,host=B,cpu=0 v=true 3
cpu,region=east,host=A,cpu=1 v=true 3
mem,region=west,host=B v=true 3
mem,region=east,host=A v=true 3
`,
//#4 Send batch were hosts alerts recover
`host,region=west,host=B v=false 4
host,region=east,host=B v=false 4
`,
//#5 Send batch where some mem and cpu alerts fire
`cpu,region=west,host=B,cpu=0 v=true 5
cpu,region=east,host=A,cpu=1 v=true 5
mem,region=west,host=B v=true 5
mem,region=east,host=A v=true 5
`,
//#6 Send batch where region alert fires
`region,region=east v=true 6`,
//#7 Send batch where some mem, cpu and host alerts fire
`cpu,region=west,host=B,cpu=0 v=true 7
cpu,region=east,host=A,cpu=1 v=true 7
mem,region=west,host=B v=true 7
mem,region=east,host=A v=true 7
host,region=west,host=A v=true 7
host,region=east,host=B v=true 7
`,
//#8 Send batch where region alert recovers
`region,region=east v=false 8`,
//#9 Send batch where some mem, cpu and host alerts fire
`cpu,region=west,host=B,cpu=0 v=true 9
cpu,region=east,host=A,cpu=1 v=true 9
mem,region=west,host=B v=true 9
mem,region=east,host=A v=true 9
host,region=west,host=A v=true 9
host,region=east,host=B v=true 9
`,
}
v := url.Values{}
v.Add("precision", "s")
for _, p := range batches {
s.MustWrite("mydb", "myrp", p, v)
time.Sleep(50 * time.Millisecond)
}
// Close the entire server to ensure all data is processed
s.Close()
closed = true
want := []alert.Data{
// #1
{
ID: "cpu:cpu=0,host=B,region=west",
Message: "cpu",
Time: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.OK,
Duration: 0,
Recoverable: true,
},
{
ID: "cpu:cpu=1,host=A,region=east",
Message: "cpu",
Time: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.OK,
Duration: 0,
Recoverable: true,
},
{
ID: "mem:host=A,region=east",
Message: "mem",
Time: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.OK,
Duration: 0,
Recoverable: true,
},
{
ID: "mem:host=B,region=west",
Message: "mem",
Time: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.OK,
Duration: 0,
Recoverable: true,
},
// #2
{
ID: "host:host=B,region=east",
Message: "host",
Time: time.Date(1970, 1, 1, 0, 0, 2, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.OK,
Duration: 0,
Recoverable: true,
},
{
ID: "host:host=B,region=west",
Message: "host",
Time: time.Date(1970, 1, 1, 0, 0, 2, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.OK,
Duration: 0,
Recoverable: true,
},
// #3
{
ID: "cpu:cpu=1,host=A,region=east",
Message: "cpu",
Time: time.Date(1970, 1, 1, 0, 0, 3, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 2 * time.Second,
Recoverable: true,
},
{
ID: "mem:host=A,region=east",
Message: "mem",
Time: time.Date(1970, 1, 1, 0, 0, 3, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 2 * time.Second,
Recoverable: true,
},
// #4
{
ID: "host:host=B,region=east",
Message: "host",
Time: time.Date(1970, 1, 1, 0, 0, 4, 0, time.UTC),
Level: alert.OK,
PreviousLevel: alert.Critical,
Duration: 2 * time.Second,
Recoverable: true,
},
{
ID: "host:host=B,region=west",
Message: "host",
Time: time.Date(1970, 1, 1, 0, 0, 4, 0, time.UTC),
Level: alert.OK,
PreviousLevel: alert.Critical,
Duration: 2 * time.Second,
Recoverable: true,
},
// #5
{
ID: "cpu:cpu=0,host=B,region=west",
Message: "cpu",
Time: time.Date(1970, 1, 1, 0, 0, 5, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 4 * time.Second,
Recoverable: true,
},
{
ID: "cpu:cpu=1,host=A,region=east",
Message: "cpu",
Time: time.Date(1970, 1, 1, 0, 0, 5, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 4 * time.Second,
Recoverable: true,
},
{
ID: "mem:host=A,region=east",
Message: "mem",
Time: time.Date(1970, 1, 1, 0, 0, 5, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 4 * time.Second,
Recoverable: true,
},
{
ID: "mem:host=B,region=west",
Message: "mem",
Time: time.Date(1970, 1, 1, 0, 0, 5, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 4 * time.Second,
Recoverable: true,
},
// #6
{
ID: "region:region=east",
Message: "region",
Time: time.Date(1970, 1, 1, 0, 0, 6, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.OK,
Duration: 0,
Recoverable: true,
},
// #7
{
ID: "cpu:cpu=0,host=B,region=west",
Message: "cpu",
Time: time.Date(1970, 1, 1, 0, 0, 7, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 6 * time.Second,
Recoverable: true,
},
{
ID: "host:host=A,region=west",
Message: "host",
Time: time.Date(1970, 1, 1, 0, 0, 7, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.OK,
Duration: 0,
Recoverable: true,
},
{
ID: "mem:host=B,region=west",
Message: "mem",
Time: time.Date(1970, 1, 1, 0, 0, 7, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 6 * time.Second,
Recoverable: true,
},
// #8
{
ID: "region:region=east",
Message: "region",
Time: time.Date(1970, 1, 1, 0, 0, 8, 0, time.UTC),
Level: alert.OK,
PreviousLevel: alert.Critical,
Duration: 2 * time.Second,
Recoverable: true,
},
// #9
{
ID: "cpu:cpu=0,host=B,region=west",
Message: "cpu",
Time: time.Date(1970, 1, 1, 0, 0, 9, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 8 * time.Second,
Recoverable: true,
},
{
ID: "cpu:cpu=1,host=A,region=east",
Message: "cpu",
Time: time.Date(1970, 1, 1, 0, 0, 9, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 8 * time.Second,
Recoverable: true,
},
{
ID: "host:host=A,region=west",
Message: "host",
Time: time.Date(1970, 1, 1, 0, 0, 9, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 2 * time.Second,
Recoverable: true,
},
{
ID: "host:host=B,region=east",
Message: "host",
Time: time.Date(1970, 1, 1, 0, 0, 9, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.OK,
Duration: 2 * time.Second,
Recoverable: true,
},
{
ID: "mem:host=A,region=east",
Message: "mem",
Time: time.Date(1970, 1, 1, 0, 0, 9, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 8 * time.Second,
Recoverable: true,
},
{
ID: "mem:host=B,region=west",
Message: "mem",
Time: time.Date(1970, 1, 1, 0, 0, 9, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 8 * time.Second,
Recoverable: true,
},
}
ts.Close()
got := ts.Data()
// Remove the .Data result from the alerts
for i := range got {
got[i].Data = models.Result{}
}
// Sort results since order doesn't matter
//sort.Slice(want, func(i, j int) bool {
// if want[i].Time.Equal(want[j].Time) {
// return want[i].ID < want[j].ID
// }
// return want[i].Time.Before(want[j].Time)
//})
sort.Slice(got, func(i, j int) bool {
if got[i].Time.Equal(got[j].Time) {
return got[i].ID < got[j].ID
}
return got[i].Time.Before(got[j].Time)
})
t.Logf("want: %d got: %d", len(want), len(got))
if !cmp.Equal(got, want) {
t.Errorf("unexpected alert during inhibited run -want/+got\n%s", cmp.Diff(want, got))
}
//for i := range want {
// if !cmp.Equal(got[i], want[i]) {
// t.Errorf("unexpected alert during inhibited run -want/+got\n%s", cmp.Diff(want[i], got[i]))
// }
//}
}
// TestServer_AlertListHandlers verifies topic-handler listing: handlers created
// on a topic are returned (in lexical ID order), survive a server restart, and
// can be filtered with glob patterns (the pattern is echoed, URL-encoded, in
// the self link).
func TestServer_AlertListHandlers(t *testing.T) {
    // Setup test TCP server
    ts, err := alerttest.NewTCPServer()
    if err != nil {
        t.Fatal(err)
    }
    defer ts.Close()

    // Create default config
    c := NewConfig()
    s := OpenServer(c)
    cli := Client(s)
    defer s.Close()

    thl := cli.TopicHandlersLink("test")

    // Number of handlers to create
    n := 3
    for i := 0; i < n; i++ {
        id := fmt.Sprintf("handler%d", i)
        if _, err := cli.CreateTopicHandler(thl, client.TopicHandlerOptions{
            ID:      id,
            Kind:    "tcp",
            Options: map[string]interface{}{"address": ts.Addr},
        }); err != nil {
            t.Fatal(err)
        }
    }

    // Full expected listing for an unfiltered query; later pattern queries are
    // expressed as modified copies of this value.
    expHandlers := client.TopicHandlers{
        Link:  client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/test/handlers?pattern="},
        Topic: "test",
        Handlers: []client.TopicHandler{
            {
                Link:    client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/test/handlers/handler0"},
                ID:      "handler0",
                Kind:    "tcp",
                Options: map[string]interface{}{"address": ts.Addr},
            },
            {
                Link:    client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/test/handlers/handler1"},
                ID:      "handler1",
                Kind:    "tcp",
                Options: map[string]interface{}{"address": ts.Addr},
            },
            {
                Link:    client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/test/handlers/handler2"},
                ID:      "handler2",
                Kind:    "tcp",
                Options: map[string]interface{}{"address": ts.Addr},
            },
        },
    }

    handlers, err := cli.ListTopicHandlers(thl, nil)
    if err != nil {
        t.Fatal(err)
    }
    if !reflect.DeepEqual(handlers, expHandlers) {
        t.Errorf("unexpected handlers:\ngot\n%+v\nexp\n%+v\n", handlers, expHandlers)
    }

    // Restart the server: handlers must be persisted, not in-memory only.
    s.Restart()

    // Check again
    handlers, err = cli.ListTopicHandlers(thl, nil)
    if err != nil {
        t.Fatal(err)
    }
    if !reflect.DeepEqual(handlers, expHandlers) {
        t.Errorf("unexpected handlers after restart:\ngot\n%+v\nexp\n%+v\n", handlers, expHandlers)
    }

    var exp client.TopicHandlers

    // Pattern = * — matches everything; only the self link changes ("*" is
    // percent-encoded as %2A).
    handlers, err = cli.ListTopicHandlers(thl, &client.ListTopicHandlersOptions{
        Pattern: "*",
    })
    if err != nil {
        t.Fatal(err)
    }
    exp = expHandlers
    exp.Link.Href = "/kapacitor/v1/alerts/topics/test/handlers?pattern=%2A"
    if !reflect.DeepEqual(handlers, exp) {
        t.Errorf("unexpected handlers with pattern \"*\":\ngot\n%+v\nexp\n%+v\n", handlers, exp)
    }

    // Pattern = handler* — still matches all three handlers.
    handlers, err = cli.ListTopicHandlers(thl, &client.ListTopicHandlersOptions{
        Pattern: "handler*",
    })
    if err != nil {
        t.Fatal(err)
    }
    exp = expHandlers
    exp.Link.Href = "/kapacitor/v1/alerts/topics/test/handlers?pattern=handler%2A"
    if !reflect.DeepEqual(handlers, exp) {
        t.Errorf("unexpected handlers with pattern \"handler*\":\ngot\n%+v\nexp\n%+v\n", handlers, exp)
    }

    // Pattern = handler0 — exact match, only the first handler is returned.
    handlers, err = cli.ListTopicHandlers(thl, &client.ListTopicHandlersOptions{
        Pattern: "handler0",
    })
    if err != nil {
        t.Fatal(err)
    }
    exp = expHandlers
    exp.Link.Href = "/kapacitor/v1/alerts/topics/test/handlers?pattern=handler0"
    exp.Handlers = expHandlers.Handlers[0:1]
    if !reflect.DeepEqual(handlers, exp) {
        t.Errorf("unexpected handlers with pattern \"handler0\":\ngot\n%+v\nexp\n%+v\n", handlers, exp)
    }
}
// TestServer_AlertTopic checks that registering a handler on a topic
// implicitly creates the topic, and that the topic record starts at level OK
// with zero collected events and the expected sub-resource links.
func TestServer_AlertTopic(t *testing.T) {
    conf := NewConfig()
    srv := OpenServer(conf)
    apiClient := Client(srv)
    defer srv.Close()

    handlerOpts := client.TopicHandlerOptions{
        ID:      "testAlertHandler",
        Kind:    "tcp",
        Options: map[string]interface{}{"address": "localhost:4657"},
    }
    if _, err := apiClient.CreateTopicHandler(apiClient.TopicHandlersLink("misc"), handlerOpts); err != nil {
        t.Fatal(err)
    }

    got, err := apiClient.Topic(apiClient.TopicLink("misc"))
    if err != nil {
        t.Fatal(err)
    }

    expected := client.Topic{
        Link:         client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/misc"},
        ID:           "misc",
        Level:        "OK",
        Collected:    0,
        EventsLink:   client.Link{Relation: "events", Href: "/kapacitor/v1/alerts/topics/misc/events"},
        HandlersLink: client.Link{Relation: "handlers", Href: "/kapacitor/v1/alerts/topics/misc/handlers"},
    }
    if !reflect.DeepEqual(got, expected) {
        t.Errorf("unexpected topic:\ngot\n%+v\nexp\n%+v\n", got, expected)
    }
}
// TestServer_AlertListTopics verifies topic listing: topics created via
// handler registration are listed in lexical order, topic state (level)
// survives a server restart, and both glob-pattern and min-level filters work.
func TestServer_AlertListTopics(t *testing.T) {
    // Setup test TCP server
    ts, err := alerttest.NewTCPServer()
    if err != nil {
        t.Fatal(err)
    }
    defer ts.Close()

    // Create default config
    c := NewConfig()
    s := OpenServer(c)
    cli := Client(s)
    defer s.Close()

    // Registering a handler on each topic implicitly creates the topic.
    for _, topic := range []string{"system", "misc", "test"} {
        if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink(topic), client.TopicHandlerOptions{
            ID:      "testAlertHandler",
            Kind:    "tcp",
            Options: map[string]interface{}{"address": ts.Addr},
        }); err != nil {
            t.Fatal(err)
        }
    }

    // All three topics, sorted by ID, all at level OK before any events fire.
    expTopics := client.Topics{
        Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics?min-level=OK&pattern="},
        Topics: []client.Topic{
            {
                Link:         client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/misc"},
                ID:           "misc",
                Level:        "OK",
                EventsLink:   client.Link{Relation: "events", Href: "/kapacitor/v1/alerts/topics/misc/events"},
                HandlersLink: client.Link{Relation: "handlers", Href: "/kapacitor/v1/alerts/topics/misc/handlers"},
            },
            {
                Link:         client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/system"},
                ID:           "system",
                Level:        "OK",
                EventsLink:   client.Link{Relation: "events", Href: "/kapacitor/v1/alerts/topics/system/events"},
                HandlersLink: client.Link{Relation: "handlers", Href: "/kapacitor/v1/alerts/topics/system/handlers"},
            },
            {
                Link:         client.Link{Relation: client.Self, Href: "/kapacitor/v1/alerts/topics/test"},
                ID:           "test",
                Level:        "OK",
                EventsLink:   client.Link{Relation: "events", Href: "/kapacitor/v1/alerts/topics/test/events"},
                HandlersLink: client.Link{Relation: "handlers", Href: "/kapacitor/v1/alerts/topics/test/handlers"},
            },
        },
    }
    topics, err := cli.ListTopics(nil)
    if err != nil {
        t.Fatal(err)
    }
    if !reflect.DeepEqual(topics, expTopics) {
        t.Errorf("unexpected topics:\ngot\n%+v\nexp\n%+v\n", topics, expTopics)
    }

    // Define a task that always raises a CRITICAL alert on topic "test".
    tick := `
stream
	|from()
		.measurement('alert')
	|alert()
		.topic('test')
		.id('id')
		.message('message')
		.details('details')
		.crit(lambda: TRUE)
`
    if _, err := cli.CreateTask(client.CreateTaskOptions{
        ID:   "testAlertHandlers",
        Type: client.StreamTask,
        DBRPs: []client.DBRP{{
            Database:        "mydb",
            RetentionPolicy: "myrp",
        }},
        TICKscript: tick,
        Status:     client.Enabled,
    }); err != nil {
        t.Fatal(err)
    }

    // Trigger the alert by writing a single point.
    point := "alert value=1 0000000000"
    v := url.Values{}
    v.Add("precision", "s")
    s.MustWrite("mydb", "myrp", point, v)

    // Restart the server: the CRITICAL level of topic "test" must persist.
    s.Restart()

    // Update expected topics since we triggered an event.
    expTopics.Topics[2].Level = "CRITICAL"

    // Check again
    topics, err = cli.ListTopics(nil)
    if err != nil {
        t.Fatal(err)
    }
    if !reflect.DeepEqual(topics, expTopics) {
        t.Errorf("unexpected topics after restart:\ngot\n%+v\nexp\n%+v\n", topics, expTopics)
    }

    var exp client.Topics

    // Pattern = * — matches everything; only the self link changes ("*"
    // percent-encoded as %2A).
    topics, err = cli.ListTopics(&client.ListTopicsOptions{
        Pattern: "*",
    })
    if err != nil {
        t.Fatal(err)
    }
    exp = expTopics
    exp.Link.Href = "/kapacitor/v1/alerts/topics?min-level=OK&pattern=%2A"
    if !reflect.DeepEqual(topics, exp) {
        t.Errorf("unexpected topics with pattern \"*\":\ngot\n%+v\nexp\n%+v\n", topics, exp)
    }

    // Pattern = test — only the "test" topic (index 2) matches.
    topics, err = cli.ListTopics(&client.ListTopicsOptions{
        Pattern: "test",
    })
    if err != nil {
        t.Fatal(err)
    }
    exp = expTopics
    exp.Link.Href = "/kapacitor/v1/alerts/topics?min-level=OK&pattern=test"
    exp.Topics = expTopics.Topics[2:]
    if !reflect.DeepEqual(topics, exp) {
        t.Errorf("unexpected topics with pattern \"test\":\ngot\n%+v\nexp\n%+v\n", topics, exp)
    }

    // MinLevel = INFO — only "test" is at CRITICAL >= INFO; the OK topics drop out.
    topics, err = cli.ListTopics(&client.ListTopicsOptions{
        MinLevel: "INFO",
    })
    if err != nil {
        t.Fatal(err)
    }
    exp = expTopics
    exp.Link.Href = "/kapacitor/v1/alerts/topics?min-level=INFO&pattern="
    exp.Topics = expTopics.Topics[2:]
    if !reflect.DeepEqual(topics, exp) {
        t.Errorf("unexpected topics min level \"info\":\ngot\n%+v\nexp\n%+v\n", topics, exp)
    }
}
// TestServer_AlertHandler_MultipleHandlers checks that one alert event on a
// topic fans out to every handler registered on that topic — here a Slack and
// a VictorOps handler — and that each test endpoint receives exactly the
// expected request payload.
func TestServer_AlertHandler_MultipleHandlers(t *testing.T) {
    // JSON body of the alerting point, as VictorOps receives it in Data.
    resultJSON := `{"series":[{"name":"alert","columns":["time","value"],"values":[["1970-01-01T00:00:00Z",1]]}]}`

    // Create default config
    c := NewConfig()

    // Configure slack
    slack := slacktest.NewServer()
    c.Slack[0].Enabled = true
    c.Slack[0].URL = slack.URL + "/test/slack/url"

    // Configure victorops
    vo := victoropstest.NewServer()
    c.VictorOps.Enabled = true
    c.VictorOps.URL = vo.URL
    c.VictorOps.APIKey = "api_key"

    s := OpenServer(c)
    cli := Client(s)
    // The server is closed explicitly mid-test to flush all data; the flag
    // prevents a double Close in the deferred cleanup.
    closed := false
    defer func() {
        if !closed {
            s.Close()
        }
    }()

    if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink("test"), client.TopicHandlerOptions{
        ID:   "testAlertHandlers-VO",
        Kind: "victorops",
        Options: map[string]interface{}{
            "routing-key": "key",
        },
    }); err != nil {
        t.Fatal(err)
    }
    if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink("test"), client.TopicHandlerOptions{
        ID:   "testAlertHandlers-Slack",
        Kind: "slack",
        Options: map[string]interface{}{
            "channel": "#test",
        },
    }); err != nil {
        t.Fatal(err)
    }

    // Task that raises a CRITICAL alert on topic "test" for any point.
    tick := `
stream
	|from()
		.measurement('alert')
	|alert()
		.topic('test')
		.id('id')
		.message('message')
		.details('details')
		.crit(lambda: TRUE)
`
    if _, err := cli.CreateTask(client.CreateTaskOptions{
        ID:   "testAlertHandlers",
        Type: client.StreamTask,
        DBRPs: []client.DBRP{{
            Database:        "mydb",
            RetentionPolicy: "myrp",
        }},
        TICKscript: tick,
        Status:     client.Enabled,
    }); err != nil {
        t.Fatal(err)
    }

    point := "alert value=1 0000000000"
    v := url.Values{}
    v.Add("precision", "s")
    s.MustWrite("mydb", "myrp", point, v)

    // Close the entire server to ensure all data is processed
    s.Close()
    closed = true

    // Validate slack
    {
        slack.Close()
        got := slack.Requests()
        exp := []slacktest.Request{{
            URL: "/test/slack/url",
            PostData: slacktest.PostData{
                Channel:  "#test",
                Username: "kapacitor",
                Text:     "",
                Attachments: []slacktest.Attachment{
                    {
                        Fallback:  "message",
                        Color:     "danger",
                        Text:      "message",
                        Mrkdwn_in: []string{"text"},
                    },
                },
            },
        }}
        if !reflect.DeepEqual(exp, got) {
            t.Errorf("unexpected slack request:\nexp\n%+v\ngot\n%+v\n", exp, got)
        }
    }
    // Validate victorops
    {
        vo.Close()
        got := vo.Requests()
        exp := []victoropstest.Request{{
            // Path is /<api key>/<routing key>.
            URL: "/api_key/key",
            PostData: victoropstest.PostData{
                MessageType:    "CRITICAL",
                EntityID:       "id",
                StateMessage:   "message",
                Timestamp:      0,
                MonitoringTool: "kapacitor",
                Data:           resultJSON,
            },
        }}
        if !reflect.DeepEqual(exp, got) {
            t.Errorf("unexpected victorops request:\nexp\n%+v\ngot\n%+v\n", exp, got)
        }
    }
}
// TestStorage_Rebuild issues a rebuild action against every storage backend
// exposed by the server and fails on any backend that reports an error.
func TestStorage_Rebuild(t *testing.T) {
    srv, apiClient := OpenDefaultServer()
    defer srv.Close()

    list, err := apiClient.ListStorage()
    if err != nil {
        t.Fatal(err)
    }

    for _, entry := range list.Storage {
        t.Log(entry.Link)
        rebuildErr := apiClient.DoStorageAction(entry.Link, client.StorageActionOptions{
            Action: client.StorageRebuild,
        })
        if rebuildErr != nil {
            t.Errorf("error rebuilding storage %q: %v", entry.Name, rebuildErr)
        }
    }
}
// TestStorage_Backup exercises the backup/restore round trip: create a task,
// take a backup, verify the reported size matches the payload, overwrite the
// BoltDB file with the backup while the server is stopped, then confirm the
// task comes back intact after restart.
func TestStorage_Backup(t *testing.T) {
    s, cli := OpenDefaultServer()
    defer s.Close()

    // Create a task
    id := "testTaskID"
    ttype := client.StreamTask
    dbrps := []client.DBRP{
        {
            Database:        "mydb",
            RetentionPolicy: "myrp",
        },
        {
            Database:        "otherdb",
            RetentionPolicy: "default",
        },
    }
    tick := `stream
	|from()
		.measurement('test')
`
    task, err := cli.CreateTask(client.CreateTaskOptions{
        ID:         id,
        Type:       ttype,
        DBRPs:      dbrps,
        TICKscript: tick,
        Status:     client.Disabled,
    })
    if err != nil {
        t.Fatal(err)
    }

    // Perform backup
    size, r, err := cli.Backup()
    if err != nil {
        t.Fatal(err)
    }
    defer r.Close()
    backup, err := ioutil.ReadAll(r)
    if err != nil {
        t.Fatal(err)
    }
    // The size advertised by the API must match the bytes actually streamed.
    if got, exp := int64(len(backup)), size; got != exp {
        t.Fatalf("unexpected backup size got %d exp %d", got, exp)
    }

    // Stop the server
    s.Stop()

    // Restore from backup by replacing the BoltDB file on disk.
    if err := ioutil.WriteFile(s.Config.Storage.BoltDBPath, backup, 0644); err != nil {
        t.Fatal(err)
    }

    // Start the server again
    s.Start()

    // Check that the task was restored with all of its original attributes.
    ti, err := cli.Task(task.Link, nil)
    if err != nil {
        t.Fatal(err)
    }

    if ti.Error != "" {
        t.Fatal(ti.Error)
    }
    if ti.ID != id {
        t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
    }
    if ti.Type != client.StreamTask {
        t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
    }
    if ti.Status != client.Disabled {
        t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
    }
    if !reflect.DeepEqual(ti.DBRPs, dbrps) {
        t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
    }
    if ti.TICKscript != tick {
        t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
    }
    dot := "digraph testTaskID {\nstream0 -> from1;\n}"
    if ti.Dot != dot {
        t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
    }
}
// TestLoadService exercises the load service: tasks, templates and topic
// handlers defined as files under the load directory are created on startup,
// and renames, additions and removals on disk are reflected after a Reload.
//
// Fixes relative to the previous version: the error from the rename that moves
// the template file back was silently discarded; several failure messages were
// misspelled ("enountered") or named the wrong resource; the server was never
// closed.
func TestLoadService(t *testing.T) {
    s, c, cli := OpenLoadServer()
    defer s.Close()

    // If the list of test fixtures changes update this list
    tasks := []string{"base", "cpu_alert", "implicit", "join", "other"}
    ts, err := cli.ListTasks(nil)
    if err != nil {
        t.Fatalf("encountered error listing tasks: %v", err)
    }
    for i, task := range ts {
        if exp, got := tasks[i], task.ID; exp != got {
            t.Fatalf("expected task ID to be %v, got %v\n", exp, got)
        }
    }

    // If the list of test fixtures changes update this list
    templates := []string{"base_template", "implicit_template"}
    tmps, err := cli.ListTemplates(nil)
    if err != nil {
        t.Fatalf("encountered error listing templates: %v", err)
    }
    for i, template := range tmps {
        if exp, got := templates[i], template.ID; exp != got {
            t.Fatalf("expected template ID to be %v, got %v\n", exp, got)
        }
    }

    // If the list of test fixtures changes update this list
    topicHandlers := []string{"example", "other"}
    link := cli.TopicHandlersLink("cpu")
    ths, err := cli.ListTopicHandlers(link, nil)
    if err != nil {
        t.Fatalf("encountered error listing topic-handlers: %v", err)
    }
    for i, th := range ths.Handlers {
        if exp, got := topicHandlers[i], th.ID; exp != got {
            t.Fatalf("expected topic-handler ID to be %v, got %v\n", exp, got)
        }
    }

    // Rename a task file: the task should be re-registered under the new name.
    err = os.Rename(
        path.Join(c.Load.Dir, "tasks", "join.tick"),
        path.Join(c.Load.Dir, "tasks", "z.tick"),
    )
    if err != nil {
        t.Fatalf("failed to rename tickscript: %v", err)
    }

    // reload
    s.Reload()

    // If the list of test fixtures changes update this list
    tasks = []string{"base", "cpu_alert", "implicit", "other", "z"}
    ts, err = cli.ListTasks(nil)
    if err != nil {
        t.Fatalf("encountered error listing tasks: %v", err)
    }
    for i, task := range ts {
        if exp, got := tasks[i], task.ID; exp != got {
            t.Fatalf("expected task ID to be %v, got %v\n", exp, got)
        }
    }

    // rename template file
    err = os.Rename(
        path.Join(c.Load.Dir, "templates", "base_template.tick"),
        path.Join(c.Load.Dir, "templates", "new.tick"),
    )
    if err != nil {
        t.Fatalf("failed to rename tickscript: %v", err)
    }

    // reload
    s.Reload()

    // If the list of test fixtures changes update this list
    templates = []string{"implicit_template", "new"}
    tmps, err = cli.ListTemplates(nil)
    if err != nil {
        t.Fatalf("encountered error listing templates: %v", err)
    }
    for i, template := range tmps {
        if exp, got := templates[i], template.ID; exp != got {
            t.Fatalf("expected template ID to be %v, got %v\n", exp, got)
        }
    }

    // move template file back
    err = os.Rename(
        path.Join(c.Load.Dir, "templates", "new.tick"),
        path.Join(c.Load.Dir, "templates", "base_template.tick"),
    )
    if err != nil {
        // BUG FIX: this error was previously ignored.
        t.Fatalf("failed to rename tickscript: %v", err)
    }

    // add a new handler
    f, err := os.Create(path.Join(c.Load.Dir, "handlers", "new.tick"))
    if err != nil {
        t.Fatalf("failed to create new handler file: %v", err)
    }
    script := `topic: cpu
id: new
kind: slack
match: changed() == TRUE
options:
  channel: '#alerts'
`
    if _, err := f.Write([]byte(script)); err != nil {
        t.Fatalf("failed to write handler: %v", err)
    }
    f.Close()

    // remove an existing handler file
    if err := os.Remove(path.Join(c.Load.Dir, "handlers", "other.yaml")); err != nil {
        t.Fatalf("failed to remove handler file: %v", err)
    }

    // reload
    s.Reload()

    // If the list of test fixtures changes update this list
    topicHandlers = []string{"example", "new"}
    link = cli.TopicHandlersLink("cpu")
    ths, err = cli.ListTopicHandlers(link, nil)
    if err != nil {
        t.Fatalf("encountered error listing topic-handlers: %v", err)
    }
    for i, th := range ths.Handlers {
        if exp, got := topicHandlers[i], th.ID; exp != got {
            t.Fatalf("expected topic-handler ID to be %v, got %v\n", exp, got)
        }
    }
}
// TestSideloadService verifies the sideload node: field values are looked up
// from YAML files in a temp directory (keyed by point tags), and editing a
// source file followed by a Reload changes the values applied to later points.
func TestSideloadService(t *testing.T) {
    dir := MustTempDir()
    defer os.RemoveAll(dir)
    if err := copyFiles("testdata/sideload", dir); err != nil {
        t.Fatal(err)
    }
    s, cli := OpenDefaultServer()
    defer s.Close()

    id := "testSideloadTask"
    ttype := client.StreamTask
    dbrps := []client.DBRP{{
        Database:        "mydb",
        RetentionPolicy: "myrp",
    }}
    // The order() templates resolve per-point from the host/service/region tags;
    // the field() calls provide defaults when no source file overrides them.
    tick := fmt.Sprintf(`stream
	|from()
		.measurement('test')
	|sideload()
		.source('file://%s')
		.order('host/{{.host}}.yml', 'service/{{.service}}.yml', 'region/{{.region}}.yml')
		.field('cpu_usage_idle_warn', 30.0)
		.field('cpu_usage_idle_crit', 15.0)
	|httpOut('sideload')
`, dir)
    _, err := cli.CreateTask(client.CreateTaskOptions{
        ID:         id,
        Type:       ttype,
        DBRPs:      dbrps,
        TICKscript: tick,
        Status:     client.Enabled,
    })
    if err != nil {
        t.Fatal(err)
    }

    endpoint := fmt.Sprintf("%s/tasks/%s/sideload", s.URL(), id)

    // Request data before any writes and expect null responses
    nullResponse := `{"series":null}`
    err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5)
    if err != nil {
        t.Error(err)
    }

    // First point: sideloaded values come from the fixture files copied above.
    points := `test,host=host002,service=cart,region=us-east-1 value=1 0000000000`
    v := url.Values{}
    v.Add("precision", "s")
    s.MustWrite("mydb", "myrp", points, v)

    exp := `{"series":[{"name":"test","tags":{"host":"host002","region":"us-east-1","service":"cart"},"columns":["time","cpu_usage_idle_crit","cpu_usage_idle_warn","value"],"values":[["1970-01-01T00:00:00Z",4,10,1]]}]}`
    err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
    if err != nil {
        t.Error(err)
    }

    // Update source file: override the warn threshold for host002.
    host002Override := `
---
cpu_usage_idle_warn: 8
`
    f, err := os.Create(filepath.Join(dir, "host/host002.yml"))
    if err != nil {
        t.Fatal(err)
    }
    _, err = io.Copy(f, strings.NewReader(host002Override))
    if err != nil {
        t.Fatal(err)
    }
    f.Close()

    // reload so the sideload service re-reads the source directory
    s.Reload()

    // Write new points; the warn value should now be 8 instead of 10.
    points = `test,host=host002,service=cart,region=us-east-1 value=2 0000000001`
    s.MustWrite("mydb", "myrp", points, v)

    exp = `{"series":[{"name":"test","tags":{"host":"host002","region":"us-east-1","service":"cart"},"columns":["time","cpu_usage_idle_crit","cpu_usage_idle_warn","value"],"values":[["1970-01-01T00:00:01Z",5,8,2]]}]}`
    err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
    if err != nil {
        t.Error(err)
    }
}
// TestLogSessions_HeaderJSON checks that requesting /logs with a JSON
// Content-Type yields a JSON (utf-8) response Content-Type.
func TestLogSessions_HeaderJSON(t *testing.T) {
    srv, apiClient := OpenDefaultServer()
    defer srv.Close()

    logsURL := apiClient.BaseURL()
    logsURL.Path = "/logs"

    request, err := http.NewRequest("GET", logsURL.String(), nil)
    if err != nil {
        t.Fatal(err)
    }
    request.Header.Add("Content-Type", "application/json")

    response, err := http.DefaultClient.Do(request)
    if err != nil {
        t.Fatal(err)
    }
    defer response.Body.Close()

    exp := "application/json; charset=utf-8"
    got := response.Header.Get("Content-Type")
    if exp != got {
        t.Fatalf("expected: %v, got: %v\n", exp, got)
    }
}
// TestLogSessions_HeaderGzip checks that /logs responses are not
// Content-Encoding compressed (the header must be empty).
func TestLogSessions_HeaderGzip(t *testing.T) {
    srv, apiClient := OpenDefaultServer()
    defer srv.Close()

    logsURL := apiClient.BaseURL()
    logsURL.Path = "/logs"

    request, err := http.NewRequest("GET", logsURL.String(), nil)
    if err != nil {
        t.Fatal(err)
    }

    response, err := http.DefaultClient.Do(request)
    if err != nil {
        t.Fatal(err)
    }
    defer response.Body.Close()

    exp := ""
    got := response.Header.Get("Content-Encoding")
    if exp != got {
        t.Fatalf("expected: %v, got: %v\n", exp, got)
    }
}
// compareListIgnoreOrder reports (as an error) whether got and exp contain the
// same elements ignoring order. cmpF is the element comparator; when nil,
// reflect.DeepEqual is used.
//
// Fixes relative to the previous version: each element of got may now match at
// most one element of exp (matched-index tracking), so duplicate-containing
// lists such as got=[a,b] vs exp=[a,a] no longer compare as equal; the default
// comparator is also simplified to return DeepEqual directly.
func compareListIgnoreOrder(got, exp []interface{}, cmpF func(got, exp interface{}) bool) error {
    if len(got) != len(exp) {
        return fmt.Errorf("unequal lists ignoring order:\ngot\n%s\nexp\n%s\n", spew.Sdump(got), spew.Sdump(exp))
    }
    if cmpF == nil {
        cmpF = func(got, exp interface{}) bool {
            return reflect.DeepEqual(got, exp)
        }
    }
    // Track which entries of got have already been consumed by a match so the
    // comparison is a true multiset comparison.
    matched := make([]bool, len(got))
    for _, e := range exp {
        found := false
        for i, g := range got {
            if !matched[i] && cmpF(g, e) {
                matched[i] = true
                found = true
                break
            }
        }
        if !found {
            return fmt.Errorf("unequal lists ignoring order:\ngot\n%s\nexp\n%s\n", spew.Sdump(got), spew.Sdump(exp))
        }
    }
    return nil
}
| [
"\"PYTHONPATH\"",
"\"PYTHONPATH\"",
"\"PYTHONPATH\""
]
| []
| [
"PYTHONPATH"
]
| [] | ["PYTHONPATH"] | go | 1 | 0 | |
trains_agent/helper/package/poetry_api.py | from copy import deepcopy
from functools import wraps
import attr
import sys
import os
from pathlib2 import Path
from trains_agent.helper.process import Argv, DEVNULL, check_if_command_exists
from trains_agent.session import Session, POETRY
def prop_guard(prop, log_prop=None):
    """Build a decorator that only calls the wrapped method when *prop* is truthy.

    :param prop: a ``property`` evaluated on ``self``; when it is falsy the
        wrapped method is skipped and ``None`` is returned.
    :param log_prop: optional ``property`` returning a logger on ``self``;
        when given, every call logs whether the guard allowed it through.
    """
    assert isinstance(prop, property)
    assert not log_prop or isinstance(log_prop, property)

    def decorator(func):
        # Pre-render the static parts of the debug message once per function.
        debug_fmt = "%s:%s calling {}, {} = %s".format(
            func.__name__, prop.fget.__name__
        )

        @wraps(func)
        def guarded(self, *args, **kwargs):
            guard_value = prop.fget(self)
            if log_prop:
                logger = log_prop.fget(self)
                logger.debug(
                    debug_fmt,
                    type(self).__name__,
                    "" if guard_value else " not",
                    guard_value,
                )
            if not guard_value:
                return None
            return func(self, *args, **kwargs)

        return guarded

    return decorator
class PoetryConfig:
    """Runs the ``poetry`` CLI on behalf of the agent session.

    Commands are only executed when the session's package manager is set to
    poetry (see :attr:`enabled`); guarded methods silently no-op otherwise.
    """

    def __init__(self, session, interpreter=None):
        # type: (Session, str) -> ()
        self.session = session
        self._log = session.get_logger(__name__)
        # Interpreter used for `python -m poetry` when the `poetry`
        # executable is not on PATH.
        self._python = interpreter or sys.executable
        self._initialized = False

    @property
    def log(self):
        # Session-provided logger for this module.
        return self._log

    @property
    def enabled(self):
        # True when the agent configuration selects poetry as the package manager.
        return self.session.config["agent.package_manager.type"] == POETRY

    # Guard: decorated methods run only when `enabled` is truthy, logging the
    # decision through `log` (see prop_guard above).
    _guard_enabled = prop_guard(enabled, log)

    def run(self, *args, **kwargs):
        """Execute a poetry command and return its output.

        ``func`` (keyword, default ``Argv.get_output``) selects how the command
        is executed; remaining kwargs are forwarded to it. A copy of the
        current environment is used, scrubbed of any active virtualenv/conda
        markers so poetry manages its own environment.
        """
        func = kwargs.pop("func", Argv.get_output)
        kwargs.setdefault("stdin", DEVNULL)
        kwargs['env'] = deepcopy(os.environ)
        if 'VIRTUAL_ENV' in kwargs['env'] or 'CONDA_PREFIX' in kwargs['env']:
            # Drop the agent's own environment markers so the child poetry
            # process does not inherit them.
            kwargs['env'].pop('VIRTUAL_ENV', None)
            kwargs['env'].pop('CONDA_PREFIX', None)
            kwargs['env'].pop('PYTHONPATH', None)
            if hasattr(sys, "real_prefix") and hasattr(sys, "base_prefix"):
                # Old-style virtualenv: swap the first occurrence of the base
                # prefix on PATH for the real interpreter prefix — presumably
                # to escape the venv's bin dir; TODO confirm intent.
                path = ':'+kwargs['env']['PATH']
                path = path.replace(':'+sys.base_prefix, ':'+sys.real_prefix, 1)
                kwargs['env']['PATH'] = path
        # Prefer the `poetry` executable when available, else `python -m poetry`.
        if check_if_command_exists("poetry"):
            argv = Argv("poetry", *args)
        else:
            argv = Argv(self._python, "-m", "poetry", *args)
        self.log.debug("running: %s", argv)
        return func(argv, **kwargs)

    def _config(self, *args, **kwargs):
        # Shorthand for `poetry config ...`.
        return self.run("config", *args, **kwargs)

    @_guard_enabled
    def initialize(self, cwd=None):
        """Configure poetry (once) to create its virtualenv inside the project.

        No-op unless poetry is the selected package manager (guarded).
        """
        if not self._initialized:
            self._initialized = True
            try:
                self._config("--local", "virtualenvs.in-project",  "true", cwd=cwd)
                # self._config("repositories.{}".format(self.REPO_NAME), PYTHON_INDEX)
                # self._config("http-basic.{}".format(self.REPO_NAME), *PYTHON_INDEX_CREDENTIALS)
            except Exception as ex:
                print("Exception: {}\nError: Failed configuring Poetry virtualenvs.in-project".format(ex))
                raise

    def get_api(self, path):
        # type: (Path) -> PoetryAPI
        """Return a :class:`PoetryAPI` bound to the project directory *path*."""
        return PoetryAPI(self, path)
@attr.s
class PoetryAPI(object):
    """Project-level poetry operations bound to a single directory."""

    config = attr.ib(type=PoetryConfig)
    path = attr.ib(type=Path, converter=Path)

    # Presence of either file marks the directory as a poetry project.
    INDICATOR_FILES = "pyproject.toml", "poetry.lock"

    def install(self):
        # type: () -> bool
        """Run ``poetry install``; return True when poetry handled the install."""
        if not self.enabled:
            return False
        self.config.run("install", "-n", cwd=str(self.path), func=Argv.check_call)
        return True

    @property
    def enabled(self):
        """True when poetry is selected and an indicator file exists here."""
        return self.config.enabled and (
            any((self.path / marker).exists() for marker in self.INDICATOR_FILES)
        )

    def freeze(self):
        """Return installed packages as pip-style requirement lines."""
        requirements = []
        for raw_line in self.config.run("show", cwd=str(self.path)).splitlines():
            columns = [token for token in raw_line.split(' ') if token]
            requirements.append(
                columns[0] + '==' + columns[1] + ' # ' + ' '.join(columns[2:])
            )
        return {"pip": requirements}

    def get_python_command(self, extra):
        """Build the command line that runs python through poetry."""
        if check_if_command_exists("poetry"):
            return Argv("poetry", "run", "python", *extra)
        return Argv(self.config._python, "-m", "poetry", "run", "python", *extra)

    def upgrade_pip(self, *args, **kwargs):
        # No-op for the poetry backend.
        pass

    def set_selected_package_manager(self, *args, **kwargs):
        # No-op for the poetry backend.
        pass

    def out_of_scope_install_package(self, *args, **kwargs):
        # No-op for the poetry backend.
        pass

    def install_from_file(self, *args, **kwargs):
        # No-op for the poetry backend.
        pass
| []
| []
| []
| [] | [] | python | 0 | 0 | |
config.go | // Copyright 2018 Telefónica
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"os"
"github.com/confluentinc/confluent-kafka-go/kafka"
"github.com/sirupsen/logrus"
)
// Runtime configuration defaults; each value can be overridden from the
// environment in init() below.
var (
    // kafkaBrokerList is the broker bootstrap list (KAFKA_BROKER_LIST).
    kafkaBrokerList = "kafka:9092"
    // kafkaTopic is the destination topic for metrics (KAFKA_TOPIC).
    kafkaTopic      = "metrics"
    // kafkaPartition targets kafkaTopic, letting the client pick the partition.
    kafkaPartition  = kafka.TopicPartition{
        Topic:     &kafkaTopic,
        Partition: kafka.PartitionAny,
    }
    // kafkaCompression is the producer compression codec (KAFKA_COMPRESSION).
    kafkaCompression      = "none"
    // kafkaBatchNumMessages is the producer batch size (KAFKA_BATCH_NUM_MESSAGES).
    kafkaBatchNumMessages = "10000"
    // serializer converts metrics to the configured wire format
    // (SERIALIZATION_FORMAT); set in init().
    serializer Serializer
)
// init configures JSON logging to stdout and applies environment-variable
// overrides for the Kafka settings and the metrics serializer.
func init() {
    logrus.SetFormatter(&logrus.JSONFormatter{})
    logrus.SetOutput(os.Stdout)

    // BUG FIX: the condition was previously `value == ""`, which ignored any
    // configured LOG_LEVEL and only (re)set the level when the variable was
    // absent. Apply the level only when the variable is actually set.
    if value := os.Getenv("LOG_LEVEL"); value != "" {
        logrus.SetLevel(parseLogLevel(value))
    }

    if value := os.Getenv("KAFKA_BROKER_LIST"); value != "" {
        kafkaBrokerList = value
    }

    if value := os.Getenv("KAFKA_TOPIC"); value != "" {
        kafkaTopic = value
        // Rebuild the partition so it points at the overridden topic.
        kafkaPartition = kafka.TopicPartition{
            Topic:     &kafkaTopic,
            Partition: kafka.PartitionAny,
        }
    }

    if value := os.Getenv("KAFKA_COMPRESSION"); value != "" {
        kafkaCompression = value
    }

    if value := os.Getenv("KAFKA_BATCH_NUM_MESSAGES"); value != "" {
        kafkaBatchNumMessages = value
    }

    var err error
    serializer, err = parseSerializationFormat(os.Getenv("SERIALIZATION_FORMAT"))
    if err != nil {
        logrus.WithError(err).Fatalln("couldn't create a metrics serializer")
    }
}
// parseLogLevel translates a textual level into a logrus level, falling back
// to Info (with a warning) when the text is not a recognized level name.
func parseLogLevel(value string) logrus.Level {
    parsed, parseErr := logrus.ParseLevel(value)
    if parseErr == nil {
        return parsed
    }
    logrus.WithField("log-level-value", value).Warningln("invalid log level from env var, using info")
    return logrus.InfoLevel
}
func parseSerializationFormat(value string) (Serializer, error) {
switch value {
case "json":
return NewJSONSerializer()
case "avro-json":
return NewAvroJSONSerializer("schemas/metric.avsc")
default:
logrus.WithField("serialization-format-value", value).Warningln("invalid serialization format, using json")
return NewJSONSerializer()
}
}
| [
"\"LOG_LEVEL\"",
"\"KAFKA_BROKER_LIST\"",
"\"KAFKA_TOPIC\"",
"\"KAFKA_COMPRESSION\"",
"\"KAFKA_BATCH_NUM_MESSAGES\"",
"\"SERIALIZATION_FORMAT\""
]
| []
| [
"KAFKA_COMPRESSION",
"SERIALIZATION_FORMAT",
"KAFKA_TOPIC",
"LOG_LEVEL",
"KAFKA_BROKER_LIST",
"KAFKA_BATCH_NUM_MESSAGES"
]
| [] | ["KAFKA_COMPRESSION", "SERIALIZATION_FORMAT", "KAFKA_TOPIC", "LOG_LEVEL", "KAFKA_BROKER_LIST", "KAFKA_BATCH_NUM_MESSAGES"] | go | 6 | 0 | |
uocli/auth/oauth.py | import os
import sys
from json import JSONDecodeError
from pathlib import Path
from requests_oauthlib import OAuth2Session
from uocli import config
from uocli.auth.webserver import get_authorization_code
import webbrowser
import requests
import time
import json
HAS_GUI = True if (sys.platform == 'win32' or 'DISPLAY' in os.environ) else False
SUPPORTED_CLIENTS = config['supported_clients']
AUTH_SERVER_URL = config['auth_server']['url']
class ClientNotSupportedException(Exception):
pass
class OAuth(object):
def __init__(self, client_id, client_secret, realm=None, username=None, password=None):
if not HAS_GUI and not (username or password):
raise ClientNotSupportedException("You must either have an active X-Server session or provide username "
"and password for authentication")
self.supported_clients = SUPPORTED_CLIENTS
self.realm = realm if realm else "UO"
self.muon_path = Path(Path.home() / ".muon")
self.token_file = Path(self.muon_path / "auth_tokens")
self.redirect_uri = "http://127.0.0.1:8239"
self.keycloak_token_url = f"{AUTH_SERVER_URL}/auth/realms/{self.realm}/protocol/openid-connect/token"
self.keycloak_client_id = client_id
self.keycloak_client_secret = client_secret
self.username = username
self.password = password
if self.keycloak_client_id not in self.supported_clients:
raise ClientNotSupportedException
def authenticate(self, token_info=None):
post_data = {}
if token_info and token_info['refresh_expires_in'] > time.time() + 10:
post_data = {'grant_type': 'refresh_token',
'client_id': self.keycloak_client_id,
'client_secret': self.keycloak_client_secret,
'refresh_token': token_info['refresh_token']}
else:
if HAS_GUI:
oauth_session = OAuth2Session(client_id=self.keycloak_client_id, redirect_uri=self.redirect_uri,
scope="email")
authorization_url, state = oauth_session.authorization_url(
f"{AUTH_SERVER_URL}/auth/realms/{self.realm}/protocol/openid-connect/auth",
access_type="offline", )
webbrowser.open_new(url=authorization_url)
auth_code = get_authorization_code()
post_data = {'grant_type': 'authorization_code',
'code': auth_code,
'redirect_uri': self.redirect_uri}
else:
post_data = {'grant_type': 'password',
'username': self.username, 'password': self.password}
token_response = requests.post(self.keycloak_token_url,
data=post_data,
verify=True,
allow_redirects=False,
auth=(self.keycloak_client_id, self.keycloak_client_secret))
access_token_response = json.loads(token_response.text)
if 'error' in access_token_response:
if access_token_response.get('error_description', "") == "Session not active":
# Lets try full Auth
self.authenticate()
else:
raise ClientNotSupportedException(access_token_response)
self.save_tokens(access_token_response)
def get_tokens(self):
reauth = False
token_info = None
if self.token_file.is_file():
with open(self.token_file, 'r') as fh:
try:
current_token_info = json.load(fh)
if self.keycloak_client_id not in current_token_info.keys():
reauth = True
token_info = current_token_info[self.keycloak_client_id]
if float(token_info['expires_in']) < time.time():
reauth = True
except (KeyError, JSONDecodeError):
reauth = True
if reauth:
self.authenticate(token_info)
return self.get_tokens()
return token_info
else:
self.authenticate()
return self.get_tokens()
def save_tokens(self, token_info):
current_time = int(time.time())
Path(self.muon_path).mkdir(parents=True, exist_ok=True)
self.token_file.touch()
# Update the expiry timeout to epoch time
token_info['expires_in'] += current_time - 5 # 5 seconds less just in case we are slow to get this data
token_info['refresh_expires_in'] += current_time - 5 # 5 seconds less just in case we are slow to get this data
token_info.pop('scope')
token_info.pop('session_state')
token_info = {self.keycloak_client_id: token_info}
# Update the tokens to file
current_token_info = {}
with open(self.token_file, 'r') as fh:
try:
current_token_info = json.load(fh)
except JSONDecodeError:
pass
current_token_info.update(token_info)
with open(Path(self.token_file), 'w', encoding='utf-8') as fh:
json.dump(current_token_info, fh, ensure_ascii=False, indent=4)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
bankCallout/service.go | package bankCallout
import (
"context"
"database/sql"
"fmt"
_ "github.com/denisenkom/go-mssqldb"
"github.com/pkg/errors"
"github.com/weAutomateEverything/go2hal/firstCall"
"github.com/weAutomateEverything/go2hal/telegram"
"log"
"os"
"strings"
"time"
)
type Service interface {
setGroup(ctx context.Context, chat uint32, group string) (name string, number string, err error)
getGroup(ctx context.Context, chat uint32) (string, error)
}
func NewService(store Store, telegram telegram.Service, telegramStore telegram.Store) firstCall.CalloutFunction {
return bankCallout{
store: store,
telegram: telegram,
telegramStore: telegramStore,
}
}
type bankCallout struct {
store Store
telegram telegram.Service
telegramStore telegram.Store
}
func (s bankCallout) Escalate(ctx context.Context, count int, chat uint32) (name string, number string, err error) {
if count < 3 {
return s.GetFirstCallDetails(ctx, chat)
}
if count < 6 {
return s.GetSecondCallDetails(ctx, chat)
}
if count < 9 {
return s.GetManagementDetails(ctx, chat)
}
err = errors.New("Unable to escalate any further. Giving up. ")
return
}
func (s bankCallout) Configured(chat uint32) bool {
_, err := s.store.getCalloutGroup(chat)
return err != nil
}
func (s bankCallout) GetSecondCallDetails(ctx context.Context, chat uint32) (name string, number string, err error) {
group, err := s.store.getCalloutGroup(chat)
if err != nil {
return
}
return s.getCallout(s.getCalloutString(group, "SecondName", "Primary2nd", "CalloutListingSecondCall"))
}
func (s bankCallout) GetFirstCallDetails(ctx context.Context, chat uint32) (name string, number string, err error) {
group, err := s.store.getCalloutGroup(chat)
if err != nil {
return
}
return s.getCallout(s.getCalloutString(group, "FirstName", "Primary1st", "CalloutListingFirstCall"))
}
func (s bankCallout) GetManagementDetails(ctx context.Context, chat uint32) (name string, number string, err error) {
group, err := s.store.getCalloutGroup(chat)
if err != nil {
return
}
return s.getCallout(s.getManagementCalloutString(group))
}
func (s bankCallout) setGroup(ctx context.Context, chat uint32, group string) (name string, number string, err error) {
name, number, err = s.getCallout(s.getCalloutString(group, "FirstName", "Primary1st", "CalloutListingFirstCall"))
if err != nil {
return
}
err = s.store.setCallout(chat, group)
if err != nil {
return
}
g, err := s.telegramStore.GetRoomKey(chat)
if err != nil {
return
}
s.telegram.SendMessage(ctx, g, fmt.Sprintf("Your callout group has been successfully changed to %v. On firstcall is %v, %v", group, name, number), 0)
return
}
func (s bankCallout) getGroup(ctx context.Context, chat uint32) (string, error) {
return s.store.getCalloutGroup(chat)
}
func (s bankCallout) getCallout(query string) (name string, number string, err error) {
name, number = "", ""
c := fmt.Sprintf("server=%v;user id=%v;password=%v;encrypt=disable;database=%v", getCalloutDbServer(), getCalloutDbUser(), getCalloutDbPassword(), getCalloutDBSchema())
log.Println(c)
db, err := sql.Open("mssql", c)
if err != nil {
return
}
defer db.Close()
log.Println(s)
stmt, err := db.Query(query)
if err != nil {
return
}
defer stmt.Close()
stmt.Next()
err = stmt.Scan(&name, &number)
if err != nil {
return
}
number = strings.Replace(number, " ", "", -1)
number = strings.Replace(number, "-", "", -1)
if strings.HasPrefix(number, "0") {
number = strings.Replace(number, "0", "+27", 1)
}
return
}
func (s bankCallout) getManagementCalloutString(group string) string {
t := time.Now().Format("2006-01-02 15:04:05")
return fmt.Sprintf("SELECT ManagementFirstName,Primary1stManagement FROM vCalloutManagement where ((DateFrom < '%v' and DateTo > '%v') or Always = true) and Team = '%v'", t, t, group)
}
func (s bankCallout) getCalloutString(group string, nameFields, NumberField, table string) string {
t := time.Now().Format("2006-01-02 15:04:05")
return fmt.Sprintf("SELECT %v,%v FROM %v where DateFrom < '%v' and DateTo > '%v' and Team = '%v'", nameFields, NumberField, table, t, t, group)
}
func getCalloutDbServer() string {
return os.Getenv("CALLOUT_DB_SERVER")
}
func getCalloutDbUser() string {
return os.Getenv("CALLOUT_DB_USER")
}
func getCalloutDbPassword() string {
return os.Getenv("CALLOUT_DB_PASSWORD")
}
func getCalloutDBSchema() string {
return os.Getenv("CALLOUT_DB_SCHEMA")
}
| [
"\"CALLOUT_DB_SERVER\"",
"\"CALLOUT_DB_USER\"",
"\"CALLOUT_DB_PASSWORD\"",
"\"CALLOUT_DB_SCHEMA\""
]
| []
| [
"CALLOUT_DB_USER",
"CALLOUT_DB_PASSWORD",
"CALLOUT_DB_SERVER",
"CALLOUT_DB_SCHEMA"
]
| [] | ["CALLOUT_DB_USER", "CALLOUT_DB_PASSWORD", "CALLOUT_DB_SERVER", "CALLOUT_DB_SCHEMA"] | go | 4 | 0 | |
api/integrations/perforce/git_p4.py | #!/usr/bin/env python2.7
#
# git-p4.py -- A tool for bidirectional operation between a Perforce depot and git.
#
# Author: Simon Hausmann <[email protected]>
# Copyright: 2007 Simon Hausmann <[email protected]>
# 2007 Trolltech ASA
# License: MIT <http://www.opensource.org/licenses/mit-license.php>
#
import sys
if sys.hexversion < 0x02040000:
# The limiter is the subprocess module
sys.stderr.write("git-p4: requires Python 2.4 or later.\n")
sys.exit(1)
import os
import optparse
import marshal
import subprocess
import tempfile
import time
import platform
import re
import shutil
import stat
try:
from subprocess import CalledProcessError
except ImportError:
# from python2.7:subprocess.py
# Exception classes used by this module.
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() returns
a non-zero exit status. The exit status will be stored in the
returncode attribute."""
def __init__(self, returncode, cmd):
self.returncode = returncode
self.cmd = cmd
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
verbose = False
# Only labels/tags matching this will be imported/exported
defaultLabelRegexp = r'[a-zA-Z0-9_\-.]+$'
def p4_build_cmd(cmd):
"""Build a suitable p4 command line.
This consolidates building and returning a p4 command line into one
location. It means that hooking into the environment, or other configuration
can be done more easily.
"""
real_cmd = ["p4"]
if isinstance(cmd,basestring):
real_cmd = ' '.join(real_cmd) + ' ' + cmd
else:
real_cmd += cmd
return real_cmd
def chdir(path, is_client_path=False):
"""Do chdir to the given path, and set the PWD environment
variable for use by P4. It does not look at getcwd() output.
Since we're not using the shell, it is necessary to set the
PWD environment variable explicitly.
Normally, expand the path to force it to be absolute. This
addresses the use of relative path names inside P4 settings,
e.g. P4CONFIG=.p4config. P4 does not simply open the filename
as given; it looks for .p4config using PWD.
If is_client_path, the path was handed to us directly by p4,
and may be a symbolic link. Do not call os.getcwd() in this
case, because it will cause p4 to think that PWD is not inside
the client path.
"""
os.chdir(path)
if not is_client_path:
path = os.getcwd()
os.environ['PWD'] = path
def die(msg):
if verbose:
raise Exception(msg)
else:
sys.stderr.write(msg + "\n")
sys.exit(1)
def write_pipe(c, stdin):
if verbose:
sys.stderr.write('Writing pipe: %s\n' % str(c))
expand = isinstance(c,basestring)
p = subprocess.Popen(c, stdin=subprocess.PIPE, shell=expand)
pipe = p.stdin
val = pipe.write(stdin)
pipe.close()
if p.wait():
die('Command failed: %s' % str(c))
return val
def p4_write_pipe(c, stdin):
real_cmd = p4_build_cmd(c)
return write_pipe(real_cmd, stdin)
def read_pipe(c, ignore_error=False):
if verbose:
sys.stderr.write('Reading pipe: %s\n' % str(c))
expand = isinstance(c,basestring)
p = subprocess.Popen(c, stdout=subprocess.PIPE, shell=expand)
pipe = p.stdout
val = pipe.read()
if p.wait() and not ignore_error:
die('Command failed: %s' % str(c))
return val
def p4_read_pipe(c, ignore_error=False):
real_cmd = p4_build_cmd(c)
return read_pipe(real_cmd, ignore_error)
def read_pipe_lines(c):
if verbose:
sys.stderr.write('Reading pipe: %s\n' % str(c))
expand = isinstance(c, basestring)
p = subprocess.Popen(c, stdout=subprocess.PIPE, shell=expand)
pipe = p.stdout
val = pipe.readlines()
if pipe.close() or p.wait():
die('Command failed: %s' % str(c))
return val
def p4_read_pipe_lines(c):
"""Specifically invoke p4 on the command supplied. """
real_cmd = p4_build_cmd(c)
return read_pipe_lines(real_cmd)
def p4_has_command(cmd):
"""Ask p4 for help on this command. If it returns an error, the
command does not exist in this version of p4."""
real_cmd = p4_build_cmd(["help", cmd])
p = subprocess.Popen(real_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.communicate()
return p.returncode == 0
def p4_has_move_command():
"""See if the move command exists, that it supports -k, and that
it has not been administratively disabled. The arguments
must be correct, but the filenames do not have to exist. Use
ones with wildcards so even if they exist, it will fail."""
if not p4_has_command("move"):
return False
cmd = p4_build_cmd(["move", "-k", "@from", "@to"])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = p.communicate()
# return code will be 1 in either case
if err.find("Invalid option") >= 0:
return False
if err.find("disabled") >= 0:
return False
# assume it failed because @... was invalid changelist
return True
def system(cmd):
expand = isinstance(cmd,basestring)
if verbose:
sys.stderr.write("executing %s\n" % str(cmd))
retcode = subprocess.call(cmd, shell=expand)
if retcode:
raise CalledProcessError(retcode, cmd)
def p4_system(cmd):
"""Specifically invoke p4 as the system command. """
real_cmd = p4_build_cmd(cmd)
expand = isinstance(real_cmd, basestring)
retcode = subprocess.call(real_cmd, shell=expand)
if retcode:
raise CalledProcessError(retcode, real_cmd)
_p4_version_string = None
def p4_version_string():
"""Read the version string, showing just the last line, which
hopefully is the interesting version bit.
$ p4 -V
Perforce - The Fast Software Configuration Management System.
Copyright 1995-2011 Perforce Software. All rights reserved.
Rev. P4/NTX86/2011.1/393975 (2011/12/16).
"""
global _p4_version_string
if not _p4_version_string:
a = p4_read_pipe_lines(["-V"])
_p4_version_string = a[-1].rstrip()
return _p4_version_string
def p4_integrate(src, dest):
p4_system(["integrate", "-Dt", wildcard_encode(src), wildcard_encode(dest)])
def p4_sync(f, *options):
p4_system(["sync"] + list(options) + [wildcard_encode(f)])
def p4_add(f):
# forcibly add file names with wildcards
if wildcard_present(f):
p4_system(["add", "-f", f])
else:
p4_system(["add", f])
def p4_delete(f):
p4_system(["delete", wildcard_encode(f)])
def p4_edit(f):
p4_system(["edit", wildcard_encode(f)])
def p4_revert(f):
p4_system(["revert", wildcard_encode(f)])
def p4_reopen(type, f):
p4_system(["reopen", "-t", type, wildcard_encode(f)])
def p4_move(src, dest):
p4_system(["move", "-k", wildcard_encode(src), wildcard_encode(dest)])
def p4_describe(change):
"""Make sure it returns a valid result by checking for
the presence of field "time". Return a dict of the
results."""
ds = p4CmdList(["describe", "-s", str(change)])
if len(ds) != 1:
die("p4 describe -s %d did not return 1 result: %s" % (change, str(ds)))
d = ds[0]
if "p4ExitCode" in d:
die("p4 describe -s %d exited with %d: %s" % (change, d["p4ExitCode"],
str(d)))
if "code" in d:
if d["code"] == "error":
die("p4 describe -s %d returned error code: %s" % (change, str(d)))
if "time" not in d:
die("p4 describe -s %d returned no \"time\": %s" % (change, str(d)))
return d
#
# Canonicalize the p4 type and return a tuple of the
# base type, plus any modifiers. See "p4 help filetypes"
# for a list and explanation.
#
def split_p4_type(p4type):
p4_filetypes_historical = {
"ctempobj": "binary+Sw",
"ctext": "text+C",
"cxtext": "text+Cx",
"ktext": "text+k",
"kxtext": "text+kx",
"ltext": "text+F",
"tempobj": "binary+FSw",
"ubinary": "binary+F",
"uresource": "resource+F",
"uxbinary": "binary+Fx",
"xbinary": "binary+x",
"xltext": "text+Fx",
"xtempobj": "binary+Swx",
"xtext": "text+x",
"xunicode": "unicode+x",
"xutf16": "utf16+x",
}
if p4type in p4_filetypes_historical:
p4type = p4_filetypes_historical[p4type]
mods = ""
s = p4type.split("+")
base = s[0]
mods = ""
if len(s) > 1:
mods = s[1]
return (base, mods)
#
# return the raw p4 type of a file (text, text+ko, etc)
#
def p4_type(file):
results = p4CmdList(["fstat", "-T", "headType", file])
return results[0]['headType']
#
# Given a type base and modifier, return a regexp matching
# the keywords that can be expanded in the file
#
def p4_keywords_regexp_for_type(base, type_mods):
if base in ("text", "unicode", "binary"):
kwords = None
if "ko" in type_mods:
kwords = 'Id|Header'
elif "k" in type_mods:
kwords = 'Id|Header|Author|Date|DateTime|Change|File|Revision'
else:
return None
pattern = r"""
\$ # Starts with a dollar, followed by...
(%s) # one of the keywords, followed by...
(:[^$\n]+)? # possibly an old expansion, followed by...
\$ # another dollar
""" % kwords
return pattern
else:
return None
#
# Given a file, return a regexp matching the possible
# RCS keywords that will be expanded, or None for files
# with kw expansion turned off.
#
def p4_keywords_regexp_for_file(file):
if not os.path.exists(file):
return None
else:
(type_base, type_mods) = split_p4_type(p4_type(file))
return p4_keywords_regexp_for_type(type_base, type_mods)
def setP4ExecBit(file, mode):
# Reopens an already open file and changes the execute bit to match
# the execute bit setting in the passed in mode.
p4Type = "+x"
if not isModeExec(mode):
p4Type = getP4OpenedType(file)
p4Type = re.sub('^([cku]?)x(.*)', '\\1\\2', p4Type)
p4Type = re.sub('(.*?\+.*?)x(.*?)', '\\1\\2', p4Type)
if p4Type[-1] == "+":
p4Type = p4Type[0:-1]
p4_reopen(p4Type, file)
def getP4OpenedType(file):
# Returns the perforce file type for the given file.
result = p4_read_pipe(["opened", wildcard_encode(file)])
match = re.match(".*\((.+)\)\r?$", result)
if match:
return match.group(1)
else:
die("Could not determine file type for %s (result: '%s')" % (file, result))
# Return the set of all p4 labels
def getP4Labels(depotPaths):
labels = set()
if isinstance(depotPaths,basestring):
depotPaths = [depotPaths]
for l in p4CmdList(["labels"] + ["%s..." % p for p in depotPaths]):
label = l['label']
labels.add(label)
return labels
# Return the set of all git tags
def getGitTags():
gitTags = set()
for line in read_pipe_lines(["git", "tag"]):
tag = line.strip()
gitTags.add(tag)
return gitTags
def diffTreePattern():
# This is a simple generator for the diff tree regex pattern. This could be
# a class variable if this and parseDiffTreeEntry were a part of a class.
pattern = re.compile(':(\d+) (\d+) (\w+) (\w+) ([A-Z])(\d+)?\t(.*?)((\t(.*))|$)')
while True:
yield pattern
def parseDiffTreeEntry(entry):
"""Parses a single diff tree entry into its component elements.
See git-diff-tree(1) manpage for details about the format of the diff
output. This method returns a dictionary with the following elements:
src_mode - The mode of the source file
dst_mode - The mode of the destination file
src_sha1 - The sha1 for the source file
dst_sha1 - The sha1 fr the destination file
status - The one letter status of the diff (i.e. 'A', 'M', 'D', etc)
status_score - The score for the status (applicable for 'C' and 'R'
statuses). This is None if there is no score.
src - The path for the source file.
dst - The path for the destination file. This is only present for
copy or renames. If it is not present, this is None.
If the pattern is not matched, None is returned."""
match = diffTreePattern().next().match(entry)
if match:
return {
'src_mode': match.group(1),
'dst_mode': match.group(2),
'src_sha1': match.group(3),
'dst_sha1': match.group(4),
'status': match.group(5),
'status_score': match.group(6),
'src': match.group(7),
'dst': match.group(10)
}
return None
def isModeExec(mode):
# Returns True if the given git mode represents an executable file,
# otherwise False.
return mode[-3:] == "755"
def isModeExecChanged(src_mode, dst_mode):
return isModeExec(src_mode) != isModeExec(dst_mode)
def p4CmdList(cmd, stdin=None, stdin_mode='w+b', cb=None):
if isinstance(cmd,basestring):
cmd = "-G " + cmd
expand = True
else:
cmd = ["-G"] + cmd
expand = False
cmd = p4_build_cmd(cmd)
if verbose:
sys.stderr.write("Opening pipe: %s\n" % str(cmd))
# Use a temporary file to avoid deadlocks without
# subprocess.communicate(), which would put another copy
# of stdout into memory.
stdin_file = None
if stdin is not None:
stdin_file = tempfile.TemporaryFile(prefix='p4-stdin', mode=stdin_mode)
if isinstance(stdin,basestring):
stdin_file.write(stdin)
else:
for i in stdin:
stdin_file.write(i + '\n')
stdin_file.flush()
stdin_file.seek(0)
p4 = subprocess.Popen(cmd,
shell=expand,
stdin=stdin_file,
stdout=subprocess.PIPE)
result = []
try:
while True:
entry = marshal.load(p4.stdout)
if cb is not None:
cb(entry)
else:
result.append(entry)
except EOFError:
pass
exitCode = p4.wait()
if exitCode != 0:
entry = {}
entry["p4ExitCode"] = exitCode
result.append(entry)
return result
def p4Cmd(cmd):
list = p4CmdList(cmd)
result = {}
for entry in list:
result.update(entry)
return result;
def p4Where(depotPath):
if not depotPath.endswith("/"):
depotPath += "/"
depotPath = depotPath + "..."
outputList = p4CmdList(["where", depotPath])
output = None
for entry in outputList:
if "depotFile" in entry:
if entry["depotFile"] == depotPath:
output = entry
break
elif "data" in entry:
data = entry.get("data")
space = data.find(" ")
if data[:space] == depotPath:
output = entry
break
if output == None:
return ""
if output["code"] == "error":
return ""
clientPath = ""
if "path" in output:
clientPath = output.get("path")
elif "data" in output:
data = output.get("data")
lastSpace = data.rfind(" ")
clientPath = data[lastSpace + 1:]
if clientPath.endswith("..."):
clientPath = clientPath[:-3]
return clientPath
def currentGitBranch():
return read_pipe("git name-rev HEAD").split(" ")[1].strip()
def isValidGitDir(path):
if (os.path.exists(path + "/HEAD")
and os.path.exists(path + "/refs") and os.path.exists(path + "/objects")):
return True;
return False
def parseRevision(ref):
return read_pipe("git rev-parse %s" % ref).strip()
def branchExists(ref):
rev = read_pipe(["git", "rev-parse", "-q", "--verify", ref],
ignore_error=True)
return len(rev) > 0
def extractLogMessageFromGitCommit(commit):
logMessage = ""
## fixme: title is first line of commit, not 1st paragraph.
foundTitle = False
for log in read_pipe_lines("git cat-file commit %s" % commit):
if not foundTitle:
if len(log) == 1:
foundTitle = True
continue
logMessage += log
return logMessage
def extractSettingsGitLog(log):
values = {}
for line in log.split("\n"):
line = line.strip()
m = re.search (r"^ *\[git-p4: (.*)\]$", line)
if not m:
continue
assignments = m.group(1).split (':')
for a in assignments:
vals = a.split ('=')
key = vals[0].strip()
val = ('='.join (vals[1:])).strip()
if val.endswith ('\"') and val.startswith('"'):
val = val[1:-1]
values[key] = val
paths = values.get("depot-paths")
if not paths:
paths = values.get("depot-path")
if paths:
values['depot-paths'] = paths.split(',')
return values
def gitBranchExists(branch):
proc = subprocess.Popen(["git", "rev-parse", branch],
stderr=subprocess.PIPE, stdout=subprocess.PIPE);
return proc.wait() == 0;
_gitConfig = {}
def gitConfig(key):
if not _gitConfig.has_key(key):
cmd = [ "git", "config", key ]
s = read_pipe(cmd, ignore_error=True)
_gitConfig[key] = s.strip()
return _gitConfig[key]
def gitConfigBool(key):
"""Return a bool, using git config --bool. It is True only if the
variable is set to true, and False if set to false or not present
in the config."""
if not _gitConfig.has_key(key):
cmd = [ "git", "config", "--bool", key ]
s = read_pipe(cmd, ignore_error=True)
v = s.strip()
_gitConfig[key] = v == "true"
return _gitConfig[key]
def gitConfigList(key):
if not _gitConfig.has_key(key):
s = read_pipe(["git", "config", "--get-all", key], ignore_error=True)
_gitConfig[key] = s.strip().split(os.linesep)
return _gitConfig[key]
def p4BranchesInGit(branchesAreInRemotes=True):
"""Find all the branches whose names start with "p4/", looking
in remotes or heads as specified by the argument. Return
a dictionary of { branch: revision } for each one found.
The branch names are the short names, without any
"p4/" prefix."""
branches = {}
cmdline = "git rev-parse --symbolic "
if branchesAreInRemotes:
cmdline += "--remotes"
else:
cmdline += "--branches"
for line in read_pipe_lines(cmdline):
line = line.strip()
# only import to p4/
if not line.startswith('p4/'):
continue
# special symbolic ref to p4/master
if line == "p4/HEAD":
continue
# strip off p4/ prefix
branch = line[len("p4/"):]
branches[branch] = parseRevision(line)
return branches
def branch_exists(branch):
"""Make sure that the given ref name really exists."""
cmd = [ "git", "rev-parse", "--symbolic", "--verify", branch ]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, _ = p.communicate()
if p.returncode:
return False
# expect exactly one line of output: the branch name
return out.rstrip() == branch
def findUpstreamBranchPoint(head = "HEAD"):
branches = p4BranchesInGit()
# map from depot-path to branch name
branchByDepotPath = {}
for branch in branches.keys():
tip = branches[branch]
log = extractLogMessageFromGitCommit(tip)
settings = extractSettingsGitLog(log)
if settings.has_key("depot-paths"):
paths = ",".join(settings["depot-paths"])
branchByDepotPath[paths] = "remotes/p4/" + branch
settings = None
parent = 0
while parent < 65535:
commit = head + "~%s" % parent
log = extractLogMessageFromGitCommit(commit)
settings = extractSettingsGitLog(log)
if settings.has_key("depot-paths"):
paths = ",".join(settings["depot-paths"])
if branchByDepotPath.has_key(paths):
return [branchByDepotPath[paths], settings]
parent = parent + 1
return ["", settings]
def createOrUpdateBranchesFromOrigin(localRefPrefix = "refs/remotes/p4/", silent=True):
if not silent:
print ("Creating/updating branch(es) in %s based on origin branch(es)"
% localRefPrefix)
originPrefix = "origin/p4/"
for line in read_pipe_lines("git rev-parse --symbolic --remotes"):
line = line.strip()
if (not line.startswith(originPrefix)) or line.endswith("HEAD"):
continue
headName = line[len(originPrefix):]
remoteHead = localRefPrefix + headName
originHead = line
original = extractSettingsGitLog(extractLogMessageFromGitCommit(originHead))
if (not original.has_key('depot-paths')
or not original.has_key('change')):
continue
update = False
if not gitBranchExists(remoteHead):
if verbose:
print "creating %s" % remoteHead
update = True
else:
settings = extractSettingsGitLog(extractLogMessageFromGitCommit(remoteHead))
if settings.has_key('change') > 0:
if settings['depot-paths'] == original['depot-paths']:
originP4Change = int(original['change'])
p4Change = int(settings['change'])
if originP4Change > p4Change:
print ("%s (%s) is newer than %s (%s). "
"Updating p4 branch from origin."
% (originHead, originP4Change,
remoteHead, p4Change))
update = True
else:
print ("Ignoring: %s was imported from %s while "
"%s was imported from %s"
% (originHead, ','.join(original['depot-paths']),
remoteHead, ','.join(settings['depot-paths'])))
if update:
system("git update-ref %s %s" % (remoteHead, originHead))
def originP4BranchesExist():
return gitBranchExists("origin") or gitBranchExists("origin/p4") or gitBranchExists("origin/p4/master")
def p4ChangesForPaths(depotPaths, changeRange):
assert depotPaths
cmd = ['changes']
for p in depotPaths:
cmd += ["%s...%s" % (p, changeRange)]
output = p4_read_pipe_lines(cmd)
changes = {}
for line in output:
changeNum = int(line.split(" ")[1])
changes[changeNum] = True
changelist = changes.keys()
changelist.sort()
return changelist
def p4PathStartsWith(path, prefix):
# This method tries to remedy a potential mixed-case issue:
#
# If UserA adds //depot/DirA/file1
# and UserB adds //depot/dira/file2
#
# we may or may not have a problem. If you have core.ignorecase=true,
# we treat DirA and dira as the same directory
if gitConfigBool("core.ignorecase"):
return path.lower().startswith(prefix.lower())
return path.startswith(prefix)
def getClientSpec():
"""Look at the p4 client spec, create a View() object that contains
all the mappings, and return it."""
specList = p4CmdList("client -o")
if len(specList) != 1:
die('Output from "client -o" is %d lines, expecting 1' %
len(specList))
# dictionary of all client parameters
entry = specList[0]
# the //client/ name
client_name = entry["Client"]
# just the keys that start with "View"
view_keys = [ k for k in entry.keys() if k.startswith("View") ]
# hold this new View
view = View(client_name)
# append the lines, in order, to the view
for view_num in range(len(view_keys)):
k = "View%d" % view_num
if k not in view_keys:
die("Expected view key %s missing" % k)
view.append(entry[k])
return view
def getClientRoot():
"""Grab the client directory."""
output = p4CmdList("client -o")
if len(output) != 1:
die('Output from "client -o" is %d lines, expecting 1' % len(output))
entry = output[0]
if "Root" not in entry:
die('Client has no "Root"')
return entry["Root"]
#
# P4 wildcards are not allowed in filenames. P4 complains
# if you simply add them, but you can force it with "-f", in
# which case it translates them into %xx encoding internally.
#
def wildcard_decode(path):
# Search for and fix just these four characters. Do % last so
# that fixing it does not inadvertently create new %-escapes.
# Cannot have * in a filename in windows; untested as to
# what p4 would do in such a case.
if not platform.system() == "Windows":
path = path.replace("%2A", "*")
path = path.replace("%23", "#") \
.replace("%40", "@") \
.replace("%25", "%")
return path
def wildcard_encode(path):
# do % first to avoid double-encoding the %s introduced here
path = path.replace("%", "%25") \
.replace("*", "%2A") \
.replace("#", "%23") \
.replace("@", "%40")
return path
def wildcard_present(path):
m = re.search("[*#@%]", path)
return m is not None
class Command:
def __init__(self):
self.usage = "usage: %prog [options]"
self.needsGit = True
self.verbose = False
class P4UserMap:
    """Mixin mapping p4 user ids to git-style "Full Name <email>" identities.

    The map is fetched from the p4 server ("p4 users") and cached in a
    file under the user's home directory so later runs can avoid the
    server round trip.
    """
    def __init__(self):
        # True once self.users / self.emails were filled from the server.
        self.userMapFromPerforceServer = False
        # Lazily resolved p4 user id of the invoking user.
        self.myP4UserId = None

    def p4UserId(self):
        """Return the invoking p4 user's id, querying "p4 user -o" once."""
        if self.myP4UserId:
            return self.myP4UserId
        for entry in p4CmdList("user -o"):
            if 'User' in entry:
                self.myP4UserId = entry['User']
                return entry['User']
        die("Could not find your p4 user id")

    def p4UserIsMe(self, p4User):
        """True if the given p4 user is the invoking p4 user."""
        me = self.p4UserId()
        return bool(p4User) and p4User == me

    def getUserCacheFilename(self):
        """Path of the on-disk user-map cache file."""
        home = os.environ.get("HOME", os.environ.get("USERPROFILE"))
        return home + "/.gitp4-usercache.txt"

    def getUserMapFromPerforceServer(self):
        """Populate self.users/self.emails from "p4 users"; write the cache."""
        if self.userMapFromPerforceServer:
            return
        self.users = {}
        self.emails = {}

        for output in p4CmdList("users"):
            if "User" not in output:
                continue
            identity = output["FullName"] + " <" + output["Email"] + ">"
            self.users[output["User"]] = identity
            self.emails[output["Email"]] = output["User"]

        # One "user<TAB>identity" record per line; expandtabs(1) keeps any
        # embedded tabs from being confused with the field separator.
        records = ["%s\t%s\n" % (user.expandtabs(1), full.expandtabs(1))
                   for (user, full) in self.users.items()]
        open(self.getUserCacheFilename(), "wb").write(''.join(records))
        self.userMapFromPerforceServer = True

    def loadUserMapFromCache(self):
        """Load the user map from the cache file, else fall back to the server."""
        self.users = {}
        self.userMapFromPerforceServer = False
        try:
            cache = open(self.getUserCacheFilename(), "rb")
            lines = cache.readlines()
            cache.close()
        except IOError:
            # No usable cache yet: build it from the server instead.
            self.getUserMapFromPerforceServer()
            return
        for line in lines:
            entry = line.strip().split("\t")
            self.users[entry[0]] = entry[1]
class P4Debug(Command):
def __init__(self):
Command.__init__(self)
self.options = []
self.description = "A tool to debug the output of p4 -G."
self.needsGit = False
def run(self, args):
j = 0
for output in p4CmdList(args):
print 'Element: %d' % j
j += 1
print output
return True
class P4RollBack(Command):
    """git-p4 rollback: rewind p4-imported refs to a maximum changelist.

    Debugging aid for the multi-branch import; not meant for normal use.
    """
    def __init__(self):
        Command.__init__(self)
        self.options = [
            optparse.make_option("--local", dest="rollbackLocalBranches", action="store_true")
        ]
        self.description = "A tool to debug the multi-branch import. Don't use :)"
        # With --local, rewind refs/heads/* instead of refs/remotes/p4/*.
        self.rollbackLocalBranches = False

    def run(self, args):
        # Exactly one argument: the highest changelist number to keep.
        if len(args) != 1:
            return False
        maxChange = int(args[0])

        # Sanity-check that p4 is reachable before touching any refs.
        if "p4ExitCode" in p4Cmd("changes -m 1"):
            die("Problems executing p4");

        if self.rollbackLocalBranches:
            refPrefix = "refs/heads/"
            lines = read_pipe_lines("git rev-parse --symbolic --branches")
        else:
            refPrefix = "refs/remotes/"
            lines = read_pipe_lines("git rev-parse --symbolic --remotes")

        for line in lines:
            if self.rollbackLocalBranches or (line.startswith("p4/") and line != "p4/HEAD\n"):
                line = line.strip()
                ref = refPrefix + line
                log = extractLogMessageFromGitCommit(ref)
                settings = extractSettingsGitLog(log)

                depotPaths = settings['depot-paths']
                change = settings['change']

                changed = False

                # Branch never existed at or before maxChange: delete the ref.
                if len(p4Cmd("changes -m 1 " + ' '.join (['%s...@%s' % (p, maxChange)
                                                          for p in depotPaths]))) == 0:
                    print "Branch %s did not exist at change %s, deleting." % (ref, maxChange)
                    system("git update-ref -d %s `git rev-parse %s`" % (ref, ref))
                    continue

                # Step the ref back one commit at a time until the p4 change
                # number recorded in its log is <= maxChange.
                while change and int(change) > maxChange:
                    changed = True
                    if self.verbose:
                        print "%s is at %s ; rewinding towards %s" % (ref, change, maxChange)
                    system("git update-ref %s \"%s^\"" % (ref, ref))
                    log = extractLogMessageFromGitCommit(ref)
                    settings = extractSettingsGitLog(log)

                    depotPaths = settings['depot-paths']
                    change = settings['change']

                if changed:
                    print "%s rewound to %s" % (ref, change)

        return True
class P4Submit(Command, P4UserMap):
    """git-p4 submit: replay git commits as Perforce changelists."""

    # Valid values for --conflict / git-p4.conflict: what to do when a
    # commit does not apply cleanly to the p4 tree.
    conflict_behavior_choices = ("ask", "skip", "quit")

    def __init__(self):
        Command.__init__(self)
        P4UserMap.__init__(self)
        self.options = [
            optparse.make_option("--origin", dest="origin"),
            optparse.make_option("-M", dest="detectRenames", action="store_true"),
            # preserve the user, requires relevant p4 permissions
            optparse.make_option("--preserve-user", dest="preserveUser", action="store_true"),
            optparse.make_option("--export-labels", dest="exportLabels", action="store_true"),
            optparse.make_option("--dry-run", "-n", dest="dry_run", action="store_true"),
            optparse.make_option("--prepare-p4-only", dest="prepare_p4_only", action="store_true"),
            optparse.make_option("--conflict", dest="conflict_behavior",
                                 choices=self.conflict_behavior_choices),
            optparse.make_option("--branch", dest="branch"),
        ]
        self.description = "Submit changes from git to the perforce depot."
        self.usage += " [name of git branch to submit into perforce depot]"
        self.origin = ""
        self.detectRenames = False
        # Default comes from config; may be overridden by --preserve-user.
        self.preserveUser = gitConfigBool("git-p4.preserveUser")
        self.dry_run = False
        self.prepare_p4_only = False
        # None means: fall back to git-p4.conflict config, then "ask".
        self.conflict_behavior = None
        self.isWindows = (platform.system() == "Windows")
        self.exportLabels = False
        # "p4 move" exists only on newer servers; changes rename handling.
        self.p4HasMoveCommand = p4_has_move_command()
        self.branch = None
def check(self):
if len(p4CmdList("opened ...")) > 0:
die("You have files opened with perforce! Close them before starting the sync.")
def separate_jobs_from_description(self, message):
"""Extract and return a possible Jobs field in the commit
message. It goes into a separate section in the p4 change
specification.
A jobs line starts with "Jobs:" and looks like a new field
in a form. Values are white-space separated on the same
line or on following lines that start with a tab.
This does not parse and extract the full git commit message
like a p4 form. It just sees the Jobs: line as a marker
to pass everything from then on directly into the p4 form,
but outside the description section.
Return a tuple (stripped log message, jobs string)."""
m = re.search(r'^Jobs:', message, re.MULTILINE)
if m is None:
return (message, None)
jobtext = message[m.start():]
stripped_message = message[:m.start()].rstrip()
return (stripped_message, jobtext)
    def prepareLogMessage(self, template, message, jobs):
        """Edits the template returned from "p4 change -o" to insert
        the message in the Description field, and the jobs text in
        the Jobs field."""
        result = ""

        # True while we are inside (and discarding) the template's own
        # placeholder Description lines.
        inDescriptionSection = False

        for line in template.split("\n"):
            if line.startswith("#"):
                result += line + "\n"
                continue

            if inDescriptionSection:
                if line.startswith("Files:") or line.startswith("Jobs:"):
                    inDescriptionSection = False
                    # insert Jobs section
                    if jobs:
                        result += jobs + "\n"
                else:
                    # drop the template's placeholder description line
                    continue
            else:
                if line.startswith("Description:"):
                    inDescriptionSection = True
                    line += "\n"
                    # p4 forms require field content to be tab-indented.
                    for messageLine in message.split("\n"):
                        line += "\t" + messageLine + "\n"

            result += line + "\n"

        return result
    def patchRCSKeywords(self, file, pattern):
        """Collapse expanded RCS keywords in *file* back to their bare
        $Keyword$ form, rewriting the file in place via a temp file.

        pattern is a verbose-mode regexp whose group 1 is the keyword
        name; raises (after cleaning up the temp file) on any failure.
        """
        # Attempt to zap the RCS keywords in a p4 controlled file matching the given pattern
        (handle, outFileName) = tempfile.mkstemp(dir='.')
        try:
            outFile = os.fdopen(handle, "w+")
            inFile = open(file, "r")
            regexp = re.compile(pattern, re.VERBOSE)
            for line in inFile.readlines():
                line = regexp.sub(r'$\1$', line)
                outFile.write(line)
            inFile.close()
            outFile.close()
            # Forcibly overwrite the original file
            os.unlink(file)
            shutil.move(outFileName, file)
        except:
            # cleanup our temporary file
            os.unlink(outFileName)
            print "Failed to strip RCS keywords in %s" % file
            raise

        print "Patched up RCS keywords in %s" % file
def p4UserForCommit(self,id):
# Return the tuple (perforce user,git email) for a given git commit id
self.getUserMapFromPerforceServer()
gitEmail = read_pipe(["git", "log", "--max-count=1",
"--format=%ae", id])
gitEmail = gitEmail.strip()
if not self.emails.has_key(gitEmail):
return (None,gitEmail)
else:
return (self.emails[gitEmail],gitEmail)
    def checkValidP4Users(self,commits):
        """Die (or warn, with git-p4.allowMissingP4Users) for any commit
        whose git author email has no corresponding p4 user."""
        # check if any git authors cannot be mapped to p4 users
        for id in commits:
            (user,email) = self.p4UserForCommit(id)
            if not user:
                msg = "Cannot find p4 user for email %s in commit %s." % (email, id)
                if gitConfigBool("git-p4.allowMissingP4Users"):
                    print "%s" % msg
                else:
                    die("Error: %s\nSet git-p4.allowMissingP4Users to true to allow this." % msg)
    def lastP4Changelist(self):
        """Return the number of the most recent changelist submitted in
        the current client spec; dies if it cannot be determined."""
        # Get back the last changelist number submitted in this client spec. This
        # then gets used to patch up the username in the change. If the same
        # client spec is being used by multiple processes then this might go
        # wrong.
        results = p4CmdList("client -o")        # find the current client
        client = None
        for r in results:
            if r.has_key('Client'):
                client = r['Client']
                break
        if not client:
            die("could not get client spec")
        results = p4CmdList(["changes", "-c", client, "-m", "1"])
        for r in results:
            if r.has_key('change'):
                return r['change']
        die("Could not get changelist number for last submit - cannot patch up user details")
    def modifyChangelistUser(self, changelist, newUser):
        """Rewrite the User field of an already-submitted changelist to
        newUser via "p4 change -f -i"; dies on failure."""
        # fixup the user field of a changelist after it has been submitted.
        changes = p4CmdList("change -o %s" % changelist)
        if len(changes) != 1:
            die("Bad output from p4 change modifying %s to user %s" %
                (changelist, newUser))

        c = changes[0]
        if c['User'] == newUser: return   # nothing to do
        c['User'] = newUser
        # Feed the modified change spec back to p4 in marshalled form.
        input = marshal.dumps(c)

        result = p4CmdList("change -f -i", stdin=input)
        for r in result:
            if r.has_key('code'):
                if r['code'] == 'error':
                    die("Could not modify user field of changelist %s to %s:%s" % (changelist, newUser, r['data']))
            if r.has_key('data'):
                print("Updated user field for changelist %s to %s" % (changelist, newUser))
                return
        die("Could not modify user field of changelist %s to %s" % (changelist, newUser))
def canChangeChangelists(self):
# check to see if we have p4 admin or super-user permissions, either of
# which are required to modify changelists.
results = p4CmdList(["protects", self.depotPath])
for r in results:
if r.has_key('perm'):
if r['perm'] == 'admin':
return 1
if r['perm'] == 'super':
return 1
return 0
    def prepareSubmitTemplate(self):
        """Run "p4 change -o" to grab a change specification template.
        This does not use "p4 -G", as it is nice to keep the submission
        template in original order, since a human might edit it.

        Remove lines in the Files section that show changes to files
        outside the depot path we're committing into."""
        template = ""
        inFilesSection = False
        for line in p4_read_pipe_lines(['change', '-o']):
            if line.endswith("\r\n"):
                # normalize CRLF endings that p4 emits on Windows
                line = line[:-2] + "\n"
            if inFilesSection:
                if line.startswith("\t"):
                    # path starts and ends with a tab
                    path = line[1:]
                    lastTab = path.rfind("\t")
                    if lastTab != -1:
                        path = path[:lastTab]
                        # drop files outside the depot path we submit into
                        if not p4PathStartsWith(path, self.depotPath):
                            continue
                else:
                    inFilesSection = False
            else:
                if line.startswith("Files:"):
                    inFilesSection = True

            template += line

        return template
    def edit_template(self, template_file):
        """Invoke the editor to let the user change the submission
        message.  Return true if okay to continue with the submit."""

        # if configured to skip the editing part, just submit
        if gitConfigBool("git-p4.skipSubmitEdit"):
            return True

        # look at the modification time, to check later if the user saved
        # the file
        mtime = os.stat(template_file).st_mtime

        # invoke the editor: $P4EDITOR wins, else git's configured editor
        if os.environ.has_key("P4EDITOR") and (os.environ.get("P4EDITOR") != ""):
            editor = os.environ.get("P4EDITOR")
        else:
            editor = read_pipe("git var GIT_EDITOR").strip()
        system(editor + " " + template_file)

        # If the file was not saved, prompt to see if this patch should
        # be skipped.  But skip this verification step if configured so.
        if gitConfigBool("git-p4.skipSubmitEditCheck"):
            return True

        # modification time updated means user saved the file
        if os.stat(template_file).st_mtime > mtime:
            return True

        while True:
            response = raw_input("Submit template unchanged. Submit anyway? [y]es, [n]o (skip this patch) ")
            if response == 'y':
                return True
            if response == 'n':
                return False
    def applyCommit(self, id):
        """Apply one git commit to the p4 working tree as a pending
        changelist, then (unless --prepare-p4-only) submit it.

        Returns True if the commit was applied/submitted, False if it
        failed to apply or the user cancelled the submit; on failure all
        p4 opens made here are reverted."""
        print "Applying", read_pipe(["git", "show", "-s",
                                     "--format=format:%h %s", id])

        (p4User, gitEmail) = self.p4UserForCommit(id)

        diff = read_pipe_lines("git diff-tree -r %s \"%s^\" \"%s\"" % (self.diffOpts, id, id))
        filesToAdd = set()
        filesToDelete = set()
        editedFiles = set()
        pureRenameCopy = set()
        filesToChangeExecBit = {}

        # First pass: open files in p4 according to each diff-tree entry.
        # NOTE: 'diff' is rebound here from the line list to the parsed
        # per-line dictionary.
        for line in diff:
            diff = parseDiffTreeEntry(line)
            modifier = diff['status']
            path = diff['src']
            if modifier == "M":
                p4_edit(path)
                if isModeExecChanged(diff['src_mode'], diff['dst_mode']):
                    filesToChangeExecBit[path] = diff['dst_mode']
                editedFiles.add(path)
            elif modifier == "A":
                filesToAdd.add(path)
                filesToChangeExecBit[path] = diff['dst_mode']
                if path in filesToDelete:
                    filesToDelete.remove(path)
            elif modifier == "D":
                filesToDelete.add(path)
                if path in filesToAdd:
                    filesToAdd.remove(path)
            elif modifier == "C":
                src, dest = diff['src'], diff['dst']
                p4_integrate(src, dest)
                pureRenameCopy.add(dest)
                if diff['src_sha1'] != diff['dst_sha1']:
                    p4_edit(dest)
                    pureRenameCopy.discard(dest)
                if isModeExecChanged(diff['src_mode'], diff['dst_mode']):
                    p4_edit(dest)
                    pureRenameCopy.discard(dest)
                    filesToChangeExecBit[dest] = diff['dst_mode']
                if self.isWindows:
                    # turn off read-only attribute
                    os.chmod(dest, stat.S_IWRITE)
                # remove the copy target so "git apply" can recreate it
                os.unlink(dest)
                editedFiles.add(dest)
            elif modifier == "R":
                src, dest = diff['src'], diff['dst']
                if self.p4HasMoveCommand:
                    p4_edit(src)        # src must be open before move
                    p4_move(src, dest)  # opens for (move/delete, move/add)
                else:
                    p4_integrate(src, dest)
                    if diff['src_sha1'] != diff['dst_sha1']:
                        p4_edit(dest)
                    else:
                        pureRenameCopy.add(dest)
                if isModeExecChanged(diff['src_mode'], diff['dst_mode']):
                    if not self.p4HasMoveCommand:
                        p4_edit(dest)   # with move: already open, writable
                    filesToChangeExecBit[dest] = diff['dst_mode']
                if not self.p4HasMoveCommand:
                    if self.isWindows:
                        os.chmod(dest, stat.S_IWRITE)
                    os.unlink(dest)
                    filesToDelete.add(src)
                editedFiles.add(dest)
            else:
                die("unknown modifier %s for %s" % (modifier, path))

        diffcmd = "git format-patch -k --stdout \"%s^\"..\"%s\"" % (id, id)
        patchcmd = diffcmd + " | git apply "
        tryPatchCmd = patchcmd + "--check -"
        applyPatchCmd = patchcmd + "--check --apply -"
        patch_succeeded = True

        if os.system(tryPatchCmd) != 0:
            fixed_rcs_keywords = False
            patch_succeeded = False
            print "Unfortunately applying the change failed!"

            # Patch failed, maybe it's just RCS keyword woes. Look through
            # the patch to see if that's possible.
            if gitConfigBool("git-p4.attemptRCSCleanup"):
                file = None
                pattern = None
                kwfiles = {}
                for file in editedFiles | filesToDelete:
                    # did this file's delta contain RCS keywords?
                    pattern = p4_keywords_regexp_for_file(file)

                    if pattern:
                        # this file is a possibility...look for RCS keywords.
                        regexp = re.compile(pattern, re.VERBOSE)
                        for line in read_pipe_lines(["git", "diff", "%s^..%s" % (id, id), file]):
                            if regexp.search(line):
                                if verbose:
                                    print "got keyword match on %s in %s in %s" % (pattern, line, file)
                                kwfiles[file] = pattern
                                break

                for file in kwfiles:
                    if verbose:
                        # NOTE(review): 'line' here is left over from the
                        # scan loop above; 'file' was probably intended.
                        print "zapping %s with %s" % (line,pattern)
                    # File is being deleted, so not open in p4.  Must
                    # disable the read-only bit on windows.
                    if self.isWindows and file not in editedFiles:
                        os.chmod(file, stat.S_IWRITE)
                    self.patchRCSKeywords(file, kwfiles[file])
                    fixed_rcs_keywords = True

            if fixed_rcs_keywords:
                print "Retrying the patch with RCS keywords cleaned up"
                if os.system(tryPatchCmd) == 0:
                    patch_succeeded = True

        if not patch_succeeded:
            for f in editedFiles:
                p4_revert(f)
            return False

        #
        # Apply the patch for real, and do add/delete/+x handling.
        #
        system(applyPatchCmd)

        for f in filesToAdd:
            p4_add(f)
        for f in filesToDelete:
            p4_revert(f)
            p4_delete(f)

        # Set/clear executable bits
        for f in filesToChangeExecBit.keys():
            mode = filesToChangeExecBit[f]
            setP4ExecBit(f, mode)

        #
        # Build p4 change description, starting with the contents
        # of the git commit message.
        #
        logMessage = extractLogMessageFromGitCommit(id)
        logMessage = logMessage.strip()
        (logMessage, jobs) = self.separate_jobs_from_description(logMessage)

        template = self.prepareSubmitTemplate()
        submitTemplate = self.prepareLogMessage(template, logMessage, jobs)

        if self.preserveUser:
            submitTemplate += "\n######## Actual user %s, modified after commit\n" % p4User

        if self.checkAuthorship and not self.p4UserIsMe(p4User):
            submitTemplate += "######## git author %s does not match your p4 account.\n" % gitEmail
            submitTemplate += "######## Use option --preserve-user to modify authorship.\n"
            submitTemplate += "######## Variable git-p4.skipUserNameCheck hides this message.\n"

        separatorLine = "######## everything below this line is just the diff #######\n"

        # diff shown to the user below the separator; P4DIFF could change
        # the output format, so drop it from the environment
        if os.environ.has_key("P4DIFF"):
            del(os.environ["P4DIFF"])
        diff = ""
        for editedFile in editedFiles:
            diff += p4_read_pipe(['diff', '-du',
                                  wildcard_encode(editedFile)])

        # new file diff
        newdiff = ""
        for newFile in filesToAdd:
            newdiff += "==== new file ====\n"
            newdiff += "--- /dev/null\n"
            newdiff += "+++ %s\n" % newFile
            f = open(newFile, "r")
            for line in f.readlines():
                newdiff += "+" + line
            f.close()

        # change description file: submitTemplate, separatorLine, diff, newdiff
        (handle, fileName) = tempfile.mkstemp()
        tmpFile = os.fdopen(handle, "w+")
        if self.isWindows:
            submitTemplate = submitTemplate.replace("\n", "\r\n")
            separatorLine = separatorLine.replace("\n", "\r\n")
            newdiff = newdiff.replace("\n", "\r\n")
        tmpFile.write(submitTemplate + separatorLine + diff + newdiff)
        tmpFile.close()

        if self.prepare_p4_only:
            #
            # Leave the p4 tree prepared, and the submit template around
            # and let the user decide what to do next
            #
            print
            print "P4 workspace prepared for submission."
            print "To submit or revert, go to client workspace"
            print " " + self.clientPath
            print
            print "To submit, use \"p4 submit\" to write a new description,"
            print "or \"p4 submit -i %s\" to use the one prepared by" \
                  " \"git p4\"." % fileName
            print "You can delete the file \"%s\" when finished." % fileName

            if self.preserveUser and p4User and not self.p4UserIsMe(p4User):
                print "To preserve change ownership by user %s, you must\n" \
                      "do \"p4 change -f <change>\" after submitting and\n" \
                      "edit the User field."
            if pureRenameCopy:
                print "After submitting, renamed files must be re-synced."
                print "Invoke \"p4 sync -f\" on each of these files:"
                for f in pureRenameCopy:
                    print " " + f

            print
            print "To revert the changes, use \"p4 revert ...\", and delete"
            print "the submit template file \"%s\"" % fileName
            if filesToAdd:
                print "Since the commit adds new files, they must be deleted:"
                for f in filesToAdd:
                    print " " + f
            print
            return True

        #
        # Let the user edit the change description, then submit it.
        #
        if self.edit_template(fileName):
            # read the edited message and submit
            ret = True
            tmpFile = open(fileName, "rb")
            message = tmpFile.read()
            tmpFile.close()
            submitTemplate = message[:message.index(separatorLine)]
            if self.isWindows:
                submitTemplate = submitTemplate.replace("\r\n", "\n")
            p4_write_pipe(['submit', '-i'], submitTemplate)

            if self.preserveUser:
                if p4User:
                    # Get last changelist number. Cannot easily get it from
                    # the submit command output as the output is
                    # unmarshalled.
                    changelist = self.lastP4Changelist()
                    self.modifyChangelistUser(changelist, p4User)

            # The rename/copy happened by applying a patch that created a
            # new file.  This leaves it writable, which confuses p4.
            for f in pureRenameCopy:
                p4_sync(f, "-f")
        else:
            # skip this patch
            ret = False
            print "Submission cancelled, undoing p4 changes."
            for f in editedFiles:
                p4_revert(f)
            for f in filesToAdd:
                p4_revert(f)
                os.remove(f)
            for f in filesToDelete:
                p4_revert(f)

        os.remove(fileName)
        return ret
    # Export git tags as p4 labels. Create a p4 label and then tag
    # with that.
    def exportGitTags(self, gitTags):
        """For each git tag in gitTags that matches the export regexp
        and points at a p4-imported commit, create a p4 label with the
        same view as the client spec and tag the depot files at the
        recorded changelist."""
        validLabelRegexp = gitConfig("git-p4.labelExportRegexp")
        if len(validLabelRegexp) == 0:
            validLabelRegexp = defaultLabelRegexp
        m = re.compile(validLabelRegexp)

        for name in gitTags:

            if not m.match(name):
                if verbose:
                    print "tag %s does not match regexp %s" % (name, validLabelRegexp)
                continue

            # Get the p4 commit this corresponds to
            logMessage = extractLogMessageFromGitCommit(name)
            values = extractSettingsGitLog(logMessage)

            if not values.has_key('change'):
                # a tag pointing to something not sent to p4; ignore
                if verbose:
                    print "git tag %s does not give a p4 commit" % name
                continue
            else:
                changelist = values['change']

            # Get the tag details.
            inHeader = True
            isAnnotated = False
            body = []
            for l in read_pipe_lines(["git", "cat-file", "-p", name]):
                l = l.strip()
                if inHeader:
                    # annotated tag objects have a "tag <name>" header line
                    if re.match(r'tag\s+', l):
                        isAnnotated = True
                    elif re.match(r'\s*$', l):
                        inHeader = False
                        continue
                else:
                    body.append(l)

            if not isAnnotated:
                body = ["lightweight tag imported by git p4\n"]

            # Create the label - use the same view as the client spec we are using
            clientSpec = getClientSpec()

            labelTemplate = "Label: %s\n" % name
            labelTemplate += "Description:\n"
            for b in body:
                labelTemplate += "\t" + b + "\n"
            labelTemplate += "View:\n"
            for depot_side in clientSpec.mappings:
                labelTemplate += "\t%s\n" % depot_side

            if self.dry_run:
                print "Would create p4 label %s for tag" % name
            elif self.prepare_p4_only:
                print "Not creating p4 label %s for tag due to option" \
                      " --prepare-p4-only" % name
            else:
                p4_write_pipe(["label", "-i"], labelTemplate)

                # Use the label
                p4_system(["tag", "-l", name] +
                          ["%s@%s" % (depot_side, changelist) for depot_side in clientSpec.mappings])

                if verbose:
                    print "created p4 label for tag %s" % name
    def run(self, args):
        """Entry point for "git p4 submit": sync the p4 client, then
        replay each new commit on origin..master into the depot, one
        p4 changelist per commit; finally re-sync/rebase on success.

        Exits non-zero unless every commit applied."""
        # Determine the branch to submit from: current branch, or the
        # single command-line argument.
        if len(args) == 0:
            self.master = currentGitBranch()
            if len(self.master) == 0 or not gitBranchExists("refs/heads/%s" % self.master):
                die("Detecting current git branch failed!")
        elif len(args) == 1:
            self.master = args[0]
            if not branchExists(self.master):
                die("Branch %s does not exist" % self.master)
        else:
            return False

        allowSubmit = gitConfig("git-p4.allowSubmit")
        if len(allowSubmit) > 0 and not self.master in allowSubmit.split(","):
            die("%s is not in git-p4.allowSubmit" % self.master)

        [upstream, settings] = findUpstreamBranchPoint()
        self.depotPath = settings['depot-paths'][0]
        if len(self.origin) == 0:
            self.origin = upstream

        if self.preserveUser:
            if not self.canChangeChangelists():
                die("Cannot preserve user names without p4 super-user or admin permissions")

        # if not set from the command line, try the config file
        if self.conflict_behavior is None:
            val = gitConfig("git-p4.conflict")
            if val:
                if val not in self.conflict_behavior_choices:
                    die("Invalid value '%s' for config git-p4.conflict" % val)
            else:
                val = "ask"
            self.conflict_behavior = val

        if self.verbose:
            print "Origin branch is " + self.origin

        if len(self.depotPath) == 0:
            print "Internal error: cannot locate perforce depot path from existing branches"
            sys.exit(128)

        self.useClientSpec = False
        if gitConfigBool("git-p4.useclientspec"):
            self.useClientSpec = True
        if self.useClientSpec:
            self.clientSpecDirs = getClientSpec()

        if self.useClientSpec:
            # all files are relative to the client spec
            self.clientPath = getClientRoot()
        else:
            self.clientPath = p4Where(self.depotPath)

        if self.clientPath == "":
            die("Error: Cannot locate perforce checkout of %s in client view" % self.depotPath)

        print "Perforce checkout for depot path %s located at %s" % (self.depotPath, self.clientPath)
        self.oldWorkingDirectory = os.getcwd()

        # ensure the clientPath exists
        new_client_dir = False
        if not os.path.exists(self.clientPath):
            new_client_dir = True
            os.makedirs(self.clientPath)

        chdir(self.clientPath, is_client_path=True)
        if self.dry_run:
            print "Would synchronize p4 checkout in %s" % self.clientPath
        else:
            print "Synchronizing p4 checkout..."
            if new_client_dir:
                # old one was destroyed, and maybe nobody told p4
                p4_sync("...", "-f")
            else:
                p4_sync("...")
        self.check()

        # Commits to apply, oldest first.
        commits = []
        for line in read_pipe_lines(["git", "rev-list", "--no-merges", "%s..%s" % (self.origin, self.master)]):
            commits.append(line.strip())
        commits.reverse()

        if self.preserveUser or gitConfigBool("git-p4.skipUserNameCheck"):
            self.checkAuthorship = False
        else:
            self.checkAuthorship = True

        if self.preserveUser:
            self.checkValidP4Users(commits)

        #
        # Build up a set of options to be passed to diff when
        # submitting each commit to p4.
        #
        if self.detectRenames:
            # command-line -M arg
            self.diffOpts = "-M"
        else:
            # If not explicitly set check the config variable
            detectRenames = gitConfig("git-p4.detectRenames")

            if detectRenames.lower() == "false" or detectRenames == "":
                self.diffOpts = ""
            elif detectRenames.lower() == "true":
                self.diffOpts = "-M"
            else:
                self.diffOpts = "-M%s" % detectRenames

        # no command-line arg for -C or --find-copies-harder, just
        # config variables
        detectCopies = gitConfig("git-p4.detectCopies")
        if detectCopies.lower() == "false" or detectCopies == "":
            pass
        elif detectCopies.lower() == "true":
            self.diffOpts += " -C"
        else:
            self.diffOpts += " -C%s" % detectCopies

        if gitConfigBool("git-p4.detectCopiesHarder"):
            self.diffOpts += " --find-copies-harder"

        #
        # Apply the commits, one at a time.  On failure, ask if should
        # continue to try the rest of the patches, or quit.
        #
        if self.dry_run:
            print "Would apply"
        applied = []
        last = len(commits) - 1
        for i, commit in enumerate(commits):
            if self.dry_run:
                print " ", read_pipe(["git", "show", "-s",
                                      "--format=format:%h %s", commit])
                ok = True
            else:
                ok = self.applyCommit(commit)
            if ok:
                applied.append(commit)
            else:
                if self.prepare_p4_only and i < last:
                    print "Processing only the first commit due to option" \
                          " --prepare-p4-only"
                    break
                if i < last:
                    quit = False
                    while True:
                        # prompt for what to do, or use the option/variable
                        if self.conflict_behavior == "ask":
                            print "What do you want to do?"
                            response = raw_input("[s]kip this commit but apply"
                                                 " the rest, or [q]uit? ")
                            if not response:
                                continue
                        elif self.conflict_behavior == "skip":
                            response = "s"
                        elif self.conflict_behavior == "quit":
                            response = "q"
                        else:
                            die("Unknown conflict_behavior '%s'" %
                                self.conflict_behavior)

                        if response[0] == "s":
                            print "Skipping this commit, but applying the rest"
                            break
                        if response[0] == "q":
                            print "Quitting"
                            quit = True
                            break
                    if quit:
                        break

        chdir(self.oldWorkingDirectory)

        if self.dry_run:
            pass
        elif self.prepare_p4_only:
            pass
        elif len(commits) == len(applied):
            print "All commits applied!"

            # Re-import the just-submitted changes and rebase onto them.
            sync = P4Sync()
            if self.branch:
                sync.branch = self.branch
            sync.run([])

            rebase = P4Rebase()
            rebase.rebase()

        else:
            if len(applied) == 0:
                print "No commits applied."
            else:
                print "Applied only the commits marked with '*':"
                for c in commits:
                    if c in applied:
                        star = "*"
                    else:
                        star = " "
                    print star, read_pipe(["git", "show", "-s",
                                           "--format=format:%h %s", c])
                print "You will have to do 'git p4 sync' and rebase."

        if gitConfigBool("git-p4.exportLabels"):
            self.exportLabels = True

        if self.exportLabels:
            p4Labels = getP4Labels(self.depotPath)
            gitTags = getGitTags()

            missingGitTags = gitTags - p4Labels
            self.exportGitTags(missingGitTags)

        # exit with error unless everything applied perfectly
        if len(commits) != len(applied):
            sys.exit(1)

        return True
class View(object):
    """Represent a p4 view ("p4 help views"), and map files in a
    repo according to the view."""

    def __init__(self, client_name):
        # Depot-side mappings, in view order (order matters for tags).
        self.mappings = []
        self.client_prefix = "//%s/" % client_name
        # cache results of "p4 where" to lookup client file locations
        self.client_spec_path_cache = {}

    def append(self, view_line):
        """Parse a view line, splitting it into depot and client
        sides.  Append to self.mappings, preserving order.  This
        is only needed for tag creation."""

        # Split the view line into exactly two words.  P4 enforces
        # structure on these lines that simplifies this quite a bit.
        #
        # Either or both words may be double-quoted.
        # Single quotes do not matter.
        # Double-quote marks cannot occur inside the words.
        # A + or - prefix is also inside the quotes.
        # There are no quotes unless they contain a space.
        # The line is already white-space stripped.
        # The two words are separated by a single space.
        #
        if view_line[0] == '"':
            # First word is double quoted.  Find its end.
            close_quote_index = view_line.find('"', 1)
            if close_quote_index <= 0:
                die("No first-word closing quote found: %s" % view_line)
            depot_side = view_line[1:close_quote_index]
            # skip closing quote and space
            rhs_index = close_quote_index + 1 + 1
        else:
            space_index = view_line.find(" ")
            if space_index <= 0:
                die("No word-splitting space found: %s" % view_line)
            depot_side = view_line[0:space_index]
            rhs_index = space_index + 1

        # prefix + means overlay on previous mapping
        if depot_side.startswith("+"):
            depot_side = depot_side[1:]

        # prefix - means exclude this path, leave out of mappings
        exclude = False
        if depot_side.startswith("-"):
            exclude = True
            depot_side = depot_side[1:]

        if not exclude:
            self.mappings.append(depot_side)

    def convert_client_path(self, clientFile):
        """Return clientFile relative to the client root; dies if it
        does not start with "//<client>/"."""
        # chop off //client/ part to make it relative
        if not clientFile.startswith(self.client_prefix):
            die("No prefix '%s' on clientFile '%s'" %
                (self.client_prefix, clientFile))
        return clientFile[len(self.client_prefix):]

    def update_client_spec_path_cache(self, files):
        """ Caching file paths by "p4 where" batch query """

        # List depot file paths exclude that already cached
        fileArgs = [f['path'] for f in files if f['path'] not in self.client_spec_path_cache]

        if len(fileArgs) == 0:
            return  # All files in cache

        where_result = p4CmdList(["-x", "-", "where"], stdin=fileArgs)
        for res in where_result:
            if "code" in res and res["code"] == "error":
                # assume error is "... file(s) not in client view"
                continue
            if "clientFile" not in res:
                # Bug fix: this error message used to interpolate the
                # undefined name 'depot_path', raising NameError when the
                # branch was ever taken; report the result dict instead.
                die("No clientFile in 'p4 where' output:\n%r" % res)
            if "unmap" in res:
                # it will list all of them, but only one not unmap-ped
                continue
            self.client_spec_path_cache[res['depotFile']] = self.convert_client_path(res["clientFile"])

        # not found files or unmap files set to ""
        for depotFile in fileArgs:
            if depotFile not in self.client_spec_path_cache:
                self.client_spec_path_cache[depotFile] = ""

    def map_in_client(self, depot_path):
        """Return the relative location in the client where this
        depot file should live.  Returns "" if the file should
        not be mapped in the client."""

        if depot_path in self.client_spec_path_cache:
            return self.client_spec_path_cache[depot_path]

        die( "Error: %s is not found in client spec path" % depot_path )
        return ""
class P4Sync(Command, P4UserMap):
    """git-p4 sync: import Perforce changelists into git via fast-import."""

    # p4 actions that mean the file ceases to exist at that revision.
    delete_actions = ( "delete", "move/delete", "purge" )

    def __init__(self):
        Command.__init__(self)
        P4UserMap.__init__(self)
        self.options = [
            optparse.make_option("--branch", dest="branch"),
            optparse.make_option("--detect-branches", dest="detectBranches", action="store_true"),
            optparse.make_option("--changesfile", dest="changesFile"),
            optparse.make_option("--silent", dest="silent", action="store_true"),
            optparse.make_option("--detect-labels", dest="detectLabels", action="store_true"),
            optparse.make_option("--import-labels", dest="importLabels", action="store_true"),
            optparse.make_option("--import-local", dest="importIntoRemotes", action="store_false",
                                 help="Import into refs/heads/ , not refs/remotes"),
            optparse.make_option("--max-changes", dest="maxChanges"),
            optparse.make_option("--keep-path", dest="keepRepoPath", action='store_true',
                                 help="Keep entire BRANCH/DIR/SUBDIR prefix during import"),
            optparse.make_option("--use-client-spec", dest="useClientSpec", action='store_true',
                                 help="Only sync files that are included in the Perforce Client Spec")
        ]
        self.description = """Imports from Perforce into a git repository.\n
    example:
    //depot/my/project/ -- to import the current head
    //depot/my/project/@all -- to import everything
    //depot/my/project/@1,6 -- to import only from revision 1 to 6

    (a ... is not needed in the path p4 specification, it's added implicitly)"""

        self.usage += " //depot/path[@revRange]"
        self.silent = False
        self.createdBranches = set()
        self.committedChanges = set()
        self.branch = ""
        self.detectBranches = False
        self.detectLabels = False
        self.importLabels = False
        self.changesFile = ""
        self.syncWithOrigin = True
        self.importIntoRemotes = True
        self.maxChanges = ""
        self.keepRepoPath = False
        self.depotPaths = None
        self.p4BranchesInGit = []
        self.cloneExclude = []
        self.useClientSpec = False
        self.useClientSpec_from_options = False
        self.clientSpecDirs = None
        # Temporary branches created during import, cleaned up afterwards.
        self.tempBranches = []
        self.tempBranchLocation = "git-p4-tmp"

        if gitConfig("git-p4.syncFromOrigin") == "false":
            self.syncWithOrigin = False
    # Force a checkpoint in fast-import and wait for it to finish
    def checkpoint(self):
        """Flush fast-import state to disk and block until acknowledged."""
        self.gitStream.write("checkpoint\n\n")
        # fast-import echoes the progress line back on gitOutput once the
        # checkpoint has been processed; reading it synchronizes us.
        self.gitStream.write("progress checkpoint\n\n")
        out = self.gitOutput.readline()
        if self.verbose:
            print "checkpoint finished: " + out
def extractFilesFromCommit(self, commit):
self.cloneExclude = [re.sub(r"\.\.\.$", "", path)
for path in self.cloneExclude]
files = []
fnum = 0
while commit.has_key("depotFile%s" % fnum):
path = commit["depotFile%s" % fnum]
if [p for p in self.cloneExclude
if p4PathStartsWith(path, p)]:
found = False
else:
found = [p for p in self.depotPaths
if p4PathStartsWith(path, p)]
if not found:
fnum = fnum + 1
continue
file = {}
file["path"] = path
file["rev"] = commit["rev%s" % fnum]
file["action"] = commit["action%s" % fnum]
file["type"] = commit["type%s" % fnum]
files.append(file)
fnum = fnum + 1
return files
    def stripRepoPath(self, path, prefixes):
        """When streaming files, this is called to map a p4 depot path
        to where it should go in git.  The prefixes are either
        self.depotPaths, or self.branchPrefixes in the case of
        branch detection."""

        if self.useClientSpec:
            # branch detection moves files up a level (the branch name)
            # from what client spec interpretation gives
            path = self.clientSpecDirs.map_in_client(path)
            if self.detectBranches:
                for b in self.knownBranches:
                    if path.startswith(b + "/"):
                        path = path[len(b)+1:]

        elif self.keepRepoPath:
            # Preserve everything in relative path name except leading
            # //depot/; just look at first prefix as they all should
            # be in the same depot.
            depot = re.sub("^(//[^/]+/).*", r'\1', prefixes[0])
            if p4PathStartsWith(path, depot):
                path = path[len(depot):]

        else:
            # Default: strip whichever depot-path prefix matches.
            for p in prefixes:
                if p4PathStartsWith(path, p):
                    path = path[len(p):]
                    break

        # Undo the %xx encoding p4 applies to wildcard characters.
        path = wildcard_decode(path)
        return path
    def splitFilesIntoBranches(self, commit):
        """Look at each depotFile in the commit to figure out to what
           branch it belongs.

        Returns a dict mapping known-branch name -> list of file dicts
        (path/rev/action/type) belonging to that branch.
        """

        if self.clientSpecDirs:
            files = self.extractFilesFromCommit(commit)
            self.clientSpecDirs.update_client_spec_path_cache(files)

        branches = {}
        fnum = 0
        # p4 -G reports files as numbered keys: depotFile0, depotFile1, ...
        while commit.has_key("depotFile%s" % fnum):
            path =  commit["depotFile%s" % fnum]
            found = [p for p in self.depotPaths
                     if p4PathStartsWith(path, p)]
            if not found:
                fnum = fnum + 1
                continue

            file = {}
            file["path"] = path
            file["rev"] = commit["rev%s" % fnum]
            file["action"] = commit["action%s" % fnum]
            file["type"] = commit["type%s" % fnum]
            fnum = fnum + 1

            # start with the full relative path where this file would
            # go in a p4 client
            if self.useClientSpec:
                relPath = self.clientSpecDirs.map_in_client(path)
            else:
                relPath = self.stripRepoPath(path, self.depotPaths)

            for branch in self.knownBranches.keys():
                # add a trailing slash so that a commit into qt/4.2foo
                # doesn't end up in qt/4.2, e.g.
                if relPath.startswith(branch + "/"):
                    if branch not in branches:
                        branches[branch] = []
                    branches[branch].append(file)
                    break

        return branches
# output one file from the P4 stream
# - helper for streamP4Files
def streamOneP4File(self, file, contents):
relPath = self.stripRepoPath(file['depotFile'], self.branchPrefixes)
if verbose:
sys.stderr.write("%s\n" % relPath)
(type_base, type_mods) = split_p4_type(file["type"])
git_mode = "100644"
if "x" in type_mods:
git_mode = "100755"
if type_base == "symlink":
git_mode = "120000"
# p4 print on a symlink sometimes contains "target\n";
# if it does, remove the newline
data = ''.join(contents)
if data[-1] == '\n':
contents = [data[:-1]]
else:
contents = [data]
if type_base == "utf16":
# p4 delivers different text in the python output to -G
# than it does when using "print -o", or normal p4 client
# operations. utf16 is converted to ascii or utf8, perhaps.
# But ascii text saved as -t utf16 is completely mangled.
# Invoke print -o to get the real contents.
#
# On windows, the newlines will always be mangled by print, so put
# them back too. This is not needed to the cygwin windows version,
# just the native "NT" type.
#
text = p4_read_pipe(['print', '-q', '-o', '-', file['depotFile']])
if p4_version_string().find("/NT") >= 0:
text = text.replace("\r\n", "\n")
contents = [ text ]
if type_base == "apple":
# Apple filetype files will be streamed as a concatenation of
# its appledouble header and the contents. This is useless
# on both macs and non-macs. If using "print -q -o xx", it
# will create "xx" with the data, and "%xx" with the header.
# This is also not very useful.
#
# Ideally, someday, this script can learn how to generate
# appledouble files directly and import those to git, but
# non-mac machines can never find a use for apple filetype.
print "\nIgnoring apple filetype file %s" % file['depotFile']
return
# Note that we do not try to de-mangle keywords on utf16 files,
# even though in theory somebody may want that.
pattern = p4_keywords_regexp_for_type(type_base, type_mods)
if pattern:
regexp = re.compile(pattern, re.VERBOSE)
text = ''.join(contents)
text = regexp.sub(r'$\1$', text)
contents = [ text ]
self.gitStream.write("M %s inline %s\n" % (git_mode, relPath))
# total length...
length = 0
for d in contents:
length = length + len(d)
self.gitStream.write("data %d\n" % length)
for d in contents:
self.gitStream.write(d)
self.gitStream.write("\n")
    def streamOneP4Deletion(self, file):
        """Emit a fast-import filedelete ('D') for one p4 file record."""
        relPath = self.stripRepoPath(file['path'], self.branchPrefixes)
        if verbose:
            sys.stderr.write("delete %s\n" % relPath)
        self.gitStream.write("D %s\n" % relPath)
# handle another chunk of streaming data
    def streamP4FilesCb(self, marshalled):
        """Callback invoked for each marshalled record while streaming
        'p4 print' output.

        Accumulates per-file metadata in self.stream_file and data
        chunks in self.stream_contents; when a new depotFile record
        starts, the previous file is flushed via streamOneP4File().
        On a p4 error, fast-import is told to die so no partial/empty
        commit is created.
        """

        # catch p4 errors and complain
        err = None
        if "code" in marshalled:
            if marshalled["code"] == "error":
                if "data" in marshalled:
                    err = marshalled["data"].rstrip()
        if err:
            f = None
            if self.stream_have_file_info:
                if "depotFile" in self.stream_file:
                    f = self.stream_file["depotFile"]
            # force a failure in fast-import, else an empty
            # commit will be made
            self.gitStream.write("\n")
            self.gitStream.write("die-now\n")
            self.gitStream.close()
            # ignore errors, but make sure it exits first
            self.importProcess.wait()
            if f:
                die("Error from p4 print for %s: %s" % (f, err))
            else:
                die("Error from p4 print: %s" % err)

        if marshalled.has_key('depotFile') and self.stream_have_file_info:
            # start of a new file - output the old one first
            self.streamOneP4File(self.stream_file, self.stream_contents)
            self.stream_file = {}
            self.stream_contents = []
            self.stream_have_file_info = False

        # pick up the new file information... for the
        # 'data' field we need to append to our array
        for k in marshalled.keys():
            if k == 'data':
                self.stream_contents.append(marshalled['data'])
            else:
                self.stream_file[k] = marshalled[k]

        self.stream_have_file_info = True
# Stream directly from "p4 files" into "git fast-import"
    def streamP4Files(self, files):
        """Stream the given file records directly from 'p4 print' into
        'git fast-import'.

        Deletions are emitted first; remaining files are fetched in one
        batched 'p4 -x - print' invocation driven by streamP4FilesCb.
        """
        filesForCommit = []
        filesToRead = []
        filesToDelete = []

        for f in files:
            # if using a client spec, only add the files that have
            # a path in the client
            if self.clientSpecDirs:
                if self.clientSpecDirs.map_in_client(f['path']) == "":
                    continue

            filesForCommit.append(f)
            if f['action'] in self.delete_actions:
                filesToDelete.append(f)
            else:
                filesToRead.append(f)

        # deleted files...
        for f in filesToDelete:
            self.streamOneP4Deletion(f)

        if len(filesToRead) > 0:
            # Reset the streaming-callback accumulator state.
            self.stream_file = {}
            self.stream_contents = []
            self.stream_have_file_info = False

            # curry self argument
            def streamP4FilesCbSelf(entry):
                self.streamP4FilesCb(entry)

            fileArgs = ['%s#%s' % (f['path'], f['rev']) for f in filesToRead]

            p4CmdList(["-x", "-", "print"],
                      stdin=fileArgs,
                      cb=streamP4FilesCbSelf)

            # do the last chunk
            if self.stream_file.has_key('depotFile'):
                self.streamOneP4File(self.stream_file, self.stream_contents)
def make_email(self, userid):
if userid in self.users:
return self.users[userid]
else:
return "%s <a@b>" % userid
# Stream a p4 tag
    def streamTag(self, gitStream, labelName, labelDetails, commit, epoch):
        """Emit a fast-import annotated tag for a p4 label.

        labelDetails is the marshalled output of 'p4 label -o'; the tag
        is attached to `commit` with tag timestamp `epoch`.
        """
        if verbose:
            print "writing tag %s for commit %s" % (labelName, commit)
        gitStream.write("tag %s\n" % labelName)
        gitStream.write("from %s\n" % commit)

        if labelDetails.has_key('Owner'):
            owner = labelDetails["Owner"]
        else:
            owner = None

        # Try to use the owner of the p4 label, or failing that,
        # the current p4 user id.
        if owner:
            email = self.make_email(owner)
        else:
            email = self.make_email(self.p4UserId())
        tagger = "%s %s %s" % (email, epoch, self.tz)

        gitStream.write("tagger %s\n" % tagger)

        # NOTE(review): unconditional debug output -- looks like it should be
        # guarded by `verbose`; left as-is to preserve current behavior.
        print "labelDetails=",labelDetails
        if labelDetails.has_key('Description'):
            description = labelDetails['Description']
        else:
            description = 'Label from git p4'

        gitStream.write("data %d\n" % len(description))
        gitStream.write(description)
        gitStream.write("\n")
    def commit(self, details, files, branch, parent = ""):
        """Write one git commit for p4 change `details` onto `branch`.

        details: marshalled p4 change description (time/user/desc/change).
        files:   file records to stream into the commit.
        branch:  git ref the commit goes on.
        parent:  optional parent ref/sha1 ("from" line); "" for none.

        The commit message gets a trailing "[git-p4: ...]" line so later
        incremental syncs can recover depot paths and change number.
        """
        epoch = details["time"]
        author = details["user"]

        if self.verbose:
            print "commit into %s" % branch

        # start with reading files; if that fails, we should not
        # create a commit.
        new_files = []
        for f in files:
            if [p for p in self.branchPrefixes if p4PathStartsWith(f['path'], p)]:
                new_files.append (f)
            else:
                sys.stderr.write("Ignoring file outside of prefix: %s\n" % f['path'])

        if self.clientSpecDirs:
            self.clientSpecDirs.update_client_spec_path_cache(files)

        self.gitStream.write("commit %s\n" % branch)
#        gitStream.write("mark :%s\n" % details["change"])
        self.committedChanges.add(int(details["change"]))
        committer = ""
        # Refresh the user map on a cache miss before formatting the ident.
        if author not in self.users:
            self.getUserMapFromPerforceServer()
        committer = "%s %s %s" % (self.make_email(author), epoch, self.tz)

        self.gitStream.write("committer %s\n" % committer)

        self.gitStream.write("data <<EOT\n")
        self.gitStream.write(details["desc"])
        self.gitStream.write("\n[git-p4: depot-paths = \"%s\": change = %s" %
                             (','.join(self.branchPrefixes), details["change"]))
        if len(details['options']) > 0:
            self.gitStream.write(": options = %s" % details['options'])
        self.gitStream.write("]\nEOT\n\n")

        if len(parent) > 0:
            if self.verbose:
                print "parent %s" % parent
            self.gitStream.write("from %s\n" % parent)

        self.streamP4Files(new_files)
        self.gitStream.write("\n")

        change = int(details["change"])

        # If this change carries a label whose file set matches exactly,
        # emit a git tag for it.
        if self.labels.has_key(change):
            label = self.labels[change]
            labelDetails = label[0]
            labelRevisions = label[1]
            if self.verbose:
                print "Change %s is labelled %s" % (change, labelDetails)

            files = p4CmdList(["files"] + ["%s...@%s" % (p, change)
                                                for p in self.branchPrefixes])

            if len(files) == len(labelRevisions):

                cleanedFiles = {}
                for info in files:
                    if info["action"] in self.delete_actions:
                        continue
                    cleanedFiles[info["depotFile"]] = info["rev"]

                if cleanedFiles == labelRevisions:
                    self.streamTag(self.gitStream, 'tag_%s' % labelDetails['label'], labelDetails, branch, epoch)

                else:
                    if not self.silent:
                        print ("Tag %s does not match with change %s: files do not match."
                               % (labelDetails["label"], change))

            else:
                if not self.silent:
                    print ("Tag %s does not match with change %s: file count is different."
                           % (labelDetails["label"], change))
# Build a dictionary of changelists and labels, for "detect-labels" option.
    def getLabels(self):
        """Build self.labels: newest-changelist -> [label info, {depotFile: rev}],
        for every p4 label touching self.depotPaths ("detect-labels")."""
        self.labels = {}

        l = p4CmdList(["labels"] + ["%s..." % p for p in self.depotPaths])
        if len(l) > 0 and not self.silent:
            print "Finding files belonging to labels in %s" % `self.depotPaths`

        for output in l:
            label = output["label"]
            revisions = {}
            newestChange = 0
            if self.verbose:
                print "Querying files for label %s" % label
            for file in p4CmdList(["files"] +
                                      ["%s...@%s" % (p, label)
                                          for p in self.depotPaths]):
                revisions[file["depotFile"]] = file["rev"]
                change = int(file["change"])
                if change > newestChange:
                    newestChange = change

            # Key by the newest change in the label's file set; commit()
            # looks this up when it imports that change.
            self.labels[newestChange] = [output, revisions]

        if self.verbose:
            print "Label changes: %s" % self.labels.keys()
# Import p4 labels as git tags. A direct mapping does not
# exist, so assume that if all the files are at the same revision
# then we can use that, or it's something more complicated we should
# just ignore.
def importP4Labels(self, stream, p4Labels):
if verbose:
print "import p4 labels: " + ' '.join(p4Labels)
ignoredP4Labels = gitConfigList("git-p4.ignoredP4Labels")
validLabelRegexp = gitConfig("git-p4.labelImportRegexp")
if len(validLabelRegexp) == 0:
validLabelRegexp = defaultLabelRegexp
m = re.compile(validLabelRegexp)
for name in p4Labels:
commitFound = False
if not m.match(name):
if verbose:
print "label %s does not match regexp %s" % (name,validLabelRegexp)
continue
if name in ignoredP4Labels:
continue
labelDetails = p4CmdList(['label', "-o", name])[0]
# get the most recent changelist for each file in this label
change = p4Cmd(["changes", "-m", "1"] + ["%s...@%s" % (p, name)
for p in self.depotPaths])
if change.has_key('change'):
# find the corresponding git commit; take the oldest commit
changelist = int(change['change'])
gitCommit = read_pipe(["git", "rev-list", "--max-count=1",
"--reverse", ":/\[git-p4:.*change = %d\]" % changelist])
if len(gitCommit) == 0:
print "could not find git commit for changelist %d" % changelist
else:
gitCommit = gitCommit.strip()
commitFound = True
# Convert from p4 time format
try:
tmwhen = time.strptime(labelDetails['Update'], "%Y/%m/%d %H:%M:%S")
except ValueError:
print "Could not convert label time %s" % labelDetails['Update']
tmwhen = 1
when = int(time.mktime(tmwhen))
self.streamTag(stream, name, labelDetails, gitCommit, when)
if verbose:
print "p4 label %s mapped to git commit %s" % (name, gitCommit)
else:
if verbose:
print "Label %s has no changelists - possibly deleted?" % name
if not commitFound:
# We can't import this label; don't try again as it will get very
# expensive repeatedly fetching all the files for labels that will
# never be imported. If the label is moved in the future, the
# ignore will need to be removed manually.
system(["git", "config", "--add", "git-p4.ignoredP4Labels", name])
def guessProjectName(self):
for p in self.depotPaths:
if p.endswith("/"):
p = p[:-1]
p = p[p.strip().rfind("/") + 1:]
if not p.endswith("/"):
p += "/"
return p
    def getBranchMapping(self):
        """Populate self.knownBranches (destination -> source) from p4
        branch specs, plus any git-p4.branchList config entries.

        Branches whose source is never itself a destination ("lost and
        found") are mapped to themselves.
        """
        lostAndFoundBranches = set()

        user = gitConfig("git-p4.branchUser")
        if len(user) > 0:
            command = "branches -u %s" % user
        else:
            command = "branches"

        for info in p4CmdList(command):
            details = p4Cmd(["branch", "-o", info["branch"]])
            viewIdx = 0
            while details.has_key("View%s" % viewIdx):
                paths = details["View%s" % viewIdx].split(" ")
                viewIdx = viewIdx + 1
                # require standard //depot/foo/... //depot/bar/... mapping
                if len(paths) != 2 or not paths[0].endswith("/...") or not paths[1].endswith("/..."):
                    continue
                source = paths[0]
                destination = paths[1]
                ## HACK
                # Only consider mappings within the first depot path; strip
                # the prefix and the trailing "/..." to get short names.
                if p4PathStartsWith(source, self.depotPaths[0]) and p4PathStartsWith(destination, self.depotPaths[0]):
                    source = source[len(self.depotPaths[0]):-4]
                    destination = destination[len(self.depotPaths[0]):-4]

                    if destination in self.knownBranches:
                        if not self.silent:
                            print "p4 branch %s defines a mapping from %s to %s" % (info["branch"], source, destination)
                            print "but there exists another mapping from %s to %s already!" % (self.knownBranches[destination], destination)
                        continue

                    self.knownBranches[destination] = source

                    lostAndFoundBranches.discard(destination)

                    if source not in self.knownBranches:
                        lostAndFoundBranches.add(source)

        # Perforce does not strictly require branches to be defined, so we also
        # check git config for a branch list.
        #
        # Example of branch definition in git config file:
        # [git-p4]
        #   branchList=main:branchA
        #   branchList=main:branchB
        #   branchList=branchA:branchC
        configBranches = gitConfigList("git-p4.branchList")
        for branch in configBranches:
            if branch:
                (source, destination) = branch.split(":")
                self.knownBranches[destination] = source

                lostAndFoundBranches.discard(destination)

                if source not in self.knownBranches:
                    lostAndFoundBranches.add(source)


        for branch in lostAndFoundBranches:
            self.knownBranches[branch] = branch
def getBranchMappingFromGitBranches(self):
branches = p4BranchesInGit(self.importIntoRemotes)
for branch in branches.keys():
if branch == "master":
branch = "main"
else:
branch = branch[len(self.projectName):]
self.knownBranches[branch] = branch
def updateOptionDict(self, d):
option_keys = {}
if self.keepRepoPath:
option_keys['keepRepoPath'] = 1
d["options"] = ' '.join(sorted(option_keys.keys()))
    def readOptions(self, d):
        """Restore option flags previously recorded by updateOptionDict
        (parsed back out of a [git-p4: ...] commit-message line)."""
        self.keepRepoPath = (d.has_key('options')
                             and ('keepRepoPath' in d['options']))
def gitRefForBranch(self, branch):
if branch == "main":
return self.refPrefix + "master"
if len(branch) <= 0:
return branch
return self.refPrefix + self.projectName + branch
    def gitCommitByP4Change(self, ref, change):
        """Find the git commit on `ref` whose [git-p4: ...] trailer has
        exactly p4 change `change`, using a manual bisect over the
        change numbers recorded in the log.  Returns "" if not found."""
        if self.verbose:
            print "looking in ref " + ref + " for change %s using bisect..." % change

        earliestCommit = ""
        latestCommit = parseRevision(ref)

        while True:
            if self.verbose:
                print "trying: earliest %s latest %s" % (earliestCommit, latestCommit)
            next = read_pipe("git rev-list --bisect %s %s" % (latestCommit, earliestCommit)).strip()
            if len(next) == 0:
                if self.verbose:
                    print "argh"
                return ""
            log = extractLogMessageFromGitCommit(next)
            settings = extractSettingsGitLog(log)
            currentChange = int(settings['change'])
            if self.verbose:
                print "current change %s" % currentChange

            if currentChange == change:
                if self.verbose:
                    print "found %s" % next
                return next

            # Narrow the bisection window based on the change number found.
            if currentChange < change:
                earliestCommit = "^%s" % next
            else:
                latestCommit = "%s" % next

        return ""
    def importNewBranch(self, branch, maxChange):
        """Import a newly-detected p4 branch (all changes up to
        maxChange), wiring its first commit to the parent found in the
        source branch's git history.  Returns False if the branch has
        no changes in range."""
        # make fast-import flush all changes to disk and update the refs using the checkpoint
        # command so that we can try to find the branch parent in the git history
        self.gitStream.write("checkpoint\n\n");
        self.gitStream.flush();
        branchPrefix = self.depotPaths[0] + branch + "/"
        range = "@1,%s" % maxChange
        #print "prefix" + branchPrefix
        changes = p4ChangesForPaths([branchPrefix], range)
        if len(changes) <= 0:
            return False
        firstChange = changes[0]
        #print "first change in branch: %s" % firstChange
        sourceBranch = self.knownBranches[branch]
        sourceDepotPath = self.depotPaths[0] + sourceBranch
        sourceRef = self.gitRefForBranch(sourceBranch)
        #print "source " + sourceBranch

        # Latest change on the source branch before this branch was created
        # is the branch point.
        branchParentChange = int(p4Cmd(["changes", "-m", "1", "%s...@1,%s" % (sourceDepotPath, firstChange)])["change"])
        #print "branch parent: %s" % branchParentChange
        gitParent = self.gitCommitByP4Change(sourceRef, branchParentChange)
        if len(gitParent) > 0:
            self.initialParents[self.gitRefForBranch(branch)] = gitParent
            #print "parent git commit: %s" % gitParent

        self.importChanges(changes)
        return True
def searchParent(self, parent, branch, target):
parentFound = False
for blob in read_pipe_lines(["git", "rev-list", "--reverse",
"--no-merges", parent]):
blob = blob.strip()
if len(read_pipe(["git", "diff-tree", blob, target])) == 0:
parentFound = True
if self.verbose:
print "Found parent of %s in commit %s" % (branch, blob)
break
if parentFound:
return blob
else:
return None
    def importChanges(self, changes):
        """Import the given list of p4 change numbers, in order.

        With branch detection enabled, each change's files are split per
        branch and committed to the corresponding git ref, creating new
        branches (and locating their parents) as needed; otherwise every
        change goes onto self.branch.
        """
        cnt = 1
        for change in changes:
            description = p4_describe(change)
            self.updateOptionDict(description)

            if not self.silent:
                sys.stdout.write("\rImporting revision %s (%s%%)" % (change, cnt * 100 / len(changes)))
                sys.stdout.flush()
            cnt = cnt + 1

            try:
                if self.detectBranches:
                    branches = self.splitFilesIntoBranches(description)
                    for branch in branches.keys():
                        ## HACK  --hwn
                        branchPrefix = self.depotPaths[0] + branch + "/"
                        self.branchPrefixes = [ branchPrefix ]

                        parent = ""

                        filesForCommit = branches[branch]

                        if self.verbose:
                            print "branch is %s" % branch

                        self.updatedBranches.add(branch)

                        # First time we see this branch: determine its parent
                        # from the known-branches map, importing the new
                        # branch's own history if necessary.
                        if branch not in self.createdBranches:
                            self.createdBranches.add(branch)
                            parent = self.knownBranches[branch]
                            if parent == branch:
                                parent = ""
                            else:
                                fullBranch = self.projectName + branch
                                if fullBranch not in self.p4BranchesInGit:
                                    if not self.silent:
                                        print("\n Importing new branch %s" % fullBranch);
                                    if self.importNewBranch(branch, change - 1):
                                        parent = ""
                                        self.p4BranchesInGit.append(fullBranch)
                                    if not self.silent:
                                        print("\n Resuming with change %s" % change);

                                if self.verbose:
                                    print "parent determined through known branches: %s" % parent

                        branch = self.gitRefForBranch(branch)
                        parent = self.gitRefForBranch(parent)

                        if self.verbose:
                            print "looking for initial parent for %s; current parent is %s" % (branch, parent)

                        if len(parent) == 0 and branch in self.initialParents:
                            parent = self.initialParents[branch]
                            del self.initialParents[branch]

                        blob = None
                        if len(parent) > 0:
                            # Commit onto a temporary branch first, then search
                            # the parent's history for a commit with an
                            # identical tree to attach to.
                            tempBranch = "%s/%d" % (self.tempBranchLocation, change)
                            if self.verbose:
                                print "Creating temporary branch: " + tempBranch
                            self.commit(description, filesForCommit, tempBranch)
                            self.tempBranches.append(tempBranch)
                            self.checkpoint()
                            blob = self.searchParent(parent, branch, tempBranch)
                        if blob:
                            self.commit(description, filesForCommit, branch, blob)
                        else:
                            if self.verbose:
                                print "Parent of %s not found. Committing into head of %s" % (branch, parent)
                            self.commit(description, filesForCommit, branch, parent)
                else:
                    files = self.extractFilesFromCommit(description)
                    self.commit(description, files, self.branch,
                                self.initialParent)
                    # only needed once, to connect to the previous commit
                    self.initialParent = ""
            except IOError:
                # fast-import died; surface its stderr and give up.
                print self.gitError.read()
                sys.exit(1)
    def importHeadRevision(self, revision):
        """Do a flat initial import: one synthetic git commit containing
        the state of all depot paths at `revision` (e.g. "#head")."""
        print "Doing initial import of %s from revision %s into %s" % (' '.join(self.depotPaths), revision, self.branch)

        details = {}
        details["user"] = "git perforce import user"
        details["desc"] = ("Initial import of %s from the state at revision %s\n"
                           % (' '.join(self.depotPaths), revision))
        details["change"] = revision
        newestRevision = 0

        fileCnt = 0
        fileArgs = ["%s...%s" % (p,revision) for p in self.depotPaths]

        for info in p4CmdList(["files"] + fileArgs):

            if 'code' in info and info['code'] == 'error':
                sys.stderr.write("p4 returned an error: %s\n"
                                 % info['data'])
                if info['data'].find("must refer to client") >= 0:
                    sys.stderr.write("This particular p4 error is misleading.\n")
                    sys.stderr.write("Perhaps the depot path was misspelled.\n");
                    sys.stderr.write("Depot path: %s\n" % " ".join(self.depotPaths))
                sys.exit(1)
            if 'p4ExitCode' in info:
                sys.stderr.write("p4 exitcode: %s\n" % info['p4ExitCode'])
                sys.exit(1)

            change = int(info["change"])
            if change > newestRevision:
                newestRevision = change

            if info["action"] in self.delete_actions:
                # don't increase the file cnt, otherwise details["depotFile123"] will have gaps!
                #fileCnt = fileCnt + 1
                continue

            # Flatten into the same numbered-key layout that "p4 describe"
            # uses, so extractFilesFromCommit() can consume it.
            for prop in ["depotFile", "rev", "action", "type" ]:
                details["%s%s" % (prop, fileCnt)] = info[prop]
            fileCnt = fileCnt + 1

        details["change"] = newestRevision

        # Use time from top-most change so that all git p4 clones of
        # the same p4 repo have the same commit SHA1s.
        res = p4_describe(newestRevision)
        details["time"] = res["time"]

        self.updateOptionDict(details)
        try:
            self.commit(details, self.extractFilesFromCommit(details), self.branch)
        except IOError:
            print "IO error with git fast-import. Is your git version recent enough?"
            print self.gitError.read()
    def run(self, args):
        """Top-level driver for "git p4 sync" (and, via P4Clone, clone).

        Works out which depot paths and change range to import (either
        from the command line or from previous import metadata in the
        git log), starts "git fast-import", and streams the p4 changes
        into it.  Returns True on success.
        """
        self.depotPaths = []
        self.changeRange = ""
        self.previousDepotPaths = []
        self.hasOrigin = False

        # map from branch depot path to parent branch
        self.knownBranches = {}
        self.initialParents = {}

        if self.importIntoRemotes:
            self.refPrefix = "refs/remotes/p4/"
        else:
            self.refPrefix = "refs/heads/p4/"

        if self.syncWithOrigin:
            self.hasOrigin = originP4BranchesExist()
            if self.hasOrigin:
                if not self.silent:
                    print 'Syncing with origin first, using "git fetch origin"'
                system("git fetch origin")

        branch_arg_given = bool(self.branch)
        if len(self.branch) == 0:
            self.branch = self.refPrefix + "master"
            # Migrate a legacy refs/heads/p4 layout into the remotes space.
            if gitBranchExists("refs/heads/p4") and self.importIntoRemotes:
                system("git update-ref %s refs/heads/p4" % self.branch)
                system("git branch -D p4")

        # accept either the command-line option, or the configuration variable
        if self.useClientSpec:
            # will use this after clone to set the variable
            self.useClientSpec_from_options = True
        else:
            if gitConfigBool("git-p4.useclientspec"):
                self.useClientSpec = True
        if self.useClientSpec:
            self.clientSpecDirs = getClientSpec()

        # TODO: should always look at previous commits,
        # merge with previous imports, if possible.
        if args == []:
            # No depot paths given: recover them (and the next change to
            # import) from the [git-p4: ...] metadata of existing branches.
            if self.hasOrigin:
                createOrUpdateBranchesFromOrigin(self.refPrefix, self.silent)

            # branches holds mapping from branch name to sha1
            branches = p4BranchesInGit(self.importIntoRemotes)

            # restrict to just this one, disabling detect-branches
            if branch_arg_given:
                short = self.branch.split("/")[-1]
                if short in branches:
                    self.p4BranchesInGit = [ short ]
            else:
                self.p4BranchesInGit = branches.keys()

            if len(self.p4BranchesInGit) > 1:
                if not self.silent:
                    print "Importing from/into multiple branches"
                self.detectBranches = True
                for branch in branches.keys():
                    self.initialParents[self.refPrefix + branch] = \
                        branches[branch]

            if self.verbose:
                print "branches: %s" % self.p4BranchesInGit

            p4Change = 0
            for branch in self.p4BranchesInGit:
                logMsg = extractLogMessageFromGitCommit(self.refPrefix + branch)

                settings = extractSettingsGitLog(logMsg)

                self.readOptions(settings)
                if (settings.has_key('depot-paths')
                    and settings.has_key ('change')):
                    change = int(settings['change']) + 1
                    p4Change = max(p4Change, change)

                    depotPaths = sorted(settings['depot-paths'])
                    if self.previousDepotPaths == []:
                        self.previousDepotPaths = depotPaths
                    else:
                        # Multiple branches: shrink to the longest common
                        # path-component prefix across all of them.
                        paths = []
                        for (prev, cur) in zip(self.previousDepotPaths, depotPaths):
                            prev_list = prev.split("/")
                            cur_list = cur.split("/")
                            for i in range(0, min(len(cur_list), len(prev_list))):
                                if cur_list[i] <> prev_list[i]:
                                    i = i - 1
                                    break

                            paths.append ("/".join(cur_list[:i + 1]))

                        self.previousDepotPaths = paths

            if p4Change > 0:
                self.depotPaths = sorted(self.previousDepotPaths)
                self.changeRange = "@%s,#head" % p4Change
                if not self.silent and not self.detectBranches:
                    print "Performing incremental import into %s git branch" % self.branch

        # accept multiple ref name abbreviations:
        #    refs/foo/bar/branch -> use it exactly
        #    p4/branch -> prepend refs/remotes/ or refs/heads/
        #    branch -> prepend refs/remotes/p4/ or refs/heads/p4/
        if not self.branch.startswith("refs/"):
            if self.importIntoRemotes:
                prepend = "refs/remotes/"
            else:
                prepend = "refs/heads/"
            if not self.branch.startswith("p4/"):
                prepend += "p4/"
            self.branch = prepend + self.branch

        if len(args) == 0 and self.depotPaths:
            if not self.silent:
                print "Depot paths: %s" % ' '.join(self.depotPaths)
        else:
            if self.depotPaths and self.depotPaths != args:
                print ("previous import used depot path %s and now %s was specified. "
                       "This doesn't work!" % (' '.join (self.depotPaths),
                                               ' '.join (args)))
                sys.exit(1)

            self.depotPaths = sorted(args)

        revision = ""
        self.users = {}

        # Make sure no revision specifiers are used when --changesfile
        # is specified.
        bad_changesfile = False
        if len(self.changesFile) > 0:
            for p in self.depotPaths:
                if p.find("@") >= 0 or p.find("#") >= 0:
                    bad_changesfile = True
                    break
        if bad_changesfile:
            die("Option --changesfile is incompatible with revision specifiers")

        # Split any @range / #rev specifiers off the depot paths and
        # normalize each path to end with "/".
        newPaths = []
        for p in self.depotPaths:
            if p.find("@") != -1:
                atIdx = p.index("@")
                self.changeRange = p[atIdx:]
                if self.changeRange == "@all":
                    self.changeRange = ""
                elif ',' not in self.changeRange:
                    revision = self.changeRange
                    self.changeRange = ""
                p = p[:atIdx]
            elif p.find("#") != -1:
                hashIdx = p.index("#")
                revision = p[hashIdx:]
                p = p[:hashIdx]
            elif self.previousDepotPaths == []:
                # pay attention to changesfile, if given, else import
                # the entire p4 tree at the head revision
                if len(self.changesFile) == 0:
                    revision = "#head"

            p = re.sub ("\.\.\.$", "", p)
            if not p.endswith("/"):
                p += "/"

            newPaths.append(p)

        self.depotPaths = newPaths

        # --detect-branches may change this for each branch
        self.branchPrefixes = self.depotPaths

        self.loadUserMapFromCache()
        self.labels = {}
        if self.detectLabels:
            self.getLabels();

        if self.detectBranches:
            ## FIXME - what's a P4 projectName ?
            self.projectName = self.guessProjectName()

            if self.hasOrigin:
                self.getBranchMappingFromGitBranches()
            else:
                self.getBranchMapping()
            if self.verbose:
                print "p4-git branches: %s" % self.p4BranchesInGit
                print "initial parents: %s" % self.initialParents
            for b in self.p4BranchesInGit:
                if b != "master":

                    ## FIXME
                    b = b[len(self.projectName):]
                self.createdBranches.add(b)

        # Local timezone offset in +HHMM form for committer/tagger idents.
        self.tz = "%+03d%02d" % (- time.timezone / 3600, ((- time.timezone % 3600) / 60))

        self.importProcess = subprocess.Popen(["git", "fast-import"],
                                              stdin=subprocess.PIPE,
                                              stdout=subprocess.PIPE,
                                              stderr=subprocess.PIPE);
        self.gitOutput = self.importProcess.stdout
        self.gitStream = self.importProcess.stdin
        self.gitError = self.importProcess.stderr

        if revision:
            self.importHeadRevision(revision)
        else:
            changes = []

            if len(self.changesFile) > 0:
                # Explicit list of change numbers, one per line.
                output = open(self.changesFile).readlines()
                changeSet = set()
                for line in output:
                    changeSet.add(int(line))

                for change in changeSet:
                    changes.append(change)

                changes.sort()
            else:
                # catch "git p4 sync" with no new branches, in a repo that
                # does not have any existing p4 branches
                if len(args) == 0:
                    if not self.p4BranchesInGit:
                        die("No remote p4 branches. Perhaps you never did \"git p4 clone\" in here.")

                    # The default branch is master, unless --branch is used to
                    # specify something else. Make sure it exists, or complain
                    # nicely about how to use --branch.
                    if not self.detectBranches:
                        if not branch_exists(self.branch):
                            if branch_arg_given:
                                die("Error: branch %s does not exist." % self.branch)
                            else:
                                die("Error: no branch %s; perhaps specify one with --branch." %
                                    self.branch)

                if self.verbose:
                    print "Getting p4 changes for %s...%s" % (', '.join(self.depotPaths),
                                                              self.changeRange)
                changes = p4ChangesForPaths(self.depotPaths, self.changeRange)

                if len(self.maxChanges) > 0:
                    changes = changes[:min(int(self.maxChanges), len(changes))]

            if len(changes) == 0:
                if not self.silent:
                    print "No changes to import!"
            else:
                if not self.silent and not self.detectBranches:
                    print "Import destination: %s" % self.branch

                self.updatedBranches = set()

                if not self.detectBranches:
                    if args:
                        # start a new branch
                        self.initialParent = ""
                    else:
                        # build on a previous revision
                        self.initialParent = parseRevision(self.branch)

                self.importChanges(changes)

                if not self.silent:
                    print ""
                    if len(self.updatedBranches) > 0:
                        sys.stdout.write("Updated branches: ")
                        for b in self.updatedBranches:
                            sys.stdout.write("%s " % b)
                        sys.stdout.write("\n")

        if gitConfigBool("git-p4.importLabels"):
            self.importLabels = True

        if self.importLabels:
            p4Labels = getP4Labels(self.depotPaths)
            gitTags = getGitTags()

            missingP4Labels = p4Labels - gitTags
            self.importP4Labels(self.gitStream, missingP4Labels)

        self.gitStream.close()
        if self.importProcess.wait() != 0:
            die("fast-import failed: %s" % self.gitError.read())
        self.gitOutput.close()
        self.gitError.close()

        # Cleanup temporary branches created during import
        if self.tempBranches != []:
            for branch in self.tempBranches:
                read_pipe("git update-ref -d %s" % branch)
            os.rmdir(os.path.join(os.environ.get("GIT_DIR", ".git"), self.tempBranchLocation))

        # Create a symbolic ref p4/HEAD pointing to p4/<branch> to allow
        # a convenient shortcut refname "p4".
        if self.importIntoRemotes:
            head_ref = self.refPrefix + "HEAD"
            if not gitBranchExists(head_ref) and gitBranchExists(self.branch):
                system(["git", "symbolic-ref", head_ref, self.branch])

        return True
class P4Rebase(Command):
    """Implements "git p4 rebase": fetch the latest changes from p4 and
    rebase the current branch onto the updated p4 branch."""
    def __init__(self):
        Command.__init__(self)
        self.options = [
            optparse.make_option("--import-labels", dest="importLabels", action="store_true"),
        ]
        self.importLabels = False
        self.description = ("Fetches the latest revision from perforce and "
                            + "rebases the current work (branch) against it")

    def run(self, args):
        # Sync from p4 first (optionally importing labels), then rebase.
        sync = P4Sync()
        sync.importLabels = self.importLabels
        sync.run([])

        return self.rebase()

    def rebase(self):
        """Rebase the current branch onto its upstream p4 branch point.
        Refuses to run with a dirty index or uncommitted changes."""
        if os.system("git update-index --refresh") != 0:
            die("Some files in your working directory are modified and different than what is in your index. You can use git update-index <filename> to bring the index up-to-date or stash away all your changes with git stash.");
        if len(read_pipe("git diff-index HEAD --")) > 0:
            die("You have uncommitted changes. Please commit them before rebasing or stash them away with git stash.");

        [upstream, settings] = findUpstreamBranchPoint()
        if len(upstream) == 0:
            die("Cannot find upstream branchpoint for rebase")

        # the branchpoint may be p4/foo~3, so strip off the parent
        upstream = re.sub("~[0-9]+$", "", upstream)

        print "Rebasing the current branch onto %s" % upstream
        oldHead = read_pipe("git rev-parse HEAD").strip()
        system("git rebase %s" % upstream)
        # Show a summary of what the rebase brought in.
        system("git diff-tree --stat --summary -M %s HEAD" % oldHead)
        return True
class P4Clone(P4Sync):
    """git-p4 "clone" command (Python 2 script).

    Creates a fresh git repository at the destination and performs an
    initial P4Sync import from the given Perforce depot path(s).
    """

    def __init__(self):
        P4Sync.__init__(self)
        self.description = "Creates a new git repository and imports from Perforce into it"
        self.usage = "usage: %prog [options] //depot/path[@revRange]"
        self.options += [
            optparse.make_option("--destination", dest="cloneDestination",
                                 action='store', default=None,
                                 help="where to leave result of the clone"),
            optparse.make_option("-/", dest="cloneExclude",
                                 action="append", type="string",
                                 help="exclude depot path"),
            optparse.make_option("--bare", dest="cloneBare",
                                 action="store_true", default=False),
        ]
        self.cloneDestination = None
        self.needsGit = False
        self.cloneBare = False

    # This is required for the "append" cloneExclude action
    def ensure_value(self, attr, value):
        # optparse calls ensure_value() on its Values object when handling
        # an "append" option; the command instance doubles as that Values
        # object (see main()), so it must provide the same method.
        if not hasattr(self, attr) or getattr(self, attr) is None:
            setattr(self, attr, value)
        return getattr(self, attr)

    def defaultDestination(self, args):
        """Derive a clone directory name from the first depot path by
        stripping @rev / #rev specifiers, a trailing "..." and a trailing
        "/", then taking the last path component."""
        ## TODO: use common prefix of args?
        depotPath = args[0]
        depotDir = re.sub("(@[^@]*)$", "", depotPath)
        depotDir = re.sub("(#[^#]*)$", "", depotDir)
        depotDir = re.sub(r"\.\.\.$", "", depotDir)
        depotDir = re.sub(r"/$", "", depotDir)
        return os.path.split(depotDir)[1]

    def run(self, args):
        """Validate arguments, create/init the destination, then delegate
        the actual import to P4Sync.run().  Returns False on usage errors."""
        if len(args) < 1:
            return False

        if self.keepRepoPath and not self.cloneDestination:
            sys.stderr.write("Must specify destination for --keep-path\n")
            sys.exit(1)

        depotPaths = args

        if not self.cloneDestination and len(depotPaths) > 1:
            # Several arguments and no --destination: treat the last
            # argument as the destination directory.
            self.cloneDestination = depotPaths[-1]
            depotPaths = depotPaths[:-1]

        self.cloneExclude = ["/"+p for p in self.cloneExclude]
        for p in depotPaths:
            if not p.startswith("//"):
                sys.stderr.write('Depot paths must start with "//": %s\n' % p)
                return False

        if not self.cloneDestination:
            self.cloneDestination = self.defaultDestination(args)

        print "Importing from %s into %s" % (', '.join(depotPaths), self.cloneDestination)

        if not os.path.exists(self.cloneDestination):
            os.makedirs(self.cloneDestination)
        chdir(self.cloneDestination)

        init_cmd = [ "git", "init" ]
        if self.cloneBare:
            init_cmd.append("--bare")
        retcode = subprocess.call(init_cmd)
        if retcode:
            raise CalledProcessError(retcode, init_cmd)

        if not P4Sync.run(self, depotPaths):
            return False

        # create a master branch and check out a work tree
        if gitBranchExists(self.branch):
            system([ "git", "branch", "master", self.branch ])
            if not self.cloneBare:
                system([ "git", "checkout", "-f" ])
        else:
            print 'Not checking out any branch, use ' \
                  '"git checkout -q -b master <branch>"'

        # auto-set this variable if invoked with --use-client-spec
        if self.useClientSpec_from_options:
            system("git config --bool git-p4.useclientspec true")

        return True
class P4Branches(Command):
    """git-p4 "branches" command (Python 2 script).

    Prints each p4 remote tracking branch together with the Perforce
    depot paths it imports from and the last imported changelist.
    """

    def __init__(self):
        Command.__init__(self)
        self.options = [ ]
        self.description = ("Shows the git branches that hold imports and their "
                            + "corresponding perforce depot paths")
        self.verbose = False

    def run(self, args):
        if originP4BranchesExist():
            createOrUpdateBranchesFromOrigin()

        cmdline = "git rev-parse --symbolic "
        cmdline += " --remotes"

        for line in read_pipe_lines(cmdline):
            line = line.strip()

            # Only p4/* tracking branches are imports; p4/HEAD is a
            # symbolic ref, not an import branch.
            if not line.startswith('p4/') or line == "p4/HEAD":
                continue
            branch = line

            # The import metadata lives in the commit message of the
            # branch tip (depot-paths / change trailer lines).
            log = extractLogMessageFromGitCommit("refs/remotes/%s" % branch)
            settings = extractSettingsGitLog(log)

            print "%s <= %s (%s)" % (branch, ",".join(settings["depot-paths"]), settings["change"])
        return True
class HelpFormatter(optparse.IndentedHelpFormatter):
    """optparse help formatter that emits command descriptions verbatim.

    The stock IndentedHelpFormatter re-wraps description text; this
    subclass returns it untouched (with a trailing newline), so each
    command's pre-formatted description is preserved.
    """

    def __init__(self):
        optparse.IndentedHelpFormatter.__init__(self)

    def format_description(self, description):
        # Pass the text through unchanged; None/empty yields "".
        return description + "\n" if description else ""
def printUsage(commands):
    """Print top-level usage for the git-p4 script.

    commands: iterable of valid sub-command names.
    NOTE: Python 2 print statements -- this script targets python2.
    """
    print "usage: %s <command> [options]" % sys.argv[0]
    print ""
    print "valid commands: %s" % ", ".join(commands)
    print ""
    print "Try %s <command> --help for command specific help." % sys.argv[0]
    print ""
# Mapping of CLI sub-command name to the Command subclass implementing it.
# "submit" and "commit" are aliases for the same class.
commands = {
    "debug" : P4Debug,
    "submit" : P4Submit,
    "commit" : P4Submit,
    "sync" : P4Sync,
    "rebase" : P4Rebase,
    "clone" : P4Clone,
    "rollback" : P4RollBack,
    "branches" : P4Branches
}
def main():
    """git-p4 entry point: look up the Command class named by argv[1],
    parse its options, locate the git directory if needed, then run it.

    Exits with status 2 on usage errors or when the command reports
    failure.  (Python 2 script.)
    """
    if len(sys.argv[1:]) == 0:
        printUsage(commands.keys())
        sys.exit(2)

    cmdName = sys.argv[1]
    try:
        klass = commands[cmdName]
        cmd = klass()
    except KeyError:
        print "unknown command %s" % cmdName
        print ""
        printUsage(commands.keys())
        sys.exit(2)

    options = cmd.options
    cmd.gitdir = os.environ.get("GIT_DIR", None)
    args = sys.argv[2:]

    options.append(optparse.make_option("--verbose", "-v", dest="verbose", action="store_true"))
    if cmd.needsGit:
        options.append(optparse.make_option("--git-dir", dest="gitdir"))

    parser = optparse.OptionParser(cmd.usage.replace("%prog", "%prog " + cmdName),
                                   options,
                                   description = cmd.description,
                                   formatter = HelpFormatter())

    # The command object itself is used as the optparse Values object, so
    # parsed options land directly on cmd's attributes.
    (cmd, args) = parser.parse_args(sys.argv[2:], cmd);
    global verbose
    verbose = cmd.verbose
    if cmd.needsGit:
        if cmd.gitdir == None:
            # No --git-dir / GIT_DIR given: try ./.git, then ask git
            # itself and chdir to the work-tree top if we are below it.
            cmd.gitdir = os.path.abspath(".git")
            if not isValidGitDir(cmd.gitdir):
                cmd.gitdir = read_pipe("git rev-parse --git-dir").strip()
                if os.path.exists(cmd.gitdir):
                    cdup = read_pipe("git rev-parse --show-cdup").strip()
                    if len(cdup) > 0:
                        chdir(cdup);

        if not isValidGitDir(cmd.gitdir):
            if isValidGitDir(cmd.gitdir + "/.git"):
                cmd.gitdir += "/.git"
            else:
                die("fatal: cannot locate git repository at %s" % cmd.gitdir)

        # Export for the git child processes spawned by the command.
        os.environ["GIT_DIR"] = cmd.gitdir

    if not cmd.run(args):
        parser.print_help()
        sys.exit(2)
if __name__ == '__main__':
main()
| []
| []
| [
"HOME",
"P4DIFF",
"P4EDITOR",
"USERPROFILE",
"PWD",
"GIT_DIR"
]
| [] | ["HOME", "P4DIFF", "P4EDITOR", "USERPROFILE", "PWD", "GIT_DIR"] | python | 6 | 0 | |
rolls/cmds/start_funcs.py | import asyncio
import os
import subprocess
import sys
from pathlib import Path
from typing import Optional
from rolls.cmds.passphrase_funcs import get_current_passphrase
from rolls.daemon.client import DaemonProxy, connect_to_daemon_and_validate
from rolls.util.keychain import KeyringMaxUnlockAttempts
from rolls.util.service_groups import services_for_groups
def launch_start_daemon(root_path: Path) -> subprocess.Popen:
    """Spawn the rolls daemon as a child process and return its Popen.

    Exports ROLLS_ROOT so the daemon uses the given root directory, and
    captures the daemon's stdout so the caller can wait for its
    "daemon: listening" line.
    """
    os.environ["ROLLS_ROOT"] = str(root_path)
    # TODO: use startupinfo=subprocess.DETACHED_PROCESS on windows
    rolls = sys.argv[0]
    # Build argv as a list rather than f"...".split(): the executable path
    # may contain spaces (e.g. Windows install directories), which split()
    # would break into bogus arguments.
    process = subprocess.Popen(
        [rolls, "run_daemon", "--wait-for-unlock"],
        stdout=subprocess.PIPE,
    )
    return process
async def create_start_daemon_connection(root_path: Path) -> Optional[DaemonProxy]:
    """Connect to the rolls daemon, launching it first if not running.

    Returns a validated DaemonProxy (with its keyring unlocked when a
    passphrase is required), or None if no connection could be made.
    """
    connection = await connect_to_daemon_and_validate(root_path)
    if connection is None:
        print("Starting daemon")
        # launch a daemon
        process = launch_start_daemon(root_path)
        # give the daemon a chance to start up
        if process.stdout:
            process.stdout.readline()
        await asyncio.sleep(1)
        # it prints "daemon: listening"
        connection = await connect_to_daemon_and_validate(root_path)
    if connection:
        passphrase = None
        if await connection.is_keyring_locked():
            # Prompts the user (or reads a cached value) for the passphrase.
            passphrase = get_current_passphrase()

        if passphrase:
            print("Unlocking daemon keyring")
            await connection.unlock_keyring(passphrase)

        return connection
    return None
async def async_start(root_path: Path, group: str, restart: bool) -> None:
    """Start (or with restart=True, stop then start) every service in *group*.

    Obtains a daemon connection first; prints per-service progress.
    May raise nothing itself -- keyring failures are reported and swallowed.
    """
    try:
        daemon = await create_start_daemon_connection(root_path)
    except KeyringMaxUnlockAttempts:
        print("Failed to unlock keyring")
        return None

    if daemon is None:
        print("Failed to create the rolls daemon")
        return None

    for service in services_for_groups(group):
        if await daemon.is_running(service_name=service):
            print(f"{service}: ", end="", flush=True)
            if restart:
                # Stop the running instance; on success we fall through to
                # the start logic below (no continue here).
                if not await daemon.is_running(service_name=service):
                    print("not running")
                elif await daemon.stop_service(service_name=service):
                    print("stopped")
                else:
                    print("stop failed")
            else:
                print("Already running, use `-r` to restart")
                continue
        print(f"{service}: ", end="", flush=True)
        msg = await daemon.start_service(service_name=service)
        success = msg and msg["data"]["success"]

        if success is True:
            print("started")
        else:
            error = "no response"
            if msg:
                error = msg["data"]["error"]
            print(f"{service} failed to start. Error: {error}")
    await daemon.close()
| []
| []
| [
"ROLLS_ROOT"
]
| [] | ["ROLLS_ROOT"] | python | 1 | 0 | |
handler/bugsnaghandler/handler_integration_test.go | package bugsnaghandler
import (
"net/http"
"os"
"testing"
"time"
"github.com/pkg/errors"
"github.com/goph/emperror"
"github.com/goph/emperror/httperr"
)
// newHandler builds a bugsnag Handler for the integration tests.
// When the BUGSNAG_API_KEY environment variable is unset the calling
// test is skipped, so credential-less CI runs do not fail.
func newHandler(t *testing.T) *Handler {
	apiKey := os.Getenv("BUGSNAG_API_KEY")
	if apiKey == "" {
		t.Skip("missing bugsnag credentials")
	}

	return New(apiKey)
}
// TestIntegration_Handler reports a plain error to the real Bugsnag API.
func TestIntegration_Handler(t *testing.T) {
	handler := newHandler(t)

	err := errors.New("error")

	handler.Handle(err)

	// Wait for the notice to reach the queue before closing
	time.Sleep(500 * time.Millisecond)
}
// TestIntegration_WithContext reports an error that carries key/value
// context attached via emperror.With.
func TestIntegration_WithContext(t *testing.T) {
	handler := newHandler(t)

	err := emperror.With(errors.New("error with context"), "key", "value")

	handler.Handle(err)

	// Wait for the notice to reach the queue before closing
	time.Sleep(500 * time.Millisecond)
}
// TestIntegration_WithHTTPRequest reports an error annotated with an
// *http.Request via httperr.WithHTTPRequest.
func TestIntegration_WithHTTPRequest(t *testing.T) {
	handler := newHandler(t)

	req, e := http.NewRequest("GET", "https://google.com", nil)
	if e != nil {
		t.Fatal(e)
	}

	err := httperr.WithHTTPRequest(errors.New("error with http request"), req)

	handler.Handle(err)

	// Wait for the notice to reach the queue before closing
	time.Sleep(500 * time.Millisecond)
}
| [
"\"BUGSNAG_API_KEY\""
]
| []
| [
"BUGSNAG_API_KEY"
]
| [] | ["BUGSNAG_API_KEY"] | go | 1 | 0 | |
demos/demo_pycloudmessenger/POM1/NeuralNetworks/pom1_NN_master_pycloudmessenger.py | # -*- coding: utf-8 -*-
'''
@author: Marcos Fernandez Diaz
February 2020
Example of use: python pom1_NN_model_averaging_master_pycloudmessenger.py --user <user> --password <password> --task_name <task_name> --normalization <normalization> --implementation <implementation>
Parameters:
- user: String with the name of the user. If the user does not exist in the pycloudmessenger platform a new one will be created
- password: String with the password
- task_name: String with the name of the task. If the task already exists, an error will be displayed
- normalization: String indicating whether to apply normalization. Possible options are std or minmax. By default no normalization is used
- implementation: String indicating whether to use gradient_averaging or model_averaging implementation. By default the latter is used.
- optimizer: String indicating the type of optimizer to use (only valid when gradient implementation=gradient_descent).
'''
# Import general modules
import argparse
import logging
import json
import time
import numpy as np
import sys, os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Disables tensorflow warnings
import tensorflow as tf
import onnxruntime
from sklearn.metrics import accuracy_score
# Add higher directory to python modules path.
sys.path.append("../../../../")
os.environ['KMP_WARNINGS'] = 'off' # Remove KMP_AFFINITY logs
# To be imported from MMLL (pip installed)
from MMLL.nodes.MasterNode import MasterNode
from MMLL.comms.comms_pycloudmessenger import Comms_master as Comms
# To be imported from demo_tools
from demo_tools.task_manager_pycloudmessenger import Task_Manager
from demo_tools.data_connectors.Load_from_file import Load_From_File as DC
from demo_tools.mylogging.logger_v1 import Logger
from demo_tools.evaluation_tools import display, plot_cm_seaborn, create_folders
# Set up logger
logging.basicConfig(
level=logging.ERROR,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)
if __name__ == "__main__":

    # ---- Command-line arguments -------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--user', type=str, default=None, help='User')
    parser.add_argument('--password', type=str, default=None, help='Password')
    parser.add_argument('--task_name', type=str, default=None, help='Name of the task')
    parser.add_argument('--normalization', type=str, default='no', choices=['no', 'std', 'minmax'], help='Type of normalization')
    parser.add_argument('--implementation', type=str, default='model_averaging', choices=['model_averaging', 'gradient_descent'], help='Type of implementation')
    parser.add_argument('--optimizer', type=str, default='Adam', choices=['Adam', 'RMSprop', 'SGD'], help='Gradient descent optimizer')

    FLAGS, unparsed = parser.parse_known_args()
    user_name = FLAGS.user
    user_password = FLAGS.password
    task_name = FLAGS.task_name
    normalization = FLAGS.normalization
    implementation = FLAGS.implementation
    optimizer = FLAGS.optimizer

    # Set basic configuration
    dataset_name = 'mnist'
    verbose = False
    pom = 1
    model_type = 'NN'
    Nworkers = 2

    # Create the directories for storing relevant outputs if they do not exist
    create_folders("./results/")

    # Setting up the logger
    logger = Logger('./results/logs/Master.log')

    # Load the model architecture as defined by Keras model.to_json()
    keras_filename = 'keras_model_MLP_mnist.json'
    try:
        with open('./' + keras_filename, 'r') as json_file:
            model_architecture = json_file.read()
    except:
        display('Error - The file ' + keras_filename + ' defining the neural network architecture is not available, please put it under the following path: "' + os.path.abspath(os.path.join("","./")) + '"', logger, verbose)
        sys.exit()

    # Task definition
    if implementation.lower() == 'model_averaging':
        model_averaging = 'True'
    else:
        model_averaging = 'False'

    # NOTE(review): model_averaging is computed above but not placed in
    # task_definition (it is passed via model_parameters below) -- confirm
    # that is intentional.
    task_definition = {"quorum": Nworkers,
                       "POM": pom,
                       "model_type": model_type,
                      }

    # Load the credentials for pycloudmessenger
    display('===========================================', logger, verbose)
    display('Creating Master... ', logger, verbose)
    display('Please wait until Master is ready before launching the workers...', logger, verbose)
    # Note: this part creates the task and waits for the workers to join. This code is
    # intended to be used only at the demos, in Musketeer this part must be done in the client.
    credentials_filename = '../../musketeer.json'
    try:
        with open(credentials_filename, 'r') as f:
            credentials = json.load(f)
    except:
        display('Error - The file musketeer.json is not available, please put it under the following path: "' + os.path.abspath(os.path.join("","../../")) + '"', logger, verbose)
        sys.exit()

    # Create task and wait for participants to join
    tm = Task_Manager(credentials_filename)
    aggregator = tm.create_master_and_taskname(display, logger, task_definition, user_name=user_name, user_password=user_password, task_name=task_name)
    display('Waiting for the workers to join task name = %s' % tm.task_name, logger, verbose)
    tm.wait_for_workers_to_join(display, logger)

    # Creating the comms object
    display('Creating MasterNode under POM %d, communicating through pycloudmessenger' %pom, logger, verbose)
    comms = Comms(aggregator)

    # Creating Masternode
    mn = MasterNode(pom, comms, logger, verbose)
    display('-------------------- Loading dataset %s --------------------------' %dataset_name, logger, verbose)

    # Load data
    # Warning: this data connector is only designed for the demos. In Musketeer, appropriate data
    # connectors must be provided
    data_file = '../../../../input_data/' + dataset_name + '_demonstrator_data.pkl'
    try:
        dc = DC(data_file)
    except:
        display('Error - The file ' + dataset_name + '_demonstrator_data.pkl does not exist. Please download it from Box and put it under the following path: "' + os.path.abspath(os.path.join("","../../../../input_data/")) + '"', logger, verbose)
        sys.exit()

    # Input and output data description needed for preprocessing
    number_inputs = 784
    feature_description = {"type": "num"}
    feature_array = [feature_description for index in range(number_inputs)]
    data_description = {
                        "NI": number_inputs,
                        "input_types": feature_array
                        }

    # Creating a ML model: hyper-parameters handed to the MasterNode.
    model_parameters = {}
    model_parameters['learning_rate'] = 0.15
    model_parameters['Nmaxiter'] = 5
    model_parameters['model_architecture'] = model_architecture
    model_parameters['optimizer'] = optimizer
    model_parameters['momentum'] = 1
    model_parameters['nesterov'] = 'False'
    model_parameters['loss'] = 'categorical_crossentropy'
    model_parameters['metric'] = 'accuracy'
    model_parameters['batch_size'] = 128
    model_parameters['num_epochs'] = 2
    model_parameters['model_averaging'] = model_averaging
    model_parameters['aggregator'] = None
    mn.create_model_Master(model_type, model_parameters=model_parameters)
    display('MMLL model %s is ready for training!' % model_type, logger, verbose)

    # Normalization of data in each worker before training
    if normalization=='std':
        normalizer = mn.normalizer_fit_transform_workers(data_description, 'global_mean_std')
    elif normalization=='minmax':
        normalizer = mn.normalizer_fit_transform_workers(data_description, 'global_min_max')

    # Start the training procedure.
    display('Training the model %s' % model_type, logger, verbose)
    t_ini = time.time()
    [Xval, yval] = dc.get_data_val()
    if normalization != 'no':
        Xval = normalizer.transform(Xval)
    mn.fit(Xval=Xval, yval=yval)
    t_end = time.time()
    display('Training is complete: Training time = %s seconds' % str(t_end - t_ini)[0:6], logger, verbose)

    # Retrieving and saving the final model
    display('Retrieving the trained model from MasterNode', logger, verbose)
    model = mn.get_model()

    # Warning: this save_model utility is only for demo purposes
    output_filename_model = './results/models/Master_' + dataset_name + '_model'
    model.save(output_filename_model)

    # Making predictions on test data
    display('------------- Obtaining predictions----------------------------------\n', logger, verbose)
    [Xtst, ytst] = dc.get_data_tst()
    if normalization != 'no':
        Xtst = normalizer.transform(Xtst)
    preds_tst = model.predict(Xtst)
    preds_tst = np.argmax(preds_tst, axis=-1) # Convert to labels
    y = np.argmax(ytst, axis=-1) # Convert to labels
    classes = np.arange(ytst.shape[1]) # 0 to 9

    # Evaluating the results
    display('------------- Evaluating --------------------------------------------\n', logger, verbose)
    # Warning, these evaluation methods are not part of the MMLL library, they are only intended
    # to be used for the demos. Use them at your own risk.
    output_filename = 'Master_NN_confusion_matrix_' + dataset_name + '.png'
    title = 'NN confusion matrix in test set master'
    plot_cm_seaborn(preds_tst, y, classes, title, output_filename, logger, verbose, normalize=True)

    # Load Tf SavedModel and check results
    model_loaded = tf.keras.models.load_model(output_filename_model)
    preds_tst = model_loaded.predict(Xtst)
    preds_tst = np.argmax(preds_tst, axis=-1) # Convert to labels

    # Model export to ONXX -- presumably model.save() dispatches on the
    # '.onnx' extension; confirm against the MMLL model implementation.
    output_filename_model = './results/models/Master_' + dataset_name + '_model.onnx'
    model.save(output_filename_model)

    # Compute the prediction with ONNX Runtime
    onnx_session = onnxruntime.InferenceSession(output_filename_model)
    onnx_inputs = {onnx_session.get_inputs()[0].name: Xtst}
    onnx_output = onnx_session.run(None, onnx_inputs)[0]
    onnx_output = np.argmax(onnx_output, axis=-1) # Convert to labels
    err_onnx = accuracy_score(y,onnx_output)
    display('Test accuracy in ONNX model is %f' %err_onnx, logger, verbose)

    # Terminate workers
    display('Terminating all worker nodes.', logger, verbose)
    mn.terminate_workers()

    display('----------------------------------------------------------------------', logger, verbose)
    display('------------------------- END MMLL Procedure -------------------------', logger, verbose)
    display('----------------------------------------------------------------------\n', logger, verbose)
| []
| []
| [
"KMP_WARNINGS",
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["KMP_WARNINGS", "TF_CPP_MIN_LOG_LEVEL"] | python | 2 | 0 | |
main.go | package main
import (
"encoding/json"
"log"
"net/http"
"os"
gitlab "github.com/xanzy/go-gitlab"
authentication "k8s.io/api/authentication/v1beta1"
)
// main starts a Kubernetes TokenReview webhook on :3000 that validates
// bearer tokens against a GitLab instance.  GITLAB_URL is the API base
// URL, e.g. https://gitlab.com/api/v4.
func main() {
	gitlabUrl := os.Getenv("GITLAB_URL")
	http.HandleFunc("/authenticate", func(w http.ResponseWriter, r *http.Request) {
		decoder := json.NewDecoder(r.Body)
		var tr authentication.TokenReview
		err := decoder.Decode(&tr)
		if err != nil {
			log.Println("[Error]", err.Error())
			w.WriteHeader(http.StatusBadRequest)
			json.NewEncoder(w).Encode(map[string]interface{}{
				"apiVersion": "authentication.k8s.io/v1beta1",
				"kind":       "TokenReview",
				"status": authentication.TokenReviewStatus{
					Authenticated: false,
				},
			})
			return
		}

		// Check User: use the presented token as a GitLab token and ask
		// who it belongs to.
		git, err := gitlab.NewClient(tr.Spec.Token, gitlab.WithBaseURL(gitlabUrl))
		if err != nil {
			log.Println("[Error] failed to get client", err.Error())
			return
		}
		user, _, err := git.Users.CurrentUser(nil)
		if err != nil {
			log.Println("[Error]", err.Error())
			w.WriteHeader(http.StatusUnauthorized)
			json.NewEncoder(w).Encode(map[string]interface{}{
				"apiVersion": "authentication.k8s.io/v1beta1",
				"kind":       "TokenReview",
				"status": authentication.TokenReviewStatus{
					Authenticated: false,
				},
			})
			return
		}

		// Map the user's GitLab groups to Kubernetes groups.
		// BUG FIX: the error from ListGroups was previously ignored, which
		// could silently authenticate users with an empty group list.
		projects, _, err := git.Groups.ListGroups(&gitlab.ListGroupsOptions{})
		if err != nil {
			log.Println("[Error] failed to list groups", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			json.NewEncoder(w).Encode(map[string]interface{}{
				"apiVersion": "authentication.k8s.io/v1beta1",
				"kind":       "TokenReview",
				"status": authentication.TokenReviewStatus{
					Authenticated: false,
				},
			})
			return
		}
		var groups []string
		for _, g := range projects {
			groups = append(groups, g.Name)
		}

		log.Printf("[Success] login as %s, groups: %v", user.Username, groups)
		w.WriteHeader(http.StatusOK)
		trs := authentication.TokenReviewStatus{
			Authenticated: true,
			User: authentication.UserInfo{
				Username: user.Username,
				UID:      user.Username,
				Groups:   groups,
			},
		}

		json.NewEncoder(w).Encode(map[string]interface{}{
			"apiVersion": "authentication.k8s.io/v1beta1",
			"kind":       "TokenReview",
			"status":     trs,
		})
	})
	log.Fatal(http.ListenAndServe(":3000", nil))
}
| [
"\"GITLAB_URL\""
]
| []
| [
"GITLAB_URL"
]
| [] | ["GITLAB_URL"] | go | 1 | 0 | |
tf/pred_simese_cnn.py | # !/usr/bin/env python
import sys
import os
import tensorflow as tf
from dataset import Dataset
from train import FLAGS
# Force CPU-only inference: hide every CUDA device from TensorFlow.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

# Override the training-time flags for this prediction run.
FLAGS.model_dir = '../model/test1/'
FLAGS.max_document_length = 34

# When False, the script also scores the predictions against the labels
# present in the input file (see the __main__ block below).
Upload=False
def main(input_file, output_file):
    """Run siamese-CNN inference (TF1 graph mode) on sentence pairs.

    Restores the checkpointed graph from FLAGS.model_dir, feeds the pairs
    read from *input_file*, and writes one "<lineno>\t<pred>" row per pair
    to *output_file*.
    """
    graph = tf.Graph()
    with graph.as_default():  # with tf.Graph().as_default() as g:
        sess = tf.Session()
        with sess.as_default():
            # Load the saved meta graph and restore variables
            # saver = tf.train.Saver(tf.global_variables())
            meta_file = os.path.abspath(os.path.join(FLAGS.model_dir, 'checkpoints/model-8700.meta'))
            new_saver = tf.train.import_meta_graph(meta_file)
            #new_saver.restore(sess, tf.train.latest_checkpoint(os.path.join(FLAGS.model_dir, 'checkpoints')))
            new_saver.restore(sess, tf.train.latest_checkpoint(os.path.join(FLAGS.model_dir, 'checkpoints')))

            # graph = tf.get_default_graph()

            # Get the placeholders from the graph by name
            # input_x1 = graph.get_operation_by_name("input_x1").outputs[0]
            input_x1 = graph.get_tensor_by_name("input_x1:0")  # Tensor("input_x1:0", shape=(?, 15), dtype=int32)
            input_x2 = graph.get_tensor_by_name("input_x2:0")
            dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")
            #dropout_emb = graph.get_tensor_by_name("dropout_emb:0")

            # Tensors we want to evaluate
            y_pred = graph.get_tensor_by_name("metrics/y_pred:0")
            # vars = tf.get_collection('vars')
            # for var in vars:
            #     print(var)
            e = graph.get_tensor_by_name("cosine:0")

            # Generate batches for one epoch (no shuffling so line numbers
            # in the output stay aligned with the input file).
            dataset = Dataset(data_file=input_file, is_training=False)
            data = dataset.process_data(data_file=input_file, sequence_length=FLAGS.max_document_length)
            batches = dataset.batch_iter(data, FLAGS.batch_size, 1, shuffle=False)

            with open(output_file, 'w') as fo:
                print("\nPredicting...\n")
                lineno = 1
                for batch in batches:
                    #print batch
                    #exit(1)
                    x1_batch, x2_batch, _, _ = zip(*batch)
                    # dropout disabled (keep_prob=1.0) at inference time
                    y_pred_ = sess.run([y_pred], {input_x1: x1_batch, input_x2: x2_batch, dropout_keep_prob: 1.0})
                    for pred in y_pred_[0]:
                        fo.write('{}\t{}\n'.format(lineno, int(pred)))
                        lineno += 1
if __name__ == '__main__':
    # Set to INFO for tracking training, default is WARN. ERROR for least messages
    tf.logging.set_verbosity(tf.logging.WARN)
    main(sys.argv[1], sys.argv[2])

    # Offline evaluation: compare the written predictions against the
    # gold labels (4th tab-separated column of the input file).
    if Upload==False:
        file = open(sys.argv[1])
        y_true=[]
        for line in file:
            y_true.append(int(line.strip().split('\t')[3]))
        file.close()

        file = open(sys.argv[2])
        y_pred=[]
        for line in file:
            y_pred.append(int(line.strip().split('\t')[1]))
        file.close()
        #print(y_true)

        from sklearn import metrics
        import numpy as np
        #####
        # Do classification task,
        # then get the ground truth and the predict label named y_true and y_pred
        precision = metrics.precision_score(y_true, y_pred)
        recall = metrics.recall_score(y_true, y_pred)
        score = metrics.f1_score(y_true, y_pred,average='binary')
        print(precision)
        print(recall)
        print('score: {0:f}'.format(score))
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
Kai/crab/NANOv7_Fri13/2017/tt_SL-GF/crab_cfg_2017_tt_SL-GF-TuneCP5up.py | import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromCRIC
# CRAB3 job configuration for a 2017 NanoAODv7 post-processing task
# (TTToSemiLepton HT500Njet9 TuneCP5up sample).
config = Configuration()

config.section_("General")
config.General.requestName = '2017_tt_SL-GF-TuneCP5up'
config.General.transferOutputs = True
config.General.transferLogs = True

config.section_("JobType")
config.JobType.allowUndistributedCMSSW = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.maxMemoryMB = 2000
config.JobType.maxJobRuntimeMin = 1315
config.JobType.numCores = 1
config.JobType.scriptExe = 'crab_script_2017_tt_SL-GF-TuneCP5up.sh'
# Ship the per-task python script plus NanoAODTools' haddnano.py helper
# (resolved inside the local CMSSW release area).
config.JobType.inputFiles = ['crab_script_2017_tt_SL-GF-TuneCP5up.py',
                             os.path.join(os.environ['CMSSW_BASE'],'src/PhysicsTools/NanoAODTools/scripts/haddnano.py'),
                            ]
config.JobType.outputFiles = ['hist.root']
config.JobType.sendPythonFolder = True

config.section_("Data")
config.Data.inputDataset = '/TTToSemiLepton_HT500Njet9_TuneCP5up_PSweights_13TeV-powheg-pythia8/RunIIFall17NanoAODv7-PU2017_12Apr2018_Nano02Apr2020_102X_mc2017_realistic_v8-v1/NANOAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
if config.Data.splitting == 'FileBased':
    config.Data.unitsPerJob = 1
    # config.Data.totalUnits = $TOTAL_UNITS
    # config.Data.userInputFiles = []
config.Data.outLFNDirBase = '/store/user/{user}/Fri13'.format(user=getUsernameFromCRIC())
config.Data.publication = True
config.Data.outputDatasetTag = 'Fri13'

config.section_("Site")
config.Site.storageSite = 'T2_CH_CERN'
| []
| []
| [
"CMSSW_BASE"
]
| [] | ["CMSSW_BASE"] | python | 1 | 0 | |
tests/plugins/test_reactor_config.py | """
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
from jsonschema import ValidationError
import io
import logging
import os
import pkg_resources
import pytest
from textwrap import dedent
import re
import yaml
import smtplib
from copy import deepcopy
import atomic_reactor
import koji
from atomic_reactor.core import DockerTasker
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.util import read_yaml
import atomic_reactor.koji_util
import atomic_reactor.pulp_util
import atomic_reactor.odcs_util
import osbs.conf
import osbs.api
from osbs.utils import RegistryURI
from atomic_reactor.plugins.pre_reactor_config import (ReactorConfig,
ReactorConfigPlugin,
get_config, WORKSPACE_CONF_KEY,
get_koji_session,
get_koji_path_info,
get_pulp_session,
get_odcs_session,
get_smtp_session,
get_openshift_session,
get_clusters_client_config_path,
get_docker_registry,
get_platform_to_goarch_mapping,
get_goarch_to_platform_mapping,
get_default_image_build_method,
get_flatpak_base_image,
CONTAINER_DEFAULT_BUILD_METHOD,
get_build_image_override,
NO_FALLBACK)
from tests.constants import TEST_IMAGE, REACTOR_CONFIG_MAP
from tests.docker_mock import mock_docker
from tests.fixtures import reactor_config_map # noqa
from flexmock import flexmock
class TestReactorConfigPlugin(object):
def prepare(self):
mock_docker()
tasker = DockerTasker()
workflow = DockerBuildWorkflow({'provider': 'git', 'uri': 'asd'},
TEST_IMAGE)
return tasker, workflow
@pytest.mark.parametrize(('fallback'), [
False,
True
])
@pytest.mark.parametrize(('config', 'valid'), [
("""\
version: 1
registries:
- url: https://container-registry.example.com/v2
auth:
cfg_path: /var/run/secrets/atomic-reactor/v2-registry-dockercfg
""",
True),
("""\
version: 1
registries:
- url: https://old-container-registry.example.com/v1
auth:
cfg_path: /var/run/secrets/atomic-reactor/v1-registry-dockercfg
- url: https://container-registry.example.com/v2
auth:
cfg_path: /var/run/secrets/atomic-reactor/v2-registry-dockercfg
""",
True),
("""\
version: 1
registries:
- url: https://old-container-registry.example.com/v1
auth:
cfg_path: /var/run/secrets/atomic-reactor/v1-registry-dockercfg
""",
False),
])
def test_get_docker_registry(self, config, fallback, valid):
tasker, workflow = self.prepare()
workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
config_json = read_yaml(config, 'schemas/config.json')
docker_reg = {
'version': 'v2',
'insecure': False,
'secret': '/var/run/secrets/atomic-reactor/v2-registry-dockercfg',
'url': 'https://container-registry.example.com/v2',
}
if fallback:
if valid:
docker_fallback = docker_reg
expected = docker_reg
else:
docker_fallback = NO_FALLBACK
else:
docker_fallback = {}
expected = {
'url': 'https://container-registry.example.com',
'insecure': False,
'secret': '/var/run/secrets/atomic-reactor/v2-registry-dockercfg'
}
workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
ReactorConfig(config_json)
if valid:
docker_registry = get_docker_registry(workflow, docker_fallback)
assert docker_registry == expected
else:
if fallback:
with pytest.raises(KeyError):
get_docker_registry(workflow, docker_fallback)
else:
with pytest.raises(RuntimeError):
get_docker_registry(workflow, docker_fallback)
def test_no_config(self):
tasker, workflow = self.prepare()
conf = get_config(workflow)
assert isinstance(conf, ReactorConfig)
same_conf = get_config(workflow)
assert conf is same_conf
@pytest.mark.parametrize('basename', ['reactor-config.yaml', None])
def test_filename(self, tmpdir, basename):
filename = os.path.join(str(tmpdir), basename or 'config.yaml')
with open(filename, 'w'):
pass
tasker, workflow = self.prepare()
plugin = ReactorConfigPlugin(tasker, workflow,
config_path=str(tmpdir),
basename=filename)
assert plugin.run() is None
def test_filename_not_found(self):
tasker, workflow = self.prepare()
plugin = ReactorConfigPlugin(tasker, workflow, config_path='/not-found')
with pytest.raises(Exception):
plugin.run()
def test_no_schema_resource(self, tmpdir, caplog):
class FakeProvider(object):
def get_resource_stream(self, pkg, rsc):
raise IOError
# pkg_resources.resource_stream() cannot be mocked directly
# Instead mock the module-level function it calls.
(flexmock(pkg_resources)
.should_receive('get_provider')
.and_return(FakeProvider()))
filename = os.path.join(str(tmpdir), 'config.yaml')
with open(filename, 'w'):
pass
tasker, workflow = self.prepare()
plugin = ReactorConfigPlugin(tasker, workflow, config_path=str(tmpdir))
with caplog.atLevel(logging.ERROR), pytest.raises(Exception):
plugin.run()
captured_errs = [x.message for x in caplog.records()]
assert "unable to extract JSON schema, cannot validate" in captured_errs
@pytest.mark.parametrize('schema', [
# Invalid JSON
'{',
# Invalid schema
'{"properties": {"any": null}}',
])
def test_invalid_schema_resource(self, tmpdir, caplog, schema):
class FakeProvider(object):
def get_resource_stream(self, pkg, rsc):
return io.BufferedReader(io.BytesIO(schema))
# pkg_resources.resource_stream() cannot be mocked directly
# Instead mock the module-level function it calls.
(flexmock(pkg_resources)
.should_receive('get_provider')
.and_return(FakeProvider()))
filename = os.path.join(str(tmpdir), 'config.yaml')
with open(filename, 'w'):
pass
tasker, workflow = self.prepare()
plugin = ReactorConfigPlugin(tasker, workflow, config_path=str(tmpdir))
with caplog.atLevel(logging.ERROR), pytest.raises(Exception):
plugin.run()
captured_errs = [x.message for x in caplog.records()]
assert any("cannot validate" in x for x in captured_errs)
@pytest.mark.parametrize(('config', 'errors'), [  # noqa:F811
    # Missing top-level "version"
    ("""\
     clusters:
       foo:
       - name: bar
         max_concurrent_builds: 1
     """, [
        "validation error (at top level): "
        "%r is a required property" % u'version',
    ]),
    # Cluster values must be arrays; platform names must match the schema
    ("""\
     version: 1
     clusters:
       foo:
       bar: 1
       plat/form:
       - name: foo
         max_concurrent_builds: 1
     """, [
        "validation error (clusters.foo): None is not of type %r" % u'array',
        "validation error (clusters.bar): 1 is not of type %r" % u'array',
        re.compile(r"validation error \(clusters\): .*'plat/form'"),
    ]),
    # Bad entry fields: wrong types and out-of-range values
    ("""\
     version: 1
     clusters:
       foo:
       - name: 1
         max_concurrent_builds: 1
       - name: blah
         max_concurrent_builds: one
       - name: "2"  # quoting prevents error
         max_concurrent_builds: 2
       - name: negative
         max_concurrent_builds: -1
     """, [
        "validation error (clusters.foo[0].name): "
        "1 is not of type %r" % u'string',
        "validation error (clusters.foo[1].max_concurrent_builds): "
        "'one' is not of type %r" % u'integer',
        re.compile(r"validation error \(clusters\.foo\[3\]\.max_concurrent_builds\): -1(\.0)?"
                   r" is less than the minimum of 0"),
    ]),
    # "enabled" must be boolean
    ("""\
     version: 1
     clusters:
       foo:
       - name: blah
         max_concurrent_builds: 1
         enabled: never
     """, [
        "validation error (clusters.foo[0].enabled): "
        "'never' is not of type %r" % u'boolean',
    ]),
    # Required properties and additionalProperties enforcement
    ("""\
     version: 1
     clusters:
       foo:
       # missing name
       - nam: bar
         max_concurrent_builds: 1
       # missing max_concurrent_builds
       - name: baz
         max_concurrrent_builds: 2
       - name: bar
         max_concurrent_builds: 4
         extra: false
     """, [
        "validation error (clusters.foo[0]): "
        "%r is a required property" % u'name',
        "validation error (clusters.foo[1]): "
        "%r is a required property" % u'max_concurrent_builds',
        "validation error (clusters.foo[2]): "
        "Additional properties are not allowed ('extra' was unexpected)",
    ])
])
def test_bad_cluster_config(self, tmpdir, caplog, reactor_config_map,
                            config, errors):
    """Invalid cluster configs must fail schema validation and log every
    expected error message (exact string or regexp) at ERROR level."""
    if reactor_config_map:
        os.environ['REACTOR_CONFIG'] = dedent(config)
    else:
        filename = os.path.join(str(tmpdir), 'config.yaml')
        with open(filename, 'w') as fp:
            fp.write(dedent(config))
    tasker, workflow = self.prepare()
    plugin = ReactorConfigPlugin(tasker, workflow, config_path=str(tmpdir))
    try:
        with caplog.atLevel(logging.ERROR), pytest.raises(ValidationError):
            plugin.run()
    finally:
        # Pop inside "finally": previously, if the expected ValidationError
        # was not raised, pytest.raises failed the test before the pop ran
        # and REACTOR_CONFIG leaked into subsequent tests.
        os.environ.pop('REACTOR_CONFIG', None)
    captured_errs = [x.message for x in caplog.records()]
    for error in errors:
        try:
            # Match regexp
            assert any(filter(error.match, captured_errs))
        except AttributeError:
            # String comparison
            assert error in captured_errs
def test_bad_version(self, tmpdir):
    """An unsupported config-file "version" must make the plugin raise
    ValueError when it runs."""
    tmpdir.join('config.yaml').write("version: 2")
    tasker, workflow = self.prepare()
    plugin = ReactorConfigPlugin(tasker, workflow, config_path=str(tmpdir))
    with pytest.raises(ValueError):
        plugin.run()
@pytest.mark.parametrize(('config', 'clusters'), [  # noqa:F811
    # Empty config
    ("", []),
    # Built-in default config
    (yaml.dump(ReactorConfig.DEFAULT_CONFIG), []),
    # Unknown key
    ("""\
     version: 1
     special: foo
     """, []),
    ("""\
     version: 1
     clusters:
       ignored:
       - name: foo
         max_concurrent_builds: 2
       platform:
       - name: one
         max_concurrent_builds: 4
       - name: two
         max_concurrent_builds: 8
         enabled: true
       - name: three
         max_concurrent_builds: 16
         enabled: false
     """, [
        ('one', 4),
        ('two', 8),
    ]),
])
def test_good_cluster_config(self, tmpdir, reactor_config_map, config, clusters):
    """Valid configs load cleanly; only enabled clusters of the requested
    platform come back (clusters default to enabled when unspecified)."""
    if reactor_config_map and config:
        os.environ['REACTOR_CONFIG'] = dedent(config)
    else:
        filename = os.path.join(str(tmpdir), 'config.yaml')
        with open(filename, 'w') as fp:
            fp.write(dedent(config))
    tasker, workflow = self.prepare()
    plugin = ReactorConfigPlugin(tasker, workflow, config_path=str(tmpdir))
    try:
        assert plugin.run() is None
    finally:
        # Clean up in "finally" so a failed assertion cannot leak
        # REACTOR_CONFIG into subsequent tests.
        os.environ.pop('REACTOR_CONFIG', None)
    conf = get_config(workflow)
    enabled = conf.get_enabled_clusters_for_platform('platform')
    assert set([(x.name, x.max_concurrent_builds)
                for x in enabled]) == set(clusters)
@pytest.mark.parametrize(('extra_config', 'fallback', 'error'), [  # noqa:F811
    ('clusters_client_config_dir: /the/path', None, None),
    ('clusters_client_config_dir: /the/path', '/unused/path', None),
    (None, '/the/path', None),
    (None, NO_FALLBACK, KeyError),
])
def test_cluster_client_config_path(self, tmpdir, reactor_config_map, extra_config, fallback,
                                    error):
    """clusters_client_config_dir from the config wins over the fallback;
    with neither available the getter raises KeyError."""
    config = 'version: 1'
    if extra_config:
        config += '\n' + extra_config
    if reactor_config_map and config:
        os.environ['REACTOR_CONFIG'] = config
    else:
        filename = os.path.join(str(tmpdir), 'config.yaml')
        with open(filename, 'w') as fp:
            fp.write(config)
    tasker, workflow = self.prepare()
    plugin = ReactorConfigPlugin(tasker, workflow, config_path=str(tmpdir))
    try:
        assert plugin.run() is None
    finally:
        # "finally" guarantees cleanup even when the assertion fails,
        # preventing REACTOR_CONFIG from leaking into other tests.
        os.environ.pop('REACTOR_CONFIG', None)
    if error:
        with pytest.raises(error):
            get_clusters_client_config_path(workflow, fallback)
    else:
        path = get_clusters_client_config_path(workflow, fallback)
        # The getter appends the canonical client config file name.
        assert path == '/the/path/osbs.conf'
@pytest.mark.parametrize('default', (
    'release',
    'beta',
    'unsigned',
))
def test_odcs_config(self, tmpdir, default):
    """Signing intents are parsed, ordered by restrictiveness, and are
    resolvable both by name and by key set (list or whitespace-separated
    string, in any order)."""
    filename = str(tmpdir.join('config.yaml'))
    with open(filename, 'w') as fp:
        fp.write(dedent("""\
            version: 1
            odcs:
               signing_intents:
               - name: release
                 keys: [R123, R234]
               - name: beta
                 keys: [R123, B456, B457]
               - name: unsigned
                 keys: []
               default_signing_intent: {default}
               api_url: http://odcs.example.com
               auth:
                   ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
            """.format(default=default)))
    tasker, workflow = self.prepare()
    plugin = ReactorConfigPlugin(tasker, workflow, config_path=str(tmpdir))
    assert plugin.run() is None
    odcs_config = get_config(workflow).get_odcs_config()
    assert odcs_config.default_signing_intent == default
    # Intents come back sorted from least to most restrictive, with a
    # 'restrictiveness' rank injected by the config parser.
    unsigned_intent = {'name': 'unsigned', 'keys': [], 'restrictiveness': 0}
    beta_intent = {'name': 'beta', 'keys': ['R123', 'B456', 'B457'], 'restrictiveness': 1}
    release_intent = {'name': 'release', 'keys': ['R123', 'R234'], 'restrictiveness': 2}
    assert odcs_config.signing_intents == [
        unsigned_intent, beta_intent, release_intent
    ]
    # Lookup by name; unknown names raise ValueError.
    assert odcs_config.get_signing_intent_by_name('release') == release_intent
    assert odcs_config.get_signing_intent_by_name('beta') == beta_intent
    assert odcs_config.get_signing_intent_by_name('unsigned') == unsigned_intent
    with pytest.raises(ValueError):
        odcs_config.get_signing_intent_by_name('missing')
    # Lookup by keys: a subset of an intent's keys resolves to the most
    # restrictive intent containing them; order and extra whitespace in
    # the string form do not matter.
    assert odcs_config.get_signing_intent_by_keys(['R123', 'R234'])['name'] == 'release'
    assert odcs_config.get_signing_intent_by_keys('R123 R234')['name'] == 'release'
    assert odcs_config.get_signing_intent_by_keys(['R123'])['name'] == 'release'
    assert odcs_config.get_signing_intent_by_keys('R123')['name'] == 'release'
    assert odcs_config.get_signing_intent_by_keys(['R123', 'B456'])['name'] == 'beta'
    assert odcs_config.get_signing_intent_by_keys(['B456', 'R123'])['name'] == 'beta'
    assert odcs_config.get_signing_intent_by_keys('B456 R123')['name'] == 'beta'
    assert odcs_config.get_signing_intent_by_keys('R123 B456 ')['name'] == 'beta'
    assert odcs_config.get_signing_intent_by_keys(['B456'])['name'] == 'beta'
    assert odcs_config.get_signing_intent_by_keys('B456')['name'] == 'beta'
    assert odcs_config.get_signing_intent_by_keys([])['name'] == 'unsigned'
    assert odcs_config.get_signing_intent_by_keys('')['name'] == 'unsigned'
    # Key sets not covered by any single configured intent are errors.
    with pytest.raises(ValueError):
        assert odcs_config.get_signing_intent_by_keys(['missing'])
    with pytest.raises(ValueError):
        assert odcs_config.get_signing_intent_by_keys(['R123', 'R234', 'B457'])
def test_odcs_config_invalid_default_signing_intent(self, tmpdir):
    """A default_signing_intent naming no configured intent must raise
    ValueError listing the valid intent names."""
    filename = str(tmpdir.join('config.yaml'))
    with open(filename, 'w') as fp:
        fp.write(dedent("""\
            version: 1
            odcs:
               signing_intents:
               - name: release
                 keys: [R123]
               - name: beta
                 keys: [R123, B456]
               - name: unsigned
                 keys: []
               default_signing_intent: spam
               api_url: http://odcs.example.com
               auth:
                   ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
            """))
    tasker, workflow = self.prepare()
    plugin = ReactorConfigPlugin(tasker, workflow, config_path=str(tmpdir))
    assert plugin.run() is None
    # The config is schema-valid; the bad name only surfaces when the
    # ODCS section is actually resolved.
    with pytest.raises(ValueError) as exc_info:
        get_config(workflow).get_odcs_config()
    message = str(exc_info.value)
    assert message == dedent("""\
        unknown signing intent name "spam", valid names: unsigned, beta, release
        """.rstrip())
@pytest.mark.parametrize('fallback', (True, False, None))
@pytest.mark.parametrize('method', [
    'koji', 'pulp', 'odcs', 'smtp', 'arrangement_version',
    'artifacts_allowed_domains', 'image_labels',
    'image_label_info_url_format', 'image_equal_labels',
    'openshift', 'group_manifests', 'platform_descriptors', 'prefer_schema1_digest',
    'content_versions', 'registries', 'yum_proxy', 'source_registry', 'sources_command',
    'required_secrets', 'worker_token_secrets', 'clusters',
])
def test_get_methods(self, fallback, method):
    """Exercise every get_* accessor of pre_reactor_config in three modes:
    fallback=True  -> no config present, fallback argument is returned;
    fallback=False -> config present in the workspace, value read from it;
    fallback=None  -> neither available, accessor raises KeyError."""
    tasker, workflow = self.prepare()
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    if fallback is False:
        # Reference config stored in the workspace; no fallback passed.
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] = \
            ReactorConfig(yaml.safe_load(REACTOR_CONFIG_MAP))
    else:
        if fallback:
            # Fallback value taken from the reference config map.
            fall_source = ReactorConfig(yaml.safe_load(REACTOR_CONFIG_MAP))
        else:
            # Minimal config: the accessor will find nothing anywhere.
            fall_source = ReactorConfig(yaml.safe_load("version: 1"))
    method_name = 'get_' + method
    real_method = getattr(atomic_reactor.plugins.pre_reactor_config, method_name)
    if fallback is True:
        output = real_method(workflow, fall_source.conf[method])
    else:
        if fallback is False:
            output = real_method(workflow)
        else:
            with pytest.raises(KeyError):
                real_method(workflow)
            return
    expected = yaml.safe_load(REACTOR_CONFIG_MAP)[method]
    if method == 'registries':
        # get_registries returns a docker-uri keyed dict rather than the
        # raw list from the config map; build the expected mapping here.
        registries_cm = {}
        for registry in expected:
            reguri = RegistryURI(registry.get('url'))
            regdict = {}
            regdict['version'] = reguri.version
            if registry.get('auth'):
                regdict['secret'] = registry['auth']['cfg_path']
            regdict['insecure'] = registry.get('insecure', False)
            regdict['expected_media_types'] = registry.get('expected_media_types', [])
            registries_cm[reguri.docker_uri] = regdict
        if fallback:
            output = real_method(workflow, registries_cm)
        assert output == registries_cm
        return
    if method == 'source_registry':
        # get_source_registry returns a dict holding a RegistryURI, which
        # does not compare equal structurally; compare fields instead.
        expect = {
            'uri': RegistryURI(expected['url']),
            'insecure': expected.get('insecure', False)
        }
        if fallback:
            output = real_method(workflow, expect)
        assert output['insecure'] == expect['insecure']
        assert output['uri'].uri == expect['uri'].uri
        return
    assert output == expected
@pytest.mark.parametrize('fallback', (True, False))
@pytest.mark.parametrize(('config', 'expect'), [
    ("""\
     version: 1
     platform_descriptors:
       - platform: x86_64
         architecture: amd64
     """,
     {'x86_64': 'amd64',
      'ppc64le': 'ppc64le'}),
])
def test_get_platform_to_goarch_mapping(self, fallback, config, expect):
    """The platform<->goarch mappings must be mutual inverses; platforms
    with no descriptor map to themselves (ppc64le above)."""
    tasker, workflow = self.prepare()
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    config_json = read_yaml(config, 'schemas/config.json')
    workspace = workflow.plugin_workspace[ReactorConfigPlugin.key]
    workspace[WORKSPACE_CONF_KEY] = ReactorConfig(config_json)
    kwargs = {}
    if fallback:
        # The workspace config is present, so the fallback should be
        # ignored without changing the result.
        kwargs['descriptors_fallback'] = {'x86_64': 'amd64'}
    platform_to_goarch = get_platform_to_goarch_mapping(workflow, **kwargs)
    goarch_to_platform = get_goarch_to_platform_mapping(workflow, **kwargs)
    for plat, goarch in expect.items():
        assert platform_to_goarch[plat] == goarch
        assert goarch_to_platform[goarch] == plat
@pytest.mark.parametrize(('config', 'expect'), [
    ("""\
     version: 1
     default_image_build_method: imagebuilder
     """,
     "imagebuilder"),
    ("""\
     version: 1
     """,
     CONTAINER_DEFAULT_BUILD_METHOD),
])
def test_get_default_image_build_method(self, config, expect):
    """An explicit default_image_build_method is honoured; without one the
    container default build method applies."""
    parsed = read_yaml(config, 'schemas/config.json')
    _, workflow = self.prepare()
    workflow.plugin_workspace.setdefault(ReactorConfigPlugin.key, {})[WORKSPACE_CONF_KEY] = \
        ReactorConfig(parsed)
    assert get_default_image_build_method(workflow) == expect
@pytest.mark.parametrize('fallback', (True, False))
@pytest.mark.parametrize(('config', 'expect'), [
    ("""\
     version: 1
     build_image_override:
       ppc64le: registry.example.com/buildroot-ppc64le:latest
       arm: registry.example.com/buildroot-arm:latest
     """,
     {'ppc64le': 'registry.example.com/buildroot-ppc64le:latest',
      'arm': 'registry.example.com/buildroot-arm:latest'}),
])
def test_get_build_image_override(self, fallback, config, expect):
    """build_image_override from the workspace config is returned verbatim;
    the fallback argument is ignored while the config is present."""
    tasker, workflow = self.prepare()
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    parsed = read_yaml(config, 'schemas/config.json')
    workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] = \
        ReactorConfig(parsed)
    kwargs = {'fallback': expect} if fallback else {}
    assert get_build_image_override(workflow, **kwargs) == expect
@pytest.mark.parametrize(('config', 'fallback', 'expect'), [
    ("""\
     version: 1
     flatpak:
         base_image: fedora:latest
     """,
     "x", "fedora:latest"),
    ("""\
     version: 1
     flatpak: {}
     """,
     "x", "x"),
    ("""\
     version: 1
     """,
     "x", "x"),
    ("""\
     version: 1
     """,
     None, None),
    ("""\
     version: 1
     flatpak: {}
     """,
     None, None),
])
def test_get_flatpak_base_image(self, config, fallback, expect):
    """flatpak.base_image is read from the config first, then from the
    fallback; with neither available (expect=None) the accessor raises
    KeyError."""
    config_json = read_yaml(config, 'schemas/config.json')
    _, workflow = self.prepare()
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {
        WORKSPACE_CONF_KEY: ReactorConfig(config_json)
    }
    kwargs = {}
    if fallback:
        kwargs['fallback'] = fallback
    if expect:
        base_image = get_flatpak_base_image(workflow, **kwargs)
        assert base_image == expect
    else:
        with pytest.raises(KeyError):
            get_flatpak_base_image(workflow, **kwargs)
@pytest.mark.parametrize('fallback', (True, False))
@pytest.mark.parametrize(('config', 'raise_error'), [
    # Valid: kerberos principal + keytab
    ("""\
     version: 1
     koji:
         hub_url: https://koji.example.com/hub
         root_url: https://koji.example.com/root
         auth:
             proxyuser: proxyuser
             krb_principal: krb_principal
             krb_keytab_path: /tmp/krb_keytab
     """, False),
    # Valid: ssl certificates
    ("""\
     version: 1
     koji:
         hub_url: https://koji.example.com/hub
         root_url: https://koji.example.com/root
         auth:
             proxyuser: proxyuser
             ssl_certs_dir: /var/certs
     """, False),
    # Valid: proxyuser only
    ("""\
     version: 1
     koji:
         hub_url: https://koji.example.com/hub
         root_url: https://koji.example.com/root
         auth:
             proxyuser: proxyuser
     """, False),
    # Invalid: empty auth section
    ("""\
     version: 1
     koji:
         hub_url: https://koji.example.com/hub
         root_url: https://koji.example.com/root
         auth:
     """, True),
    # Invalid: kerberos and ssl auth specified together
    ("""\
     version: 1
     koji:
         hub_url: https://koji.example.com/hub
         root_url: https://koji.example.com/root
         auth:
             proxyuser: proxyuser
             krb_principal: krb_principal
             krb_keytab_path: /tmp/krb_keytab
             ssl_certs_dir: /var/certs
     """, True),
    # Invalid: keytab without principal
    ("""\
     version: 1
     koji:
         hub_url: https://koji.example.com/hub
         root_url: https://koji.example.com/root
         auth:
             proxyuser: proxyuser
             krb_keytab_path: /tmp/krb_keytab
     """, True),
    # Invalid: principal without keytab
    ("""\
     version: 1
     koji:
         hub_url: https://koji.example.com/hub
         root_url: https://koji.example.com/root
         auth:
             proxyuser: proxyuser
             krb_principal: krb_principal
     """, True),
    # Invalid: principal combined with ssl
    ("""\
     version: 1
     koji:
         hub_url: https://koji.example.com/hub
         root_url: https://koji.example.com/root
         auth:
             proxyuser: proxyuser
             krb_principal: krb_principal
             ssl_certs_dir: /var/certs
     """, True),
    # Invalid: keytab combined with ssl
    ("""\
     version: 1
     koji:
         hub_url: https://koji.example.com/hub
         root_url: https://koji.example.com/root
         auth:
             proxyuser: proxyuser
             krb_keytab_path: /tmp/krb_keytab
             ssl_certs_dir: /var/certs
     """, True),
])
def test_get_koji_session(self, fallback, config, raise_error):
    """get_koji_session builds a koji session from the workspace config or
    a fallback map; conflicting or incomplete auth combinations must be
    rejected by schema validation."""
    tasker, workflow = self.prepare()
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    if raise_error:
        with pytest.raises(Exception):
            read_yaml(config, 'schemas/config.json')
        return
    config_json = read_yaml(config, 'schemas/config.json')
    # The koji helper expects "krb_keytab" whereas the config file spells
    # it "krb_keytab_path"; this is the translated form passed downstream.
    auth_info = {
        "proxyuser": config_json['koji']['auth'].get('proxyuser'),
        "ssl_certs_dir": config_json['koji']['auth'].get('ssl_certs_dir'),
        "krb_principal": config_json['koji']['auth'].get('krb_principal'),
        "krb_keytab": config_json['koji']['auth'].get('krb_keytab_path')
    }
    fallback_map = {}
    if fallback:
        # The fallback map uses the config-file spelling; rename back.
        fallback_map = {'auth': deepcopy(auth_info), 'hub_url': config_json['koji']['hub_url']}
        fallback_map['auth']['krb_keytab_path'] = fallback_map['auth'].pop('krb_keytab')
    else:
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] = \
            ReactorConfig(config_json)
    (flexmock(atomic_reactor.koji_util)
        .should_receive('create_koji_session')
        .with_args(config_json['koji']['hub_url'], auth_info)
        .once()
        .and_return(True))
    get_koji_session(workflow, fallback_map)
@pytest.mark.parametrize('fallback', (True, False))
@pytest.mark.parametrize('root_url', (
    'https://koji.example.com/root',
    'https://koji.example.com/root/',
    None
))
def test_get_koji_path_info(self, fallback, root_url):
    """koji.PathInfo must be constructed with the root URL stripped of any
    trailing slash; a config without root_url fails schema validation."""
    tasker, workflow = self.prepare()
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    config = {
        'version': 1,
        'koji': {
            'hub_url': 'https://koji.example.com/hub',
            'auth': {
                'ssl_certs_dir': '/var/certs'
            }
        }
    }
    # Normalized form expected regardless of trailing slash in the input.
    expected_root_url = 'https://koji.example.com/root'
    if root_url:
        config['koji']['root_url'] = root_url
    config_yaml = yaml.safe_dump(config)
    expect_error = not root_url
    if expect_error:
        with pytest.raises(Exception):
            read_yaml(config_yaml, 'schemas/config.json')
        return
    parsed_config = read_yaml(config_yaml, 'schemas/config.json')
    fallback_map = {}
    if fallback:
        fallback_map = deepcopy(config['koji'])
    else:
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] = \
            ReactorConfig(parsed_config)
    (flexmock(koji.PathInfo)
        .should_receive('__init__')
        .with_args(topdir=expected_root_url)
        .once())
    get_koji_path_info(workflow, fallback_map)
@pytest.mark.parametrize('fallback', (True, False))
@pytest.mark.parametrize(('config', 'raise_error'), [
    # Valid: username/password auth
    ("""\
     version: 1
     pulp:
         name: my-pulp
         auth:
             password: testpasswd
             username: testuser
     """, False),
    # Valid: certificate auth
    ("""\
     version: 1
     pulp:
         name: my-pulp
         auth:
             ssl_certs_dir: /var/certs
     """, False),
    # Invalid: certificate and username/password are mutually exclusive
    ("""\
     version: 1
     pulp:
         name: my-pulp
         auth:
             ssl_certs_dir: /var/certs
             password: testpasswd
             username: testuser
     """, True),
    # Invalid: certificate combined with password only
    ("""\
     version: 1
     pulp:
         name: my-pulp
         auth:
             ssl_certs_dir: /var/certs
             password: testpasswd
     """, True),
    # Invalid: certificate combined with username only
    ("""\
     version: 1
     pulp:
         name: my-pulp
         auth:
             ssl_certs_dir: /var/certs
             username: testuser
     """, True),
    # Invalid: username without password
    ("""\
     version: 1
     pulp:
         name: my-pulp
         auth:
             username: testuser
     """, True),
    # Invalid: password without username
    ("""\
     version: 1
     pulp:
         name: my-pulp
         auth:
             password: testpasswd
     """, True),
])
def test_get_pulp_session(self, fallback, config, raise_error):
    """get_pulp_session feeds PulpHandler with credentials taken from the
    workspace config or the fallback map; incomplete or conflicting auth
    combinations must fail schema validation."""
    tasker, workflow = self.prepare()
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    if raise_error:
        with pytest.raises(Exception):
            read_yaml(config, 'schemas/config.json')
        return
    config_json = read_yaml(config, 'schemas/config.json')
    # PulpHandler names the certs dir "pulp_secret_path".
    auth_info = {
        "pulp_secret_path": config_json['pulp']['auth'].get('ssl_certs_dir'),
        "username": config_json['pulp']['auth'].get('username'),
        "password": config_json['pulp']['auth'].get('password'),
        "dockpulp_loglevel": None
    }
    fallback_map = {}
    if fallback:
        # The fallback map uses the config-file spelling; rename back.
        fallback_map = {'auth': deepcopy(auth_info), 'name': config_json['pulp']['name']}
        fallback_map['auth']['ssl_certs_dir'] = fallback_map['auth'].pop('pulp_secret_path')
    else:
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig(config_json)
    (flexmock(atomic_reactor.pulp_util.PulpHandler)
        .should_receive('__init__')
        .with_args(workflow, config_json['pulp']['name'], 'logger', **auth_info)
        .once()
        .and_return(None))
    get_pulp_session(workflow, 'logger', fallback_map)
@pytest.mark.parametrize('fallback', (True, False))
@pytest.mark.parametrize(('config', 'raise_error'), [
    # Valid: ssl certificate auth
    ("""\
     version: 1
     odcs:
         api_url: https://odcs.example.com/api/1
         auth:
             ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
         signing_intents:
         - name: release
           keys: [R123]
         default_signing_intent: default
     """, False),
    # Valid schema, but the certs dir will not exist at runtime
    ("""\
     version: 1
     odcs:
         api_url: https://odcs.example.com/api/1
         auth:
             ssl_certs_dir: nonexistent
         signing_intents:
         - name: release
           keys: [R123]
         default_signing_intent: default
     """, False),
    # Valid: openidc auth
    ("""\
     version: 1
     odcs:
         api_url: https://odcs.example.com/api/1
         auth:
             openidc_dir: /var/run/open_idc
         signing_intents:
         - name: release
           keys: [R123]
         default_signing_intent: default
     """, False),
    # Invalid: openidc and ssl auth are mutually exclusive
    ("""\
     version: 1
     odcs:
         api_url: https://odcs.example.com/api/1
         auth:
             openidc_dir: /var/run/open_idc
             ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
         signing_intents:
         - name: release
           keys: [R123]
         default_signing_intent: default
     """, True),
    # Invalid: missing default_signing_intent
    ("""\
     version: 1
     odcs:
         api_url: https://odcs.example.com/api/1
         auth:
             openidc_dir: /var/run/open_idc
         signing_intents:
         - name: release
           keys: [R123]
     """, True),
    # Invalid: missing signing_intents
    ("""\
     version: 1
     odcs:
         api_url: https://odcs.example.com/api/1
         auth:
             openidc_dir: /var/run/open_idc
         default_signing_intent: default
     """, True),
    # Invalid: missing api_url
    ("""\
     version: 1
     odcs:
         auth:
             openidc_dir: /var/run/open_idc
         signing_intents:
         - name: release
           keys: [R123]
         default_signing_intent: default
     """, True),
])
def test_get_odcs_session(self, tmpdir, fallback, config, raise_error):
    """get_odcs_session must construct ODCSClient with token or cert
    credentials read from files on disk; a missing certs dir raises
    KeyError and incomplete configs fail schema validation."""
    tasker, workflow = self.prepare()
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    if raise_error:
        with pytest.raises(Exception):
            read_yaml(config, 'schemas/config.json')
        return
    config_json = read_yaml(config, 'schemas/config.json')
    auth_info = {'insecure': config_json['odcs'].get('insecure', False)}
    if 'openidc_dir' in config_json['odcs']['auth']:
        # Point the config at a real dir containing a token file so the
        # session picks up the expected token.
        config_json['odcs']['auth']['openidc_dir'] = str(tmpdir)
        filename = str(tmpdir.join('token'))
        with open(filename, 'w') as fp:
            fp.write("my_token")
        auth_info['token'] = "my_token"
    ssl_dir_raise = False
    if 'ssl_certs_dir' in config_json['odcs']['auth']:
        if config_json['odcs']['auth']['ssl_certs_dir'] != "nonexistent":
            config_json['odcs']['auth']['ssl_certs_dir'] = str(tmpdir)
            filename = str(tmpdir.join('cert'))
            with open(filename, 'w') as fp:
                fp.write("my_cert")
            auth_info['cert'] = filename
        else:
            # No cert file on disk: expect a KeyError instead of a client.
            ssl_dir_raise = True
    fallback_map = {}
    if fallback:
        fallback_map = {'auth': deepcopy(auth_info),
                        'api_url': config_json['odcs']['api_url']}
        fallback_map['auth']['ssl_certs_dir'] = config_json['odcs']['auth'].get('ssl_certs_dir')
        fallback_map['auth']['openidc_dir'] = config_json['odcs']['auth'].get('openidc_dir')
    else:
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig(config_json)
    if not ssl_dir_raise:
        (flexmock(atomic_reactor.odcs_util.ODCSClient)
            .should_receive('__init__')
            .with_args(config_json['odcs']['api_url'], **auth_info)
            .once()
            .and_return(None))
        get_odcs_session(workflow, fallback_map)
    else:
        with pytest.raises(KeyError):
            get_odcs_session(workflow, fallback_map)
@pytest.mark.parametrize('fallback', (True, False))
@pytest.mark.parametrize(('config', 'raise_error'), [
    # Valid: both host and from_address present
    ("""\
     version: 1
     smtp:
         host: smtp.example.com
         from_address: [email protected]
     """, False),
    # Invalid: missing host
    ("""\
     version: 1
     smtp:
         from_address: [email protected]
     """, True),
    # Invalid: missing from_address
    ("""\
     version: 1
     smtp:
         host: smtp.example.com
     """, True),
    # Invalid: empty smtp section
    ("""\
     version: 1
     smtp:
     """, True),
])
def test_get_smtp_session(self, fallback, config, raise_error):
    """get_smtp_session opens an smtplib.SMTP connection to the configured
    host; the smtp section requires both host and from_address."""
    tasker, workflow = self.prepare()
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    if raise_error:
        with pytest.raises(Exception):
            read_yaml(config, 'schemas/config.json')
        return
    config_json = read_yaml(config, 'schemas/config.json')
    fallback_map = {}
    if fallback:
        fallback_map['host'] = config_json['smtp']['host']
    else:
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig(config_json)
    (flexmock(smtplib.SMTP)
        .should_receive('__init__')
        .with_args(config_json['smtp']['host'])
        .once()
        .and_return(None))
    get_smtp_session(workflow, fallback_map)
@pytest.mark.parametrize('fallback', (True, False))
@pytest.mark.parametrize('build_json_dir', [
    None, "/tmp/build_json_dir",
])
@pytest.mark.parametrize(('config', 'raise_error'), [
    # Valid: ssl certificate auth
    ("""\
     version: 1
     openshift:
         url: https://openshift.example.com
         auth:
             ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
     """, False),
    # Valid: url only, no auth section
    ("""\
     version: 1
     openshift:
         url: https://openshift.example.com
     """, False),
    # Valid: kerberos principal + keytab
    ("""\
     version: 1
     openshift:
         url: https://openshift.example.com
         auth:
             krb_principal: principal
             krb_keytab_path: /var/keytab
     """, False),
    # Valid: kerberos with explicit credential cache
    ("""\
     version: 1
     openshift:
         url: https://openshift.example.com
         auth:
             krb_principal: principal
             krb_keytab_path: /var/keytab
             krb_cache_path: /var/krb/cache
     """, False),
    # Valid: auth enabled without explicit credentials
    ("""\
     version: 1
     openshift:
         url: https://openshift.example.com
         auth:
             enable: True
     """, False),
    # Invalid: keytab without principal
    ("""\
     version: 1
     openshift:
         url: https://openshift.example.com
         auth:
             krb_keytab_path: /var/keytab
     """, True),
    # Invalid: principal without keytab
    ("""\
     version: 1
     openshift:
         url: https://openshift.example.com
         auth:
             krb_principal: principal
     """, True),
    # Invalid: missing url (ssl auth)
    ("""\
     version: 1
     openshift:
         auth:
             ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
     """, True),
    # Invalid: missing url (kerberos auth)
    ("""\
     version: 1
     openshift:
         auth:
             krb_principal: principal
             krb_keytab_path: /var/keytab
     """, True),
    # Invalid: empty auth section
    ("""\
     version: 1
     openshift:
         url: https://openshift.example.com
         auth:
     """, True),
    # Invalid: missing url (ssl auth, duplicated case)
    ("""\
     version: 1
     openshift:
         auth:
             ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
     """, True),
])
def test_get_openshift_session(self, fallback, build_json_dir, config, raise_error):
    """get_openshift_session must build osbs.conf.Configuration/OSBS with
    auth parameters translated from the config (or fallback map), taking
    the namespace from the BUILD environment variable."""
    tasker, workflow = self.prepare()
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    if build_json_dir:
        config += " build_json_dir: " + build_json_dir
    if raise_error:
        with pytest.raises(Exception):
            read_yaml(config, 'schemas/config.json')
        return
    config_json = read_yaml(config, 'schemas/config.json')
    auth_info = {
        'openshift_url': config_json['openshift']['url'],
        'verify_ssl': not config_json['openshift'].get('insecure', False),
        'use_auth': False,
        'conf_file': None,
        'namespace': 'namespace',
        'build_json_dir': build_json_dir
    }
    if config_json['openshift'].get('auth'):
        # Translate each credential from the config spelling into the
        # keyword names osbs.conf.Configuration expects.
        if config_json['openshift']['auth'].get('krb_keytab_path'):
            auth_info['kerberos_keytab'] =\
                config_json['openshift']['auth'].get('krb_keytab_path')
        if config_json['openshift']['auth'].get('krb_principal'):
            auth_info['kerberos_principal'] =\
                config_json['openshift']['auth'].get('krb_principal')
        if config_json['openshift']['auth'].get('krb_cache_path'):
            auth_info['kerberos_ccache'] =\
                config_json['openshift']['auth'].get('krb_cache_path')
        if config_json['openshift']['auth'].get('ssl_certs_dir'):
            auth_info['client_cert'] =\
                os.path.join(config_json['openshift']['auth'].get('ssl_certs_dir'), 'cert')
            auth_info['client_key'] =\
                os.path.join(config_json['openshift']['auth'].get('ssl_certs_dir'), 'key')
        auth_info['use_auth'] = config_json['openshift']['auth'].get('enable', False)
    fallback_map = {}
    if fallback:
        fallback_map = {'url': config_json['openshift']['url'],
                        'insecure': config_json['openshift'].get('insecure', False),
                        'build_json_dir': build_json_dir}
        if config_json['openshift'].get('auth'):
            fallback_map['auth'] = {}
            fallback_map['auth']['krb_keytab_path'] =\
                config_json['openshift']['auth'].get('krb_keytab_path')
            fallback_map['auth']['krb_principal'] =\
                config_json['openshift']['auth'].get('krb_principal')
            fallback_map['auth']['enable'] =\
                config_json['openshift']['auth'].get('enable', False)
            fallback_map['auth']['krb_cache_path'] =\
                config_json['openshift']['auth'].get('krb_cache_path')
            fallback_map['auth']['ssl_certs_dir'] =\
                config_json['openshift']['auth'].get('ssl_certs_dir')
    else:
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig(config_json)
    (flexmock(osbs.conf.Configuration)
        .should_call('__init__')
        .with_args(**auth_info)
        .once())
    (flexmock(osbs.api.OSBS)
        .should_call('__init__')
        .once())
    # The session reads its namespace from the BUILD environment variable.
    flexmock(os, environ={'BUILD': '{"metadata": {"namespace": "namespace"}}'})
    get_openshift_session(workflow, fallback_map)
| []
| []
| [
"REACTOR_CONFIG"
]
| [] | ["REACTOR_CONFIG"] | python | 1 | 0 | |
config.go | package main
import (
"archive/zip"
"bytes"
"context"
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"os/user"
"path"
"path/filepath"
"reflect"
"regexp"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
awsConfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/service/iam"
"github.com/aws/aws-sdk-go-v2/service/ssm"
"github.com/aws/aws-sdk-go-v2/service/sts"
"github.com/blang/semver/v4"
"github.com/coveooss/gotemplate/v3/collections"
"github.com/fatih/color"
"github.com/hashicorp/go-getter"
"github.com/minio/selfupdate"
yaml "gopkg.in/yaml.v2"
)
const (
	// ssm configuration: folder holding the default tgf parameters
	defaultSSMParameterFolder = "/default/tgf"

	// ssm configuration used to fetch configs from a remote location
	remoteDefaultConfigPath       = "TGFConfig"
	remoteConfigLocationParameter = "config-location"
	remoteConfigPathsParameter    = "config-paths"

	// configuration files (project-level and per-user)
	configFile     = ".tgf.config"
	userConfigFile = "tgf.user.config"

	// tagSeparator joins the parts of generated image tags
	tagSeparator = "-"
)
// TGFConfig contains the resulting configuration that will be applied.
// Exported fields map 1:1 onto the yaml/json/hcl configuration keys
// declared in their struct tags; unexported fields hold derived state.
type TGFConfig struct {
	Image                   string            `yaml:"docker-image,omitempty" json:"docker-image,omitempty" hcl:"docker-image,omitempty"`
	ImageVersion            *string           `yaml:"docker-image-version,omitempty" json:"docker-image-version,omitempty" hcl:"docker-image-version,omitempty"`
	ImageTag                *string           `yaml:"docker-image-tag,omitempty" json:"docker-image-tag,omitempty" hcl:"docker-image-tag,omitempty"`
	ImageBuild              string            `yaml:"docker-image-build,omitempty" json:"docker-image-build,omitempty" hcl:"docker-image-build,omitempty"`
	ImageBuildFolder        string            `yaml:"docker-image-build-folder,omitempty" json:"docker-image-build-folder,omitempty" hcl:"docker-image-build-folder,omitempty"`
	ImageBuildTag           string            `yaml:"docker-image-build-tag,omitempty" json:"docker-image-build-tag,omitempty" hcl:"docker-image-build-tag,omitempty"`
	LogLevel                string            `yaml:"logging-level,omitempty" json:"logging-level,omitempty" hcl:"logging-level,omitempty"`
	EntryPoint              string            `yaml:"entry-point,omitempty" json:"entry-point,omitempty" hcl:"entry-point,omitempty"`
	Refresh                 time.Duration     `yaml:"docker-refresh,omitempty" json:"docker-refresh,omitempty" hcl:"docker-refresh,omitempty"`
	DockerOptions           []string          `yaml:"docker-options,omitempty" json:"docker-options,omitempty" hcl:"docker-options,omitempty"`
	RecommendedImageVersion string            `yaml:"recommended-image-version,omitempty" json:"recommended-image-version,omitempty" hcl:"recommended-image-version,omitempty"`
	RequiredVersionRange    string            `yaml:"required-image-version,omitempty" json:"required-image-version,omitempty" hcl:"required-image-version,omitempty"`
	RecommendedTGFVersion   string            `yaml:"tgf-recommended-version,omitempty" json:"tgf-recommended-version,omitempty" hcl:"tgf-recommended-version,omitempty"`
	Environment             map[string]string `yaml:"environment,omitempty" json:"environment,omitempty" hcl:"environment,omitempty"`
	RunBefore               string            `yaml:"run-before,omitempty" json:"run-before,omitempty" hcl:"run-before,omitempty"`
	RunAfter                string            `yaml:"run-after,omitempty" json:"run-after,omitempty" hcl:"run-after,omitempty"`
	Aliases                 map[string]string `yaml:"alias,omitempty" json:"alias,omitempty" hcl:"alias,omitempty"`
	UpdateVersion           string            `yaml:"update-version,omitempty" json:"update-version,omitempty" hcl:"update-version,omitempty"`
	AutoUpdateDelay         time.Duration     `yaml:"auto-update-delay,omitempty" json:"auto-update-delay,omitempty" hcl:"auto-update-delay,omitempty"`
	AutoUpdate              bool              `yaml:"auto-update,omitempty" json:"auto-update,omitempty" hcl:"auto-update,omitempty"`

	// Command lists derived from RunBefore/RunAfter — not serialized.
	runBeforeCommands, runAfterCommands []string
	imageBuildConfigs                   []TGFConfigBuild // List of config built from previous build configs
	tgf                                 *TGFApplication  // Back-reference to the owning application
}
// TGFConfigBuild contains an entry specifying how to customize the current docker image
type TGFConfigBuild struct {
	Instructions string // Docker build instructions for this customization
	Folder       string // Build folder; empty = folder containing the source config, relative paths resolved against it
	Tag          string // Explicit image tag; when empty a tag is derived from the folder name and content hash
	source       string // Path of the config file this entry was loaded from
}
var (
	// cachedAWSConfigExistCheck memoizes whether an AWS configuration was
	// detected; nil means "not checked yet" (see resetCache).
	cachedAWSConfigExistCheck *bool
)
// resetCache clears the package-level memoized AWS values (config
// existence check and AWS config) so they are lazily recomputed on the
// next use.
func resetCache() {
	cachedAWSConfigExistCheck = nil
	cachedAwsConfig = nil
}
// hash returns a hex MD5 digest identifying this build configuration.
// It mixes in the base name of the folder holding the source config, the
// raw build instructions and, when a build folder is set, the modification
// time of every non-dockerfile file under it — so content changes yield a
// different (hence rebuilt) image tag. Do not alter the order or format of
// the hashed inputs: existing tags would all be invalidated.
func (cb TGFConfigBuild) hash() string {
	h := md5.New()
	// Only the parent folder's base name participates, so relocating the
	// whole tree does not change the hash.
	io.WriteString(h, filepath.Base(filepath.Dir(cb.source)))
	io.WriteString(h, cb.Instructions)
	if cb.Folder != "" {
		filepath.Walk(cb.Dir(), func(path string, info os.FileInfo, err error) error {
			// Skip directories and entries that could not be stat'ed.
			if info == nil || info.IsDir() || err != nil {
				return nil
			}
			// Dockerfile content is covered by Instructions; only the
			// timestamps of other files are folded in.
			if !strings.Contains(path, dockerfilePattern) {
				io.WriteString(h, fmt.Sprintf("%v", info.ModTime()))
			}
			return nil
		})
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}
// Dir returns the build folder for this entry: an empty Folder means the
// directory containing the source config file, an absolute Folder is used
// as-is, and a relative Folder is resolved against the source directory.
func (cb TGFConfigBuild) Dir() string {
	sourceDir := filepath.Dir(cb.source)
	switch {
	case cb.Folder == "":
		return sourceDir
	case filepath.IsAbs(cb.Folder):
		return cb.Folder
	default:
		return must(filepath.Abs(filepath.Join(sourceDir, cb.Folder))).(string)
	}
}
// GetTag returns the tag to apply to the built image. When no explicit
// Tag is configured, a deterministic one is derived from the config
// folder name and the content hash. Characters outside [a-zA-Z0-9._-]
// are stripped to keep the tag docker-valid.
func (cb TGFConfigBuild) GetTag() string {
	invalidChars := regexp.MustCompile(`[^a-zA-Z0-9\._-]`)
	tag := cb.Tag
	if tag == "" {
		tag = fmt.Sprintf("%s-%s", filepath.Base(filepath.Dir(cb.source)), cb.hash())
	}
	return invalidChars.ReplaceAllString(tag, "")
}
// InitConfig returns a properly initialized TGF configuration struct,
// pre-filled with the built-in defaults, then completed by
// setDefaultValues and with its aliases parsed.
func InitConfig(app *TGFApplication) *TGFConfig {
	config := &TGFConfig{
		Image:             "coveo/tgf",
		tgf:               app,
		Refresh:           time.Hour,
		AutoUpdateDelay:   2 * time.Hour,
		AutoUpdate:        true,
		EntryPoint:        "terragrunt",
		LogLevel:          "notice",
		Environment:       map[string]string{},
		imageBuildConfigs: []TGFConfigBuild{},
	}
	config.setDefaultValues()
	config.ParseAliases()
	return config
}
// String renders the configuration as a YAML document; on marshalling
// failure it returns the error text instead.
func (config TGFConfig) String() string {
	// Named "data" rather than "bytes": the previous name shadowed the
	// imported standard-library bytes package within this function.
	data, err := yaml.Marshal(config)
	if err != nil {
		return fmt.Sprintf("Error parsing TGFConfig: %v", err)
	}
	return string(data)
}
// cachedAwsConfig memoizes the AWS configuration built by getAwsConfig;
// nil until the first successful call (cleared by resetCache).
var cachedAwsConfig *aws.Config
// getAwsConfig builds (and caches) the AWS configuration for the selected
// profile. MFA tokens are obtained from a YubiKey via ykman. When the
// resulting credentials would expire in under an hour, the function
// retries once with a longer assume-role duration before falling back to
// the short-lived session.
func (tgfConfig *TGFConfig) getAwsConfig(assumeRoleDuration time.Duration) (aws.Config, error) {
	if cachedAwsConfig != nil {
		log.Debug("Using cached AWS config")
		return *cachedAwsConfig, nil
	}

	log.Debugf("Creating new AWS config (assumeRoleDuration=%s)", assumeRoleDuration)
	config, err := awsConfig.LoadDefaultConfig(
		context.TODO(),
		awsConfig.WithSharedConfigProfile(tgfConfig.tgf.AwsProfile),
		awsConfig.WithLogger(awsLogger),
		// The logger level controlled by the --aws-debug flag controls whether or not the logs are shown.
		// With that in mind, we just let the AWS SDK blindly log and rely on the logger to decide if it should print or not.
		awsConfig.WithClientLogMode(
			aws.LogRetries|
				aws.LogRequestWithBody|
				aws.LogRequestEventMessage|
				aws.LogResponseWithBody|
				aws.LogResponseEventMessage,
		),
		awsConfig.WithAssumeRoleCredentialOptions(func(o *stscreds.AssumeRoleOptions) {
			o.TokenProvider = func() (string, error) {
				fmt.Fprintln(os.Stderr, "Touch your YubiKey...")
				// NOTE(review): the MFA device ARN is hard-coded to a
				// personal account/user — confirm whether this should come
				// from configuration instead.
				v, err := exec.Command("ykman", "oath", "accounts", "code", "arn:aws:iam::916842903476:mfa/wtrepanier", "--single").Output()
				// Fixed inverted condition: the success message used to be
				// printed only when ykman FAILED (err != nil).
				if err == nil {
					fmt.Fprintln(os.Stderr, "Successfully retrieved OATH code from YubiKey")
				}
				return strings.TrimSuffix(string(v), "\n"), err
			}
			if assumeRoleDuration > 0 {
				o.Duration = assumeRoleDuration
			}
		}),
	)
	if err != nil {
		return config, err
	}

	log.Debug("Fetching credentials for current AWS config")
	creds, err := config.Credentials.Retrieve(context.TODO())
	if err != nil {
		return config, err
	}

	expiresIn := time.Until(creds.Expires)
	if creds.CanExpire && expiresIn < (1*time.Hour) {
		newDuration := guessAwsMaxAssumeRoleDuration(config)
		log.Warningf(
			"Credentials for current AWS session are set to expire in less than one hour (%s). Will extend to %s.",
			expiresIn,
			newDuration,
		)
		log.Warningf(
			color.WhiteString("You should consider defining %s in your AWS config profile %s"),
			color.HiBlueString("duration_seconds = %d", newDuration/time.Second),
			color.HiBlueString(getPrettyAwsProfileName(*tgfConfig)),
		)
		shortConfig := config
		// Recursive retry with the longer duration; on failure, keep the
		// short-lived session rather than aborting.
		config, err = tgfConfig.getAwsConfig(newDuration)
		if err != nil {
			log.Warning("Failed to extend current AWS session, will use the current short duration.", err)
			config = shortConfig
		}
	}

	log.Debug("Caching newly created AWS config for future calls")
	cachedAwsConfig = &config

	return config, nil
}
// guessAwsMaxAssumeRoleDuration returns the MaxSessionDuration configured on
// the currently assumed IAM role. Any failure along the way (identity lookup,
// not running under an assumed role, role lookup) falls back to one hour.
func guessAwsMaxAssumeRoleDuration(awsConfig aws.Config) time.Duration {
	fallback := 1 * time.Hour
	log.Debugf("Trying to figure out the max duration of an AWS assume role operation (fallback=%s)", fallback)
	// Extracts the role name from an ARN such as
	// arn:aws:sts::<account>:assumed-role/<role>/<session>.
	roleRegex := regexp.MustCompile(".*:assumed-role/(.*)/.*")
	identity, err := sts.NewFromConfig(awsConfig).GetCallerIdentity(context.TODO(), &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Debug("Failed, using fallback:", err)
		return fallback
	}
	matches := roleRegex.FindStringSubmatch(*identity.Arn)
	if len(matches) == 0 {
		log.Debug("Failed, using fallback: Current role is not an assumed role")
		return fallback
	}
	role, err := iam.NewFromConfig(awsConfig).GetRole(
		context.TODO(),
		&iam.GetRoleInput{
			RoleName: &matches[1],
		},
	)
	if err != nil {
		log.Debug("Failed, using fallback:", err)
		return fallback
	}
	// MaxSessionDuration is expressed in seconds by the IAM API.
	maxDuration := time.Duration(*role.Role.MaxSessionDuration) * time.Second
	log.Debugf("Max duration for current role (%s) is %s", *role.Role.Arn, maxDuration)
	return maxDuration
}
// getPrettyAwsProfileName returns a human readable name for the AWS profile
// in use: the --aws-profile flag first, then the AWS_PROFILE environment
// variable, and finally "default" when neither is set.
func getPrettyAwsProfileName(tgfConfig TGFConfig) string {
	for _, profile := range []string{tgfConfig.tgf.AwsProfile, os.Getenv("AWS_PROFILE")} {
		if profile != "" {
			return profile
		}
	}
	return "default"
}
// InitAWS tries to open an AWS session and init AWS environment variable on success
func (config *TGFConfig) InitAWS() error {
	if config.tgf.AwsProfile == "" && os.Getenv("AWS_ACCESS_KEY_ID") != "" && os.Getenv("AWS_PROFILE") != "" {
		log.Warning("You set both AWS_ACCESS_KEY_ID and AWS_PROFILE, AWS_PROFILE will be ignored")
	}
	awsConfig, err := config.getAwsConfig(0)
	if err != nil {
		return err
	}
	creds, err := awsConfig.Credentials.Retrieve(context.TODO())
	if err != nil {
		return err
	}
	// The credentials below are exported explicitly, so the profile variables
	// are removed to prevent them from overriding the explicit keys.
	os.Unsetenv("AWS_PROFILE")
	os.Unsetenv("AWS_DEFAULT_PROFILE")
	for key, value := range map[string]string{
		"AWS_ACCESS_KEY_ID":     creds.AccessKeyID,
		"AWS_SECRET_ACCESS_KEY": creds.SecretAccessKey,
		"AWS_SESSION_TOKEN":     creds.SessionToken,
		"AWS_REGION":            awsConfig.Region,
	} {
		// Exported both to this process' environment and to the environment
		// forwarded to the container.
		os.Setenv(key, value)
		if !config.tgf.ConfigDump {
			// If we are saving the current configuration, we do not want to save the current credentials
			config.Environment[key] = value
		}
	}
	return nil
}
// setDefaultValues sets the uninitialized values from the config files and the parameter store
// Priorities (Higher overwrites lower values):
// 1. Configuration location files
// 2. SSM Parameter Config
// 3. tgf.user.config
// 4. .tgf.config
func (config *TGFConfig) setDefaultValues() {
	app := config.tgf
	//app.PsPath, app.ConfigLocation, app.ConfigFiles
	// configData pairs a raw config document with its standalone parsed form
	// and a human-readable name used in error messages.
	type configData struct {
		Name   string
		Raw    string
		Config *TGFConfig
	}
	configsData := []configData{}
	// --config-dump output must not contain any logs to be valid YAML
	// so make sure logs go to stderr in this case
	if config.tgf.ConfigDump {
		log.SetStdout(os.Stdout)
	}
	// Fetch SSM configs
	if config.awsConfigExist() {
		if err := config.InitAWS(); err != nil {
			log.Fatal(err)
		}
		if app.ConfigLocation == "" {
			// The remote config location/paths may themselves be stored in SSM.
			values := config.readSSMParameterStore(app.PsPath)
			app.ConfigLocation = values[remoteConfigLocationParameter]
			if app.ConfigFiles == "" {
				app.ConfigFiles = values[remoteConfigPathsParameter]
			}
		}
	}
	for _, configFile := range config.findRemoteConfigFiles(app.ConfigLocation, app.ConfigFiles) {
		configsData = append(configsData, configData{Name: "RemoteConfigFile", Raw: configFile})
	}
	if config.awsConfigExist() {
		// Only fetch SSM parameters if no ConfigFile was found
		if len(configsData) == 0 {
			ssmConfig := parseSsmConfig(config.readSSMParameterStore(app.PsPath))
			if ssmConfig != "" {
				configsData = append(configsData, configData{Name: "AWS/ParametersStore", Raw: ssmConfig})
			}
		}
	}
	// Fetch file configs
	for _, configFile := range config.findConfigFiles(must(os.Getwd()).(string)) {
		log.Debugln("Reading configuration from", configFile)
		bytes, err := ioutil.ReadFile(configFile)
		if err != nil {
			log.Errorf("Error while loading configuration file %s\n%v", configFile, err)
			continue
		}
		configsData = append(configsData, configData{Name: configFile, Raw: string(bytes)})
	}
	// Parse/Unmarshal configs
	for i := range configsData {
		configData := &configsData[i]
		// Merge every document into the main config (later documents
		// overwrite values set by earlier ones)...
		if err := collections.ConvertData(configData.Raw, config); err != nil {
			log.Errorf("Error while loading configuration from %s\nConfiguration file must be valid YAML, JSON or HCL\n%v\nContent:\n%s", configData.Name, err, configData.Raw)
		}
		// ...and also keep a standalone parsed copy so the per-document
		// fields (image build, run before/after) can be collected below.
		collections.ConvertData(configData.Raw, &configData.Config)
	}
	// Special case for image build configs and run before/after, we must build a list of instructions from all configs
	for i := range configsData {
		configData := &configsData[i]
		if configData.Config == nil {
			log.Errorf("Config from %s is nil. It did not load correctly", configData.Name)
			continue
		}
		if configData.Config.ImageBuild != "" {
			// NOTE(review): build configs are prepended, so the last document
			// processed ends up first in the list — confirm intended ordering.
			config.imageBuildConfigs = append([]TGFConfigBuild{{
				Instructions: configData.Config.ImageBuild,
				Folder:       configData.Config.ImageBuildFolder,
				Tag:          configData.Config.ImageBuildTag,
				source:       configData.Name,
			}}, config.imageBuildConfigs...)
		}
		if configData.Config.RunBefore != "" {
			config.runBeforeCommands = append(config.runBeforeCommands, configData.Config.RunBefore)
		}
		if configData.Config.RunAfter != "" {
			config.runAfterCommands = append(config.runAfterCommands, configData.Config.RunAfter)
		}
	}
	// We reverse the execution of before scripts to ensure that more specific commands are executed last
	config.runBeforeCommands = collections.AsList(config.runBeforeCommands).Reverse().Strings()
}
// reVersion matches a major.minor or major.minor.patch version number.
var reVersion = regexp.MustCompile(`(?P<version>\d+\.\d+(?:\.\d+){0,1})`)

// reVersionWithEndMarkers matches a string that is exactly a version number.
var reVersionWithEndMarkers = regexp.MustCompile(`^` + reVersion.String() + `$`)

// reImage splits an image reference into its name, version, separator,
// specialized suffix and fixed-tag parts.
// https://regex101.com/r/ZKt4OP/5
var reImage = regexp.MustCompile(`^(?P<image>.*?)(?::(?:` + reVersion.String() + `(?:(?P<sep>[\.-])(?P<spec>.+))?|(?P<fix>.+)))?$`)
// validate checks the configuration for inconsistencies and version
// constraint violations. Non-fatal issues are returned as ConfigWarning
// values; an image version outside RequiredVersionRange is returned as a
// VersionMistmatchError and stops further validation.
func (config *TGFConfig) validate() (errors []error) {
	if strings.Contains(config.Image, ":") {
		// It is possible that the : is there because we do not use a standard registry port, so we remove the port from the config.Image and
		// check again if there is still a : in the image name before returning a warning
		portRemoved := regexp.MustCompile(`.*:\d+/`).ReplaceAllString(config.Image, "")
		if strings.Contains(portRemoved, ":") {
			errors = append(errors, ConfigWarning(fmt.Sprintf("Image should not contain the version: %s", config.Image)))
		}
	}
	if config.ImageVersion != nil && strings.ContainsAny(*config.ImageVersion, ":-") {
		errors = append(errors, ConfigWarning(fmt.Sprintf("Image version parameter should not contain the image name nor the specialized version: %s", *config.ImageVersion)))
	}
	if config.ImageTag != nil && strings.ContainsAny(*config.ImageTag, ":") {
		errors = append(errors, ConfigWarning(fmt.Sprintf("Image tag parameter should not contain the image name: %s", *config.ImageTag)))
	}
	// Version checks are skipped for locally built binaries.
	if config.RecommendedTGFVersion != "" && version != locallyBuilt {
		if valid, err := CheckVersionRange(version, config.RecommendedTGFVersion); err != nil {
			errors = append(errors, fmt.Errorf("unable to check recommended tgf version %s vs %s: %v", version, config.RecommendedTGFVersion, err))
		} else if !valid {
			errors = append(errors, ConfigWarning(fmt.Sprintf("TGF v%s does not meet the recommended version range %s", version, config.RecommendedTGFVersion)))
		}
	}
	// Image version constraints only apply when the version looks like a
	// plain numeric version (not a specialized tag).
	if config.RequiredVersionRange != "" && config.ImageVersion != nil && *config.ImageVersion != "" && reVersion.MatchString(*config.ImageVersion) {
		if valid, err := CheckVersionRange(*config.ImageVersion, config.RequiredVersionRange); err != nil {
			errors = append(errors, fmt.Errorf("unable to check recommended image version %s vs %s: %v", *config.ImageVersion, config.RequiredVersionRange, err))
			return
		} else if !valid {
			errors = append(errors, VersionMistmatchError(fmt.Sprintf("Image %s does not meet the required version range %s", config.GetImageName(), config.RequiredVersionRange)))
			return
		}
	}
	if config.RecommendedImageVersion != "" && config.ImageVersion != nil && *config.ImageVersion != "" && reVersion.MatchString(*config.ImageVersion) {
		if valid, err := CheckVersionRange(*config.ImageVersion, config.RecommendedImageVersion); err != nil {
			errors = append(errors, fmt.Errorf("unable to check recommended image version %s vs %s: %v", *config.ImageVersion, config.RecommendedImageVersion, err))
		} else if !valid {
			errors = append(errors, ConfigWarning(fmt.Sprintf("Image %s does not meet the recommended version range %s", config.GetImageName(), config.RecommendedImageVersion)))
		}
	}
	return
}
// ValidateVersion ensures that the current version is compliant with the
// settings (mainly those in the parameter store). Warnings are only logged;
// a version mismatch is fatal unless a version was explicitly requested on
// the command line; any other validation error is always fatal.
func (config *TGFConfig) ValidateVersion() bool {
	requestedVersion := config.tgf.ImageVersion
	for _, issue := range config.validate() {
		switch typed := issue.(type) {
		case ConfigWarning:
			log.Warning(typed)
		case VersionMistmatchError:
			log.Error(typed)
			if requestedVersion == "-" {
				// We consider this as a fatal error only if the version has
				// not been explicitly specified on the command line.
				return false
			}
		default:
			log.Error(typed)
			return false
		}
	}
	return true
}
// IsPartialVersion reports whether the configured image version only has a
// major and minor component (x.x) instead of a full semver (x.x.x).
func (config *TGFConfig) IsPartialVersion() bool {
	v := config.ImageVersion
	if v == nil {
		return false
	}
	return reVersionWithEndMarkers.MatchString(*v) && strings.Count(*v, ".") == 1
}
// GetImageName returns the actual image name, with the configured version
// and/or tag appended (joined by tagSeparator when both are non-empty).
func (config *TGFConfig) GetImageName() string {
	suffix := ""
	if v := config.ImageVersion; v != nil {
		suffix = *v
	}
	if t := config.ImageTag; t != nil {
		if suffix != "" && *t != "" {
			suffix += tagSeparator
		}
		suffix += *t
	}
	// NOTE(review): a one-character suffix is dropped (len > 1, not > 0) —
	// confirm this is intentional.
	if len(suffix) > 1 {
		return fmt.Sprintf("%s:%s", config.Image, suffix)
	}
	return config.Image
}
// parseAliases will parse the original argument list and replace aliases only in the first argument.
func (config *TGFConfig) parseAliases(args []string) []string {
	if len(args) > 0 {
		if replace := String(config.Aliases[args[0]]); replace != "" {
			var result collections.StringArray
			// Protect quoted sections so Fields does not split inside them,
			// then restore the protected parts and strip temporary quoting.
			replace, quoted := replace.Protect()
			result = replace.Fields()
			if len(quoted) > 0 {
				for i := range result {
					result[i] = result[i].RestoreProtected(quoted).ReplaceN(`="`, "=", 1).Trim(`"`)
				}
			}
			// An alias may expand to another alias: resolve recursively and
			// keep the remaining (untouched) original arguments.
			return append(config.parseAliases(result.Strings()), args[1:]...)
		}
	}
	return args
}
// ParseAliases checks if the actual command matches an alias and, when the
// expansion changed the first argument, re-parses the command line with the
// expanded argument list.
func (config *TGFConfig) ParseAliases() {
	original := config.tgf.Unmanaged
	expanded := config.parseAliases(original)
	if len(expanded) == 0 || len(original) == 0 || expanded[0] == original[0] {
		return
	}
	config.tgf.Unmanaged = nil
	must(config.tgf.Application.Parse(expanded))
}
// readSSMParameterStore fetches every parameter under ssmParameterFolder
// (recursively, with decryption) and returns them keyed by their path
// relative to that folder. Errors are logged and yield an empty map.
func (config *TGFConfig) readSSMParameterStore(ssmParameterFolder string) map[string]string {
	values := make(map[string]string)
	awsConfig, err := config.getAwsConfig(0)
	log.Debugf("Reading configuration from SSM %s in %s", ssmParameterFolder, awsConfig.Region)
	if err != nil {
		log.Warningf("Caught an error while creating an AWS session: %v", err)
		return values
	}
	svc := ssm.NewFromConfig(awsConfig)
	// NOTE(review): the response is not paginated here; parameters beyond
	// the first page would be ignored — confirm the folder stays small.
	response, err := svc.GetParametersByPath(context.TODO(), &ssm.GetParametersByPathInput{
		Path:           aws.String(ssmParameterFolder),
		Recursive:      true,
		WithDecryption: true,
	})
	if err != nil {
		log.Warningf("Caught an error while reading from `%s` in SSM: %v", ssmParameterFolder, err)
		return values
	}
	for _, parameter := range response.Parameters {
		// Strip the folder prefix and any leading slash to get a bare key.
		key := strings.TrimLeft(strings.Replace(*parameter.Name, ssmParameterFolder, "", 1), "/")
		values[key] = *parameter.Value
	}
	return values
}
// findRemoteConfigFiles downloads the configuration documents listed in
// files (a ":"-separated list, defaulting to remoteDefaultConfigPath) from
// the given location and returns their non-empty contents. Fetch or read
// failures are logged and the corresponding file is skipped.
func (config *TGFConfig) findRemoteConfigFiles(location, files string) []string {
	if location == "" {
		return []string{}
	}
	if !strings.HasSuffix(location, "/") {
		location += "/"
	}
	if files == "" {
		files = remoteDefaultConfigPath
	}
	configPaths := strings.Split(files, ":")
	// Files are fetched into a temp dir that is removed before returning;
	// only their contents are kept.
	tempDir := must(ioutil.TempDir("", "tgf-config-files")).(string)
	defer os.RemoveAll(tempDir)
	configs := []string{}
	for _, configPath := range configPaths {
		fullConfigPath := location + configPath
		destConfigPath := path.Join(tempDir, configPath)
		log.Debugln("Reading configuration from", fullConfigPath)
		// go-getter supports many source schemes (s3, git, http, file...).
		source := must(getter.Detect(fullConfigPath, must(os.Getwd()).(string), getter.Detectors)).(string)
		err := getter.GetFile(destConfigPath, source)
		if err == nil {
			// Some getters report success without creating the file.
			_, err = os.Stat(destConfigPath)
			if os.IsNotExist(err) {
				err = errors.New("config file was not found at the source")
			}
		}
		if err != nil {
			log.Warningf("Error fetching config at %s: %v", source, err)
			continue
		}
		if content, err := ioutil.ReadFile(destConfigPath); err != nil {
			log.Warningf("Error reading fetched config file %s: %v", configPath, err)
		} else {
			contentString := string(content)
			if contentString != "" {
				configs = append(configs, contentString)
			}
		}
	}
	return configs
}
// parseSsmConfig converts the key/value pairs read from the SSM parameter
// store into a YAML document. Values that already look like collections
// ({...} or [...]) are inlined as-is; anything else is quoted as a string.
func parseSsmConfig(parameterValues map[string]string) string {
	var builder strings.Builder
	for key, value := range parameterValues {
		looksLikeDict := strings.HasPrefix(value, "{") && strings.HasSuffix(value, "}")
		looksLikeList := strings.HasPrefix(value, "[") && strings.HasSuffix(value, "]")
		if !looksLikeDict && !looksLikeList {
			value = fmt.Sprintf("\"%s\"", value)
		}
		builder.WriteString(fmt.Sprintf("%s: %s\n", key, value))
	}
	return builder.String()
}
// Check if there is an AWS configuration available.
//
// We call this function before trying to init an AWS session. This avoid trying to init a session in a non AWS context
// and having to wait for metadata resolution or generating an error.
//
// The result is cached in cachedAWSConfigExistCheck so the filesystem and
// PATH probes only run once per process.
func (config TGFConfig) awsConfigExist() (result bool) {
	if cachedAWSConfigExistCheck != nil {
		return *cachedAWSConfigExistCheck
	}
	defer func() { cachedAWSConfigExistCheck = &result }()
	app := config.tgf
	if !app.UseAWS {
		log.Debugln("Not trying to read the config from AWS. It is disabled")
		return false
	}
	log.Debugln("Checking if the TGF configuration should be read from AWS SSM. This will happen if any of the following are true:")
	environmentVariablesExist := os.Getenv("AWS_PROFILE")+os.Getenv("AWS_ACCESS_KEY_ID")+os.Getenv("AWS_CONFIG_FILE")+os.Getenv("TGF_USE_AWS_CONFIG") != ""
	log.Debugln(" - One of these env variables exist (AWS_PROFILE, AWS_ACCESS_KEY_ID, AWS_CONFIG_FILE, TGF_USE_AWS_CONFIG):", environmentVariablesExist)
	if environmentVariablesExist {
		// If any AWS identification variable is defined, we consider that we are in an AWS environment.
		return true
	}
	_, err := exec.LookPath("aws")
	awsCliIsInstalled := err == nil
	log.Debugln(" - The AWS CLI is installed:", awsCliIsInstalled)
	if awsCliIsInstalled {
		// If aws program is installed, we also consider that we are in an AWS environment.
		return true
	}
	// Otherwise, we check if the current user has a folder named .aws defined under its home directory.
	awsFolderExists := false
	if currentUser, _ := user.Current(); currentUser != nil {
		if awsFolder, err := os.Stat(filepath.Join(currentUser.HomeDir, ".aws")); err == nil {
			awsFolderExists = awsFolder.IsDir()
		}
	}
	log.Debugln(" - The ~/.aws folder exists:", awsFolderExists)
	return awsFolderExists
}
// findConfigFiles returns the list of configuration files found from the
// given folder up to the root folder, ordered root-first so that deeper
// (more specific) files are processed last by the caller.
func (config TGFConfig) findConfigFiles(folder string) (result []string) {
	candidates := []string{userConfigFile, configFile}
	if config.tgf.DisableUserConfig {
		candidates = []string{configFile}
	}
	for _, name := range candidates {
		candidate := filepath.Join(folder, name)
		if _, err := os.Stat(candidate); !os.IsNotExist(err) {
			result = append(result, candidate)
		}
	}
	// Recurse toward the filesystem root; parent results come first.
	if parent := filepath.Dir(folder); parent != folder {
		result = append(config.findConfigFiles(parent), result...)
	}
	return
}
// getTgfConfigFields lists the YAML field names declared on TGFConfig
// (minus the ",omitempty" option), colored green for display.
func getTgfConfigFields() []string {
	fields := []string{}
	structType := reflect.TypeOf(TGFConfig{})
	for i := 0; i < structType.NumField(); i++ {
		if tag := structType.Field(i).Tag.Get("yaml"); tag != "" {
			fields = append(fields, color.GreenString(strings.Replace(tag, ",omitempty", "", -1)))
		}
	}
	return fields
}
// CheckVersionRange compare a version with a range of values
// Check https://github.com/blang/semver/blob/master/README.md for more information
func CheckVersionRange(version, compare string) (bool, error) {
	if strings.Count(version, ".") == 1 {
		// Patch is irrelevant if major and minor are OK
		version += ".9999"
	}
	parsedVersion, err := semver.Make(version)
	if err != nil {
		return false, err
	}
	versionRange, err := semver.ParseRange(compare)
	if err != nil {
		return false, err
	}
	return versionRange(parsedVersion), nil
}
// ConfigWarning is used to represent messages that should not be considered as critical error
type ConfigWarning string

// Error implements the error interface so warnings can travel through the
// same validation pipeline as real errors.
func (e ConfigWarning) Error() string {
	return string(e)
}
// VersionMistmatchError is used to describe an out of range version
// (NOTE: "Mistmatch" is a typo kept for backward compatibility — callers
// reference this exported name).
type VersionMistmatchError string

// Error implements the error interface.
func (e VersionMistmatchError) Error() string {
	return string(e)
}
// Restart re-runs tgf with the same arguments in a child process and
// returns the exit code to use (0 on success, 1 on failure).
func (config *TGFConfig) Restart() int {
	child := exec.Command(os.Args[0], os.Args[1:]...)
	child.Stdin = os.Stdin
	child.Stdout = os.Stdout
	child.Stderr = os.Stderr
	if err := child.Run(); err != nil {
		log.Errorln("Error on restart:", err)
		return 1
	}
	return 0
}
// GetUpdateVersion fetches the latest tgf version number from the GITHUB_API.
// The configured UpdateVersion takes precedence to avoid hammering GitHub.
func (config *TGFConfig) GetUpdateVersion() (string, error) {
	if config.UpdateVersion != "" {
		// The target version number has been specified in the configuration to avoid
		// hammering GitHub
		return config.UpdateVersion, nil
	}
	resp, err := http.Get("https://api.github.com/repos/coveooss/tgf/releases/latest")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	var jsonResponse map[string]string
	// The decode error is deliberately ignored: the GitHub payload contains
	// non-string fields which make a strict decode into map[string]string
	// fail part-way, but the string "tag_name" field is still captured.
	_ = json.NewDecoder(resp.Body).Decode(&jsonResponse)
	latestVersion := jsonResponse["tag_name"]
	if latestVersion == "" {
		return "", errors.New("Error parsing json response")
	}
	// Tags look like "v1.2.3". Previously the first character was stripped
	// unconditionally, which would corrupt a tag without the "v" prefix.
	return strings.TrimPrefix(latestVersion, "v"), nil
}
// ShouldUpdate evaluate wether tgf updater should run or not depending on cli options and config file.
// Decision order: an explicit --auto-update flag always wins; otherwise the
// config's AutoUpdate switch, the time elapsed since the last check, and
// whether this is a locally built binary are consulted.
func (config *TGFConfig) ShouldUpdate() bool {
	app := config.tgf
	if app.AutoUpdateSet {
		if app.AutoUpdate {
			if version == locallyBuilt {
				// Forcing an update on a local build: pretend we are at 0.0.0
				// so any published release is seen as newer. NOTE: this
				// mutates the package-level version variable.
				version = "0.0.0"
				log.Debug("Auto update is forced locally. Checking version...")
			} else {
				log.Debug("Auto update is forced. Checking version...")
			}
		} else {
			log.Debug("Auto update is force disabled. Bypassing update version check.")
			return false
		}
	} else {
		if !config.AutoUpdate {
			log.Debug("Auto update is disabled in the config. Bypassing update version check.")
			return false
		} else if config.GetLastRefresh(autoUpdateFile) < config.AutoUpdateDelay {
			log.Debugf("Less than %v since last check. Bypassing update version check.", config.AutoUpdateDelay.String())
			return false
		} else {
			if version == locallyBuilt {
				log.Debug("Running locally. Bypassing update version check.")
				return false
			}
			log.Debug("An update is due. Checking version...")
		}
	}
	return true
}
// getTgfFile downloads the zip archive at url and returns a reader on the
// first file it contains (the tgf binary). The caller owns the returned
// ReadCloser.
func (config *TGFConfig) getTgfFile(url string) (tgfFile io.ReadCloser, err error) {
	// request the new zip file
	resp, err := http.Get(url)
	if err != nil {
		return
	}
	// Close the body on every path; it was previously leaked when the
	// server answered with a non-200 status.
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		err = fmt.Errorf("HTTP status error %v", resp.StatusCode)
		return
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return
	}
	zipReader, err := zip.NewReader(bytes.NewReader(body), int64(len(body)))
	if err != nil {
		return
	}
	// Guard against an empty archive; indexing File[0] would panic.
	if len(zipReader.File) == 0 {
		err = fmt.Errorf("update archive at %s is empty", url)
		return
	}
	tgfFile, err = zipReader.File[0].Open()
	if err != nil {
		// The previous message ("Failed to read new version rollback from bad
		// update") was copy-pasted from the rollback path and misleading.
		log.Errorln("Failed to open file from update archive:", err)
		return
	}
	return
}
// DoUpdate fetch the executable from the link, unzip it and replace it with the current
func (config *TGFConfig) DoUpdate(url string) (err error) {
	// NOTE(review): the temp file only reserves a path where the previous
	// binary is saved; its handle is never closed and the file is never
	// removed — confirm this is acceptable.
	savePath, err := ioutil.TempFile("", "tgf.previous-version")
	if err != nil {
		return
	}
	tgfFile, err := config.getTgfFile(url)
	if err != nil {
		return
	}
	// Apply the update in place; on failure, attempt to restore the saved
	// previous binary and log if even the rollback fails.
	if err = selfupdate.Apply(tgfFile, selfupdate.Options{OldSavePath: savePath.Name()}); err != nil {
		if err := selfupdate.RollbackError(err); err != nil {
			log.Errorln("Failed to rollback from bad update:", err)
		}
	}
	return
}
// GetLastRefresh get the lastime the tgf update file was updated
// (thin wrapper around the package-level lastRefresh helper).
func (config *TGFConfig) GetLastRefresh(autoUpdateFile string) time.Duration {
	return lastRefresh(autoUpdateFile)
}
// SetLastRefresh set the lastime the tgf update file was updated
// (thin wrapper around the package-level touchImageRefresh helper).
func (config *TGFConfig) SetLastRefresh(autoUpdateFile string) {
	touchImageRefresh(autoUpdateFile)
}
| [
"\"AWS_PROFILE\"",
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_PROFILE\"",
"\"AWS_PROFILE\"",
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_CONFIG_FILE\"",
"\"TGF_USE_AWS_CONFIG\""
]
| []
| [
"TGF_USE_AWS_CONFIG",
"AWS_PROFILE",
"AWS_CONFIG_FILE",
"AWS_ACCESS_KEY_ID"
]
| [] | ["TGF_USE_AWS_CONFIG", "AWS_PROFILE", "AWS_CONFIG_FILE", "AWS_ACCESS_KEY_ID"] | go | 4 | 0 | |
qhub/deploy.py | import logging
import os
import re
from subprocess import CalledProcessError
from qhub.provider import terraform
from qhub.utils import timer, check_cloud_credentials
from qhub.provider.dns.cloudflare import update_record
from qhub.state import terraform_state_sync
logger = logging.getLogger(__name__)
def deploy_configuration(
    config,
    dns_provider,
    dns_auto_provision,
    disable_prompt,
    skip_remote_state_provision,
):
    """Deploy QHub as described by ``config``, logging elapsed time.

    Any ``CalledProcessError`` raised by the underlying terraform steps has
    its captured output logged before being re-raised to the caller.
    """
    logger.info(f'All qhub endpoints will be under https://{config["domain"]}')
    with timer(logger, "deploying QHub"):
        try:
            guided_install(
                config,
                dns_provider,
                dns_auto_provision,
                disable_prompt,
                skip_remote_state_provision,
            )
        except CalledProcessError as e:
            logger.error(e.output)
            raise e
def guided_install(
    config,
    dns_provider,
    dns_auto_provision,
    disable_prompt=False,
    skip_remote_state_provision=False,
):
    """Run the staged QHub deployment: credential checks, remote state,
    incremental terraform applies, DNS update, then the full apply.

    Raises on missing credentials/secrets or if the load balancer address
    cannot be found in the terraform output.
    """
    # 01 Check Environment Variables
    check_cloud_credentials(config)
    # Check that secrets required for terraform
    # variables are set as required
    check_secrets(config)
    # 02 Create terraform backend remote state bucket
    # backwards compatible with `qhub-config.yaml` which
    # don't have `terraform_state` key
    if (
        (not skip_remote_state_provision)
        and (config.get("terraform_state", {}).get("type", "") == "remote")
        and (config.get("provider") != "local")
    ):
        terraform_state_sync(config)
    # 3 kubernetes-alpha provider requires that kubernetes be
    # provisioned before any "kubernetes_manifests" resources
    terraform.init(directory="infrastructure")
    terraform.apply(
        directory="infrastructure",
        targets=[
            "module.kubernetes",
            "module.kubernetes-initialization",
        ],
    )
    # 04 Create qhub initial state (up to nginx-ingress)
    terraform.init(directory="infrastructure")
    terraform.apply(
        directory="infrastructure",
        targets=[
            "module.kubernetes",
            "module.kubernetes-initialization",
            "module.kubernetes-ingress",
        ],
    )
    cmd_output = terraform.output(directory="infrastructure")
    # This is a bit ugly, but the issue we have at the moment is being unable
    # to parse cmd_output as json on Github Actions.
    ip_matches = re.findall(r'"ip": "(?!string)(.+)"', cmd_output)
    hostname_matches = re.findall(r'"hostname": "(?!string)(.+)"', cmd_output)
    if ip_matches:
        ip_or_hostname = ip_matches[0]
    elif hostname_matches:
        ip_or_hostname = hostname_matches[0]
    else:
        raise ValueError(f"IP Address not found in: {cmd_output}")
    # 05 Update DNS to point to qhub deployment
    if dns_auto_provision and dns_provider == "cloudflare":
        # Split the domain into a record name and its two-label zone name.
        record_name, zone_name = (
            config["domain"].split(".")[:-2],
            config["domain"].split(".")[-2:],
        )
        record_name = ".".join(record_name)
        zone_name = ".".join(zone_name)
        # A records for providers exposing an IP; CNAME for AWS hostnames.
        if config["provider"] in {"do", "gcp", "azure"}:
            update_record(zone_name, record_name, "A", ip_or_hostname)
            if config.get("clearml", {}).get("enabled"):
                add_clearml_dns(zone_name, record_name, "A", ip_or_hostname)
        elif config["provider"] == "aws":
            update_record(zone_name, record_name, "CNAME", ip_or_hostname)
            if config.get("clearml", {}).get("enabled"):
                add_clearml_dns(zone_name, record_name, "CNAME", ip_or_hostname)
        else:
            logger.info(
                f"Couldn't update the DNS record for cloud provider: {config['provider']}"
            )
    elif not disable_prompt:
        input(
            f"Take IP Address {ip_or_hostname} and update DNS to point to "
            f'"{config["domain"]}" [Press Enter when Complete]'
        )
    # 06 Full deploy QHub
    terraform.apply(directory="infrastructure")
def add_clearml_dns(zone_name, record_name, record_type, ip_or_hostname):
    """Create the ClearML DNS records (app/api/files) for the deployment."""
    logger.info(f"Setting DNS record for ClearML for record: {record_name}")
    for subdomain in ("app", "api", "files"):
        update_record(
            zone_name,
            f"{subdomain}.clearml.{record_name}",
            record_type,
            ip_or_hostname,
        )
def check_secrets(config):
    """
    Checks that the appropriate variables are set based on the current config.
    These variables are prefixed with TF_VAR_ and are used to populate the
    corresponding variables in the terraform deployment. e.g.
    TF_VAR_prefect_token sets the prefect_token variable in Terraform. These
    values are set in the terraform state but are not leaked when the
    terraform render occurs.
    """
    missing_env_vars = []
    # Prefect integration requires its API token to be provided via env.
    prefect_enabled = "prefect" in config and config["prefect"]["enabled"]
    if prefect_enabled:
        required_var = "TF_VAR_prefect_token"
        if required_var not in os.environ:
            missing_env_vars.append(required_var)
    if missing_env_vars:
        raise EnvironmentError(
            "Some environment variables used to propagate secrets to the "
            "terraform deployment were not set. Please set these before "
            f"continuing: {', '.join(missing_env_vars)}"
        )
| []
| []
| []
| [] | [] | python | 0 | 0 | |
internal/telemetry/tracing/google/google_tracer.go | package google
import (
"os"
"strings"
texporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace"
sdk_trace "go.opentelemetry.io/otel/sdk/trace"
"github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/telemetry/tracing"
"github.com/caos/zitadel/internal/telemetry/tracing/otel"
)
// Config holds the settings used to build a Google Cloud Trace exporter.
type Config struct {
	// ProjectID is the Google Cloud project that receives the traces.
	ProjectID string
	// MetricPrefix is passed through to otel.NewTracer as the tracer name prefix.
	MetricPrefix string
	// Fraction is the sampling ratio handed to sdk_trace.TraceIDRatioBased.
	Fraction float64
}

// Tracer wraps the generic OpenTelemetry tracer configured with the Google exporter.
type Tracer struct {
	otel.Tracer
}
// NewTracer installs a Google Cloud Trace based tracer as the global
// tracing.T. It fails fast when GOOGLE_APPLICATION_CREDENTIALS does not
// appear to be configured (see envIsSet).
func (c *Config) NewTracer() error {
	if !envIsSet() {
		return errors.ThrowInvalidArgument(nil, "GOOGL-sdh3a", "env not properly set, GOOGLE_APPLICATION_CREDENTIALS is misconfigured or missing")
	}
	// ParentBased: honor the parent span's sampling decision; sample root
	// spans at the configured fraction.
	sampler := sdk_trace.ParentBased(sdk_trace.TraceIDRatioBased(c.Fraction))
	exporter, err := texporter.New(texporter.WithProjectID(c.ProjectID))
	if err != nil {
		return err
	}
	tracing.T = &Tracer{Tracer: *(otel.NewTracer(c.MetricPrefix, sampler, exporter))}
	return nil
}
func envIsSet() bool {
gAuthCred := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
return strings.Contains(gAuthCred, ".json")
}
| [
"\"GOOGLE_APPLICATION_CREDENTIALS\""
]
| []
| [
"GOOGLE_APPLICATION_CREDENTIALS"
]
| [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | go | 1 | 0 | |
example/bucket/getObjectVersion.go | package main
import (
"context"
"fmt"
"os"
"net/url"
"net/http"
"github.com/tencentyun/cos-go-sdk-v5"
"github.com/tencentyun/cos-go-sdk-v5/debug"
)
// log_status pretty-prints an error returned by the COS SDK: nil is ignored,
// not-found resources are reported as a warning, COS service errors are
// expanded into their code/message/resource/request-id fields, and anything
// else is printed verbatim.
func log_status(err error) {
	if err == nil {
		return
	}
	if cos.IsNotFoundError(err) {
		// WARN
		fmt.Println("WARN: Resource is not existed")
		return
	}
	if cosErr, ok := cos.IsCOSError(err); ok {
		// ERROR returned by the COS service itself
		fmt.Printf("ERROR: Code: %v\n", cosErr.Code)
		fmt.Printf("ERROR: Message: %v\n", cosErr.Message)
		fmt.Printf("ERROR: Resource: %v\n", cosErr.Resource)
		fmt.Printf("ERROR: RequestId: %v\n", cosErr.RequestID)
		return
	}
	// ERROR of any other kind
	fmt.Printf("ERROR: %v\n", err)
}
// main lists every object version and delete marker in the bucket,
// following the key/version-id markers until the listing is no longer
// truncated. Credentials are taken from the COS_SECRETID/COS_SECRETKEY
// environment variables and full request/response dumps are enabled.
func main() {
	u, _ := url.Parse("https://test-1259654469.cos.ap-guangzhou.myqcloud.com")
	b := &cos.BaseURL{
		BucketURL: u,
	}
	c := cos.NewClient(b, &http.Client{
		Transport: &cos.AuthorizationTransport{
			SecretID:  os.Getenv("COS_SECRETID"),
			SecretKey: os.Getenv("COS_SECRETKEY"),
			// Debug transport dumps full requests and responses to stdout.
			Transport: &debug.DebugRequestTransport{
				RequestHeader:  true,
				RequestBody:    true,
				ResponseHeader: true,
				ResponseBody:   true,
			},
		},
	})
	// Pagination state: start from the beginning and keep requesting pages
	// until the service reports the listing is complete.
	keyMarker := ""
	versionIdMarker := ""
	isTruncated := true
	opt := &cos.BucketGetObjectVersionsOptions{}
	for isTruncated {
		opt.KeyMarker = keyMarker
		opt.VersionIdMarker = versionIdMarker
		v, _, err := c.Bucket.GetObjectVersions(context.Background(), opt)
		if err != nil {
			log_status(err)
			break
		}
		for _, vc := range v.Version {
			fmt.Printf("Version: %v, %v, %v, %v\n", vc.Key, vc.Size, vc.VersionId, vc.IsLatest)
		}
		for _, dc := range v.DeleteMarker {
			fmt.Printf("DeleteMarker: %v, %v, %v\n", dc.Key, dc.VersionId, dc.IsLatest)
		}
		// Advance the pagination markers returned by the previous page.
		keyMarker = v.NextKeyMarker
		versionIdMarker = v.NextVersionIdMarker
		isTruncated = v.IsTruncated
	}
}
| [
"\"COS_SECRETID\"",
"\"COS_SECRETKEY\""
]
| []
| [
"COS_SECRETKEY",
"COS_SECRETID"
]
| [] | ["COS_SECRETKEY", "COS_SECRETID"] | go | 2 | 0 | |
plugins/mock/mock_plugin.go | package main
import (
"fmt"
"os"
"sync"
"time"
v2 "github.com/cloudevents/sdk-go/v2"
"github.com/redhat-cne/cloud-event-proxy/pkg/common"
"github.com/redhat-cne/sdk-go/pkg/channel"
ceEvent "github.com/redhat-cne/sdk-go/pkg/event"
"github.com/redhat-cne/sdk-go/pkg/event/ptp"
"github.com/redhat-cne/sdk-go/pkg/pubsub"
"github.com/redhat-cne/sdk-go/pkg/types"
v1amqp "github.com/redhat-cne/sdk-go/v1/amqp"
v1pubsub "github.com/redhat-cne/sdk-go/v1/pubsub"
log "github.com/sirupsen/logrus"
)
// Copyright 2020 The Cloud Native Events Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
var (
	// resourceAddress is the event resource template; %s is replaced by the node name.
	resourceAddress string = "/cluster/node/%s/mock"
	// config holds the shared proxy configuration handed to Start.
	config *common.SCConfiguration
)
// Start mock plugin to process events,metrics and status, expects rest api available to create publisher and subscriptions
// Start mock plugin to process events,metrics and status, expects rest api available to create publisher and subscriptions.
// It creates a publisher for this node, registers a status listener that
// fires a mock event on each status ping, and launches a goroutine that
// publishes a mock PTP state-change event every 5 seconds.
func Start(wg *sync.WaitGroup, configuration *common.SCConfiguration, fn func(e interface{}) error) error { //nolint:deadcode,unused
	config = configuration
	nodeName := os.Getenv("NODE_NAME")
	if nodeName == "" {
		log.Error("cannot find NODE_NAME environment variable")
		return fmt.Errorf("cannot find NODE_NAME environment variable %s", nodeName)
	}
	// 1. Create event Publication
	var pub pubsub.PubSub
	var err error
	if pub, err = createPublisher(fmt.Sprintf(resourceAddress, nodeName)); err != nil {
		log.Errorf("failed to create a publisher %v", err)
		return err
	}
	log.Printf("Created publisher %v", pub)
	// 2.Create Status Listener : This listener will create events
	// method to execute when ping is received
	onStatusRequestFn := func(e v2.Event, d *channel.DataChan) error {
		log.Infof("got status check call,fire events for publisher %s", pub.Resource)
		re, err := createMockEvent(pub) // create a mock event
		if err != nil {
			log.Errorf("failed sending mock event on status pings %s", err)
		} else {
			_ = common.PublishEventViaAPI(config, re)
		}
		d.Type = channel.STATUS
		return nil
	}
	// create amqp listener
	v1amqp.CreateNewStatusListener(config.EventInCh, fmt.Sprintf("%s/%s", pub.Resource, "status"), onStatusRequestFn, fn)
	//create events periodically
	time.Sleep(5 * time.Second)
	// create periodical events
	wg.Add(1)
	go func() {
		defer wg.Done()
		// NOTE(review): this loop never terminates; the WaitGroup entry is
		// held for the process lifetime — confirm intended.
		for range time.Tick(5 * time.Second) {
			// create an event
			if mEvent, err := createMockEvent(pub); err == nil {
				// Override the defaults to simulate a locked PTP state.
				mEvent.Type = string(ptp.PtpStateChange)
				mEvent.Data.Values[0].Value = ptp.LOCKED
				mEvent.Data.Values[1].Value = -200
				if err = common.PublishEventViaAPI(config, mEvent); err != nil {
					log.Errorf("error publishing events %s", err)
				}
			} else {
				log.Errorf("error creating mock event")
			}
		}
	}()
	return nil
}
func createPublisher(address string) (pub pubsub.PubSub, err error) {
// this is loopback on server itself. Since current pod does not create any server
returnURL := fmt.Sprintf("%s%s", config.BaseURL, "dummy")
pubToCreate := v1pubsub.NewPubSub(types.ParseURI(returnURL), address)
pub, err = common.CreatePublisher(config, pubToCreate)
if err != nil {
log.Errorf("failed to create publisher %v", pub)
}
return pub, err
}
func createMockEvent(pub pubsub.PubSub) (ceEvent.Event, error) {
// create an event
data := ceEvent.Data{
Version: "v1",
Values: []ceEvent.DataValue{{
Resource: pub.Resource,
DataType: ceEvent.NOTIFICATION,
ValueType: ceEvent.ENUMERATION,
Value: ptp.ACQUIRING_SYNC,
},
{
Resource: pub.Resource,
DataType: ceEvent.METRIC,
ValueType: ceEvent.DECIMAL,
Value: "99.6",
},
},
}
e, err := common.CreateEvent(pub.ID, string(ptp.PtpStateChange), data)
return e, err
}
| [
"\"NODE_NAME\""
]
| []
| [
"NODE_NAME"
]
| [] | ["NODE_NAME"] | go | 1 | 0 | |
cmd/containerd-shim/main_unix.go | //go:build !windows
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"context"
"flag"
"fmt"
"io"
"net"
"os"
"os/signal"
"runtime"
"runtime/debug"
"strings"
"sync"
"syscall"
"time"
"github.com/containerd/containerd/events"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/pkg/process"
"github.com/containerd/containerd/protobuf"
"github.com/containerd/containerd/protobuf/proto"
ptypes "github.com/containerd/containerd/protobuf/types"
shimlog "github.com/containerd/containerd/runtime/v1"
"github.com/containerd/containerd/runtime/v1/shim"
shimapi "github.com/containerd/containerd/runtime/v1/shim/v1"
"github.com/containerd/containerd/sys/reaper"
"github.com/containerd/containerd/version"
"github.com/containerd/ttrpc"
"github.com/sirupsen/logrus"
exec "golang.org/x/sys/execabs"
"golang.org/x/sys/unix"
)
var (
debugFlag bool
versionFlag bool
namespaceFlag string
socketFlag string
addressFlag string
workdirFlag string
runtimeRootFlag string
criuFlag string
systemdCgroupFlag bool
containerdBinaryFlag string
bufPool = sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(nil)
},
}
)
func parseFlags() {
flag.BoolVar(&debugFlag, "debug", false, "enable debug output in logs")
flag.BoolVar(&versionFlag, "v", false, "show the shim version and exit")
flag.StringVar(&namespaceFlag, "namespace", "", "namespace that owns the shim")
flag.StringVar(&socketFlag, "socket", "", "socket path to serve")
flag.StringVar(&addressFlag, "address", "", "grpc address back to main containerd")
flag.StringVar(&workdirFlag, "workdir", "", "path used to storage large temporary data")
flag.StringVar(&runtimeRootFlag, "runtime-root", process.RuncRoot, "root directory for the runtime")
flag.StringVar(&criuFlag, "criu", "", "path to criu binary (deprecated: do not use)")
flag.BoolVar(&systemdCgroupFlag, "systemd-cgroup", false, "set runtime to use systemd-cgroup")
// currently, the `containerd publish` utility is embedded in the daemon binary.
// The daemon invokes `containerd-shim -containerd-binary ...` with its own os.Executable() path.
flag.StringVar(&containerdBinaryFlag, "containerd-binary", "containerd", "path to containerd binary (used for `containerd publish`)")
flag.Parse()
}
func setRuntime() {
debug.SetGCPercent(40)
go func() {
for range time.Tick(30 * time.Second) {
debug.FreeOSMemory()
}
}()
if os.Getenv("GOMAXPROCS") == "" {
// If GOMAXPROCS hasn't been set, we default to a value of 2 to reduce
// the number of Go stacks present in the shim.
runtime.GOMAXPROCS(2)
}
}
func main() {
parseFlags()
if versionFlag {
fmt.Println("containerd-shim")
fmt.Println(" Version: ", version.Version)
fmt.Println(" Revision:", version.Revision)
fmt.Println(" Go version:", version.GoVersion)
fmt.Println("")
return
}
setRuntime()
if debugFlag {
logrus.SetLevel(logrus.DebugLevel)
}
stdout, stderr, err := openStdioKeepAlivePipes(workdirFlag)
if err != nil {
fmt.Fprintf(os.Stderr, "containerd-shim: %s\n", err)
os.Exit(1)
}
defer func() {
stdout.Close()
stderr.Close()
}()
// redirect the following output into fifo to make sure that containerd
// still can read the log after restart
logrus.SetOutput(stdout)
if err := executeShim(); err != nil {
fmt.Fprintf(os.Stderr, "containerd-shim: %s\n", err)
os.Exit(1)
}
}
// If containerd server process dies, we need the shim to keep stdout/err reader
// FDs so that Linux does not SIGPIPE the shim process if it tries to use its end of
// these pipes.
func openStdioKeepAlivePipes(dir string) (io.ReadWriteCloser, io.ReadWriteCloser, error) {
background := context.Background()
keepStdoutAlive, err := shimlog.OpenShimStdoutLog(background, dir)
if err != nil {
return nil, nil, err
}
keepStderrAlive, err := shimlog.OpenShimStderrLog(background, dir)
if err != nil {
return nil, nil, err
}
return keepStdoutAlive, keepStderrAlive, nil
}
func executeShim() error {
// start handling signals as soon as possible so that things are properly reaped
// or if runtime exits before we hit the handler
signals, err := setupSignals()
if err != nil {
return err
}
dump := make(chan os.Signal, 32)
signal.Notify(dump, syscall.SIGUSR1)
path, err := os.Getwd()
if err != nil {
return err
}
server, err := newServer()
if err != nil {
return fmt.Errorf("failed creating server: %w", err)
}
sv, err := shim.NewService(
shim.Config{
Path: path,
Namespace: namespaceFlag,
WorkDir: workdirFlag,
SystemdCgroup: systemdCgroupFlag,
RuntimeRoot: runtimeRootFlag,
},
&remoteEventsPublisher{address: addressFlag},
)
if err != nil {
return err
}
logrus.Debug("registering ttrpc server")
shimapi.RegisterShimService(server, sv)
socket := socketFlag
if err := serve(context.Background(), server, socket); err != nil {
return err
}
logger := logrus.WithFields(logrus.Fields{
"pid": os.Getpid(),
"path": path,
"namespace": namespaceFlag,
})
go func() {
for range dump {
dumpStacks(logger)
}
}()
return handleSignals(logger, signals, server, sv)
}
// serve serves the ttrpc API over a unix socket at the provided path
// this function does not block
func serve(ctx context.Context, server *ttrpc.Server, path string) error {
var (
l net.Listener
err error
)
if path == "" {
f := os.NewFile(3, "socket")
l, err = net.FileListener(f)
f.Close()
path = "[inherited from parent]"
} else {
const (
abstractSocketPrefix = "\x00"
socketPathLimit = 106
)
p := strings.TrimPrefix(path, "unix://")
if len(p) == len(path) {
p = abstractSocketPrefix + p
}
if len(p) > socketPathLimit {
return fmt.Errorf("%q: unix socket path too long (> %d)", p, socketPathLimit)
}
l, err = net.Listen("unix", p)
}
if err != nil {
return err
}
logrus.WithField("socket", path).Debug("serving api on unix socket")
go func() {
defer l.Close()
if err := server.Serve(ctx, l); err != nil &&
!strings.Contains(err.Error(), "use of closed network connection") {
logrus.WithError(err).Fatal("containerd-shim: ttrpc server failure")
}
}()
return nil
}
func handleSignals(logger *logrus.Entry, signals chan os.Signal, server *ttrpc.Server, sv *shim.Service) error {
var (
termOnce sync.Once
done = make(chan struct{})
)
for {
select {
case <-done:
return nil
case s := <-signals:
switch s {
case unix.SIGCHLD:
if err := reaper.Reap(); err != nil {
logger.WithError(err).Error("reap exit status")
}
case unix.SIGTERM, unix.SIGINT:
go termOnce.Do(func() {
ctx := context.TODO()
if err := server.Shutdown(ctx); err != nil {
logger.WithError(err).Error("failed to shutdown server")
}
// Ensure our child is dead if any
sv.Kill(ctx, &shimapi.KillRequest{
Signal: uint32(syscall.SIGKILL),
All: true,
})
sv.Delete(context.Background(), &ptypes.Empty{})
close(done)
})
case unix.SIGPIPE:
}
}
}
}
func dumpStacks(logger *logrus.Entry) {
var (
buf []byte
stackSize int
)
bufferLen := 16384
for stackSize == len(buf) {
buf = make([]byte, bufferLen)
stackSize = runtime.Stack(buf, true)
bufferLen *= 2
}
buf = buf[:stackSize]
logger.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)
}
type remoteEventsPublisher struct {
address string
}
func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error {
ns, _ := namespaces.Namespace(ctx)
encoded, err := protobuf.MarshalAnyToProto(event)
if err != nil {
return err
}
data, err := proto.Marshal(encoded)
if err != nil {
return err
}
cmd := exec.CommandContext(ctx, containerdBinaryFlag, "--address", l.address, "publish", "--topic", topic, "--namespace", ns)
cmd.Stdin = bytes.NewReader(data)
b := bufPool.Get().(*bytes.Buffer)
defer func() {
b.Reset()
bufPool.Put(b)
}()
cmd.Stdout = b
cmd.Stderr = b
c, err := reaper.Default.Start(cmd)
if err != nil {
return err
}
status, err := reaper.Default.WaitTimeout(cmd, c, 30*time.Second)
if err != nil {
return fmt.Errorf("failed to publish event: %s: %w", b.String(), err)
}
if status != 0 {
return fmt.Errorf("failed to publish event: %s", b.String())
}
return nil
}
| [
"\"GOMAXPROCS\""
]
| []
| [
"GOMAXPROCS"
]
| [] | ["GOMAXPROCS"] | go | 1 | 0 | |
backend/config/environment.py | import os
SETTINGS_MODULE = "config.settings.local"
if os.environ.get("ENVIRONMENT") == "dev":
SETTINGS_MODULE = "config.settings.dev"
| []
| []
| [
"ENVIRONMENT"
]
| [] | ["ENVIRONMENT"] | python | 1 | 0 | |
src/app/server/server.go | package server
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"cloud.google.com/go/datastore"
"github.com/nervelife/learning-golang/src/app/data"
"google.golang.org/api/iterator"
)
var ctx context.Context
var projectID string
var client datastore.Client
func init() {
fmt.Println("Initializing server...")
ctx = context.Background()
projectID = os.Getenv("GOOGLE_CLOUD_PROJECT")
if projectID == "" {
log.Fatal("Project ID Not Found")
}
// GOOGLE_APPLICATION_CREDENTIALS
c, err := datastore.NewClient(ctx, projectID)
if err != nil {
log.Fatalf("Failed to create client %v", err)
}
client = *c
log.Println("Initilized...")
}
// Run the server
func Run() {
http.HandleFunc("/", IndexHandler)
http.HandleFunc("/save-author", SaveAuthorHandler)
http.HandleFunc("/get-all-authors", GetAllAuthors)
port := os.Getenv("PORT")
if port == "" {
port = "8088"
log.Printf("Defaulting to port %s", port)
}
log.Printf("Listening on port %s", port)
if err := http.ListenAndServe(":"+port, nil); err != nil {
log.Fatal(err)
}
}
// IndexHandler is an handler
func IndexHandler(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
fmt.Fprint(w, "Hello, World!")
}
// SaveAuthorHandler is an handler
func SaveAuthorHandler(w http.ResponseWriter, r *http.Request) {
a := data.AuthorEntity{
Name: "Giang giang",
Alive: true,
}
aKey := datastore.IncompleteKey("Authors", nil)
if _, err := client.Put(ctx, aKey, &a); err != nil {
log.Fatalf("Error saving to datastore %v", err)
}
}
// GetAllAuthors is an function
func GetAllAuthors(w http.ResponseWriter, r *http.Request) {
query := datastore.NewQuery("Authors").Filter("alive >", false)
it := client.Run(ctx, query)
var authors []data.AuthorEntity
for {
var author data.AuthorEntity
key, err := it.Next(&author)
if err == iterator.Done {
break
}
if err != nil {
log.Fatalf("Error fetching next author: %v", err)
}
author.ID = key.ID
authors = append(authors, author)
}
w.Header().Add("Content-Type", "application/json")
json.NewEncoder(w).Encode(authors)
}
| [
"\"GOOGLE_CLOUD_PROJECT\"",
"\"PORT\""
]
| []
| [
"PORT",
"GOOGLE_CLOUD_PROJECT"
]
| [] | ["PORT", "GOOGLE_CLOUD_PROJECT"] | go | 2 | 0 | |
shared/src/main/java/com/sap/psr/vulas/shared/util/VulasConfiguration.java | package com.sap.psr.vulas.shared.util;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLClassLoader;
import java.net.URLDecoder;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.jar.JarInputStream;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.validation.constraints.NotNull;
import org.apache.commons.configuration.CompositeConfiguration;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationConverter;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.MapConfiguration;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.configuration.SystemConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.sap.psr.vulas.shared.connectivity.Service;
import com.sap.psr.vulas.shared.connectivity.ServiceConnectionException;
/**
* Provides central read/write access to Vulas configuration.
*
* Vulas configuration is composed of multiple, individual {@link Configuration} items.
* When reading a given setting, the list of configuration items is searched until a configuration
* containing the respective setting is found.
*
* The list contains items in the following order:
*
* Writable map properties (added empty by default): Settings provided through {@link VulasConfiguration#setProperty(String, Object, String, boolean)}.
*
* System properties (added by default): Settings specified with the JVM option -D.
*
* Map properties (no default, added through {@link VulasConfiguration#addAfterSystemProperties(Map)}. Results in
* rebuilding the entire composite configuration.
*
* Properties files (added by default): Found in the current work directory (.) and its sub-directories.
*
* Properties files contained in JAR files (by default, the JAR from which {@link VulasConfiguration}
* is loaded is considered): Searched at the very end, these configuration
* items are useful for providing defaults.
*
* @see org.apache.commons.configuration.CompositeConfiguration
*
*/
public class VulasConfiguration {
private static Log log = null;
private static final synchronized Log getLog() {
if(VulasConfiguration.log==null)
VulasConfiguration.log = LogFactory.getLog(VulasConfiguration.class);
return VulasConfiguration.log;
}
private static final String[] LOG_PREFIXES = new String[] {"http", "https", "vulas"};
private static VulasConfiguration global = null;
public static final synchronized VulasConfiguration getGlobal() {
if(global==null)
global = new VulasConfiguration();
return global;
}
/**
* Used for retrieving actual configuration settings.
*/
private org.apache.commons.configuration.CompositeConfiguration cfg = new CompositeConfiguration();
private Path m2 = null;
//=============== Used for building and updating the composite configuration
/**
* All the configurations used to build the composite configuration.
* The single configurations are added by {@link VulasConfiguration#appendInitialConfigurations()}.
*/
private Map<Configuration, String> individualConfigurations = new LinkedHashMap<Configuration, String>();
private Configuration writableConfiguration = new MapConfiguration(new HashMap<String,Object>());
// Add the initial ones right away
public VulasConfiguration( ){
this.appendInitialConfigurations();
}
/**
* Regex used to discover configurations in the file system and (the root folder of) JAR files.
*/
private static final String propertiesRegex = "vulas-.*\\.properties";
/**
* Regex used to discover configurations in the folder BOOT-INF/classes/ of executable Spring JARs.
*/
private static final String propertiesRegexSpring = "BOOT-INF/classes/vulas-.*\\.properties";
public static final String SYS_PROP_CFG_LAYER = "System-Properties";
public static final String TRANSIENT_CFG_LAYER = "Transient-Config-Layer";
public static final String ENV_CFG_LAYER = "Environment-Variables";
/**
* Returns the mutable configuration object for read/write access.
* @return
*/
public org.apache.commons.configuration.Configuration getConfiguration() {
return cfg;
}
/**
* Builds the initial list of configurations that can be extended afterwards
* using {@link VulasConfiguration#appendConfigurationsFromJar(Class)}.
* The initial list only covers system properties and properties files found in
* the file system.
*/
private void appendInitialConfigurations() {
// Search for properties in FS
final Pattern pattern = Pattern.compile(VulasConfiguration.propertiesRegex);
final FilenamePatternSearch fs = new FilenamePatternSearch(pattern);
final Set<Path> paths = fs.search(Paths.get("."));
// Add: Writable map (takes all settings coming through setProperty)
addConfiguration(writableConfiguration, TRANSIENT_CFG_LAYER);
// Add: System properties
addConfiguration(new SystemConfiguration(), SYS_PROP_CFG_LAYER);
// Add: Properties in file system
String pathToFileAsString = null;
for(Path path: paths) {
try {
pathToFileAsString = URLDecoder.decode(path.toString(), "UTF-8");
Configuration config = new PropertiesConfiguration(new File(pathToFileAsString));
addConfiguration(config, pathToFileAsString);
} catch (ConfigurationException e) {
getLog().error("Could not create configuration from file [" + path + "]");
} catch (UnsupportedEncodingException e) {
getLog().error("Encoding not supported");
e.printStackTrace();
}
}
// Add: Environment variables
final Map<String, String> env = System.getenv();
Configuration env_config = new MapConfiguration(env);
addConfiguration(env_config, ENV_CFG_LAYER);
// Add: Properties in JAR files contained in classpath
final ClassLoader cl = VulasConfiguration.class.getClassLoader();
final Set<String> jar_paths = new HashSet<String>();
// Consider JARs known to URLClassLoader
if(cl instanceof URLClassLoader) {
jar_paths.addAll(FileUtil.getJarFilePaths((URLClassLoader)cl));
}
// Search for JARs containing specific configuration files, e.g., vulas-core.properties
else {
jar_paths.addAll(FileUtil.getJarFilePathsForResources(cl, new String[] {"vulas-core.properties", "vulas-java.properties"}));
}
// Search in all JARs
final Set<String> jar_paths_analyzed = new HashSet<String>();
for(String jar_path: jar_paths) {
if(!jar_paths_analyzed.contains(jar_path)) {
//getLog().info("Search configuration info in URL [" + urls[i] + "], JAR [" + jar_path + "]");
appendConfigurationsFromJarPath(jar_path);
jar_paths_analyzed.add(jar_path);
}
else {
//getLog().info("URL [" + urls[i] + "], JAR [" + jar_path + "] already analyzed for configuration info");
}
}
// Log configuration composition and actual settings
this.log(LOG_PREFIXES, " ");
}
private void addConfiguration(Configuration _cfg, String _source) {
if(!individualConfigurations.containsValue(_source)) {
individualConfigurations.put(_cfg, _source);
cfg.addConfiguration(_cfg);
VulasConfiguration.getLog().info("Added configuration [" + _cfg.getClass().getSimpleName() + "] from source [" + _source + "]");
} else {
VulasConfiguration.getLog().warn("Configuration [" + _cfg.getClass().getSimpleName() + "] from source [" + _source + "] already existed and will not be added another time");
}
}
/**
* Puts the given Configuration as a new layer at the given position and with the given name. If a layer with the same name
* already exists at the given position, it will be either deleted or shifted by one position according to the boolean argument.
* In combination with providing null as new configuration, this boolean flag can be used to remove existing layers.
*
* @param _cfg
* @param _source
* @param _position
* @param _replace_if_existing
*/
private boolean putConfiguration(Configuration _cfg, String _source, int _position, boolean _replace_if_existing) {
Map<Configuration, String> tmp = new LinkedHashMap<Configuration, String>();
boolean removed_existing = false;
int i=0;
for(Configuration c: individualConfigurations.keySet()) {
// Wrong position, just append the current layer
if(i!=_position) {
tmp.put(c, individualConfigurations.get(c));
}
// Correct position
else {
// Put new layer (if provided)
if(_cfg!=null)
tmp.put(_cfg, _source);
// Check if current layer at this position is to be removed (replaced)
final String name = individualConfigurations.get(c);
if(_replace_if_existing && name.equals(_source)) {
removed_existing = true;
}
else {
tmp.put(c, name);
}
}
i++;
}
individualConfigurations = tmp;
return removed_existing;
}
/**
* Rebuilds the composite configuration from the list of individual configurations.
* Called after {@link VulasConfiguration#addLayerAfterSysProps(String, Map, String, boolean)}, which adds
* a configuration in the middle of the list rather than appending it to the end.
* Rebuilding is necessary, since {@link CompositeConfiguration} only appends to the end.
*/
private void rebuild() {
cfg.clear();
for(Configuration config: individualConfigurations.keySet())
cfg.addConfiguration(config);
this.log(LOG_PREFIXES, " ");
}
/**
* Adds a {@link MapConfiguration} right after the {@link SystemConfiguration}, which is the second element,
* and before all other configurations. As such, contained settings get precedence before file-based configurations
* in file system or JAR files.
*
* @param _map
* @param _ignore_value if specified, elements will only be added if the value's string representation from this argument
* @param _ignore_null whether or not null values shall be ignored
* @return
* @throws IllegalArgumentException
*/
public void addLayerAfterSysProps(@NotNull String _layer_name, @NotNull Map<?,?> _map, String _ignore_value, boolean _ignore_null) {
final Map<String,Object> map = new HashMap<String,Object>();
Configuration config = null;
// Add value by value to the new layer
if(_map!=null) {
for(Object key: _map.keySet()) {
final Object value = _map.get(key);
if( (value!=null || !_ignore_null) && (_ignore_value==null || !value.toString().equals(_ignore_value)) ) {
map.put(key.toString(), _map.get(key));
}
}
config = new MapConfiguration(map);
}
final int no_layers_before = individualConfigurations.size();
final boolean removed_existing = putConfiguration(config, _layer_name, 2, true);
final int no_layers_after = individualConfigurations.size();
// Log message
final StringBuffer msg = new StringBuffer();
if(_map==null) {
if(removed_existing)
msg.append("Removed configuration layer [").append(_layer_name).append("] from 3rd position");
else
msg.append("No change of configuration layers");
}
else {
if(removed_existing)
msg.append("Replaced existing configuration layer [").append(_layer_name).append("] by new one with [").append(map.size()).append("] settings on 3rd position");
else
msg.append("Added new configuration layer [").append(_layer_name).append("] with [").append(map.size()).append("] settings on 3rd position");
}
msg.append(", old/new number of layers is [").append(no_layers_before).append("/").append(no_layers_after).append("]");
getLog().info(msg.toString());
if(_map!=null || removed_existing)
rebuild();
}
/**
* Returns the {@link Configuration} layer with the given name. If multiple layers with that name exist, the top-most layer will be returned.
* @param _layer_name
* @return
*/
public Configuration getConfigurationLayer(String _layer_name) {
for(Configuration c: this.individualConfigurations.keySet()) {
if(this.individualConfigurations.get(c).equals(_layer_name)) {
return c;
}
}
return null;
}
protected boolean appendConfigurationsFromJarPath(String _jar_path) {
final Map<String, Configuration> jar_entries = this.discoverConfigurationsInJarUri(_jar_path);
for(Map.Entry<String, Configuration> entry: jar_entries.entrySet()) {
this.addConfiguration(entry.getValue(), entry.getKey());
}
return !jar_entries.isEmpty();
}
/**
* Identifies configurations in the JAR from which the given class was loaded, and appends them to
* the configuration list from which the composite configuration will be built.
*
* Afterwards, if such configurations were found, the composite configuration is rebuilt
* from the updated configuration list.
*
* Returns true if configurations were found and added.
*
* @param _clazz
*/
protected boolean appendConfigurationsFromJar(Class<?> _clazz) {
final Map<String, Configuration> jar_entries = this.discoverConfigurationsInJar(_clazz);
for(Map.Entry<String, Configuration> entry: jar_entries.entrySet()) {
addConfiguration(entry.getValue(), entry.getKey());
}
return !jar_entries.isEmpty();
}
private Map<String, Configuration> discoverConfigurationsInJar(Class<?> _clazz) {
Map<String, Configuration> jar_configs = new HashMap<String, Configuration>();
// Get FS path for JAR (if any)
final String jar_path = FileUtil.getJarFilePath(_clazz);
if(jar_path==null) {
getLog().warn("Class [" + _clazz.getName() + "] not loaded from JAR");
} else {
jar_configs = discoverConfigurationsInJarUri(jar_path);
}
return jar_configs;
}
private Map<String, Configuration> discoverConfigurationsInJarUri(String _jar_path) {
final Map<String, Configuration> jar_configs = new HashMap<String, Configuration>();
JarFile jf = null;
try {
jf = new JarFile(Paths.get(_jar_path).toFile());
final Enumeration<JarEntry> entries_enum = jf.entries();
final Pattern pattern = Pattern.compile(VulasConfiguration.propertiesRegex);
final Pattern pattern_spring = Pattern.compile(VulasConfiguration.propertiesRegexSpring);
while(entries_enum.hasMoreElements()) {
final JarEntry entry = entries_enum.nextElement();
final String full_name = _jar_path + ">" + entry.getName();
// Evaluates regex(es)
final Matcher m = pattern.matcher(entry.getName());
final Matcher m_spring = pattern_spring.matcher(entry.getName());
if(m.matches() || m_spring.matches()) {
try {
final Properties prop = new Properties();
prop.load(jf.getInputStream(entry));
jar_configs.put(full_name, ConfigurationConverter.getConfiguration(prop));
} catch (Exception e) {
getLog().error("Error loading properties from JAR entry [" + full_name + "]: " + e.getMessage(), e);
}
} else if(entry.getName().endsWith(".jar") || entry.getName().endsWith(".war")) {
final Map<String, Configuration> nested_configs = this.discoverConfigurationsInNestedJar(full_name, new JarInputStream(jf.getInputStream(entry)));
if(nested_configs!=null && nested_configs.size()>0) {
jar_configs.putAll(nested_configs);
}
}
}
} catch (IOException e) {
getLog().error("Error searching for configurations in JAR [" + _jar_path + "]: " + e.getMessage(), e);
} finally {
if(jf!=null) {
try {
jf.close();
} catch (IOException e) {
getLog().error("Error closing JAR [" + _jar_path + "]: " + e.getMessage(), e);
}
}
}
return jar_configs;
}
private Map<String, Configuration> discoverConfigurationsInNestedJar(String _name, JarInputStream _jis) {
final Map<String, Configuration> jar_configs = new HashMap<String, Configuration>();
try {
final Pattern pattern = Pattern.compile(VulasConfiguration.propertiesRegex);
final Pattern pattern_spring = Pattern.compile(VulasConfiguration.propertiesRegexSpring);
JarEntry entry = null;
while( (entry=_jis.getNextJarEntry())!=null) {
final String full_name = _name + ">" + entry.getName();
// Evaluates regex(es)
final Matcher m = pattern.matcher(entry.getName());
final Matcher m_spring = pattern_spring.matcher(entry.getName());
if(m.matches() || m_spring.matches()) {
try {
final Properties prop = new Properties();
prop.load(new ByteArrayInputStream(this.readContent(_jis)));
jar_configs.put(full_name, ConfigurationConverter.getConfiguration(prop));
} catch (Exception e) {
getLog().error("Error loading properties from JAR entry [" + full_name + "]: " + e.getMessage(), e);
}
}
// Process nested JAR
else if(entry.getName().endsWith(".jar") || entry.getName().endsWith(".war")) {
final Map<String, Configuration> nested_configs = this.discoverConfigurationsInNestedJar(full_name, new JarInputStream(new ByteArrayInputStream(this.readContent(_jis))));
if(nested_configs!=null && nested_configs.size()>0) {
jar_configs.putAll(nested_configs);
}
}
}
} catch (IOException e) {
getLog().error("Error searching for configurations in JAR [" + _name + "]: " + e.getMessage(), e);
} finally {
if(_jis!=null) {
try {
_jis.close();
} catch (IOException e) {
getLog().error("Error closing JAR [" + _name + "]: " + e.getMessage(), e);
}
}
}
return jar_configs;
}
/**
* Reads the content of the current {@link JarEntry} from the given {@link JarInputStream} into a byte array.
* @param _jis
* @return
* @throws IOException
*/
private byte[] readContent(JarInputStream _jis) throws IOException {
byte[] bytes = new byte[1024];
while(_jis.read(bytes, 0, 1024)!=-1) {;} //read()
return bytes;
}
//=============== Stuff for accessing single shared configuration settings
public final static String MAND_SETTINGS = "vulas.shared.settings.mandatory";
public final static String OPTI_SETTINGS = "vulas.shared.settings.optional";
public final static String HOMEPAGE = "vulas.shared.homepage";
public final static String CHARSET = "vulas.shared.charset";
public final static String TMP_DIR = "vulas.shared.tmpDir";
public final static String VULAS_JIRA_USER = "vulas.jira.usr";
public final static String VULAS_JIRA_PWD = "vulas.jira.pwd";
public final static String M2_DIR = "vulas.shared.m2Dir";
public final static String SYS_PROPS = "vulas.shared.sys";
public final static String SYS_PROPS_CUSTOM = "vulas.shared.sys.custom";
public final static String ENV_VARS = "vulas.shared.env";
public final static String ENV_VARS_CUSTOM = "vulas.shared.env.custom";
	/**
	 * Checks mandatory and optional settings and, where provided, the format.
	 * Missing optional settings are only logged as warnings; missing mandatory settings and
	 * format violations are collected and reported together in a single exception at the end.
	 * @throws ConfigurationException if a mandatory setting is missing or a value violates its declared format
	 */
	public void checkSettings() throws ConfigurationException {
		// Optional settings: warn only
		final String[] optional_settings = this.getStringArray(OPTI_SETTINGS, null);
		if(optional_settings!=null && optional_settings.length>0) {
			for(String s: optional_settings) {
				if(this.isEmpty(s)) {
					log.warn("Optional setting [" + s + "] not specified");
				}
			}
		}
		// Mandatory settings: collect every missing one
		final String[] mandatory_settings = this.getStringArray(MAND_SETTINGS, null);
		final Set<String> not_specified = new HashSet<String>();
		if(mandatory_settings!=null && mandatory_settings.length>0) {
			for(String s: mandatory_settings) {
				// Check if empty
				if(this.isEmpty(s)) {
					log.error("Mandatory setting [" + s + "] not specified");
					not_specified.add(s);
				}
			}
		}
		// Check format (where provided): a key "vulas.x.format" holds the regex that
		// every value of the setting "vulas.x" must match
		final Iterator<String> iter = this.cfg.getKeys();
		final Set<String> wrong_format = new HashSet<String>();
		while(iter.hasNext()) {
			final String key = iter.next();
			if(key.startsWith("vulas.") && key.endsWith(".format") && !this.isEmpty(key)) {
				final String key_to_check = key.substring(0, key.indexOf(".format"));
				if(!this.isEmpty(key_to_check)) {
					final String[] values_to_check = this.getStringArray(key_to_check, new String[] {});
					for(String value_to_check: values_to_check) {
						if(!value_to_check.matches(this.cfg.getString(key))) {
							log.error("Setting [" + key_to_check + "], value [" + value_to_check + "] does not comply with the required format [" + this.cfg.getString(key) + "]");
							wrong_format.add(key_to_check);
						}
					}
				}
			}
		}
		// Report all problems at once
		if(!not_specified.isEmpty() || !wrong_format.isEmpty())
			throw new ConfigurationException("The following mandatory settings are not specified: [" + StringUtil.join(not_specified, ", ") + "], the following settings do not comply with the required format: [" + StringUtil.join(wrong_format, ", ") + "]");
	}
/**
* Deletes all transient (not persisted) configuration settings.
* Returns true if the transient configuration layer contained values that were deleted, false otherwise.
*/
public boolean clearTransientProperties() {
final boolean contains_etries = !writableConfiguration.isEmpty();
if(contains_etries)
writableConfiguration.clear();
return contains_etries;
}
/**
* Returns true if the configuration does not contain the given setting or its value is an empty string.
*/
public boolean isEmpty(String _key) {
return !cfg.containsKey(_key) || cfg.getString(_key).equals("");
}
/**
* Reads the setting with the given key (recursively, if the key's value is the name of another setting).
* @param _key
* @param _default
* @return
*/
public Object getProperty(@NotNull String _key, Object _default) {
Object obj = cfg.getProperty(_key);
if(obj==null) {
obj = _default;
}
else if(obj instanceof String) {
String other_key = (String)obj;
if(this.cfg.containsKey(other_key)) {
obj = this.getProperty(other_key, _default);
}
}
return obj;
}
	/**
	 * Sets the given setting to the specified value in case it is empty, i.e., not present
	 * or equal to the empty string (see {@link #isEmpty(String)}).
	 * Note that this setting is transient (not persisted).
	 * @param _key the setting name
	 * @param _value the value to set
	 */
	public void setPropertyIfEmpty(@NotNull String _key, Object _value) {
		if(isEmpty(_key))
			setProperty(_key, _value, null, false);
	}
	/**
	 * Sets the given setting to the specified value, unconditionally.
	 * Note that this setting is transient (not persisted).
	 * @param _key the setting name
	 * @param _value the value to set (null clears the setting)
	 */
	public void setProperty(@NotNull String _key, Object _value) {
		setProperty(_key, _value, null, false);
	}
public void setProperty(@NotNull String _key, Object _value, String _ignore_value, boolean _ignore_null) {
if( (_value!=null || !_ignore_null) && (_ignore_value==null || !_value.toString().equals(_ignore_value)) ) {
final Object before = cfg.getProperty(_key);
if(_value==null)
writableConfiguration.clearProperty(_key);
else
writableConfiguration.setProperty(_key, _value);
final Object after = cfg.getProperty(_key);
// Log everything (to be deleted)
//getLog().info("Setting [" + _key + "] value [before=" + before + ", arg=" + _value + ", after=" + after + "]");
// If the _value contains a comma, the new object will be a String array or ArrayList<String>
final ArrayList<String> array_list = new ArrayList<String>();
// Check that setting worked
if(after!=null && (after.getClass().isArray() || after.getClass().equals(array_list.getClass()))) {}
else if( (_value==null && after!=null) || (_value!=null && !_value.equals(after)) ) {
getLog().error("New value [" + _value + "] for setting [" + _key + "] not set: Before [" + before + "], after [" + after + "]");
}
}
}
/**
* Improves the method {@link Configuration#getStringArray(String)} by adding a default value, which is returned
* if the String array returned for the respective setting is either null or contains a single empty {@link String}.
* @param _key
* @param _default
* @return
*/
public String[] getStringArray(@NotNull String _key, String[] _default) {
String[] value = this.getConfiguration().getStringArray(_key);
if(value!=null && value.length>0 && !(value.length==1 && value[0].equals("")))
return value;
else
return _default;
}
	/**
	 * Returns the configuration setting for the given key as {@link Path}. If no such setting exists, the tmp directory
	 * will be returned. The directory is created if it does not yet exist.
	 * @param _key the setting holding the directory path
	 * @return the configured directory, or the tmp directory as fallback
	 */
	public Path getDir(String _key) {
		Path p = null;
		if(!this.isEmpty(_key))
			p = Paths.get(this.getConfiguration().getString(_key));
		else
			p = this.getTmpDir();
		// Create if necessary
		FileUtil.createDirectory(p);
		return p;
	}
/**
* Creates if necessary and returns the temporary directory to be used by Vulas. This is either the directory
* indicated by the configuration setting TMP_DIR (if any) or the OS' temporary directory.
*/
public Path getTmpDir() {
Path p = null;
if(!this.isEmpty(TMP_DIR))
p = Paths.get(cfg.getString(TMP_DIR));
else
p = Paths.get(System.getProperty("java.io.tmpdir"));
// Create if necessary
FileUtil.createDirectory(p);
return p;
}
	/**
	 * Returns the URL of the given service, or null if none is configured.
	 * A missing URL is never thrown from this variant; delegation happens with
	 * _throw_exception=false, so the catch block is purely defensive.
	 * @param _service the service whose URL shall be read
	 * @return the configured URL, or null
	 */
	public String getServiceUrl(Service _service) {
		String value = null;
		try {
			value = getServiceUrl(_service, false);
		} catch (ServiceConnectionException e) {
			getLog().warn(e.getMessage(), e);
		}
		return value;
	}
	/**
	 * Returns the URL of the given service.
	 * @param _service the service whose URL shall be read
	 * @param _throw_exception if true, a missing URL results in an exception instead of null
	 * @return the configured URL, or null if none is configured and _throw_exception is false
	 * @throws ServiceConnectionException if no URL is configured and _throw_exception is true
	 */
	public String getServiceUrl(Service _service, boolean _throw_exception) throws ServiceConnectionException {
		final String key = VulasConfiguration.getServiceUrlKey(_service);
		final String value = cfg.getString(key, null);
		if(_throw_exception && value==null)
			throw new ServiceConnectionException("Service URL is not configured (parameter [" + key + "])", null);
		return value;
	}
public boolean hasServiceUrl(Service _service) {
final String key = VulasConfiguration.getServiceUrlKey(_service);
return !this.isEmpty(key);
}
public void setServiceUrl(Service _service, String _value) throws IllegalArgumentException {
final String key = VulasConfiguration.getServiceUrlKey(_service);
URI uri;
try {
uri = new URI(_value);
getConfiguration().setProperty(key, uri.toString());
getLog().info("Set [" + key + "] to URL [" + _value + "]");
} catch (URISyntaxException e) {
throw new IllegalArgumentException("Invalid URL [" + _value + "], cannot set [" + key + "]");
}
}
public static String getServiceUrlKey(Service _service) {
return "vulas.shared." + _service.toString().toLowerCase() + ".serviceUrl";
}
	/**
	 * Returns the path to the m2 repository, creating it if necessary.
	 * Resolution order: the M2_DIR setting, then $HOME/.m2/repository, then
	 * &lt;tmp&gt;/vulas-m2/repository. The resolved path is cached in this.m2;
	 * on failure the cache is reset so that a later call can retry.
	 * @return the writable m2 repository path
	 * @throws IllegalStateException if the directory cannot be created or is not writable
	 */
	public Path getLocalM2Repository() throws IllegalStateException {
		if(this.m2==null) {
			String m2_path = null;
			// Explicitly specified
			if(!this.isEmpty(M2_DIR)) {
				m2_path = cfg.getString(M2_DIR);
				this.m2 = Paths.get(m2_path);
			}
			// Use other dir
			else {
				if(System.getenv("HOME")!=null) {
					m2_path = System.getenv("HOME");
					this.m2 = Paths.get(m2_path, ".m2", "repository");
				}
				else {
					m2_path = getTmpDir().toString();
					this.m2 = Paths.get(m2_path, "vulas-m2", "repository");
				}
			}
			try {
				// Create if not existing
				if(!this.m2.toFile().exists())
					Files.createDirectories(m2);
				// Is writable?
				if(!this.m2.toFile().canWrite())
					throw new IllegalStateException("No write permission");
			} catch (Exception e) {
				// Reset the cached path so the next call starts from scratch
				getLog().info("Error configuring the m2 directory [" + m2_path + "], artifacts will not be cached: " + e.getMessage());
				this.m2 = null;
				throw new IllegalStateException("Error configuring the m2 directory [" + m2_path + "], artifacts will not be cached: " + e.getMessage(), e);
			}
		}
		return this.m2;
	}
	/**
	 * Prints settings having the given prefixes to the log: first the individual
	 * configurations that make up the composite (with their entry counts), then the
	 * effective key/value pairs for every prefix.
	 * @param _prefix the setting prefixes to print
	 * @param _indent prepended to every key/value line (may be null)
	 */
	public void log(String[] _prefix, String _indent) {
		// Print all configurations considered
		int count = 0;
		for(Map.Entry<Configuration, String> entry: individualConfigurations.entrySet()) {
			int count_entries=0;
			final Iterator<String> iter = entry.getKey().getKeys();
			while(iter.hasNext()) { count_entries++; iter.next(); }
			VulasConfiguration.getLog().info("Configuration [" + ++count + "]: " + entry.getValue() + ", [" + count_entries + "] entries");
		}
		// Print actual values that result from that composition
		final StringBuilder builder = new StringBuilder();
		builder.append("Configuration with prefix(es) [");
		for(int i=0; i<_prefix.length; i++) {
			if(i>0) builder.append(", ");
			builder.append(_prefix[i]);
		}
		builder.append("]");
		getLog().info(builder.toString());
		for(int i=0; i<_prefix.length; i++) {
			final Configuration config = cfg.subset(_prefix[i]);
			// Sort all the keys
			final SortedSet<String> keys = new TreeSet<String>();
			final Iterator<String> iter = config.getKeys();
			while(iter.hasNext()) keys.add(iter.next());
			// Print keys and values, skipping settings that are empty
			for(String key: keys)
				if(!isEmpty(_prefix[i] + "." + key))
					getLog().info((_indent==null?"":_indent) + _prefix[i] + "." + key + "=" + config.getProperty(key).toString());
		}
	}
/**
* Returns a {@link StringList} containing items taken from the given configuration settings. Each configuration settings is
* expected to contain one or more values (comma-separated), which are trimmed and added to the {@link StringList}.
* @return
*/
public final StringList getStringList(String... _config_names) {
final StringList l = new StringList();
if(_config_names!=null && _config_names.length>0) {
for(String config_name: _config_names) {
l.addAll(this.getStringArray(config_name, new String[] {}), true);
}
}
return l;
}
}
| [
"\"HOME\"",
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | java | 1 | 0 | |
pythonup/versions.py | import dataclasses
import os
import re
import shutil
import subprocess
import sys
import packaging.version
from . import installations, paths
class VersionNotFoundError(ValueError):
    """Raised when a version name cannot be parsed as 'major.minor'."""
    pass
def iter_installable_matches():
    """Iterate through CPython versions available for PythonUp to install.
    """
    pattern = re.compile(r'^(\d+\.\d+)\.\d+$')
    output = subprocess.check_output(
        ['python-build', '--definitions'], encoding='ascii',
    )
    for definition in output.splitlines():
        found = pattern.match(definition)
        if found is not None:
            yield found
@dataclasses.dataclass(order=True, frozen=True)
class Version:
    """An installable CPython feature version (major.minor); ordered and immutable."""

    major: int
    minor: int

    @classmethod
    def parse(cls, name: str) -> 'Version':
        """Parse a 'X.Y' string into a Version.

        Raises VersionNotFoundError if the name is not of the form 'X.Y'.
        """
        match = re.match(r'^(?P<major>\d+)\.(?P<minor>\d+)$', name)
        if not match:
            raise VersionNotFoundError(name)
        return cls(
            major=int(match.group('major')),
            minor=int(match.group('minor')),
        )

    def __str__(self) -> str:
        return self.name

    @property
    def name(self) -> str:
        # Canonical 'major.minor' spelling, e.g. '3.8'.
        return f'{self.major}.{self.minor}'

    @property
    def python_commands(self):
        # Path(s) of this version's python command in the command directory.
        return [paths.get_cmd_dir().joinpath(f'python{self.name}')]

    @property
    def pip_commands(self):
        # Path(s) of this version's pip command in the command directory.
        return [paths.get_cmd_dir().joinpath(f'pip{self.name}')]

    def iter_matched_build_name(self):
        """Iterate through CPython version names matching this version.
        """
        for match in iter_installable_matches():
            if match.group(1) == self.name:
                yield match.group(0)

    def find_best_build_name(self) -> str:
        """Return the highest matching full build name (e.g. '3.8.10')."""
        return max(
            self.iter_matched_build_name(),
            key=packaging.version.Version,
        )

    def install(self, *, build_name=None):
        """Build and install this version via python-build; return the installation.

        On macOS the build is forced to --enable-framework via
        PYTHON_CONFIGURE_OPTS.
        """
        if build_name is None:
            build_name = self.find_best_build_name()
        installation = self.find_installation(strict=False)
        env = os.environ.copy()
        if sys.platform == 'darwin':
            opts = env.get('PYTHON_CONFIGURE_OPTS', '').split()
            opts.append('--enable-framework')
            env['PYTHON_CONFIGURE_OPTS'] = ' '.join(opts)
        subprocess.check_call(
            ['python-build', build_name, str(installation.root)],
            env=env,
        )
        return installation

    def uninstall(self):
        """Remove this version's installation directory; return the removed root."""
        root = self.find_installation().root
        shutil.rmtree(root)
        return root

    def find_installation(self, *, strict=True):
        # Delegates lookup (and strictness semantics) to installations.Installation.find.
        return installations.Installation.find(self, strict=strict)
def iter_versions():
    """Yield each distinct installable major.minor Version, in discovery order."""
    seen = set()
    for match in iter_installable_matches():
        name = match.group(1)
        if name in seen:
            continue
        seen.add(name)
        yield Version.parse(name)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
hardware/testbenches/mesh/nx_aggregator/testbench/testbench.py | # Copyright 2021, Peter Birch, mailto:[email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cocotb
from cocotb.triggers import RisingEdge
from cocotb_bus.scoreboard import Scoreboard
from tb_base import TestbenchBase
from drivers.basic.unstrobed import UnstrobedMonitor
from drivers.io_common import IORole
from drivers.stream.io import StreamIO
from drivers.stream.init import StreamInitiator
from drivers.stream.resp import StreamResponder
class Testbench(TestbenchBase):
    """Testbench for the DUT: wraps its stream interfaces with drivers/monitors
    and registers expected-transaction queues with a cocotb scoreboard."""

    def __init__(self, dut):
        """ Initialise the testbench.

        Args:
            dut: Pointer to the DUT
        """
        super().__init__(dut)
        # Wrap I/Os
        self.node_id = self.dut.i_node_id
        self.idle = self.dut.o_idle
        # Setup drivers/monitors
        self.inbound = StreamInitiator(
            self, self.clk, self.rst,
            StreamIO(self.dut, "inbound", IORole.RESPONDER)
        )
        self.passthrough = StreamInitiator(
            self, self.clk, self.rst,
            StreamIO(self.dut, "passthrough", IORole.RESPONDER)
        )
        self.outbound = StreamResponder(
            self, self.clk, self.rst,
            StreamIO(self.dut, "outbound", IORole.INITIATOR)
        )
        self.outputs = UnstrobedMonitor(
            self, self.clk, self.rst, self.dut.o_outputs
        )
        # Create expected outbound queues (drained by the scoreboard)
        self.exp_stream = []
        self.exp_output = []
        # Create a scoreboard; FAIL_IMMEDIATELY=yes aborts on the first mismatch
        imm_fail = (os.environ.get("FAIL_IMMEDIATELY", "no").lower() == "yes")
        self.scoreboard = Scoreboard(self, fail_immediately=imm_fail)
        self.scoreboard.add_interface(self.outbound, self.exp_stream, reorder_depth=100)
        self.scoreboard.add_interface(self.outputs, self.exp_output)

    async def initialise(self):
        """ Initialise the DUT's I/O """
        await super().initialise()
        self.inbound.intf.initialise(IORole.INITIATOR)
        self.passthrough.intf.initialise(IORole.INITIATOR)
        self.outbound.intf.initialise(IORole.RESPONDER)
        # NOTE(review): __init__ stores the signal as `self.node_id`, not
        # `self.i_node_id` — presumably resolved via TestbenchBase attribute
        # forwarding to the DUT; confirm this drives dut.i_node_id.
        self.i_node_id <= 0
class testcase(cocotb.test):
    """cocotb.test subclass that constructs a Testbench around the DUT, runs the
    decorated coroutine, waits for the scoreboard to drain both expected queues,
    and raises the scoreboard result as the test outcome."""

    def __call__(self, dut, *args, **kwargs):
        async def __run_test():
            tb = Testbench(dut)
            await self._func(tb, *args, **kwargs)
            # Block until every expected transaction has been matched
            while tb.exp_stream: await RisingEdge(tb.clk)
            while tb.exp_output: await RisingEdge(tb.clk)
            raise tb.scoreboard.result
        return cocotb.decorators.RunningTest(__run_test(), self)
| []
| []
| [
"FAIL_IMMEDIATELY"
]
| [] | ["FAIL_IMMEDIATELY"] | python | 1 | 0 | |
plugins/deploy/packages/build/build.go | package gogurt
// Contains structures to ease running of common operations.
// TODO: Have RunCmds(exec.Command...) and it just runs through all in sequence, stopping if one errors.
import (
"fmt"
"math"
"os"
"os/exec"
"strconv"
"strings"
)
// ConfigureCmd describes an autotools `./configure` invocation: the install
// prefix, extra arguments, toolchain overrides, and the flag/path lists that
// are exported to the child process' environment.
type ConfigureCmd struct {
	Prefix string   // passed as --prefix=<Prefix>
	Args   []string // additional ./configure arguments

	CC  string // exported as CC when non-empty
	CPP string // exported as CPP when non-empty
	CXX string // exported as CXX when non-empty

	CFlags   []string // exported space-joined as CFLAGS
	CppFlags []string // exported space-joined as CPPFLAGS
	CxxFlags []string // exported space-joined as CXXFLAGS
	LdFlags  []string // exported space-joined as LDFLAGS
	Libs     []string // exported space-joined as LIBS

	Paths          []string // prepended colon-joined to PATH
	PkgConfigPaths []string // exported colon-joined as PKG_CONFIG_PATH

	Dir string // working directory for the command, if non-empty
}
// Cmd builds the *exec.Cmd for `./configure --prefix=<Prefix> <Args...>`,
// inheriting the current environment and appending only the toolchain and
// flag variables that are actually set. Stdout/stderr are forwarded.
func (configure ConfigureCmd) Cmd() *exec.Cmd {
	args := append(configure.Args, "--prefix="+configure.Prefix)
	cmd := exec.Command("./configure", args...)
	cmd.Env = os.Environ()
	if len(configure.CC) > 0 {
		cmd.Env = append(cmd.Env, "CC="+configure.CC)
	}
	if len(configure.CPP) > 0 {
		cmd.Env = append(cmd.Env, "CPP="+configure.CPP)
	}
	if len(configure.CXX) > 0 {
		cmd.Env = append(cmd.Env, "CXX="+configure.CXX)
	}
	if len(configure.CFlags) > 0 {
		cmd.Env = append(cmd.Env, "CFLAGS="+strings.Join(configure.CFlags, " "))
	}
	if len(configure.CppFlags) > 0 {
		cmd.Env = append(cmd.Env, "CPPFLAGS="+strings.Join(configure.CppFlags, " "))
	}
	if len(configure.CxxFlags) > 0 {
		cmd.Env = append(cmd.Env, "CXXFLAGS="+strings.Join(configure.CxxFlags, " "))
	}
	if len(configure.LdFlags) > 0 {
		cmd.Env = append(cmd.Env, "LDFLAGS="+strings.Join(configure.LdFlags, " "))
	}
	if len(configure.Libs) > 0 {
		cmd.Env = append(cmd.Env, "LIBS="+strings.Join(configure.Libs, " "))
	}
	// Extra lookup paths take precedence over the inherited PATH.
	if len(configure.Paths) > 0 {
		cmd.Env = append(cmd.Env, "PATH="+strings.Join(configure.Paths, ":")+":"+os.Getenv("PATH"))
	}
	if len(configure.PkgConfigPaths) > 0 {
		cmd.Env = append(cmd.Env, "PKG_CONFIG_PATH="+strings.Join(configure.PkgConfigPaths, ":"))
	}
	if len(configure.Dir) > 0 {
		cmd.Dir = configure.Dir
	}
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd
}
// MakeCmd describes a `make` invocation.
type MakeCmd struct {
	Jobs  uint     // parallel job count, clamped to at least 1 (--jobs=N)
	Args  []string // additional make arguments
	Dir   string   // working directory for the command, if non-empty
	Paths []string // prepended colon-joined to PATH
}
// Cmd builds the *exec.Cmd for `make <Args...> --jobs=N`, run in makeCmd.Dir
// with makeCmd.Paths prepended to PATH. Stdout/stderr are forwarded.
func (makeCmd MakeCmd) Cmd() *exec.Cmd {
	// Clamp the job count to at least 1 (Jobs may be the zero value).
	jobs := int(math.Max(1, float64(makeCmd.Jobs)))
	args := append(makeCmd.Args, "--jobs="+strconv.Itoa(jobs))
	cmd := exec.Command("make", args...)
	cmd.Env = os.Environ()
	if len(makeCmd.Paths) > 0 {
		cmd.Env = append(cmd.Env, "PATH="+strings.Join(makeCmd.Paths, ":")+":"+os.Getenv("PATH"))
	}
	if len(makeCmd.Dir) > 0 {
		cmd.Dir = makeCmd.Dir
	}
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd
}
// CMakeCmd describes a cmake configure step.
type CMakeCmd struct {
	// Path to cmake binary
	// If empty, we will look up cmake on $PATH
	Path string

	// TODO: Rename to InstallPrefix
	Prefix string // passed as -DCMAKE_INSTALL_PREFIX=<Prefix>

	// Used when searching for include files and libraries
	// TODO: Rename to PrefixPath
	PathPrefix []string // exported colon-joined as CMAKE_PREFIX_PATH

	SourceDir string // appended as the last cmake argument
	BuildDir  string // working directory the configure step runs in

	CacheEntries map[string]string // each entry becomes -D<key>=<value>
	// Generator string // TODO: Add once we get Ninja

	Paths          []string // prepended colon-joined to PATH
	CFlags         []string // NOTE(review): currently unused by Cmd — confirm intent
	PkgConfigPaths []string // exported colon-joined as PKG_CONFIG_PATH
}
// Cmd builds the *exec.Cmd that configures cmakeCmd.SourceDir inside
// cmakeCmd.BuildDir, turning Prefix and CacheEntries into -D definitions.
// Fix: the argument slice was previously created with make([]string, 1),
// which seeded it with one empty string that was passed to cmake as an
// empty first argument.
func (cmakeCmd CMakeCmd) Cmd() *exec.Cmd {
	cacheEntries := make([]string, 0, len(cmakeCmd.CacheEntries)+1)
	if len(cmakeCmd.Prefix) > 0 {
		cacheEntries = append(cacheEntries, "-DCMAKE_INSTALL_PREFIX="+cmakeCmd.Prefix)
	}
	for key, value := range cmakeCmd.CacheEntries {
		cacheEntries = append(cacheEntries, fmt.Sprintf("-D%s=%s", key, value))
	}
	// Use the explicitly provided cmake binary if set, otherwise rely on $PATH lookup.
	var cmd *exec.Cmd
	if len(cmakeCmd.Path) > 0 {
		cmd = exec.Command(cmakeCmd.Path, cacheEntries...)
	} else {
		cmd = exec.Command("cmake", cacheEntries...)
	}
	cmd.Args = append(cmd.Args, cmakeCmd.SourceDir)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Dir = cmakeCmd.BuildDir
	cmd.Env = os.Environ()
	if len(cmakeCmd.PathPrefix) > 0 {
		cmd.Env = append(cmd.Env, "CMAKE_PREFIX_PATH="+strings.Join(cmakeCmd.PathPrefix, ":"))
	}
	if len(cmakeCmd.PkgConfigPaths) > 0 {
		cmd.Env = append(cmd.Env, "PKG_CONFIG_PATH="+strings.Join(cmakeCmd.PkgConfigPaths, ":"))
	}
	if len(cmakeCmd.Paths) > 0 {
		cmd.Env = append(cmd.Env, "PATH="+strings.Join(cmakeCmd.Paths, ":")+":"+os.Getenv("PATH"))
	}
	fmt.Println(cmd) // NOTE(review): looks like leftover debug output — confirm before removing
	return cmd
}
| [
"\"PATH\"",
"\"PATH\"",
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
Pixivic Crawler/main.pyw | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import re
import sys
import threading
from PyQt5 import QtWidgets
from PyQt5.QtCore import QThread, pyqtSignal, Qt
from PyQt5.QtWidgets import QFileDialog
import config
import pixivic
import ui
class WorkThread(QThread):  # 定义QThread的子类线程 Define subclass threads of QThread
    """Worker thread; ``mode_thread`` selects which long-running task run() performs."""

    signal = pyqtSignal(str)

    def __init__(self):
        # Fix: this constructor was misspelled ``__int__`` and therefore never ran;
        # the default QThread.__init__ happened to keep things working.
        super(WorkThread, self).__init__()
        self.mode_thread = ''

    def run(self):  # Dispatch to the task selected by mode_thread
        # NOTE(review): these call MainUi methods on the class instead of an
        # instance (no window reference is passed) — verify how the running
        # MainUi instance is supposed to be reached before relying on this.
        if self.mode_thread == 'download':
            MainUi.thread_download_pic()
        if self.mode_thread == 'start_button':
            MainUi.thread_start_button_event()
        if self.mode_thread == 'date_sure':
            MainUi.thread_date_sure_event()
        if self.mode_thread == 'is_sure':
            MainUi.thread_id_sure_event()
        self.signal.emit('结束')
class MainUi(QtWidgets.QMainWindow):
    """Main window of the Pixivic crawler GUI.

    Wires the generated Qt widgets to the pixivic crawler and to the
    WorkThread workers that run the slow network operations off the UI thread.

    NOTE(review): ``self.pixivic_crawler`` is only created in load_config()'s
    success path; the *_event handlers assume it exists — confirm the token
    flow guarantees that before any download can be started.
    """

    def __init__(self):
        super(MainUi, self).__init__()
        # Define child window (captcha / token dialog)
        self.child_window = QtWidgets.QWidget()
        self.cu = ChildUi()
        self.cu.setup_ui(self.child_window)
        self.ui_controls = ui.UiPixivicCrawler()
        # Guards the counters and lists shared between download threads
        self.lock = threading.RLock()
        # Initialization values
        self.img_url_list = []       # image ids still to be downloaded
        self.keyword_list = []       # recommended keyword dicts from the site
        self.mode = ''               # current mode: 'search' | 'daily' | 'art_id'
        self.res_num = 0             # number of images found for this request
        self.filed_num = 0           # failed downloads so far (sic: "failed")
        self.success_num = 0         # successful downloads so far
        self.running_thread_num = 0  # download worker threads still running
        self.completed_done_num = 0  # images already handed to workers
        self.folder_name = ''
        self.token = ''
        # Pre-allocate the worker threads for the slow button actions and
        # tag each one with the task type its run() should execute
        self.thread_start_button = WorkThread()
        self.thread_start_button.mode_thread = 'start_button'
        self.thread_date_sure = WorkThread()
        self.thread_date_sure.mode_thread = 'date_sure'
        self.thread_id_sure = WorkThread()
        self.thread_id_sure.mode_thread = 'is_sure'
        self.t1 = WorkThread()
        self.t2 = WorkThread()
        self.t3 = WorkThread()
        self.t4 = WorkThread()
        self.t1.mode_thread = 'download'
        self.t2.mode_thread = 'download'
        self.t3.mode_thread = 'download'
        self.t4.mode_thread = 'download'

    def setup_ui(self, main_window) -> None:  # Connect widgets to event handlers
        """Build the UI into main_window, connect all signals, then load the config."""
        self.ui_controls.setupUi(main_window)
        self.ui_controls.start_button.clicked.connect(self.start_button_event)
        self.ui_controls.sure_keyword.clicked.connect(self.sure_keyword_event)
        self.ui_controls.save_as.clicked.connect(self.save_as_event)
        self.ui_controls.get_token_button.clicked.connect(
            self.get_token_button_event)
        self.ui_controls.date_sure.clicked.connect(self.date_sure_event)
        self.ui_controls.id_sure.clicked.connect(self.id_sure_event)
        self.ui_controls.actionSave_setting.triggered.connect(
            self.save_setting_event)
        self.ui_controls.exit.triggered.connect(main_window.close)
        self.ui_controls.help.triggered.connect(self.help_event)
        self.load_config()

    def start_button_event(self) -> None:
        """Run the search-mode download on its worker thread."""
        self.thread_start_button.start()

    def date_sure_event(self) -> None:
        """Run the daily-ranking download on its worker thread."""
        self.thread_date_sure.start()

    def id_sure_event(self) -> None:
        """Run the artist/illustration-id download on its worker thread."""
        self.thread_id_sure.start()

    # Start button in search mode (runs on a WorkThread)
    def thread_start_button_event(self) -> None:
        """Fetch the image list for the current keyword and start the downloads."""
        path = self.ui_controls.path_box.text()
        keyword = self.ui_controls.keyword.text()
        num = int(self.ui_controls.num_box.text())
        thread_num = int(self.ui_controls.threads_num_box.text())
        is_filter = self.ui_controls.is_filter.isChecked()
        self.mode = 'search'
        self.folder_name = '%s %d张' % (keyword, num)
        if all([path, keyword]):
            self.ui_controls.printbox.appendPlainText('正在获取图片列表,请等待。')
            self.disable(True)
            self.img_url_list = self.pixivic_crawler.get_img_url_list(
                keyword, num, 'search', is_filter)
            self.res_num = len(self.img_url_list)
            if self.res_num == 0:
                # Request timed out or the keyword is blocked by the site
                self.disable(False)
                # self.ui_controls.start_button.isEnabled(False)
                self.ui_controls.printbox.appendPlainText(
                    '网络请求超时或者该关键词被屏蔽,请重试。')
            else:
                self.ui_controls.printbox.appendPlainText(
                    '成功获取图片列表,共%d张' % self.res_num)
                self.ui_controls.printbox.appendPlainText('正在准备图片下载www')
                path = self.ui_controls.path_box.text() + '\\' + self.folder_name
                if not os.path.exists(path):
                    os.makedirs(path)
                self.thread_start(thread_num)

    def thread_date_sure_event(self) -> None:
        """Fetch the image list for the selected date (daily mode) and start downloads."""
        self.mode = 'daily'
        date = self.ui_controls.date_time.date().toString(Qt.ISODate)
        num = int(self.ui_controls.num_box.text())
        thread_num = int(self.ui_controls.threads_num_box.text())
        is_filter = self.ui_controls.is_filter.isChecked()
        path = self.ui_controls.path_box.text()
        self.folder_name = '%s %d张' % (date, num)
        if all([date, num, thread_num, path]):
            self.disable(True)
            self.ui_controls.printbox.appendPlainText('正在寻找对应图片,请稍等')
            self.img_url_list = self.pixivic_crawler.get_img_url_list(
                date, num, self.mode, is_filter)
            self.res_num = len(self.img_url_list)
            if self.res_num == 0:
                # Request timed out or the date's content is blocked
                self.disable(False)
                # self.ui_controls.start_button.isEnabled(False)
                self.ui_controls.printbox.appendPlainText(
                    '网络请求超时或者该关键词被屏蔽,请重试。')
            else:
                self.ui_controls.printbox.appendPlainText(
                    '成功获取图片列表,共%d张' % self.res_num)
                self.ui_controls.printbox.appendPlainText('正在准备图片下载www')
                path = self.ui_controls.path_box.text() + '\\' + self.folder_name
                if not os.path.exists(path):
                    os.makedirs(path)
                self.thread_start(thread_num)

    def thread_id_sure_event(self) -> None:  # id mode ("mode3")
        """Fetch images by artist id or illustration id and start the downloads."""
        path = self.ui_controls.path_box.text()
        thread_num = int(self.ui_controls.threads_num_box.text())
        # Combo index 0 = artist id, otherwise illustration id
        if self.ui_controls.id_mode.currentIndex() == 0:
            mode = 'artistId'
        else:  # self.ui_controls.id_mode.currentIndex() == 1
            mode = 'illustsId'
        art_id = self.ui_controls.id_of_art.text()
        self.folder_name = art_id
        if all([path, art_id]):
            if mode == 'illustsId':
                self.mode = 'art_id'
                res = self.pixivic_crawler.get_art_id_url_list(art_id, mode)
                if len(res) == 0:
                    self.ui_controls.printbox.appendPlainText(
                        '网络请求超时或者该作品或被屏蔽,请重试。')
                else:
                    # First element carries the artist data, the rest are image urls
                    self.img_url_list = res[1:]
                    artist_data = res[0]
                    self.res_num = len(self.img_url_list)
                    self.disable(True)
                    self.ui_controls.printbox.appendPlainText('画师信息如下')
                    self.ui_controls.printbox.appendPlainText(
                        '名字:' + artist_data['name'])
                    self.ui_controls.printbox.appendPlainText(
                        'id:' + str(artist_data['id']))
                    self.ui_controls.printbox.appendPlainText(
                        '成功获取图片列表,共%d张' % self.res_num)
                    self.ui_controls.printbox.appendPlainText('正在准备图片下载www')
                    path = self.ui_controls.path_box.text() + '\\' + self.folder_name
                    if not os.path.exists(path):
                        os.makedirs(path)
                    # Small result sets are downloaded single-threaded
                    if self.res_num >= 4:
                        self.thread_start(thread_num)
                    else:
                        self.thread_start(1)
            if mode == 'artistId':
                self.mode = 'art_id'
                self.img_url_list = self.pixivic_crawler.get_art_id_url_list(
                    art_id, mode)
                self.res_num = len(self.img_url_list)
                if self.res_num == 0:
                    self.ui_controls.printbox.appendPlainText(
                        '网络请求超时或者该作者无作品或被屏蔽,请重试。')
                else:
                    self.disable(True)
                    self.ui_controls.printbox.appendPlainText(
                        '成功获取图片列表,共%d张' % self.res_num)
                    self.ui_controls.printbox.appendPlainText('正在准备图片下载www')
                    path = self.ui_controls.path_box.text() + '\\' + self.folder_name
                    if not os.path.exists(path):
                        os.makedirs(path)
                    self.thread_start(thread_num)

    def sure_keyword_event(self) -> None:  # "Confirm keyword" button
        """Confirm the keyword, optionally fetching recommended search terms first."""
        if self.ui_controls.keyword.text():
            keyword = self.ui_controls.keyword.text()
            if self.ui_controls.is_recommend.isChecked():
                # Fetch recommended keywords and let the user pick one
                self.keyword_list = self.pixivic_crawler.get_recommend_keyword_list(
                    keyword)
                self.ui_controls.keyword_list.clear()
                if len(self.keyword_list) == 0:
                    # The recommended keywords returned by the site are empty
                    self.ui_controls.printbox.clear()
                    self.ui_controls.printbox.appendPlainText(
                        '该关键词没有推荐搜索词,可能被网站屏蔽')
                else:
                    for keyword_index, keyword_data in enumerate(
                            self.keyword_list, start=1):
                        item_text = '%d.%s (翻译:%s)' % (
                            keyword_index, keyword_data['keyword'], keyword_data['keywordTranslated'])
                        self.ui_controls.keyword_list.addItem(item_text)
                    self.ui_controls.printbox.clear()
                    self.ui_controls.printbox.appendPlainText('请在栏中选择并再次确定')
                    self.ui_controls.is_recommend.setChecked(False)
            elif self.ui_controls.keyword_list.count() != 0:
                # A recommendation was listed: adopt the selected entry
                keyword_index = self.ui_controls.keyword_list.currentIndex()
                keyword = self.keyword_list[keyword_index]['keyword']
                self.ui_controls.keyword.setText(keyword)
                self.ui_controls.printbox.clear()
                self.ui_controls.printbox.appendPlainText('请按开始以开始下载')
                self.ui_controls.start_button.setEnabled(True)
                # Keyword confirmed: enable the start button
            else:  # Keyword entered without using recommendations
                self.ui_controls.printbox.clear()
                self.ui_controls.printbox.appendPlainText('请按开始以开始下载')
                self.ui_controls.start_button.setEnabled(True)
                # Keyword confirmed: enable the start button
        else:  # No keyword entered
            self.ui_controls.printbox.appendPlainText('请输入关键词')

    def save_as_event(self) -> None:
        """Ask the user for the download directory and store it in the path box."""
        # Get the save path
        download_path = QFileDialog.getExistingDirectory(self, "选取文件夹", "./")
        self.ui_controls.path_box.setText(download_path)

    def get_token_button_event(self) -> None:
        """Open the child window so the user can solve the captcha and fetch a token."""
        self.child_window.show()
        self.cu.refresh_captcha()

    def thread_start(self, thread_num: int) -> None:
        """Start up to four pre-allocated download threads.

        NOTE(review): the signal is connect()-ed on every call, so repeated
        downloads accumulate duplicate slot connections — confirm intent.
        """
        if thread_num >= 1:
            self.t1.start()
            self.t1.signal.connect(self._thread_download_done)
            self.running_thread_num += 1
        if thread_num >= 2:
            self.t2.start()
            self.t2.signal.connect(self._thread_download_done)
            self.running_thread_num += 1
        if thread_num >= 3:
            self.t3.start()
            self.t3.signal.connect(self._thread_download_done)
            self.running_thread_num += 1
        if thread_num >= 4:
            self.t4.start()
            self.t4.signal.connect(self._thread_download_done)
            self.running_thread_num += 1

    def save_setting_event(self) -> None:
        """Persist the current UI settings (and token) to the config file."""
        thread_num = self.ui_controls.threads_num_box.text()
        num = self.ui_controls.num_box.text()
        is_recommend = self.ui_controls.is_recommend.isChecked()
        is_filter = self.ui_controls.is_filter.isChecked()
        path = self.ui_controls.path_box.text()
        if all([thread_num, num, str(is_recommend),
                str(is_filter), path, self.token]):
            config.configuration_ini(
                thread_num,
                num,
                self.token,
                path,
                is_recommend,
                is_filter)
            self.ui_controls.printbox.appendPlainText('成功保存设置')
            self.ui_controls.sure_keyword.setEnabled(True)
            self.ui_controls.date_sure.setEnabled(True)
        else:
            # Incomplete settings: check that path and token were obtained
            self.ui_controls.printbox.appendPlainText(
                '保存的参数不完整,请检查路径和token是否获取')

    def help_event(self) -> None:
        # Help is not implemented yet
        self.ui_controls.printbox.appendPlainText('该功能将在之后开启')

    def thread_download_pic(self) -> None:
        """Download loop run by each worker thread.

        Pops image ids off the shared list under the lock, downloads each one
        (with a single retry), and updates the shared progress counters.
        """
        path = self.ui_controls.path_box.text() + '\\' + self.folder_name
        while len(self.img_url_list) != 0:
            self.lock.acquire()
            try:
                img_id = self.img_url_list.pop(0)
                self.completed_done_num += 1
                pic_num = self.completed_done_num
                pic_name = re.findall(r"[0-9]+/([0-9]*_p[0-9]*.*)", img_id)[0]
                pic_path = path + '\\%d.%s' % (pic_num, pic_name)
            finally:
                self.lock.release()
            if pixivic.PixCrawler.download_pic(img_id, pic_path):
                self.lock.acquire()
                try:
                    self.success_num += 1
                    self.ui_controls.printbox.appendPlainText(
                        '图片下载中(%d/%d)' %
                        (self.success_num + self.filed_num, self.res_num))
                finally:
                    self.lock.release()
            elif pixivic.PixCrawler.download_pic(img_id, pic_path):
                # Second attempt: retry the download once before counting a failure
                self.lock.acquire()
                try:
                    self.success_num += 1
                    self.ui_controls.printbox.appendPlainText(
                        '图片下载中(%d/%d)' %
                        (self.success_num + self.filed_num, self.res_num))
                finally:
                    self.lock.release()
            else:
                self.lock.acquire()
                try:
                    self.filed_num += 1
                    self.ui_controls.printbox.appendPlainText(
                        '图片下载失败(%d/%d)' %
                        (self.success_num + self.filed_num, self.res_num))
                finally:
                    self.lock.release()

    def _thread_download_done(self) -> None:
        """Slot run when a download thread finishes; when the last one finishes,
        print the summary, optionally save settings, and reset all shared state."""
        self.running_thread_num -= 1
        if self.running_thread_num == 0:
            if self.mode == 'search':
                num = int(self.ui_controls.num_box.text())
                keyword = self.ui_controls.keyword.text()
                self.ui_controls.printbox.appendPlainText(
                    '下载完毕,本次请求下载共%d张,关键词是%s,成功%d张,失败%d张。' %
                    (num, keyword, self.success_num, self.filed_num))
                self.ui_controls.printbox.appendPlainText('部分无法打开的图片被网站屏蔽,请忽略')
                if self.ui_controls.is_save:
                    self.save_setting_event()
            if self.mode == 'daily':
                num = int(self.ui_controls.num_box.text())
                self.ui_controls.printbox.appendPlainText(
                    '下载完毕,本次请求下载共%d张,成功%d张,失败%d张。' %
                    (num, self.success_num, self.filed_num))
                self.ui_controls.printbox.appendPlainText('部分无法打开的图片被网站屏蔽,请忽略')
                if self.ui_controls.is_save:
                    self.save_setting_event()
            if self.mode == 'art_id':
                num = int(self.ui_controls.num_box.text())
                self.ui_controls.printbox.appendPlainText(
                    '下载完毕,本次请求下载共%d张,成功%d张,失败%d张。' %
                    (num, self.success_num, self.filed_num))
                self.ui_controls.printbox.appendPlainText('部分无法打开的图片被网站屏蔽,请忽略')
                if self.ui_controls.is_save:
                    self.save_setting_event()
            # Reset shared state for the next request
            self.img_url_list = []
            self.keyword_list = []
            self.mode = ''
            self.res_num = 0
            self.filed_num = 0
            self.success_num = 0
            self.running_thread_num = 0
            self.completed_done_num = 0
            self.folder_name = ''
            self.disable(False)
            # self.ui_controls.start_button.isEnabled(False)

    @staticmethod
    # Exclude special characters that are invalid in folder names
    def filter_folder_name(string: str) -> str:
        # NOTE(review): the eval/repr round-trip is an unusual way to strip
        # characters; str.translate would be safer — behavior kept as-is.
        string = eval(repr(string).replace('/', '').replace('*', ''))
        string = eval(repr(string).replace('<', '').replace('>', ''))
        string = eval(repr(string).replace('|', '').replace('?', ''))
        string = eval(repr(string).replace(':', '').replace('"', ''))
        return string

    def disable(self, mode: bool) -> None:
        """Lock (mode=True) or unlock (mode=False) the input widgets while a
        download is running, so the user cannot trigger concurrent requests."""
        self.ui_controls.keyword.setReadOnly(mode)
        self.ui_controls.num_box.setReadOnly(mode)
        self.ui_controls.threads_num_box.setReadOnly(mode)
        self.ui_controls.save_as.setEnabled(not mode)
        self.ui_controls.start_button.setEnabled(not mode)
        self.ui_controls.sure_keyword.setEnabled(not mode)
        self.ui_controls.date_sure.setEnabled(not mode)
        self.ui_controls.is_recommend.setCheckable(not mode)
        self.ui_controls.id_sure.setEnabled(not mode)

    def load_config(self) -> None:
        """Load the configuration file into the UI, falling back to defaults
        (and disabling downloads) when it is missing or the token is stale."""
        config_dict = config.read_ini()
        if config_dict == {}:
            # Missing config file: apply defaults and require a fresh token
            self.ui_controls.printbox.appendPlainText('配置文件丢失,无法载入配置文件')
            self.ui_controls.printbox.appendPlainText('请先选择保存目录再重新获取token')
            self.ui_controls.num_box.setValue(300)  # default parameters
            self.ui_controls.threads_num_box.setValue(4)
            self.ui_controls.is_recommend.setChecked(True)
            self.ui_controls.is_save.setChecked(True)
            self.ui_controls.is_filter.setChecked(True)
            self.ui_controls.sure_keyword.setEnabled(False)
            self.ui_controls.date_sure.setEnabled(False)
        else:
            self.ui_controls.is_filter.setChecked(config_dict['is_filter'])
            self.ui_controls.is_recommend.setChecked(
                config_dict['is_recommend'])
            self.ui_controls.num_box.setValue(config_dict['num'])
            self.ui_controls.threads_num_box.setValue(
                config_dict['threads_num'])
            self.ui_controls.path_box.setText(config_dict['path'])
            try:
                self.pixivic_crawler = pixivic.PixCrawler(config_dict['token'])
                self.token = config_dict['token']
            except Exception:
                # Stale token: keep downloads disabled until a new one is fetched
                self.ui_controls.printbox.appendPlainText(
                    'token过期,请按获取token以更新token')
                self.ui_controls.sure_keyword.setEnabled(False)
                self.ui_controls.date_sure.setEnabled(False)
class ChildUi(QtWidgets.QWidget):
    """Dialog used to obtain a pixivic login token via a captcha challenge."""

    def __init__(self):
        super(ChildUi, self).__init__()
        self.ui_controls = ui.UiGetToken()
        self.vid = ''    # captcha verification id returned by the server
        self.img = ''    # captcha image once fetched
        self.token = ''  # token obtained after a successful captcha answer

    def setup_ui(self, child_window) -> None:
        """Build the dialog widgets and wire up the button handlers."""
        controls = self.ui_controls
        controls.setupUi(child_window)
        controls.buttonBox.accepted.connect(self.sure)
        controls.buttonBox.rejected.connect(child_window.close)
        controls.pushButton.clicked.connect(self.refresh_captcha)

    def refresh_captcha(self) -> None:
        """Fetch a fresh captcha image from the server and display it."""
        self.img, self.vid = pixivic.get_verification_code()
        controls = self.ui_controls
        controls.pic.setScaledContents(True)  # 自适应QLabel大小 scale to the QLabel
        controls.pic.setPixmap(self.img.toqpixmap())
        controls.pic_value_box.clear()

    def sure(self) -> None:
        """Submit the captcha answer.

        On success, store the token on the main window and close this dialog;
        otherwise show an error and reload the captcha.
        """
        answer = self.ui_controls.pic_value_box.text()
        self.token = pixivic.get_token(self.vid, answer)
        if not self.token:
            self.ui_controls.label.setText('验证码过期或输入错误')
            self.refresh_captcha()
            return
        # NOTE(review): MainUi is the module-level *instance* created in the
        # __main__ block (the name is rebound from the class there).
        MainUi.child_window.close()
        MainUi.token = self.token
        MainUi.pixivic_crawler = pixivic.PixCrawler(self.token)
        MainUi.ui_controls.printbox.appendPlainText('成功获取token')
        MainUi.save_setting_event()
if __name__ == "__main__":
    # Entry point: build the Qt application, hand the main window to the
    # MainUi controller and start the event loop.
    app = QtWidgets.QApplication(sys.argv)
    form = QtWidgets.QMainWindow()
    # NOTE(review): this rebinds the name MainUi from the class to its
    # instance; ChildUi.sure relies on this module-level instance.
    MainUi = MainUi()
    MainUi.setup_ui(form)
    form.show()
    sys.exit(app.exec_())
| []
| []
| []
| [] | [] | python | null | null | null |
PlantEmissionController/PlantEmissionController/asgi.py | """
ASGI config for PlantEmissionController project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Ensure Django knows which settings module to use before the ASGI
# application object is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PlantEmissionController.settings')
# Module-level ASGI callable picked up by the ASGI server.
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/podman/common/create.go | package common
import (
"os"
"github.com/containers/common/pkg/auth"
"github.com/containers/common/pkg/completion"
commonFlag "github.com/containers/common/pkg/flag"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/spf13/cobra"
)
// sizeWithUnitFormat is appended to size-flag usage strings to document the
// accepted byte-size suffixes.
const sizeWithUnitFormat = "(format: `<number>[<unit>]`, where unit = b (bytes), k (kilobytes), m (megabytes), or g (gigabytes))"
// containerConfig holds the process-wide podman configuration; several flag
// defaults below are derived from it.
var containerConfig = registry.PodmanConfig()
// ContainerToPodOptions takes the Container and Pod Create options, assigning the matching values back to podCreate for the purpose of the libpod API
// For this function to succeed, the JSON tags in PodCreateOptions and ContainerCreateOptions need to match due to the Marshaling and Unmarshaling done.
// The types of the options also need to match or else the unmarshaling will fail even if the tags match
func ContainerToPodOptions(containerCreate *entities.ContainerCreateOptions, podCreate *entities.PodCreateOptions) error {
	// NOTE(review): `json` is not imported in this file; it is expected to be
	// the package-level JSON library alias defined elsewhere in this package —
	// confirm before moving this function.
	contMarshal, err := json.Marshal(containerCreate)
	if err != nil {
		return err
	}
	// Round-trip through JSON copies every field whose tag matches.
	return json.Unmarshal(contMarshal, podCreate)
}
// DefineCreateFlags declares and instantiates the container create flags.
// isInfra selects the flag subset used for pod infra containers and clone
// selects the subset shared with container clone; regular `podman create`
// passes false for both and therefore registers every group below.
//
// Fixes over the previous revision: --subgidname/--subuidname were bound to
// the swapped struct fields (SubUIDName/SubGIDName), and a typo in the
// kernel-memory deprecation notice ("hear" -> "here").
func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions, isInfra bool, clone bool) {
	createFlags := cmd.Flags()
	if !isInfra && !clone { // regular create flags
		annotationFlagName := "annotation"
		createFlags.StringSliceVar(
			&cf.Annotation,
			annotationFlagName, []string{},
			"Add annotations to container (key=value)",
		)
		_ = cmd.RegisterFlagCompletionFunc(annotationFlagName, completion.AutocompleteNone)
		attachFlagName := "attach"
		createFlags.StringSliceVarP(
			&cf.Attach,
			attachFlagName, "a", []string{},
			"Attach to STDIN, STDOUT or STDERR",
		)
		_ = cmd.RegisterFlagCompletionFunc(attachFlagName, AutocompleteCreateAttach)
		authfileFlagName := "authfile"
		createFlags.StringVar(
			&cf.Authfile,
			authfileFlagName, auth.GetDefaultAuthFile(),
			"Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override",
		)
		_ = cmd.RegisterFlagCompletionFunc(authfileFlagName, completion.AutocompleteDefault)
		blkioWeightFlagName := "blkio-weight"
		createFlags.StringVar(
			&cf.BlkIOWeight,
			blkioWeightFlagName, "",
			"Block IO weight (relative weight) accepts a weight value between 10 and 1000.",
		)
		_ = cmd.RegisterFlagCompletionFunc(blkioWeightFlagName, completion.AutocompleteNone)
		blkioWeightDeviceFlagName := "blkio-weight-device"
		createFlags.StringSliceVar(
			&cf.BlkIOWeightDevice,
			blkioWeightDeviceFlagName, []string{},
			"Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`)",
		)
		_ = cmd.RegisterFlagCompletionFunc(blkioWeightDeviceFlagName, completion.AutocompleteDefault)
		capAddFlagName := "cap-add"
		createFlags.StringSliceVar(
			&cf.CapAdd,
			capAddFlagName, []string{},
			"Add capabilities to the container",
		)
		_ = cmd.RegisterFlagCompletionFunc(capAddFlagName, completion.AutocompleteCapabilities)
		capDropFlagName := "cap-drop"
		createFlags.StringSliceVar(
			&cf.CapDrop,
			capDropFlagName, []string{},
			"Drop capabilities from the container",
		)
		_ = cmd.RegisterFlagCompletionFunc(capDropFlagName, completion.AutocompleteCapabilities)
		cgroupnsFlagName := "cgroupns"
		createFlags.String(
			cgroupnsFlagName, "",
			"cgroup namespace to use",
		)
		_ = cmd.RegisterFlagCompletionFunc(cgroupnsFlagName, AutocompleteNamespace)
		cgroupsFlagName := "cgroups"
		createFlags.StringVar(
			&cf.CgroupsMode,
			cgroupsFlagName, cgroupConfig(),
			`control container cgroup configuration ("enabled"|"disabled"|"no-conmon"|"split")`,
		)
		_ = cmd.RegisterFlagCompletionFunc(cgroupsFlagName, AutocompleteCgroupMode)
		cidfileFlagName := "cidfile"
		createFlags.StringVar(
			&cf.CIDFile,
			cidfileFlagName, "",
			"Write the container ID to the file",
		)
		_ = cmd.RegisterFlagCompletionFunc(cidfileFlagName, completion.AutocompleteDefault)
		deviceCgroupRuleFlagName := "device-cgroup-rule"
		createFlags.StringSliceVar(
			&cf.DeviceCgroupRule,
			deviceCgroupRuleFlagName, []string{},
			"Add a rule to the cgroup allowed devices list",
		)
		_ = cmd.RegisterFlagCompletionFunc(deviceCgroupRuleFlagName, completion.AutocompleteNone)
		deviceReadIopsFlagName := "device-read-iops"
		createFlags.StringSliceVar(
			&cf.DeviceReadIOPs,
			deviceReadIopsFlagName, []string{},
			"Limit read rate (IO per second) from a device (e.g. --device-read-iops=/dev/sda:1000)",
		)
		_ = cmd.RegisterFlagCompletionFunc(deviceReadIopsFlagName, completion.AutocompleteDefault)
		deviceWriteBpsFlagName := "device-write-bps"
		createFlags.StringSliceVar(
			&cf.DeviceWriteBPs,
			deviceWriteBpsFlagName, []string{},
			"Limit write rate (bytes per second) to a device (e.g. --device-write-bps=/dev/sda:1mb)",
		)
		_ = cmd.RegisterFlagCompletionFunc(deviceWriteBpsFlagName, completion.AutocompleteDefault)
		deviceWriteIopsFlagName := "device-write-iops"
		createFlags.StringSliceVar(
			&cf.DeviceWriteIOPs,
			deviceWriteIopsFlagName, []string{},
			"Limit write rate (IO per second) to a device (e.g. --device-write-iops=/dev/sda:1000)",
		)
		_ = cmd.RegisterFlagCompletionFunc(deviceWriteIopsFlagName, completion.AutocompleteDefault)
		createFlags.Bool(
			"disable-content-trust", false,
			"This is a Docker specific option and is a NOOP",
		)
		envFlagName := "env"
		createFlags.StringArrayP(
			envFlagName, "e", env(),
			"Set environment variables in container",
		)
		_ = cmd.RegisterFlagCompletionFunc(envFlagName, completion.AutocompleteNone)
		unsetenvFlagName := "unsetenv"
		createFlags.StringArrayVar(
			&cf.UnsetEnv,
			unsetenvFlagName, []string{},
			"Unset environment default variables in container",
		)
		_ = cmd.RegisterFlagCompletionFunc(unsetenvFlagName, completion.AutocompleteNone)
		createFlags.BoolVar(
			&cf.UnsetEnvAll,
			"unsetenv-all", false,
			"Unset all default environment variables in container",
		)
		if !registry.IsRemote() {
			createFlags.BoolVar(
				&cf.EnvHost,
				"env-host", false, "Use all current host environment variables in container",
			)
		}
		envFileFlagName := "env-file"
		createFlags.StringSliceVar(
			&cf.EnvFile,
			envFileFlagName, []string{},
			"Read in a file of environment variables",
		)
		_ = cmd.RegisterFlagCompletionFunc(envFileFlagName, completion.AutocompleteDefault)
		exposeFlagName := "expose"
		createFlags.StringSliceVar(
			&cf.Expose,
			exposeFlagName, []string{},
			"Expose a port or a range of ports",
		)
		_ = cmd.RegisterFlagCompletionFunc(exposeFlagName, completion.AutocompleteNone)
		groupAddFlagName := "group-add"
		createFlags.StringSliceVar(
			&cf.GroupAdd,
			groupAddFlagName, []string{},
			"Add additional groups to the primary container process. 'keep-groups' allows container processes to use supplementary groups.",
		)
		_ = cmd.RegisterFlagCompletionFunc(groupAddFlagName, completion.AutocompleteNone)
		healthCmdFlagName := "health-cmd"
		createFlags.StringVar(
			&cf.HealthCmd,
			healthCmdFlagName, "",
			"set a healthcheck command for the container ('none' disables the existing healthcheck)",
		)
		_ = cmd.RegisterFlagCompletionFunc(healthCmdFlagName, completion.AutocompleteNone)
		healthIntervalFlagName := "health-interval"
		createFlags.StringVar(
			&cf.HealthInterval,
			healthIntervalFlagName, define.DefaultHealthCheckInterval,
			"set an interval for the healthchecks (a value of disable results in no automatic timer setup)",
		)
		_ = cmd.RegisterFlagCompletionFunc(healthIntervalFlagName, completion.AutocompleteNone)
		healthRetriesFlagName := "health-retries"
		createFlags.UintVar(
			&cf.HealthRetries,
			healthRetriesFlagName, define.DefaultHealthCheckRetries,
			"the number of retries allowed before a healthcheck is considered to be unhealthy",
		)
		_ = cmd.RegisterFlagCompletionFunc(healthRetriesFlagName, completion.AutocompleteNone)
		healthStartPeriodFlagName := "health-start-period"
		createFlags.StringVar(
			&cf.HealthStartPeriod,
			healthStartPeriodFlagName, define.DefaultHealthCheckStartPeriod,
			"the initialization time needed for a container to bootstrap",
		)
		_ = cmd.RegisterFlagCompletionFunc(healthStartPeriodFlagName, completion.AutocompleteNone)
		healthTimeoutFlagName := "health-timeout"
		createFlags.StringVar(
			&cf.HealthTimeout,
			healthTimeoutFlagName, define.DefaultHealthCheckTimeout,
			"the maximum time allowed to complete the healthcheck before an interval is considered failed",
		)
		_ = cmd.RegisterFlagCompletionFunc(healthTimeoutFlagName, completion.AutocompleteNone)
		createFlags.BoolVar(
			&cf.HTTPProxy,
			"http-proxy", containerConfig.Containers.HTTPProxy,
			"Set proxy environment variables in the container based on the host proxy vars",
		)
		hostUserFlagName := "hostuser"
		createFlags.StringSliceVar(
			&cf.HostUsers,
			hostUserFlagName, []string{},
			"Host user account to add to /etc/passwd within container",
		)
		_ = cmd.RegisterFlagCompletionFunc(hostUserFlagName, completion.AutocompleteNone)
		imageVolumeFlagName := "image-volume"
		createFlags.StringVar(
			&cf.ImageVolume,
			imageVolumeFlagName, DefaultImageVolume,
			`Tells podman how to handle the builtin image volumes ("bind"|"tmpfs"|"ignore")`,
		)
		_ = cmd.RegisterFlagCompletionFunc(imageVolumeFlagName, AutocompleteImageVolume)
		createFlags.BoolVar(
			&cf.Init,
			"init", false,
			"Run an init binary inside the container that forwards signals and reaps processes",
		)
		initPathFlagName := "init-path"
		createFlags.StringVar(
			&cf.InitPath,
			initPathFlagName, initPath(),
			// Do not use the Value field for setting the default value to determine user input (i.e., non-empty string)
			"Path to the container-init binary",
		)
		_ = cmd.RegisterFlagCompletionFunc(initPathFlagName, completion.AutocompleteDefault)
		createFlags.BoolVarP(
			&cf.Interactive,
			"interactive", "i", false,
			"Keep STDIN open even if not attached",
		)
		ipcFlagName := "ipc"
		createFlags.String(
			ipcFlagName, "",
			"IPC namespace to use",
		)
		_ = cmd.RegisterFlagCompletionFunc(ipcFlagName, AutocompleteNamespace)
		createFlags.String(
			"kernel-memory", "",
			"DEPRECATED: Option is just here for compatibility with Docker",
		)
		// kernel-memory is deprecated in the runtime spec.
		_ = createFlags.MarkHidden("kernel-memory")
		logDriverFlagName := "log-driver"
		createFlags.StringVar(
			&cf.LogDriver,
			logDriverFlagName, logDriver(),
			"Logging driver for the container",
		)
		_ = cmd.RegisterFlagCompletionFunc(logDriverFlagName, AutocompleteLogDriver)
		logOptFlagName := "log-opt"
		createFlags.StringSliceVar(
			&cf.LogOptions,
			logOptFlagName, []string{},
			"Logging driver options",
		)
		_ = cmd.RegisterFlagCompletionFunc(logOptFlagName, AutocompleteLogOpt)
		createFlags.BoolVar(
			&cf.NoHealthCheck,
			"no-healthcheck", false,
			"Disable healthchecks on container",
		)
		createFlags.BoolVar(
			&cf.OOMKillDisable,
			"oom-kill-disable", false,
			"Disable OOM Killer",
		)
		oomScoreAdjFlagName := "oom-score-adj"
		createFlags.Int(
			oomScoreAdjFlagName, 0,
			"Tune the host's OOM preferences (-1000 to 1000)",
		)
		_ = cmd.RegisterFlagCompletionFunc(oomScoreAdjFlagName, completion.AutocompleteNone)
		archFlagName := "arch"
		createFlags.StringVar(
			&cf.Arch,
			archFlagName, "",
			"use `ARCH` instead of the architecture of the machine for choosing images",
		)
		_ = cmd.RegisterFlagCompletionFunc(archFlagName, completion.AutocompleteArch)
		osFlagName := "os"
		createFlags.StringVar(
			&cf.OS,
			osFlagName, "",
			"use `OS` instead of the running OS for choosing images",
		)
		_ = cmd.RegisterFlagCompletionFunc(osFlagName, completion.AutocompleteOS)
		variantFlagName := "variant"
		createFlags.StringVar(
			&cf.Variant,
			variantFlagName, "",
			"Use `VARIANT` instead of the running architecture variant for choosing images",
		)
		_ = cmd.RegisterFlagCompletionFunc(variantFlagName, completion.AutocompleteNone)
		pidsLimitFlagName := "pids-limit"
		createFlags.Int64(
			pidsLimitFlagName, pidsLimit(),
			"Tune container pids limit (set -1 for unlimited)",
		)
		_ = cmd.RegisterFlagCompletionFunc(pidsLimitFlagName, completion.AutocompleteNone)
		platformFlagName := "platform"
		createFlags.StringVar(
			&cf.Platform,
			platformFlagName, "",
			"Specify the platform for selecting the image.  (Conflicts with --arch and --os)",
		)
		_ = cmd.RegisterFlagCompletionFunc(platformFlagName, completion.AutocompleteNone)
		podIDFileFlagName := "pod-id-file"
		createFlags.StringVar(
			&cf.PodIDFile,
			podIDFileFlagName, "",
			"Read the pod ID from the file",
		)
		_ = cmd.RegisterFlagCompletionFunc(podIDFileFlagName, completion.AutocompleteDefault)
		createFlags.BoolVar(
			&cf.Privileged,
			"privileged", false,
			"Give extended privileges to container",
		)
		createFlags.BoolVarP(
			&cf.PublishAll,
			"publish-all", "P", false,
			"Publish all exposed ports to random ports on the host interface",
		)
		pullFlagName := "pull"
		createFlags.StringVar(
			&cf.Pull,
			pullFlagName, policy(),
			`Pull image before creating ("always"|"missing"|"never")`,
		)
		_ = cmd.RegisterFlagCompletionFunc(pullFlagName, AutocompletePullOption)
		createFlags.BoolVarP(
			&cf.Quiet,
			"quiet", "q", false,
			"Suppress output information when pulling images",
		)
		createFlags.BoolVar(
			&cf.ReadOnly,
			"read-only", false,
			"Make containers root filesystem read-only",
		)
		createFlags.BoolVar(
			&cf.ReadOnlyTmpFS,
			"read-only-tmpfs", true,
			"When running containers in read-only mode mount a read-write tmpfs on /run, /tmp and /var/tmp",
		)
		requiresFlagName := "requires"
		createFlags.StringSliceVar(
			&cf.Requires,
			requiresFlagName, []string{},
			"Add one or more requirement containers that must be started before this container will start",
		)
		_ = cmd.RegisterFlagCompletionFunc(requiresFlagName, AutocompleteContainers)
		restartFlagName := "restart"
		createFlags.StringVar(
			&cf.Restart,
			restartFlagName, "",
			`Restart policy to apply when a container exits ("always"|"no"|"on-failure"|"unless-stopped")`,
		)
		_ = cmd.RegisterFlagCompletionFunc(restartFlagName, AutocompleteRestartOption)
		createFlags.BoolVar(
			&cf.Rm,
			"rm", false,
			"Remove container (and pod if created) after exit",
		)
		createFlags.BoolVar(
			&cf.RootFS,
			"rootfs", false,
			"The first argument is not an image but the rootfs to the exploded container",
		)
		sdnotifyFlagName := "sdnotify"
		createFlags.StringVar(
			&cf.SdNotifyMode,
			sdnotifyFlagName, define.SdNotifyModeContainer,
			`control sd-notify behavior ("container"|"conmon"|"ignore")`,
		)
		_ = cmd.RegisterFlagCompletionFunc(sdnotifyFlagName, AutocompleteSDNotify)
		secretFlagName := "secret"
		createFlags.StringArrayVar(
			&cf.Secrets,
			secretFlagName, []string{},
			"Add secret to container",
		)
		_ = cmd.RegisterFlagCompletionFunc(secretFlagName, AutocompleteSecrets)
		shmSizeFlagName := "shm-size"
		createFlags.String(
			shmSizeFlagName, shmSize(),
			"Size of /dev/shm "+sizeWithUnitFormat,
		)
		_ = cmd.RegisterFlagCompletionFunc(shmSizeFlagName, completion.AutocompleteNone)
		stopSignalFlagName := "stop-signal"
		createFlags.StringVar(
			&cf.StopSignal,
			stopSignalFlagName, "",
			"Signal to stop a container. Default is SIGTERM",
		)
		_ = cmd.RegisterFlagCompletionFunc(stopSignalFlagName, AutocompleteStopSignal)
		stopTimeoutFlagName := "stop-timeout"
		createFlags.UintVar(
			&cf.StopTimeout,
			stopTimeoutFlagName, containerConfig.Engine.StopTimeout,
			"Timeout (in seconds) that containers stopped by user command have to exit. If exceeded, the container will be forcibly stopped via SIGKILL.",
		)
		_ = cmd.RegisterFlagCompletionFunc(stopTimeoutFlagName, completion.AutocompleteNone)
		systemdFlagName := "systemd"
		createFlags.StringVar(
			&cf.Systemd,
			systemdFlagName, "true",
			`Run container in systemd mode ("true"|"false"|"always")`,
		)
		_ = cmd.RegisterFlagCompletionFunc(systemdFlagName, AutocompleteSystemdFlag)
		personalityFlagName := "personality"
		createFlags.StringVar(
			&cf.Personality,
			personalityFlagName, "",
			"Configure execution domain using personality (e.g., LINUX/LINUX32)",
		)
		_ = cmd.RegisterFlagCompletionFunc(personalityFlagName, AutocompleteNamespace)
		timeoutFlagName := "timeout"
		createFlags.UintVar(
			&cf.Timeout,
			timeoutFlagName, 0,
			"Maximum length of time a container is allowed to run. The container will be killed automatically after the time expires.",
		)
		_ = cmd.RegisterFlagCompletionFunc(timeoutFlagName, completion.AutocompleteNone)
		commonFlag.OptionalBoolFlag(createFlags,
			&cf.TLSVerify,
			"tls-verify",
			"Require HTTPS and verify certificates when contacting registries for pulling images",
		)
		tmpfsFlagName := "tmpfs"
		createFlags.StringArrayVar(
			&cf.TmpFS,
			tmpfsFlagName, []string{},
			"Mount a temporary filesystem (`tmpfs`) into a container",
		)
		_ = cmd.RegisterFlagCompletionFunc(tmpfsFlagName, completion.AutocompleteDefault)
		createFlags.BoolVarP(
			&cf.TTY,
			"tty", "t", false,
			"Allocate a pseudo-TTY for container",
		)
		timezoneFlagName := "tz"
		createFlags.StringVar(
			&cf.Timezone,
			timezoneFlagName, containerConfig.TZ(),
			"Set timezone in container",
		)
		_ = cmd.RegisterFlagCompletionFunc(timezoneFlagName, completion.AutocompleteNone) //TODO: add timezone completion
		umaskFlagName := "umask"
		createFlags.StringVar(
			&cf.Umask,
			umaskFlagName, containerConfig.Umask(),
			"Set umask in container",
		)
		_ = cmd.RegisterFlagCompletionFunc(umaskFlagName, completion.AutocompleteNone)
		ulimitFlagName := "ulimit"
		createFlags.StringSliceVar(
			&cf.Ulimit,
			ulimitFlagName, ulimits(),
			"Ulimit options",
		)
		_ = cmd.RegisterFlagCompletionFunc(ulimitFlagName, completion.AutocompleteNone)
		userFlagName := "user"
		createFlags.StringVarP(
			&cf.User,
			userFlagName, "u", "",
			"Username or UID (format: <name|uid>[:<group|gid>])",
		)
		_ = cmd.RegisterFlagCompletionFunc(userFlagName, AutocompleteUserFlag)
		utsFlagName := "uts"
		createFlags.String(
			utsFlagName, "",
			"UTS namespace to use",
		)
		_ = cmd.RegisterFlagCompletionFunc(utsFlagName, AutocompleteNamespace)
		mountFlagName := "mount"
		createFlags.StringArrayVar(
			&cf.Mount,
			mountFlagName, []string{},
			"Attach a filesystem mount to the container",
		)
		_ = cmd.RegisterFlagCompletionFunc(mountFlagName, AutocompleteMountFlag)
		workdirFlagName := "workdir"
		createFlags.StringVarP(
			&cf.Workdir,
			workdirFlagName, "w", "",
			"Working directory inside the container",
		)
		_ = cmd.RegisterFlagCompletionFunc(workdirFlagName, completion.AutocompleteDefault)
		seccompPolicyFlagName := "seccomp-policy"
		createFlags.StringVar(
			&cf.SeccompPolicy,
			seccompPolicyFlagName, "default",
			"Policy for selecting a seccomp profile (experimental)",
		)
		_ = cmd.RegisterFlagCompletionFunc(seccompPolicyFlagName, completion.AutocompleteDefault)
		cgroupConfFlagName := "cgroup-conf"
		createFlags.StringSliceVar(
			&cf.CgroupConf,
			cgroupConfFlagName, []string{},
			"Configure cgroup v2 (key=value)",
		)
		_ = cmd.RegisterFlagCompletionFunc(cgroupConfFlagName, completion.AutocompleteNone)
		pidFileFlagName := "pidfile"
		createFlags.StringVar(
			&cf.PidFile,
			pidFileFlagName, "",
			"Write the container process ID to the file")
		_ = cmd.RegisterFlagCompletionFunc(pidFileFlagName, completion.AutocompleteDefault)
		chrootDirsFlagName := "chrootdirs"
		createFlags.StringSliceVar(
			&cf.ChrootDirs,
			chrootDirsFlagName, []string{},
			"Chroot directories inside the container",
		)
		_ = cmd.RegisterFlagCompletionFunc(chrootDirsFlagName, completion.AutocompleteDefault)
		passwdEntryName := "passwd-entry"
		createFlags.StringVar(&cf.PasswdEntry, passwdEntryName, "", "Entry to write to /etc/passwd")
		_ = cmd.RegisterFlagCompletionFunc(passwdEntryName, completion.AutocompleteNone)
		if registry.IsRemote() {
			_ = createFlags.MarkHidden("env-host")
			_ = createFlags.MarkHidden("http-proxy")
		} else {
			createFlags.StringVar(
				&cf.SignaturePolicy,
				"signature-policy", "",
				"`Pathname` of signature policy file (not usually used)",
			)
			_ = createFlags.MarkHidden("signature-policy")
		}
		createFlags.BoolVar(
			&cf.Replace,
			"replace", false,
			`If a container with the same name exists, replace it`,
		)
	}
	if isInfra || (!clone && !isInfra) { // infra container flags, create should also pick these up
		sysctlFlagName := "sysctl"
		createFlags.StringSliceVar(
			&cf.Sysctl,
			sysctlFlagName, []string{},
			"Sysctl options",
		)
		//TODO: Add function for sysctl completion.
		_ = cmd.RegisterFlagCompletionFunc(sysctlFlagName, completion.AutocompleteNone)
		securityOptFlagName := "security-opt"
		createFlags.StringArrayVar(
			&cf.SecurityOpt,
			securityOptFlagName, []string{},
			"Security Options",
		)
		_ = cmd.RegisterFlagCompletionFunc(securityOptFlagName, AutocompleteSecurityOption)
		subgidnameFlagName := "subgidname"
		createFlags.StringVar(
			// Fixed: was bound to &cf.SubUIDName (swapped with --subuidname).
			&cf.SubGIDName,
			subgidnameFlagName, "",
			"Name of range listed in /etc/subgid for use in user namespace",
		)
		_ = cmd.RegisterFlagCompletionFunc(subgidnameFlagName, completion.AutocompleteSubgidName)
		subuidnameFlagName := "subuidname"
		createFlags.StringVar(
			// Fixed: was bound to &cf.SubGIDName (swapped with --subgidname).
			&cf.SubUIDName,
			subuidnameFlagName, "",
			"Name of range listed in /etc/subuid for use in user namespace",
		)
		_ = cmd.RegisterFlagCompletionFunc(subuidnameFlagName, completion.AutocompleteSubuidName)
		gidmapFlagName := "gidmap"
		createFlags.StringSliceVar(
			&cf.GIDMap,
			gidmapFlagName, []string{},
			"GID map to use for the user namespace",
		)
		_ = cmd.RegisterFlagCompletionFunc(gidmapFlagName, completion.AutocompleteNone)
		uidmapFlagName := "uidmap"
		createFlags.StringSliceVar(
			&cf.UIDMap,
			uidmapFlagName, []string{},
			"UID map to use for the user namespace",
		)
		_ = cmd.RegisterFlagCompletionFunc(uidmapFlagName, completion.AutocompleteNone)
		usernsFlagName := "userns"
		createFlags.String(
			usernsFlagName, os.Getenv("PODMAN_USERNS"),
			"User namespace to use",
		)
		_ = cmd.RegisterFlagCompletionFunc(usernsFlagName, AutocompleteUserNamespace)
		cgroupParentFlagName := "cgroup-parent"
		createFlags.StringVar(
			&cf.CgroupParent,
			cgroupParentFlagName, "",
			"Optional parent cgroup for the container",
		)
		_ = cmd.RegisterFlagCompletionFunc(cgroupParentFlagName, completion.AutocompleteDefault)
		var conmonPidfileFlagName string
		if !isInfra {
			conmonPidfileFlagName = "conmon-pidfile"
		} else {
			conmonPidfileFlagName = "infra-conmon-pidfile"
		}
		createFlags.StringVar(
			&cf.ConmonPIDFile,
			conmonPidfileFlagName, "",
			"Path to the file that will receive the PID of conmon",
		)
		_ = cmd.RegisterFlagCompletionFunc(conmonPidfileFlagName, completion.AutocompleteDefault)
		var entrypointFlagName string
		if !isInfra {
			entrypointFlagName = "entrypoint"
		} else {
			entrypointFlagName = "infra-command"
		}
		createFlags.String(entrypointFlagName, "",
			"Overwrite the default ENTRYPOINT of the image",
		)
		_ = cmd.RegisterFlagCompletionFunc(entrypointFlagName, completion.AutocompleteNone)
		hostnameFlagName := "hostname"
		createFlags.StringVarP(
			&cf.Hostname,
			hostnameFlagName, "h", "",
			"Set container hostname",
		)
		_ = cmd.RegisterFlagCompletionFunc(hostnameFlagName, completion.AutocompleteNone)
		labelFlagName := "label"
		createFlags.StringArrayVarP(
			&cf.Label,
			labelFlagName, "l", []string{},
			"Set metadata on container",
		)
		_ = cmd.RegisterFlagCompletionFunc(labelFlagName, completion.AutocompleteNone)
		labelFileFlagName := "label-file"
		createFlags.StringSliceVar(
			&cf.LabelFile,
			labelFileFlagName, []string{},
			"Read in a line delimited file of labels",
		)
		_ = cmd.RegisterFlagCompletionFunc(labelFileFlagName, completion.AutocompleteDefault)
		if isInfra {
			nameFlagName := "infra-name"
			createFlags.StringVar(
				&cf.Name,
				nameFlagName, "",
				"Assign a name to the container",
			)
			_ = cmd.RegisterFlagCompletionFunc(nameFlagName, completion.AutocompleteNone)
		}
		createFlags.Bool(
			"help", false, "",
		)
		pidFlagName := "pid"
		createFlags.StringVar(
			&cf.PID,
			pidFlagName, "",
			"PID namespace to use",
		)
		_ = cmd.RegisterFlagCompletionFunc(pidFlagName, AutocompleteNamespace)
		volumeDescription := "Bind mount a volume into the container"
		if registry.IsRemote() {
			volumeDescription = "Bind mount a volume into the container. Volume source will be on the server machine, not the client"
		}
		volumeFlagName := "volume"
		createFlags.StringArrayVarP(
			&cf.Volume,
			volumeFlagName, "v", volumes(),
			volumeDescription,
		)
		_ = cmd.RegisterFlagCompletionFunc(volumeFlagName, AutocompleteVolumeFlag)
		deviceFlagName := "device"
		createFlags.StringSliceVar(
			&cf.Devices,
			deviceFlagName, devices(),
			"Add a host device to the container",
		)
		_ = cmd.RegisterFlagCompletionFunc(deviceFlagName, completion.AutocompleteDefault)
		deviceReadBpsFlagName := "device-read-bps"
		createFlags.StringSliceVar(
			&cf.DeviceReadBPs,
			deviceReadBpsFlagName, []string{},
			"Limit read rate (bytes per second) from a device (e.g. --device-read-bps=/dev/sda:1mb)",
		)
		_ = cmd.RegisterFlagCompletionFunc(deviceReadBpsFlagName, completion.AutocompleteDefault)
		volumesFromFlagName := "volumes-from"
		createFlags.StringArrayVar(
			&cf.VolumesFrom,
			volumesFromFlagName, []string{},
			"Mount volumes from the specified container(s)",
		)
		_ = cmd.RegisterFlagCompletionFunc(volumesFromFlagName, AutocompleteContainers)
	}
	if clone || !isInfra { // clone and create only flags, we need this level of separation so clone does not pick up all of the flags
		nameFlagName := "name"
		createFlags.StringVar(
			&cf.Name,
			nameFlagName, "",
			"Assign a name to the container",
		)
		_ = cmd.RegisterFlagCompletionFunc(nameFlagName, completion.AutocompleteNone)
		podFlagName := "pod"
		createFlags.StringVar(
			&cf.Pod,
			podFlagName, "",
			"Run container in an existing pod",
		)
		_ = cmd.RegisterFlagCompletionFunc(podFlagName, AutocompletePods)
		cpuPeriodFlagName := "cpu-period"
		createFlags.Uint64Var(
			&cf.CPUPeriod,
			cpuPeriodFlagName, 0,
			"Limit the CPU CFS (Completely Fair Scheduler) period",
		)
		_ = cmd.RegisterFlagCompletionFunc(cpuPeriodFlagName, completion.AutocompleteNone)
		cpuQuotaFlagName := "cpu-quota"
		createFlags.Int64Var(
			&cf.CPUQuota,
			cpuQuotaFlagName, 0,
			"Limit the CPU CFS (Completely Fair Scheduler) quota",
		)
		_ = cmd.RegisterFlagCompletionFunc(cpuQuotaFlagName, completion.AutocompleteNone)
		cpuRtPeriodFlagName := "cpu-rt-period"
		createFlags.Uint64Var(
			&cf.CPURTPeriod,
			cpuRtPeriodFlagName, 0,
			"Limit the CPU real-time period in microseconds",
		)
		_ = cmd.RegisterFlagCompletionFunc(cpuRtPeriodFlagName, completion.AutocompleteNone)
		cpuRtRuntimeFlagName := "cpu-rt-runtime"
		createFlags.Int64Var(
			&cf.CPURTRuntime,
			cpuRtRuntimeFlagName, 0,
			"Limit the CPU real-time runtime in microseconds",
		)
		_ = cmd.RegisterFlagCompletionFunc(cpuRtRuntimeFlagName, completion.AutocompleteNone)
		cpuSharesFlagName := "cpu-shares"
		createFlags.Uint64Var(
			&cf.CPUShares,
			cpuSharesFlagName, 0,
			"CPU shares (relative weight)",
		)
		_ = cmd.RegisterFlagCompletionFunc(cpuSharesFlagName, completion.AutocompleteNone)
		cpusetMemsFlagName := "cpuset-mems"
		createFlags.StringVar(
			&cf.CPUSetMems,
			cpusetMemsFlagName, "",
			"Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.",
		)
		_ = cmd.RegisterFlagCompletionFunc(cpusetMemsFlagName, completion.AutocompleteNone)
		memoryFlagName := "memory"
		createFlags.StringVarP(
			&cf.Memory,
			memoryFlagName, "m", "",
			"Memory limit "+sizeWithUnitFormat,
		)
		_ = cmd.RegisterFlagCompletionFunc(memoryFlagName, completion.AutocompleteNone)
		memoryReservationFlagName := "memory-reservation"
		createFlags.StringVar(
			&cf.MemoryReservation,
			memoryReservationFlagName, "",
			"Memory soft limit "+sizeWithUnitFormat,
		)
		_ = cmd.RegisterFlagCompletionFunc(memoryReservationFlagName, completion.AutocompleteNone)
		memorySwapFlagName := "memory-swap"
		createFlags.StringVar(
			&cf.MemorySwap,
			memorySwapFlagName, "",
			"Swap limit equal to memory plus swap: '-1' to enable unlimited swap",
		)
		_ = cmd.RegisterFlagCompletionFunc(memorySwapFlagName, completion.AutocompleteNone)
		memorySwappinessFlagName := "memory-swappiness"
		createFlags.Int64Var(
			&cf.MemorySwappiness,
			memorySwappinessFlagName, -1,
			"Tune container memory swappiness (0 to 100, or -1 for system default)",
		)
		_ = cmd.RegisterFlagCompletionFunc(memorySwappinessFlagName, completion.AutocompleteNone)
	}
	// anyone can use these
	cpusFlagName := "cpus"
	createFlags.Float64Var(
		&cf.CPUS,
		cpusFlagName, 0,
		"Number of CPUs. The default is 0.000 which means no limit",
	)
	_ = cmd.RegisterFlagCompletionFunc(cpusFlagName, completion.AutocompleteNone)
	cpusetCpusFlagName := "cpuset-cpus"
	createFlags.StringVar(
		&cf.CPUSetCPUs,
		cpusetCpusFlagName, "",
		"CPUs in which to allow execution (0-3, 0,1)",
	)
	_ = cmd.RegisterFlagCompletionFunc(cpusetCpusFlagName, completion.AutocompleteNone)
}
| [
"\"PODMAN_USERNS\""
]
| []
| [
"PODMAN_USERNS"
]
| [] | ["PODMAN_USERNS"] | go | 1 | 0 | |
lib/ContigFilter_mlee/ContigFilter_mleeImpl.py | # -*- coding: utf-8 -*-
#BEGIN_HEADER
# The header block is where all import statments should live
import logging
import os
from pprint import pformat
from Bio import SeqIO
from installed_clients.AssemblyUtilClient import AssemblyUtil
from installed_clients.KBaseReportClient import KBaseReport
#END_HEADER
class ContigFilter_mlee:
'''
Module Name:
ContigFilter_mlee
Module Description:
A KBase module: ContigFilter
This sample module contains one small method that filters contigs.
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "0.0.1"
GIT_URL = "https://github.com/mollee55/ContigFilter.git"
GIT_COMMIT_HASH = "0d13a1e1a62ee662a29ac8b87c900314e876611d"
#BEGIN_CLASS_HEADER
# Class variables and functions can be defined in this block
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
    def __init__(self, config):
        """Initialize the module instance from the KBase deploy config.

        :param config: dict of deploy-time configuration; must contain the
            'scratch' working-directory path.  The SDK callback URL is read
            from the SDK_CALLBACK_URL environment variable.
        """
        #BEGIN_CONSTRUCTOR
        # Any configuration parameters that are important should be parsed and
        # saved in the constructor.
        self.callback_url = os.environ['SDK_CALLBACK_URL']  # SDK callback service endpoint
        self.shared_folder = config['scratch']  # scratch dir shared with the job runner
        logging.basicConfig(format='%(created)s %(levelname)s: %(message)s',
                            level=logging.INFO)
        #END_CONSTRUCTOR
        pass
def run_ContigFilter(self, ctx, params):
"""
This example function accepts any number of parameters and returns results in a KBaseReport
:param params: instance of mapping from String to unspecified object
:returns: instance of type "ReportResults" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_ContigFilter
# Print statements to stdout/stderr are captured and available as the App log
logging.info('Starting run_ContigFilter function. Params=' + pformat(params))
# Step 1 - Parse/examine the parameters and catch any errors
# It is important to check that parameters exist and are defined, and that nice error
# messages are returned to users. Parameter values go through basic validation when
# defined in a Narrative App, but advanced users or other SDK developers can call
# this function directly, so validation is still important.
logging.info('Validating parameters.')
if 'workspace_name' not in params:
raise ValueError('Parameter workspace_name is not set in input arguments')
workspace_name = params['workspace_name']
if 'assembly_input_ref' not in params:
raise ValueError('Parameter assembly_input_ref is not set in input arguments')
assembly_input_ref = params['assembly_input_ref']
if 'min_length' not in params:
raise ValueError('Parameter min_length is not set in input arguments')
min_length_orig = params['min_length']
min_length = None
try:
min_length = int(min_length_orig)
except ValueError:
raise ValueError('Cannot parse integer from min_length parameter (' + str(min_length_orig) + ')')
if min_length < 0:
raise ValueError('min_length parameter cannot be negative (' + str(min_length) + ')')
# Step 2 - Download the input data as a Fasta and
# We can use the AssemblyUtils module to download a FASTA file from our Assembly data object.
# The return object gives us the path to the file that was created.
logging.info('Downloading Assembly data as a Fasta file.')
assemblyUtil = AssemblyUtil(self.callback_url)
fasta_file = assemblyUtil.get_assembly_as_fasta({'ref': assembly_input_ref})
# Step 3 - Actually perform the filter operation, saving the good contigs to a new fasta file.
# We can use BioPython to parse the Fasta file and build and save the output to a file.
good_contigs = []
n_total = 0
n_remaining = 0
for record in SeqIO.parse(fasta_file['path'], 'fasta'):
n_total += 1
if len(record.seq) >= min_length:
good_contigs.append(record)
n_remaining += 1
logging.info('Filtered Assembly to ' + str(n_remaining) + ' contigs out of ' + str(n_total))
filtered_fasta_file = os.path.join(self.shared_folder, 'filtered.fasta')
SeqIO.write(good_contigs, filtered_fasta_file, 'fasta')
# Step 4 - Save the new Assembly back to the system
logging.info('Uploading filtered Assembly data.')
new_assembly = assemblyUtil.save_assembly_from_fasta({'file': {'path': filtered_fasta_file},
'workspace_name': workspace_name,
'assembly_name': fasta_file['assembly_name']
})
# Step 5 - Build a Report and return
reportObj = {
'objects_created': [{'ref': new_assembly, 'description': 'Filtered contigs'}],
'text_message': 'Filtered Assembly to ' + str(n_remaining) + ' contigs out of ' + str(n_total)
}
report = KBaseReport(self.callback_url)
report_info = report.create({'report': reportObj, 'workspace_name': params['workspace_name']})
# STEP 6: contruct the output to send back
output = {'report_name': report_info['name'],
'report_ref': report_info['ref'],
'assembly_output': new_assembly,
'n_initial_contigs': n_total,
'n_contigs_removed': n_total - n_remaining,
'n_contigs_remaining': n_remaining
}
logging.info('returning:' + pformat(output))
#END run_ContigFilter
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_ContigFilter return value ' +
'output is not type dict as required.')
# return the results
return [output]
def run_ContigFilter_max(self, ctx, params):
"""
New app which filters contigs in an assembly using both a minimum and a maximum contig length
:param params: instance of mapping from String to unspecified object
:returns: instance of type "ReportResults" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_ContigFilter_max
# Check that the parameters are valid
for name in ['min_length', 'max_length', 'assembly_input_ref', 'workspace_name']:
if name not in params:
raise ValueError('Parameter "' + name + '" is required but missing')
if not isinstance(params['min_length'], int) or (params['min_length'] < 0):
raise ValueError('Min length must be a non-negative integer')
if not isinstance(params['max_length'], int) or (params['max_length'] < 0):
raise ValueError('Max length must be a non-negative integer')
if not isinstance(params['assembly_input_ref'], str) or not len(params['assembly_input_ref']):
raise ValueError('Pass in a valid assembly reference string')
print(params['min_length'], params['max_length'], params['assembly_input_ref'])
output = {}
assembly_util = AssemblyUtil(self.callback_url)
fasta_file = assembly_util.get_assembly_as_fasta({'ref': params['assembly_input_ref']})
print(fasta_file)
# Parse the downloaded file in FASTA format
parsed_assembly = SeqIO.parse(fasta_file['path'], 'fasta')
min_length = params['min_length']
max_length = params['max_length']
# Keep a list of contigs greater than min_length
good_contigs = []
# total contigs regardless of length
n_total = 0
# total contigs over the min_length
n_remaining = 0
for record in parsed_assembly:
n_total += 1
if len(record.seq) >= min_length and len(record.seq) <= max_length:
good_contigs.append(record)
n_remaining += 1
# Create a file to hold the filtered data
workspace_name = params['workspace_name']
filtered_path = os.path.join(self.shared_folder, 'filtered.fasta')
SeqIO.write(good_contigs, filtered_path, 'fasta')
# Upload the filtered data to the workspace
new_ref = assembly_util.save_assembly_from_fasta({
'file': {'path': filtered_path},
'workspace_name': workspace_name,
'assembly_name': fasta_file['assembly_name']
})
# Create an output summary message for the report
text_message = "".join([
'Filtered assembly to ',
str(n_remaining),
' contigs out of ',
str(n_total)
])
# Data for creating the report, referencing the assembly we uploaded
report_data = {
'objects_created': [
{'ref': new_ref, 'description': 'Filtered contigs'}
],
'text_message': text_message
}
# Initialize the report
kbase_report = KBaseReport(self.callback_url)
report = kbase_report.create({
'report': report_data,
'workspace_name': workspace_name
})
# Return the report reference and name in our results
output = {
'report_ref': report['ref'],
'report_name': report['name'],
'n_total': n_total,
'n_remaining': n_remaining,
'filtered_assembly_ref': new_ref
}
#END run_ContigFilter_max
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_ContigFilter_max return value ' +
'output is not type dict as required.')
# return the results
return [output]
def status(self, ctx):
#BEGIN_STATUS
returnVal = {'state': "OK",
'message': "",
'version': self.VERSION,
'git_url': self.GIT_URL,
'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
| []
| []
| [
"SDK_CALLBACK_URL"
]
| [] | ["SDK_CALLBACK_URL"] | python | 1 | 0 | |
operator/common.go | package operator
/*
Copyright 2017-2018 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
log "github.com/Sirupsen/logrus"
"github.com/crunchydata/postgres-operator/util"
"os"
"text/template"
)
// Container image configuration; populated from environment variables by Initialize().
var COImagePrefix string
var COImageTag string
var CCPImagePrefix string
// Paths (inside the operator container) of the JSON templates used to
// generate Kubernetes resources.
const jobPath = "/operator-conf/backup-job.json"
const ingestPath = "/operator-conf/pgo-ingest-watch-job.json"
const rmdatajobPath = "/operator-conf/rmdata-job.json"
const PVCPath = "/operator-conf/pvc.json"
const PVCSCPath = "/operator-conf/pvc-storageclass.json"
const UpgradeJobPath = "/operator-conf/cluster-upgrade-job-1.json"
// Parsed templates, loaded once by Initialize() and then treated as read-only.
var JobTemplate *template.Template
var UpgradeJobTemplate1 *template.Template
var PgpoolTemplate *template.Template
var PgpoolConfTemplate *template.Template
var PgpoolPasswdTemplate *template.Template
var PgpoolHBATemplate *template.Template
var ServiceTemplate1 *template.Template
var IngestjobTemplate *template.Template
var RmdatajobTemplate *template.Template
var PVCTemplate *template.Template
var PVCStorageClassTemplate *template.Template
var AffinityTemplate1 *template.Template
var ContainerResourcesTemplate1 *template.Template
var CollectTemplate1 *template.Template
var DeploymentTemplate1 *template.Template
var ReplicadeploymentTemplate1 *template.Template
var ReplicadeploymentTemplate1Shared *template.Template
func Initialize() {
CCPImagePrefix = os.Getenv("CCP_IMAGE_PREFIX")
if CCPImagePrefix == "" {
log.Debug("CCP_IMAGE_PREFIX not set, using default")
CCPImagePrefix = "crunchydata"
} else {
log.Debug("CCP_IMAGE_PREFIX set, using " + CCPImagePrefix)
}
COImagePrefix = os.Getenv("CO_IMAGE_PREFIX")
if COImagePrefix == "" {
log.Debug("CO_IMAGE_PREFIX not set, using default")
COImagePrefix = "crunchydata"
} else {
log.Debug("CO_IMAGE_PREFIX set, using " + COImagePrefix)
}
COImageTag = os.Getenv("CO_IMAGE_TAG")
if COImageTag == "" {
log.Error("CO_IMAGE_TAG not set, required ")
panic("CO_IMAGE_TAG env var not set")
}
JobTemplate = util.LoadTemplate(jobPath)
PgpoolTemplate = util.LoadTemplate("/operator-conf/pgpool-template.json")
PgpoolConfTemplate = util.LoadTemplate("/operator-conf/pgpool.conf")
PgpoolPasswdTemplate = util.LoadTemplate("/operator-conf/pool_passwd")
PgpoolHBATemplate = util.LoadTemplate("/operator-conf/pool_hba.conf")
ServiceTemplate1 = util.LoadTemplate("/operator-conf/cluster-service-1.json")
IngestjobTemplate = util.LoadTemplate(ingestPath)
RmdatajobTemplate = util.LoadTemplate(rmdatajobPath)
PVCTemplate = util.LoadTemplate(PVCPath)
PVCStorageClassTemplate = util.LoadTemplate(PVCSCPath)
//ReplicadeploymentTemplate1 = util.LoadTemplate("/operator-conf/cluster-replica-deployment-1.json")
//ReplicadeploymentTemplate1Shared = util.LoadTemplate("/operator-conf/cluster-replica-deployment-1-shared.json")
DeploymentTemplate1 = util.LoadTemplate("/operator-conf/cluster-deployment-1.json")
CollectTemplate1 = util.LoadTemplate("/operator-conf/collect.json")
AffinityTemplate1 = util.LoadTemplate("/operator-conf/affinity.json")
ContainerResourcesTemplate1 = util.LoadTemplate("/operator-conf/container-resources.json")
UpgradeJobTemplate1 = util.LoadTemplate(UpgradeJobPath)
}
| [
"\"CCP_IMAGE_PREFIX\"",
"\"CO_IMAGE_PREFIX\"",
"\"CO_IMAGE_TAG\""
]
| []
| [
"CCP_IMAGE_PREFIX",
"CO_IMAGE_PREFIX",
"CO_IMAGE_TAG"
]
| [] | ["CCP_IMAGE_PREFIX", "CO_IMAGE_PREFIX", "CO_IMAGE_TAG"] | go | 3 | 0 | |
tools/val_net_BirdNetPlus.py | #!/usr/local/bin/python3
import os, sys
from detectron2.data.datasets import register_coco_instances
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.config import get_cfg
from detectron2.engine import default_setup
import logging
import numpy as np
import cv2
from detectron2.engine import DefaultPredictor
import torch
import math
from birdview_detection_refiner import BirdviewDetectionRefiner
from utils_3d import _draw_projection_obstacle_to_cam
from object_3d import Object3d
from utils_calib import Calibration
import argparse
# Env paths
# DETECTRON_ROOT must point at the detectron2 checkout; if unset (None) every
# os.path.join below will fail. 'home' appears unused in this script.
home = os.getenv('HOME')
detectron2_root = os.getenv('DETECTRON_ROOT')
'''
This script allows the user to:
1. Obtain the annotations in KITTI format of one or multiple checkpoints, to be evaluated with an external evaluator like https://github.com/cguindel/eval_kitti
2. Visualize and save the images resulting in both BEV and 3D as well
3. Change the evaluation parameters and kitti_root by arguments
'''
def parse_args():
    """Build and parse the command-line options of the BirdNet+ validator.

    Prints the help text and exits with status 1 when invoked without any
    argument, mirroring the original behavior.
    """
    parser = argparse.ArgumentParser(description='Validation script for BirdNet+')
    # Option table: (flag, add_argument keyword arguments). Keeping the
    # declarations data-driven makes the long help strings easier to scan.
    option_table = (
        ('--config_file', dict(help="Name of the configuration to use without extension", default='Base-BirdNetPlus', type=str)),
        ('--ann_val', dict(help="Validation file with the annotations in COCO format previously generated by the training script, without extension", default='annotations_kitti_validation_carpedcycRDHCENT_VPRES_12BIN', type=str)),
        ('--write', dict(help="Write results in KITTI format", default=False, action="store_true")),
        ('--img2show', dict(help="Show a fixed number of images, 0 to eliminate the visualization", default=0, type=int)),
        ('--save_img', dict(help="Save images showed", default=False, action="store_true")),
        ('--eval_chkp', dict(help="Starting from the second half of the checkpoints, the rest will be evaluated with a certain interval specified here, 1 to evaluate all of them", default=1, type=int)),
        ('--force_test', dict(help="Name of the checkpoint to extract annotations or evaluate, empty disable this option", default='', type=str)),
        ('--score', dict(help="Limitation for lower scores", default=0.01, type=float)),
        ('--nms', dict(help="NMS IoU for the overlapping obstacles per class", default=0.3, type=float)),
        ('--kitti_root', dict(help="Path of the KITTI dataset", default='/media/datasets/kitti/object/training', type=str)),
    )
    for flag, kwargs in option_table:
        parser.add_argument(flag, **kwargs)
    # No arguments at all: show usage instead of silently running defaults.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
# BEV parameters
# bvres: bird's-eye-view grid resolution - presumably meters per pixel; confirm
bvres = 0.05
# velodyne_h: LiDAR mounting height in meters (1.73 matches the KITTI setup)
velodyne_h = 1.73
# only_front: restrict processing to the area in front of the vehicle
only_front = True
# BEV images
# Directory containing the precomputed BEV images and the ground_*.txt files
# read when height regression is disabled.
im_path = os.path.join(detectron2_root,'datasets/bv_kitti/image')
def _read_imageset_file(path):
with open(path, 'r') as f:
lines = f.readlines()
return [int(line) for line in lines]
# Viewpoint calculation
def getfrombins(cl, bins):
    """Map a discrete viewpoint bin index *cl* to its representative angle.

    The [-pi, pi] range is split into *bins* equal intervals; the returned
    value is the interval's lower edge shifted down by half a step, matching
    the encoding used at training time.
    """
    edges = np.linspace(-math.pi, math.pi, bins + 1)
    half_step = (edges[1] - edges[0]) / 2.
    centers = [edge - half_step for edge in edges[:-1]]
    return centers[cl]
# Class-id -> KITTI category name: the full 9-class KITTI label set, and the
# reduced 3-class set used by the car/pedestrian/cyclist benchmark.
idclass = {0: 'Car', 1: 'Van', 2: 'Truck', 3: 'Pedestrian', 4: 'Person_sitting',
           5: 'Cyclist', 6: 'Tram', 7: 'Misc', 8: 'DontCare'}
idclass3 = {0: 'Car', 1: 'Pedestrian', 2: 'Cyclist'}
def catName(category_id, nclass):
    """Return the KITTI category name for a predicted class id.

    Uses the 9-class mapping when the model was trained with more than three
    classes and the reduced 3-class mapping otherwise; unknown ids fall back
    to *nclass* (original behavior). Previously any nclass < 3 left the lookup
    table unbound and raised UnboundLocalError; now it uses the 3-class table.
    """
    table = idclass if nclass > 3 else idclass3
    return table.get(category_id, nclass)
def prepareAnn(lbl, alpha, box, h=-1, w=-1, l=-1, x=-1000, y=-1000, z=-1000, ry=-10, score=None):
    """Build one KITTI-style annotation record.

    Returns a tuple (fields, obj3d, line): the raw field list, the Object3d
    parsed from it, and the space-joined KITTI annotation line. Truncation and
    occlusion are emitted as -1 (unknown); geometric fields default to KITTI
    'invalid' sentinel values. *score* is appended only when provided.
    """
    fields = [lbl, -1, -1, alpha,
              box[0], box[1], box[2], box[3],
              h, w, l,
              x, y, z,
              ry]
    if score is not None:
        fields.append(score)
    line = ' '.join(str(field) for field in fields)
    return fields, Object3d(line), line
def prepare_for_coco_detection_KITTI(instance, output_folder, filename, write, kitti_calib_path, nclass, vp, bins, vp_res, hwrot, height_training):
    """Convert one image's Detectron2 Instances into refined KITTI annotations.

    Refines each BEV detection into a 3D obstacle, projects it into the camera
    frame, and optionally writes a KITTI-format .txt file per image.
    Returns (im_ann, im_ann_obj, instance): field lists, Object3d objects, and
    the (unmodified) input instances.
    """
    # Extract important information from instance class
    boxes = np.array(instance.get('pred_boxes').tensor)
    scores = np.array(instance.get('scores'))
    labels = np.array(instance.get('pred_classes'))
    # alpha: observation angle per detection. -10.00 is the KITTI "invalid"
    # sentinel used when viewpoint prediction is disabled.
    if vp_res:
        alpha = np.array([rad for rad in instance.get('viewpoint_residual')]) if vp else np.ones((labels.shape))*(-10.00)
    else:
        alpha = np.array([getfrombins(cl,bins) for cl in instance.get('viewpoint')]) if vp else np.ones((labels.shape))*(-10.00)
    # h[k] = (height, ground z) per detection when trained with height.
    # NOTE(review): the else-branch multiplies a list by a shape *tuple*
    # ([-1,-1000]*labels.shape), which raises TypeError; it likely intended
    # np.array([[-1,-1000]]*labels.shape[0]) - confirm before relying on the
    # height_training=False path.
    h = np.array([[h,g] for h,g in instance.get('height')]) if height_training else np.array([-1,-1000]*labels.shape)
    # Image BV
    bv_image = cv2.imread(filename).astype(np.uint8)
    if height_training:
        bv_ground = None
    else:
        # Ground BV: per-pixel ground elevation saved alongside the BEV image.
        bv_ground = np.fromfile(os.path.join(im_path,'ground_'+filename[-10:].split('.png')[0]+'.txt'),sep=' ')
        bv_ground = bv_ground.reshape(bv_image.shape[0],bv_image.shape[1],1)
    # Calibration for 3D (filename[-10:] is the 6-digit KITTI frame id + '.png')
    calib_file = os.path.join(kitti_calib_path,filename[-10:].split('.png')[0]+'.txt')
    # Refiner for 3D
    refiner = BirdviewDetectionRefiner(bv_image, bv_ground, bvres, velodyne_h, only_front)
    im_ann = []
    im_ann_obj = []
    if write:
        file_ann = open(os.path.join(output_folder,filename[-10:].split('.png')[0]+'.txt'), 'w+')
    for k, box in enumerate(boxes):
        lbl = catName(labels[k],nclass)
        ann,obj3d,strAnn = prepareAnn(lbl,alpha[k],box,score=scores[k],h=h[k,0],z=h[k,1])
        # Pick the refinement variant matching the training configuration.
        if hwrot and height_training:
            refiner.refine_detection_rotated_wheight(obj3d)
        elif hwrot:
            refiner.refine_detection_rotated(obj3d)
        else:
            refiner.refine_detection(obj3d)
        # height == -1 marks a detection the refiner rejected.
        if obj3d.height == -1:
            continue
        # Project points to camera frame coordinates
        calib = Calibration(calib_file)
        p = calib.project_velo_to_rect(np.array([[obj3d.location.x,obj3d.location.y,obj3d.location.z]]))
        # Change 2D bbox in BV getting 2D bbox in camera frame (projection)
        _,_,bbox2D = _draw_projection_obstacle_to_cam(obj3d, calib_file, bvres, only_front, False)
        if bbox2D == None:
            continue
        # Obtain alpha from yaw, then wrap the result into [-pi, pi].
        obj3d.alpha = obj3d.yaw -(-math.atan2(p[0][2],p[0][0]) - 1.5*math.pi)
        obj3d.alpha = obj3d.alpha%(2*math.pi)
        if obj3d.alpha > math.pi:
            obj3d.alpha -= 2*math.pi
        elif obj3d.alpha < -math.pi:
            obj3d.alpha += 2*math.pi
        # After refinement: final KITTI line (label, trunc, occl, alpha, 2D
        # bbox, h/w/l, camera-frame xyz, yaw, score).
        ann = [
            obj3d.kind_name,
            obj3d.truncated,
            obj3d.occluded,
            round(obj3d.alpha,6),
            round(bbox2D[0],6),round(bbox2D[1],6),round(bbox2D[2],6),round(bbox2D[3],6),
            round(obj3d.height,6), round(obj3d.width,6), round(obj3d.length,6),
            round(p[0][0],6), round(p[0][1],6), round(p[0][2],6), # Camera coordinates
            round(obj3d.yaw,6),
            obj3d.score, # DON'T ROUND IT
        ]
        im_ann.append(ann)
        im_ann_obj.append(obj3d)
        strAnn = ' '.join([str(x) for x in ann])
        if write:
            file_ann.write(strAnn+'\n')
    if write:
        file_ann.close()
    return im_ann, im_ann_obj, instance
def main(config_file, ann_val, write, img2show, save_img, eval_chkp, force_test, score_thresh , nms_thresh, kitti_root ):
    """Evaluate one or more BirdNet+ checkpoints on the KITTI validation split.

    For each selected checkpoint, runs inference (or reuses existing KITTI
    annotation files), optionally displays/saves BEV and 3D visualizations,
    and writes per-image KITTI annotation files under OUTPUT_DIR/annotations.
    """
    # KITTI paths
    kitti_im_path = kitti_root+'/image_2'
    kitti_calib_path = kitti_root+'/calib'
    # LOGGER AND CONFIGURATION LOAD
    logger = logging.getLogger("detectron2.trainer")
    cfg = get_cfg()
    cfg.merge_from_file(os.path.join(detectron2_root,"configs/{}.yaml".format(config_file)))
    default_setup(cfg, None)
    nclasses = cfg.MODEL.ROI_HEADS.NUM_CLASSES
    # Extra per-annotation fields the dataset loader must parse, mirroring the
    # training configuration flags.
    optional_arguments = []
    if cfg.VIEWPOINT:
        optional_arguments.append('viewpoint')
    if cfg.VIEWPOINT_RESIDUAL:
        optional_arguments.append('vp_res')
    if cfg.ROTATED_BOX_TRAINING:
        optional_arguments.append('bbox3D')
    if cfg.HEIGHT_TRAINING:
        optional_arguments.append('height')
    val_path = detectron2_root+"/datasets/bv_kitti/annotations/{}.json".format(ann_val)
    register_coco_instances("birdview_val", {}, val_path, detectron2_root, optional_arguments)
    # Collect intermediate checkpoints (model_NNNNNNN.pth), excluding the
    # final one; toeval[:-1] also drops the newest intermediate checkpoint.
    toeval = []
    models = os.listdir(cfg.OUTPUT_DIR)
    for model in models:
        if model.endswith('.pth') and not model=='model_final.pth':
            toeval.append(model)
    toeval.sort()
    toeval = toeval[:-1]
    # Selection policy: a forced checkpoint, a strided subset of the second
    # half plus model_final, or model_final alone.
    if force_test:
        toeval = [e for e in toeval if force_test in e]
        f_eval = [folder.split('_')[1].split('.')[0] for folder in toeval]
    elif eval_chkp!=0:
        length = len(toeval)//2
        toeval = toeval[length::eval_chkp]
        toeval.append('model_final.pth')
        f_eval = [folder.split('_')[1].split('.')[0] for folder in toeval]
    else:
        toeval = ['model_final.pth']
        f_eval = ['final']
    print('Checkpoints to be evaluated: ',toeval)
    for checkpoint, eval_folder in zip(toeval,f_eval):
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, checkpoint)
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = score_thresh
        cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = nms_thresh
        predictor = DefaultPredictor(cfg)
        val_bv_dicts = DatasetCatalog.get("birdview_val")
        val_bv_meta = MetadataCatalog.get("birdview_val")
        # NOTE(review): obj_anns is only reset here, per checkpoint; on the
        # cached-annotations path below it accumulates across images - confirm
        # whether the visualization intends that.
        obj_anns = []
        kitti_results = []
        c = 0
        # Indices of the images to visualize; [-1] disables visualization.
        sample_idx = range(img2show) if img2show != 0 else [-1]
        logger.info("Showing {} predictions".format(str(img2show)))
        ann_outdir = os.path.join(cfg.OUTPUT_DIR,'annotations',eval_folder)
        if not os.path.exists(ann_outdir):
            os.makedirs(ann_outdir)
        for image_id, d in enumerate(val_bv_dicts):
            c += 1
            # 'file' is this image's KITTI annotation path (shadows builtin).
            file = os.path.join(ann_outdir,d["file_name"][-10:].split('.png')[0]+'.txt')
            im = cv2.imread(d["file_name"])
            print("Preparing prediction {}, from {}, image: {}".format(str(c),str(len(val_bv_dicts)),d["file_name"]))
            if not os.path.exists(file) or write:
                # Fresh inference for this image.
                is_kitti_ann=False
                # Inference
                outputs = predictor(im)
                list_anns, obj_anns, instances = prepare_for_coco_detection_KITTI(outputs["instances"].to("cpu"), ann_outdir, d["file_name"], write, kitti_calib_path, nclasses, cfg.VIEWPOINT, cfg.VP_BINS, cfg.VIEWPOINT_RESIDUAL, cfg.ROTATED_BOX_TRAINING, cfg.HEIGHT_TRAINING)
                kitti_results.append(list_anns)
            else:
                # Reuse previously written KITTI annotations.
                is_kitti_ann=True
                with open(file,'r') as f:
                    list_anns = f.read().splitlines()
                kitti_results.append([anns.split(' ') for anns in list_anns] if list_anns else [])
                for ann in list_anns:
                    obj_anns.append(Object3d(ann))
            if c in sample_idx:
                # Change BV aspect: invert intensities of occupied BEV cells
                # for display.
                nonzero = np.where(im>0)
                im[nonzero]=255-im[nonzero]
                im=cv2.bitwise_not(im)
                kitti_im = cv2.imread(os.path.join(kitti_im_path,d["file_name"][-10:]))
                calib_file = os.path.join(kitti_calib_path,d["file_name"][-10:].split('.png')[0]+'.txt')
                # Show obstacles
                for i, obj in enumerate(obj_anns):
                    kitti_im, im, _ = _draw_projection_obstacle_to_cam(obj, calib_file, bvres, only_front, True, kitti_im, im, is_kitti_ann=is_kitti_ann)
                cv2.imshow('image',kitti_im)
                cv2.imshow('bv_image',im)
                if save_img:
                    im_outdir = os.path.join(cfg.OUTPUT_DIR,'images')
                    if not os.path.exists(im_outdir):
                        os.makedirs(im_outdir)
                    cv2.imwrite(os.path.join(im_outdir,'3D_'+d["file_name"][-10:]), kitti_im)
                    cv2.imwrite(os.path.join(im_outdir,'BEV_'+d["file_name"][-10:]), im)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
            elif c > max(sample_idx) and not write:
                # Past the last image to show and nothing to write: stop early.
                break
# Script entry point: parse CLI options and run the evaluation loop.
if __name__ == '__main__':
    args = parse_args()
    main(args.config_file, args.ann_val, args.write, args.img2show, args.save_img, args.eval_chkp, args.force_test, args.score, args.nms, args.kitti_root)
| []
| []
| [
"HOME",
"DETECTRON_ROOT"
]
| [] | ["HOME", "DETECTRON_ROOT"] | python | 2 | 0 | |
redshift_monitoring.py | from __future__ import print_function
import os
import sys
# Copyright 2016-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
# http://aws.amazon.com/apache2.0/
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# add the lib directory to the path
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
sys.path.append(os.path.join(os.path.dirname(__file__), "sql"))
import boto3
import base64
import pg8000
import datetime
import json
import pgpasslib
#### Static Configuration
# Require SSL on the Redshift connection (passed directly to pg8000.connect).
ssl = True
# Default aggregation interval; monitor_cluster() overwrites this module-level
# value from the runtime configuration.
interval = '1 hour'
##################
__version__ = "1.4"
# Verbose logging flag; may be enabled at runtime from the DEBUG config value.
debug = False
# Use '?'-style placeholders for parameterized statements.
pg8000.paramstyle = "qmark"
def run_external_commands(command_set_type, file_name, cursor, cluster):
    """Run the queries defined in a JSON command file and build CloudWatch metrics.

    :param command_set_type: label used in log output (e.g. 'User Configured').
    :param file_name: path of a JSON file holding a list of command dicts with
        'name', 'query', 'type' ('value' emits the query result; anything else
        is a canary that emits the elapsed time) and, for value commands, 'unit'.
    :param cursor: open database cursor used to execute each query.
    :param cluster: cluster identifier attached to every metric as a dimension.
    :return: list of CloudWatch metric dicts; empty when the file is missing,
        malformed, or contains no commands.
    """
    if not os.path.exists(file_name):
        return []

    try:
        # 'with' guarantees the file handle is closed (it was leaked before).
        with open(file_name, 'r') as command_file:
            external_commands = json.load(command_file)
    except ValueError:
        # Malformed command file: degrade gracefully to "no external commands".
        # The previous code inspected e.message, which does not exist on
        # Python 3 exceptions and itself raised AttributeError.
        return []

    output_metrics = []

    for command in external_commands:
        if command['type'] == 'value':
            cmd_type = "Query"
        else:
            cmd_type = "Canary"

        print("Executing %s %s: %s" % (command_set_type, cmd_type, command['name']))

        try:
            t = datetime.datetime.now()
            # 'elapsed_ms' was previously named 'interval', shadowing the
            # module-level aggregation-interval setting.
            elapsed_ms = run_command(cursor, command['query'])
            value = cursor.fetchone()[0]

            if value is None:
                value = 0

            # append a cloudwatch metric for the value, or the elapsed interval, based upon the configured 'type' value
            if command['type'] == 'value':
                output_metrics.append({
                    'MetricName': command['name'],
                    'Dimensions': [
                        {'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': t,
                    'Value': value,
                    'Unit': command['unit']
                })
            else:
                output_metrics.append({
                    'MetricName': command['name'],
                    'Dimensions': [
                        {'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': t,
                    'Value': elapsed_ms,
                    'Unit': 'Milliseconds'
                })
        except Exception as e:
            # One failing command must not abort the rest of the command set.
            print("Exception running external command %s" % command['name'])
            print(e)

    return output_metrics
def run_command(cursor, statement):
    """Execute *statement* on *cursor* and return the elapsed wall time in ms.

    Results (if any) remain on the cursor for the caller to fetch.

    :param cursor: open database cursor.
    :param statement: SQL text to execute.
    :return: execution time in milliseconds (float).
    """
    if debug:
        print("Running Statement: %s" % statement)

    t = datetime.datetime.now()
    cursor.execute(statement)

    # Use the full duration: the old code returned timedelta.microseconds,
    # which only holds the sub-second remainder and therefore wrapped to a
    # misleadingly small number for any statement slower than one second.
    return (datetime.datetime.now() - t).total_seconds() * 1000
def gather_service_class_stats(cursor, cluster):
    """Collect per-WLM-service-class workload metrics for the last two hours.

    Returns a list of CloudWatch metric dicts: one datapoint per metric name
    per (hour bucket, service class). The unused 'runtime' return value is the
    query's own execution time in ms.
    """
    metrics = []
    runtime = run_command(cursor,'''
        SELECT DATE_TRUNC('hour', a.service_class_start_time) AS metrics_ts,
               TRIM(d.name) as service_class,
               COUNT(a.query) AS query_count,
               SUM(a.total_exec_time) AS sum_exec_time,
               sum(case when a.total_queue_time > 0 then 1 else 0 end) count_queued_queries,
               SUM(a.total_queue_time) AS sum_queue_time,
               count(c.is_diskbased) as count_diskbased_segments
        FROM stl_wlm_query a
        JOIN stv_wlm_classification_config b ON a.service_class = b.action_service_class
        LEFT OUTER JOIN (select query, SUM(CASE when is_diskbased = 't' then 1 else 0 end) is_diskbased
                         from svl_query_summary
                         group by query) c on a.query = c.query
        JOIN stv_wlm_service_class_config d on a.service_class = d.service_class
        WHERE a.service_class > 5
          AND a.service_class_start_time > DATEADD(hour, -2, current_date)
        GROUP BY DATE_TRUNC('hour', a.service_class_start_time),
                 d.name
    ''')
    service_class_info = cursor.fetchall()
    def add_metric(metric_name, service_class_id, metric_value, ts):
        # One datapoint tagged with the cluster and service class dimensions.
        # NOTE(review): callers pass the service class *name* (row column 1)
        # as service_class_id - the dimension value is the name, not the id.
        metrics.append({
            'MetricName': metric_name,
            'Dimensions': [{'Name': 'ClusterIdentifier', 'Value': cluster},
                           {'Name': 'ServiceClassID', 'Value': str(service_class_id)}],
            'Timestamp': ts,
            'Value': metric_value
        })
    # Row layout is positional and must match the SELECT above:
    # 0=hour bucket, 1=service class name, 2=query count, 3=total exec time,
    # 4=queued query count, 5=total queue time, 6=disk-based segment count.
    for service_class in service_class_info:
        add_metric('ServiceClass-Queued', service_class[1], service_class[4], service_class[0])
        add_metric('ServiceClass-QueueTime', service_class[1], service_class[5], service_class[0])
        add_metric('ServiceClass-Executed', service_class[1], service_class[2], service_class[0])
        add_metric('ServiceClass-ExecTime', service_class[1], service_class[3], service_class[0])
        add_metric('ServiceClass-DiskbasedQuerySegments', service_class[1], service_class[6], service_class[0])
    return metrics
def gather_table_stats(cursor, cluster):
    """Aggregate svv_table_info into cluster-wide table-health metrics.

    Scans every table's compression, skew, stats staleness, and size columns,
    then returns one CloudWatch metric dict per aggregate (counts, maxima,
    and averages).
    """
    run_command(cursor,
                "select /* Lambda CloudWatch Exporter */ \"schema\" || '.' || \"table\" as table, encoded, max_varchar, unsorted, stats_off, tbl_rows, skew_sortkey1, skew_rows from svv_table_info")
    tables_not_compressed = 0
    max_skew_ratio = 0
    total_skew_ratio = 0
    number_tables_skew = 0
    number_tables = 0
    max_skew_sort_ratio = 0
    total_skew_sort_ratio = 0
    number_tables_skew_sort = 0
    number_tables_statsoff = 0
    max_varchar_size = 0
    max_unsorted_pct = 0
    total_rows = 0
    result = cursor.fetchall()
    for table in result:
        # Positional unpack matching the SELECT column order above.
        table_name, encoded, max_varchar, unsorted, stats_off, tbl_rows, skew_sortkey1, skew_rows = table
        number_tables += 1
        if encoded == 'N':
            tables_not_compressed += 1
        if skew_rows is not None:
            if skew_rows > max_skew_ratio:
                max_skew_ratio = skew_rows
            total_skew_ratio += skew_rows
            number_tables_skew += 1
        if skew_sortkey1 is not None:
            if skew_sortkey1 > max_skew_sort_ratio:
                max_skew_sort_ratio = skew_sortkey1
            total_skew_sort_ratio += skew_sortkey1
            number_tables_skew_sort += 1
        # stats_off > 5 (%) marks a table whose optimizer stats are stale.
        if stats_off is not None and stats_off > 5:
            number_tables_statsoff += 1
        if max_varchar is not None and max_varchar > max_varchar_size:
            max_varchar_size = max_varchar
        if unsorted is not None and unsorted > max_unsorted_pct:
            max_unsorted_pct = unsorted
        if tbl_rows is not None:
            total_rows += tbl_rows
    # Averages only over tables that reported a skew value.
    if number_tables_skew > 0:
        avg_skew_ratio = total_skew_ratio / number_tables_skew
    else:
        avg_skew_ratio = 0
    if number_tables_skew_sort > 0:
        avg_skew_sort_ratio = total_skew_sort_ratio / number_tables_skew_sort
    else:
        avg_skew_sort_ratio = 0
    # build up the metrics to put in cloudwatch
    metrics = []
    def add_metric(metric_name, value, unit):
        # One datapoint tagged with the cluster dimension.
        metrics.append({
            'MetricName': metric_name,
            'Dimensions': [
                {'Name': 'ClusterIdentifier', 'Value': cluster}
            ],
            'Timestamp': datetime.datetime.utcnow(),
            'Value': value,
            'Unit': unit
        })
    units_count = 'Count'
    units_none = 'None'
    units_pct = 'Percent'
    add_metric('TablesNotCompressed', tables_not_compressed, units_count)
    add_metric('MaxSkewRatio', max_skew_ratio, units_none)
    add_metric('MaxSkewSortRatio', max_skew_sort_ratio, units_none)
    add_metric('AvgSkewRatio', avg_skew_ratio, units_none)
    add_metric('AvgSkewSortRatio', avg_skew_sort_ratio, units_none)
    add_metric('Tables', number_tables, units_count)
    add_metric('Rows', total_rows, units_count)
    add_metric('TablesStatsOff', number_tables_statsoff, units_count)
    add_metric('MaxVarcharSize', max_varchar_size, units_none)
    add_metric('MaxUnsorted', max_unsorted_pct, units_pct)
    return metrics
# nasty hack for backward compatibility, to extract label values from os.environ or event
def get_config_value(labels, configs):
    """Return the first value found in *configs* under any name in *labels*.

    *labels* is searched in priority order against each configuration source
    (dict-like) in *configs*; returns None when no source defines any label.
    """
    for label in labels:
        for config in configs:
            if label in config:
                if debug:
                    print("Resolved label value %s from config" % label)
                return config[label]
    return None
def monitor_cluster(config_sources):
    """Collect Redshift diagnostics for one cluster and publish to CloudWatch.

    :param config_sources: iterable of dict-like configuration sources (e.g.
        os.environ and/or the invoking Lambda event), searched in order via
        get_config_value().
    :raises: re-raises any KMS, connection, or CloudWatch failure after
        logging it, so the caller records the invocation as failed.
    """
    global debug, interval

    aws_region = get_config_value(['AWS_REGION'], config_sources)

    # Resolve the debug flag once. It may arrive as a real boolean or as the
    # string 'true'/'TRUE'. The old code called .upper() on booleans (crashing
    # for a literal False) and later re-read the flag, clobbering it with the
    # raw, possibly-string value; both issues are fixed here.
    set_debug = get_config_value(['DEBUG', 'debug'], config_sources)
    if set_debug is not None:
        if isinstance(set_debug, bool):
            debug = set_debug
        elif str(set_debug).upper() == 'TRUE':
            debug = True

    kms = boto3.client('kms', region_name=aws_region)
    cw = boto3.client('cloudwatch', region_name=aws_region)

    if debug:
        print("Connected to AWS KMS & CloudWatch in %s" % aws_region)

    # Connection settings; multiple label spellings are accepted for
    # backward compatibility with older event/environment formats.
    user = get_config_value(['DbUser', 'db_user', 'dbUser'], config_sources)
    host = get_config_value(['HostName', 'cluster_endpoint', 'dbHost', 'db_host'], config_sources)
    port = int(get_config_value(['HostPort', 'db_port', 'dbPort'], config_sources))
    database = get_config_value(['DatabaseName', 'db_name', 'db'], config_sources)
    cluster = get_config_value(['ClusterName', 'cluster_name', 'clusterName'], config_sources)
    interval = get_config_value(['AggregationInterval', 'agg_interval', 'aggregtionInterval'], config_sources)

    # Password resolution order: ~/.pgpass, plaintext config, KMS-encrypted.
    pwd = None
    try:
        pwd = pgpasslib.getpass(host, port, database, user)
    except pgpasslib.FileNotFound:
        pass

    # check if unencrypted password exists if no pgpasslib
    if pwd is None:
        pwd = get_config_value(['db_pwd'], config_sources)

    # check for encrypted password if the above two don't exist
    if pwd is None:
        enc_password = get_config_value(['EncryptedPassword', 'encrypted_password', 'encrypted_pwd', 'dbPassword'],
                                        config_sources)

        # Resolve the optional KMS encryption context. BUG FIX: this
        # previously passed the bare string 'kms_auth_context', which
        # get_config_value iterated character by character, so a configured
        # context was never found and decryption failed for context-bound keys.
        auth_context = get_config_value(['kms_auth_context'], config_sources)
        if auth_context is not None:
            auth_context = json.loads(auth_context)

        try:
            if auth_context is None:
                pwd = kms.decrypt(CiphertextBlob=base64.b64decode(enc_password))[
                    'Plaintext']
            else:
                pwd = kms.decrypt(CiphertextBlob=base64.b64decode(enc_password), EncryptionContext=auth_context)[
                    'Plaintext']
        except:
            print('KMS access failed: exception %s' % sys.exc_info()[1])
            print('Encrypted Password: %s' % enc_password)
            print('Encryption Context %s' % auth_context)
            raise

    # Connect to the cluster
    try:
        if debug:
            print('Connecting to Redshift: %s' % host)
        conn = pg8000.connect(database=database, user=user, password=pwd, host=host, port=port, ssl=ssl)
    except:
        print('Redshift Connection Failed: exception %s' % sys.exc_info()[1])
        raise

    if debug:
        print('Successfully Connected to Cluster')

    # create a new cursor for methods to run through
    cursor = conn.cursor()

    # Tag the session so monitoring queries are identifiable in Redshift logs.
    set_name = "set application_name to 'RedshiftAdvancedMonitoring-v%s'" % __version__
    if debug:
        print(set_name)
    cursor.execute(set_name)

    # collect table statistics
    put_metrics = gather_table_stats(cursor, cluster)

    # collect service class statistics
    put_metrics.extend(gather_service_class_stats(cursor, cluster))

    # run the externally configured commands and append their values onto the put metrics
    put_metrics.extend(run_external_commands('Redshift Diagnostic', 'monitoring-queries.json', cursor, cluster))

    # run the supplied user commands and append their values onto the put metrics
    put_metrics.extend(run_external_commands('User Configured', 'user-queries.json', cursor, cluster))

    # add a metric for how many metrics we're exporting (whoa inception)
    put_metrics.extend([{
        'MetricName': 'CloudwatchMetricsExported',
        'Dimensions': [
            {'Name': 'ClusterIdentifier', 'Value': cluster}
        ],
        'Timestamp': datetime.datetime.utcnow(),
        'Value': len(put_metrics),
        'Unit': 'Count'
    }])

    # CloudWatch accepts at most 20 datapoints per put_metric_data call, so
    # publish the metrics in batches.
    max_metrics = 20
    group = 0
    print("Publishing %s CloudWatch Metrics" % (len(put_metrics)))

    for x in range(0, len(put_metrics), max_metrics):
        group += 1
        # slice the metrics into blocks of 20 or just the remaining metrics
        put = put_metrics[x:(x + max_metrics)]
        if debug:
            print("Metrics group %s: %s Datapoints" % (group, len(put)))
            print(put)
        try:
            cw.put_metric_data(
                Namespace='Redshift',
                MetricData=put
            )
        except:
            print('Pushing metrics to CloudWatch failed: exception %s' % sys.exc_info()[1])
            raise

    cursor.close()
    conn.close()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
k4a/pyk4a.py | import ctypes
import enum
import sys
import os
# Locate and load the Azure Kinect native library (k4a), trying three
# locations in order; the resulting _k4a handle is used below to bind every
# C entry point.
try:
    # 1) A copy vendored next to this package (Windows x64 layout).
    dirPath = os.path.dirname(os.path.abspath(__file__))+r'/../vendor/azure_kinect/windows/amd64/'
    print(dirPath)  # NOTE(review): debug print left in; runs on every import
    _k4a = ctypes.CDLL(dirPath+r'k4a.dll')
    # Prepend the DLL directory to PATH so dependent DLLs can be resolved too.
    os.environ['PATH'] = dirPath+';'+os.environ['PATH']
except Exception as e1:
    try:
        # 2) The default Windows install path of Azure Kinect SDK v1.4.1.
        dirPath = r'C:/Program Files/Azure Kinect SDK v1.4.1/sdk/windows-desktop/amd64/release/bin/'
        _k4a = ctypes.CDLL(dirPath+r'k4a.dll')
        os.environ['PATH'] = dirPath+';'+os.environ['PATH']
    except Exception as e2:
        try:
            # 3) A shared object on the system loader path (Linux).
            _k4a = ctypes.CDLL('k4a.so')
        except Exception as e3:
            # No usable library anywhere: report all three failures and abort.
            print("Failed to load library", e1, e2, e3)
            sys.exit(1)
# K4A_DECLARE_HANDLE(k4a_device_t);
# Each K4A_DECLARE_HANDLE type in k4a.h is an opaque handle; it is mirrored
# here as a pointer to a one-field placeholder struct that is never
# dereferenced from Python.
class _handle_k4a_device_t(ctypes.Structure):
    _fields_= [
        ("_rsvd", ctypes.c_size_t),
    ]

# Opaque handle to an opened Azure Kinect device.
k4a_device_t = ctypes.POINTER(_handle_k4a_device_t)

# K4A_DECLARE_HANDLE(k4a_capture_t);
class _handle_k4a_capture_t(ctypes.Structure):
    _fields_= [
        ("_rsvd", ctypes.c_size_t),
    ]

# Opaque handle to a capture object (see k4a_device_get_capture below).
k4a_capture_t = ctypes.POINTER(_handle_k4a_capture_t)

# K4A_DECLARE_HANDLE(k4a_image_t);
class _handle_k4a_image_t(ctypes.Structure):
    _fields_= [
        ("_rsvd", ctypes.c_size_t),
    ]

# Opaque handle to a single image buffer.
k4a_image_t = ctypes.POINTER(_handle_k4a_image_t)

# K4A_DECLARE_HANDLE(k4a_transformation_t);
class _handle_k4a_transformation_t(ctypes.Structure):
    _fields_= [
        ("_rsvd", ctypes.c_size_t),
    ]

# Opaque handle to a transformation context.
k4a_transformation_t = ctypes.POINTER(_handle_k4a_transformation_t)
# Enum values transcribed from k4a.h.  They are kept as plain module-level
# ints (rather than enum.IntEnum) so they can be passed directly as the
# ctypes.c_int arguments declared in the bindings below; each '#class ...'
# comment names the C enum the following group mirrors.

#class k4a_result_t(CtypeIntEnum):
K4A_RESULT_SUCCEEDED = 0
K4A_RESULT_FAILED = 1
#class k4a_buffer_result_t(CtypeIntEnum):
K4A_BUFFER_RESULT_SUCCEEDED = 0
K4A_BUFFER_RESULT_FAILED = 1
K4A_BUFFER_RESULT_TOO_SMALL = 2
#class k4a_wait_result_t(CtypeIntEnum):
K4A_WAIT_RESULT_SUCCEEDED = 0
K4A_WAIT_RESULT_FAILED = 1
K4A_WAIT_RESULT_TIMEOUT = 2
#class k4a_log_level_t(CtypeIntEnum):
K4A_LOG_LEVEL_CRITICAL = 0
K4A_LOG_LEVEL_ERROR = 1
K4A_LOG_LEVEL_WARNING = 2
K4A_LOG_LEVEL_INFO = 3
K4A_LOG_LEVEL_TRACE = 4
K4A_LOG_LEVEL_OFF = 5
#class k4a_depth_mode_t(CtypeIntEnum):
K4A_DEPTH_MODE_OFF = 0
K4A_DEPTH_MODE_NFOV_2X2BINNED = 1
K4A_DEPTH_MODE_NFOV_UNBINNED = 2
K4A_DEPTH_MODE_WFOV_2X2BINNED = 3
K4A_DEPTH_MODE_WFOV_UNBINNED = 4
K4A_DEPTH_MODE_PASSIVE_IR = 5
#class k4a_color_resolution_t(CtypeIntEnum):
K4A_COLOR_RESOLUTION_OFF = 0
K4A_COLOR_RESOLUTION_720P = 1
K4A_COLOR_RESOLUTION_1080P = 2
K4A_COLOR_RESOLUTION_1440P = 3
K4A_COLOR_RESOLUTION_1536P = 4
K4A_COLOR_RESOLUTION_2160P = 5
K4A_COLOR_RESOLUTION_3072P = 6
#class k4a_image_format_t(CtypeIntEnum):
K4A_IMAGE_FORMAT_COLOR_MJPG = 0
K4A_IMAGE_FORMAT_COLOR_NV12 = 1
K4A_IMAGE_FORMAT_COLOR_YUY2 = 2
K4A_IMAGE_FORMAT_COLOR_BGRA32 = 3
K4A_IMAGE_FORMAT_DEPTH16 = 4
K4A_IMAGE_FORMAT_IR16 = 5
K4A_IMAGE_FORMAT_CUSTOM8 = 6
K4A_IMAGE_FORMAT_CUSTOM16 = 7
K4A_IMAGE_FORMAT_CUSTOM = 8
#class k4a_transformation_interpolation_type_t(CtypeIntEnum):
K4A_TRANSFORMATION_INTERPOLATION_TYPE_NEAREST = 0
K4A_TRANSFORMATION_INTERPOLATION_TYPE_LINEAR = 1
#class k4a_fps_t(CtypeIntEnum):
K4A_FRAMES_PER_SECOND_5 = 0
K4A_FRAMES_PER_SECOND_15 = 1
K4A_FRAMES_PER_SECOND_30 = 2
#class k4a_color_control_command_t(CtypeIntEnum):
K4A_COLOR_CONTROL_EXPOSURE_TIME_ABSOLUTE = 0
K4A_COLOR_CONTROL_AUTO_EXPOSURE_PRIORITY = 1
K4A_COLOR_CONTROL_BRIGHTNESS = 2
K4A_COLOR_CONTROL_CONTRAST = 3
K4A_COLOR_CONTROL_SATURATION = 4
K4A_COLOR_CONTROL_SHARPNESS = 5
K4A_COLOR_CONTROL_WHITEBALANCE = 6
K4A_COLOR_CONTROL_BACKLIGHT_COMPENSATION = 7
K4A_COLOR_CONTROL_GAIN = 8
K4A_COLOR_CONTROL_POWERLINE_FREQUENCY = 9
#class k4a_color_control_mode_t(CtypeIntEnum):
K4A_COLOR_CONTROL_MODE_AUTO = 0
K4A_COLOR_CONTROL_MODE_MANUAL = 1
#class k4a_wired_sync_mode_t(CtypeIntEnum):
K4A_WIRED_SYNC_MODE_STANDALONE = 0
K4A_WIRED_SYNC_MODE_MASTER = 1
K4A_WIRED_SYNC_MODE_SUBORDINATE = 2
#class k4a_calibration_type_t(CtypeIntEnum):
K4A_CALIBRATION_TYPE_UNKNOWN = -1
K4A_CALIBRATION_TYPE_DEPTH = 0
K4A_CALIBRATION_TYPE_COLOR = 1
K4A_CALIBRATION_TYPE_GYRO = 2
K4A_CALIBRATION_TYPE_ACCEL = 3
K4A_CALIBRATION_TYPE_NUM = 4
#class k4a_calibration_model_type_t(CtypeIntEnum):
K4A_CALIBRATION_LENS_DISTORTION_MODEL_UNKNOWN = 0
K4A_CALIBRATION_LENS_DISTORTION_MODEL_THETA = 1
K4A_CALIBRATION_LENS_DISTORTION_MODEL_POLYNOMIAL_3K = 2
K4A_CALIBRATION_LENS_DISTORTION_MODEL_RATIONAL_6KT = 3
K4A_CALIBRATION_LENS_DISTORTION_MODEL_BROWN_CONRADY = 4
#class k4a_firmware_build_t(CtypeIntEnum):
K4A_FIRMWARE_BUILD_RELEASE = 0
K4A_FIRMWARE_BUILD_DEBUG = 1
#class k4a_firmware_signature_t(CtypeIntEnum):
K4A_FIRMWARE_SIGNATURE_MSFT = 0
K4A_FIRMWARE_SIGNATURE_TEST = 1
K4A_FIRMWARE_SIGNATURE_UNSIGNED = 2
#define K4A_SUCCEEDED(_result_) (_result_ == K4A_RESULT_SUCCEEDED)
def K4A_SUCCEEDED(result):
    """Python port of the C macro: True when *result* is K4A_RESULT_SUCCEEDED."""
    succeeded = result == K4A_RESULT_SUCCEEDED
    return succeeded
#define K4A_FAILED(_result_) (!K4A_SUCCEEDED(_result_))
def K4A_FAILED(result):
    """Python port of the C macro: True when *result* is not K4A_RESULT_SUCCEEDED.

    Written directly against the constant; equivalent to not K4A_SUCCEEDED(result).
    """
    return result != K4A_RESULT_SUCCEEDED
# TODO(Andoryuuta): Callbacks, are these needed?
"""
typedef void(k4a_logging_message_cb_t)(void *context,
k4a_log_level_t level,
const char *file,
const int line,
const char *message);
typedef void(k4a_memory_destroy_cb_t)(void *buffer, void *context);
typedef uint8_t *(k4a_memory_allocate_cb_t)(int size, void **context);
"""
class _k4a_device_configuration_t(ctypes.Structure):
    """ctypes mirror of k4a_device_configuration_t from k4a.h.

    Field order and widths must match the C struct exactly; do not reorder.
    """
    _fields_= [
        ("color_format", ctypes.c_int),        # k4a_image_format_t value (see constants above)
        ("color_resolution", ctypes.c_int),    # k4a_color_resolution_t value
        ("depth_mode", ctypes.c_int),          # k4a_depth_mode_t value
        ("camera_fps", ctypes.c_int),          # k4a_fps_t value
        ("synchronized_images_only", ctypes.c_bool),
        ("depth_delay_off_color_usec", ctypes.c_int32),
        ("wired_sync_mode", ctypes.c_int),     # k4a_wired_sync_mode_t value
        ("subordinate_delay_off_master_usec", ctypes.c_uint32),
        ("disable_streaming_indicator", ctypes.c_bool),
    ]

# Public alias matching the C typedef name.
k4a_device_configuration_t = _k4a_device_configuration_t
class _k4a_calibration_extrinsics_t(ctypes.Structure):
    """ctypes mirror of k4a_calibration_extrinsics_t (k4a.h): a rigid
    transform between two sensors, as 9 rotation floats plus a 3-float
    translation (per the SDK docs, a 3x3 matrix and a vector)."""
    _fields_= [
        ("rotation", ctypes.c_float * 9),
        ("translation", ctypes.c_float * 3),
    ]

# Public alias matching the C typedef name.
k4a_calibration_extrinsics_t = _k4a_calibration_extrinsics_t
class _param(ctypes.Structure):
    """Named intrinsic calibration parameters, mirroring the 'param' member
    of k4a_calibration_intrinsic_parameters_t in k4a.h.

    NOTE: the field order (including p2 before p1) matches the C header and
    must not be changed.
    """
    _fields_ = [
        ("cx", ctypes.c_float),            # principal point x
        ("cy", ctypes.c_float),            # principal point y
        ("fx", ctypes.c_float),            # focal length x
        ("fy", ctypes.c_float),            # focal length y
        ("k1", ctypes.c_float),            # radial distortion coefficients k1..k6
        ("k2", ctypes.c_float),
        ("k3", ctypes.c_float),
        ("k4", ctypes.c_float),
        ("k5", ctypes.c_float),
        ("k6", ctypes.c_float),
        ("codx", ctypes.c_float),          # center-of-distortion x (per k4a.h)
        ("cody", ctypes.c_float),          # center-of-distortion y
        ("p2", ctypes.c_float),            # tangential distortion coefficients
        ("p1", ctypes.c_float),
        ("metric_radius", ctypes.c_float),
    ]

class k4a_calibration_intrinsic_parameters_t(ctypes.Union):
    """Union view of the intrinsics: named fields or a flat 15-float array."""
    _fields_= [
        ("param", _param),
        ("v", ctypes.c_float * 15),  # same 15 floats, addressable by index
    ]
class _k4a_calibration_intrinsics_t(ctypes.Structure):
    """ctypes mirror of k4a_calibration_intrinsics_t (k4a.h)."""
    _fields_= [
        ("type", ctypes.c_int),              # k4a_calibration_model_type_t value
        ("parameter_count", ctypes.c_uint),  # number of valid entries in parameters.v
        ("parameters", k4a_calibration_intrinsic_parameters_t),
    ]

# Public alias matching the C typedef name.
k4a_calibration_intrinsics_t = _k4a_calibration_intrinsics_t
class _k4a_calibration_camera_t(ctypes.Structure):
    """ctypes mirror of k4a_calibration_camera_t (k4a.h): full calibration
    for one camera (extrinsics + intrinsics + image geometry)."""
    _fields_= [
        ("extrinsics", k4a_calibration_extrinsics_t),
        ("intrinsics", k4a_calibration_intrinsics_t),
        ("resolution_width", ctypes.c_int),   # image width in pixels
        ("resolution_height", ctypes.c_int),  # image height in pixels
        ("metric_radius", ctypes.c_float),
    ]

# Public alias matching the C typedef name.
k4a_calibration_camera_t = _k4a_calibration_camera_t
class _k4a_calibration_t(ctypes.Structure):
    """ctypes mirror of k4a_calibration_t (k4a.h): device-wide calibration.

    'extrinsics' is a K4A_CALIBRATION_TYPE_NUM x K4A_CALIBRATION_TYPE_NUM
    table of transforms, indexed by k4a_calibration_type_t source/target
    pairs per the SDK docs.
    """
    _fields_= [
        ("depth_camera_calibration", k4a_calibration_camera_t),
        ("color_camera_calibration", k4a_calibration_camera_t),
        ("extrinsics", (k4a_calibration_extrinsics_t * K4A_CALIBRATION_TYPE_NUM) * K4A_CALIBRATION_TYPE_NUM),
        ("depth_mode", ctypes.c_int),        # k4a_depth_mode_t the calibration was obtained for
        ("color_resolution", ctypes.c_int),  # k4a_color_resolution_t the calibration was obtained for
    ]

# Public alias matching the C typedef name.
k4a_calibration_t = _k4a_calibration_t
class _k4a_version_t(ctypes.Structure):
    """ctypes mirror of k4a_version_t (k4a.h): major.minor.iteration triple."""
    _fields_= [
        ("major", ctypes.c_uint32),
        ("minor", ctypes.c_uint32),
        ("iteration", ctypes.c_uint32),
    ]

# Public alias matching the C typedef name.
k4a_version_t = _k4a_version_t
class _k4a_hardware_version_t(ctypes.Structure):
    """ctypes mirror of k4a_hardware_version_t (k4a.h): firmware versions for
    each subsystem plus build/signature flags."""
    _fields_= [
        ("rgb", k4a_version_t),
        ("depth", k4a_version_t),
        ("audio", k4a_version_t),
        ("depth_sensor", k4a_version_t),
        ("firmware_build", ctypes.c_int),      # k4a_firmware_build_t value
        ("firmware_signature", ctypes.c_int),  # k4a_firmware_signature_t value
    ]

# Public alias matching the C typedef name.
k4a_hardware_version_t = _k4a_hardware_version_t
class _xy(ctypes.Structure):
    """Named x/y view of a 2D float vector (the 'xy' member of k4a_float2_t)."""
    _fields_= [
        ("x", ctypes.c_float),
        ("y", ctypes.c_float),
    ]

class k4a_float2_t(ctypes.Union):
    """ctypes mirror of k4a_float2_t (k4a.h): 2 floats as .xy or as array .v."""
    _fields_= [
        ("xy", _xy),
        ("v", ctypes.c_float * 2)
    ]
class _xyz(ctypes.Structure):
    """Named x/y/z view of a 3D float vector (the 'xyz' member of k4a_float3_t)."""
    _fields_= [
        ("x", ctypes.c_float),
        ("y", ctypes.c_float),
        ("z", ctypes.c_float),
    ]

class k4a_float3_t(ctypes.Union):
    """ctypes mirror of k4a_float3_t (k4a.h): 3 floats as .xyz or as array .v."""
    _fields_= [
        ("xyz", _xyz),
        ("v", ctypes.c_float * 3)
    ]
class _k4a_imu_sample_t(ctypes.Structure):
    """ctypes mirror of k4a_imu_sample_t (k4a.h): one IMU reading with
    accelerometer and gyroscope samples plus their timestamps."""
    _fields_= [
        ("temperature", ctypes.c_float),
        ("acc_sample", k4a_float3_t),              # accelerometer reading
        ("acc_timestamp_usec", ctypes.c_uint64),   # accelerometer timestamp, microseconds
        ("gyro_sample", k4a_float3_t),             # gyroscope reading
        ("gyro_timestamp_usec", ctypes.c_uint64),  # gyroscope timestamp, microseconds
    ]

# Public alias matching the C typedef name.
k4a_imu_sample_t = _k4a_imu_sample_t
# Index of the default (first enumerated) device, per k4a.h.
K4A_DEVICE_DEFAULT = 0
# Timeout value meaning 'block until a result is available'.
K4A_WAIT_INFINITE = -1

# TODO(Andoryuuta): Not sure if a single instance of the default config like this will work, might need a creation function.
# NOTE(review): this mirrors the K4A_DEVICE_CONFIG_INIT_DISABLE_ALL initializer
# from k4a.h (all streams disabled), but as a single shared mutable instance --
# any caller that mutates it affects every other user of this module.
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL = k4a_device_configuration_t()
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.color_format = K4A_IMAGE_FORMAT_COLOR_MJPG
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.color_resolution = K4A_COLOR_RESOLUTION_OFF
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.depth_mode = K4A_DEPTH_MODE_OFF
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.camera_fps = K4A_FRAMES_PER_SECOND_30
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.synchronized_images_only = False
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.depth_delay_off_color_usec = 0
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.wired_sync_mode = K4A_WIRED_SYNC_MODE_STANDALONE
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.subordinate_delay_off_master_usec = 0
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.disable_streaming_indicator = False
# Functions
#
# Each binding below mirrors one K4A_EXPORT prototype from k4a.h (quoted in
# the comment/string above it): restype is the C return type (the result and
# wait-result enums map to c_int; void returns leave restype at the ctypes
# default) and argtypes is the C parameter list.  All calls go through the
# _k4a library handle loaded at import time.

#K4A_EXPORT k4a_result_t k4a_device_open(uint32_t index, k4a_device_t *device_handle);
k4a_device_open = _k4a.k4a_device_open
k4a_device_open.restype=ctypes.c_int
k4a_device_open.argtypes=(ctypes.c_uint32, ctypes.POINTER(k4a_device_t))

#K4A_EXPORT k4a_result_t k4a_device_start_cameras(k4a_device_t device_handle, const k4a_device_configuration_t *config);
k4a_device_start_cameras = _k4a.k4a_device_start_cameras
k4a_device_start_cameras.restype=ctypes.c_int
k4a_device_start_cameras.argtypes=(k4a_device_t, ctypes.POINTER(k4a_device_configuration_t))

"""
K4A_EXPORT k4a_result_t k4a_device_get_calibration(k4a_device_t device_handle,
const k4a_depth_mode_t depth_mode,
const k4a_color_resolution_t color_resolution,
k4a_calibration_t *calibration);
"""
k4a_device_get_calibration = _k4a.k4a_device_get_calibration
k4a_device_get_calibration.restype=ctypes.c_int
k4a_device_get_calibration.argtypes=(k4a_device_t, ctypes.c_int, ctypes.c_int, ctypes.POINTER(k4a_calibration_t))

"""
K4A_EXPORT k4a_wait_result_t k4a_device_get_capture(k4a_device_t device_handle,
k4a_capture_t *capture_handle,
int32_t timeout_in_ms);
"""
k4a_device_get_capture = _k4a.k4a_device_get_capture
k4a_device_get_capture.restype=ctypes.c_int
k4a_device_get_capture.argtypes=(k4a_device_t, ctypes.POINTER(k4a_capture_t), ctypes.c_int32)

#K4A_EXPORT void k4a_capture_release(k4a_capture_t capture_handle);
k4a_capture_release = _k4a.k4a_capture_release
k4a_capture_release.argtypes=(k4a_capture_t,)

#K4A_EXPORT void k4a_image_release(k4a_image_t image_handle);
k4a_image_release = _k4a.k4a_image_release
k4a_image_release.argtypes=(k4a_image_t,)

#K4A_EXPORT void k4a_device_stop_cameras(k4a_device_t device_handle);
k4a_device_stop_cameras = _k4a.k4a_device_stop_cameras
k4a_device_stop_cameras.argtypes=(k4a_device_t,)

#K4A_EXPORT void k4a_device_close(k4a_device_t device_handle);
k4a_device_close = _k4a.k4a_device_close
k4a_device_close.argtypes=(k4a_device_t,)
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
app.py | #!/usr/bin/env python
import urllib
import json
import os
import re
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
# Module-level Flask application object; the route handlers below register on it.
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
    """Fulfillment endpoint: parse the incoming JSON, delegate to
    processRequest, and return its result as a JSON response."""
    payload = request.get_json(silent=True, force=True)
    print("Request:")
    print(json.dumps(payload, indent=4))

    body = json.dumps(processRequest(payload), indent=4)

    response = make_response(body)
    response.headers['Content-Type'] = 'application/json'
    return response
def processRequest(req):
    """Handle a 'yahooWeatherForecast' action: build a property-search URL
    from the request parameters, query the backend, and format the reply.

    Returns an empty dict for any other action.
    """
    if req.get("result").get("action") != "yahooWeatherForecast":
        return {}
    city_names = processlocation(req)
    sector_names = processSector(req)
    property_type = processPropertyType(req)
    minimum_value = processMinimum(req)
    maximum_value = processMaximum(req)
    latest = processLatestProperties(req)
    # Keep the price range ordered low -> high.
    # NOTE(review): these values come straight from the JSON payload and are
    # concatenated as strings below, so this comparison may be lexicographic;
    # confirm the agent always sends numeric strings.
    if minimum_value > maximum_value:
        minimum_value, maximum_value = maximum_value, minimum_value
    # BUGFIX: the separator before sector_name had been corrupted into the
    # HTML entity for '&sect' ('§or_name='), so the backend never received a
    # 'sector_name' query parameter; restore the intended '&sector_name='.
    # NOTE(review): values are not URL-encoded; consider urllib.quote if
    # parameters can contain spaces or special characters.
    baseurl = ("https://fazendanatureza.com/bot/botarz.php?city_name=" + city_names
               + "&sector_name=" + sector_names
               + "&minPrice=" + minimum_value
               + "&maxPrice=" + maximum_value
               + "&type=" + property_type
               + "&LatestProperties=" + latest)
    result = urllib.urlopen(baseurl).read()
    data = json.loads(result)
    return makeWebhookResult(data)
def processlocation(req):
    """Pull the 'city' parameter out of a Dialogflow-style request dict."""
    return req.get("result").get("parameters").get("city")
def processSector(req):
    """Pull the 'Location' (sector) parameter out of the request dict."""
    params = req.get("result").get("parameters")
    return params.get("Location")
def processMinimum(req):
    """Pull the 'number' (minimum price) parameter out of the request dict."""
    return req.get("result").get("parameters").get("number")
def processMaximum(req):
    """Pull the 'number1' (maximum price) parameter out of the request dict."""
    params = req.get("result").get("parameters")
    return params.get("number1")
def processPropertyType(req):
    """Pull the 'PropertyType' parameter out of the request dict."""
    return req.get("result").get("parameters").get("PropertyType")
def processLatestProperties(req):
    """Pull the 'LatestProperties' parameter out of the request dict."""
    params = req.get("result").get("parameters")
    return params.get("LatestProperties")
def makeWebhookResult(data):
    """Shape the first two backend result rows into a webhook response.

    A Facebook generic-template 'message' card is assembled for the two
    properties but is not currently included in the returned payload (the
    data/contextOut wiring is commented out); only speech/displayText are
    sent back.
    """
    first = data[0]
    second = data[1]
    # Read every row field up front, exactly like the original implementation
    # (the price values are fetched but not used in the reply).
    first_id, first_title = first['p_id'], first['title']
    first_address, first_price = first['address'], first['price']
    second_id, second_title = second['p_id'], second['title']
    second_address, second_price = second['address'], second['price']

    speech = "This is the response from server." + first_title
    print("Response:")
    print(speech)

    if "unable" in first_title:
        # Backend reported a lookup failure; fall back to a plain-text card.
        message = {
            "text": first_title
        }
    else:
        def _card(row_id, title, subtitle):
            # One generic-template element describing a single property.
            return {
                "title": title,
                "subtitle": subtitle,
                "item_url": "http://www.aarz.pk/property-detail?id=" + row_id,
                "image_url": "http://www.aarz.pk/assets/images/properties/"
                             + row_id + "/" + row_id + ".actual.1.jpg",
                "buttons": [{
                    "type": "web_url",
                    "url": "www.aarz.pk",
                    "title": "Open Web URL"
                }],
            }

        message = {
            "attachment": {
                "type": "template",
                "payload": {
                    "template_type": "generic",
                    "elements": [
                        _card(first_id, first_title, first_address),
                        _card(second_id, second_title, second_address),
                    ],
                },
            }
        }

    return {
        "speech": speech,
        "displayText": speech,
        #"originalRequest":{"source":"facebook", "data":{"facebook": message}}
        #"data": {"facebook": message},
        # "contextOut": [],
        #"source": "apiai-weather-webhook-sample"
    }
if __name__ == '__main__':
    # Default to 5000 when the PORT environment variable (e.g. set by the
    # hosting platform) is absent.
    port = int(os.getenv('PORT', 5000))
    # BUGFIX: 'print "..."' is Python-2-only statement syntax; use the call
    # form, which the rest of this module already uses and which behaves
    # identically on Python 2 and 3 for a single argument.
    print("Starting app on port %d" % port)
    app.run(debug=False, port=port, host='0.0.0.0')
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.