filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129, may be null) | variableargcount (float64, 0, may be null) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
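Each row below pairs a source file with the environment variables it reads (the constarg/constargjson columns). The following is a minimal sketch of consuming the rows with the Hugging Face datasets library, assuming the table is published in a datasets-loadable form; the path "path/to/this-dataset" is a placeholder, and json is used only because constargjson appears to hold a JSON-encoded list of names.
import json
from datasets import load_dataset  # assumption: the table is loadable via the datasets library

ds = load_dataset("path/to/this-dataset", split="train")  # placeholder dataset path
for row in ds:
    env_vars = json.loads(row["constargjson"])  # e.g. ["LIBRA_CONFIG_DIR", "LOG_LEVEL"]
    print(row["filename"], row["lang"], int(row["constargcount"] or 0), env_vars)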
command/server.go
|
package command
import (
"fmt"
"log"
"math/rand"
"net/http"
"os"
"strings"
"flag"
"time"
"github.com/ant0ine/go-json-rest/rest"
"github.com/mitchellh/cli"
"github.com/sirupsen/logrus"
"github.com/YotpoLtd/libra/api"
"github.com/YotpoLtd/libra/backend"
"github.com/YotpoLtd/libra/config"
"github.com/YotpoLtd/libra/nomad"
"github.com/YotpoLtd/libra/structs"
"gopkg.in/robfig/cron.v2"
)
// ServerCommand is a Command implementation that runs the Libra server.
type ServerCommand struct {
ConfDir string
Ui cli.Ui
}
func (c *ServerCommand) Help() string {
helpText := `
Usage: libra server [options]
Run a Libra server. The other commands require a server to be configured.
`
return strings.TrimSpace(helpText)
}
func (c *ServerCommand) Run(args []string) int {
serverFlags := flag.NewFlagSet("server", flag.ContinueOnError)
serverFlags.StringVar(&c.ConfDir, "conf", "/etc/libra", "Config directory for Libra")
if err := serverFlags.Parse(args); err != nil {
return 1
}
if os.Getenv("LOG_LEVEL") == "DEBUG" {
logrus.SetLevel(logrus.DebugLevel)
logrus.Debug("Log level set - DEBUG")
}
os.Setenv("LIBRA_CONFIG_DIR", c.ConfDir)
s := rest.NewApi()
logger := logrus.New()
w := logger.Writer()
defer w.Close()
loggingMw := &rest.AccessLogApacheMiddleware{
Logger: log.New(w, "[access] ", 0),
}
mw := []rest.Middleware{
loggingMw,
&rest.ContentTypeCheckerMiddleware{},
&rest.GzipMiddleware{},
&rest.JsonIndentMiddleware{},
&rest.PoweredByMiddleware{},
&rest.RecorderMiddleware{},
&rest.RecoverMiddleware{
EnableResponseStackTrace: true,
},
&rest.TimerMiddleware{},
}
s.Use(mw...)
router, err := rest.MakeRouter(
rest.Post("/scale", api.ScaleHandler),
rest.Post("/capacity", api.CapacityHandler),
rest.Post("/grafana", api.GrafanaHandler),
rest.Get("/backends", api.BackendsHandler),
rest.Get("/ping", api.PingHandler),
rest.Get("/", api.HomeHandler),
rest.Post("/restart", api.RestartHandler),
)
if err != nil {
logrus.Fatal(err)
}
s.SetApp(router)
cr, _, err := loadRules()
if err != nil {
logrus.Errorf("Problem with the Libra server: %s", err)
return 1
}
cr.Start()
err = http.ListenAndServe(":8646", s.MakeHandler())
if err != nil {
logrus.Errorf("Problem with the Libra server: %s", err)
return 1
}
return 0
}
func (c *ServerCommand) Synopsis() string {
return "Run a Libra server"
}
func loadRules() (*cron.Cron, []cron.EntryID, error) {
config, err := config.NewConfig(os.Getenv("LIBRA_CONFIG_DIR"))
if err != nil {
logrus.Errorf("Failed to read or parse config file: %s", err)
}
logrus.Info("Loaded and parsed configuration file")
n, err := nomad.NewClient(config.Nomad)
if err != nil {
log.Fatalf("Failed to create Nomad Client: %s", err)
}
logrus.Info("Successfully created Nomad Client")
dc, err := n.Agent().Datacenter()
if err != nil {
logrus.Fatalf(" Failed to get Nomad DC: %s", err)
}
logrus.Infof(" -> DC: %s", dc)
backends, err := backend.InitializeBackends(config.Backends)
if err != nil {
logrus.Fatalf("%s", err)
}
logrus.Info("")
logrus.Infof("Found %d backends", len(backends))
for name, b := range backends {
logrus.Infof(" -> %s (%s)", name, b.Info().Kind)
}
logrus.Info("")
logrus.Infof("Found %d jobs", len(config.Jobs))
cr := cron.New()
ids := []cron.EntryID{}
for _, job := range config.Jobs {
logrus.Infof(" -> Job: %s", job.Name)
for _, group := range job.Groups {
logrus.Infof(" --> Group: %s", group.Name)
logrus.Infof(" min_count = %d", group.MinCount)
logrus.Infof(" max_count = %d", group.MaxCount)
for name, rule := range group.Rules {
cfID, err := cr.AddFunc(rule.Period, createCronFunc(rule, &config.Nomad, job.Name, group.Name, group.MinCount, group.MaxCount))
if err != nil {
logrus.Errorf("Problem adding autoscaling rule to cron: %s", err)
return cr, ids, err
}
ids = append(ids, cfID)
logrus.Infof(" ----> Rule: %s", rule.Name)
if backends[rule.Backend] == nil {
return cr, ids, fmt.Errorf("Unknown backend: %s (%s)", rule.Backend, name)
}
rule.BackendInstance = backends[rule.Backend]
}
}
}
return cr, ids, nil
}
func createCronFunc(rule *structs.Rule, nomadConf *nomad.Config, job, group string, min, max int) func() {
return func() {
n := rand.Intn(10) // offset cron jobs slightly so they don't collide
time.Sleep(time.Duration(n) * time.Second)
backend.Work(rule, nomadConf, job, group, min, max)
}
}
| ["\"LOG_LEVEL\"", "\"LIBRA_CONFIG_DIR\""] | [] | ["LIBRA_CONFIG_DIR", "LOG_LEVEL"] | [] | ["LIBRA_CONFIG_DIR", "LOG_LEVEL"] | go | 2 | 0 | |
plugins/commands.py
|
import os
import math
import json
import time
import shutil
import heroku3
import requests
from pyrogram import filters
from pyrogram import Client as trojanz
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from script import Script
from plugins.helpers import humanbytes
from database.filters_mdb import filter_stats
from database.users_mdb import add_user, find_user, all_users
@trojanz.on_message(filters.command('id') & (filters.private | filters.group))
async def showid(client, message):
chat_type = message.chat.type
if chat_type == "private":
user_id = message.chat.id
await message.reply_text(
f"Your ID : `{user_id}`",
parse_mode="md",
quote=True
)
elif (chat_type == "group") or (chat_type == "supergroup"):
user_id = message.from_user.id
chat_id = message.chat.id
if message.reply_to_message:
reply_id = f"Replied User ID : `{message.reply_to_message.from_user.id}`"
else:
reply_id = ""
await message.reply_text(
f"Your ID : `{user_id}`\nThis Group ID : `{chat_id}`\n\n{reply_id}",
parse_mode="md",
quote=True
)
@trojanz.on_message(filters.command('info') & (filters.private | filters.group))
async def showinfo(client, message):
try:
cmd, id = message.text.split(" ", 1)
except:
id = False
pass
if id:
if (len(id) == 10 or len(id) == 9):
try:
checkid = int(id)
except:
await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md")
return
else:
await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md")
return
if Config.SAVE_USER == "yes":
name, username, dcid = await find_user(str(id))
else:
try:
user = await client.get_users(int(id))
name = str(user.first_name + (user.last_name or ""))
username = user.username
dcid = user.dc_id
except:
name = False
pass
if not name:
await message.reply_text("__USER Details not found!!__", quote=True, parse_mode="md")
return
else:
if message.reply_to_message:
name = str(message.reply_to_message.from_user.first_name\
+ (message.reply_to_message.from_user.last_name or ""))
id = message.reply_to_message.from_user.id
username = message.reply_to_message.from_user.username
dcid = message.reply_to_message.from_user.dc_id
else:
name = str(message.from_user.first_name\
+ (message.from_user.last_name or ""))
id = message.from_user.id
username = message.from_user.username
dcid = message.from_user.dc_id
if not str(username) == "None":
user_name = f"@{username}"
else:
user_name = "none"
await message.reply_text(
f"<b>Name</b> : {name}\n\n"
f"<b>User ID</b> : <code>{id}</code>\n\n"
f"<b>Username</b> : {user_name}\n\n"
f"<b>Permanant USER link</b> : <a href='tg://user?id={id}'>Click here!</a>\n\n"
f"<b>DC ID</b> : {dcid}\n\n",
quote=True,
parse_mode="html"
)
@trojanz.on_message((filters.private | filters.group) & filters.command('status'))
async def bot_status(client,message):
if str(message.from_user.id) not in Config.AUTH_USERS:
return
chats, filters = await filter_stats()
if Config.SAVE_USER == "yes":
users = await all_users()
userstats = f"> __**{users} users have interacted with your bot!**__\n\n"
else:
userstats = ""
if Config.HEROKU_API_KEY:
try:
server = heroku3.from_key(Config.HEROKU_API_KEY)
user_agent = (
'Mozilla/5.0 (Linux; Android 10; SM-G975F) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/80.0.3987.149 Mobile Safari/537.36'
)
accountid = server.account().id
headers = {
'User-Agent': user_agent,
'Authorization': f'Bearer {Config.HEROKU_API_KEY}',
'Accept': 'application/vnd.heroku+json; version=3.account-quotas',
}
path = "/accounts/" + accountid + "/actions/get-quota"
request = requests.get("https://api.heroku.com" + path, headers=headers)
if request.status_code == 200:
result = request.json()
total_quota = result['account_quota']
quota_used = result['quota_used']
quota_left = total_quota - quota_used
total = math.floor(total_quota/3600)
used = math.floor(quota_used/3600)
hours = math.floor(quota_left/3600)
minutes = math.floor(quota_left/60 % 60)
days = math.floor(hours/24)
usedperc = math.floor(quota_used / total_quota * 100)
leftperc = math.floor(quota_left / total_quota * 100)
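# Worked example of the arithmetic above (illustrative numbers, not real API values):
# total_quota = 1_980_000 s and quota_used = 990_000 s give total = 550 h, used = 275 h,
# hours = 275, minutes = 0, days = 11, usedperc = 50, leftperc = 50.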
quota_details = f"""
**Heroku Account Status**
> __You have **{total} hours** of free dyno quota available each month.__
> __Dyno hours used this month__ ;
- **{used} hours** ( {usedperc}% )
> __Dyno hours remaining this month__ ;
- **{hours} hours** ( {leftperc}% )
- **Approximately {days} days!**
"""
else:
quota_details = ""
except:
print("Check your Heroku API key")
quota_details = ""
else:
quota_details = ""
uptime = time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - Config.BOT_START_TIME))
try:
t, u, f = shutil.disk_usage(".")
total = humanbytes(t)
used = humanbytes(u)
free = humanbytes(f)
disk = "\n**Disk Details**\n\n" \
f"> USED : {used} / {total}\n" \
f"> FREE : {free}\n\n"
except:
disk = ""
await message.reply_text(
"**Current status of your bot!**\n\n"
f"> __**{filters}** filters across **{chats}** chats__\n\n"
f"{userstats}"
f"> __BOT Uptime__ : **{uptime}**\n\n"
f"{quota_details}"
f"{disk}",
quote=True,
parse_mode="md"
)
@trojanz.on_message(filters.command('start') & filters.private)
async def start(client, message):
await message.reply_text(
text=Script.START_MSG.format(message.from_user.mention),
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Command Help", callback_data="help_data")
]
]
),
reply_to_message_id=message.message_id
)
if Config.SAVE_USER == "yes":
try:
await add_user(
str(message.from_user.id),
str(message.from_user.username),
str(message.from_user.first_name + " " + (message.from_user.last_name or "")),
str(message.from_user.dc_id)
)
except:
pass
@trojanz.on_message(filters.command('help') & filters.private)
async def help(client, message):
await message.reply_text(
text=Script.HELP_MSG,
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Temanesia", url="https://t.me/temanesiaonline"),
InlineKeyboardButton("About Me", callback_data="about_data")
],
[
InlineKeyboardButton("Channel Kata", url="https://t.me/racauanhatii"),
InlineKeyboardButton("Bot Info", url="https://t.me/wysupportinfo")
]
]
),
reply_to_message_id=message.message_id
)
@trojanz.on_message(filters.command('about') & filters.private)
async def about(client, message):
await message.reply_text(
text=Script.ABOUT_MSG,
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
"SOURCE CODE", url="https://kepo lu ye wkwk")
],
[
InlineKeyboardButton("BACK", callback_data="help_data"),
InlineKeyboardButton("CLOSE", callback_data="close_data"),
]
]
),
reply_to_message_id=message.message_id
)
| [] | [] | ["WEBHOOK"] | [] | ["WEBHOOK"] | python | 1 | 0 | |
cs-script.py
|
import os
import io
import codecs
import sys
import html
import time
import sublime
import sublime_plugin
import subprocess
import shutil
import threading
from subprocess import Popen, PIPE, STDOUT
from os import path
version = '1.2.10' # build 0
os.environ["cs-script.st3.ver"] = version
if sys.version_info < (3, 3):
raise RuntimeError('CS-Script.ST3 works with Sublime Text 3 only')
# -------------------------
def is_script_file(file):
return file.endswith(".cs") or file.endswith(".csx")
def settings():
return sublime.load_settings("cs-script.sublime-settings")
def save_settings():
return sublime.save_settings("cs-script.sublime-settings")
# -------------------------
def on_plugin_loaded():
def which(file):
try:
out_file = os.path.join(plugin_dir, "..", "User", 'which.txt')
with open(out_file, "w") as f:
popen_redirect_tofile(['which', file], f).wait()
output = None
with open(out_file, "r") as f:
output = f.read().strip()
if os.path.exists(out_file):
os.remove(out_file)
return output
except Exception as e:
print('Cannot execute "which" for '+file+'.', e)
# on Mac the path to mono is not added to envar PATH
# so need to probe for it
if is_mac():
mono_path = settings().get('mono_path', None)
if not mono_path:
if path.exists('/usr/local/bin/mono'):
mono_path = '/usr/local/bin'
else:
mono_path = which("mono")
if mono_path:
print('Adding mono path to envar PATH.', mono_path)
os.environ["PATH"] += os.pathsep + mono_path
class CodeViewTextCommand(sublime_plugin.TextCommand):
# -----------------
def is_enabled(self):
return is_csharp(self.view)
# -----------------
def is_visible(self):
panel_name = self.view.window().active_panel()
if panel_name and panel_name == 'output.CS-Script':
panel = self.view.window().find_output_panel(panel_name[len('output.'):])
return not(panel is not None and panel.id() == self.view.id())
else:
return True
# -------------------------
plugin_dir = os.path.dirname(__file__)
plugin_name = path.basename(plugin_dir)
new_file_path = path.join(path.dirname(plugin_dir), 'User', 'cs-script', 'new_script.cs')
bin_dest = path.join(path.dirname(plugin_dir), 'User', 'cs-script'+ os.sep)
bin_src = path.join(plugin_dir, 'bin')
current_bin_dest = path.join(bin_dest+'syntaxer_v'+version)
if not os.path.isdir(current_bin_dest):
os.environ["new_deployment"] = 'true'
# -------------------------
def clear_old_versions_but(version):
if os.getenv("new_deployment") == 'true':
try:
if os.name == 'nt':
os.system('taskkill /f /im VBCSCompiler.exe') # stop roslyn server if it is running
os.system('taskkill /f /im syntaxer.exe') # stop syntaxer
except:
pass
old_syntaxer_exe = path.join(bin_dest, 'syntaxer.exe')
try:
os.remove(old_syntaxer_exe)
except:
pass
sub_dirs = [name for name in os.listdir(bin_dest)
if os.path.isdir(os.path.join(bin_dest, name))]
for dir in sub_dirs:
if dir.startswith('syntaxer') and not dir.endswith(version):
try:
shutil.rmtree(path.join(bin_dest, dir))
except:
pass
# -------------------------
def ensure_default_config(csscriptApp):
config_file = path.join(path.dirname(csscriptApp), 'css_config.xml')
if not path.exists(config_file):
subprocess.Popen(to_args([csscriptApp, '-config:create']),
stdout=subprocess.PIPE,
cwd=path.dirname(csscriptApp),
shell=True).wait()
updated_config = ''
with open(config_file, "r") as f:
updated_config = f.read()
if os.name == 'nt':
updated_config = updated_config.replace("<useAlternativeCompiler></useAlternativeCompiler>",
"<useAlternativeCompiler>CSSRoslynProvider.dll</useAlternativeCompiler>")
need_explicit_tuple_ref = False
if need_explicit_tuple_ref:
updated_config = updated_config.replace("</defaultRefAssemblies>",
" %syntaxer_dir%"+os.sep+"System.ValueTuple.dll</defaultRefAssemblies>")
else:
updated_config = updated_config.replace(" %syntaxer_dir%"+os.sep+"System.ValueTuple.dll", "")
updated_config = updated_config.replace("<roslynDir></roslynDir>",
"<roslynDir>%syntaxer_dir%</roslynDir>")
with open(config_file, "w") as file:
file.write(updated_config)
else:
# update existing config to be compatible with the current cscs.exe
if os.getenv("new_deployment") == 'true':
with open(config_file, "r") as f:
updated_config = f.read()
updated_config = updated_config.replace("%syntaxer_dir%"+os.sep+"System.ValueTuple.dll", "")
with open(config_file, "w") as file:
file.write(updated_config)
# -------------------------
def ensure_default_roslyn_config(csscriptApp):
if os.getenv("new_deployment") == 'true':
if os.name == 'nt':
subprocess.Popen(to_args([csscriptApp, '-config:set:RoslynDir="'+current_bin_dest+'"']),
stdout=subprocess.PIPE,
cwd=path.dirname(csscriptApp),
shell=True).wait()
subprocess.Popen(to_args([csscriptApp, '-config:set:useAlternativeCompiler=CSSRoslynProvider.dll']),
stdout=subprocess.PIPE,
cwd=path.dirname(csscriptApp),
shell=True).wait()
# -------------------------
def deploy_shadow_bin(file_name, subdir = None):
if not path.exists(bin_dest):
os.makedirs(bin_dest)
dest_dir = bin_dest
if subdir:
dest_dir = path.join(dest_dir, subdir)
if not path.exists(dest_dir):
os.makedirs(dest_dir)
src = path.join(bin_src, file_name)
dest = path.join(dest_dir, file_name)
try:
# print('deploying', dest)
if not path.exists(dest) or os.stat(src).st_size != os.stat(dest).st_size:
shutil.copy2(src, dest_dir)
else:
shutil.copy2(src, dest_dir)
except Exception as ex :
print('deploy_shadow_bin', ex)
pass
return dest
# -------------------------
# deploy an initial copy of cscs.exe so syntaxer can start but clear csscriptApp
# so it can be later set from settings
if os.name == 'nt':
src = path.join(bin_src, 'nuget.win.exe')
dest = path.join(bin_src, 'nuget.exe')
if path.exists(src):
if path.exists(dest):
os.remove(dest)
os.rename(src, dest)
else:
src = path.join(bin_src, 'nuget.win.exe')
if path.exists(src):
os.remove(src)
deploy_shadow_bin('cscs.exe')
csscriptApp = None
deploy_shadow_bin('CSSRoslynProvider.dll')
syntaxerApp = deploy_shadow_bin('syntaxer.exe', "syntaxer_v"+version)
syntaxerPort = settings().get('server_port', 18000)
showTooltipOverGutter = settings().get('show_tooltip_over_gutter', True)
os.environ["syntaxer_dir"] = path.dirname(syntaxerApp)
# os.environ["CSSCRIPT_ROSLYN"] = path.dirname(syntaxerApp) may need to be the way for future
# print('syntaxer_dir', os.environ["syntaxer_dir"])
clear_old_versions_but(version)
# -------------------------
def read_engine_config():
global csscriptApp
deployment_dir = bin_src
deployment_dir = bin_dest
cscs_path = settings().get('cscs_path', './cscs.exe')
if cscs_path == None:
cscs_path = csscriptApp = path.join(deployment_dir, 'cscs.exe')
elif cscs_path:
if cscs_path == './cscs.exe':
csscriptApp = path.join(deployment_dir, 'cscs.exe')
else:
csscriptApp = os.path.abspath(os.path.expandvars(cscs_path))
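# Resolution sketch (paths are illustrative): the default cscs_path './cscs.exe' maps to
# cscs.exe under bin_dest (the 'User/cs-script' deployment dir); any other value is passed
# through os.path.expandvars and os.path.abspath, e.g. a hypothetical '/opt/cs-script/cscs.exe'.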
# -------------------------
read_engine_config()
def print_config():
print('----------------')
print('cscs.exe: ', csscriptApp)
print('syntaxer.exe: ', syntaxerApp)
print('syntaxer port: ', syntaxerPort)
print('syntaxcheck_on_save: ', settings().get('syntaxcheck_on_save', True))
print('server_autostart: ', settings().get('server_autostart', True))
print('----------------')
# -------------------------
from .imports.utils import *
from .imports.syntaxer import *
from .imports.setup import *
csscript_setup.version = version
def get_css_version():
try:
version = ''
clr_version = ''
print('read ver:')
# //proc = subprocess.Popen(to_args([csscriptApp, "-ver"]), stdout=subprocess.PIPE, shell=True)
print(csscriptApp)
proc = popen_redirect([csscriptApp, "-ver"])
prefix = 'C# Script execution engine. Version'
clr_prefix = 'CLR:'
for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
# print('-',line)
line = line.strip()
if prefix in line:
# C# Script execution engine. Version 3.19.1.0.
version = line[len(prefix):].strip().strip('.')
if clr_prefix in line:
# CLR: 4.0.30319.42000
ver_str = line.split(':')[1]
print('ver:',ver_str.split('(')[0].strip())
clr_version = line.split(':')[1].split('(')[0].strip()
return (version, clr_version)
except Exception as e:
print(e)
return (None, None)
# =================================================================================
# TODO
# Detect upgrade and fresh install for showing readme/help
# csscript_execute_and_wait should only be available on windows
# =================================================================================
formatted_views = {}
def is_formatted(view):
if view.id() in formatted_views.keys():
last_format_time = formatted_views[view.id()]
return time.time() - last_format_time < 2
return False
def mark_as_formatted(view):
formatted_views[view.id()] = time.time()
# =================================================================================
# C#/CS-Script pugin "new script" service
# =================================================================================
class csscript_new(sublime_plugin.TextCommand):
# -----------------
def run(self, edit):
backup_file = None
if os.path.exists(new_file_path):
backup_file = new_file_path+'.bak'
if os.path.exists(backup_file):
os.remove(backup_file)
os.rename(new_file_path, backup_file)
backup_comment = ''
if backup_file:
backup_comment = '// The previous content of this file has been saved into \n' + \
'// '+backup_file+' \n'
content = csscript_setup.prepare_new_script().replace('$backup_comment$', backup_comment)
with open(new_file_path, "w") as file:
file.write(content)
if os.path.exists(new_file_path):
sublime.active_window().open_file(new_file_path)
# =================================================================================
# C#/CS-Script plugin help service
# =================================================================================
class csscript_help(sublime_plugin.TextCommand):
# -----------------
def run(self, edit):
file = csscript_setup.prepare_readme()
if os.path.exists(file):
sublime.active_window().open_file(file)
else:
sublime.error_message('Cannot find '+file)
# =================================================================================
# C#/CS-Script CS-Script help service
# =================================================================================
class csscript_css_help(sublime_plugin.TextCommand):
# -----------------
def run(self, edit):
file = csscript_setup.prepare_css_help()
if os.path.exists(file):
sublime.active_window().open_file(file)
else:
sublime.error_message('Cannot find '+file)
# =================================================================================
# C#/CS-Script syntaxer restart service
# =================================================================================
class settings_listener(sublime_plugin.EventListener):
hooked = False
# -----------------
def on_activated(self, view):
if not settings_listener.hooked:
settings_listener.hooked = True
on_plugin_loaded()
self.callback()
os.environ['CSSCRIPT_SYNTAXER_PORT'] = str(syntaxerPort)
settings().add_on_change("cscs_path", self.callback)
settings().add_on_change("server_port", self.on_port_changed)
print_config()
def on_port_changed(self):
global syntaxerPort
# may be fired when setting are not available yet
try:
port = settings().get('server_port', 18000)
if syntaxerPort != port:
syntaxerPort = port
os.environ['CSSCRIPT_SYNTAXER_PORT'] = str(syntaxerPort)
except :
pass
def callback(self):
sublime.set_timeout_async(self.process_settings_change, 100)
def process_settings_change(self):
global csscriptApp
# may be fired when setting are not available yet
try:
if csscriptApp != settings().get('cscs_path', '<none>'):
read_engine_config()
# sublime.error_message('About to send '+csscriptApp)
set_engine_path(csscriptApp)
if settings().get('suppress_embedded_nuget_execution', False):
# the default nuget app on Linux (e.g. Mint 18) is incompatible with std.out redirection.
# This is valid for both Python and .NET apps hosted by ST3. So suppress execution of 'nuget'
# by cscs.exe internally for resolving packages.
if os.name != 'nt':
os.environ["NUGET_INCOMPATIBLE_HOST"] = 'true'
else:
try:
os.unsetenv('NUGET_INCOMPATIBLE_HOST')
except Exception as e:
pass
ensure_default_config(csscriptApp)
ensure_default_roslyn_config(csscriptApp)
if os.getenv("new_deployment") != 'true' and os.getenv("engine_preloaded") != 'true':
os.environ["engine_preloaded"] = 'true'
# Preloading only improves the initial overhead for compiling but not for the intellisense.
# Important: must wait a bit to allow Roslyn binaries to finish copying (if they are being moved)
sublime.set_timeout(preload_engine, 5000)
except:
pass
# =================================================================================
# C#/CS-Script completion service
# =================================================================================
class csscript_listener(sublime_plugin.EventListener):
test_count = 0
suppress_post_save_checking = False
post_save_jobs = []
# -----------------
def __init__(self, *args, **kwargs):
sublime_plugin.EventListener.__init__(self, *args, **kwargs)
# -----------------
def on_activated(self, view):
pass
# -----------------
def on_window_command(self, vindow, command_name, args):
pass
# -----------------
def on_hover(self, view, point, hover_zone):
if is_output_panel(view) and view == sublime.active_window().find_output_panel(out_panel):
csscript_show_tooltip(view, point).do_output_panel()
elif is_csharp(view):
if hover_zone == sublime.HOVER_TEXT:
csscript_show_tooltip(view, point).do()
if showTooltipOverGutter and hover_zone == sublime.HOVER_GUTTER:
csscript_show_tooltip(view, point).do_gutter()
# -----------------
def on_post_text_command(self, view, command_name, args):
if command_name == 'drag_select':
if is_output_panel(view):
if 'by' in args.keys() and args['by'] == 'words':
try:
point = view.sel()[0].begin()
line_region = view.line(point)
line_text = view.substr(line_region)
view.sel().clear()
view.sel().add(line_region)
sublime.status_message('Navigating to clicked item...')
navigate_to_file_ref(line_text)
except:
pass
# -----------------
def on_load_async(self, view):
csscript_syntax_check.show_errors()
# view.assign_syntax('Packages/C#/C#.tmLanguage')
# view.assign_syntax('Packages/Text/Plain text.tmLanguage')
# -----------------
def on_modified(self, view):
if is_csharp(view):
# >>> view.scope_name(view.sel()[0].begin())
# string scope
# 'source.cs meta.class.source.cs meta.class.body.source.cs meta.method.source.cs meta.method.body.source.cs meta.method-call.source.cs string.quoted.double.source.cs '
# comment scope
# 'source.cs meta.class.source.cs meta.class.body.source.cs meta.method.source.cs meta.method.body.source.cs comment.line.double-slash.source.cs '
# comment.line.double-slash.source.cs
# string.quoted.double.source.cs
point = view.sel()[0].begin()
scope = view.scope_name(point)
inside_of_string = "string.quoted" in scope
inside_of_comment = "comment.line" in scope or "comment.block" in scope
if not inside_of_string and not inside_of_comment:
typed_char = view.substr(point-1)
if '.' == typed_char:
if settings().get('auto_trigger_autocomplete', True):
view.window().run_command("auto_complete")
elif ('(' == typed_char or ',' == typed_char) :
if settings().get('auto_trigger_tooltip', True):
view.window().run_command("csscript_pop_tooltip")
# -----------------
def on_post_save(self, view):
# if not is_csharp(view):
# print('---------------')
# clear_console()
# sublime.log_commands(True)
if is_csharp(view):
# print('> on_post_save')
# view may be engaged in 'focus changing' activity (e.g. CodeMap)
# for i in range(5):
# if not is_csharp(sublime.active_window().active_view()):
# time.sleep(1)
active_view = sublime.active_window().active_view()
if active_view == view:
if not is_formatted(view) and settings().get('format_on_save', True):
# print('> formatting')
mark_as_formatted(view)
view.run_command("csscript_format_code")
view.run_command("save")
elif settings().get('syntaxcheck_on_save', True):
# print('>>>> syntax check')
view.window().run_command("csscript_syntax_check", {'skip_saving':True})
# -----------------
def is_enabled(self):
return is_csharp(sublime.active_window().active_view())
# -----------------
def on_query_completions(self, view, prefix, locations):
curr_doc = view.file_name()
if is_script_file(curr_doc):
completions = []
if not is_valid_selection(view):
sublime.status_message('Incompatible selection')
return completions
(curr_doc, location, as_temp_file) = get_saved_doc(view)
response = send_completion_request(curr_doc, location)
if as_temp_file:
os.remove(curr_doc)
completions = self.parse_response(response)
if completions:
return (completions, sublime.INHIBIT_EXPLICIT_COMPLETIONS | sublime.INHIBIT_WORD_COMPLETIONS)
# -----------------
def parse_response(self, response):
if not response:
return None
completions = []
error = None
for line in response.split('\n'):
if line.startswith('<error>'):
error = "Error: cannot get C# completion from the syntax server\n"
if not error:
parts = line.strip().split('|')
if len(parts) == 2:
completions.append((parts[0], parts[1]))
else:
error += line.replace('<error>', '')
if error:
print(error)
return completions
# =================================================================================
# CS-Script code formatter service
# =================================================================================
class csscript_show_config(sublime_plugin.TextCommand):
# -----------------
def run(self, edit):
ensure_default_config(csscriptApp)
config_file = path.join(path.dirname(csscriptApp), 'css_config.xml')
sublime.active_window().open_file(config_file)
# =================================================================================
# CS-Script code formatter service
# =================================================================================
class csscript_format_code(CodeViewTextCommand):
# -----------------
def run(self, edit):
mark_as_formatted(self.view)
sublime.status_message('Formatting script "'+self.view.file_name()+'"')
# position in text and in file may not be the same depending on line endings type
text_location = -1
if len(self.view.sel()) > 0:
text_location = self.view.sel()[0].begin()
(curr_doc, file_location, as_temp_file) = get_saved_doc(self.view)
response = send_formatting_request(curr_doc, file_location)
if as_temp_file:
os.remove(curr_doc)
if response.startswith('<error>'):
print('Formatting error:', response.replace('<error>', ''))
else:
parts = response.split('\n', 1)
new_file_location = int(parts[0])
formatted_code = parts[1]
new_text_location = to_text_pos(formatted_code, new_file_location)
new_text = formatted_code.replace('\r', '')
self.view.replace(edit, sublime.Region(0, self.view.size()), new_text)
# with open(self.view.file_name(), "w") as file:
# file.write(formatted_code)
# surprisingly mapping of selection is not required. ST3 does it by itself
# self.view.sel().clear()
# self.view.sel().add(sublime.Region(new_text_location, new_text_location))
# print('formatting done')
# sublime.active_window().run_command("save")
# =================================================================================
# CS-Script async replacement service
# =================================================================================
# DONE
class csscript_resolve_using_async(CodeViewTextCommand):
# -----------------
def run(self, edit, **args):
parts = args['region'].split(',')
region = sublime.Region(int(parts[0]), int(parts[1]))
replacement = args['replacement']
self.view.replace(edit, region, replacement)
# =================================================================================
# CS-Script resolve missing usings service
# =================================================================================
# DONE
class csscript_resolve_using(CodeViewTextCommand):
inprogress= False
# -----------------
def run(self, edit, **args):
view = self.view
self.edit = edit
self.point = None
self.first_suffestion = False
if 'point' in args.keys():
self.point = int(args['point'])
self.first_suffestion = True
if self.point == None:
if len(view.sel()) == 0:
return
else:
self.point = view.sel()[0].begin()
busy_indicator.show('Analyzing')
sublime.set_timeout(self.do, 10)
# -----------------
def do(self):
try:
csscript_resolve_using.inprogress = True
view = self.view
word_region = view.word(self.point)
word_to_resolve = view.substr(word_region)
# print('view', view.file_name())
(saved_doc, location, as_temp_file) = get_saved_doc(view)
response = send_resolve_using_request(saved_doc, word_to_resolve)
busy_indicator.hide()
if as_temp_file:
os.remove(saved_doc)
if response == '<null>':
pass
elif response.startswith('<error>'):
print(response.replace('<error>', 'CS-Script error: '))
else:
items = response.split('\n')
def on_done(index):
if index != -1:
for region in self.view.lines(sublime.Region(0,self.view.size())):
line = self.view.substr(region)
if not line.startswith("//"):
# cannot use 'self.view.replace(self.edit...' as edit is already invalid
# so need to start a new command that creates 'edit'; remember, 'edit' cannot be created from code
# self.view.replace(self.edit, sublime.Region(start, start), 'using '+items[index]+';'+'\n')
region = str(region.begin())+','+str(region.begin())
replacement = 'using '+items[index]+';'+'\n'
sublime.active_window().run_command("csscript_resolve_using_async", {'replacement':replacement, 'region':region})
sublime.active_window().run_command('save')
break
if self.first_suffestion:
on_done(0)
else:
self.view.show_popup_menu(items, on_done, 1)
except Exception as err:
print(err)
busy_indicator.hide()
csscript_resolve_using.inprogress = False
# =================================================================================
# CS-Script tooltip service (for invoking via hotkeys)
# =================================================================================
class csscript_pop_tooltip(CodeViewTextCommand):
# -----------------
def run(self, edit):
self.view.hide_popup()
point = self.view.sel()[0].begin()
left_char = self.view.substr(point-1)
right_char = self.view.substr(point)
line = self.view.line(point)
line_str = self.view.substr(line)
new_point = -1
if line_str.startswith('//css'):
csscript_show_tooltip(self.view, line.begin()).do()
return
if left_char == ' ' or left_char == '(' or left_char == ',' or right_char == ')':
new_point = point - 1
while self.view.substr(new_point) != '(':
new_point = new_point - 1
if new_point < line.begin():
new_point = -1
break
if new_point != -1:
hint = self.view.substr(sublime.Region(point, new_point))
csscript_show_tooltip(self.view, new_point+1, hint).do()
else:
csscript_show_tooltip(self.view, point).do()
# =================================================================================
# CS-Script references search service
# =================================================================================
class csscript_find_references(CodeViewTextCommand):
# -----------------
def run(self, edit):
clear_and_print_result_header(self.view.file_name())
sublime.set_timeout(self.do, 100)
def do(self):
(saved_doc, location, as_temp_file) = get_saved_doc(self.view)
response = send_resolve_references(saved_doc, location)
if as_temp_file:
os.remove(saved_doc)
response = response.replace(saved_doc, self.view.file_name())
if response == '<null>':
pass
elif response.startswith('<error>'):
print(response.replace('<error>', 'CS-Script error: '))
else:
output_view_write_line(out_panel, response)
# =================================================================================
# CS-Script project UI marshaling service (not in use yet)
# =================================================================================
class dispatcher(CodeViewTextCommand):
queue = []
# -----------------
def invoke(edit, action):
sublime.active_window().run_command('dispatcher', {'action': action})
pass
# -----------------
def run(self, edit, **args):
if 'action' in args.keys():
action = args['action']
try:
action(self, edit)
except Exception as ex:
print("dispatcher:", ex)
pass
# =================================================================================
# CS-Script project output panel service
# =================================================================================
class csscript_show_output(sublime_plugin.TextCommand):
# -----------------
def run(self, edit):
output_view_show(out_panel)
# =================================================================================
# CS-Script project resolver service
# =================================================================================
class csscript_about(sublime_plugin.TextCommand):
# -----------------
def run(self, edit):
def handle_line(line):
output_view_write_line(out_panel, line)
run_cscs(["-ver"], handle_line, header='CS-Script.ST3 - C# intellisense and execution plugin (v'+version+')')
# =================================================================================
# CS-Script project resolver service
# =================================================================================
# class csscript_load_proj(CodeViewTextCommand):
class csscript_list_proj_files(CodeViewTextCommand):
# -----------------
def handle_line(self, line):
curr_prefix = line.split(':', 1)[0]
if curr_prefix != self.prefix:
self.prefix = curr_prefix
# output_view_write_line(out_panel, '-------')
output_view_write_line(out_panel, line.replace(curr_prefix+':', curr_prefix+": "))
# -----------------
def run(self, edit):
view = self.view
self.prefix = 'file'
sublime.status_message('Checking script dependencies for "'+self.view.file_name()+'"')
if self.view.is_dirty():
sublime.active_window().run_command("save")
sublime.set_timeout(self.do, 100)
else:
self.do()
# -----------------
def do(self):
def on_done():
output_view_write_line(out_panel, "---------------------\n[Script dependencies]")
run_doc_in_cscs(["-nl", '-l', "-proj:dbg"], self.view, self.handle_line, on_done)
# =================================================================================
# CS-Script project (sources only) resolver service
# =================================================================================
class csscript_list_proj_sources(CodeViewTextCommand):
# -----------------
def handle_line(self, line):
curr_prefix = line.split(':', 1)[0]
if curr_prefix != self.prefix:
self.prefix = curr_prefix
# don't separate for now
# output_view_write_line(out_panel, '-------')
if not line.endswith('dbg.cs'):
if curr_prefix.startswith('file'):
text = line.replace("file:", '')
if self.prev_line:
output_view_write_line(out_panel, item_boxed_prefix + self.prev_line)
else:
output_view_write_line(out_panel, 'Sources')
self.prev_line = text
# -----------------
def run(self, edit):
view = self.view
self.prefix = 'file'
self.prev_line = None
sublime.status_message('Checking script dependencies for "'+self.view.file_name()+'"')
if self.view.is_dirty():
sublime.active_window().run_command("save")
sublime.set_timeout(self.do, 100)
else:
self.do()
# -----------------
def do(self):
def on_done():
if self.prev_line:
output_view_write_line(out_panel, last_item_boxed_prefix + self.prev_line)
self.prev_line = None
output_view_write_line(out_panel, "---------------------\n[Script sources]")
run_doc_in_cscs(["-nl", '-l', "-proj:dbg"], self.view, self.handle_line, on_done)
# =================================================================================
# CS-Script syntax check service
# =================================================================================
# DONE
class csscript_syntax_check(CodeViewTextCommand):
errors = {}
instance = None
# -----------------
def run(self, edit, **args):
view = self.view
sublime.status_message('Checking syntax of "'+view.file_name()+'"')
if view.is_dirty() and not 'skip_saving' in args.keys():
sublime.active_window().run_command("save")
curr_doc = view.file_name()
clear_and_print_result_header(curr_doc)
if not path.exists(csscriptApp):
print('Error: cannot find CS-Script launcher - ', csscriptApp)
elif not curr_doc:
print('Error: cannot find out the document path')
else:
clear_and_print_result_header(curr_doc)
if '//css_nuget' in view.substr(sublime.Region(0, view.size())):
output_view_write_line(out_panel, "Resolving NuGet packages may take time...")
csscript_syntax_check.clear_errors()
proc = popen_redirect([csscriptApp, "-nl", '-l', "-check", curr_doc])
first_result = True
for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
line = line.strip()
if first_result:
first_result = False
clear_and_print_result_header(curr_doc)
output_view_write_line(out_panel, line)
error_info = extract_location(line.strip())
if error_info:
file, line, column, context = error_info
file = os.path.abspath(file)
if file not in csscript_syntax_check.errors.keys():
csscript_syntax_check.errors[file] = []
csscript_syntax_check.errors[file].append((line, column, context))
output_view_write_line(out_panel, "[Syntax check]")
csscript_syntax_check.show_errors()
# -----------------
def has_errors(file):
for key in csscript_syntax_check.errors.keys():
if key.lower() == file.lower():
return True
return False
# -----------------
def get_errors(file, line): # line is 1-based
errors = []
for key in csscript_syntax_check.errors.keys():
if key.lower() == file.lower():
for error_info in csscript_syntax_check.errors[key]:
ln, col, cxt = error_info
if ln == line:
errors.append(error_info)
if len(errors) > 0:
break
return errors
# -----------------
def clear_errors():
for view in sublime.active_window().views():
if view.file_name():
view.erase_regions("cs-script.errors")
csscript_syntax_check.errors.clear()
# -----------------
def show_errors():
error_strong_appearence = settings().get('error_strong_appearence', False)
error_strong_appearence = True
for file in csscript_syntax_check.errors.keys():
view = find_file_view(file)
if view:
view.erase_regions("cs-script.errors")
regions = []
for line, column, context in csscript_syntax_check.errors[file]:
pt = view.text_point(line-1, column-1)
regions.append(view.word(pt))
# scope = settings().get('cs-script.syntaxerror_scope')
# https://www.sublimetext.com/docs/3/scope_naming.html
# http://docs.sublimetext.info/en/latest/reference/color_schemes.html
scope = 'invalid'
icon = 'Packages/'+plugin_name+'/images/error.png'
# icon = 'Packages/cs-script-sublime/images/error.png'
if error_strong_appearence:
flags = 0
else:
flags = sublime.DRAW_SQUIGGLY_UNDERLINE|sublime.DRAW_NO_FILL|sublime.DRAW_NO_OUTLINE
view.add_regions("cs-script.errors", regions, scope, icon, flags)
# =================================================================================
# C#/CS-Script kill running script service
# =================================================================================
class csscript_kills_script(sublime_plugin.TextCommand):
# -----------------
def is_enabled(self):
return csscript_execute_and_redirect.running_process != None
# -----------------
def is_visible(self):
panel_name = self.view.window().active_panel()
if panel_name and panel_name == 'output.CS-Script':
panel = self.view.window().find_output_panel(panel_name[len('output.'):])
return panel is not None and panel.id() == self.view.id()
else:
return False
# -----------------
def run(self, edit):
if csscript_execute_and_redirect.running_process:
try:
pid = csscript_execute_and_redirect.running_process.pid
sublime.status_message('Terminating...')
# extremely important to kill all process children
if os.name == 'posix':
subprocess.Popen(['pkill', '-TERM', '-P', str(pid)])
else:
send_pkill_request(pid, 'cscs')
except:
pass
# =================================================================================
# CS-Script tooltip service
# =================================================================================
class csscript_show_tooltip():
def __init__(self, view, point, hint=''):
self.view = view
self.point = point
self.location = point
self.hint = hint
# -----------------
def do_output_panel(self):
only_over_process_line = True
if only_over_process_line:
mouse_line, mouse_column = self.view.rowcol(self.point)
mouse_region = self.view.line(self.point)
line = self.view.substr(mouse_region)
else:
line_reg = self.view.line(self.view.text_point(2, 0))
line = self.view.substr(line_reg)
# print('>>>>>>', line)
if line.startswith('[Started pid: ') and csscript_execute_and_redirect.running_process:
try:
pid = int(line.replace('[Started pid: ','').replace(']',''))
link = '<a href="'+str(self.point)+'">Terminate process '+str(pid)+'</a>'
def terminate(arg):
sublime.status_message('Terminating...')
self.view.hide_popup()
# extremely important to kill all process children
if os.name == 'posix':
subprocess.Popen(['pkill', '-TERM', '-P', str(pid)])
else:
send_pkill_request(pid, 'cscs')
html = """
<body id=show-scope>
<style>
body { margin: 0; padding: 5; }
p { margin-top: 0;}
</style>
%s
</body>
""" % (link)
self.view.show_popup(html, location=self.point, flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY, max_width=600, on_navigate=terminate)
except :
pass
# -----------------
def do_gutter(self):
if csscript_resolve_using.inprogress: return
file = self.view.file_name()
line, column = self.view.rowcol(self.point)
errors = ''
for line, column, error in csscript_syntax_check.get_errors(file, line+1):
errors = errors + error.strip() + '<br>'
html = """
<body id=show-scope>
<style>
body { margin: 0; padding: 5; }
p { margin-top: 0;}
</style>
<p>%s</p>
</body>
""" % (errors)
self.view.show_popup(html, location=self.point, flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY, max_width=600)
# -----------------
def do(self):
if csscript_resolve_using.inprogress: return
# check if we are over the error region
# NOTE: view.text_point and view.rowcol operate in 0-based units and C# compiler errors are reported in 1-based ones
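# Illustrative example: a compiler error reported at line 10, column 5 (1-based)
# corresponds to view.text_point(9, 4) in the 0-based view coordinates used below.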
mouse_line, mouse_column = self.view.rowcol(self.point)
mouse_region = self.view.word(self.point)
for line, column, error in csscript_syntax_check.get_errors(self.view.file_name(), mouse_line+1):
error_region = self.view.word(self.view.text_point(line-1,column-1))
if error_region == mouse_region:
link = ''
# doesn't work yet
if 'CS0103' in error:
link = '<a href="'+str(self.point)+'">Try to fix it</a>'
html = """
<body id=show-scope>
<style>
body { margin: 0; padding: 5; }
p { margin-top: 0;}
</style>
<p>%s</p>
%s
</body>
""" % (error, link)
# html = '<body id=show-scope>'+error+'</body>'
self.view.show_popup(html, location=self.point, flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY, max_width=600, on_navigate=lambda x: self._try_to_fix(x))
return
(curr_doc, location, as_temp_file) = get_saved_doc(self.view, self.location)
text = send_tooltip_request(curr_doc, location, self.hint, settings().get('auto_tooltip_light_content', False))
if as_temp_file:
os.remove(curr_doc)
if text:
if text == '<null>':
# print('tooltip null')
return
elif text.startswith('<error>'):
print(text.replace('<error>', 'CS-Script generate tooltip error: '))
else:
self._show(text, self.point)
# -----------------
def _try_to_fix(self, point):
self.view.hide_popup()
sublime.set_timeout(lambda : sublime.active_window().run_command("csscript_resolve_using", {'point': point}), 100)
pass
# -----------------
def _go_to_definition(self):
self.view.hide_popup()
#unfortunately setting selection doesn't work when invoked from OnPopupHide callback
# self.view.sel().clear()
# self.view.sel().add(sublime.Region(self.location, self.location))
# sublime.active_window().run_command("csscript_goto_definition")
(curr_doc, location, as_temp_file) = get_saved_doc(self.view, self.location)
csscript_goto_definition.do(curr_doc, location, as_temp_file)
# -----------------
def decorate(self, text):
text = text.replace('\r', '')
def deco_section(text, style):
parts = text.split(':', 1)
if len(parts) > 1:
title = parts[0]
rest = parts[1]
return '<span class="title">'+title+':</span><span class="'+style+'">'+rest+'</span>\n'
else:
return '<span class="'+style+'">'+text+'</span>\n'
parts = text.split('\n', 1)
if len(parts) > 1:
signature = deco_section(parts[0],'sig')
doc = parts[1].strip()
exc = ''
pos = doc.find('Exceptions:')
if pos != -1:
exc = '<br>'+deco_section(doc[pos:], 'exc')+'<br>\n'
doc = doc[:pos].strip()
doc = '<br><span class="doc">'+doc+'</span><br>'
text = signature + doc + exc
else:
text = deco_section(text,'sig')
return text
# -----------------
def _show(self, text, text_point):
import html
if self.view.is_popup_visible():
return
text_value = html.escape(text, quote=False)
text_value = self.decorate(text_value)
text_value = text_value.replace('\n', '<br>')
html = """
<body id=show-scope>
<style>
body { margin: 0; padding: 5; }
p { margin-top: 0; }
a {
font-family: sans-serif;
font-size: 1.05rem;
}
span.title {
font-style: italic;
font-weight: bold;
font-size: 1.1rem;
padding: 0px;
}
span.doc {
font-style: italic;
}
span.exc {
padding: 15px;
}
span.sig {
}
</style>
<p>%s</p>
$go_def_link
</body>
""" % (text_value)
if self.hint:
html = html.replace('$go_def_link', '')
else:
html = html.replace('$go_def_link','<a href="dummy">Go to Definition</a>')
# print('---------------------')
# print(html)
# print('---------------------')
flg = sublime.HIDE_ON_MOUSE_MOVE|sublime.HIDE_ON_MOUSE_MOVE_AWAY # testing
flg = sublime.HIDE_ON_MOUSE_MOVE_AWAY
self.view.show_popup(html, flags=flg, location=text_point, max_width=600, on_navigate=lambda x: self._go_to_definition())
# =================================================================================
# CS-Script execute service with STD out redirection
# =================================================================================
class csscript_execute_and_redirect(CodeViewTextCommand):
running_process = None
# -----------------
def run(self, edit):
if is_mac():
sublime.error_message('On Mac you will need to start terminal manually and execute "mono cscs.exe <script path>"')
return
if csscript_execute_and_redirect.running_process:
print("Previous C# script is still running...")
return
sublime.status_message('Executing script "'+self.view.file_name()+'"')
if self.view.is_dirty():
csscript_listener.suppress_post_save_checking = True
sublime.active_window().run_command("save")
curr_doc = self.view.file_name()
def run():
script = curr_doc
clear_and_print_result_header(self.view.file_name())
process = popen_redirect([csscriptApp, "-nl", '-l', script])
output_view_write_line(out_panel, '[Started pid: '+str(process.pid)+']', True)
csscript_execute_and_redirect.running_process = process
def process_line(output, ignore_empty = False):
try:
output = output.decode('utf-8').rstrip()
if not ignore_empty or output != '':
output_view_append(out_panel, output)
except UnicodeDecodeError:
append_output('<Decoding error. You may want to adjust script output encoding in settings.>')
# process.terminate()
while process.poll() is None: # may not read the last few lines of output
output = process.stdout.readline()
process_line(output)
while (True): # drain any remaining data
try:
output = process.stdout.readline()
if output == b'':
break;
process_line(output, ignore_empty=True)
except :
pass
csscript_execute_and_redirect.running_process = None
output_view_write_line(out_panel, "[Execution completed]")
#must be done in a separate thread otherwise line rendering is suspended until process exits
sublime.set_timeout_async(run, 10)
# =================================================================================
# CS-Script build executable from the script
# =================================================================================
class csscript_build_exe(CodeViewTextCommand):
# -----------------
def run(self, edit):
view = self.view
self.prefix = 'file'
sublime.status_message('Building executable from the script "'+self.view.file_name()+'"')
if self.view.is_dirty():
sublime.active_window().run_command("save")
sublime.set_timeout(self.do, 100)
else:
self.do()
# -----------------
def do(self):
script_file = self.view.file_name()
pre, ext = os.path.splitext(script_file)
exe_file = pre + '.exe'
def handle_line(line):
output_view_write_line(out_panel, line)
def on_done():
if path.exists(exe_file):
output_view_write_line(out_panel,'Script is converted into executable ' + exe_file)
output_view_write_line(out_panel, "---------------------\n[Build exe]")
run_doc_in_cscs(["-nl", '-l', "-e"], self.view, handle_line, on_done)
# =================================================================================
# CS-Script execute service. Shell remains visible after the process termination
# =================================================================================
class csscript_execute_and_wait(CodeViewTextCommand):
# -----------------
def run(self, edit):
sublime.active_window().run_command("save")
curr_doc = self.view.file_name()
if not path.exists(csscriptApp):
print('Error: cannot find CS-Script launcher - ', csscriptApp)
else:
if os.name == 'nt':
proc = subprocess.Popen(to_args([csscriptApp, "-nl", '-l', '-wait', curr_doc]))
else:
# Linux and Mac
env = os.environ.copy()
env['SCRIPT_FILE'] = curr_doc
cwd = os.path.dirname(curr_doc)
css_command = to_args([csscriptApp, "-nl", '-l', '%SCRIPT_FILE%'])[0] # will wrap into quotations
command = "bash -c \"{0} ; exec bash\"".format(css_command)
args =[TerminalSelector.get(), '-e', command]
if 'NUGET_INCOMPATIBLE_HOST' in env:
del env['NUGET_INCOMPATIBLE_HOST']
subprocess.Popen(args, cwd=cwd, env=env)
# =================================================================================
# CS-Script go-to-next-result service
# =================================================================================
class csscript_next_result(sublime_plugin.WindowCommand):
# -----------------
def run(self):
view_name = sublime.active_window().active_panel()
if not view_name:
return
if view_name == 'output.exec':
self.window.run_command('next_result')
else:
if view_name.startswith('output.'):
view_name = view_name.replace('output.', '')
view = sublime.active_window().find_output_panel(view_name)
if not view or not view.window():
return
caret_point = view.sel()[0].begin()
caret_line_region = view.line(caret_point)
line_regions = view.lines(sublime.Region(0, view.size()))
next_location_index = -1
locations = []
for rg in line_regions:
line = view.substr(rg).strip()
info = extract_location(line)
if info:
if next_location_index == -1 and rg == caret_line_region:
next_location_index = len(locations)
locations.append((rg, line))
if len(locations) > 0:
next_location_index = next_location_index + 1
if next_location_index >= len(locations):
next_location_index = 0
line_region, line_text = locations[next_location_index]
view.sel().clear()
view.sel().add(line_region)
view.run_command('append', {'characters': ''}) # to force repainting the selection
navigate_to_file_ref(line_text)
# =================================================================================
# CS-Script go-to-definition service
# =================================================================================
class csscript_goto_definition(CodeViewTextCommand):
# -----------------
def run(self, edit):
view = self.view
curr_doc = self.view.file_name()
if is_script_file(curr_doc):
if not is_valid_selection(self.view):
sublime.status_message('Incompatible selection')
return
(curr_doc, location, as_temp_file) = get_saved_doc(view)
csscript_goto_definition.do(curr_doc, location, as_temp_file)
# -----------------
def do(curr_doc, location, as_temp_file):
response = send_resolve_request(curr_doc, location)
if as_temp_file:
os.remove(curr_doc)
path = csscript_goto_definition.parse_response(response)
if path:
file_name = os.path.basename(path).split(':')[0].lower()
if file_name.endswith('.dll') or file_name.endswith('.exe'):
dir_path = os.path.dirname(path)
sublime.active_window().run_command('open_dir', { 'dir': dir_path })
else:
sublime.active_window().open_file(path, sublime.ENCODED_POSITION)
# -----------------
def parse_response(response):
if not response:
return None
error = None
fileName = None
lineNum = None
for line in response.split('\n'):
if line.startswith('<error>'):
error = "Error: cannot resolve C# symbol\n"
if not error:
if line.startswith('file:'):
fileName = line[5:].strip()
if line.startswith('line:'):
lineNum = line[5:].strip()
else:
error += line.replace('<error>', '')
if error:
print(error)
elif fileName:
if fileName.endswith('.tmp'):
possible_original_file = fileName[:-4]
if os.path.exists(possible_original_file):
fileName = possible_original_file
# print("{0}:{1}:0".format(fileName, lineNum))
return "{0}:{1}:0".format(fileName, lineNum)
# =================================================================================
# CS-Script go-to-definition service
# =================================================================================
class csscript_show_output_panel(sublime_plugin.WindowCommand):
# -----------------
def run(self):
view = sublime.active_window().active_view()
if sublime.active_window().active_panel() == 'output.'+out_panel:
output_view_hide(out_panel)
else:
output_view_show(out_panel)
| [] | [] | ["engine_preloaded", "NUGET_INCOMPATIBLE_HOST", "CSSCRIPT_ROSLYN", "new_deployment", "syntaxer_dir", "PATH", "CSSCRIPT_SYNTAXER_PORT", "cs-script.st3.ver"] | [] | ["engine_preloaded", "NUGET_INCOMPATIBLE_HOST", "CSSCRIPT_ROSLYN", "new_deployment", "syntaxer_dir", "PATH", "CSSCRIPT_SYNTAXER_PORT", "cs-script.st3.ver"] | python | 8 | 0 | |
tools/program.py
|
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from collections import OrderedDict
import paddle.fluid as fluid
from ppcls.optimizer import LearningRateBuilder
from ppcls.optimizer import OptimizerBuilder
from ppcls.modeling import architectures
from ppcls.modeling.loss import CELoss
from ppcls.modeling.loss import MixCELoss
from ppcls.modeling.loss import JSDivLoss
from ppcls.modeling.loss import GoogLeNetLoss
from ppcls.utils.misc import AverageMeter
from ppcls.utils import logger
from paddle.fluid.incubate.fleet.collective import fleet
from paddle.fluid.incubate.fleet.collective import DistributedStrategy
from ema import ExponentialMovingAverage
def create_feeds(image_shape, use_mix=None):
"""
Create feeds as model input
Args:
image_shape(list[int]): model input shape, such as [3, 224, 224]
use_mix(bool): whether to use mix(include mixup, cutmix, fmix)
Returns:
feeds(dict): dict of model input variables
"""
feeds = OrderedDict()
feeds['image'] = fluid.data(
name="feed_image", shape=[None] + image_shape, dtype="float32")
if use_mix:
feeds['feed_y_a'] = fluid.data(
name="feed_y_a", shape=[None, 1], dtype="int64")
feeds['feed_y_b'] = fluid.data(
name="feed_y_b", shape=[None, 1], dtype="int64")
feeds['feed_lam'] = fluid.data(
name="feed_lam", shape=[None, 1], dtype="float32")
else:
feeds['label'] = fluid.data(
name="feed_label", shape=[None, 1], dtype="int64")
return feeds
def create_dataloader(feeds):
"""
Create a dataloader with model input variables
Args:
feeds(dict): dict of model input variables
Returns:
dataloader(fluid dataloader):
"""
trainer_num = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
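# Single-trainer runs get a larger prefetch queue; distributed runs use a
# smaller per-trainer queue.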
capacity = 64 if trainer_num <= 1 else 8
dataloader = fluid.io.DataLoader.from_generator(
feed_list=feeds,
capacity=capacity,
use_double_buffer=True,
iterable=True)
return dataloader
def create_model(architecture, image, classes_num, is_train):
"""
Create a model
Args:
architecture(dict): architecture information,
name(such as ResNet50) is needed
image(variable): model input variable
classes_num(int): num of classes
Returns:
out(variable): model output variable
"""
name = architecture["name"]
params = architecture.get("params", {})
if "is_test" in params:
params['is_test'] = not is_train
model = architectures.__dict__[name](**params)
out = model.net(input=image, class_dim=classes_num)
return out
def create_loss(out,
feeds,
architecture,
classes_num=1000,
epsilon=None,
use_mix=False,
use_distillation=False):
"""
Create a loss for optimization, such as:
1. CrossEntropy loss
2. CrossEntropy loss with label smoothing
3. CrossEntropy loss with mix(mixup, cutmix, fmix)
4. CrossEntropy loss with label smoothing and (mixup, cutmix, fmix)
5. GoogLeNet loss
Args:
out(variable): model output variable
feeds(dict): dict of model input variables
architecture(dict): architecture information,
name(such as ResNet50) is needed
classes_num(int): num of classes
epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0
use_mix(bool): whether to use mix(include mixup, cutmix, fmix)
Returns:
loss(variable): loss variable
"""
if architecture["name"] == "GoogLeNet":
assert len(out) == 3, "GoogLeNet should have 3 outputs"
loss = GoogLeNetLoss(class_dim=classes_num, epsilon=epsilon)
target = feeds['label']
return loss(out[0], out[1], out[2], target)
if use_distillation:
assert len(out) == 2, ("distillation output length must be 2, "
"but got {}".format(len(out)))
loss = JSDivLoss(class_dim=classes_num, epsilon=epsilon)
return loss(out[1], out[0])
if use_mix:
loss = MixCELoss(class_dim=classes_num, epsilon=epsilon)
feed_y_a = feeds['feed_y_a']
feed_y_b = feeds['feed_y_b']
feed_lam = feeds['feed_lam']
return loss(out, feed_y_a, feed_y_b, feed_lam)
else:
loss = CELoss(class_dim=classes_num, epsilon=epsilon)
target = feeds['label']
return loss(out, target)
def create_metric(out,
feeds,
architecture,
topk=5,
classes_num=1000,
use_distillation=False):
"""
Create measures of model accuracy, such as top1 and top5
Args:
out(variable): model output variable
feeds(dict): dict of model input variables(included label)
topk(int): usually top5
classes_num(int): num of classes
Returns:
fetchs(dict): dict of measures
"""
if architecture["name"] == "GoogLeNet":
assert len(out) == 3, "GoogLeNet should have 3 outputs"
softmax_out = out[0]
else:
# only the student output is needed to compute metrics
if use_distillation:
out = out[1]
softmax_out = fluid.layers.softmax(out, use_cudnn=False)
fetchs = OrderedDict()
# set top1 to fetchs
top1 = fluid.layers.accuracy(softmax_out, label=feeds['label'], k=1)
fetchs['top1'] = (top1, AverageMeter('top1', '.4f', need_avg=True))
# set topk to fetchs
k = min(topk, classes_num)
topk = fluid.layers.accuracy(softmax_out, label=feeds['label'], k=k)
topk_name = 'top{}'.format(k)
fetchs[topk_name] = (topk, AverageMeter(topk_name, '.4f', need_avg=True))
return fetchs
def create_fetchs(out,
feeds,
architecture,
topk=5,
classes_num=1000,
epsilon=None,
use_mix=False,
use_distillation=False):
"""
Create fetchs as model outputs(included loss and measures),
will call create_loss and create_metric(if use_mix).
Args:
out(variable): model output variable
feeds(dict): dict of model input variables.
If mixup is used, it will not include the label.
architecture(dict): architecture information,
name(such as ResNet50) is needed
topk(int): usually top5
classes_num(int): num of classes
epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0
use_mix(bool): whether to use mix(include mixup, cutmix, fmix)
Returns:
fetchs(dict): dict of model outputs(included loss and measures)
"""
fetchs = OrderedDict()
loss = create_loss(out, feeds, architecture, classes_num, epsilon, use_mix,
use_distillation)
fetchs['loss'] = (loss, AverageMeter('loss', '7.4f', need_avg=True))
if not use_mix:
metric = create_metric(out, feeds, architecture, topk, classes_num,
use_distillation)
fetchs.update(metric)
return fetchs
def create_optimizer(config):
"""
Create an optimizer using config, usually including
learning rate and regularization.
Args:
config(dict): such as
{
'LEARNING_RATE':
{'function': 'Cosine',
'params': {'lr': 0.1}
},
'OPTIMIZER':
{'function': 'Momentum',
'params':{'momentum': 0.9},
'regularizer':
{'function': 'L2', 'factor': 0.0001}
}
}
Returns:
an optimizer instance
"""
# create learning_rate instance
lr_config = config['LEARNING_RATE']
lr_config['params'].update({
'epochs': config['epochs'],
'step_each_epoch':
config['total_images'] // config['TRAIN']['batch_size'],
})
lr = LearningRateBuilder(**lr_config)()
# create optimizer instance
opt_config = config['OPTIMIZER']
opt = OptimizerBuilder(**opt_config)
return opt(lr)
def dist_optimizer(config, optimizer):
"""
Create a distributed optimizer based on a normal optimizer
Args:
config(dict):
optimizer(): a normal optimizer
Returns:
optimizer: a distributed optimizer
"""
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 3
exec_strategy.num_iteration_per_drop_scope = 10
dist_strategy = DistributedStrategy()
dist_strategy.nccl_comm_num = 1
dist_strategy.fuse_all_reduce_ops = True
dist_strategy.exec_strategy = exec_strategy
optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
return optimizer
def mixed_precision_optimizer(config, optimizer):
use_fp16 = config.get('use_fp16', False)
amp_scale_loss = config.get('amp_scale_loss', 1.0)
use_dynamic_loss_scaling = config.get('use_dynamic_loss_scaling', False)
if use_fp16:
optimizer = fluid.contrib.mixed_precision.decorate(
optimizer,
init_loss_scaling=amp_scale_loss,
use_dynamic_loss_scaling=use_dynamic_loss_scaling)
return optimizer
def build(config, main_prog, startup_prog, is_train=True):
"""
Build a program using a model and an optimizer
1. create feeds
2. create a dataloader
3. create a model
4. create fetchs
5. create an optimizer
Args:
config(dict): config
main_prog(): main program
startup_prog(): startup program
is_train(bool): train or valid
Returns:
dataloader(): a bridge between the model and the data
fetchs(dict): dict of model outputs(included loss and measures)
"""
with fluid.program_guard(main_prog, startup_prog):
with fluid.unique_name.guard():
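# Assemble the full graph for this program: input feeds, dataloader,
# model forward pass, loss/metrics, and (for training) the optimizer step.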
use_mix = config.get('use_mix') and is_train
use_distillation = config.get('use_distillation')
feeds = create_feeds(config.image_shape, use_mix=use_mix)
dataloader = create_dataloader(feeds.values())
out = create_model(config.ARCHITECTURE, feeds['image'],
config.classes_num, is_train)
fetchs = create_fetchs(
out,
feeds,
config.ARCHITECTURE,
config.topk,
config.classes_num,
epsilon=config.get('ls_epsilon'),
use_mix=use_mix,
use_distillation=use_distillation)
if is_train:
optimizer = create_optimizer(config)
lr = optimizer._global_learning_rate()
fetchs['lr'] = (lr, AverageMeter('lr', 'f', need_avg=False))
optimizer = mixed_precision_optimizer(config, optimizer)
optimizer = dist_optimizer(config, optimizer)
optimizer.minimize(fetchs['loss'][0])
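# Optionally track an exponential moving average of the parameters;
# the EMA object is returned alongside the fetchs.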
if config.get('use_ema'):
global_steps = fluid.layers.learning_rate_scheduler._decay_step_counter(
)
ema = ExponentialMovingAverage(
config.get('ema_decay'), thres_steps=global_steps)
ema.update()
return dataloader, fetchs, ema
return dataloader, fetchs
def compile(config, program, loss_name=None):
"""
Compile the program
Args:
config(dict): config
program(): the program to compile; it is wrapped by CompiledProgram
loss_name(str): loss name
Returns:
compiled_program(): a compiled program
"""
build_strategy = fluid.compiler.BuildStrategy()
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 1
exec_strategy.num_iteration_per_drop_scope = 10
compiled_program = fluid.CompiledProgram(program).with_data_parallel(
loss_name=loss_name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
return compiled_program
total_step = 0
def run(dataloader,
exe,
program,
fetchs,
epoch=0,
mode='train',
vdl_writer=None):
"""
Feed data to the model and fetch the measures and loss
Args:
dataloader(fluid dataloader):
exe():
program():
fetchs(dict): dict of measures and the loss
epoch(int): epoch of training or validation
mode(str): 'train', 'valid' or 'eval', used for logging only
Returns:
"""
fetch_list = [f[0] for f in fetchs.values()]
metric_list = [f[1] for f in fetchs.values()]
for m in metric_list:
m.reset()
batch_time = AverageMeter('elapse', '.3f')
tic = time.time()
for idx, batch in enumerate(dataloader()):
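# One executor run per batch; fetched values come back in the same order
# as fetch_list, so they can be paired with metric_list by index.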
metrics = exe.run(program=program, feed=batch, fetch_list=fetch_list)
batch_time.update(time.time() - tic)
tic = time.time()
for i, m in enumerate(metrics):
metric_list[i].update(m[0], len(batch[0]))
fetchs_str = ''.join([str(m.value) + ' '
for m in metric_list] + [batch_time.value]) + 's'
if vdl_writer:
global total_step
logger.scaler('loss', metrics[0][0], total_step, vdl_writer)
total_step += 1
if mode == 'eval':
logger.info("{:s} step:{:<4d} {:s}s".format(mode, idx, fetchs_str))
else:
epoch_str = "epoch:{:<3d}".format(epoch)
step_str = "{:s} step:{:<4d}".format(mode, idx)
logger.info("{:s} {:s} {:s}".format(
logger.coloring(epoch_str, "HEADER")
if idx == 0 else epoch_str,
logger.coloring(step_str, "PURPLE"),
logger.coloring(fetchs_str, 'OKGREEN')))
end_str = ''.join([str(m.mean) + ' '
for m in metric_list] + [batch_time.total]) + 's'
if mode == 'eval':
logger.info("END {:s} {:s}s".format(mode, end_str))
else:
end_epoch_str = "END epoch:{:<3d}".format(epoch)
logger.info("{:s} {:s} {:s}".format(
logger.coloring(end_epoch_str, "RED"),
logger.coloring(mode, "PURPLE"),
logger.coloring(end_str, "OKGREEN")))
# return top1_acc in order to save the best model
if mode == 'valid':
return fetchs["top1"][1].avg
|
[] |
[] |
[
"PADDLE_TRAINERS_NUM"
] |
[]
|
["PADDLE_TRAINERS_NUM"]
|
python
| 1 | 0 | |
core/launcher.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Launching of programs, folders, URLs, etc.."""
from __future__ import print_function, unicode_literals, absolute_import
import sys
import os
import subprocess
import copy
import re
from .lnp import lnp
from . import hacks, paths, log, terminal
def toggle_autoclose():
"""Toggle automatic closing of the UI when launching DF."""
lnp.userconfig['autoClose'] = not lnp.userconfig.get_bool('autoClose')
lnp.userconfig.save_data()
def get_df_executable():
"""Returns the path of the executable needed to launch Dwarf Fortress."""
spawn_terminal = False
if sys.platform == 'win32':
if ('legacy' in lnp.df_info.variations and
lnp.df_info.version <= '0.31.14'):
df_filename = 'dwarfort.exe'
else:
df_filename = 'Dwarf Fortress.exe'
elif sys.platform == 'darwin' and lnp.df_info.version <= '0.28.181.40d':
df_filename = 'Dwarf Fortress.app'
else:
# Linux/OSX: Run DFHack if available and enabled
if (os.path.isfile(paths.get('df', 'dfhack')) and
hacks.is_dfhack_enabled()):
df_filename = 'dfhack'
spawn_terminal = True
else:
df_filename = 'df'
if lnp.args.df_executable:
df_filename = lnp.args.df_executable
return df_filename, spawn_terminal
def run_df(force=False):
"""Launches Dwarf Fortress."""
validation_result = lnp.settings.validate_config()
if validation_result:
if not lnp.ui.on_invalid_config(validation_result):
return
df_filename, spawn_terminal = get_df_executable()
executable = paths.get('df', df_filename)
result = run_program(executable, force, True, spawn_terminal)
if (force and not result) or result is False:
log.e('Could not launch ' + executable)
raise Exception('Failed to run Dwarf Fortress.')
for prog in lnp.autorun:
utility = paths.get('utilities', prog)
if os.access(utility, os.F_OK):
run_program(utility)
if lnp.userconfig.get_bool('autoClose'):
sys.exit()
return result
def run_program(path, force=False, is_df=False, spawn_terminal=False):
"""
Launches an external program.
Params:
path
The path of the program to launch.
spawn_terminal
Whether or not to spawn a new terminal for this app.
Used only for DFHack.
"""
path = os.path.abspath(path)
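# Programs launched through a terminal wrapper or as OS X .app bundles are not
# direct children of this process, so they must be looked up in the system
# process list instead of lnp.running.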
check_nonchild = ((spawn_terminal and sys.platform.startswith('linux')) or
(sys.platform == 'darwin' and (
path.endswith('.app') or spawn_terminal)))
is_running = program_is_running(path, check_nonchild)
if not force and is_running:
log.i(path + ' is already running')
lnp.ui.on_program_running(path, is_df)
return None
try:
workdir = os.path.dirname(path)
# pylint:disable=redefined-variable-type
run_args = path
if spawn_terminal and not sys.platform.startswith('win'):
run_args = terminal.get_terminal_command([path,])
elif path.endswith('.jar'): # Explicitly launch JAR files with Java
run_args = ['java', '-jar', os.path.basename(path)]
elif path.endswith('.app'): # OS X application bundle
run_args = ['open', path]
workdir = path
environ = os.environ
if lnp.bundle:
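# When running from a PyInstaller bundle, strip the bundled TCL/TK library
# paths so the launched program picks up its own copies.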
environ = copy.deepcopy(os.environ)
if ('TCL_LIBRARY' in environ and
sys._MEIPASS in environ['TCL_LIBRARY']): # pylint:disable=no-member
del environ['TCL_LIBRARY']
if ('TK_LIBRARY' in environ and
sys._MEIPASS in environ['TK_LIBRARY']): # pylint:disable=no-member
del environ['TK_LIBRARY']
lnp.running[path] = subprocess.Popen(
run_args, cwd=workdir, env=environ)
return True
except OSError:
sys.excepthook(*sys.exc_info())
return False
def program_is_running(path, nonchild=False):
"""
Returns True if a program is currently running.
Params:
path
The path of the program.
nonchild
If set to True, attempts to check for the process among all
running processes, not just known child processes. Used for
DFHack on Linux and OS X; currently unsupported for Windows.
"""
if nonchild:
ps = subprocess.Popen(['ps', 'axww'], stdout=subprocess.PIPE)
s = ps.stdout.read()
ps.wait()
encoding = sys.getfilesystemencoding()
if encoding is None:
# Encoding was not detected, assume UTF-8
encoding = 'UTF-8'
s = s.decode(encoding, 'replace')
return re.search('\\B%s( |$)' % re.escape(path), s, re.M) is not None
else:
if path not in lnp.running:
return False
else:
lnp.running[path].poll()
return lnp.running[path].returncode is None
def open_folder_idx(i):
"""Opens the folder specified by index i, as listed in PyLNP.json."""
open_file(os.path.join(
paths.get('root'), lnp.config['folders'][i][1].replace(
'<df>', paths.get('df'))))
def open_savegames():
"""Opens the save game folder."""
open_file(paths.get('save'))
def open_link_idx(i):
"""Opens the link specified by index i, as listed in PyLNP.json."""
open_url(lnp.config['links'][i][1])
def open_url(url):
"""Launches a web browser to the Dwarf Fortress webpage."""
import webbrowser
webbrowser.open(url)
def open_file(path):
"""
Opens a file with the system default viewer for the respective file type.
Params:
path
The file path to open.
"""
path = os.path.normpath(path)
# pylint: disable=broad-except, bare-except
try:
if sys.platform == 'darwin':
subprocess.check_call(['open', '--', path])
elif sys.platform.startswith('linux'):
subprocess.check_call(['xdg-open', path])
elif sys.platform in ['windows', 'win32']:
os.startfile(path)
else:
log.e('Unknown platform, cannot open file')
except:
log.e('Could not open file ' + path)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
mackerel-plugin-haproxy/haproxy.go
|
package main
import (
"encoding/csv"
"errors"
"flag"
"fmt"
"io"
"net/http"
"os"
"strconv"
"time"
mp "github.com/mackerelio/go-mackerel-plugin"
)
var graphdef = map[string](mp.Graphs){
"haproxy.total.sessions": mp.Graphs{
Label: "HAProxy Total Sessions",
Unit: "integer",
Metrics: [](mp.Metrics){
mp.Metrics{Name: "sessions", Label: "Sessions", Diff: true},
},
},
"haproxy.total.bytes": mp.Graphs{
Label: "HAProxy Total Bytes",
Unit: "integer",
Metrics: [](mp.Metrics){
mp.Metrics{Name: "bytes_in", Label: "Bytes In", Diff: true},
mp.Metrics{Name: "bytes_out", Label: "Bytes Out", Diff: true},
},
},
"haproxy.total.connection_errors": mp.Graphs{
Label: "HAProxy Total Connection Errors",
Unit: "integer",
Metrics: [](mp.Metrics){
mp.Metrics{Name: "connection_errors", Label: "Connection Errors", Diff: true},
},
},
}
// HAProxyPlugin mackerel plugin for haproxy
type HAProxyPlugin struct {
URI string
Username string
Password string
}
// FetchMetrics interface for mackerelplugin
func (p HAProxyPlugin) FetchMetrics() (map[string]float64, error) {
client := &http.Client{
Timeout: time.Duration(5) * time.Second,
}
requestURI := p.URI + ";csv;norefresh"
req, err := http.NewRequest("GET", requestURI, nil)
if err != nil {
return nil, err
}
if p.Username != "" {
req.SetBasicAuth(p.Username, p.Password)
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return nil, fmt.Errorf("Request failed. Status: %s, URI: %s", resp.Status, requestURI)
}
return p.parseStats(resp.Body)
}
func (p HAProxyPlugin) parseStats(statsBody io.Reader) (map[string]float64, error) {
stat := make(map[string]float64)
reader := csv.NewReader(statsBody)
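// Sum per-backend totals; in the HAProxy stats CSV column 7 is stot (sessions),
// 8 is bin (bytes in), 9 is bout (bytes out) and 13 is econ (connection errors).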
for {
columns, err := reader.Read()
if err == io.EOF {
break
}
if len(columns) < 60 {
return nil, errors.New("Length of stats csv is too short. Specifed uri may be wrong.")
}
if columns[1] != "BACKEND" {
continue
}
var data float64
data, err = strconv.ParseFloat(columns[7], 64)
if err != nil {
return nil, errors.New("cannot get values")
}
stat["sessions"] += data
data, err = strconv.ParseFloat(columns[8], 64)
if err != nil {
return nil, errors.New("cannot get values")
}
stat["bytes_in"] += data
data, err = strconv.ParseFloat(columns[9], 64)
if err != nil {
return nil, errors.New("cannot get values")
}
stat["bytes_out"] += data
data, err = strconv.ParseFloat(columns[13], 64)
if err != nil {
return nil, errors.New("cannot get values")
}
stat["connection_errors"] += data
}
return stat, nil
}
// GraphDefinition interface for mackerelplugin
func (p HAProxyPlugin) GraphDefinition() map[string](mp.Graphs) {
return graphdef
}
func main() {
optURI := flag.String("uri", "", "URI")
optScheme := flag.String("scheme", "http", "Scheme")
optHost := flag.String("host", "localhost", "Hostname")
optPort := flag.String("port", "80", "Port")
optPath := flag.String("path", "/", "Path")
optUsername := flag.String("username", "", "Username for Basic Auth")
optPassword := flag.String("password", "", "Password for Basic Auth")
optTempfile := flag.String("tempfile", "", "Temp file name")
flag.Parse()
var haproxy HAProxyPlugin
if *optURI != "" {
haproxy.URI = *optURI
} else {
haproxy.URI = fmt.Sprintf("%s://%s:%s%s", *optScheme, *optHost, *optPort, *optPath)
}
if *optUsername != "" {
haproxy.Username = *optUsername
}
if *optPassword != "" {
haproxy.Password = *optPassword
}
helper := mp.NewMackerelPlugin(haproxy)
if *optTempfile != "" {
helper.Tempfile = *optTempfile
} else {
helper.Tempfile = fmt.Sprintf("/tmp/mackerel-plugin-haproxy")
}
if os.Getenv("MACKEREL_AGENT_PLUGIN_META") != "" {
helper.OutputDefinitions()
} else {
helper.OutputValues()
}
}
|
[
"\"MACKEREL_AGENT_PLUGIN_META\""
] |
[] |
[
"MACKEREL_AGENT_PLUGIN_META"
] |
[]
|
["MACKEREL_AGENT_PLUGIN_META"]
|
go
| 1 | 0 | |
cache/replicate.go
|
package cache
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"os"
"time"
)
const (
ReplicateActionInsert = "insert"
ReplicateActionDelete = "delete"
)
type Replicate struct {
Action string `json:"action"`
Key string `json:"key"`
Data map[string]interface{} `json:"data"`
}
var replicateBucket = make(chan Replicate, 10)
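// SendDataToSlave drains replicateBucket and POSTs each entry as JSON to the
// slave's /replicate endpoint, logging connection and status failures.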
func SendDataToSlave() {
for {
select {
case data, open := <-replicateBucket:
client := http.Client{
Timeout: time.Second * 5,
}
fmt.Println("replicateBucket is open : ", open, " : ",data)
buf, _ := json.Marshal(data)
io := bytes.NewReader(buf)
req, err := http.NewRequest(http.MethodPost, os.Getenv("SLAVE_URL") + "/replicate", io)
if err != nil {
fmt.Printf("SlaveConnectionFailed: failed to replicate data to slave, %s\n", err.Error())
continue
}
headers := http.Header{}
headers.Set("Content-Type", "application/json")
req.Header = headers
res, err := client.Do(req)
if err != nil {
fmt.Printf("SlaveConnectionFailed: failed to replicate data to slave, %s\n", err.Error())
continue
}
if res.StatusCode == http.StatusNotFound {
fmt.Println("SlaveNotReachable: check if slave is running on " + os.Getenv("SLAVE_URL"))
continue
}
if res.StatusCode == http.StatusAccepted {
fmt.Println("SlaveReplicatino: data replicated")
continue
}
}
}
}
func ReplicateData(meta *Replicate) {
switch meta.Action {
case ReplicateActionInsert:
Set(meta.Data)
case ReplicateActionDelete:
Delete(meta.Key)
}
}
|
[
"\"SLAVE_URL\"",
"\"SLAVE_URL\""
] |
[] |
[
"SLAVE_URL"
] |
[]
|
["SLAVE_URL"]
|
go
| 1 | 0 | |
conan/build.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from cpt.packager import ConanMultiPackager
import os
if __name__ == "__main__":
login_username = os.getenv("CONAN_LOGIN_USERNAME")
username = os.getenv("CONAN_USERNAME")
tag_version = os.getenv("CONAN_PACKAGE_VERSION", os.getenv("TRAVIS_TAG"))
package_version = tag_version.replace("v", "")
package_name_unset = "SET-CONAN_PACKAGE_NAME-OR-CONAN_REFERENCE"
package_name = os.getenv("CONAN_PACKAGE_NAME", package_name_unset)
reference = "{}/{}".format(package_name, package_version)
channel = os.getenv("CONAN_CHANNEL", "stable")
upload = os.getenv("CONAN_UPLOAD")
stable_branch_pattern = os.getenv("CONAN_STABLE_BRANCH_PATTERN", r"v\d+\.\d+\.\d+.*")
test_folder = os.getenv("CPT_TEST_FOLDER", os.path.join("conan", "test_package"))
upload_only_when_stable = os.getenv("CONAN_UPLOAD_ONLY_WHEN_STABLE", True)
header_only = os.getenv("CONAN_HEADER_ONLY", False)
pure_c = os.getenv("CONAN_PURE_C", False)
disable_shared = os.getenv("CONAN_DISABLE_SHARED_BUILD", "False")
if disable_shared == "True" and package_name == package_name_unset:
raise Exception("CONAN_DISABLE_SHARED_BUILD: True is only supported when you define CONAN_PACKAGE_NAME")
builder = ConanMultiPackager(username=username,
reference=reference,
channel=channel,
login_username=login_username,
upload=upload,
stable_branch_pattern=stable_branch_pattern,
upload_only_when_stable=upload_only_when_stable,
test_folder=test_folder)
if header_only == "False":
builder.add_common_builds(pure_c=pure_c)
else:
builder.add()
filtered_builds = []
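# When shared builds are disabled, keep only the configurations where the
# package's 'shared' option is off.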
for settings, options, env_vars, build_requires, reference in builder.items:
if disable_shared == "False" or not options["{}:shared".format(package_name)]:
filtered_builds.append([settings, options, env_vars, build_requires])
builder.builds = filtered_builds
builder.run()
|
[] |
[] |
[
"CONAN_USERNAME",
"CONAN_HEADER_ONLY",
"CONAN_CHANNEL",
"CONAN_UPLOAD_ONLY_WHEN_STABLE",
"CONAN_DISABLE_SHARED_BUILD",
"CPT_TEST_FOLDER",
"CONAN_UPLOAD",
"CONAN_STABLE_BRANCH_PATTERN",
"CONAN_PACKAGE_VERSION",
"CONAN_PACKAGE_NAME",
"CONAN_LOGIN_USERNAME",
"CONAN_PURE_C",
"TRAVIS_TAG"
] |
[]
|
["CONAN_USERNAME", "CONAN_HEADER_ONLY", "CONAN_CHANNEL", "CONAN_UPLOAD_ONLY_WHEN_STABLE", "CONAN_DISABLE_SHARED_BUILD", "CPT_TEST_FOLDER", "CONAN_UPLOAD", "CONAN_STABLE_BRANCH_PATTERN", "CONAN_PACKAGE_VERSION", "CONAN_PACKAGE_NAME", "CONAN_LOGIN_USERNAME", "CONAN_PURE_C", "TRAVIS_TAG"]
|
python
| 13 | 0 | |
vendor/src/github.com/miekg/pkcs11/pkcs11_test.go
|
// Copyright 2013 Miek Gieben. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkcs11
// These tests depend on SoftHSM and the library being
// in /usr/lib/softhsm/libsofthsm.so
import (
"fmt"
"math/big"
"os"
"testing"
)
/*
This test supports the following environment variables:
* SOFTHSM_LIB: complete path to libsofthsm.so
* SOFTHSM_TOKENLABEL
* SOFTHSM_PRIVKEYLABEL
* SOFTHSM_PIN
*/
func setenv(t *testing.T) *Ctx {
lib := "/usr/lib/softhsm/libsofthsm.so"
if x := os.Getenv("SOFTHSM_LIB"); x != "" {
lib = x
}
t.Logf("loading %s", lib)
p := New(lib)
if p == nil {
t.Fatal("Failed to init lib")
}
return p
}
func TestSetenv(t *testing.T) {
wd, _ := os.Getwd()
os.Setenv("SOFTHSM_CONF", wd+"/softhsm.conf")
lib := "/usr/lib/softhsm/libsofthsm.so"
if x := os.Getenv("SOFTHSM_LIB"); x != "" {
lib = x
}
p := New(lib)
if p == nil {
t.Fatal("Failed to init pkcs11")
}
p.Destroy()
return
}
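// getSession initializes the library, opens a session on the first slot and
// logs the normal user in.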
func getSession(p *Ctx, t *testing.T) SessionHandle {
if e := p.Initialize(); e != nil {
t.Fatalf("init error %s\n", e)
}
slots, e := p.GetSlotList(true)
if e != nil {
t.Fatalf("slots %s\n", e)
}
session, e := p.OpenSession(slots[0], CKF_SERIAL_SESSION)
if e != nil {
t.Fatalf("session %s\n", e)
}
if e := p.Login(session, CKU_USER, pin); e != nil {
t.Fatalf("user pin %s\n", e)
}
return session
}
func TestInitialize(t *testing.T) {
p := setenv(t)
if e := p.Initialize(); e != nil {
t.Fatalf("init error %s\n", e)
}
p.Finalize()
p.Destroy()
}
func finishSession(p *Ctx, session SessionHandle) {
p.Logout(session)
p.CloseSession(session)
p.Finalize()
p.Destroy()
}
func TestGetInfo(t *testing.T) {
p := setenv(t)
session := getSession(p, t)
defer finishSession(p, session)
info, err := p.GetInfo()
if err != nil {
t.Fatalf("non zero error %s\n", err)
}
if info.ManufacturerID != "SoftHSM" {
t.Fatal("ID should be SoftHSM")
}
t.Logf("%+v\n", info)
}
func TestFindObject(t *testing.T) {
p := setenv(t)
session := getSession(p, t)
defer finishSession(p, session)
// There are 2 keys in the db with this tag
template := []*Attribute{NewAttribute(CKA_LABEL, "MyFirstKey")}
if e := p.FindObjectsInit(session, template); e != nil {
t.Fatalf("failed to init: %s\n", e)
}
obj, b, e := p.FindObjects(session, 2)
if e != nil {
t.Fatalf("failed to find: %s %v\n", e, b)
}
if e := p.FindObjectsFinal(session); e != nil {
t.Fatalf("failed to finalize: %s\n", e)
}
if len(obj) != 2 {
t.Fatal("should have found two objects")
}
}
func TestGetAttributeValue(t *testing.T) {
p := setenv(t)
session := getSession(p, t)
defer finishSession(p, session)
// There are at least two RSA keys in the hsm.db, objecthandle 1 and 2.
template := []*Attribute{
NewAttribute(CKA_PUBLIC_EXPONENT, nil),
NewAttribute(CKA_MODULUS_BITS, nil),
NewAttribute(CKA_MODULUS, nil),
NewAttribute(CKA_LABEL, nil),
}
// ObjectHandle two is the public key
attr, err := p.GetAttributeValue(session, ObjectHandle(2), template)
if err != nil {
t.Fatalf("err %s\n", err)
}
for i, a := range attr {
t.Logf("attr %d, type %d, valuelen %d", i, a.Type, len(a.Value))
if a.Type == CKA_MODULUS {
mod := big.NewInt(0)
mod.SetBytes(a.Value)
t.Logf("modulus %s\n", mod.String())
}
}
}
func TestDigest(t *testing.T) {
p := setenv(t)
session := getSession(p, t)
defer finishSession(p, session)
e := p.DigestInit(session, []*Mechanism{NewMechanism(CKM_SHA_1, nil)})
if e != nil {
t.Fatalf("DigestInit: %s\n", e)
}
hash, e := p.Digest(session, []byte("this is a string"))
if e != nil {
t.Fatalf("digest: %s\n", e)
}
hex := ""
for _, d := range hash {
hex += fmt.Sprintf("%x", d)
}
// Teststring create with: echo -n "this is a string" | sha1sum
if hex != "517592df8fec3ad146a79a9af153db2a4d784ec5" {
t.Fatalf("wrong digest: %s", hex)
}
}
func TestDigestUpdate(t *testing.T) {
p := setenv(t)
session := getSession(p, t)
defer finishSession(p, session)
if e := p.DigestInit(session, []*Mechanism{NewMechanism(CKM_SHA_1, nil)}); e != nil {
t.Fatalf("DigestInit: %s\n", e)
}
if e := p.DigestUpdate(session, []byte("this is ")); e != nil {
t.Fatalf("DigestUpdate: %s\n", e)
}
if e := p.DigestUpdate(session, []byte("a string")); e != nil {
t.Fatalf("DigestUpdate: %s\n", e)
}
hash, e := p.DigestFinal(session)
if e != nil {
t.Fatalf("DigestFinal: %s\n", e)
}
hex := ""
for _, d := range hash {
hex += fmt.Sprintf("%x", d)
}
// Teststring create with: echo -n "this is a string" | sha1sum
if hex != "517592df8fec3ad146a79a9af153db2a4d784ec5" {
t.Fatalf("wrong digest: %s", hex)
}
}
func testDestroyObject(t *testing.T) {
p := setenv(t)
session := getSession(p, t)
defer finishSession(p, session)
p.Logout(session) // log out the normal user
if e := p.Login(session, CKU_SO, "1234"); e != nil {
t.Fatalf("security officer pin %s\n", e)
}
template := []*Attribute{
NewAttribute(CKA_LABEL, "MyFirstKey")}
if e := p.FindObjectsInit(session, template); e != nil {
t.Fatalf("failed to init: %s\n", e)
}
obj, _, e := p.FindObjects(session, 1)
if e != nil || len(obj) == 0 {
t.Fatalf("failed to find objects\n")
}
if e := p.FindObjectsFinal(session); e != nil {
t.Fatalf("failed to finalize: %s\n", e)
}
if e := p.DestroyObject(session, obj[0]); e != nil {
t.Fatal("DestroyObject failed: %s\n", e)
}
}
// ExampleSign shows how to sign some data with a private key.
// Note: error correction is not implemented in this example.
func ExampleSign() {
p := setenv(nil)
p.Initialize()
defer p.Destroy()
defer p.Finalize()
slots, _ := p.GetSlotList(true)
session, _ := p.OpenSession(slots[0], CKF_SERIAL_SESSION|CKF_RW_SESSION)
defer p.CloseSession(session)
p.Login(session, CKU_USER, "1234")
defer p.Logout(session)
publicKeyTemplate := []*Attribute{
NewAttribute(CKA_KEY_TYPE, CKO_PUBLIC_KEY),
NewAttribute(CKA_TOKEN, true),
NewAttribute(CKA_ENCRYPT, true),
NewAttribute(CKA_PUBLIC_EXPONENT, []byte{3}),
NewAttribute(CKA_MODULUS_BITS, 1024),
NewAttribute(CKA_LABEL, "MyFirstKey"),
}
privateKeyTemplate := []*Attribute{
NewAttribute(CKA_KEY_TYPE, CKO_PRIVATE_KEY),
NewAttribute(CKA_TOKEN, true),
NewAttribute(CKA_PRIVATE, true),
NewAttribute(CKA_SIGN, true),
NewAttribute(CKA_LABEL, "MyFirstKey"),
}
pub, priv, _ := p.GenerateKeyPair(session,
[]*Mechanism{NewMechanism(CKM_RSA_PKCS_KEY_PAIR_GEN, nil)},
publicKeyTemplate, privateKeyTemplate)
p.SignInit(session, []*Mechanism{NewMechanism(CKM_SHA1_RSA_PKCS, nil)}, priv)
// Sign something with the private key.
data := []byte("Lets sign this data")
sig, _ := p.Sign(session, data)
fmt.Printf("%v validate with %v\n", sig, pub)
}
|
[
"\"SOFTHSM_LIB\"",
"\"SOFTHSM_LIB\""
] |
[] |
[
"SOFTHSM_LIB"
] |
[]
|
["SOFTHSM_LIB"]
|
go
| 1 | 0 | |
src/olympia/lib/settings_base.py
|
# -*- coding: utf-8 -*-
# Django settings for olympia project.
import datetime
import logging
import os
import socket
from django.utils.functional import lazy
from django.core.urlresolvers import reverse_lazy
import environ
env = environ.Env()
ALLOWED_HOSTS = [
'.allizom.org',
'.mozilla.org',
'.mozilla.com',
'.mozilla.net',
]
# jingo-minify settings
CACHEBUST_IMGS = True
try:
# If we have build ids available, we'll grab them here and add them to our
# CACHE_PREFIX. This will let us not have to flush memcache during updates
# and it will let us preload data into it before a production push.
from build import BUILD_ID_CSS, BUILD_ID_JS
build_id = "%s%s" % (BUILD_ID_CSS[:2], BUILD_ID_JS[:2])
except ImportError:
build_id = ""
# jingo-minify: Style sheet media attribute default
CSS_MEDIA_DEFAULT = 'all'
# Make filepaths relative to the root of olympia.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ROOT = os.path.join(BASE_DIR, '..', '..')
def path(*folders):
return os.path.join(ROOT, *folders)
# We need to track this because hudson can't just call its checkout "olympia".
# It puts it in a dir called "workspace". Way to be, hudson.
ROOT_PACKAGE = os.path.basename(ROOT)
DEBUG = True
DEBUG_PROPAGATE_EXCEPTIONS = True
SILENCED_SYSTEM_CHECKS = (
# Recommendation to use OneToOneField instead of ForeignKey(unique=True)
# but our translations are the way they are...
'fields.W342',
# TEMPLATE_DIRS is required by jingo, remove this line here once we
# get rid of jingo
'1_8.W001',
)
# LESS CSS OPTIONS (Debug only).
LESS_PREPROCESS = True # Compile LESS with Node, rather than client-side JS?
LESS_LIVE_REFRESH = False # Refresh the CSS on save?
LESS_BIN = 'lessc'
# Path to stylus (to compile .styl files).
STYLUS_BIN = 'stylus'
# Path to cleancss (our CSS minifier).
CLEANCSS_BIN = 'cleancss'
# Path to uglifyjs (our JS minifier).
UGLIFY_BIN = 'uglifyjs' # Set as None to use YUI instead (at your risk).
FLIGTAR = '[email protected]'
EDITORS_EMAIL = '[email protected]'
SENIOR_EDITORS_EMAIL = '[email protected]'
THEMES_EMAIL = '[email protected]'
ABUSE_EMAIL = '[email protected]'
NOBODY_EMAIL = '[email protected]'
# Add Access-Control-Allow-Origin: * header for the new API with
# django-cors-headers.
CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = r'^/api/v3/.*$'
INTERNAL_DOMAINS = ['localhost:3000']
CORS_ENDPOINT_OVERRIDES = [
(r'^/api/v3/internal/accounts/login/?$', {
'CORS_ORIGIN_ALLOW_ALL': False,
'CORS_ORIGIN_WHITELIST': INTERNAL_DOMAINS,
'CORS_ALLOW_CREDENTIALS': True,
}),
(r'^/api/v3/accounts/login/?$', {
'CORS_ORIGIN_ALLOW_ALL': False,
'CORS_ORIGIN_WHITELIST': INTERNAL_DOMAINS,
'CORS_ALLOW_CREDENTIALS': True,
}),
(r'^/api/v3/internal/.*$', {
'CORS_ORIGIN_ALLOW_ALL': False,
'CORS_ORIGIN_WHITELIST': INTERNAL_DOMAINS,
}),
]
DATABASES = {
'default': env.db(default='mysql://root:@localhost/olympia')
}
DATABASES['default']['OPTIONS'] = {'sql_mode': 'STRICT_ALL_TABLES'}
DATABASES['default']['TEST_CHARSET'] = 'utf8'
DATABASES['default']['TEST_COLLATION'] = 'utf8_general_ci'
# Run all views in a transaction unless they are decorated not to.
DATABASES['default']['ATOMIC_REQUESTS'] = True
# Pool our database connections up for 300 seconds
DATABASES['default']['CONN_MAX_AGE'] = 300
# A database to be used by the services scripts, which does not use Django.
# The settings can be copied from DATABASES, but since it's not a full Django
# database connection, only some values are supported.
SERVICES_DATABASE = {
'NAME': DATABASES['default']['NAME'],
'USER': DATABASES['default']['USER'],
'PASSWORD': DATABASES['default']['PASSWORD'],
'HOST': DATABASES['default']['HOST'],
'PORT': DATABASES['default']['PORT'],
}
DATABASE_ROUTERS = ('multidb.PinningMasterSlaveRouter',)
# Put the aliases for your slave databases in this list.
SLAVE_DATABASES = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
# Accepted locales
# Note: If you update this list, don't forget to also update the locale
# permissions in the database.
AMO_LANGUAGES = (
'af', 'ar', 'bg', 'bn-BD', 'ca', 'cs', 'da', 'de', 'dsb',
'el', 'en-GB', 'en-US', 'es', 'eu', 'fa', 'fi', 'fr', 'ga-IE', 'he', 'hu',
'hsb', 'id', 'it', 'ja', 'ka', 'ko', 'nn-NO', 'mk', 'mn', 'nl', 'pl',
'pt-BR', 'pt-PT', 'ro', 'ru', 'sk', 'sl', 'sq', 'sv-SE', 'uk', 'vi',
'zh-CN', 'zh-TW',
)
# Explicit conversion of a shorter language code into a more specific one.
SHORTER_LANGUAGES = {
'en': 'en-US', 'ga': 'ga-IE', 'pt': 'pt-PT', 'sv': 'sv-SE', 'zh': 'zh-CN'
}
# Not shown on the site, but .po files exist and these are available on the
# L10n dashboard. Generally languages start here and move into AMO_LANGUAGES.
HIDDEN_LANGUAGES = ('cy', 'hr', 'sr', 'sr-Latn', 'tr')
def lazy_langs(languages):
from product_details import product_details
if not product_details.languages:
return {}
return dict([(i.lower(), product_details.languages[i]['native'])
for i in languages])
# Where product details are stored see django-mozilla-product-details
PROD_DETAILS_DIR = path('src', 'olympia', 'lib', 'product_json')
PROD_DETAILS_STORAGE = 'olympia.lib.product_details_backend.NoCachePDFileStorage' # noqa
# Override Django's built-in with our native names
LANGUAGES = lazy(lazy_langs, dict)(AMO_LANGUAGES)
RTL_LANGUAGES = ('ar', 'fa', 'fa-IR', 'he')
LANGUAGE_URL_MAP = dict([(i.lower(), i) for i in AMO_LANGUAGES])
LOCALE_PATHS = (
path('locale'),
)
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# The host currently running the site. Only use this in code for good reason;
# the site is designed to run on a cluster and should continue to support that
HOSTNAME = socket.gethostname()
# The front end domain of the site. If you're not running on a cluster this
# might be the same as HOSTNAME but don't depend on that. Use this when you
# need the real domain.
DOMAIN = HOSTNAME
# Full base URL for your main site including protocol. No trailing slash.
# Example: https://addons.mozilla.org
SITE_URL = 'http://%s' % DOMAIN
# Domain of the services site. This is where your API, and in-product pages
# live.
SERVICES_DOMAIN = 'services.%s' % DOMAIN
# Full URL to your API service. No trailing slash.
# Example: https://services.addons.mozilla.org
SERVICES_URL = 'http://%s' % SERVICES_DOMAIN
# The domain of the mobile site.
MOBILE_DOMAIN = 'm.%s' % DOMAIN
# The full url of the mobile site.
MOBILE_SITE_URL = 'http://%s' % MOBILE_DOMAIN
# Filter IP addresses of allowed clients that can post email through the API.
ALLOWED_CLIENTS_EMAIL_API = env.list('ALLOWED_CLIENTS_EMAIL_API', default=[])
# Auth token required to authorize inbound email.
INBOUND_EMAIL_SECRET_KEY = env('INBOUND_EMAIL_SECRET_KEY', default='')
# Validation key we need to send in POST response.
INBOUND_EMAIL_VALIDATION_KEY = env('INBOUND_EMAIL_VALIDATION_KEY', default='')
# Domain emails should be sent to.
INBOUND_EMAIL_DOMAIN = env('INBOUND_EMAIL_DOMAIN', default=DOMAIN)
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = path('user-media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/user-media/'
# Absolute path to a temporary storage area
TMP_PATH = path('tmp')
# Tarballs in DUMPED_APPS_PATH deleted 30 days after they have been written.
DUMPED_APPS_DAYS_DELETE = 3600 * 24 * 30
# Tarballs in DUMPED_USERS_PATH deleted 30 days after they have been written.
DUMPED_USERS_DAYS_DELETE = 3600 * 24 * 30
# path that isn't just one /, and doesn't require any locale or app.
SUPPORTED_NONAPPS_NONLOCALES_PREFIX = (
'api/v3',
'blocked/blocklists.json',
)
# paths that don't require an app prefix
SUPPORTED_NONAPPS = (
'about', 'admin', 'apps', 'blocklist', 'contribute.json', 'credits',
'developer_agreement', 'developer_faq', 'developers', 'editors', 'faq',
'jsi18n', 'review_guide', 'google1f3e37b7351799a5.html',
'robots.txt', 'statistics', 'services', 'sunbird', 'static', 'user-media',
'__version__',
)
DEFAULT_APP = 'firefox'
# paths that don't require a locale prefix
SUPPORTED_NONLOCALES = (
'contribute.json', 'google1f3e37b7351799a5.html', 'robots.txt', 'services',
'downloads', 'blocklist', 'static', 'user-media', '__version__',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'this-is-a-dummy-key-and-its-overridden-for-prod-servers'
# Templates
# We don't want jingo's template loader to pick up templates for third party
# apps that don't use Jinja2. The following is a list of prefixes for jingo to
# ignore.
JINGO_EXCLUDE_APPS = (
'django_extensions',
'admin',
'rest_framework',
'waffle',
)
JINGO_EXCLUDE_PATHS = (
'users/email',
'reviews/emails',
'editors/emails',
'amo/emails',
'devhub/email/revoked-key-email.ltxt',
'devhub/email/new-key-email.ltxt',
# Django specific templates
'registration/password_reset_subject.txt'
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (
path('media', 'docs'),
path('src/olympia/templates'),
),
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.request',
'session_csrf.context_processor',
'django.contrib.messages.context_processors.messages',
'olympia.amo.context_processors.app',
'olympia.amo.context_processors.i18n',
'olympia.amo.context_processors.global_settings',
'olympia.amo.context_processors.static_url',
'jingo_minify.helpers.build_ids',
),
'loaders': (
'olympia.lib.template_loader.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
}
}
]
# jingo still looks at TEMPLATE_DIRS
TEMPLATE_DIRS = TEMPLATES[0]['DIRS']
def JINJA_CONFIG():
import jinja2
from django.conf import settings
from django.core.cache import cache
config = {
'extensions': [
'olympia.amo.ext.cache',
'puente.ext.i18n',
'waffle.jinja.WaffleExtension',
'jinja2.ext.do',
'jinja2.ext.with_',
'jinja2.ext.loopcontrols'
],
'finalize': lambda x: x if x is not None else '',
'autoescape': True,
}
if False and not settings.DEBUG:
# We're passing the _cache object directly to jinja because
# Django can't store binary directly; it enforces unicode on it.
# Details: http://jinja.pocoo.org/2/documentation/api#bytecode-cache
# and in the errors you get when you try it the other way.
bc = jinja2.MemcachedBytecodeCache(cache._cache,
"%sj2:" % settings.CACHE_PREFIX)
config['cache_size'] = -1 # Never clear the cache
config['bytecode_cache'] = bc
return config
X_FRAME_OPTIONS = 'DENY'
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_SECONDS = 31536000
MIDDLEWARE_CLASSES = (
# AMO URL middleware comes first so everyone else sees nice URLs.
'django_statsd.middleware.GraphiteRequestTimingMiddleware',
'django_statsd.middleware.GraphiteMiddleware',
'olympia.amo.middleware.LocaleAndAppURLMiddleware',
# Mobile detection should happen in Zeus.
'mobility.middleware.DetectMobileMiddleware',
'mobility.middleware.XMobileMiddleware',
'olympia.amo.middleware.RemoveSlashMiddleware',
# Munging REMOTE_ADDR must come before ThreadRequest.
'commonware.middleware.SetRemoteAddrFromForwardedFor',
'django.middleware.security.SecurityMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'multidb.middleware.PinningRouterMiddleware',
'waffle.middleware.WaffleMiddleware',
# CSP and CORS need to come before CommonMiddleware because they might
# need to add headers to 304 responses returned by CommonMiddleware.
'csp.middleware.CSPMiddleware',
'corsheaders.middleware.CorsMiddleware',
'olympia.amo.middleware.CommonMiddleware',
'olympia.amo.middleware.NoVarySessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'olympia.amo.middleware.AuthenticationMiddlewareWithoutAPI',
'commonware.log.ThreadRequestMiddleware',
'olympia.search.middleware.ElasticsearchExceptionMiddleware',
'session_csrf.CsrfMiddleware',
# This should come after authentication middleware
'olympia.access.middleware.ACLMiddleware',
'commonware.middleware.ScrubRequestOnException',
)
# Auth
AUTH_USER_MODEL = 'users.UserProfile'
# Override this in the site settings.
ROOT_URLCONF = 'olympia.urls'
INSTALLED_APPS = (
'olympia.core',
'olympia.amo', # amo comes first so it always takes precedence.
'olympia.abuse',
'olympia.access',
'olympia.accounts',
'olympia.activity',
'olympia.addons',
'olympia.api',
'olympia.applications',
'olympia.bandwagon',
'olympia.blocklist',
'olympia.browse',
'olympia.compat',
'olympia.devhub',
'olympia.discovery',
'olympia.editors',
'olympia.files',
'olympia.internal_tools',
'olympia.legacy_api',
'olympia.legacy_discovery',
'olympia.lib.es',
'olympia.pages',
'olympia.reviews',
'olympia.search',
'olympia.stats',
'olympia.tags',
'olympia.translations',
'olympia.users',
'olympia.versions',
'olympia.zadmin',
# Third party apps
'product_details',
'cronjobs',
'csp',
'aesfield',
'django_extensions',
'raven.contrib.django',
'rest_framework',
'waffle',
'jingo_minify',
'puente',
# Django contrib apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
# Has to load after auth
'django_statsd',
)
# These apps are only needed in a testing environment. They are added to
# INSTALLED_APPS by settings_test.py (which is itself loaded by setup.cfg by
# py.test)
TEST_INSTALLED_APPS = (
'olympia.translations.tests.testapp',
)
# Tells the extract script what files to look for l10n in and what function
# handles the extraction. The puente library expects this.
PUENTE = {
'BASE_DIR': ROOT,
# Tells the extract script what files to look for l10n in and what function
# handles the extraction.
'DOMAIN_METHODS': {
'django': [
('src/olympia/**.py', 'python'),
# Make sure we're parsing django-admin templates with the django
# template extractor
(
'src/olympia/zadmin/templates/admin/*.html',
'django_babel.extract.extract_django'
),
('src/olympia/**/templates/**.html', 'jinja2'),
('**/templates/**.lhtml', 'jinja2'),
],
'djangojs': [
# We can't say **.js because that would dive into mochikit
# and timeplot and all the other baggage we're carrying.
# Timeplot, in particular, crashes the extractor with bad
# unicode data.
('static/js/**-all.js', 'ignore'),
('static/js/**-min.js', 'ignore'),
('static/js/*.js', 'javascript'),
('static/js/amo2009/**.js', 'javascript'),
('static/js/common/**.js', 'javascript'),
('static/js/impala/**.js', 'javascript'),
('static/js/zamboni/**.js', 'javascript'),
],
},
}
# Bundles is a dictionary of two dictionaries, css and js, which list css files
# and js files that can be bundled together by the minify app.
MINIFY_BUNDLES = {
'css': {
'restyle/css': (
'css/restyle/restyle.less',
),
# CSS files common to the entire site.
'zamboni/css': (
'css/legacy/main.css',
'css/legacy/main-mozilla.css',
'css/legacy/jquery-lightbox.css',
'css/legacy/autocomplete.css',
'css/zamboni/zamboni.css',
'css/zamboni/tags.css',
'css/zamboni/tabs.css',
'css/impala/formset.less',
'css/impala/suggestions.less',
'css/impala/header.less',
'css/impala/moz-tab.css',
'css/impala/footer.less',
'css/impala/faux-zamboni.less',
'css/zamboni/themes.less',
),
'zamboni/impala': (
'css/impala/base.css',
'css/legacy/jquery-lightbox.css',
'css/impala/site.less',
'css/impala/typography.less',
'css/impala/forms.less',
'css/common/invisible-upload.less',
'css/impala/header.less',
'css/impala/footer.less',
'css/impala/moz-tab.css',
'css/impala/hovercards.less',
'css/impala/toplist.less',
'css/impala/carousel.less',
'css/impala/reviews.less',
'css/impala/buttons.less',
'css/impala/promos.less',
'css/impala/addon_details.less',
'css/impala/policy.less',
'css/impala/expando.less',
'css/impala/popups.less',
'css/impala/l10n.less',
'css/impala/contributions.less',
'css/impala/lightbox.less',
'css/impala/prose.less',
'css/impala/abuse.less',
'css/impala/paginator.less',
'css/impala/listing.less',
'css/impala/versions.less',
'css/impala/users.less',
'css/impala/collections.less',
'css/impala/tooltips.less',
'css/impala/search.less',
'css/impala/suggestions.less',
'css/impala/jquery.minicolors.css',
'css/impala/personas.less',
'css/impala/login.less',
'css/impala/dictionaries.less',
'css/impala/apps.less',
'css/impala/formset.less',
'css/impala/tables.less',
'css/impala/compat.less',
'css/impala/fxa-migration.less',
),
'zamboni/stats': (
'css/impala/stats.less',
),
'zamboni/discovery-pane': (
'css/zamboni/discovery-pane.css',
'css/impala/promos.less',
'css/legacy/jquery-lightbox.css',
),
'zamboni/devhub': (
'css/impala/tooltips.less',
'css/zamboni/developers.css',
'css/zamboni/docs.less',
'css/impala/developers.less',
'css/impala/personas.less',
'css/devhub/listing.less',
'css/devhub/popups.less',
'css/devhub/compat.less',
'css/impala/formset.less',
'css/devhub/forms.less',
'css/common/invisible-upload.less',
'css/devhub/submission.less',
'css/devhub/refunds.less',
'css/devhub/buttons.less',
'css/devhub/in-app-config.less',
),
'zamboni/devhub_impala': (
'css/impala/developers.less',
'css/devhub/listing.less',
'css/devhub/popups.less',
'css/devhub/compat.less',
'css/devhub/dashboard.less',
'css/devhub/forms.less',
'css/common/invisible-upload.less',
'css/devhub/submission.less',
'css/devhub/search.less',
'css/devhub/refunds.less',
'css/impala/devhub-api.less',
),
'zamboni/editors': (
'css/zamboni/editors.styl',
'css/zamboni/unlisted.less',
),
'zamboni/themes_review': (
'css/zamboni/developers.css',
'css/zamboni/editors.styl',
'css/zamboni/themes_review.styl',
),
'zamboni/files': (
'css/lib/syntaxhighlighter/shCoreDefault.css',
'css/zamboni/files.css',
),
'zamboni/mobile': (
'css/zamboni/mobile.css',
'css/mobile/typography.less',
'css/mobile/forms.less',
'css/mobile/header.less',
'css/mobile/search.less',
'css/mobile/listing.less',
'css/mobile/footer.less',
'css/impala/fxa-migration.less',
'css/mobile/notifications.less',
),
'zamboni/admin': (
'css/zamboni/admin-django.css',
'css/zamboni/admin-mozilla.css',
'css/zamboni/admin_features.css',
# Datepicker styles and jQuery UI core.
'css/zamboni/jquery-ui/custom-1.7.2.css',
),
},
'js': {
# JS files common to the entire site (pre-impala).
'common': (
'js/lib/raven.min.js',
'js/common/raven-config.js',
'js/lib/underscore.js',
'js/zamboni/browser.js',
'js/amo2009/addons.js',
'js/zamboni/init.js',
'js/impala/capabilities.js',
'js/lib/format.js',
'js/lib/jquery.cookie.js',
'js/zamboni/storage.js',
'js/zamboni/buttons.js',
'js/zamboni/tabs.js',
'js/common/keys.js',
# jQuery UI
'js/lib/jquery-ui/core.js',
'js/lib/jquery-ui/position.js',
'js/lib/jquery-ui/widget.js',
'js/lib/jquery-ui/menu.js',
'js/lib/jquery-ui/mouse.js',
'js/lib/jquery-ui/autocomplete.js',
'js/lib/jquery-ui/datepicker.js',
'js/lib/jquery-ui/sortable.js',
'js/zamboni/helpers.js',
'js/zamboni/global.js',
'js/amo2009/global.js',
'js/common/ratingwidget.js',
'js/lib/jquery-ui/jqModal.js',
'js/zamboni/l10n.js',
'js/zamboni/debouncer.js',
# Homepage
'js/impala/promos.js',
'js/zamboni/homepage.js',
# Add-ons details page
'js/lib/jquery-ui/ui.lightbox.js',
'js/zamboni/contributions.js',
'js/zamboni/addon_details.js',
'js/impala/abuse.js',
'js/zamboni/reviews.js',
# Personas
'js/lib/jquery.hoverIntent.js',
'js/zamboni/personas_core.js',
'js/zamboni/personas.js',
# Unicode: needs to be loaded after collections.js which listens to
# an event fired in this file.
'js/zamboni/unicode.js',
# Collections
'js/zamboni/collections.js',
# Users
'js/zamboni/users.js',
# Password length and strength
'js/zamboni/password-strength.js',
# Search suggestions
'js/impala/forms.js',
'js/impala/ajaxcache.js',
'js/impala/suggestions.js',
'js/impala/site_suggestions.js',
),
# Impala and Legacy: Things to be loaded at the top of the page
'preload': (
'js/lib/jquery-1.12.0.js',
'js/lib/jquery.browser.js',
'js/impala/preloaded.js',
'js/zamboni/analytics.js',
),
# Impala: Things to be loaded at the bottom
'impala': (
'js/lib/raven.min.js',
'js/common/raven-config.js',
'js/lib/underscore.js',
'js/impala/carousel.js',
'js/zamboni/browser.js',
'js/amo2009/addons.js',
'js/zamboni/init.js',
'js/impala/capabilities.js',
'js/lib/format.js',
'js/lib/jquery.cookie.js',
'js/zamboni/storage.js',
'js/zamboni/buttons.js',
'js/lib/jquery.pjax.js',
'js/impala/footer.js',
'js/common/keys.js',
# jQuery UI
'js/lib/jquery-ui/core.js',
'js/lib/jquery-ui/position.js',
'js/lib/jquery-ui/widget.js',
'js/lib/jquery-ui/mouse.js',
'js/lib/jquery-ui/menu.js',
'js/lib/jquery-ui/autocomplete.js',
'js/lib/jquery-ui/datepicker.js',
'js/lib/jquery-ui/sortable.js',
# Firefox Accounts
'js/lib/uri.js',
'js/common/fxa-login.js',
'js/lib/truncate.js',
'js/zamboni/truncation.js',
'js/impala/ajaxcache.js',
'js/zamboni/helpers.js',
'js/zamboni/global.js',
'js/impala/global.js',
'js/common/ratingwidget.js',
'js/lib/jquery-ui/jqModal.js',
'js/zamboni/l10n.js',
'js/impala/forms.js',
# Homepage
'js/impala/promos.js',
'js/impala/homepage.js',
# Add-ons details page
'js/lib/jquery-ui/ui.lightbox.js',
'js/zamboni/contributions.js',
'js/impala/addon_details.js',
'js/impala/abuse.js',
'js/impala/reviews.js',
# Browse listing pages
'js/impala/listing.js',
# Personas
'js/lib/jquery.hoverIntent.js',
'js/zamboni/personas_core.js',
'js/zamboni/personas.js',
# Persona creation
'js/common/upload-image.js',
'js/lib/jquery.minicolors.js',
'js/impala/persona_creation.js',
# Unicode: needs to be loaded after collections.js which listens to
# an event fired in this file.
'js/zamboni/unicode.js',
# Collections
'js/zamboni/collections.js',
'js/impala/collections.js',
# Users
'js/zamboni/users.js',
'js/impala/users.js',
# Search
'js/impala/serializers.js',
'js/impala/search.js',
'js/impala/suggestions.js',
'js/impala/site_suggestions.js',
# Login
'js/impala/login.js',
),
'zamboni/discovery': (
'js/lib/jquery-1.12.0.js',
'js/lib/jquery.browser.js',
'js/lib/underscore.js',
'js/zamboni/browser.js',
'js/zamboni/init.js',
'js/impala/capabilities.js',
'js/lib/format.js',
'js/impala/carousel.js',
'js/zamboni/analytics.js',
# Add-ons details
'js/lib/jquery.cookie.js',
'js/zamboni/storage.js',
'js/zamboni/buttons.js',
'js/lib/jquery-ui/ui.lightbox.js',
# Personas
'js/lib/jquery.hoverIntent.js',
'js/zamboni/personas_core.js',
'js/zamboni/personas.js',
'js/zamboni/debouncer.js',
'js/lib/truncate.js',
'js/zamboni/truncation.js',
'js/impala/promos.js',
'js/zamboni/discovery_addons.js',
'js/zamboni/discovery_pane.js',
),
'zamboni/discovery-video': (
'js/lib/popcorn-1.0.js',
'js/zamboni/discovery_video.js',
),
'zamboni/devhub': (
'js/lib/truncate.js',
'js/zamboni/truncation.js',
'js/common/upload-base.js',
'js/common/upload-addon.js',
'js/common/upload-image.js',
'js/impala/formset.js',
'js/zamboni/devhub.js',
'js/zamboni/validator.js',
'js/lib/jquery.timeago.js',
),
'zamboni/editors': (
'js/lib/highcharts.src.js',
'js/zamboni/editors.js',
'js/lib/jquery.hoverIntent.js', # Used by jquery.zoomBox.
'js/lib/jquery.zoomBox.js', # Used by themes_review.
'js/zamboni/themes_review_templates.js',
'js/zamboni/themes_review.js',
),
'zamboni/files': (
'js/lib/diff_match_patch_uncompressed.js',
'js/lib/syntaxhighlighter/xregexp-min.js',
'js/lib/syntaxhighlighter/shCore.js',
'js/lib/syntaxhighlighter/shLegacy.js',
'js/lib/syntaxhighlighter/shBrushAppleScript.js',
'js/lib/syntaxhighlighter/shBrushAS3.js',
'js/lib/syntaxhighlighter/shBrushBash.js',
'js/lib/syntaxhighlighter/shBrushCpp.js',
'js/lib/syntaxhighlighter/shBrushCSharp.js',
'js/lib/syntaxhighlighter/shBrushCss.js',
'js/lib/syntaxhighlighter/shBrushDiff.js',
'js/lib/syntaxhighlighter/shBrushJava.js',
'js/lib/syntaxhighlighter/shBrushJScript.js',
'js/lib/syntaxhighlighter/shBrushPhp.js',
'js/lib/syntaxhighlighter/shBrushPlain.js',
'js/lib/syntaxhighlighter/shBrushPython.js',
'js/lib/syntaxhighlighter/shBrushSass.js',
'js/lib/syntaxhighlighter/shBrushSql.js',
'js/lib/syntaxhighlighter/shBrushVb.js',
'js/lib/syntaxhighlighter/shBrushXml.js',
'js/zamboni/storage.js',
'js/zamboni/files_templates.js',
'js/zamboni/files.js',
),
'zamboni/mobile': (
'js/lib/jquery-1.12.0.js',
'js/lib/jquery.browser.js',
'js/lib/underscore.js',
'js/lib/jqmobile.js',
'js/lib/jquery.cookie.js',
'js/zamboni/browser.js',
'js/zamboni/init.js',
'js/impala/capabilities.js',
'js/zamboni/analytics.js',
'js/lib/format.js',
'js/zamboni/mobile/buttons.js',
'js/lib/truncate.js',
'js/zamboni/truncation.js',
'js/impala/footer.js',
'js/zamboni/personas_core.js',
'js/zamboni/mobile/personas.js',
'js/zamboni/helpers.js',
'js/zamboni/mobile/general.js',
'js/common/ratingwidget.js',
# Firefox Accounts
'js/lib/uri.js',
'js/common/fxa-login.js',
),
'zamboni/stats': (
'js/lib/highcharts.src.js',
'js/impala/stats/csv_keys.js',
'js/impala/stats/helpers.js',
'js/impala/stats/dateutils.js',
'js/impala/stats/manager.js',
'js/impala/stats/controls.js',
'js/impala/stats/overview.js',
'js/impala/stats/topchart.js',
'js/impala/stats/chart.js',
'js/impala/stats/table.js',
'js/impala/stats/stats.js',
),
'zamboni/admin': (
'js/zamboni/admin.js',
'js/zamboni/admin_features.js',
'js/zamboni/admin_validation.js',
),
# This is included when DEBUG is True. Bundle in <head>.
'debug': (
'js/debug/less_setup.js',
'js/lib/less.js',
'js/debug/less_live.js',
),
}
}
# Caching
# Prefix for cache keys (will prevent collisions when running parallel copies)
CACHE_PREFIX = 'amo:%s:' % build_id
KEY_PREFIX = CACHE_PREFIX
FETCH_BY_ID = True
# Number of seconds a count() query should be cached. Keep it short because
# it's not possible to invalidate these queries.
CACHE_COUNT_TIMEOUT = 60
# To enable pylibmc compression (in bytes)
PYLIBMC_MIN_COMPRESS_LEN = 0 # disabled
# External tools.
JAVA_BIN = '/usr/bin/java'
# Add-on download settings.
PRIVATE_MIRROR_URL = '/_privatefiles'
# File paths
ADDON_ICONS_DEFAULT_PATH = os.path.join(ROOT, 'static', 'img', 'addon-icons')
CA_CERT_BUNDLE_PATH = os.path.join(
ROOT, 'src/olympia/amo/certificates/roots.pem')
# URL paths
# paths for images, e.g. mozcdn.com/amo or '/static'
VAMO_URL = 'https://versioncheck.addons.mozilla.org'
NEW_PERSONAS_UPDATE_URL = VAMO_URL + '/%(locale)s/themes/update-check/%(id)d'
# Outgoing URL bouncer
REDIRECT_URL = 'https://outgoing.prod.mozaws.net/v1/'
REDIRECT_SECRET_KEY = ''
# Allow URLs from these servers. Use full domain names.
REDIRECT_URL_WHITELIST = ['addons.mozilla.org']
# Default to short expiration; check "remember me" to override
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
# See: https://github.com/mozilla/addons-server/issues/1789
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_COOKIE_AGE = 2592000
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN # bug 608797
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
# These should have app+locale at the start to avoid redirects
LOGIN_URL = reverse_lazy('users.login')
LOGOUT_URL = reverse_lazy('users.logout')
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
# When logging in with browser ID, a username is created automatically.
# In the case of duplicates, the process is recursive up to this number
# of times.
MAX_GEN_USERNAME_TRIES = 50
# PayPal Settings
PAYPAL_API_VERSION = '78'
PAYPAL_APP_ID = ''
# URLs for various calls.
PAYPAL_API_URL = 'https://api-3t.paypal.com/nvp'
PAYPAL_CGI_URL = 'https://www.paypal.com/cgi-bin/webscr'
PAYPAL_PAY_URL = 'https://svcs.paypal.com/AdaptivePayments/'
PAYPAL_FLOW_URL = 'https://paypal.com/webapps/adaptivepayment/flow/pay'
PAYPAL_PERMISSIONS_URL = 'https://svcs.paypal.com/Permissions/'
PAYPAL_JS_URL = 'https://www.paypalobjects.com/js/external/dg.js'
# Permissions for the live or sandbox servers
PAYPAL_EMBEDDED_AUTH = {'USER': '', 'PASSWORD': '', 'SIGNATURE': ''}
# The PayPal cert that we'll use for checking.
# When None, the Mozilla CA bundle is used to look it up.
PAYPAL_CERT = None
# Contribution limit, one time and monthly
MAX_CONTRIBUTION = 1000
# Email settings
ADDONS_EMAIL = "Mozilla Add-ons <[email protected]>"
DEFAULT_FROM_EMAIL = ADDONS_EMAIL
# Email goes to the console by default. s/console/smtp/ for regular delivery
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Please use all lowercase for the blacklist.
EMAIL_BLACKLIST = (
'[email protected]',
)
# Please use all lowercase for the QA whitelist.
EMAIL_QA_WHITELIST = ()
# URL for Add-on Validation FAQ.
VALIDATION_FAQ_URL = ('https://wiki.mozilla.org/Add-ons/Reviewers/Guide/'
'AddonReviews#Step_2:_Automatic_validation')
# Celery
BROKER_URL = os.environ.get('BROKER_URL',
'amqp://olympia:olympia@localhost:5672/olympia')
BROKER_CONNECTION_TIMEOUT = 0.1
BROKER_HEARTBEAT = 60 * 15
CELERY_DEFAULT_QUEUE = 'default'
CELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND',
'redis://localhost:6379/1')
CELERY_IGNORE_RESULT = True
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERYD_HIJACK_ROOT_LOGGER = False
CELERY_IMPORTS = (
'olympia.lib.crypto.tasks',
'olympia.lib.es.management.commands.reindex',
)
# We have separate celeryds for processing devhub & images as fast as possible
# Some notes:
# - always add routes here instead of @task(queue=<name>)
# - when adding a queue, be sure to update deploy.py so that it gets restarted
CELERY_ROUTES = {
# Priority.
# If your tasks need to be run as soon as possible, add them here so they
# are routed to the priority queue.
'olympia.addons.tasks.index_addons': {'queue': 'priority'},
'olympia.addons.tasks.unindex_addons': {'queue': 'priority'},
'olympia.addons.tasks.save_theme': {'queue': 'priority'},
'olympia.addons.tasks.save_theme_reupload': {'queue': 'priority'},
'olympia.bandwagon.tasks.index_collections': {'queue': 'priority'},
'olympia.bandwagon.tasks.unindex_collections': {'queue': 'priority'},
'olympia.users.tasks.index_users': {'queue': 'priority'},
'olympia.users.tasks.unindex_users': {'queue': 'priority'},
# Other queues we prioritize below.
# AMO Devhub.
'olympia.devhub.tasks.convert_purified': {'queue': 'devhub'},
'olympia.devhub.tasks.flag_binary': {'queue': 'devhub'},
'olympia.devhub.tasks.get_preview_sizes': {'queue': 'devhub'},
'olympia.devhub.tasks.handle_file_validation_result': {'queue': 'devhub'},
'olympia.devhub.tasks.handle_upload_validation_result': {
'queue': 'devhub'},
'olympia.devhub.tasks.resize_icon': {'queue': 'devhub'},
'olympia.devhub.tasks.resize_preview': {'queue': 'devhub'},
'olympia.devhub.tasks.send_welcome_email': {'queue': 'devhub'},
'olympia.devhub.tasks.submit_file': {'queue': 'devhub'},
'olympia.devhub.tasks.validate_file': {'queue': 'devhub'},
'olympia.devhub.tasks.validate_file_path': {'queue': 'devhub'},
    # This is currently used only by validation tasks.
    # This puts the chord_unlock task on the devhub queue, which means anything
    # that uses chord() or group() must also run in this queue or be on a
    # worker that listens to the same queue.
'celery.chord_unlock': {'queue': 'devhub'},
'olympia.devhub.tasks.compatibility_check': {'queue': 'devhub'},
# Images.
'olympia.bandwagon.tasks.resize_icon': {'queue': 'images'},
'olympia.users.tasks.resize_photo': {'queue': 'images'},
'olympia.users.tasks.delete_photo': {'queue': 'images'},
'olympia.devhub.tasks.resize_icon': {'queue': 'images'},
'olympia.devhub.tasks.resize_preview': {'queue': 'images'},
# AMO validator.
'olympia.zadmin.tasks.bulk_validate_file': {'queue': 'limited'},
# AMO
'olympia.amo.tasks.delete_anonymous_collections': {'queue': 'amo'},
'olympia.amo.tasks.delete_logs': {'queue': 'amo'},
'olympia.amo.tasks.delete_stale_contributions': {'queue': 'amo'},
'olympia.amo.tasks.send_email': {'queue': 'amo'},
'olympia.amo.tasks.set_modified_on_object': {'queue': 'amo'},
# Addons
'olympia.addons.tasks.calc_checksum': {'queue': 'addons'},
'olympia.addons.tasks.delete_persona_image': {'queue': 'addons'},
'olympia.addons.tasks.delete_preview_files': {'queue': 'addons'},
'olympia.addons.tasks.update_incompatible_appversions': {
'queue': 'addons'},
'olympia.addons.tasks.version_changed': {'queue': 'addons'},
# API
'olympia.api.tasks.process_results': {'queue': 'api'},
'olympia.api.tasks.process_webhook': {'queue': 'api'},
# Crons
'olympia.addons.cron._update_addon_average_daily_users': {'queue': 'cron'},
'olympia.addons.cron._update_addon_download_totals': {'queue': 'cron'},
'olympia.addons.cron._update_addons_current_version': {'queue': 'cron'},
'olympia.addons.cron._update_appsupport': {'queue': 'cron'},
'olympia.addons.cron._update_daily_theme_user_counts': {'queue': 'cron'},
'olympia.bandwagon.cron._drop_collection_recs': {'queue': 'cron'},
'olympia.bandwagon.cron._update_collections_subscribers': {
'queue': 'cron'},
'olympia.bandwagon.cron._update_collections_votes': {'queue': 'cron'},
# Bandwagon
'olympia.bandwagon.tasks.collection_meta': {'queue': 'bandwagon'},
'olympia.bandwagon.tasks.collection_votes': {'queue': 'bandwagon'},
'olympia.bandwagon.tasks.collection_watchers': {'queue': 'bandwagon'},
'olympia.bandwagon.tasks.delete_icon': {'queue': 'bandwagon'},
'olympia.bandwagon.tasks.resize_icon': {'queue': 'bandwagon'},
# Editors
'olympia.editors.tasks.add_commentlog': {'queue': 'editors'},
'olympia.editors.tasks.add_versionlog': {'queue': 'editors'},
'olympia.editors.tasks.approve_rereview': {'queue': 'editors'},
'olympia.editors.tasks.reject_rereview': {'queue': 'editors'},
'olympia.editors.tasks.send_mail': {'queue': 'editors'},
# Files
'olympia.files.tasks.extract_file': {'queue': 'files'},
# Crypto
'olympia.lib.crypto.tasks.sign_addons': {'queue': 'crypto'},
# Search
'olympia.lib.es.management.commands.reindex.create_new_index': {
'queue': 'search'},
'olympia.lib.es.management.commands.reindex.delete_indexes': {
'queue': 'search'},
'olympia.lib.es.management.commands.reindex.flag_database': {
'queue': 'search'},
'olympia.lib.es.management.commands.reindex.index_data': {
'queue': 'search'},
'olympia.lib.es.management.commands.reindex.unflag_database': {
'queue': 'search'},
'olympia.lib.es.management.commands.reindex.update_aliases': {
'queue': 'search'},
# Reviews
'olympia.reviews.models.check_spam': {'queue': 'reviews'},
'olympia.reviews.tasks.addon_bayesian_rating': {'queue': 'reviews'},
'olympia.reviews.tasks.addon_grouped_rating': {'queue': 'reviews'},
'olympia.reviews.tasks.addon_review_aggregates': {'queue': 'reviews'},
'olympia.reviews.tasks.update_denorm': {'queue': 'reviews'},
# Stats
'olympia.stats.tasks.addon_total_contributions': {'queue': 'stats'},
'olympia.stats.tasks.index_collection_counts': {'queue': 'stats'},
'olympia.stats.tasks.index_download_counts': {'queue': 'stats'},
'olympia.stats.tasks.index_theme_user_counts': {'queue': 'stats'},
'olympia.stats.tasks.index_update_counts': {'queue': 'stats'},
'olympia.stats.tasks.update_addons_collections_downloads': {
'queue': 'stats'},
'olympia.stats.tasks.update_collections_total': {'queue': 'stats'},
'olympia.stats.tasks.update_global_totals': {'queue': 'stats'},
'olympia.stats.tasks.update_google_analytics': {'queue': 'stats'},
# Tags
'olympia.tags.tasks.update_all_tag_stats': {'queue': 'tags'},
'olympia.tags.tasks.update_tag_stat': {'queue': 'tags'},
# Users
'olympia.users.tasks.delete_photo': {'queue': 'users'},
'olympia.users.tasks.resize_photo': {'queue': 'users'},
'olympia.users.tasks.update_user_ratings_task': {'queue': 'users'},
# Zadmin
'olympia.zadmin.tasks.add_validation_jobs': {'queue': 'zadmin'},
'olympia.zadmin.tasks.admin_email': {'queue': 'zadmin'},
'olympia.zadmin.tasks.celery_error': {'queue': 'zadmin'},
'olympia.zadmin.tasks.fetch_langpack': {'queue': 'zadmin'},
'olympia.zadmin.tasks.fetch_langpacks': {'queue': 'zadmin'},
'olympia.zadmin.tasks.notify_compatibility': {'queue': 'zadmin'},
'olympia.zadmin.tasks.notify_compatibility_chunk': {'queue': 'zadmin'},
'olympia.zadmin.tasks.update_maxversions': {'queue': 'zadmin'},
}
# This is just a place to store these values; apply them in your
# task decorator, for example:
# @task(time_limit=CELERY_TIME_LIMITS['lib...']['hard'])
# Otherwise your task will use the default settings.
CELERY_TIME_LIMITS = {
# The reindex management command can take up to 3 hours to run.
'olympia.lib.es.management.commands.reindex': {
'soft': 10800, 'hard': 14400},
}
# When testing, we always want tasks to raise exceptions. Good for sanity.
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
# Time in seconds before celery.exceptions.SoftTimeLimitExceeded is raised.
# The task can catch that and recover but should exit ASAP. Note that there is
# a separate, shorter timeout for validation tasks.
CELERYD_TASK_SOFT_TIME_LIMIT = 60 * 30
# Logging
LOG_LEVEL = logging.DEBUG
HAS_SYSLOG = True # syslog is used if HAS_SYSLOG and NOT DEBUG.
SYSLOG_TAG = "http_app_addons"
SYSLOG_TAG2 = "http_app_addons2"
# See PEP 391 and log_settings.py for formatting help. Each section of
# LOGGING will get merged into the corresponding section of
# log_settings.py. Handlers and log levels are set up automatically based
# on LOG_LEVEL and DEBUG unless you set them here. Messages will not
# propagate through a logger unless propagate: True is set.
LOGGING_CONFIG = None
LOGGING = {
'loggers': {
'amo.validator': {'level': logging.WARNING},
'amqplib': {'handlers': ['null']},
'caching.invalidation': {'handlers': ['null']},
'caching': {'level': logging.ERROR},
'elasticsearch': {'handlers': ['null']},
'rdflib': {'handlers': ['null']},
'z.task': {'level': logging.INFO},
'z.es': {'level': logging.INFO},
's.client': {'level': logging.INFO},
},
}
# CSP Settings
PROD_CDN_HOST = 'https://addons.cdn.mozilla.net'
ANALYTICS_HOST = 'https://ssl.google-analytics.com'
CSP_REPORT_URI = '/__cspreport__'
CSP_REPORT_ONLY = False
CSP_EXCLUDE_URL_PREFIXES = ()
# NOTE: CSP_DEFAULT_SRC MUST be set, otherwise any directive that is not
# explicitly set will default to allowing anything.
CSP_DEFAULT_SRC = (
"'self'",
)
CSP_BASE_URI = (
"'self'",
# Required for the legacy discovery pane.
'https://addons.mozilla.org',
)
CSP_CONNECT_SRC = (
"'self'",
'https://sentry.prod.mozaws.net',
)
CSP_FORM_ACTION = (
"'self'",
'https://developer.mozilla.org',
)
CSP_FONT_SRC = (
"'self'",
PROD_CDN_HOST,
)
CSP_CHILD_SRC = (
"'self'",
'https://ic.paypal.com',
'https://paypal.com',
'https://www.google.com/recaptcha/',
'https://www.paypal.com',
)
CSP_FRAME_SRC = CSP_CHILD_SRC
CSP_IMG_SRC = (
"'self'",
'data:', # Used in inlined mobile css.
'blob:', # Needed for image uploads.
'https://www.paypal.com',
ANALYTICS_HOST,
PROD_CDN_HOST,
'https://static.addons.mozilla.net', # CDN origin server.
'https://sentry.prod.mozaws.net',
)
CSP_MEDIA_SRC = (
'https://videos.cdn.mozilla.net',
)
CSP_OBJECT_SRC = ("'none'",)
CSP_SCRIPT_SRC = (
'https://ssl.google-analytics.com/ga.js',
'https://www.google.com/recaptcha/',
'https://www.gstatic.com/recaptcha/',
PAYPAL_JS_URL,
PROD_CDN_HOST,
)
CSP_STYLE_SRC = (
"'self'",
"'unsafe-inline'",
PROD_CDN_HOST,
)
# Should robots.txt deny everything or disallow a calculated list of URLs we
# don't want to be crawled? Default is true, allow everything, toggled to
# False on -dev and stage.
# Also see http://www.google.com/support/webmasters/bin/answer.py?answer=93710
ENGAGE_ROBOTS = True
# Read-only mode setup.
READ_ONLY = False
# Turn on read-only mode in local_settings.py by putting this line
# at the VERY BOTTOM: read_only_mode(globals())
def read_only_mode(env):
env['READ_ONLY'] = True
# Replace the default (master) db with a slave connection.
if not env.get('SLAVE_DATABASES'):
raise Exception("We need at least one slave database.")
slave = env['SLAVE_DATABASES'][0]
env['DATABASES']['default'] = env['DATABASES'][slave]
# No sessions without the database, so disable auth.
env['AUTHENTICATION_BACKENDS'] = ('olympia.users.backends.NoAuthForYou',)
# Add in the read-only middleware before csrf middleware.
extra = 'olympia.amo.middleware.ReadOnlyMiddleware'
before = 'session_csrf.CsrfMiddleware'
m = list(env['MIDDLEWARE_CLASSES'])
m.insert(m.index(before), extra)
env['MIDDLEWARE_CLASSES'] = tuple(m)
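# A minimal usage sketch (an assumption, not part of the original file): the
# local_settings.py mentioned above would need at least one slave connection
# defined before the call, e.g.:
#   SLAVE_DATABASES = ['slave']
#   DATABASES['slave'] = dict(DATABASES['default'])
#   read_only_mode(globals())  # at the VERY BOTTOM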
# Uploaded file limits
MAX_ICON_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_IMAGE_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_VIDEO_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_PHOTO_UPLOAD_SIZE = MAX_ICON_UPLOAD_SIZE
MAX_PERSONA_UPLOAD_SIZE = 300 * 1024
MAX_REVIEW_ATTACHMENT_UPLOAD_SIZE = 5 * 1024 * 1024
# RECAPTCHA: overload the following key settings in local_settings.py
# with your keys.
NOBOT_RECAPTCHA_PUBLIC_KEY = ''
NOBOT_RECAPTCHA_PRIVATE_KEY = ''
# Send Django signals asynchronously on a background thread.
ASYNC_SIGNALS = True
# For persona pagination performance, we hardcode the number of
# available pages when the filter is up-and-coming.
PERSONA_DEFAULT_PAGES = 10
REDIS_LOCATION = os.environ.get(
'REDIS_LOCATION',
'redis://localhost:6379/0?socket_timeout=0.5')
def get_redis_settings(uri):
import urlparse
urlparse.uses_netloc.append('redis')
result = urlparse.urlparse(uri)
options = dict(urlparse.parse_qsl(result.query))
if 'socket_timeout' in options:
options['socket_timeout'] = float(options['socket_timeout'])
return {
'HOST': result.hostname,
'PORT': result.port,
'PASSWORD': result.password,
'DB': int((result.path or '0').lstrip('/')),
'OPTIONS': options
}
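# As a sketch of what get_redis_settings() yields for the default
# REDIS_LOCATION above (derived from the URI parsing in the function):
#   {'HOST': 'localhost', 'PORT': 6379, 'PASSWORD': None, 'DB': 0,
#    'OPTIONS': {'socket_timeout': 0.5}}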
# This is used for `django-cache-machine`
REDIS_BACKEND = REDIS_LOCATION
REDIS_BACKENDS = {
'master': get_redis_settings(REDIS_LOCATION)
}
# Number of seconds before celery tasks will abort addon validation:
VALIDATOR_TIMEOUT = 110
# Max number of warnings/errors to show from validator. Set to None for no
# limit.
VALIDATOR_MESSAGE_LIMIT = 500
# Feature flags
UNLINK_SITE_STATS = True
# Set to True if we're allowed to use X-SENDFILE.
XSENDFILE = True
XSENDFILE_HEADER = 'X-SENDFILE'
MOBILE_COOKIE = 'mamo'
DEFAULT_SUGGESTED_CONTRIBUTION = 5
# Path to `ps`.
PS_BIN = '/bin/ps'
BLOCKLIST_COOKIE = 'BLOCKLIST_v1'
# The maximum file size that is shown inside the file viewer.
FILE_VIEWER_SIZE_LIMIT = 1048576
# The maximum file size that you can have inside a zip file.
FILE_UNZIP_SIZE_LIMIT = 104857600
# How long to delay tasks relying on file system to cope with NFS lag.
NFS_LAG_DELAY = 3
# A whitelist of domains that the authentication script will redirect to upon
# successfully logging in or out.
VALID_LOGIN_REDIRECTS = {
'builder': 'https://builder.addons.mozilla.org',
'builderstage': 'https://builder-addons.allizom.org',
'buildertrunk': 'https://builder-addons-dev.allizom.org',
}
# Elasticsearch
ES_HOSTS = [os.environ.get('ELASTICSEARCH_LOCATION', '127.0.0.1:9200')]
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = {
'default': 'addons',
'stats': 'addons_stats',
}
ES_TIMEOUT = 30
ES_DEFAULT_NUM_REPLICAS = 2
ES_DEFAULT_NUM_SHARDS = 5
# Default AMO user id to use for tasks.
TASK_USER_ID = 4757633
# If this is False, tasks and other jobs that send non-critical emails should
# use a fake email backend.
SEND_REAL_EMAIL = False
STATSD_HOST = 'localhost'
STATSD_PORT = 8125
STATSD_PREFIX = 'amo'
# The django statsd client to use, see django-statsd for more.
STATSD_CLIENT = 'django_statsd.clients.normal'
GRAPHITE_HOST = 'localhost'
GRAPHITE_PORT = 2003
GRAPHITE_PREFIX = 'amo'
GRAPHITE_TIMEOUT = 1
# IP addresses of servers we use as proxies.
KNOWN_PROXIES = []
# Blog URL
DEVELOPER_BLOG_URL = 'http://blog.mozilla.com/addons/feed/'
LOGIN_RATELIMIT_USER = 5
LOGIN_RATELIMIT_ALL_USERS = '15/m'
CSRF_FAILURE_VIEW = 'olympia.amo.views.csrf_failure'
# Testing responsiveness without rate limits.
CELERY_DISABLE_RATE_LIMITS = True
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'olympia.amo.utils.LocalFileStorage'
# This is the signing server for signing files.
SIGNING_SERVER = ''
# And how long we'll give the server to respond.
SIGNING_SERVER_TIMEOUT = 10
# Hotfix add-ons (don't sign those, they're already signed by Mozilla).
HOTFIX_ADDON_GUIDS = ['[email protected]',
'[email protected]']
# Minimum Firefox version for default to compatible addons to be signed.
MIN_D2C_VERSION = '4'
# Minimum Firefox version for not default to compatible addons to be signed.
MIN_NOT_D2C_VERSION = '37'
# True when the Django app is running from the test suite.
IN_TEST_SUITE = False
# Temporary flag to work with navigator.mozPay() on devices that don't
# support it natively.
SIMULATE_NAV_PAY = False
# When the dev. agreement gets updated and you need users to re-accept it,
# change this date. You won't want to do this for minor format changes.
# The tuple is passed through to datetime.date, so please use a valid date
# tuple. If the value is None, then it will just not be used at all.
DEV_AGREEMENT_LAST_UPDATED = None
# If you want to allow self-reviews for add-ons/apps, then enable this.
# In production we do not want to allow this.
ALLOW_SELF_REVIEWS = False
# Modify the user-agents we check for in django-mobility
# (Android has since changed its user agent).
MOBILE_USER_AGENTS = ('mozilla.+mobile|android|fennec|iemobile|'
'iphone|opera (?:mini|mobi)')
# Credentials for accessing Google Analytics stats.
GOOGLE_ANALYTICS_CREDENTIALS = {}
# Which domain to access GA stats for. If not set, defaults to DOMAIN.
GOOGLE_ANALYTICS_DOMAIN = None
# Used for general web API access.
GOOGLE_API_CREDENTIALS = ''
# Google translate settings.
GOOGLE_TRANSLATE_API_URL = 'https://www.googleapis.com/language/translate/v2'
GOOGLE_TRANSLATE_REDIRECT_URL = (
'https://translate.google.com/#auto/{lang}/{text}')
# Language pack fetcher settings
LANGPACK_OWNER_EMAIL = '[email protected]'
LANGPACK_DOWNLOAD_BASE = 'https://ftp.mozilla.org/pub/mozilla.org/'
LANGPACK_PATH_DEFAULT = '%s/releases/%s/win32/xpi/'
# E.g. https://ftp.mozilla.org/pub/mozilla.org/firefox/releases/23.0/SHA512SUMS
LANGPACK_MANIFEST_PATH = '../../SHA512SUMS'
LANGPACK_MAX_SIZE = 5 * 1024 * 1024 # 5MB should be more than enough
# This saves us when we upgrade jingo-minify (jsocol/jingo-minify@916b054c).
JINGO_MINIFY_USE_STATIC = True
# Whitelist IP addresses of the allowed clients that can post email
# through the API.
WHITELISTED_CLIENTS_EMAIL_API = []
# Allow URL style format override. eg. "?format=json"
URL_FORMAT_OVERRIDE = 'format'
# Add-on used to collect stats (!technical debt around!)
ADDON_COLLECTOR_ID = 11950
# Connection to the hive server.
HIVE_CONNECTION = {
'host': 'peach-gw.peach.metrics.scl3.mozilla.com',
'port': 10000,
'user': 'amo_prod',
'password': '',
'auth_mechanism': 'PLAIN',
}
# Enable ETags (based on response content) on every view in CommonMiddleware.
USE_ETAGS = True
# CDN Host is blank on local installs, overwritten in dev/stage/prod envs.
# Useful to force some dynamic content to be served from the CDN.
CDN_HOST = ''
# Static
STATIC_ROOT = path('site-static')
STATIC_URL = '/static/'
JINGO_MINIFY_ROOT = path('static')
STATICFILES_DIRS = (
path('static'),
JINGO_MINIFY_ROOT
)
NETAPP_STORAGE = TMP_PATH
GUARDED_ADDONS_PATH = ROOT + u'/guarded-addons'
# These are key files that must be present on disk to encrypt/decrypt certain
# database fields.
AES_KEYS = {
# 'api_key:secret': os.path.join(ROOT, 'path', 'to', 'file.key'),
}
# Time in seconds for how long a JWT auth token created by developers with
# their API key can live. When developers are creating auth tokens they cannot
# set the expiration any longer than this.
MAX_APIKEY_JWT_AUTH_TOKEN_LIFETIME = 5 * 60
# django-rest-framework-jwt settings:
JWT_AUTH = {
# Use HMAC with the SHA-256 hash algorithm. It should be the default, but we
# want to make sure it does not change behind our backs.
# See https://github.com/jpadilla/pyjwt/blob/master/docs/algorithms.rst
'JWT_ALGORITHM': 'HS256',
# This adds some padding to timestamp validation in case client/server
# clocks are off.
'JWT_LEEWAY': 5,
# Expiration for non-apikey jwt tokens. Since this will be used by our
# frontend clients we want a longer expiration than normal, matching the
# session cookie expiration.
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=SESSION_COOKIE_AGE),
# We don't allow refreshes, instead we simply have a long duration.
'JWT_ALLOW_REFRESH': False,
# Prefix for non-apikey jwt tokens. Should be different from 'JWT', which we
# already use for API key tokens.
'JWT_AUTH_HEADER_PREFIX': 'Bearer',
}
REST_FRAMEWORK = {
# Set this because the default is to also include:
# 'rest_framework.renderers.BrowsableAPIRenderer'
# Which it will try to use if the client accepts text/html.
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'olympia.api.authentication.JSONWebTokenAuthentication',
),
# Set parser classes to include the fix for
# https://github.com/tomchristie/django-rest-framework/issues/3951
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'olympia.api.parsers.MultiPartParser',
),
# Add our custom exception handler, which wraps all exceptions into
# Responses, not just the ones that are API-related.
'EXCEPTION_HANDLER': 'olympia.api.exceptions.custom_exception_handler',
# Enable pagination
'PAGE_SIZE': 25,
# Use our pagination class by default, which allows clients to request a
# different page size.
'DEFAULT_PAGINATION_CLASS': (
'olympia.api.paginator.CustomPageNumberPagination'),
# Use json by default when using APIClient.
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
# Use http://ecma-international.org/ecma-262/5.1/#sec-15.9.1.15
# We can't use the default because we don't use django timezone support.
'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%SZ',
}
# This is the DSN to the local Sentry service. It might be overridden in
# site-specific settings files as well.
SENTRY_DSN = os.environ.get('SENTRY_DSN')
# Automatically do 'from olympia import amo' when running shell_plus.
SHELL_PLUS_POST_IMPORTS = (
('olympia', 'amo'),
)
DEFAULT_FXA_CONFIG_NAME = 'default'
INTERNAL_FXA_CONFIG_NAME = 'internal'
ALLOWED_FXA_CONFIGS = ['default']
|
[] |
[] |
[
"ELASTICSEARCH_LOCATION",
"BROKER_URL",
"SENTRY_DSN",
"REDIS_LOCATION",
"CELERY_RESULT_BACKEND"
] |
[]
|
["ELASTICSEARCH_LOCATION", "BROKER_URL", "SENTRY_DSN", "REDIS_LOCATION", "CELERY_RESULT_BACKEND"]
|
python
| 5 | 0 | |
helmapi/initialize/initialize.go
|
package initialize
import (
"fmt"
"log"
"os"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/gates"
)
func debug(format string, v ...interface{}) {
if settings.Debug {
format = fmt.Sprintf("[debug] %s\n", format)
log.Output(2, fmt.Sprintf(format, v...))
}
}
const FeatureGateOCI = gates.Gate("HELM_EXPERIMENTAL_OCI")
var settings = cli.New()
var ActionConfig *action.Configuration = new(action.Configuration)
func init() {
helmDriver := os.Getenv("HELM_DRIVER")
if err := ActionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver, debug); err != nil {
log.Fatal(err)
}
}
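// Usage note (an assumption based on Helm v3 documentation, not part of this
// file): HELM_DRIVER selects the release storage backend, e.g. "secret" (the
// default), "configmap", "memory" or "sql". Example, with a hypothetical
// binary name:
//   HELM_DRIVER=configmap ./helmapi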
|
[
"\"HELM_DRIVER\""
] |
[] |
[
"HELM_DRIVER"
] |
[]
|
["HELM_DRIVER"]
|
go
| 1 | 0 | |
docs/source/conf.py
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import sphinx_rtd_theme
# import pkg_resources
# -*- coding: utf-8 -*-
#
# Pyro documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 15 17:16:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx', #
'sphinx.ext.todo', #
'sphinx.ext.mathjax', #
'sphinx.ext.ifconfig', #
'sphinx.ext.viewcode', #
'sphinx.ext.githubpages', #
'sphinx.ext.graphviz', #
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
]
# Disable documentation inheritance so as to avoid inheriting
# docstrings in a different format, e.g. when the parent class
# is a PyTorch class.
autodoc_inherit_docstrings = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pyro'
copyright = u'2017-2018, Uber Technologies, Inc'
author = u'Uber AI Labs'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
version = ''
if 'READTHEDOCS' not in os.environ:
# if developing locally, use pyro.__version__ as version
from pyro import __version__  # noqa: E402
version = __version__
# release version
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# do not prepend module name to functions
add_module_names = False
# -- Options for HTML output ----------------------------------------------
# logo
html_logo = '_static/img/pyro_logo_wide.png'
# logo
html_favicon = '_static/img/favicon/favicon.ico'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'navigation_depth': 3,
'logo_only': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'css/pyro.css'
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pyrodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Pyro.tex', u'Pyro Documentation', u'Uber AI Labs', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'pyro', u'Pyro Documentation', [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Pyro', u'Pyro Documentation', author, 'Pyro',
'Deep Universal Probabilistic Programming.', 'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'torch': ('https://pytorch.org/docs/master/', None),
'funsor': ('http://funsor.pyro.ai/en/stable/', None),
'opt_einsum': ('https://optimized-einsum.readthedocs.io/en/stable/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'Bio': ('https://biopython.readthedocs.io/en/latest/', None),
'horovod': ('https://horovod.readthedocs.io/en/stable/', None),
}
# document class constructors (__init__ methods):
""" comment out this functionality for now;
def skip(app, what, name, obj, skip, options):
if name == "__init__":
return False
return skip
"""
def setup(app):
app.add_stylesheet('css/pyro.css')
# app.connect("autodoc-skip-member", skip)
# @jpchen's hack to get rtd builder to install latest pytorch
# See similar line in the install section of .travis.yml
if 'READTHEDOCS' in os.environ:
os.system('pip install torch==1.5.0+cpu torchvision==0.6.0+cpu '
'-f https://download.pytorch.org/whl/torch_stable.html')
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/agent.go
|
package main
import (
"github.com/123shang60/image-load/pkg/common"
"os"
"github.com/123shang60/image-load/pkg/dockerCore"
"github.com/123shang60/image-load/pkg/register"
"github.com/123shang60/image-load/pkg/router"
)
func main() {
// Initialize the docker cli
dockerCore.Init()
// Start periodic registration
go register.RegistAgent()
addr := os.Getenv("addr")
if addr == "" {
addr = "127.0.0.1"
}
port := os.Getenv("port")
if port == "" {
port = "8081"
}
r := router.NewAgentRouter()
err := r.Run(addr + ":" + port)
if err != nil {
common.Logger().Panic(err)
}
}
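// Usage sketch (illustrative, not part of the original source): the agent
// listens on addr:port taken from the environment, falling back to
// 127.0.0.1:8081, e.g.:
//   addr=0.0.0.0 port=9090 ./agent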
|
[
"\"addr\"",
"\"port\""
] |
[] |
[
"addr",
"port"
] |
[]
|
["addr", "port"]
|
go
| 2 | 0 | |
internal/pkg/runtime/engines/singularity/process_linux.go
|
// Copyright (c) 2018-2019, Sylabs Inc. All rights reserved.
// This software is licensed under a 3-clause BSD license. Please consult the
// LICENSE.md file distributed with the sources of this project regarding your
// rights to use or distribute this software.
package singularity
import (
"debug/elf"
"encoding/binary"
"encoding/json"
"fmt"
"net"
"os"
"os/exec"
"os/signal"
"path/filepath"
"reflect"
"runtime"
"strings"
"syscall"
"unsafe"
"github.com/sylabs/singularity/internal/pkg/security"
"github.com/sylabs/singularity/internal/pkg/util/user"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sylabs/singularity/internal/pkg/instance"
"github.com/sylabs/singularity/internal/pkg/sylog"
"golang.org/x/crypto/ssh/terminal"
)
const defaultShell = "/bin/sh"
// Convert an ELF architecture into a GOARCH-style string. This is not an
// exhaustive list, so there is a default for rare cases. Adapted from
// https://golang.org/src/cmd/internal/objfile/elf.go
func elfToGoArch(elfFile *elf.File) string {
switch elfFile.Machine {
case elf.EM_386:
return "386"
case elf.EM_X86_64:
return "amd64"
case elf.EM_ARM:
return "arm"
case elf.EM_AARCH64:
return "arm64"
case elf.EM_PPC64:
if elfFile.ByteOrder == binary.LittleEndian {
return "ppc64le"
}
return "ppc64"
case elf.EM_S390:
return "s390x"
}
return "UNKNOWN"
}
func (engine *EngineOperations) checkExec() error {
shell := engine.EngineConfig.GetShell()
if shell == "" {
shell = defaultShell
}
// Make sure the shell exists
if _, err := os.Stat(shell); os.IsNotExist(err) {
return fmt.Errorf("shell %s doesn't exist in container", shell)
}
args := engine.EngineConfig.OciConfig.Process.Args
env := engine.EngineConfig.OciConfig.Process.Env
// match old behavior of searching path
oldpath := os.Getenv("PATH")
defer func() {
os.Setenv("PATH", oldpath)
engine.EngineConfig.OciConfig.Process.Args = args
engine.EngineConfig.OciConfig.Process.Env = env
}()
for _, keyval := range env {
if strings.HasPrefix(keyval, "PATH=") {
os.Setenv("PATH", keyval[5:])
break
}
}
// If args[0] is an absolute path, exec.LookPath() looks for
// this file directly instead of within PATH
if _, err := exec.LookPath(args[0]); err == nil {
return nil
}
// If args[0] isn't executable (either via PATH or absolute path),
// look for alternative approaches to handling it
switch args[0] {
case "/.singularity.d/actions/exec":
if p, err := exec.LookPath("/.exec"); err == nil {
args[0] = p
return nil
}
if p, err := exec.LookPath(args[1]); err == nil {
sylog.Warningf("container does not have %s, calling %s directly", args[0], args[1])
args[1] = p
args = args[1:]
return nil
}
return fmt.Errorf("no executable %s found", args[1])
case "/.singularity.d/actions/shell":
if p, err := exec.LookPath("/.shell"); err == nil {
args[0] = p
return nil
}
if p, err := exec.LookPath(shell); err == nil {
sylog.Warningf("container does not have %s, calling %s directly", args[0], shell)
args[0] = p
return nil
}
return fmt.Errorf("no %s found inside container", shell)
case "/.singularity.d/actions/run":
if p, err := exec.LookPath("/.run"); err == nil {
args[0] = p
return nil
}
if p, err := exec.LookPath("/singularity"); err == nil {
args[0] = p
return nil
}
return fmt.Errorf("no run driver found inside container")
case "/.singularity.d/actions/start":
if _, err := exec.LookPath(shell); err != nil {
return fmt.Errorf("no %s found inside container, can't run instance", shell)
}
args = []string{shell, "-c", `echo "instance start script not found"`}
return nil
case "/.singularity.d/actions/test":
if p, err := exec.LookPath("/.test"); err == nil {
args[0] = p
return nil
}
return fmt.Errorf("no test driver found inside container")
}
return fmt.Errorf("no %s found inside container", args[0])
}
// StartProcess starts the process
func (engine *EngineOperations) StartProcess(masterConn net.Conn) error {
isInstance := engine.EngineConfig.GetInstance()
bootInstance := isInstance && engine.EngineConfig.GetBootInstance()
shimProcess := false
if err := os.Chdir(engine.EngineConfig.OciConfig.Process.Cwd); err != nil {
if err := os.Chdir(engine.EngineConfig.GetHomeDest()); err != nil {
os.Chdir("/")
}
}
if err := engine.checkExec(); err != nil {
return err
}
if engine.EngineConfig.File.MountDev == "minimal" || engine.EngineConfig.GetContain() {
// If on a terminal, reopen /dev/console so /proc/self/fd/[0-2]
// will point to /dev/console. This is needed so that tty and
// ttyname() on el6 will return the correct answer. Newer
// ttyname() functions might work because they will search
// /dev if the value of /proc/self/fd/X doesn't exist, but
// they won't work if another /dev/pts/X is allocated in its
// place. Also, programs that don't use ttyname() and instead
// directly do readlink() on /proc/self/fd/X need this.
for fd := 0; fd <= 2; fd++ {
if !terminal.IsTerminal(fd) {
continue
}
consfile, err := os.OpenFile("/dev/console", os.O_RDWR, 0600)
if err != nil {
sylog.Debugf("Could not open minimal /dev/console, skipping replacing tty descriptors")
break
}
sylog.Debugf("Replacing tty descriptors with /dev/console")
consfd := int(consfile.Fd())
for ; fd <= 2; fd++ {
if !terminal.IsTerminal(fd) {
continue
}
syscall.Close(fd)
syscall.Dup3(consfd, fd, 0)
}
consfile.Close()
break
}
}
args := engine.EngineConfig.OciConfig.Process.Args
env := engine.EngineConfig.OciConfig.Process.Env
if engine.EngineConfig.OciConfig.Linux != nil {
namespaces := engine.EngineConfig.OciConfig.Linux.Namespaces
for _, ns := range namespaces {
if ns.Type == specs.PIDNamespace {
if !engine.EngineConfig.GetNoInit() {
shimProcess = true
}
break
}
}
}
for _, img := range engine.EngineConfig.GetImageList() {
if err := syscall.Close(int(img.Fd)); err != nil {
return fmt.Errorf("failed to close file descriptor for %s", img.Path)
}
}
for _, fd := range engine.EngineConfig.GetOpenFd() {
if err := syscall.Close(fd); err != nil {
return fmt.Errorf("aborting failed to close file descriptor: %s", err)
}
}
if err := security.Configure(&engine.EngineConfig.OciConfig.Spec); err != nil {
return fmt.Errorf("failed to apply security configuration: %s", err)
}
if (!isInstance && !shimProcess) || bootInstance || engine.EngineConfig.GetInstanceJoin() {
err := syscall.Exec(args[0], args, env)
if err != nil {
// We know the shell exists at this point, so let's inspect its architecture
shell := engine.EngineConfig.GetShell()
if shell == "" {
shell = defaultShell
}
self, errElf := elf.Open(shell)
if errElf != nil {
return fmt.Errorf("failed to open %s for inspection: %s", shell, errElf)
}
defer self.Close()
if elfArch := elfToGoArch(self); elfArch != runtime.GOARCH {
return fmt.Errorf("image targets %s, cannot run on %s", elfArch, runtime.GOARCH)
}
// Assume a missing shared library on ENOENT
if err == syscall.ENOENT {
return fmt.Errorf("exec %s failed: a shared library is likely missing in the image", args[0])
}
// Return the raw error as a last resort
return fmt.Errorf("exec %s failed: %s", args[0], err)
}
}
// Spawn and wait container process, signal handler
cmd := exec.Command(args[0], args[1:]...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
cmd.Env = env
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: isInstance,
}
errChan := make(chan error, 1)
statusChan := make(chan syscall.WaitStatus, 1)
signals := make(chan os.Signal, 1)
if err := cmd.Start(); err != nil {
return fmt.Errorf("exec %s failed: %s", args[0], err)
}
go func() {
errChan <- cmd.Wait()
}()
// Modify argv argument and program name shown in /proc/self/comm
name := "sinit"
argv0str := (*reflect.StringHeader)(unsafe.Pointer(&os.Args[0]))
argv0 := (*[1 << 30]byte)(unsafe.Pointer(argv0str.Data))[:argv0str.Len]
progname := make([]byte, argv0str.Len)
if len(name) > argv0str.Len {
return fmt.Errorf("program name too short")
}
copy(progname, name)
copy(argv0, progname)
ptr := unsafe.Pointer(&progname[0])
if _, _, err := syscall.Syscall(syscall.SYS_PRCTL, syscall.PR_SET_NAME, uintptr(ptr), 0); err != 0 {
return syscall.Errno(err)
}
// Manage all signals
signal.Notify(signals)
masterConn.Close()
for {
select {
case s := <-signals:
sylog.Debugf("Received signal %s", s.String())
switch s {
case syscall.SIGCHLD:
for {
var status syscall.WaitStatus
wpid, err := syscall.Wait4(-1, &status, syscall.WNOHANG, nil)
if wpid <= 0 || err != nil {
// We break the loop since an error occurred
break
}
if wpid == cmd.Process.Pid {
statusChan <- status
}
}
default:
signal := s.(syscall.Signal)
// EPERM and EINVAL are deliberately ignored because they can't be
// returned in this context: this process is PID 1, so it has
// permission to send signals to its children, and EINVAL would
// mean the Go runtime or the kernel would need to be updated to
// something more stable :)
if isInstance {
if err := syscall.Kill(-cmd.Process.Pid, signal); err == syscall.ESRCH {
sylog.Debugf("No child process, exiting ...")
os.Exit(128 + int(signal))
}
} else if engine.EngineConfig.GetSignalPropagation() {
if err := syscall.Kill(cmd.Process.Pid, signal); err == syscall.ESRCH {
sylog.Debugf("No child process, exiting ...")
os.Exit(128 + int(signal))
}
}
}
case err := <-errChan:
if e, ok := err.(*exec.ExitError); ok {
status, ok := e.Sys().(syscall.WaitStatus)
if !ok {
return fmt.Errorf("command exit with error: %s", err)
}
statusChan <- status
} else if e, ok := err.(*os.SyscallError); ok {
// handle a possible race with the Wait4 call above by ignoring the ECHILD
// error, because the child process was already caught
if e.Err.(syscall.Errno) != syscall.ECHILD {
sylog.Fatalf("error while waiting container process: %s", e.Error())
}
}
if !isInstance {
if len(statusChan) > 0 {
status := <-statusChan
if status.Signaled() {
os.Exit(128 + int(status.Signal()))
}
os.Exit(status.ExitStatus())
} else if err == nil {
os.Exit(0)
}
sylog.Fatalf("command exited with unknown error: %s", err)
}
}
}
}
// PostStartProcess will execute code in master context after execution of container
// process, typically to write instance state/config files or execute post start OCI hook
func (engine *EngineOperations) PostStartProcess(pid int) error {
sylog.Debugf("Post start process")
if engine.EngineConfig.GetInstance() {
uid := os.Getuid()
name := engine.CommonConfig.ContainerID
if err := os.Chdir("/"); err != nil {
return fmt.Errorf("failed to change directory to /: %s", err)
}
file, err := instance.Add(name, instance.SingSubDir)
if err != nil {
return err
}
pw, err := user.GetPwUID(uint32(uid))
if err != nil {
return err
}
file.User = pw.Name
file.Pid = pid
file.PPid = os.Getpid()
file.Image = engine.EngineConfig.GetImage()
// By default we add all namespaces except the user namespace, which
// is added conditionally. This delegates checks to the C starter code,
// which will determine if a namespace needs to be joined by
// comparing namespace inodes.
path := fmt.Sprintf("/proc/%d/ns", pid)
namespaces := []struct {
nstype string
ns specs.LinuxNamespaceType
}{
{"pid", specs.PIDNamespace},
{"uts", specs.UTSNamespace},
{"ipc", specs.IPCNamespace},
{"mnt", specs.MountNamespace},
{"cgroup", specs.CgroupNamespace},
{"net", specs.NetworkNamespace},
}
for _, n := range namespaces {
nspath := filepath.Join(path, n.nstype)
engine.EngineConfig.OciConfig.AddOrReplaceLinuxNamespace(string(n.ns), nspath)
}
for _, ns := range engine.EngineConfig.OciConfig.Linux.Namespaces {
if ns.Type == specs.UserNamespace {
nspath := filepath.Join(path, "user")
engine.EngineConfig.OciConfig.AddOrReplaceLinuxNamespace(specs.UserNamespace, nspath)
file.UserNs = true
break
}
}
// grab configuration to store in instance file
file.Config, err = json.Marshal(engine.CommonConfig)
if err != nil {
return err
}
return file.Update()
}
return nil
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
cmd/list.go
|
package cmd
import (
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/spf13/cobra"
"github.com/krafugo/gops/utils"
)
// listCmd represents the list command
var listCmd = &cobra.Command{
Use: "list",
Short: "List all of available templates",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 1 {
template, err := utils.New("", args[0])
if err != nil {
fmt.Println("Error reading template: ", err)
} else {
fmt.Println(template)
}
} else {
listTemplates()
}
},
}
func init() {
rootCmd.AddCommand(listCmd)
}
// listTemplates shows all of the available templates
func listTemplates() {
ext := ".tmpl"
tmplPath := os.Getenv("GOPS_SCHEMA")
// os.FileInfo
files, err := ioutil.ReadDir(tmplPath)
if err != nil {
fmt.Println("Error listing templates: ", err)
}
fmt.Printf("————[ List of available Templates ]————\n\n")
for _, file := range files {
f := file.Name()
if strings.HasSuffix(f, ext) {
fmt.Println("\t + " + strings.Replace(f, ext, "", 1))
}
}
fmt.Printf("\n> You can choose anyone of above templates!\n")
}
|
[
"\"GOPS_SCHEMA\""
] |
[] |
[
"GOPS_SCHEMA"
] |
[]
|
["GOPS_SCHEMA"]
|
go
| 1 | 0 | |
runtests.py
|
#!/usr/bin/env python
import sys
import os
from os.path import dirname, abspath
from optparse import OptionParser
from django.conf import settings, global_settings
import django
# For convenience, configure settings if they are not pre-configured or if we
# haven't been given a settings module to use via the environment variable.
if not settings.configured and not os.environ.get('DJANGO_SETTINGS_MODULE'):
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'moderation',
'tests',
],
SERIALIZATION_MODULES = {},
MEDIA_URL = '/media/',
STATIC_URL = '/static/',
ROOT_URLCONF = 'tests.urls.default',
DJANGO_MODERATION_MODERATORS = (
'[email protected]',
),
DEBUG=True,
SITE_ID=1,
)
django.setup()
from django.test.runner import DiscoverRunner
def runtests(*test_args, **kwargs):
if 'south' in settings.INSTALLED_APPS:
from south.management.commands import patch_for_test_db_setup
patch_for_test_db_setup()
if not test_args:
test_args = ['tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
test_runner = DiscoverRunner(pattern='*.py', verbosity=kwargs.get('verbosity', 1),
interactive=kwargs.get('interactive', False), failfast=kwargs.get('failfast'))
failures = test_runner.run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('--failfast', action='store_true', default=False, dest='failfast')
(options, args) = parser.parse_args()
runtests(failfast=options.failfast, *args)
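# Example invocation (illustrative; test labels are whatever Django test labels
# exist in your checkout):
#   python runtests.py --failfast tests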
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
get_cases.py
|
import json
import psycopg2
from psycopg2 import sql
import os
DB_HOST = os.getenv('DB_HOST')
DB_NAME = os.getenv('DB_NAME')
DB_USER = os.getenv('DB_USER')
DB_PASSWORD = os.getenv('DB_PASSWORD')
class CourtCase():
def __init__(self, case_row):
self.court_date, self.hearing_type, self.case_id, self.caption, self.person_name = case_row
def to_dict(self):
court_case_dict = {
"court_date": self.court_date.isoformat(timespec="minutes"),
"hearing_type": self.hearing_type,
"case_id": self.case_id,
"caption": self.caption,
"person_name": self.person_name
}
return court_case_dict
def to_json(self):
return json.dumps(self.to_dict())
def connect_to_db():
conn = psycopg2.connect(
host=DB_HOST,
database=DB_NAME,
user=DB_USER,
password=DB_PASSWORD)
return conn
def query_cases(db_conn, name):
cur = db_conn.cursor()
cases_query = sql.SQL("select {fields} from {field1} c inner join {field2} p on p.id = c.person_id where p.name = {search_name};").format(
fields=sql.SQL(",").join([
sql.Identifier("c", "court_date"),
sql.Identifier("c", "hearing_type"),
sql.Identifier("c", "case_id"),
sql.Identifier("c", "caption"),
sql.Identifier("p", "name")
]),
field1=sql.Identifier("court_cases", "court_case"),
field2=sql.Identifier("court_cases", "person"),
search_name=sql.Placeholder()
)
cur.execute(cases_query, (name,))
rows = cur.fetchall()
cur.close()
response_list = []
for row in rows:
case = CourtCase(row)
response_list.append(case.to_dict())
return json.dumps(response_list)
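# A minimal usage sketch (hypothetical; assumes the DB_* environment variables
# above point at a reachable PostgreSQL instance with the court_cases schema):
#   conn = connect_to_db()
#   print(query_cases(conn, "Jane Doe"))
#   conn.close()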
|
[] |
[] |
[
"DB_PASSWORD",
"DB_USER",
"DB_NAME",
"DB_HOST"
] |
[]
|
["DB_PASSWORD", "DB_USER", "DB_NAME", "DB_HOST"]
|
python
| 4 | 0 | |
PiControl/settings.py
|
"""
Django settings for PiPool project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
APP_NAME = os.environ.get("APP_NAME", "SomethingAwesome")
API_TOKEN = os.environ.get("API_TOKEN", "SomethingVeryAwesome")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY", "someSecretKey")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get("APP_ENV", True)
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ['*']
LOGIN_URL = '/login'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'PiControl',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'PiControl.middleware.LoginRequiredMiddleware',
'rollbar.contrib.django.middleware.RollbarNotifierMiddleware',
]
ROOT_URLCONF = 'PiControl.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ["templates"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
CORS_ORIGIN_WHITELIST = (
'parkourben99.gitlab.io',
'bowling.ayles.com.au',
'localhost:8080'
)
WSGI_APPLICATION = 'PiControl.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = os.environ.get("TIME_ZONE", "Australia/Adelaide")
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'public')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
LOGIN_REDIRECT_URL = '/'
ROLLBAR = {
'access_token': os.environ.get("ROLLBAR_KEY", "someSecretKey"),
'environment': 'development' if DEBUG else 'production',
'branch': 'PiController',
'root': BASE_DIR,
}
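# A sample .env for local development (illustrative values only, matching the
# os.environ lookups above):
#   APP_NAME=PiPool
#   API_TOKEN=changeme
#   SECRET_KEY=changeme
#   APP_ENV=True
#   TIME_ZONE=Australia/Adelaide
#   ROLLBAR_KEY=changeme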
|
[] |
[] |
[
"APP_ENV",
"TIME_ZONE",
"ROLLBAR_KEY",
"SECRET_KEY",
"API_TOKEN",
"APP_NAME"
] |
[]
|
["APP_ENV", "TIME_ZONE", "ROLLBAR_KEY", "SECRET_KEY", "API_TOKEN", "APP_NAME"]
|
python
| 6 | 0 | |
hoodie-common/src/main/java/com/uber/hoodie/common/util/FSUtils.java
|
/*
* Copyright (c) 2016 Uber Technologies, Inc. ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.uber.hoodie.common.util;
import com.google.common.base.Preconditions;
import com.uber.hoodie.common.model.HoodieLogFile;
import com.uber.hoodie.common.model.HoodiePartitionMetadata;
import com.uber.hoodie.common.table.timeline.HoodieInstant;
import com.uber.hoodie.exception.HoodieIOException;
import com.uber.hoodie.exception.InvalidHoodiePathException;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
/**
* Utility functions related to accessing the file storage
*/
public class FSUtils {
private static final Logger LOG = LogManager.getLogger(FSUtils.class);
// Log files are of this pattern - .b5068208-e1a4-11e6-bf01-fe55135034f3_20170101134598.log.1
private static final Pattern LOG_FILE_PATTERN = Pattern.compile("\\.(.*)_(.*)\\.(.*)\\.([0-9]*)");
private static final String LOG_FILE_PREFIX = ".";
private static final int MAX_ATTEMPTS_RECOVER_LEASE = 10;
private static final long MIN_CLEAN_TO_KEEP = 10;
private static final long MIN_ROLLBACK_TO_KEEP = 10;
private static final String HOODIE_ENV_PROPS_PREFIX = "HOODIE_ENV_";
public static Configuration prepareHadoopConf(Configuration conf) {
conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
conf.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());
// look for all properties, prefixed to be picked up
for (Entry<String, String> prop : System.getenv().entrySet()) {
if (prop.getKey().startsWith(HOODIE_ENV_PROPS_PREFIX)) {
LOG.info("Picking up value for hoodie env var :" + prop.getKey());
conf.set(prop.getKey()
.replace(HOODIE_ENV_PROPS_PREFIX, "")
.replaceAll("_DOT_", "."),
prop.getValue());
}
}
return conf;
}
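// Illustrative example (an assumption, not from the original source): exporting
// HOODIE_ENV_fs_DOT_s3a_DOT_access_DOT_key=... before startup would be picked up
// by the loop above and set the Hadoop property "fs.s3a.access.key".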
public static FileSystem getFs(String path, Configuration conf) {
FileSystem fs;
conf = prepareHadoopConf(conf);
try {
fs = new Path(path).getFileSystem(conf);
} catch (IOException e) {
throw new HoodieIOException("Failed to get instance of " + FileSystem.class.getName(),
e);
}
LOG.info(
String.format("Hadoop Configuration: fs.defaultFS: [%s], Config:[%s], FileSystem: [%s]",
conf.getRaw("fs.defaultFS"), conf.toString(), fs.toString()));
return fs;
}
public static String makeDataFileName(String commitTime, int taskPartitionId, String fileId) {
return String.format("%s_%d_%s.parquet", fileId, taskPartitionId, commitTime);
}
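// e.g. makeDataFileName("20170101134598", 7, "abc") -> "abc_7_20170101134598.parquet" (illustrative values)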
public static String makeTempDataFileName(String partitionPath, String commitTime,
int taskPartitionId, String fileId, int stageId, long taskAttemptId) {
return String.format("%s_%s_%d_%s_%d_%d.parquet", partitionPath.replace("/", "-"), fileId,
taskPartitionId, commitTime, stageId, taskAttemptId);
}
public static String maskWithoutFileId(String commitTime, int taskPartitionId) {
return String.format("*_%s_%s.parquet", taskPartitionId, commitTime);
}
public static String maskWithoutTaskPartitionId(String commitTime, String fileId) {
return String.format("%s_*_%s.parquet", fileId, commitTime);
}
public static String maskWithOnlyCommitTime(String commitTime) {
return String.format("*_*_%s.parquet", commitTime);
}
public static String getCommitFromCommitFile(String commitFileName) {
return commitFileName.split("\\.")[0];
}
public static String getCommitTime(String fullFileName) {
return fullFileName.split("_")[2].split("\\.")[0];
}
public static long getFileSize(FileSystem fs, Path path) throws IOException {
return fs.getFileStatus(path).getLen();
}
public static String getFileId(String fullFileName) {
return fullFileName.split("_")[0];
}
/**
* Gets all partition paths assuming date partitioning (year, month, day) three levels down.
*/
public static List<String> getAllFoldersThreeLevelsDown(FileSystem fs, String basePath)
throws IOException {
List<String> datePartitions = new ArrayList<>();
FileStatus[] folders = fs.globStatus(new Path(basePath + "/*/*/*"));
for (FileStatus status : folders) {
Path path = status.getPath();
datePartitions.add(String.format("%s/%s/%s", path.getParent().getParent().getName(),
path.getParent().getName(), path.getName()));
}
return datePartitions;
}
public static String getRelativePartitionPath(Path basePath, Path partitionPath) {
String partitionFullPath = partitionPath.toString();
int partitionStartIndex = partitionFullPath.lastIndexOf(basePath.getName());
return partitionFullPath.substring(partitionStartIndex + basePath.getName().length() + 1);
}
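// e.g. basePath "/tmp/hoodie_table" and partitionPath "/tmp/hoodie_table/2017/01/01" -> "2017/01/01" (illustrative paths)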
/**
* Obtain all the partition paths, that are present in this table, denoted by presence of {@link
* com.uber.hoodie.common.model.HoodiePartitionMetadata#HOODIE_PARTITION_METAFILE}
*/
public static List<String> getAllFoldersWithPartitionMetaFile(FileSystem fs, String basePathStr)
throws IOException {
List<String> partitions = new ArrayList<>();
Path basePath = new Path(basePathStr);
RemoteIterator<LocatedFileStatus> allFiles = fs.listFiles(new Path(basePathStr), true);
while (allFiles.hasNext()) {
Path filePath = allFiles.next().getPath();
if (filePath.getName().equals(HoodiePartitionMetadata.HOODIE_PARTITION_METAFILE)) {
partitions.add(getRelativePartitionPath(basePath, filePath.getParent()));
}
}
return partitions;
}
public static List<String> getAllPartitionPaths(FileSystem fs, String basePathStr,
boolean assumeDatePartitioning)
throws IOException {
if (assumeDatePartitioning) {
return getAllFoldersThreeLevelsDown(fs, basePathStr);
} else {
return getAllFoldersWithPartitionMetaFile(fs, basePathStr);
}
}
public static String getFileExtension(String fullName) {
Preconditions.checkNotNull(fullName);
String fileName = (new File(fullName)).getName();
int dotIndex = fileName.indexOf('.');
return dotIndex == -1 ? "" : fileName.substring(dotIndex);
}
public static String getInstantTime(String name) {
return name.replace(getFileExtension(name), "");
}
/**
* Get the file extension from the log file
*/
public static String getFileExtensionFromLog(Path logPath) {
Matcher matcher = LOG_FILE_PATTERN.matcher(logPath.getName());
if (!matcher.find()) {
throw new InvalidHoodiePathException(logPath, "LogFile");
}
return matcher.group(3);
}
/**
* Get the first part of the file name in the log file. That will be the fileId. Log files do not
* have the commitTime in the file name.
*/
public static String getFileIdFromLogPath(Path path) {
Matcher matcher = LOG_FILE_PATTERN.matcher(path.getName());
if (!matcher.find()) {
throw new InvalidHoodiePathException(path, "LogFile");
}
return matcher.group(1);
}
/**
* Check if the file is a parquet file or a log file. Then get the fileId appropriately.
*/
public static String getFileIdFromFilePath(Path filePath) {
if (FSUtils.isLogFile(filePath)) {
return FSUtils.getFileIdFromLogPath(filePath);
}
return FSUtils.getFileId(filePath.getName());
}
/**
* Get the second part of the file name in the log file. That will be the baseCommitTime for the
* log file.
*/
public static String getBaseCommitTimeFromLogPath(Path path) {
Matcher matcher = LOG_FILE_PATTERN.matcher(path.getName());
if (!matcher.find()) {
throw new InvalidHoodiePathException(path, "LogFile");
}
return matcher.group(2);
}
/**
* Get the last part of the file name in the log file and convert to int.
*/
public static int getFileVersionFromLog(Path logPath) {
Matcher matcher = LOG_FILE_PATTERN.matcher(logPath.getName());
if (!matcher.find()) {
throw new InvalidHoodiePathException(logPath, "LogFile");
}
return Integer.parseInt(matcher.group(4));
}
public static String makeLogFileName(String fileId, String logFileExtension,
String baseCommitTime, int version) {
return LOG_FILE_PREFIX + String
.format("%s_%s%s.%d", fileId, baseCommitTime, logFileExtension, version);
}
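// e.g. makeLogFileName("abc", ".log", "20170101134598", 1) -> ".abc_20170101134598.log.1" (illustrative values, matching LOG_FILE_PATTERN above)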
public static String maskWithoutLogVersion(String commitTime, String fileId,
String logFileExtension) {
return LOG_FILE_PREFIX + String.format("%s_%s%s*", fileId, commitTime, logFileExtension);
}
public static boolean isLogFile(Path logPath) {
Matcher matcher = LOG_FILE_PATTERN.matcher(logPath.getName());
return matcher.find();
}
/**
* Get the latest log file written from the list of log files passed in
*/
public static Optional<HoodieLogFile> getLatestLogFile(Stream<HoodieLogFile> logFiles) {
return logFiles.sorted(Comparator
.comparing(s -> s.getLogVersion(),
Comparator.reverseOrder())).findFirst();
}
/**
* Get all the log files for the passed in FileId in the partition path
*/
public static Stream<HoodieLogFile> getAllLogFiles(FileSystem fs, Path partitionPath,
final String fileId, final String logFileExtension, final String baseCommitTime)
throws IOException {
return Arrays.stream(fs.listStatus(partitionPath,
path -> path.getName().startsWith("." + fileId) && path.getName()
.contains(logFileExtension)))
.map(HoodieLogFile::new).filter(s -> s.getBaseCommitTime().equals(baseCommitTime));
}
/**
* Get the latest log version for the fileId in the partition path
*/
public static Optional<Integer> getLatestLogVersion(FileSystem fs, Path partitionPath,
final String fileId, final String logFileExtension, final String baseCommitTime)
throws IOException {
Optional<HoodieLogFile> latestLogFile =
getLatestLogFile(
getAllLogFiles(fs, partitionPath, fileId, logFileExtension, baseCommitTime));
if (latestLogFile.isPresent()) {
return Optional.of(latestLogFile.get().getLogVersion());
}
return Optional.empty();
}
public static int getCurrentLogVersion(FileSystem fs, Path partitionPath,
final String fileId, final String logFileExtension, final String baseCommitTime)
throws IOException {
Optional<Integer> currentVersion =
getLatestLogVersion(fs, partitionPath, fileId, logFileExtension, baseCommitTime);
// handle potential overflow
return (currentVersion.isPresent()) ? currentVersion.get() : HoodieLogFile.LOGFILE_BASE_VERSION;
}
/**
* computes the next log version for the specified fileId in the partition path
*/
public static int computeNextLogVersion(FileSystem fs, Path partitionPath, final String fileId,
final String logFileExtension, final String baseCommitTime) throws IOException {
Optional<Integer> currentVersion =
getLatestLogVersion(fs, partitionPath, fileId, logFileExtension, baseCommitTime);
// handle potential overflow
return (currentVersion.isPresent()) ? currentVersion.get() + 1
: HoodieLogFile.LOGFILE_BASE_VERSION;
}
public static int getDefaultBufferSize(final FileSystem fs) {
return fs.getConf().getInt("io.file.buffer.size", 4096);
}
public static Short getDefaultReplication(FileSystem fs, Path path) {
return fs.getDefaultReplication(path);
}
public static Long getDefaultBlockSize(FileSystem fs, Path path) {
return fs.getDefaultBlockSize(path);
}
/**
* When a file was opened and the task died without closing the stream, another task executor
* cannot open because the existing lease will be active. We will try to recover the lease, from
* HDFS. If a data node went down, it takes about 10 minutes for the lease to be recovered. But if
* the client dies, this should be instant.
*/
public static boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p)
throws IOException, InterruptedException {
LOG.info("Recover lease on dfs file " + p);
// initiate the recovery
boolean recovered = false;
for (int nbAttempt = 0; nbAttempt < MAX_ATTEMPTS_RECOVER_LEASE; nbAttempt++) {
LOG.info("Attempt " + nbAttempt + " to recover lease on dfs file " + p);
recovered = dfs.recoverLease(p);
if (recovered) {
break;
}
// Sleep for 1 second before trying again. Typically it takes about 2-3 seconds to recover
// under default settings
Thread.sleep(1000);
}
return recovered;
}
public static void deleteOlderCleanMetaFiles(FileSystem fs, String metaPath,
Stream<HoodieInstant> instants) {
//TODO - this should be archived when archival is made general for all meta-data
// skip MIN_CLEAN_TO_KEEP and delete the rest; forEach is a terminal operation, so the
// deletes actually run (map alone would leave the stream unconsumed)
instants.skip(MIN_CLEAN_TO_KEEP).forEach(s -> {
try {
fs.delete(new Path(metaPath, s.getFileName()), false);
} catch (IOException e) {
throw new HoodieIOException("Could not delete clean meta files " + s.getFileName(),
e);
}
});
}
public static void deleteOlderRollbackMetaFiles(FileSystem fs, String metaPath,
Stream<HoodieInstant> instants) {
//TODO - this should be archived when archival is made general for all meta-data
// skip MIN_ROLLBACK_TO_KEEP and delete the rest; forEach is a terminal operation, so the
// deletes actually run (map alone would leave the stream unconsumed)
instants.skip(MIN_ROLLBACK_TO_KEEP).forEach(s -> {
try {
fs.delete(new Path(metaPath, s.getFileName()), false);
} catch (IOException e) {
throw new HoodieIOException(
"Could not delete rollback meta files " + s.getFileName(), e);
}
});
}
public static void createPathIfNotExists(FileSystem fs, Path partitionPath) throws IOException {
if (!fs.exists(partitionPath)) {
fs.mkdirs(partitionPath);
}
}
public static Long getSizeInMB(long sizeInBytes) {
return sizeInBytes / (1024 * 1024);
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
src/test/java/act/conf/ConfigKeyHelperTest.java
|
package act.conf;
import act.Act;
import act.TestBase;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.osgl.$;
import org.osgl.exception.NotAppliedException;
import org.osgl.util.C;
import org.osgl.util.E;
import org.osgl.util.S;
public class ConfigKeyHelperTest extends TestBase {
ConfigKeyHelper helper = new ConfigKeyHelper(new $.F0<Act.Mode>() {
@Override
public Act.Mode apply() throws NotAppliedException, $.Break {
return Act.Mode.DEV;
}
}, ConfigKeyHelperTest.class.getClassLoader());
private C.Map<String, Object> conf;
@Before
public void prepare() {
conf = C.newMap();
}
@Test
public void fetchBooleanByEnabled() {
put(FakedConfigKey.GATEWAY_ENABLED, "true");
eq(true, helper.getConfiguration(FakedConfigKey.GATEWAY_ENABLED, conf));
}
@Test
public void fetchBooleanByDisabled() {
put(FakedConfigKey.GATEWAY_DISABLED, "false");
eq(true, helper.getConfiguration(FakedConfigKey.GATEWAY_ENABLED, conf));
}
@Test
public void fetchBooleanByEnabledWithoutSuffix() {
conf.put(S.beforeLast(FakedConfigKey.GATEWAY_ENABLED.key(), "."), "true");
eq(true, helper.getConfiguration(FakedConfigKey.GATEWAY_ENABLED, conf));
eq(false, helper.getConfiguration(FakedConfigKey.GATEWAY_DISABLED, conf));
}
@Test
public void fetchBooleanByEnabledWithBooleanTypePut() {
put(FakedConfigKey.GATEWAY_ENABLED, true);
eq(true, helper.getConfiguration(FakedConfigKey.GATEWAY_ENABLED, conf));
}
@Test
public void fetchInt() {
put(FakedConfigKey.CONN_CNT, "10");
eq(10, helper.getConfiguration(FakedConfigKey.CONN_CNT, conf));
put(FakedConfigKey.CONN_CNT, 30);
eq(30, helper.getConfiguration(FakedConfigKey.CONN_CNT, conf));
}
@Test
public void fetchIntWithModeConf() {
put(FakedConfigKey.CONN_CNT, "10");
eq(10, helper.getConfiguration(FakedConfigKey.CONN_CNT, conf));
}
@Test
public void fetchLong() {
put(FakedConfigKey.CONN_TTL, "100");
eq(100l, helper.getConfiguration(FakedConfigKey.CONN_TTL, conf));
conf.put(FakedConfigKey.CONN_TTL.key(), Long.MAX_VALUE);
eq(Long.MAX_VALUE, helper.getConfiguration(FakedConfigKey.CONN_TTL, conf));
}
@Test
public void fetchFromSysProps() {
put(FakedConfigKey.SOURCE_VERSION, "${java.version}");
eq(System.getProperty("java.version"), helper.getConfiguration(FakedConfigKey.SOURCE_VERSION, conf));
}
@Test
public void fetchFromSysEnv() {
put(FakedConfigKey.PATH, "${PATH}");
eq(System.getenv("PATH"), helper.getConfiguration(FakedConfigKey.PATH, conf));
}
@Test
@Ignore
public void fetchImpl() {
E.tbd("fetchImpl");
}
@Test
@Ignore
public void fetchDir() {
E.tbd();
}
private void put(ConfigKey key, Object v) {
conf.put(key.key(), v);
}
private void put(String key, Object v) {
conf.put(key, v);
}
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
java
| 1 | 0 | |
go/scion-pki/testcrypto/testcrypto_test.go
|
// Copyright 2020 Anapaya Systems
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package testcrypto_test
import (
"fmt"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/scionproto/scion/go/lib/addr"
"github.com/scionproto/scion/go/lib/scrypto/cppki"
"github.com/scionproto/scion/go/lib/xtest"
"github.com/scionproto/scion/go/scion-pki/testcrypto"
"github.com/scionproto/scion/go/scion-pki/trcs"
)
func TestCmd(t *testing.T) {
if _, bazel := os.LookupEnv("TEST_UNDECLARED_OUTPUTS_DIR"); bazel {
t.Skip("Test can't run through bazel because of symlinks and docker not playing nice")
}
outDir, cleanF := xtest.MustTempDir("", "testcrypto")
defer cleanF()
topo := "./testdata/test.topo"
err := testcrypto.Testcrypto(topo, outDir, false, false, asValidity)
require.NoError(t, err)
allASes := []addr.IA{
xtest.MustParseIA("1-ff00:0:110"),
xtest.MustParseIA("1-ff00:0:120"),
xtest.MustParseIA("1-ff00:0:130"),
xtest.MustParseIA("1-ff00:0:111"),
xtest.MustParseIA("1-ff00:0:131"),
xtest.MustParseIA("2-ff00:0:210"),
xtest.MustParseIA("2-ff00:0:220"),
}
for _, as := range allASes {
checkAS(t, outDir, as)
}
issuers := []addr.IA{
xtest.MustParseIA("1-ff00:0:110"),
xtest.MustParseIA("1-ff00:0:111"),
xtest.MustParseIA("2-ff00:0:210"),
}
for _, issuer := range issuers {
checkIssuer(t, outDir, issuer)
}
voters := []addr.IA{
xtest.MustParseIA("1-ff00:0:120"),
xtest.MustParseIA("1-ff00:0:111"),
xtest.MustParseIA("1-ff00:0:131"),
xtest.MustParseIA("2-ff00:0:210"),
xtest.MustParseIA("2-ff00:0:220"),
}
for _, voter := range voters {
checkVoter(t, outDir, voter)
}
checkISD(t, outDir, 1)
checkISD(t, outDir, 2)
}
func checkISD(t *testing.T, outDir string, isd addr.ISD) {
isdDir := filepath.Join(outDir, fmt.Sprintf("ISD%d", isd))
trcFile := filepath.Join(isdDir, "trcs", fmt.Sprintf("ISD%d-B1-S1.trc", isd))
assert.NoError(t, trcs.RunVerify([]string{trcFile}, trcFile))
}
func checkAS(t *testing.T, outDir string, ia addr.IA) {
d := testcrypto.CryptoASDir(ia, testcrypto.NewOut(outDir))
checkFileExists(t, filepath.Join(d, "cp-as.key"))
validateChain(t, filepath.Join(d, fmt.Sprintf("%s.pem", fmtIA(ia))))
}
func checkIssuer(t *testing.T, outDir string, ia addr.IA) {
d := testcrypto.CryptoCADir(ia, testcrypto.NewOut(outDir))
checkFileExists(t, filepath.Join(d, "cp-ca.key"))
checkFileExists(t, filepath.Join(d, "cp-root.key"))
certName := fmt.Sprintf("%s.root.crt", fmtIA(ia))
validateCert(t, filepath.Join(d, certName), cppki.Root)
certName = fmt.Sprintf("%s.ca.crt", fmtIA(ia))
validateCert(t, filepath.Join(d, certName), cppki.CA)
}
func checkVoter(t *testing.T, outDir string, ia addr.IA) {
d := testcrypto.CryptoVotingDir(ia, testcrypto.NewOut(outDir))
checkFileExists(t, filepath.Join(d, "sensitive-voting.key"))
checkFileExists(t, filepath.Join(d, "regular-voting.key"))
sensitiveName := fmt.Sprintf("%s.sensitive.crt", fmtIA(ia))
validateCert(t, filepath.Join(d, sensitiveName), cppki.Sensitive)
regularName := fmt.Sprintf("%s.regular.crt", fmtIA(ia))
validateCert(t, filepath.Join(d, regularName), cppki.Regular)
}
func fmtIA(ia addr.IA) string {
return addr.FormatIA(ia, addr.WithFileSeparator(), addr.WithDefaultPrefix())
}
func checkFileExists(t *testing.T, file string) {
t.Helper()
_, err := os.Stat(file)
require.NoError(t, err, "File %s must exist", file)
}
func validateChain(t *testing.T, file string) {
t.Helper()
certs, err := cppki.ReadPEMCerts(file)
require.NoError(t, err, "Cert %s should exist", file)
require.Len(t, certs, 2, "Cert %s should contain 2 certs", file)
act, err := cppki.ValidateCert(certs[0])
assert.NoError(t, err, "Cert[0] of %s should be valid", file)
assert.Equal(t, cppki.AS, act, "Cert[0] of %s should be of AS type", file)
act, err = cppki.ValidateCert(certs[1])
assert.NoError(t, err, "Cert[1] of %s should be valid", file)
assert.Equal(t, cppki.CA, act, "Cert[1] of %s should be of CA type", file)
}
func validateCert(t *testing.T, file string, ct cppki.CertType) {
t.Helper()
certs, err := cppki.ReadPEMCerts(file)
require.NoError(t, err, "Cert %s should exist", file)
require.Len(t, certs, 1, "Cert %s should contain 1 cert", file)
act, err := cppki.ValidateCert(certs[0])
assert.NoError(t, err, "Cert %s should be valid", file)
assert.Equal(t, ct, act, "Cert %s should be of %s type", file, ct)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
test/integ_tests/conftest.py
|
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import inspect
import os
import boto3
import numpy as np
import pytest
from botocore.exceptions import ClientError
from braket.pennylane_plugin import BraketAwsQubitDevice, BraketLocalQubitDevice
DEVICE_ARN = "arn:aws:braket:::device/quantum-simulator/amazon/sv1"
np.random.seed(42)
# ==========================================================
# Some useful global variables
# single qubit unitary matrix
U = np.array(([[0.5 - 0.5j, 0.5 + 0.5j], [0.5 + 0.5j, 0.5 - 0.5j]]))
# two qubit unitary matrix
U2 = np.array([[0, 1, 1, 1], [1, 0, 1, -1], [1, -1, 0, 1], [1, 1, -1, 0]]) / np.sqrt(3)
# single qubit Hermitian observable
A = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])
# single qubit Kraus operator
K = [
np.array([[0.4 - 0.4j, 0.4 + 0.4j], [0.4 + 0.4j, 0.4 - 0.4j]]),
np.array([[0, 0.6j], [-0.6j, 0]]),
]
# two qubit Kraus operator
K2 = [np.kron(mat1, mat2) for mat1 in K for mat2 in K]
# ==========================================================
# PennyLane devices
# List of all devices.
sv_devices = [(BraketAwsQubitDevice, DEVICE_ARN), (BraketLocalQubitDevice, "braket_sv")]
dm_devices = [(BraketLocalQubitDevice, "braket_dm")]
devices = sv_devices + dm_devices
# List of all device shortnames
shortname_and_backends = [(d.short_name, backend) for (d, backend) in devices]
# List of local devices
local_devices = [(BraketLocalQubitDevice, "braket_sv"), (BraketLocalQubitDevice, "braket_sv")]
# ==========================================================
# AWS resources
session = boto3.session.Session(profile_name=os.environ["AWS_PROFILE"])
account_id = session.client("sts").get_caller_identity()["Account"]
bucket_name = f"amazon-braket-pennylane-plugin-integ-tests-{account_id}"
s3_bucket = session.resource("s3").Bucket(bucket_name)
s3_client = session.client("s3")
# Create bucket if it doesn't exist
try:
# Determine if bucket exists
s3_client.head_bucket(Bucket=bucket_name)
except ClientError as e:
error_code = e.response["Error"]["Code"]
if error_code == "404":
s3_bucket.create(
ACL="private", CreateBucketConfiguration={"LocationConstraint": session.region_name}
)
# ==========================================================
# pytest fixtures
@pytest.fixture
def s3():
"""
S3 bucket and prefix, supplied as pytest arguments
"""
current_test_path = os.environ.get("PYTEST_CURRENT_TEST")
s3_prefix = current_test_path.rsplit(".py")[0].replace("test/", "")
return bucket_name, s3_prefix
@pytest.fixture
def tol(shots):
"""Numerical tolerance to be used in tests."""
if shots == 0:
# analytic expectation values can be computed,
# so we can generally use a smaller tolerance
return {"atol": 0.01, "rtol": 0}
# for non-zero shots, there will be additional
# noise and stochastic effects; will need to increase
# the tolerance
return {"atol": 0.05, "rtol": 0.1}
@pytest.fixture(scope="session")
def init_state():
"""Fixture to create an n-qubit initial state"""
def _init_state(n):
state = np.random.random([2 ** n]) + np.random.random([2 ** n]) * 1j
state /= np.linalg.norm(state)
return state
return _init_state
@pytest.fixture(params=devices)
def device(request, shots, extra_kwargs):
"""Fixture to initialize and return a PennyLane device"""
device, backend = request.param
def _device(n):
return device(wires=n, shots=shots, **extra_kwargs(device, backend))
return _device
@pytest.fixture(params=sv_devices)
def sv_device(request, shots, extra_kwargs):
"""Fixture to initialize and return a PennyLane device"""
device, backend = request.param
def _device(n):
return device(wires=n, shots=shots, **extra_kwargs(device, backend))
return _device
@pytest.fixture(params=dm_devices)
def dm_device(request, shots, extra_kwargs):
"""Fixture to initialize and return a PennyLane device"""
device, backend = request.param
def _device(n):
return device(wires=n, shots=shots, **extra_kwargs(device, backend))
return _device
@pytest.fixture(params=local_devices)
def local_device(request, shots, extra_kwargs):
"""Fixture to initialize and return a PennyLane device"""
device, backend = request.param
def _device(n):
return device(wires=n, shots=shots, **extra_kwargs(device, backend))
return _device
@pytest.fixture
def extra_kwargs(s3):
"""Fixture to determine extra kwargs for devices"""
def _extra_kwargs(device_class, backend):
signature = inspect.signature(device_class).parameters
kwargs = {}
if "device_arn" in signature:
kwargs["device_arn"] = backend
else:
kwargs["backend"] = backend
if "s3_destination_folder" in signature:
kwargs["s3_destination_folder"] = s3
return kwargs
return _extra_kwargs
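# Illustrative mapping (assuming the plugin constructor signatures): for (BraketAwsQubitDevice, DEVICE_ARN)
# this yields {"device_arn": DEVICE_ARN, "s3_destination_folder": (bucket_name, s3_prefix)}, while for
# (BraketLocalQubitDevice, "braket_sv") it yields {"backend": "braket_sv"}; the lookup is purely signature-driven.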
|
[] |
[] |
[
"AWS_PROFILE",
"PYTEST_CURRENT_TEST"
] |
[]
|
["AWS_PROFILE", "PYTEST_CURRENT_TEST"]
|
python
| 2 | 0 | |
dlp/nox.py
|
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (
os.path.join('..', 'api_core'),
os.path.join('..', 'core'),
)
@nox.session
def default(session):
"""Run the unit test suite.
This is intended to be run **without** an interpreter set, so
that the current ``python`` (on the ``PATH``) or the version of
Python corresponding to the ``nox`` binary the ``PATH`` can
run the tests.
"""
session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS)
session.install('-e', '.')
session.run(
'py.test',
'--quiet',
'--cov=google.cloud.dlp_v2',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=97',
os.path.join('tests', 'unit'),
*session.posargs
)
@nox.session
@nox.parametrize('py', ['2.7', '3.5', '3.6', '3.7'])
def unit(session, py):
"""Run the unit test suite."""
# Run unit tests against all supported versions of Python.
session.interpreter = 'python{}'.format(py)
# Set the virtualenv dirname.
session.virtualenv_dirname = 'unit-' + py
default(session)
@nox.session
@nox.parametrize('py', ['2.7', '3.6'])
def system(session, py):
"""Run the system test suite."""
if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
session.skip('Credentials must be set via environment variable.')
session.interpreter = 'python{}'.format(py)
session.virtualenv_dirname = 'sys-' + py
# Use pre-release gRPC for system tests.
session.install('--pre', 'grpcio')
session.install('pytest')
session.install('-e', '.')
session.run('py.test', '--quiet',
os.path.join('tests', 'system'), *session.posargs)
@nox.session
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.interpreter = 'python3.6'
session.install('flake8', *LOCAL_DEPS)
session.install('.')
session.run('flake8', 'google', 'tests')
@nox.session
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.interpreter = 'python3.6'
session.install('docutils', 'pygments')
session.run('python', 'setup.py', 'check', '--restructuredtext',
'--strict')
@nox.session
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.interpreter = 'python3.6'
session.chdir(os.path.dirname(__file__))
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase')
|
[] |
[] |
[
"GOOGLE_APPLICATION_CREDENTIALS"
] |
[]
|
["GOOGLE_APPLICATION_CREDENTIALS"]
|
python
| 1 | 0 | |
newspaper-app/newspaper_project/asgi.py
|
"""
ASGI config for newspaper_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'newspaper_project.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
examples/spouts/EmailSentimentAnalyzer/imap_spout.py
|
import imaplib
import pprint
import os
import tarfile
import errno
import time
import io
import stat
SPOUT = '/pfs/out'
def open_pipe(path_to_file, attempts=0, timeout=2, sleep_int=5):
if attempts < timeout :
flags = os.O_WRONLY # Refer to "man 2 open".
mode = stat.S_IWUSR # This is 0o200 (owner write permission).
umask = 0o777 ^ mode # Prevents always downgrading umask to 0.
umask_original = os.umask(umask)
try:
file = os.open(path_to_file, flags, mode)
# you must open the pipe as binary to prevent line-buffering problems.
return os.fdopen(file, "wb")
except OSError as oe:
print('attempt {0} of {1}; error opening file: {2}'.format(attempts + 1, timeout, oe))
os.umask(umask_original)
time.sleep(sleep_int)
return open_pipe(path_to_file, attempts + 1)
finally:
os.umask(umask_original)
return None
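# Illustrative: with the defaults above (timeout=2, sleep_int=5), a pipe that cannot be opened is
# attempted twice, roughly 5 seconds apart, before open_pipe gives up and returns None.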
unspecified_value = 'not specified'
imap_host = os.getenv('IMAP_SERVER', 'imap.gmail.com')
imap_user = os.getenv('IMAP_LOGIN', unspecified_value)
imap_pass = os.getenv('IMAP_PASSWORD', unspecified_value)
imap_inbox = os.getenv('IMAP_INBOX', 'Inbox')
imap_processed_box = os.getenv('IMAP_PROCESSED_BOX', 'Processed')
if ((imap_pass == unspecified_value) or (imap_user == unspecified_value)):
print("imap spout error: IMAP_LOGIN and IMAP_PASSWORD environment variables not set.")
exit(-1)
# connect to host using SSL
imap = imaplib.IMAP4_SSL(imap_host)
## login to server
imap.login(imap_user, imap_pass)
try:
imap.create(imap_processed_box)
except imaplib.IMAP4.error as im4e:
print("error creating processed box: {}".format(im4e))
pass
while (True):
print("checking for emails...")
## select the mailbox for reading messages from
imap.select(imap_inbox)
typ, data = imap.uid("search", None, 'ALL')
all_emails = data[0].split()
number_of_emails = len(data[0].split())
if number_of_emails > 0:
print("{} new emails.".format(number_of_emails))
mySpout = open_pipe(SPOUT)
if mySpout is None:
print ('error opening file: {}'.format(SPOUT))
exit(-2)
# To use a tarfile object with a named pipe, you must use the "w|" mode
# which makes it not seekable
print("Creating tarstream...")
try:
tarStream = tarfile.open(fileobj=mySpout,mode="w|", encoding='utf-8')
except tarfile.TarError as te:
print('error creating tarstream: {0}'.format(te))
exit(-2)
for current in range(number_of_emails):
current_uid = all_emails[current]
typ, email_data = imap.uid("fetch", current_uid, '(RFC822)')
current_email_rfc822 = email_data[0][1].decode('utf-8')
name = "{}.mbox".format(current_uid)
print("Creating tar archive entry for message {}...".format(current_uid))
tarHeader = tarfile.TarInfo()
tarHeader.size = len(current_email_rfc822)
tarHeader.mode = 0o600
tarHeader.name = name
print("Writing tarfile to spout for message {}...".format(current_uid))
try:
with io.BytesIO(current_email_rfc822.encode('utf-8')) as email:
tarStream.addfile(tarinfo=tarHeader, fileobj=email)
except tarfile.TarError as te:
print('error writing message {0} to tarstream: {1}'.format(current_uid, te))
exit(-2)
print("copying message {} to {}".format(current_uid, imap_processed_box))
copyResult = imap.uid("copy", current_uid, imap_processed_box)
if copyResult[0] == "OK":
print("Deleting message {} from {}".format(current_uid, imap_inbox))
mov, data = imap.uid("store", current_uid, "+FLAGS", r"(\Deleted)")
imap.expunge()
else:
print("Error copying message {} to {}".format(current_uid, imap_processed_box))
exit(-2)
tarStream.close()
mySpout.close()
else:
print("No new emails...")
print("waiting for new emails...")
time.sleep(5)
imap.close()
|
[] |
[] |
[
"IMAP_INBOX",
"IMAP_PROCESSED_BOX",
"IMAP_LOGIN",
"IMAP_PASSWORD",
"IMAP_SERVER"
] |
[]
|
["IMAP_INBOX", "IMAP_PROCESSED_BOX", "IMAP_LOGIN", "IMAP_PASSWORD", "IMAP_SERVER"]
|
python
| 5 | 0 | |
src/black/files.py
|
from functools import lru_cache
import io
import os
from pathlib import Path
import sys
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Optional,
Pattern,
Sequence,
Tuple,
Union,
TYPE_CHECKING,
)
from mypy_extensions import mypyc_attr
from pathspec import PathSpec
from pathspec.patterns.gitwildmatch import GitWildMatchPatternError
if sys.version_info >= (3, 11):
try:
import tomllib
except ImportError:
# Help users on older alphas
import tomli as tomllib
else:
import tomli as tomllib
from black.output import err
from black.report import Report
from black.handle_ipynb_magics import jupyter_dependencies_are_installed
if TYPE_CHECKING:
import colorama # noqa: F401
@lru_cache()
def find_project_root(srcs: Sequence[str]) -> Tuple[Path, str]:
"""Return a directory containing .git, .hg, or pyproject.toml.
That directory will be a common parent of all files and directories
passed in `srcs`.
If no directory in the tree contains a marker that would specify it's the
project root, the root of the file system is returned.
Returns a two-tuple with the first element as the project root path and
the second element as a string describing the method by which the
project root was discovered.
"""
if not srcs:
srcs = [str(Path.cwd().resolve())]
path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs]
# A list of lists of parents for each 'src'. 'src' is included as a
# "parent" of itself if it is a directory
src_parents = [
list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs
]
common_base = max(
set.intersection(*(set(parents) for parents in src_parents)),
key=lambda path: path.parts,
)
for directory in (common_base, *common_base.parents):
if (directory / ".git").exists():
return directory, ".git directory"
if (directory / ".hg").is_dir():
return directory, ".hg directory"
if (directory / "pyproject.toml").is_file():
return directory, "pyproject.toml"
return directory, "file system root"
def find_pyproject_toml(path_search_start: Tuple[str, ...]) -> Optional[str]:
"""Find the absolute filepath to a pyproject.toml if it exists"""
path_project_root, _ = find_project_root(path_search_start)
path_pyproject_toml = path_project_root / "pyproject.toml"
if path_pyproject_toml.is_file():
return str(path_pyproject_toml)
try:
path_user_pyproject_toml = find_user_pyproject_toml()
return (
str(path_user_pyproject_toml)
if path_user_pyproject_toml.is_file()
else None
)
except (PermissionError, RuntimeError) as e:
# We do not have access to the user-level config directory, so ignore it.
err(f"Ignoring user configuration directory due to {e!r}")
return None
@mypyc_attr(patchable=True)
def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
"""Parse a pyproject toml file, pulling out relevant parts for Black
If parsing fails, will raise a tomllib.TOMLDecodeError
"""
with open(path_config, "rb") as f:
pyproject_toml = tomllib.load(f)
config = pyproject_toml.get("tool", {}).get("black", {})
return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
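# Illustrative: a pyproject.toml with [tool.black] containing `line-length = 88` and
# `target-version = ["py38"]` is returned as {"line_length": 88, "target_version": ["py38"]}.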
@lru_cache()
def find_user_pyproject_toml() -> Path:
r"""Return the path to the top-level user configuration for black.
This looks for ~\.black on Windows and ~/.config/black on Linux and other
Unix systems.
May raise:
- RuntimeError: if the current user has no homedir
- PermissionError: if the current process cannot access the user's homedir
"""
if sys.platform == "win32":
# Windows
user_config_path = Path.home() / ".black"
else:
config_root = os.environ.get("XDG_CONFIG_HOME", "~/.config")
user_config_path = Path(config_root).expanduser() / "black"
return user_config_path.resolve()
@lru_cache()
def get_gitignore(root: Path) -> PathSpec:
"""Return a PathSpec matching gitignore content if present."""
gitignore = root / ".gitignore"
lines: List[str] = []
if gitignore.is_file():
with gitignore.open(encoding="utf-8") as gf:
lines = gf.readlines()
try:
return PathSpec.from_lines("gitwildmatch", lines)
except GitWildMatchPatternError as e:
err(f"Could not parse {gitignore}: {e}")
raise
def normalize_path_maybe_ignore(
path: Path,
root: Path,
report: Optional[Report] = None,
) -> Optional[str]:
"""Normalize `path`. May return `None` if `path` was ignored.
`report` is where "path ignored" output goes.
"""
try:
abspath = path if path.is_absolute() else Path.cwd() / path
normalized_path = abspath.resolve()
try:
root_relative_path = normalized_path.relative_to(root).as_posix()
except ValueError:
if report:
report.path_ignored(
path, f"is a symbolic link that points outside {root}"
)
return None
except OSError as e:
if report:
report.path_ignored(path, f"cannot be read because {e}")
return None
return root_relative_path
def path_is_excluded(
normalized_path: str,
pattern: Optional[Pattern[str]],
) -> bool:
match = pattern.search(normalized_path) if pattern else None
return bool(match and match.group(0))
def gen_python_files(
paths: Iterable[Path],
root: Path,
include: Pattern[str],
exclude: Pattern[str],
extend_exclude: Optional[Pattern[str]],
force_exclude: Optional[Pattern[str]],
report: Report,
gitignore: Optional[PathSpec],
*,
verbose: bool,
quiet: bool,
) -> Iterator[Path]:
"""Generate all files under `path` whose paths are not excluded by the
`exclude_regex`, `extend_exclude`, or `force_exclude` regexes,
but are included by the `include` regex.
Symbolic links pointing outside of the `root` directory are ignored.
`report` is where output about exclusions goes.
"""
assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
for child in paths:
normalized_path = normalize_path_maybe_ignore(child, root, report)
if normalized_path is None:
continue
# First ignore files matching .gitignore, if passed
if gitignore is not None and gitignore.match_file(normalized_path):
report.path_ignored(child, "matches the .gitignore file content")
continue
# Then ignore with `--exclude` `--extend-exclude` and `--force-exclude` options.
normalized_path = "/" + normalized_path
if child.is_dir():
normalized_path += "/"
if path_is_excluded(normalized_path, exclude):
report.path_ignored(child, "matches the --exclude regular expression")
continue
if path_is_excluded(normalized_path, extend_exclude):
report.path_ignored(
child, "matches the --extend-exclude regular expression"
)
continue
if path_is_excluded(normalized_path, force_exclude):
report.path_ignored(child, "matches the --force-exclude regular expression")
continue
if child.is_dir():
# If gitignore is None, gitignore usage is disabled, while a Falsey
# gitignore is when the directory doesn't have a .gitignore file.
yield from gen_python_files(
child.iterdir(),
root,
include,
exclude,
extend_exclude,
force_exclude,
report,
gitignore + get_gitignore(child) if gitignore is not None else None,
verbose=verbose,
quiet=quiet,
)
elif child.is_file():
if child.suffix == ".ipynb" and not jupyter_dependencies_are_installed(
verbose=verbose, quiet=quiet
):
continue
include_match = include.search(normalized_path) if include else True
if include_match:
yield child
def wrap_stream_for_windows(
f: io.TextIOWrapper,
) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]:
"""
Wrap stream with colorama's wrap_stream so colors are shown on Windows.
If `colorama` is unavailable, the original stream is returned unmodified.
Otherwise, the `wrap_stream()` function determines whether the stream needs
to be wrapped for a Windows environment and will accordingly either return
an `AnsiToWin32` wrapper or the original stream.
"""
try:
from colorama.initialise import wrap_stream
except ImportError:
return f
else:
# Set `strip=False` to avoid needing to modify test_express_diff_with_color.
return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True)
|
[] |
[] |
[
"XDG_CONFIG_HOME"
] |
[]
|
["XDG_CONFIG_HOME"]
|
python
| 1 | 0 | |
jina/__init__.py
|
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# do not change this line manually
# this is managed by git tag and updated on every release
__version__ = '0.6.8'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.0.65'
import platform
import sys
# do some os-wise patches
if sys.version_info < (3, 7, 0):
raise OSError('Jina requires Python 3.7 and above, but yours is %s' % sys.version_info)
if sys.version_info >= (3, 8, 0) and platform.system() == 'Darwin':
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method
set_start_method('fork')
from datetime import datetime
from types import SimpleNamespace
import os
# attempt to fix the fork error on macOS; it seems to have no effect here, so the variable must still be exported manually before starting jina
os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
__uptime__ = datetime.now().strftime('%Y%m%d%H%M%S')
# update on MacOS
# 1. clean this tuple,
# 2. grep -ohE "\'JINA_.*?\'" **/*.py | sort -u | sed "s/$/,/g"
# 3. copy all lines EXCEPT the first (which is the grep command in the last line)
__jina_env__ = ('JINA_ARRAY_QUANT',
'JINA_BINARY_DELIMITER',
'JINA_CONTRIB_MODULE',
'JINA_CONTRIB_MODULE_IS_LOADING',
'JINA_CONTROL_PORT',
'JINA_DB_COLLECTION',
'JINA_DB_HOSTNAME',
'JINA_DB_NAME',
'JINA_DB_PASSWORD',
'JINA_DB_USERNAME',
'JINA_DEFAULT_HOST',
'JINA_DISABLE_UVLOOP',
'JINA_EXECUTOR_WORKDIR',
'JINA_FULL_CLI',
'JINA_IPC_SOCK_TMP',
'JINA_LOG_CONFIG',
'JINA_LOG_NO_COLOR',
'JINA_POD_NAME',
'JINA_PROFILING',
'JINA_RANDOM_PORTS',
'JINA_SOCKET_HWM',
'JINA_TEST_GPU',
'JINA_TEST_PRETRAINED',
'JINA_VCS_VERSION',
'JINA_WARN_UNNAMED')
__default_host__ = os.environ.get('JINA_DEFAULT_HOST', '0.0.0.0')
__ready_msg__ = 'ready and listening'
__stop_msg__ = 'terminated'
__unable_to_load_pretrained_model_msg__ = 'Executor depending on pretrained model file could not find the pretrained model'
__binary_delimiter__ = os.environ.get('JINA_BINARY_DELIMITER', '460841a0a8a430ae25d9ad7c1f048c57').encode()
JINA_GLOBAL = SimpleNamespace()
JINA_GLOBAL.imported = SimpleNamespace()
JINA_GLOBAL.imported.executors = False
JINA_GLOBAL.imported.drivers = False
JINA_GLOBAL.imported.hub = False
JINA_GLOBAL.logserver = SimpleNamespace()
def import_classes(namespace: str, targets=None,
show_import_table: bool = False, import_once: bool = False):
"""
Import all or selected executors into the runtime. This is called when Jina is first imported, so that the YAML
constructors are registered beforehand. It can also be used to import third-party or external executors.
:param namespace: the namespace to import
:param targets: the list of executor names to import
:param show_import_table: show the import result as a table
:param import_once: import everything only once, to avoid repeated import
"""
import os, re
from .logging import default_logger
if namespace == 'jina.executors':
import_type = 'ExecutorType'
if import_once and JINA_GLOBAL.imported.executors:
return
elif namespace == 'jina.drivers':
import_type = 'DriverType'
if import_once and JINA_GLOBAL.imported.drivers:
return
elif namespace == 'jina.hub':
import_type = 'ExecutorType'
if import_once and JINA_GLOBAL.imported.hub:
return
else:
raise TypeError(f'namespace: {namespace} is unrecognized')
from setuptools import find_packages
import pkgutil
from pkgutil import iter_modules
try:
path = os.path.dirname(pkgutil.get_loader(namespace).path)
except AttributeError:
if namespace == 'jina.hub':
default_logger.debug(f'hub submodule is not initialized. Please try "git submodule update --init"')
return {}
modules = set()
for info in iter_modules([path]):
if (namespace != 'jina.hub' and not info.ispkg) or (namespace == 'jina.hub' and info.ispkg):
modules.add('.'.join([namespace, info.name]))
for pkg in find_packages(path):
modules.add('.'.join([namespace, pkg]))
pkgpath = path + '/' + pkg.replace('.', '/')
for info in iter_modules([pkgpath]):
if (namespace != 'jina.hub' and not info.ispkg) or (namespace == 'jina.hub' and info.ispkg):
modules.add('.'.join([namespace, pkg, info.name]))
# filter
ignored_module_pattern = r'\.tests|\.api|\.bump_version'
modules = {m for m in modules if not re.findall(ignored_module_pattern, m)}
from collections import defaultdict
load_stat = defaultdict(list)
bad_imports = []
if isinstance(targets, str):
targets = {targets}
elif isinstance(targets, list):
targets = set(targets)
elif targets is None:
targets = {}
else:
raise TypeError(f'target must be a set, but received {targets!r}')
depend_tree = {}
import importlib
from .helper import colored
for m in modules:
try:
mod = importlib.import_module(m)
for k in dir(mod):
# import the class
if (getattr(mod, k).__class__.__name__ == import_type) and (not targets or k in targets):
try:
_c = getattr(mod, k)
load_stat[m].append(
(k, True, colored('▸', 'green').join(f'{vvv.__name__}' for vvv in _c.mro()[:-1][::-1])))
d = depend_tree
for vvv in _c.mro()[:-1][::-1]:
if vvv.__name__ not in d:
d[vvv.__name__] = {}
d = d[vvv.__name__]
d['module'] = m
if k in targets:
targets.remove(k)
if not targets:
return # target execs are all found and loaded, return
try:
# load the default request for this executor if possible
from .executors.requests import get_default_reqs
get_default_reqs(type.mro(getattr(mod, k)))
except ValueError:
pass
except Exception as ex:
load_stat[m].append((k, False, ex))
bad_imports.append('.'.join([m, k]))
if k in targets:
raise ex # target class is found but not loaded, raise return
except Exception as ex:
load_stat[m].append(('', False, ex))
bad_imports.append(m)
if targets:
raise ImportError(f'{targets} can not be found in jina')
if show_import_table:
from .helper import print_load_table, print_dep_tree_rst
print_load_table(load_stat)
else:
if bad_imports:
if namespace != 'jina.hub':
default_logger.error(
f'these modules or classes cannot be imported: {bad_imports}. '
f'You can use `jina check` to list all executors and drivers')
else:
default_logger.warning(
f'due to missing dependencies or bad implementations, {bad_imports} cannot be imported. '
f'If you are using these executors/drivers, they won\'t work. '
f'You can use `jina check` to list all executors and drivers')
if namespace == 'jina.executors':
JINA_GLOBAL.imported.executors = True
elif namespace == 'jina.drivers':
JINA_GLOBAL.imported.drivers = True
elif namespace == 'jina.hub':
JINA_GLOBAL.imported.hub = True
return depend_tree
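# Illustrative: import_classes('jina.executors', targets='BaseExecutor') would import only that class
# (if it is found) and return as soon as every requested target has been loaded.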
# driver first, as executor may contain driver
import_classes('jina.drivers', show_import_table=False, import_once=True)
import_classes('jina.executors', show_import_table=False, import_once=True)
import_classes('jina.hub', show_import_table=False, import_once=True)
# manually install the default signal handler
import signal
signal.signal(signal.SIGINT, signal.default_int_handler)
def set_nofile(nofile_atleast=4096):
"""
Sets the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn with
plot generators executing in parallel, versus the Ubuntu default of ulimit -n 1024 or the OS X
El Capitan default of 256. The setting is temporary and expires with the Python session.
"""
try:
import resource as res
except ImportError: # Windows
res = None
from .logging import default_logger
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
default_logger.debug(f'setting soft & hard ulimit -n {soft} {hard}')
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
default_logger.warning(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
default_logger.warning('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
default_logger.debug(f'ulimit -n soft,hard: {soft} {hard}')
return soft, hard
set_nofile()
|
[] |
[] |
[
"JINA_BINARY_DELIMITER",
"JINA_DEFAULT_HOST",
"OBJC_DISABLE_INITIALIZE_FORK_SAFETY"
] |
[]
|
["JINA_BINARY_DELIMITER", "JINA_DEFAULT_HOST", "OBJC_DISABLE_INITIALIZE_FORK_SAFETY"]
|
python
| 3 | 0 | |
src/main/java/org/kie/kogito/kafka/PingKafka.java
|
package org.kie.kogito.kafka;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
public class PingKafka {
public static void main(String[] args) {
String bootstrapServer = System.getenv("BOOTSTRAP_SERVERS");
if (bootstrapServer != null && bootstrapServer.length() > 0) {
Properties config = new Properties();
config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
AdminClient adminClient = AdminClient.create(config);
try {
Set<String> topics = adminClient.listTopics().names().get();
topics.forEach(topic -> System.out.println("Name: " + topic));
System.out.println("Finished.");
} catch (InterruptedException | ExecutionException e) {
e.printStackTrace();
}
}
}
}
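// Illustrative invocation (hypothetical broker address; assumes the Kafka client jars are on the classpath):
//   BOOTSTRAP_SERVERS=localhost:9092 java org.kie.kogito.kafka.PingKafka
// prints one "Name: <topic>" line per topic and then "Finished."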
|
[
"\"BOOTSTRAP_SERVERS\""
] |
[] |
[
"BOOTSTRAP_SERVERS"
] |
[]
|
["BOOTSTRAP_SERVERS"]
|
java
| 1 | 0 | |
internal/service/wire_gen.go
|
// Code generated by Wire. DO NOT EDIT.
//go:generate wire
//+build !wireinject
package service
import (
"github.com/czeslavo/snappy/internal/adapters"
"github.com/czeslavo/snappy/internal/application"
"github.com/czeslavo/snappy/internal/ports"
"github.com/czeslavo/snappy/internal/service/config"
"github.com/sirupsen/logrus"
"net/http"
"os"
)
// Injectors from wire.go:
func BuildService() (*Service, error) {
configConfig, err := config.ReadConfigFromEnv()
if err != nil {
return nil, err
}
httpPort := configConfig.HTTPPort
snapshotsDirectory := configConfig.SnapshotsDirectory
snapshotsFileSystemRepository, err := adapters.NewSnapshotsFileSystemRepository(snapshotsDirectory)
if err != nil {
return nil, err
}
getLatestSnapshotHandler := application.NewGetLatestSnapshotHandler(snapshotsFileSystemRepository)
fieldLogger := provideLogger()
httpServer := ports.NewHTTPServer(httpPort, getLatestSnapshotHandler, fieldLogger)
client := _wireClientValue
cameraURL := configConfig.CameraURL
jpegCamera, err := adapters.NewJPEGCamera(client, cameraURL)
if err != nil {
return nil, err
}
takeSnapshotHandler := application.NewTakeSnapshotHandler(jpegCamera, snapshotsFileSystemRepository, fieldLogger)
zipSnapshotsArchiver := adapters.NewZipSnapshotArchiver()
ftpUploader := provideFtpUploader(configConfig, fieldLogger)
archiveAllSnapshotsHandler := application.NewArchiveAllSnapshotsHandler(snapshotsFileSystemRepository, zipSnapshotsArchiver, ftpUploader, fieldLogger)
ticker := ports.NewTicker(takeSnapshotHandler, archiveAllSnapshotsHandler, configConfig, fieldLogger)
service := &Service{
HttpServer: httpServer,
Ticker: ticker,
Logger: fieldLogger,
Config: configConfig,
}
return service, nil
}
var (
_wireClientValue = &http.Client{}
)
// wire.go:
func provideLogger() logrus.FieldLogger {
level := logrus.DebugLevel
if env := os.Getenv("LOG_LEVEL"); env != "" {
l, err := logrus.ParseLevel(os.Getenv("LOG_LEVEL"))
if err == nil {
level = l
}
}
logger := logrus.New()
logger.SetLevel(level)
return logger
}
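// Illustrative: with LOG_LEVEL=warning the logger level becomes logrus.WarnLevel; an unset or
// unparsable value keeps the DebugLevel default chosen above.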
func provideFtpUploader(conf config.Config, logger logrus.FieldLogger) adapters.FtpUploader {
return adapters.NewFtpUploader(adapters.Credentials{
Username: conf.FtpUsername,
Password: conf.FtpPassword,
}, conf.FtpHost, conf.FtpTargetDirectory, logger)
}
|
[
"\"LOG_LEVEL\"",
"\"LOG_LEVEL\""
] |
[] |
[
"LOG_LEVEL"
] |
[]
|
["LOG_LEVEL"]
|
go
| 1 | 0 | |
plugins/git4idea/src/git4idea/commands/GitHandler.java
|
/*
* Copyright 2000-2013 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package git4idea.commands;
import com.intellij.execution.ExecutionException;
import com.intellij.execution.configurations.GeneralCommandLine;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.ModalityState;
import com.intellij.openapi.components.ServiceManager;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Key;
import com.intellij.openapi.util.SystemInfo;
import com.intellij.openapi.vcs.FilePath;
import com.intellij.openapi.vcs.ProcessEventListener;
import com.intellij.openapi.vcs.VcsException;
import com.intellij.openapi.vfs.LocalFileSystem;
import com.intellij.openapi.vfs.VfsUtil;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.util.EventDispatcher;
import com.intellij.util.Processor;
import com.intellij.vcsUtil.VcsFileUtil;
import git4idea.GitVcs;
import git4idea.config.GitVcsApplicationSettings;
import git4idea.config.GitVcsSettings;
import git4idea.config.GitVersionSpecialty;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.git4idea.http.GitAskPassXmlRpcHandler;
import org.jetbrains.git4idea.ssh.GitSSHHandler;
import org.jetbrains.git4idea.ssh.GitXmlRpcSshService;
import java.io.File;
import java.io.OutputStream;
import java.nio.charset.Charset;
import java.util.*;
import java.util.concurrent.LinkedBlockingQueue;
/**
* A handler for git commands
*/
public abstract class GitHandler {
protected final Project myProject;
protected final GitCommand myCommand;
private final HashSet<Integer> myIgnoredErrorCodes = new HashSet<Integer>(); // Error codes that are ignored for the handler
private final List<VcsException> myErrors = Collections.synchronizedList(new ArrayList<VcsException>());
private final List<String> myLastOutput = Collections.synchronizedList(new ArrayList<String>());
private final int LAST_OUTPUT_SIZE = 5;
protected static final Logger LOG = Logger.getInstance(GitHandler.class.getName());
final GeneralCommandLine myCommandLine;
@SuppressWarnings({"FieldAccessedSynchronizedAndUnsynchronized"})
Process myProcess;
private boolean myStdoutSuppressed; // If true, the standard output is not copied to version control console
private boolean myStderrSuppressed; // If true, the standard error is not copied to version control console
private final File myWorkingDirectory;
private boolean myEnvironmentCleanedUp = true; // the flag indicating that environment has been cleaned up, by default is true because there is nothing to clean
private int myHandlerNo;
private Processor<OutputStream> myInputProcessor; // The processor for stdin
// if true process might be cancelled
// note that access is safe because it accessed in unsynchronized block only after process is started, and it does not change after that
@SuppressWarnings({"FieldAccessedSynchronizedAndUnsynchronized"})
private boolean myIsCancellable = true;
private Integer myExitCode; // exit code or null if exit code is not yet available
@SuppressWarnings({"FieldAccessedSynchronizedAndUnsynchronized"})
@NonNls
private Charset myCharset = Charset.forName("UTF-8"); // Character set to use for IO
private final EventDispatcher<ProcessEventListener> myListeners = EventDispatcher.create(ProcessEventListener.class);
@SuppressWarnings({"FieldAccessedSynchronizedAndUnsynchronized"})
protected boolean mySilent; // if true, the command execution is not logged in version control view
protected final GitVcs myVcs;
private final Map<String, String> myEnv;
private GitVcsApplicationSettings myAppSettings;
private GitVcsSettings myProjectSettings;
private Runnable mySuspendAction; // Suspend action used by {@link #suspendWriteLock()}
private Runnable myResumeAction; // Resume action used by {@link #resumeWriteLock()}
private long myStartTime; // git execution start timestamp
private static final long LONG_TIME = 10 * 1000;
@Nullable private ModalityState myState;
@Nullable private String myUrl;
/**
* A constructor
*
* @param project a project
* @param directory a process directory
* @param command a command to execute (if empty string, the parameter is ignored)
*/
protected GitHandler(@NotNull Project project, @NotNull File directory, @NotNull GitCommand command) {
myProject = project;
myCommand = command;
myAppSettings = GitVcsApplicationSettings.getInstance();
myProjectSettings = GitVcsSettings.getInstance(myProject);
myEnv = new HashMap<String, String>(System.getenv());
myVcs = GitVcs.getInstance(project);
myWorkingDirectory = directory;
myCommandLine = new GeneralCommandLine();
if (myAppSettings != null) {
myCommandLine.setExePath(myAppSettings.getPathToGit());
}
myCommandLine.setWorkDirectory(myWorkingDirectory);
if (command.name().length() > 0) {
myCommandLine.addParameter(command.name());
}
}
/**
* A constructor
*
* @param project a project
* @param vcsRoot a process directory
* @param command a command to execute
*/
protected GitHandler(final Project project, final VirtualFile vcsRoot, final GitCommand command) {
this(project, VfsUtil.virtualToIoFile(vcsRoot), command);
}
/**
* @return multicaster for listeners
*/
protected ProcessEventListener listeners() {
return myListeners.getMulticaster();
}
/**
* Add error code to ignored list
*
* @param code the code to ignore
*/
public void ignoreErrorCode(int code) {
myIgnoredErrorCodes.add(code);
}
/**
* Check if error code should be ignored
*
* @param code a code to check
* @return true if error code is ignorable
*/
public boolean isIgnoredErrorCode(int code) {
return myIgnoredErrorCodes.contains(code);
}
/**
* add error to the error list
*
* @param ex an error to add to the list
*/
public void addError(VcsException ex) {
myErrors.add(ex);
}
public void addLastOutput(String line) {
if (myLastOutput.size() < LAST_OUTPUT_SIZE) {
myLastOutput.add(line);
} else {
myLastOutput.add(0, line);
Collections.rotate(myLastOutput, -1);
}
}
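  /**
   * @return the last output lines collected so far
   */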
public List<String> getLastOutput() {
return myLastOutput;
}
/**
* @return unmodifiable list of errors.
*/
public List<VcsException> errors() {
return Collections.unmodifiableList(myErrors);
}
/**
* @return a context project
*/
public Project project() {
return myProject;
}
/**
* @return the current working directory
*/
public File workingDirectory() {
return myWorkingDirectory;
}
/**
* @return the current working directory
*/
public VirtualFile workingDirectoryFile() {
final VirtualFile file = LocalFileSystem.getInstance().findFileByIoFile(workingDirectory());
if (file == null) {
throw new IllegalStateException("The working directly should be available: " + workingDirectory());
}
return file;
}
@SuppressWarnings("NullableProblems")
public void setUrl(@NotNull String url) {
myUrl = url;
}
protected boolean isRemote() {
return myUrl != null;
}
/**
* Add listener to handler
*
* @param listener a listener
*/
protected void addListener(ProcessEventListener listener) {
myListeners.addListener(listener);
}
/**
   * End option parameters and start file paths. The method adds the {@code "--"} parameter.
*/
public void endOptions() {
myCommandLine.addParameter("--");
}
/**
* Add string parameters
*
   * @param parameters the parameters to add
*/
@SuppressWarnings({"WeakerAccess"})
public void addParameters(@NonNls @NotNull String... parameters) {
addParameters(Arrays.asList(parameters));
}
/**
* Add parameters from the list
*
* @param parameters the parameters to add
*/
public void addParameters(List<String> parameters) {
checkNotStarted();
for (String parameter : parameters) {
myCommandLine.addParameter(escapeParameterIfNeeded(parameter));
}
}
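  /**
   * Escapes the {@code ^} character when git is invoked through a Windows {@code .cmd} wrapper script:
   * each caret is quadrupled so that a literal caret survives being unescaped twice by cmd.
   */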
@NotNull
private String escapeParameterIfNeeded(@NotNull String parameter) {
if (escapeNeeded(parameter)) {
return parameter.replaceAll("\\^", "^^^^");
}
return parameter;
}
private boolean escapeNeeded(@NotNull String parameter) {
return SystemInfo.isWindows && isCmd() && parameter.contains("^");
}
private boolean isCmd() {
return myAppSettings.getPathToGit().toLowerCase().endsWith("cmd");
}
@NotNull
private String unescapeCommandLine(@NotNull String commandLine) {
if (escapeNeeded(commandLine)) {
return commandLine.replaceAll("\\^\\^\\^\\^", "^");
}
return commandLine;
}
/**
* Add file path parameters. The parameters are made relative to the working directory
*
   * @param parameters the file paths to add
* @throws IllegalArgumentException if some path is not under root.
*/
public void addRelativePaths(@NotNull FilePath... parameters) {
addRelativePaths(Arrays.asList(parameters));
}
/**
* Add file path parameters. The parameters are made relative to the working directory
*
   * @param filePaths the file paths to add
* @throws IllegalArgumentException if some path is not under root.
*/
@SuppressWarnings({"WeakerAccess"})
public void addRelativePaths(@NotNull final Collection<FilePath> filePaths) {
checkNotStarted();
for (FilePath path : filePaths) {
myCommandLine.addParameter(VcsFileUtil.relativePath(myWorkingDirectory, path));
}
}
/**
* Add file path parameters. The parameters are made relative to the working directory
*
   * @param files the files to add
* @throws IllegalArgumentException if some path is not under root.
*/
public void addRelativePathsForFiles(@NotNull final Collection<File> files) {
checkNotStarted();
for (File file : files) {
myCommandLine.addParameter(VcsFileUtil.relativePath(myWorkingDirectory, file));
}
}
/**
* Add virtual file parameters. The parameters are made relative to the working directory
*
   * @param files the virtual files to add
* @throws IllegalArgumentException if some path is not under root.
*/
@SuppressWarnings({"WeakerAccess"})
public void addRelativeFiles(@NotNull final Collection<VirtualFile> files) {
checkNotStarted();
for (VirtualFile file : files) {
myCommandLine.addParameter(VcsFileUtil.relativePath(myWorkingDirectory, file));
}
}
/**
* Adds "--progress" parameter. Usable for long operations, such as clone or fetch.
* @return is "--progress" parameter supported by this version of Git.
*/
public boolean addProgressParameter() {
if (GitVersionSpecialty.ABLE_TO_USE_PROGRESS_IN_REMOTE_COMMANDS.existsIn(myVcs.getVersion())) {
addParameters("--progress");
return true;
}
return false;
}
/**
* check that process is not started yet
*
   * @throws IllegalStateException if the process has already been started
*/
private void checkNotStarted() {
if (isStarted()) {
throw new IllegalStateException("The process has been already started");
}
}
/**
* check that process is started
*
* @throws IllegalStateException if process has not been started
*/
protected final void checkStarted() {
if (!isStarted()) {
throw new IllegalStateException("The process is not started yet");
}
}
/**
* @return true if process is started
*/
public final synchronized boolean isStarted() {
return myProcess != null;
}
/**
* Set new value of cancellable flag (by default true)
*
* @param value a new value of the flag
*/
public void setCancellable(boolean value) {
checkNotStarted();
myIsCancellable = value;
}
/**
* @return cancellable state
*/
public boolean isCancellable() {
return myIsCancellable;
}
/**
* Start process
*/
@SuppressWarnings("UseOfSystemOutOrSystemErr")
public synchronized void start() {
checkNotStarted();
try {
myStartTime = System.currentTimeMillis();
if (!myProject.isDefault() && !mySilent && (myVcs != null)) {
myVcs.showCommandLine("cd " + myWorkingDirectory);
myVcs.showCommandLine(printableCommandLine());
LOG.info("cd " + myWorkingDirectory);
LOG.info(printableCommandLine());
}
else {
LOG.debug("cd " + myWorkingDirectory);
LOG.debug(printableCommandLine());
}
if (ApplicationManager.getApplication().isUnitTestMode()) {
System.out.println("cd " + myWorkingDirectory);
System.out.println(printableCommandLine());
}
// setup environment
GitRemoteProtocol remoteProtocol = GitRemoteProtocol.fromUrl(myUrl);
if (remoteProtocol == GitRemoteProtocol.SSH && myProjectSettings.isIdeaSsh()) {
GitXmlRpcSshService ssh = ServiceManager.getService(GitXmlRpcSshService.class);
myEnv.put(GitSSHHandler.GIT_SSH_ENV, ssh.getScriptPath().getPath());
myHandlerNo = ssh.registerHandler(new GitSSHGUIHandler(myProject, myState));
myEnvironmentCleanedUp = false;
myEnv.put(GitSSHHandler.SSH_HANDLER_ENV, Integer.toString(myHandlerNo));
int port = ssh.getXmlRcpPort();
myEnv.put(GitSSHHandler.SSH_PORT_ENV, Integer.toString(port));
LOG.debug(String.format("handler=%s, port=%s", myHandlerNo, port));
}
else if (remoteProtocol == GitRemoteProtocol.HTTP) {
GitHttpAuthService service = ServiceManager.getService(GitHttpAuthService.class);
myEnv.put(GitAskPassXmlRpcHandler.GIT_ASK_PASS_ENV, service.getScriptPath().getPath());
assert myUrl != null : "myUrl can't be null here";
GitHttpAuthenticator httpAuthenticator = service.createAuthenticator(myProject, myState, myCommand, myUrl);
myHandlerNo = service.registerHandler(httpAuthenticator);
myEnvironmentCleanedUp = false;
myEnv.put(GitAskPassXmlRpcHandler.GIT_ASK_PASS_HANDLER_ENV, Integer.toString(myHandlerNo));
int port = service.getXmlRcpPort();
myEnv.put(GitAskPassXmlRpcHandler.GIT_ASK_PASS_PORT_ENV, Integer.toString(port));
LOG.debug(String.format("handler=%s, port=%s", myHandlerNo, port));
addAuthListener(httpAuthenticator);
}
myCommandLine.getEnvironment().clear();
myCommandLine.getEnvironment().putAll(myEnv);
// start process
myProcess = startProcess();
startHandlingStreams();
}
catch (Throwable t) {
cleanupEnv();
myListeners.getMulticaster().startFailed(t);
}
}
private void addAuthListener(@NotNull final GitHttpAuthenticator authenticator) {
    // TODO this code should be located in GitLineHandler, and the other remote code should be moved there as well
if (this instanceof GitLineHandler) {
((GitLineHandler)this).addLineListener(new GitLineHandlerAdapter() {
private boolean myAuthFailed;
@Override
public void onLineAvailable(String line, Key outputType) {
if (line.toLowerCase().contains("authentication failed")) {
myAuthFailed = true;
}
}
@Override
public void processTerminated(int exitCode) {
if (myAuthFailed) {
authenticator.forgetPassword();
}
else {
authenticator.saveAuthData();
}
}
});
}
}
protected abstract Process startProcess() throws ExecutionException;
/**
* Start handling process output streams for the handler.
*/
protected abstract void startHandlingStreams();
/**
   * @return the command line with the full path to the executable replaced by "git"
*/
public String printableCommandLine() {
return unescapeCommandLine(myCommandLine.getCommandLineString("git"));
}
/**
* Cancel activity
*/
public synchronized void cancel() {
checkStarted();
if (!myIsCancellable) {
throw new IllegalStateException("The process is not cancellable.");
}
destroyProcess();
}
/**
* Destroy process
*/
public abstract void destroyProcess();
/**
* @return exit code for process if it is available
*/
public synchronized int getExitCode() {
if (myExitCode == null) {
throw new IllegalStateException("Exit code is not yet available");
}
return myExitCode.intValue();
}
/**
   * @param exitCode an exit code for the process
*/
protected synchronized void setExitCode(int exitCode) {
myExitCode = exitCode;
}
/**
* Cleanup environment
*/
protected synchronized void cleanupEnv() {
if (myEnvironmentCleanedUp) {
return;
}
GitRemoteProtocol remoteProtocol = GitRemoteProtocol.fromUrl(myUrl);
if (remoteProtocol == GitRemoteProtocol.SSH) {
GitXmlRpcSshService ssh = ServiceManager.getService(GitXmlRpcSshService.class);
myEnvironmentCleanedUp = true;
ssh.unregisterHandler(myHandlerNo);
}
else if (remoteProtocol == GitRemoteProtocol.HTTP) {
GitHttpAuthService service = ServiceManager.getService(GitHttpAuthService.class);
myEnvironmentCleanedUp = true;
service.unregisterHandler(myHandlerNo);
}
}
/**
* Wait for process termination
*/
public void waitFor() {
checkStarted();
try {
if (myInputProcessor != null && myProcess != null) {
myInputProcessor.process(myProcess.getOutputStream());
}
}
finally {
waitForProcess();
}
}
/**
* Wait for process
*/
protected abstract void waitForProcess();
/**
   * Set silent mode. When the handler is silent, it does not log the command in the version control console.
* Note that this option also suppresses stderr and stdout copying.
*
* @param silent a new value of the flag
* @see #setStderrSuppressed(boolean)
* @see #setStdoutSuppressed(boolean)
*/
@SuppressWarnings({"SameParameterValue"})
public void setSilent(final boolean silent) {
checkNotStarted();
mySilent = silent;
setStderrSuppressed(silent);
setStdoutSuppressed(silent);
}
/**
* @return a character set to use for IO
*/
public Charset getCharset() {
return myCharset;
}
/**
* Set character set for IO
*
* @param charset a character set
*/
@SuppressWarnings({"SameParameterValue"})
public void setCharset(final Charset charset) {
myCharset = charset;
}
/**
* @return true if standard output is not copied to the console
*/
public boolean isStdoutSuppressed() {
return myStdoutSuppressed;
}
/**
* Set flag specifying if stdout should be copied to the console
*
* @param stdoutSuppressed true if output is not copied to the console
*/
public void setStdoutSuppressed(final boolean stdoutSuppressed) {
checkNotStarted();
myStdoutSuppressed = stdoutSuppressed;
}
/**
   * @return true if standard error is not copied to the console
*/
public boolean isStderrSuppressed() {
return myStderrSuppressed;
}
/**
* Set flag specifying if stderr should be copied to the console
*
* @param stderrSuppressed true if error output is not copied to the console
*/
public void setStderrSuppressed(final boolean stderrSuppressed) {
checkNotStarted();
myStderrSuppressed = stderrSuppressed;
}
/**
* Set environment variable
*
* @param name the variable name
* @param value the variable value
*/
public void setEnvironment(String name, String value) {
myEnv.put(name, value);
}
/**
* Set processor for standard input. This is a place where input to the git application could be generated.
*
* @param inputProcessor the processor
*/
public void setInputProcessor(Processor<OutputStream> inputProcessor) {
myInputProcessor = inputProcessor;
}
/**
* Set suspend/resume actions
*
* @param suspend the suspend action
* @param resume the resume action
*/
synchronized void setSuspendResume(Runnable suspend, Runnable resume) {
mySuspendAction = suspend;
myResumeAction = resume;
}
/**
* Suspend write lock held by the handler
*/
public synchronized void suspendWriteLock() {
assert mySuspendAction != null;
mySuspendAction.run();
}
/**
* Resume write lock held by the handler
*/
public synchronized void resumeWriteLock() {
    assert myResumeAction != null;
myResumeAction.run();
}
public void setModalityState(@Nullable ModalityState state) {
myState = state;
}
/**
* @return true if the command line is too big
*/
public boolean isLargeCommandLine() {
return myCommandLine.getCommandLineString().length() > VcsFileUtil.FILE_PATH_LIMIT;
}
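  /**
   * Run the handler in the calling thread, taking the VCS command write lock according to the
   * command's locking policy. For {@code WRITE_SUSPENDABLE} commands the lock can be released and
   * re-acquired via {@link #suspendWriteLock()} and {@link #resumeWriteLock()}, which is driven by
   * the SUSPEND/RESUME/EXIT queue below.
   *
   * @param postStartAction an action to run after the process has been started, or null
   */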
public void runInCurrentThread(@Nullable Runnable postStartAction) {
//LOG.assertTrue(!ApplicationManager.getApplication().isDispatchThread(), "Git process should never start in the dispatch thread.");
final GitVcs vcs = GitVcs.getInstance(myProject);
if (vcs == null) { return; }
boolean suspendable = false;
switch (myCommand.lockingPolicy()) {
case READ:
// need to lock only write operations: reads can be performed even when a write operation is going on
break;
case WRITE_SUSPENDABLE:
suspendable = true;
//noinspection fallthrough
case WRITE:
vcs.getCommandLock().writeLock().lock();
break;
}
try {
if (suspendable) {
final Object EXIT = new Object();
final Object SUSPEND = new Object();
final Object RESUME = new Object();
final LinkedBlockingQueue<Object> queue = new LinkedBlockingQueue<Object>();
Runnable suspend = new Runnable() {
public void run() {
queue.add(SUSPEND);
}
};
Runnable resume = new Runnable() {
public void run() {
queue.add(RESUME);
}
};
setSuspendResume(suspend, resume);
start();
if (isStarted()) {
if (postStartAction != null) {
postStartAction.run();
}
ApplicationManager.getApplication().executeOnPooledThread(new Runnable() {
public void run() {
waitFor();
queue.add(EXIT);
}
});
boolean suspended = false;
while (true) {
Object action;
while (true) {
try {
action = queue.take();
break;
}
catch (InterruptedException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("queue.take() is interrupted", e);
}
}
}
if (action == EXIT) {
if (suspended) {
LOG.error("Exiting while RW lock is suspended (reacquiring W-lock command)");
vcs.getCommandLock().writeLock().lock();
}
break;
}
else if (action == SUSPEND) {
if (suspended) {
LOG.error("Suspending suspended W-lock (ignoring command)");
}
else {
vcs.getCommandLock().writeLock().unlock();
suspended = true;
}
}
else if (action == RESUME) {
if (!suspended) {
LOG.error("Resuming not suspended W-lock (ignoring command)");
}
else {
vcs.getCommandLock().writeLock().lock();
suspended = false;
}
}
}
}
}
else {
start();
if (isStarted()) {
if (postStartAction != null) {
postStartAction.run();
}
waitFor();
}
}
}
finally {
switch (myCommand.lockingPolicy()) {
case READ:
break;
case WRITE_SUSPENDABLE:
case WRITE:
vcs.getCommandLock().writeLock().unlock();
break;
}
logTime();
}
}
private void logTime() {
if (myStartTime > 0) {
long time = System.currentTimeMillis() - myStartTime;
if (!LOG.isDebugEnabled() && time > LONG_TIME) {
LOG.info(String.format("git %s took %s ms. Command parameters: %n%s", myCommand, time, myCommandLine.getCommandLineString()));
}
else {
LOG.debug(String.format("git %s took %s ms", myCommand, time));
}
}
else {
LOG.debug(String.format("git %s finished.", myCommand));
}
}
@Override
public String toString() {
return myCommandLine.toString();
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
authCheck.go
|
package main
import (
"encoding/json"
"github.com/mijia/sweb/log"
"io/ioutil"
"net/http"
"os"
"strings"
)
type ConsoleResponse struct {
Role MaintainerRole `json:"role"`
}
type MaintainerRole struct {
Role string `json:"role"`
}
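// checkMaintainerAuth reports whether the caller is a maintainer of the app named in the
// request: it forwards the request's access token to the console service at
// http://console.<LAIN_DOMAIN>/api/v1/repos/<appname>/roles/ and accepts the request only
// when a non-empty role is returned.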
func checkMaintainerAuth(req *http.Request) bool {
// return true
var ap string
if len(req.Form[APPNAME]) > 0 {
ap = req.Form[APPNAME][0]
} else {
return false
}
actk := req.Header.Get("access-token")
log.Debug(actk)
if strings.EqualFold(actk, "") {
actk = req.Header.Get("Access-Token")
log.Debug(actk)
if actk == "" {
return false
}
}
domain := os.Getenv("LAIN_DOMAIN")
url := "http://console." + domain + "/api/v1/repos/" + ap + "/roles/"
conReq, _ := http.NewRequest("GET", url, nil)
conReq.Header.Set("access-token", actk)
resp, err := http.DefaultClient.Do(conReq)
if err != nil {
log.Error(err)
return false
} else {
defer resp.Body.Close()
var tmp ConsoleResponse
resBody, _ := ioutil.ReadAll(resp.Body)
log.Debug(string(resBody))
err = json.Unmarshal(resBody, &tmp)
if err != nil {
log.Error(err)
return false
}
log.Debug(tmp)
if strings.EqualFold(tmp.Role.Role, "") {
return false
} else {
return true
}
}
}
|
[
"\"LAIN_DOMAIN\""
] |
[] |
[
"LAIN_DOMAIN"
] |
[]
|
["LAIN_DOMAIN"]
|
go
| 1 | 0 | |
web_page_replay_go/src/httparchive.go
|
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Program httparchive prints information about archives saved by record.
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/codegangsta/cli"
"webpagereplay"
)
const usage = "%s [ls|cat|edit] [options] archive_file [output_file]"
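// Example invocations (illustrative only; the binary name depends on how this package is built):
//   httparchive ls archive.wprgo
//   httparchive cat --host=example.com archive.wprgo
//   httparchive edit archive.wprgo edited.wprgo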
type Config struct {
method, host, fullPath string
}
func (cfg *Config) Flags() []cli.Flag {
return []cli.Flag{
cli.StringFlag{
Name: "command",
Value: "",
Usage: "Only show URLs matching this HTTP method.",
Destination: &cfg.method,
},
cli.StringFlag{
Name: "host",
Value: "",
Usage: "Only show URLs matching this host.",
Destination: &cfg.host,
},
cli.StringFlag{
Name: "full_path",
Value: "",
Usage: "Only show URLs matching this full path.",
Destination: &cfg.fullPath,
},
}
}
func (cfg *Config) requestEnabled(req *http.Request) bool {
if cfg.method != "" && strings.ToUpper(cfg.method) != req.Method {
return false
}
if cfg.host != "" && cfg.host != req.Host {
return false
}
if cfg.fullPath != "" && cfg.fullPath != req.URL.Path {
return false
}
return true
}
func fail(msg string) {
fmt.Fprintf(os.Stderr, "Error: %s.\n\n", msg)
flag.Usage()
os.Exit(1)
}
func list(cfg *Config, a *webpagereplay.Archive, printFull bool) {
a.ForEach(func(req *http.Request, resp *http.Response) {
if !cfg.requestEnabled(req) {
return
}
if printFull {
fmt.Fprint(os.Stdout, "----------------------------------------\n")
req.Write(os.Stdout)
fmt.Fprint(os.Stdout, "\n")
err := webpagereplay.DecompressResponse(resp)
if err != nil {
				fail(fmt.Sprintf("Unable to decompress body: %v", err))
}
resp.Write(os.Stdout)
fmt.Fprint(os.Stdout, "\n")
} else {
fmt.Fprintf(os.Stdout, "%s %s %s\n", req.Method, req.Host, req.URL)
}
})
}
func edit(cfg *Config, a *webpagereplay.Archive, outfile string) {
editor := os.Getenv("EDITOR")
if editor == "" {
fmt.Printf("Warning: EDITOR not specified, using default.\n")
editor = "vi"
}
marshalForEdit := func(w io.Writer, req *http.Request, resp *http.Response) error {
if err := req.Write(w); err != nil {
return err
}
return resp.Write(w)
}
unmarshalAfterEdit := func(r io.Reader) (*http.Request, *http.Response, error) {
br := bufio.NewReader(r)
req, err := http.ReadRequest(br)
if err != nil {
return nil, nil, fmt.Errorf("couldn't unmarshal request: %v", err)
}
resp, err := http.ReadResponse(br, req)
if err != nil {
if req.Body != nil {
req.Body.Close()
}
return nil, nil, fmt.Errorf("couldn't unmarshal response: %v", err)
}
// Read resp.Body into a buffer since the tmpfile is about to be deleted.
body, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return nil, nil, fmt.Errorf("couldn't unmarshal response body: %v", err)
}
resp.Body = ioutil.NopCloser(bytes.NewReader(body))
return req, resp, nil
}
newA, err := a.Edit(func(req *http.Request, resp *http.Response) (*http.Request, *http.Response, error) {
if !cfg.requestEnabled(req) {
return req, resp, nil
}
fmt.Printf("Editing request: host=%s uri=%s\n", req.Host, req.URL.String())
// Serialize the req/resp to a temporary file, let the user edit that file, then
// de-serialize and return the result. Repeat until de-serialization succeeds.
for {
tmpf, err := ioutil.TempFile("", "httparchive_edit_request")
if err != nil {
return nil, nil, err
}
tmpname := tmpf.Name()
defer os.Remove(tmpname)
if err := marshalForEdit(tmpf, req, resp); err != nil {
tmpf.Close()
return nil, nil, err
}
if err := tmpf.Close(); err != nil {
return nil, nil, err
}
// Edit this file.
cmd := exec.Command(editor, tmpname)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return nil, nil, fmt.Errorf("Error running %s %s: %v", editor, tmpname, err)
}
// Reload.
tmpf, err = os.Open(tmpname)
if err != nil {
return nil, nil, err
}
defer tmpf.Close()
newReq, newResp, err := unmarshalAfterEdit(tmpf)
if err != nil {
fmt.Printf("Error in editing request. Try again.\n")
continue
}
return newReq, newResp, nil
}
})
if err != nil {
fmt.Printf("Error editing archive: %v\n", err)
return
}
outf, err := os.OpenFile(outfile, os.O_WRONLY|os.O_CREATE, os.FileMode(0660))
if err != nil {
fmt.Printf("Error opening output file %s: %v\n", outfile, err)
return
}
err0 := newA.Serialize(outf)
err1 := outf.Close()
if err0 != nil || err1 != nil {
if err0 == nil {
err0 = err1
}
fmt.Printf("Error writing edited archive to %s: %v\n", outfile, err0)
return
}
fmt.Printf("Wrote edited archive to %s\n", outfile)
}
func main() {
progName := filepath.Base(os.Args[0])
cfg := &Config{}
checkArgs := func(cmdName string, wantArgs int) func(*cli.Context) error {
return func(c *cli.Context) error {
if len(c.Args()) != wantArgs {
cmd := c.App.Command(cmdName)
return fmt.Errorf("Usage: %s %s [options] %s", progName, cmdName, cmd.ArgsUsage)
}
return nil
}
}
loadArchiveOrDie := func(c *cli.Context) *webpagereplay.Archive {
archive, err := webpagereplay.OpenArchive(c.Args().Get(0))
if err != nil {
cli.ShowSubcommandHelp(c)
os.Exit(1)
}
return archive
}
app := cli.NewApp()
app.Commands = []cli.Command{
cli.Command{
Name: "ls",
Usage: "List the requests in an archive",
ArgsUsage: "archive",
Flags: cfg.Flags(),
Before: checkArgs("ls", 1),
Action: func(c *cli.Context) { list(cfg, loadArchiveOrDie(c), false) },
},
cli.Command{
Name: "cat",
Usage: "Dump the requests/responses in an archive",
ArgsUsage: "archive",
Flags: cfg.Flags(),
Before: checkArgs("cat", 1),
Action: func(c *cli.Context) { list(cfg, loadArchiveOrDie(c), true) },
},
cli.Command{
Name: "edit",
Usage: "Edit the requests/responses in an archive",
ArgsUsage: "input_archive output_archive",
Flags: cfg.Flags(),
Before: checkArgs("edit", 2),
Action: func(c *cli.Context) { edit(cfg, loadArchiveOrDie(c), c.Args().Get(1)) },
},
}
app.Usage = "HTTP Archive Utils"
app.UsageText = fmt.Sprintf(usage, progName)
app.HideVersion = true
app.Version = ""
app.Writer = os.Stderr
app.RunAndExitOnError()
}
|
[
"\"EDITOR\""
] |
[] |
[
"EDITOR"
] |
[]
|
["EDITOR"]
|
go
| 1 | 0 | |
pkg/csi/cinder/openstack/openstack.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstack
import (
"os"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack"
"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots"
"gopkg.in/gcfg.v1"
"k8s.io/klog"
)
type IOpenStack interface {
CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, int, error)
DeleteVolume(volumeID string) error
AttachVolume(instanceID, volumeID string) (string, error)
ListVolumes() ([]Volume, error)
WaitDiskAttached(instanceID string, volumeID string) error
DetachVolume(instanceID, volumeID string) error
WaitDiskDetached(instanceID string, volumeID string) error
GetAttachmentDiskPath(instanceID, volumeID string) (string, error)
GetVolumesByName(name string) ([]Volume, error)
CreateSnapshot(name, volID, description string, tags *map[string]string) (*snapshots.Snapshot, error)
ListSnapshots(limit, offset int, filters map[string]string) ([]snapshots.Snapshot, error)
DeleteSnapshot(snapID string) error
GetSnapshotByNameAndVolumeID(n string, volumeId string) ([]snapshots.Snapshot, error)
}
type OpenStack struct {
compute *gophercloud.ServiceClient
blockstorage *gophercloud.ServiceClient
}
type Config struct {
Global struct {
AuthUrl string `gcfg:"auth-url"`
Username string
UserId string `gcfg:"user-id"`
Password string
TenantId string `gcfg:"tenant-id"`
TenantName string `gcfg:"tenant-name"`
DomainId string `gcfg:"domain-id"`
DomainName string `gcfg:"domain-name"`
Region string
}
}
func (cfg Config) toAuthOptions() gophercloud.AuthOptions {
return gophercloud.AuthOptions{
IdentityEndpoint: cfg.Global.AuthUrl,
Username: cfg.Global.Username,
UserID: cfg.Global.UserId,
Password: cfg.Global.Password,
TenantID: cfg.Global.TenantId,
TenantName: cfg.Global.TenantName,
DomainID: cfg.Global.DomainId,
DomainName: cfg.Global.DomainName,
// Persistent service, so we need to be able to renew tokens.
AllowReauth: true,
}
}
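// A minimal configuration file accepted by GetConfigFromFile looks roughly like this
// (illustrative values only; unset fields can instead come from the usual OS_*
// environment variables handled by GetConfigFromEnv):
//
//   [Global]
//   auth-url = https://keystone.example.com:5000/v3
//   username = demo
//   password = secret
//   tenant-name = demo
//   domain-name = Default
//   region = RegionOne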
func GetConfigFromFile(configFilePath string) (gophercloud.AuthOptions, gophercloud.EndpointOpts, error) {
// Get config from file
var authOpts gophercloud.AuthOptions
var epOpts gophercloud.EndpointOpts
config, err := os.Open(configFilePath)
if err != nil {
klog.V(3).Infof("Failed to open OpenStack configuration file: %v", err)
return authOpts, epOpts, err
}
defer config.Close()
// Read configuration
var cfg Config
err = gcfg.FatalOnly(gcfg.ReadInto(&cfg, config))
if err != nil {
klog.V(3).Infof("Failed to read OpenStack configuration file: %v", err)
return authOpts, epOpts, err
}
authOpts = cfg.toAuthOptions()
epOpts = gophercloud.EndpointOpts{
Region: cfg.Global.Region,
}
return authOpts, epOpts, nil
}
func GetConfigFromEnv() (gophercloud.AuthOptions, gophercloud.EndpointOpts, error) {
// Get config from env
authOpts, err := openstack.AuthOptionsFromEnv()
var epOpts gophercloud.EndpointOpts
if err != nil {
klog.V(3).Infof("Failed to read OpenStack configuration from env: %v", err)
return authOpts, epOpts, err
}
epOpts = gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
}
return authOpts, epOpts, nil
}
var OsInstance IOpenStack = nil
var configFile string = "/etc/cloud.conf"
func InitOpenStackProvider(cfg string) {
configFile = cfg
klog.V(2).Infof("InitOpenStackProvider configFile: %s", configFile)
}
func GetOpenStackProvider() (IOpenStack, error) {
if OsInstance == nil {
// Get config from file
authOpts, epOpts, err := GetConfigFromFile(configFile)
if err != nil {
// Get config from env
authOpts, epOpts, err = GetConfigFromEnv()
if err != nil {
return nil, err
}
}
// Authenticate Client
provider, err := openstack.AuthenticatedClient(authOpts)
if err != nil {
return nil, err
}
// Init Nova ServiceClient
computeclient, err := openstack.NewComputeV2(provider, epOpts)
if err != nil {
return nil, err
}
// Init Cinder ServiceClient
blockstorageclient, err := openstack.NewBlockStorageV3(provider, epOpts)
if err != nil {
return nil, err
}
// Init OpenStack
OsInstance = &OpenStack{
compute: computeclient,
blockstorage: blockstorageclient,
}
}
return OsInstance, nil
}
|
[
"\"OS_REGION_NAME\""
] |
[] |
[
"OS_REGION_NAME"
] |
[]
|
["OS_REGION_NAME"]
|
go
| 1 | 0 | |
scripts/hid_configurator/NrfHidDevice.py
|
#
# Copyright (c) 2020 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: LicenseRef-BSD-5-Clause-Nordic
import hid
import struct
import time
import logging
from enum import IntEnum
REPORT_ID = 6
REPORT_SIZE = 30
EVENT_DATA_LEN_MAX = REPORT_SIZE - 6
MOD_FIELD_POS = 4
MOD_BROADCAST = 0xf
OPT_FIELD_POS = 0
OPT_FIELD_MAX_OPT_CNT = 0xf
OPT_BROADCAST_MAX_MOD_ID = 0x0
OPT_MODULE_DEV_DESCR = 0x0
POLL_INTERVAL_DEFAULT = 0.02
POLL_RETRY_COUNT = 200
END_OF_TRANSFER_CHAR = '\n'
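# Feature report layout used below (struct format '<BHBBB' plus data, little-endian):
#   report ID (1 B) | recipient device PID (2 B) | event ID (1 B) | status (1 B) |
#   data length (1 B) | up to EVENT_DATA_LEN_MAX bytes of event data,
# zero-padded to REPORT_SIZE bytes in total.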
class ConfigStatus(IntEnum):
SUCCESS = 0
PENDING = 1
FETCH = 2
TIMEOUT = 3
REJECT = 4
WRITE_ERROR = 5
DISCONNECTED_ERROR = 6
FAULT = 99
class Response(object):
def __init__(self, recipient, event_id, status, data):
self.recipient = recipient
self.event_id = event_id
self.status = ConfigStatus(status)
self.data = data
def __repr__(self):
base_str = ('Response:\n'
'\trecipient 0x{:04x}\n'
'\tevent_id 0x{:02x}\n'
'\tstatus {}\n').format(self.recipient,
self.event_id,
str(self.status))
if self.data is None:
data_str = '\tno data'
else:
data_str = ('\tdata_len {}\n'
'\tdata {}\n').format(len(self.data), self.data)
return base_str + data_str
@staticmethod
def parse_response(response_raw):
data_field_len = len(response_raw) - struct.calcsize('<BHBBB')
if data_field_len < 0:
logging.error('Response too short')
return None
# Report ID is not included in the feature report from device
fmt = '<BHBBB{}s'.format(data_field_len)
(report_id, rcpt, event_id, status, data_len, data) = struct.unpack(fmt, response_raw)
if report_id != REPORT_ID:
logging.error('Improper report ID')
return None
if data_len > len(data):
logging.error('Required data not present')
return None
if data_len == 0:
event_data = None
else:
event_data = data[:data_len]
return Response(rcpt, event_id, status, event_data)
class NrfHidDevice():
def __init__(self, board_name, vid, pid, dongle_pid):
self.name = board_name
self.vid = vid
self.pid = pid
self.dev_ptr = None
self.dev_config = None
direct_devs = NrfHidDevice._open_devices(vid, pid)
dongle_devs = []
if dongle_pid is not None:
dongle_devs = NrfHidDevice._open_devices(vid, dongle_pid)
devs = direct_devs + dongle_devs
for d in devs:
if self.dev_ptr is None:
board_name = NrfHidDevice._discover_board_name(d, pid)
if board_name is not None:
config = NrfHidDevice._discover_device_config(d, pid)
else:
config = None
if config is not None:
self.dev_config = config
self.dev_ptr = d
print("Device board name is {}".format(board_name))
else:
d.close()
else:
d.close()
@staticmethod
def _open_devices(vid, pid):
devs = []
try:
devlist = hid.enumerate(vid=vid, pid=pid)
for d in devlist:
dev = hid.Device(path=d['path'])
devs.append(dev)
except hid.HIDException:
pass
except Exception as e:
logging.error('Unknown exception: {}'.format(e))
return devs
@staticmethod
def _create_set_report(recipient, event_id, event_data):
""" Function creating a report in order to set a specified configuration
value. """
assert isinstance(recipient, int)
assert isinstance(event_id, int)
if event_data:
assert isinstance(event_data, bytes)
event_data_len = len(event_data)
else:
event_data_len = 0
status = ConfigStatus.PENDING
report = struct.pack('<BHBBB', REPORT_ID, recipient, event_id, status,
event_data_len)
if event_data:
report += event_data
assert len(report) <= REPORT_SIZE
report += b'\0' * (REPORT_SIZE - len(report))
return report
@staticmethod
def _create_fetch_report(recipient, event_id):
""" Function for creating a report which requests fetching of
a configuration value from a device. """
assert isinstance(recipient, int)
assert isinstance(event_id, int)
status = ConfigStatus.FETCH
report = struct.pack('<BHBBB', REPORT_ID, recipient, event_id, status, 0)
assert len(report) <= REPORT_SIZE
report += b'\0' * (REPORT_SIZE - len(report))
return report
@staticmethod
def _exchange_feature_report(dev, recipient, event_id, event_data, is_fetch,
poll_interval=POLL_INTERVAL_DEFAULT):
if is_fetch:
data = NrfHidDevice._create_fetch_report(recipient, event_id)
else:
data = NrfHidDevice._create_set_report(recipient, event_id, event_data)
try:
dev.send_feature_report(data)
except Exception:
return False, None
for _ in range(POLL_RETRY_COUNT):
time.sleep(poll_interval)
try:
response_raw = dev.get_feature_report(REPORT_ID, REPORT_SIZE)
response = Response.parse_response(response_raw)
except Exception:
response = None
if response is None:
logging.error('Invalid response')
op_status = ConfigStatus.FAULT
break
logging.debug('Parsed response: {}'.format(response))
if (response.recipient != recipient) or (response.event_id != event_id):
logging.error('Response does not match the request:\n'
'\trequest: recipient {} event_id {}\n'
'\tresponse: recipient {}, event_id {}'.format(recipient,
event_id,
response.recipient,
response.event_id))
op_status = ConfigStatus.FAULT
break
op_status = response.status
if op_status != ConfigStatus.PENDING:
break
fetched_data = None
success = False
if op_status == ConfigStatus.SUCCESS:
logging.info('Success')
success = True
if is_fetch:
fetched_data = response.data
else:
logging.warning('Error: {}'.format(op_status.name))
return success, fetched_data
@staticmethod
def _fetch_max_mod_id(dev, recipient):
event_id = (MOD_BROADCAST << MOD_FIELD_POS) | \
(OPT_BROADCAST_MAX_MOD_ID << OPT_FIELD_POS)
event_data = struct.pack('<B', 0)
success = NrfHidDevice._exchange_feature_report(dev, recipient,
event_id, event_data,
False)
if not success:
return False, None
success, fetched_data = NrfHidDevice._exchange_feature_report(dev, recipient,
event_id, None,
True)
if not success or not fetched_data:
return False, None
max_mod_id = ord(fetched_data.decode('utf-8'))
return success, max_mod_id
@staticmethod
def _fetch_next_option(dev, recipient, module_id):
event_id = (module_id << MOD_FIELD_POS) | (OPT_MODULE_DEV_DESCR << OPT_FIELD_POS)
success, fetched_data = NrfHidDevice._exchange_feature_report(dev, recipient,
event_id, None,
True)
if not success or not fetched_data:
return False, None
opt_name = fetched_data.decode('utf-8').replace(chr(0x00), '')
return success, opt_name
@staticmethod
def _get_event_id(module_name, option_name, device_config):
module_id = device_config[module_name]['id']
option_id = device_config[module_name]['options'][option_name]['id']
return (module_id << MOD_FIELD_POS) | (option_id << OPT_FIELD_POS)
@staticmethod
def _discover_module_config(dev, recipient, module_id):
module_config = {}
success, module_name = NrfHidDevice._fetch_next_option(dev, recipient,
module_id)
if not success:
return None, None
module_config['id'] = module_id
module_config['options'] = {}
# First fetched option (with index 0) is module name
opt_idx = 1
while True:
success, opt = NrfHidDevice._fetch_next_option(dev, recipient,
module_id)
if not success:
return None, None
if opt[0] == END_OF_TRANSFER_CHAR:
break
if opt_idx > OPT_FIELD_MAX_OPT_CNT:
print("Improper module description")
return None, None
module_config['options'][opt] = {
'id' : opt_idx,
}
opt_idx += 1
return module_name, module_config
@staticmethod
def _discover_device_config(dev, recipient):
device_config = {}
success, max_mod_id = NrfHidDevice._fetch_max_mod_id(dev, recipient)
if not success or (max_mod_id is None):
return None
for i in range(0, max_mod_id + 1):
module_name, module_config = NrfHidDevice._discover_module_config(dev, recipient, i)
if (module_name is None) or (module_config is None):
return None
device_config[module_name] = module_config
return device_config
@staticmethod
def _discover_board_name(dev, recipient):
success, max_mod_id = NrfHidDevice._fetch_max_mod_id(dev, recipient)
if not success:
return None
# Module with the highest index contains information about board name.
        # Discover only this module to reduce discovery time.
module_name, module_config = NrfHidDevice._discover_module_config(dev,
recipient,
max_mod_id)
if (module_name is None) or (module_config is None):
return None
dev_cfg = {module_name : module_config}
event_id = NrfHidDevice._get_event_id(module_name, 'board_name', dev_cfg)
        success, fetched_data = NrfHidDevice._exchange_feature_report(dev, recipient,
                                                                      event_id, None,
                                                                      True)
        if not success or not fetched_data:
            return None
        board_name = fetched_data.decode('utf-8').replace(chr(0x00), '')
        return board_name
def _config_operation(self, module_name, option_name, is_get, value, poll_interval):
if not self.initialized():
print("Device not found")
if is_get:
return False, None
else:
return False
try:
event_id = NrfHidDevice._get_event_id(module_name, option_name, self.dev_config)
except KeyError:
print("No module: {} or option: {}".format(module_name, option_name))
if is_get:
return False, None
else:
return False
success, fetched_data = NrfHidDevice._exchange_feature_report(self.dev_ptr, self.pid,
event_id, value,
is_get, poll_interval)
if is_get:
return success, fetched_data
else:
return success
def close_device(self):
self.dev_ptr.close()
self.dev_ptr = None
self.dev_config = None
def initialized(self):
if (self.dev_ptr is None) or (self.dev_config is None):
return False
else:
return True
def get_device_config(self):
if not self.initialized():
print("Device is not initialized")
return None
res = {}
for module in self.dev_config:
res[module] = list(self.dev_config[module]['options'].keys())
return res
def config_get(self, module_name, option_name, poll_interval=POLL_INTERVAL_DEFAULT):
return self._config_operation(module_name, option_name, True, None, poll_interval)
def config_set(self, module_name, option_name, value, poll_interval=POLL_INTERVAL_DEFAULT):
return self._config_operation(module_name, option_name, False, value, poll_interval)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
test_models.py
|
# Code for "TSM: Temporal Shift Module for Efficient Video Understanding"
# arXiv:1811.08383
# Ji Lin*, Chuang Gan, Song Han
# {jilin, songhan}@mit.edu, [email protected]
import os
import time
import shutil
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
from torch.nn.utils import clip_grad_norm_
from torch.nn import functional as F
from sklearn.metrics import confusion_matrix
from ops.dataset import TSNDataSet
# from ops.models import VideoNet
from ops.models_test import VideoNet
from ops.transforms import *
from opts_test import parser
from ops import dataset_config
from ops.utils import AverageMeter, accuracy
# from ops.temporal_shift import make_temporal_pool
from tensorboardX import SummaryWriter
#os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
#os.environ['CUDA_VISIBLE_DEVICES'] = '4,5,6,7'
os.environ['CUDA_VISIBLE_DEVICES'] = '4'
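# eval_video runs a single batch through the network in eval mode, averages the
# predictions over the sampled crops/clips, optionally applies a softmax, and
# returns (batch index, per-class scores, labels, auxiliary weights from the model).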
def eval_video(video_data, net):
net.eval()
with torch.no_grad():
i, data, label = video_data
batch_size = label.numel()
# print(data.size())
# print(label.size())
#+++++++++++++++++
if args.dense_sample:
num_crop = 10*args.test_crops
elif args.twice_sample:
num_crop = 2*args.test_crops
else:
num_crop = 1*args.test_crops
#++++++++++++++++
rst, weights = net(data)
rst = rst.reshape(batch_size, num_crop, -1).mean(1)
#
if args.softmax:
# take the softmax to normalize the output to probability
rst = F.softmax(rst, dim=1)
rst = rst.data.cpu().numpy().copy()
return i, rst, label, weights
def main():
global args
args = parser.parse_args()
num_class, args.train_list, args.val_list, args.root_path, prefix = dataset_config.return_dataset(args.dataset,
args.modality)
assert args.modality == 'RGB'
if args.test_list:
test_list = args.test_list
else:
test_list = args.val_list
# ==== get test args ====
test_weights_list = args.test_weights.split(',')
test_nets_list = args.test_nets.split(',')
test_segments_list = [int(s) for s in args.test_segments.split(',')]
assert len(test_nets_list) == len(test_segments_list)
# test_cdivs_list = [int(s) for s in args.test_cdivs.split(',')]
# =======================
data_iter_list = []
net_list = []
scale_size = 256
crop_size = 256 if args.full_res else 224 # 224 or 256 (scale_size)
if args.test_crops == 1:
cropping = torchvision.transforms.Compose([
GroupScale(scale_size),
GroupCenterCrop(crop_size),
])
    elif args.test_crops == 3:  # do not flip, so only 3 crops
cropping = torchvision.transforms.Compose([
GroupFullResSample(crop_size, scale_size, flip=False)
])
elif args.test_crops == 5: # do not flip, so only 5 crops
cropping = torchvision.transforms.Compose([
GroupOverSample(crop_size, scale_size, flip=False)
])
elif args.test_crops == 10:
cropping = torchvision.transforms.Compose([
GroupOverSample(crop_size, scale_size)
])
else:
raise ValueError("Only 1, 5, 10 crops are supported while we got {}".format(args.test_crops))
test_log = 'test_logs_256'
if not os.path.exists(test_log):
os.mkdir(test_log)
    log_path = './{}/log_{}_{}_{}_seg{}_{}.txt'.format(test_log, args.arch, args.dataset, "-".join(test_nets_list),
                                                       "-".join(str(a) for a in test_segments_list),
                                                       crop_size)
for this_net, this_segment, this_weight in zip(test_nets_list, test_segments_list, test_weights_list):
model = VideoNet(num_class, this_segment, args.modality,
backbone=args.arch, net=this_net,
consensus_type=args.consensus_type,
element_filter=args.element_filter,
cdiv=args.cdiv)
# weights_path = "./checkpoints/%s/%s_%s_c%d_s%d.pth"%(args.dataset, args.model, this_net, this_cdiv, this_segment)
print(this_weight)
if not os.path.exists(this_weight):
            raise ValueError('the checkpoint file does not exist: %s' % this_weight)
checkpoint = torch.load(this_weight)
print(checkpoint['best_prec1'])
checkpoint_sd = checkpoint['state_dict']
#print(checkpoint_sd.keys())
base_dict = {'.'.join(k.split('.')[1:]): v for k, v in list(checkpoint_sd.items())}
for ks in list(base_dict.keys()):
if ks.split('.')[-1] in ['total_params','total_ops']:
base_dict.pop(ks)
#print(ks)
model.load_state_dict(base_dict)
# crop_size = model.scale_size if args.full_res else model.input_size # 224 or 256 (scale_size)
# scale_size = model.scale_size # 256
input_mean = model.input_mean
input_std = model.input_std
# Data loading code
if args.modality != 'RGBDiff':
normalize = GroupNormalize(input_mean, input_std)
else:
normalize = IdentityTransform()
if args.modality == 'RGB':
data_length = 1
elif args.modality in ['Flow', 'RGBDiff']:
data_length = 5
# print('----Validation----')
print('batch size', args.batch_size)
test_loader = torch.utils.data.DataLoader(
TSNDataSet(args.root_path, test_list, num_segments=this_segment,
new_length=data_length,
modality=args.modality,
image_tmpl=prefix,
test_mode=True,
random_shift=False,
transform=torchvision.transforms.Compose([
cropping,
GroupScale(224),
# GroupScale(int(scale_size)),
#GroupScale(256),
#GroupCenterCrop(224),
Stack(roll=(args.arch in ['BNInception', 'InceptionV3'])),
ToTorchFormatTensor(div=(args.arch not in ['BNInception', 'InceptionV3'])),
normalize,
]), dense_sample=args.dense_sample, twice_sample=args.twice_sample),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
#
total_num = len(test_loader.dataset)
print('total test number:', total_num)
#
#model = torch.nn.DataParallel(model).cuda()
model.eval()
net_list.append(model)
data_gen = enumerate(test_loader)
data_iter_list.append(data_gen)
#
top1 = AverageMeter()
top5 = AverageMeter()
batch_times = AverageMeter()
#
proc_start_time = time.time()
output = []
fw = open(log_path, 'w')
weights_data = np.zeros((num_class, 4, 4))
for i, data_label_pairs in enumerate(zip(*data_iter_list)):
with torch.no_grad():
this_rst_list = []
this_label = None
# end = time.time()
weight_data = []
for (_, (data, label)), net in zip(data_label_pairs, net_list):
end = time.time()
rst = eval_video((i, data, label), net)
batch_times.update(time.time()-end, label.size(0))
this_rst_list.append(rst[1])
weight_data = rst[3] #bsz, 4, num_blocks, 4
this_label = label
# assert len(this_rst_list) == len(coeff_list)
# for i_coeff in range(len(this_rst_list)):
# this_rst_list[i_coeff] *= coeff_list[i_coeff]
ensembled_predict = sum(this_rst_list) / len(this_rst_list)
for p, g in zip(ensembled_predict, this_label.cpu().numpy()):
output.append([p[None, ...], g])
for j in range(len(weight_data)):
weight_data[j] = sum(weight_data[j]).cpu().numpy()
weight_data = np.array(weight_data) # 4 bsz 4
weight_data = weight_data.transpose(1,0,2) # bsz 4 4
#print(weight_data.shape)
for weight, l in zip(weight_data, this_label.cpu().numpy()): # 4, num_blocks, 4
weights_data[l] = weights_data[l] + weight
cnt_time = time.time() - proc_start_time
prec1, prec5 = accuracy(torch.from_numpy(ensembled_predict), this_label, topk=(1, 5))
top1.update(prec1.item(), this_label.numel())
top5.update(prec5.item(), this_label.numel())
if i % 20 == 0:
txt = 'video {} done, total {}/{}, average {:.3f} sec/video, moving Prec@1 {:.3f} Prec@5 {:.3f}'.format(i * args.batch_size, i * args.batch_size, total_num,
float(cnt_time) / (i+1) / args.batch_size, top1.avg, top5.avg)
print(txt)
fw.write(txt+'\n')
fw.flush()
# fw.close()
print('avg computing time', batch_times.avg)
video_pred = [np.argmax(x[0]) for x in output]
video_pred_top5 = [np.argsort(np.mean(x[0], axis=0).reshape(-1))[::-1][:5] for x in output]
video_labels = [x[1] for x in output]
cf = confusion_matrix(video_labels, video_pred).astype(float)
# np.save('cm.npy', cf)
cls_cnt = cf.sum(axis=1)
cls_hit = np.diag(cf)
cls_acc = cls_hit / cls_cnt
print(cls_acc*100)
# upper = np.mean(np.max(cf, axis=1) / cls_cnt)
# print('upper bound: {}'.format(upper))
cls_acc_avg = np.sum(cls_acc*cls_cnt)/cls_cnt.sum()
print(cls_acc_avg)
weights_data = weights_data/np.expand_dims(np.expand_dims(cls_cnt,-1).repeat(4,axis=-1),-1).repeat(4,axis=-1)
import csv
with open(args.test_nets+'_cls_acc.csv','w') as f:
f_csv = csv.writer(f)
f_csv.writerow(cls_acc)
# with open('cls_count.csv','w') as f:
# f_csv = csv.writer(f)
# f_csv.writerow(cls_cnt.tolist())
# return 0
# with open('cls_weight_layer1.csv','w') as f:
# f_csv = csv.writer(f)
# f_csv.writerows((weights_data[:,0,:]/3).tolist())
# with open('cls_weight_layer2.csv','w') as f:
# f_csv = csv.writer(f)
# f_csv.writerows((weights_data[:,1,:]/4).tolist())
# with open('cls_weight_layer3.csv','w') as f:
# f_csv = csv.writer(f)
# f_csv.writerows((weights_data[:,2,:]/6).tolist())
# with open('cls_weight_layer4.csv','w') as f:
# f_csv = csv.writer(f)
# f_csv.writerows((weights_data[:,3,:]/3).tolist())
print('-----Evaluation is finished------')
print('Class Accuracy {:.02f}%'.format(cls_acc_avg*100))
txt = 'Overall Prec@1 {:.02f}% Prec@5 {:.02f}%'.format(top1.avg, top5.avg)
fw.write(txt)
fw.close()
print(txt)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
wfexs_backend/singularity_container.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2022 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import os
import os.path
import re
import shutil
import subprocess
import tempfile
from typing import Dict, List, Optional, Sequence, Tuple, Union
from typing import cast
from urllib import parse
import uuid
from .common import AbsPath, RelPath, URIType
from .common import Container, ContainerType, Fingerprint
from .common import ContainerFileNamingMethod, ContainerTaggedName
from .common import DEFAULT_SINGULARITY_CMD
from .container import ContainerFactory, ContainerFactoryException
from .utils.contents import link_or_copy
from .utils.digests import ComputeDigestFromFile, nihDigester
from .utils.docker import DockerHelper
class SingularityContainerFactory(ContainerFactory):
def __init__(self, cacheDir=None, local_config=None, engine_name='unset', tempDir=None):
super().__init__(cacheDir=cacheDir, local_config=local_config, engine_name=engine_name, tempDir=tempDir)
self.runtime_cmd = local_config.get('tools', {}).get('singularityCommand', DEFAULT_SINGULARITY_CMD)
# This is needed due a bug in singularity 3.6, where
# singularity pull --disable-cache does not create a container
singularityCacheDir = os.path.join(self.containersCacheDir, '.singularity')
os.makedirs(singularityCacheDir, exist_ok=True)
self._environment.update({
'SINGULARITY_TMPDIR': self.tempDir,
'SINGULARITY_CACHEDIR': singularityCacheDir,
})
# Now, detect userns feature using some ideas from
# https://github.com/hpcng/singularity/issues/1445#issuecomment-381588444
userns_supported = False
if self.supportsFeature('host_userns'):
matEnv = dict(os.environ)
matEnv.update(self.environment)
with tempfile.NamedTemporaryFile() as s_out, tempfile.NamedTemporaryFile() as s_err:
s_retval = subprocess.Popen(
[self.runtime_cmd, 'exec', '--userns', '/etc', 'true'],
env=matEnv,
stdout=s_out,
stderr=s_err
).wait()
# The command always fails.
# We only need to find 'Failed to create user namespace'
# in order to discard this feature
with open(s_err.name,"r") as c_stF:
s_err_v = c_stF.read()
if 'Failed to create user namespace' not in s_err_v:
userns_supported = True
self._features.add('userns')
self.logger.debug(f'Singularity supports userns: {userns_supported}')
if not userns_supported:
self.logger.warning('Singularity does not support userns (needed for encrypted working directories)')
@classmethod
def ContainerType(cls) -> ContainerType:
return ContainerType.Singularity
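    # Cache layout used by materializeContainers: each image is pulled once into
    # containersCacheDir under a name derived from its digest, exposed through a
    # relative symlink in engineContainersSymlinkDir, and accompanied by a small
    # JSON sidecar (META_JSON_POSTFIX) holding registry/repo/tag/digest metadata.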
def materializeContainers(self, tagList: Sequence[ContainerTaggedName], simpleFileNameMethod: ContainerFileNamingMethod, containers_dir: Optional[Union[RelPath, AbsPath]] = None, offline: bool = False) -> Sequence[Container]:
"""
It is assured the containers are materialized
"""
containersList = []
matEnv = dict(os.environ)
matEnv.update(self.environment)
dhelp = DockerHelper()
for tag in tagList:
            # If the tag is not an absolute URL, prepend the docker:// scheme
parsedTag = parse.urlparse(tag)
singTag = 'docker://' + tag if parsedTag.scheme == '' else tag
containerFilename = simpleFileNameMethod(cast(URIType, tag))
containerFilenameMeta = containerFilename + self.META_JSON_POSTFIX
localContainerPath = cast(AbsPath, os.path.join(self.engineContainersSymlinkDir, containerFilename))
localContainerPathMeta = cast(AbsPath, os.path.join(self.engineContainersSymlinkDir, containerFilenameMeta))
self.logger.info("downloading singularity container: {} => {}".format(tag, localContainerPath))
# First, let's materialize the container image
imageSignature = None
tmpContainerPath = None
tmpContainerPathMeta = None
if os.path.isfile(localContainerPathMeta):
with open(localContainerPathMeta, mode="r", encoding="utf8") as tcpm:
metadata = json.load(tcpm)
registryServer = metadata['registryServer']
repo = metadata['repo']
alias = metadata['alias']
partial_fingerprint = metadata['dcd']
elif offline:
raise ContainerFactoryException("Cannot download containers metadata in offline mode from {} to {}".format(tag, localContainerPath))
else:
tmpContainerPath = os.path.join(self.containersCacheDir,str(uuid.uuid4()))
tmpContainerPathMeta = tmpContainerPath + self.META_JSON_POSTFIX
self.logger.debug("downloading temporary container metadata: {} => {}".format(tag, tmpContainerPathMeta))
with open(tmpContainerPathMeta, mode="w", encoding="utf8") as tcpm:
registryServer, repo, alias, partial_fingerprint = dhelp.query_tag(singTag)
json.dump({
'registryServer': registryServer,
'repo': repo,
'alias': alias,
'dcd': partial_fingerprint,
}, tcpm)
canonicalContainerPath = None
canonicalContainerPathMeta = None
if not os.path.isfile(localContainerPath):
if offline:
raise ContainerFactoryException("Cannot download containers in offline mode from {} to {}".format(tag, localContainerPath))
with tempfile.NamedTemporaryFile() as s_out, tempfile.NamedTemporaryFile() as s_err:
if tmpContainerPath is None:
tmpContainerPath = os.path.join(self.containersCacheDir,str(uuid.uuid4()))
self.logger.debug("downloading temporary container: {} => {}".format(tag, tmpContainerPath))
# Singularity command line borrowed from
# https://github.com/nextflow-io/nextflow/blob/539a22b68c114c94eaf4a88ea8d26b7bfe2d0c39/modules/nextflow/src/main/groovy/nextflow/container/SingularityCache.groovy#L221
s_retval = subprocess.Popen(
[self.runtime_cmd, 'pull', '--name', tmpContainerPath, singTag],
env=matEnv,
stdout=s_out,
stderr=s_err
).wait()
self.logger.debug("singularity pull retval: {}".format(s_retval))
with open(s_out.name,"r") as c_stF:
s_out_v = c_stF.read()
with open(s_err.name,"r") as c_stF:
s_err_v = c_stF.read()
self.logger.debug("singularity pull stdout: {}".format(s_out_v))
self.logger.debug("singularity pull stderr: {}".format(s_err_v))
# Reading the output and error for the report
if s_retval == 0:
if not os.path.exists(tmpContainerPath):
raise ContainerFactoryException("FATAL ERROR: Singularity finished properly but it did not materialize {} into {}".format(tag, tmpContainerPath))
imageSignature = cast(Fingerprint, ComputeDigestFromFile(tmpContainerPath))
# Some filesystems complain when filenames contain 'equal', 'slash' or 'plus' symbols
canonicalContainerPath = os.path.join(self.containersCacheDir, imageSignature.replace('=','~').replace('/','-').replace('+','_'))
if os.path.exists(canonicalContainerPath):
tmpSize = os.path.getsize(tmpContainerPath)
canonicalSize = os.path.getsize(canonicalContainerPath)
# Remove the temporary one
os.unlink(tmpContainerPath)
tmpContainerPath = None
if tmpContainerPathMeta is not None:
os.unlink(tmpContainerPathMeta)
tmpContainerPathMeta = None
if tmpSize != canonicalSize:
# If files were not the same complain
# This should not happen!!!!!
raise ContainerFactoryException("FATAL ERROR: Singularity cache collision for {}, with differing sizes ({} local, {} remote {})".format(imageSignature,canonicalSize,tmpSize,tag))
else:
shutil.move(tmpContainerPath, canonicalContainerPath)
tmpContainerPath = None
# Now, create the relative symbolic link
if os.path.lexists(localContainerPath):
os.unlink(localContainerPath)
os.symlink(os.path.relpath(canonicalContainerPath,self.engineContainersSymlinkDir),localContainerPath)
else:
errstr = """Could not materialize singularity image {}. Retval {}
======
STDOUT
======
{}
======
STDERR
======
{}""".format(singTag, s_retval, s_out_v, s_err_v)
if os.path.exists(tmpContainerPath):
try:
os.unlink(tmpContainerPath)
except:
pass
raise ContainerFactoryException(errstr)
# Only metadata was generated
if tmpContainerPathMeta is not None:
if canonicalContainerPath is None:
canonicalContainerPath = os.path.normpath(os.path.join(self.engineContainersSymlinkDir, os.readlink(localContainerPath)))
canonicalContainerPathMeta = cast(AbsPath, canonicalContainerPath + self.META_JSON_POSTFIX)
shutil.move(tmpContainerPathMeta, canonicalContainerPathMeta)
if canonicalContainerPathMeta is not None:
if os.path.lexists(localContainerPathMeta):
os.unlink(localContainerPathMeta)
os.symlink(os.path.relpath(canonicalContainerPathMeta,self.engineContainersSymlinkDir),localContainerPathMeta)
# Then, compute the signature
if imageSignature is None:
imageSignature = cast(Fingerprint, ComputeDigestFromFile(localContainerPath, repMethod=nihDigester))
# Hardlink or copy the container and its metadata
if containers_dir is not None:
containerPath = cast(AbsPath, os.path.join(containers_dir, containerFilename))
containerPathMeta = cast(AbsPath, os.path.join(containers_dir, containerFilenameMeta))
# Do not allow overwriting in offline mode
if not offline or not os.path.exists(containerPath):
link_or_copy(localContainerPath, containerPath)
if not offline or not os.path.exists(containerPathMeta):
link_or_copy(localContainerPathMeta, containerPathMeta)
else:
containerPath = localContainerPath
containersList.append(
Container(
origTaggedName=tag,
taggedName=cast(URIType, singTag),
signature=imageSignature,
fingerprint=repo + '@' + partial_fingerprint,
type=self.containerType,
localPath=containerPath
)
)
return containersList
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
score_prove.py
|
# -*- coding: utf-8 -*-
# @Time : 2021/12/26 22:22
# @Author : LIU YI
import argparse
import random
import numpy as np
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import datasets, transforms
import wandb
# from models import *
import models
# os.environ['CUDA_VISIBLE_DEVICES'] = '4'
import copy
from score_based_pruning import reset_seed
from model_complexity import get_model_infos
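# check_score computes a training-free architecture score in the spirit of
# NASWOT-style scoring: forward hooks turn each ReLU's input into a binary
# activation code per sample, the pairwise agreement matrix K is accumulated
# over the batch, and the score is the mean log-determinant of K over a few
# mini-batches.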
def check_score(model, train_loader):
test_batch_size = 128
newmodel = copy.deepcopy(model)
reset_seed()
newmodel.K = np.zeros((test_batch_size, test_batch_size))
def counting_forward_hook(module, inp, out):
try:
if not module.visited_backwards:
return
if isinstance(inp, tuple):
inp = inp[0]
inp = inp.view(inp.size(0), -1)
x = (inp > 0).float()
K = x @ x.t()
K2 = (1. - x) @ (1. - x.t())
newmodel.K = newmodel.K + K.cpu().numpy() + K2.cpu().numpy()
except:
pass
def counting_backward_hook(module, inp, out):
module.visited_backwards = True
for name, module in newmodel.named_modules():
if 'ReLU' in str(type(module)):
# hooks[name] = module.register_forward_hook(counting_hook)
module.register_forward_hook(counting_forward_hook)
module.register_backward_hook(counting_backward_hook)
newmodel = newmodel.to(device)
s = []
for j in range(5):
data_iterator = iter(train_loader)
x, target = next(data_iterator)
x2 = torch.clone(x)
x2 = x2.to(device)
x, target = x.to(device), target.to(device)
jacobs, labels, y = get_batch_jacobian(newmodel, x, target, device)
newmodel(x2.to(device))
s_, ld = np.linalg.slogdet(newmodel.K)
s.append(ld)
score = np.mean(s)
return score
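# Illustrative usage sketch (not part of the original script): once a model and a
# DataLoader are available, the hook-based activation score above can be compared
# across candidate architectures, e.g.
#   score = check_score(candidate_model, train_loader)   # higher scores are kept as "best" below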
# Prune settings
parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR prune')
parser.add_argument('--dataset', type=str, default='cifar100',
help='training dataset (default: cifar10)')
parser.add_argument('--test-batch-size', type=int, default=128, metavar='N',
help='input batch size for testing (default: 256)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--depth', type=int, default=16,
help='depth of the vgg')
parser.add_argument('--percent', type=float, default=0.5,
help='scale sparse rate (default: 0.5)')
parser.add_argument('--model', default='', type=str, metavar='PATH',
help='path to the model (default: none)')
parser.add_argument('--save', default='./baseline/vgg16-cifar100', type=str, metavar='PATH',
help='path to save pruned model (default: none)')
parser.add_argument('--save_1', default='./baseline/vgg16-cifar100', type=str, metavar='PATH',
help='path to save pruned model (default: none)')
parser.add_argument('--start_epoch', default=1, type=int, metavar='N', help='manual start epoch number')
parser.add_argument('--end_epoch', default=160, type=int, metavar='N', help='manual end epoch number')
# quantized parameters
parser.add_argument('--bits_A', default=8, type=int, help='input quantization bits')
parser.add_argument('--bits_W', default=8, type=int, help='weight quantization bits')
parser.add_argument('--bits_G', default=8, type=int, help='gradient quantization bits')
parser.add_argument('--bits_E', default=8, type=int, help='error quantization bits')
parser.add_argument('--bits_R', default=16, type=int, help='rand number quantization bits')
parser.add_argument('--arch', default='vgg', type=str,
help='architecture to use')
# multi-gpus
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
seed = 1
if not os.path.exists(args.save):
os.makedirs(args.save)
gpu = args.gpu_ids
gpu_ids = args.gpu_ids.split(',')
args.gpu_ids = []
for gpu_id in gpu_ids:
id = int(gpu_id)
if id >= 0:  # keep GPU 0 as well; -1 means CPU per the --gpu_ids help text
args.gpu_ids.append(id)
if len(args.gpu_ids) > 0:
torch.cuda.set_device(args.gpu_ids[0])
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_batch_jacobian(net, x, target, device):
net.zero_grad()
x.requires_grad_(True)
y = net(x)
y.backward(torch.ones_like(y))
jacob = x.grad.detach()
return jacob, target.detach(), y.detach()
if args.arch.endswith('lp'):
# model = models.__dict__[args.arch](bits_A=args.bits_A, bits_E=args.bits_E, bits_W=args.bits_W, dataset=args.dataset, depth=args.depth)
model = models.__dict__[args.arch](8, 8, 32, dataset=args.dataset, depth=args.depth)
elif args.dataset == 'imagenet':
model = models.__dict__[args.arch](pretrained=False)
if len(args.gpu_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=args.gpu_ids)
else:
model = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth)
if args.dataset == 'cifar10':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True)
else:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./data.cifar100', train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./data.cifar100', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True)
def create_model(model, cfg, cfg_mask):
if args.arch.endswith('lp'):
# model = models.__dict__[args.arch](bits_A=args.bits_A, bits_E=args.bits_E, bits_W=args.bits_W, dataset=args.dataset, depth=args.depth)
newmodel = models.__dict__[args.arch](8, 8, 32, dataset=args.dataset, depth=args.depth)
elif args.dataset == 'imagenet':
newmodel = models.__dict__[args.arch](pretrained=False)
if len(args.gpu_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=args.gpu_ids)
else:
newmodel = models.__dict__[args.arch](dataset=args.dataset, cfg = cfg)
layer_id_in_cfg = 0
start_mask = torch.ones(3)
end_mask = cfg_mask[layer_id_in_cfg]
for [m0, m1] in zip(model.modules(), newmodel.modules()):
if isinstance(m0, nn.BatchNorm2d):
if torch.sum(end_mask) == 0:
continue
idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
if idx1.size == 1:
idx1 = np.resize(idx1, (1,))
m1.weight.data = m0.weight.data[idx1.tolist()].clone()
m1.bias.data = m0.bias.data[idx1.tolist()].clone()
m1.running_mean = m0.running_mean[idx1.tolist()].clone()
m1.running_var = m0.running_var[idx1.tolist()].clone()
layer_id_in_cfg += 1
start_mask = end_mask.clone()
if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC
end_mask = cfg_mask[layer_id_in_cfg]
elif isinstance(m0, nn.Conv2d):
if torch.sum(end_mask) == 0:
continue
idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))
idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
# random set for test
# new_end_mask = np.asarray(end_mask.cpu().numpy())
# new_end_mask = np.append(new_end_mask[int(len(new_end_mask)/2):], new_end_mask[:int(len(new_end_mask)/2)])
# idx1 = np.squeeze(np.argwhere(new_end_mask))
# print('In shape: {:d}, Out shape {:d}.'.format(idx0.size, idx1.size))
if idx0.size == 1:
idx0 = np.resize(idx0, (1,))
if idx1.size == 1:
idx1 = np.resize(idx1, (1,))
w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()
w1 = w1[idx1.tolist(), :, :, :].clone()
m1.weight.data = w1.clone()
elif isinstance(m0, nn.Linear):
idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))
if idx0.size == 1:
idx0 = np.resize(idx0, (1,))
m1.weight.data = m0.weight.data[:, idx0].clone()
m1.bias.data = m0.bias.data.clone()
return newmodel
#
# def check_score(newmodel, train_loader):
#
# newmodel.K = np.zeros((args.test_batch_size, args.test_batch_size))
# def counting_forward_hook(module, inp, out):
# try:
# if not module.visited_backwards:
# return
# if isinstance(inp, tuple):
# inp = inp[0]
# inp = inp.view(inp.size(0), -1)
# x = (inp > 0).float()
# K = x @ x.t()
# K2 = (1. - x) @ (1. - x.t())
# newmodel.K = newmodel.K + K.cpu().numpy() + K2.cpu().numpy()
# except:
# pass
#
# def counting_backward_hook(module, inp, out):
# module.visited_backwards = True
#
# for name, module in newmodel.named_modules():
# if 'ReLU' in str(type(module)):
# # hooks[name] = module.register_forward_hook(counting_hook)
# module.register_forward_hook(counting_forward_hook)
# module.register_backward_hook(counting_backward_hook)
#
# newmodel = newmodel.to(device)
# s = []
#
# for j in range(5):
# data_iterator = iter(train_loader)
# x, target = next(data_iterator)
# x2 = torch.clone(x)
# x2 = x2.to(device)
# x, target = x.to(device), target.to(device)
# jacobs, labels, y = get_batch_jacobian(newmodel, x, target, device)
# newmodel(x2.to(device))
# s_, ld = np.linalg.slogdet(newmodel.K)
# s.append(ld)
# score = np.mean(s)
# return score
if args.cuda:
model.cuda()
def pruning(model):
total = 0
cfg = []
cfg_mask = []
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
total += m.weight.data.shape[0]
bn = torch.zeros(total)
index = 0
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
size = m.weight.data.shape[0]
bn[index:(index+size)] = m.weight.data.abs().clone()
index += size
y, i = torch.sort(bn)
thre_index = int(total * args.percent)
thre = y[thre_index]
# print('Pruning threshold: {}'.format(thre))
mask = torch.zeros(total)
index = 0
for k, m in enumerate(model.modules()):
if isinstance(m, nn.BatchNorm2d):
size = m.weight.data.numel()
weight_copy = m.weight.data.abs().clone()
_mask = weight_copy.gt(thre.cuda()).float().cuda()
cfg_mask.append(_mask.clone())
if int(torch.sum(_mask)) > 0:
cfg.append(int(torch.sum(_mask)))
mask[index:(index+size)] = _mask.view(-1)
# print('layer index: {:d} \t total channel: {:d} \t remaining channel: {:d}'.format(k, _mask.shape[0], int(torch.sum(_mask))))
index += size
elif isinstance(m, nn.MaxPool2d):
cfg.append('M')
# print('Pre-processing Successful!')
return mask, cfg, cfg_mask
resume = args.save + '/model_best.pth.tar'
print('==> resuming from model_best ...')
checkpoint = torch.load(resume)
best_epoch = checkpoint['epoch']
print('best epoch: ', best_epoch)
model.load_state_dict(checkpoint['state_dict'])
best_mask, best_cfg, best_mask_cfg = pruning(model)
size = best_mask.size(0)
# resume = args.save_1 + '/model_best.pth.tar'
# resume = args.save_1 + '/ckpt159.pth.tar'
# print('==> resumeing from model_best ...')
# checkpoint = torch.load(resume)
# best_epoch = checkpoint['epoch']
# print('best epoch: ', best_epoch)
# model.load_state_dict(checkpoint['state_dict'])
# best_mask_1 = pruning(model)
# print('overlap rate of two best model: ', float(torch.sum(best_mask==best_mask_1)) / size)
epochs = args.end_epoch - args.start_epoch + 1
overlap = np.zeros((epochs, epochs))
save_dir = os.path.join(args.save, 'overlap_'+str(args.percent))
masks = []
for i in range(args.start_epoch, args.end_epoch+1):
resume = args.save + '/ckpt' + str(i-1) + '.pth.tar'
checkpoint = torch.load(resume)
model.load_state_dict(checkpoint['state_dict'])
masks.append(pruning(model))
# for i in range(args.start_epoch, args.end_epoch+1):
# for j in range(args.start_epoch, args.end_epoch+1):
# overlap[i-1, j-1] = float(torch.sum(masks[i-1] == masks[j-1])) / size
# print('overlap[{}, {}] = {}'.format(i-1, j-1, overlap[i-1, j-1]))
#
# np.save(save_dir, overlap)
wandb_project = 'pruning_score'
name = 'trail'
# wandb.init(project=wandb_project, name=name)
best_info = {}
best_score = 0
bird = [15, 25, 40, 159]
xshape = (1, 3, 32, 32)
flops_original, param_original = get_model_infos(model, xshape)
for i in range(args.start_epoch, args.end_epoch):
model_new = create_model(model, masks[i][1], masks[i][2])
score = check_score(model_new, train_loader)
flop, param = get_model_infos(model_new, xshape)
info_dict = {
'epoch': i,
'score': score,
'cfg': masks[i][1],
'cfg_mask': masks[i][2],
'flop_pruning_rate': flop/flops_original,
'param_pruning_rate': param/param_original,
}
# wandb.log(info_dict)
print(score)
if score > best_score:
best_score = score
best_info = info_dict
if i in bird:
print(i, flop/flops_original, param/param_original, score)
np.save('{}-{:.2f}.npy'.format(i, best_score), info_dict)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
internal/mtls_smoketest/smoketest_test.go
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mtls_smoketest
import (
"context"
"os"
"testing"
"time"
bqstorage "cloud.google.com/go/bigquery/storage/apiv1"
gaming "cloud.google.com/go/gaming/apiv1beta"
vision "cloud.google.com/go/vision/apiv1"
"github.com/GoogleCloudPlatform/golang-samples/internal/testutil"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
bqstoragepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1"
gamingpb "google.golang.org/genproto/googleapis/cloud/gaming/v1beta"
)
var shouldFail = os.Getenv("GOOGLE_API_USE_MTLS") == "always"
// checkErr expects an error under mtls_smoketest, and no error otherwise.
func checkErr(err error, t *testing.T) {
t.Helper()
if shouldFail && err == nil {
t.Fatalf("got no err when wanted one - this means you should delete this test and un-skip the tests it's referring to.")
}
if !shouldFail && err != nil {
t.Fatalf("got err when wanted no error: %v", err)
}
}
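// Illustrative note (not part of the original test): shouldFail is driven by the
// GOOGLE_API_USE_MTLS environment variable, so a run such as
//   GOOGLE_API_USE_MTLS=always go test ./internal/mtls_smoketest/...
// expects every call below to fail, while leaving the variable unset expects them to succeed.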
// When this test starts failing, delete it and the corresponding lines in system_tests.bash
//
// vision/detect
// vision/label
// vision/product_search
// run/image-processing/imagemagick
func TestVision(t *testing.T) {
tc := testutil.EndToEndTest(t)
ctx := context.Background()
// NOTE(cbro): Observed successful and unsuccessful calls take under 1s.
ctx, cancel := context.WithTimeout(ctx, time.Minute)
defer cancel()
client, err := vision.NewImageAnnotatorClient(ctx, option.WithQuotaProject(tc.ProjectID))
if err != nil {
t.Fatal(err)
}
defer client.Close()
f, err := os.Open("../../vision/testdata/cat.jpg")
if err != nil {
t.Fatal(err)
}
image, err := vision.NewImageFromReader(f)
if err != nil {
t.Fatal(err)
}
_, err = client.DetectLabels(ctx, image, nil, 10)
checkErr(err, t)
}
// When this test starts failing, delete it and the corresponding lines in system_tests.bash
//
// bigquery/bigquery_storage_quickstart
func TestBigquerystorage(t *testing.T) {
tc := testutil.EndToEndTest(t)
ctx := context.Background()
// NOTE(cbro): Observed successful calls take around 1s. Unsuccessful calls hang indefinitely.
ctx, cancel := context.WithTimeout(ctx, time.Minute)
defer cancel()
client, err := bqstorage.NewBigQueryReadClient(ctx)
if err != nil {
t.Fatalf("NewBigQueryStorageClient: %v", err)
}
defer client.Close()
createReadSessionRequest := &bqstoragepb.CreateReadSessionRequest{
Parent: "projects/" + tc.ProjectID,
ReadSession: &bqstoragepb.ReadSession{
Table: "projects/bigquery-public-data/datasets/usa_names/tables/usa_1910_current",
DataFormat: bqstoragepb.DataFormat_AVRO,
},
MaxStreamCount: 1,
}
_, err = client.CreateReadSession(ctx, createReadSessionRequest)
checkErr(err, t)
}
// When this test starts failing, delete it and the corresponding lines in system_tests.bash
//
// gaming/servers
func TestGameservices(t *testing.T) {
tc := testutil.EndToEndTest(t)
ctx := context.Background()
// NOTE(cbro): Observed successful and unsuccessful calls take under 1s.
ctx, cancel := context.WithTimeout(ctx, time.Minute)
defer cancel()
client, err := gaming.NewRealmsClient(ctx)
if err != nil {
t.Fatalf("NewRealmsClient: %v", err)
}
defer client.Close()
req := &gamingpb.ListRealmsRequest{
Parent: "projects/" + tc.ProjectID + "/locations/global",
}
it := client.ListRealms(ctx, req)
_, err = it.Next()
if err == iterator.Done {
err = nil
}
checkErr(err, t)
}
|
[
"\"GOOGLE_API_USE_MTLS\""
] |
[] |
[
"GOOGLE_API_USE_MTLS"
] |
[]
|
["GOOGLE_API_USE_MTLS"]
|
go
| 1 | 0 | |
tests/integration_tests/data_steward/metrics/required_labs_test.py
|
# Python imports
import os
import unittest
# Third party imports
import mock
# Project imports
import app_identity
import bq_utils
import common
import gcs_utils
import resources
import validation.sql_wrangle as sql_wrangle
from utils import bq
from tests import test_util
from tests.test_util import (FAKE_HPO_ID)
from validation.metrics import required_labs as required_labs
from validation.metrics.required_labs import (
MEASUREMENT_CONCEPT_SETS_TABLE, MEASUREMENT_CONCEPT_SETS_DESCENDANTS_TABLE)
class RequiredLabsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
self.hpo_bucket = gcs_utils.get_hpo_bucket(FAKE_HPO_ID)
self.project_id = app_identity.get_application_id()
self.dataset_id = bq_utils.get_dataset_id()
self.rdr_dataset_id = bq_utils.get_rdr_dataset_id()
self.folder_prefix = '2019-01-01/'
test_util.delete_all_tables(self.dataset_id)
test_util.empty_bucket(self.hpo_bucket)
self.client = bq.get_client(self.project_id)
mock_get_hpo_name = mock.patch('validation.main.get_hpo_name')
self.mock_get_hpo_name = mock_get_hpo_name.start()
self.mock_get_hpo_name.return_value = 'Fake HPO'
self.addCleanup(mock_get_hpo_name.stop)
self._load_data()
def tearDown(self):
test_util.delete_all_tables(bq_utils.get_dataset_id())
test_util.empty_bucket(self.hpo_bucket)
def _load_data(self):
# Load measurement_concept_sets
required_labs.load_measurement_concept_sets_table(
project_id=self.project_id, dataset_id=self.dataset_id)
# Load measurement_concept_sets_descendants
required_labs.load_measurement_concept_sets_descendants_table(
project_id=self.project_id, dataset_id=self.dataset_id)
# we need to load measurement.csv into bigquery_dataset_id in advance for the other integration tests
ehr_measurement_result = bq_utils.load_table_from_csv(
project_id=self.project_id,
dataset_id=self.dataset_id,
table_name=bq_utils.get_table_id(FAKE_HPO_ID, common.MEASUREMENT),
csv_path=test_util.FIVE_PERSONS_MEASUREMENT_CSV,
fields=resources.fields_for(common.MEASUREMENT))
bq_utils.wait_on_jobs([ehr_measurement_result['jobReference']['jobId']])
def test_check_and_copy_tables(self):
"""
Test to ensure all the necessary tables for required_labs.py are copied and/or created
"""
# Preconditions
descendants_table_name = f'{self.project_id}.{self.dataset_id}.{MEASUREMENT_CONCEPT_SETS_DESCENDANTS_TABLE}'
concept_sets_table_name = f'{self.project_id}.{self.dataset_id}.{MEASUREMENT_CONCEPT_SETS_TABLE}'
concept_table_name = f'{self.project_id}.{self.dataset_id}.{common.CONCEPT}'
concept_ancestor_table_name = f'{self.project_id}.{self.dataset_id}.{common.CONCEPT_ANCESTOR}'
actual_descendants_table = self.client.get_table(descendants_table_name)
actual_concept_sets_table = self.client.get_table(
concept_sets_table_name)
actual_concept_table = self.client.get_table(concept_table_name)
actual_concept_ancestor_table = self.client.get_table(
concept_ancestor_table_name)
# Test
required_labs.check_and_copy_tables(self.project_id, self.dataset_id)
# Post conditions
self.assertIsNotNone(actual_descendants_table.created)
self.assertIsNotNone(actual_concept_sets_table.created)
self.assertIsNotNone(actual_concept_table.created)
self.assertIsNotNone(actual_concept_ancestor_table.created)
def test_measurement_concept_sets_table(self):
query = sql_wrangle.qualify_tables(
'''SELECT * FROM {dataset_id}.{table_id}'''.format(
dataset_id=self.dataset_id,
table_id=MEASUREMENT_CONCEPT_SETS_TABLE))
response = bq_utils.query(query)
actual_fields = [{
'name': field['name'].lower(),
'type': field['type'].lower()
} for field in response['schema']['fields']]
expected_fields = [{
'name': field['name'].lower(),
'type': field['type'].lower()
} for field in resources.fields_for(MEASUREMENT_CONCEPT_SETS_TABLE)]
self.assertListEqual(expected_fields, actual_fields)
measurement_concept_sets_table_path = os.path.join(
resources.resource_files_path,
MEASUREMENT_CONCEPT_SETS_TABLE + '.csv')
expected_total_rows = len(
resources.csv_to_list(measurement_concept_sets_table_path))
self.assertEqual(expected_total_rows, int(response['totalRows']))
def test_load_measurement_concept_sets_descendants_table(self):
query = sql_wrangle.qualify_tables(
"""SELECT * FROM {dataset_id}.{table_id}""".format(
dataset_id=self.dataset_id,
table_id=MEASUREMENT_CONCEPT_SETS_DESCENDANTS_TABLE))
response = bq_utils.query(query)
actual_fields = [{
'name': field['name'].lower(),
'type': field['type'].lower()
} for field in response['schema']['fields']]
expected_fields = [{
'name': field['name'].lower(),
'type': field['type'].lower()
} for field in resources.fields_for(
MEASUREMENT_CONCEPT_SETS_DESCENDANTS_TABLE)]
self.assertListEqual(expected_fields, actual_fields)
def test_get_lab_concept_summary_query(self):
summary_query = required_labs.get_lab_concept_summary_query(FAKE_HPO_ID)
summary_response = bq_utils.query(summary_query)
summary_rows = bq_utils.response2rows(summary_response)
submitted_labs = [
row for row in summary_rows
if row['measurement_concept_id_exists'] == 1
]
actual_total_labs = summary_response['totalRows']
# Count the total number of labs required; this should equal the total number of rows
# returned by get_lab_concept_summary_query, including both submitted and missing labs.
unique_ancestor_concept_query = sql_wrangle.qualify_tables(
"""SELECT DISTINCT ancestor_concept_id FROM `{project_id}.{dataset_id}.{table_id}`"""
.format(project_id=self.project_id,
dataset_id=self.dataset_id,
table_id=MEASUREMENT_CONCEPT_SETS_DESCENDANTS_TABLE))
unique_ancestor_concept_response = bq_utils.query(
unique_ancestor_concept_query)
expected_total_labs = unique_ancestor_concept_response['totalRows']
# Count the number of labs in the measurement table; this should equal the number of labs
# submitted by the fake site.
unique_measurement_concept_id_query = '''
SELECT
DISTINCT c.ancestor_concept_id
FROM
`{project_id}.{dataset_id}.{measurement_concept_sets_descendants}` AS c
JOIN
`{project_id}.{dataset_id}.{measurement}` AS m
ON
c.descendant_concept_id = m.measurement_concept_id
'''.format(project_id=self.project_id,
dataset_id=self.dataset_id,
measurement_concept_sets_descendants=
MEASUREMENT_CONCEPT_SETS_DESCENDANTS_TABLE,
measurement=bq_utils.get_table_id(
FAKE_HPO_ID, common.MEASUREMENT))
unique_measurement_concept_id_response = bq_utils.query(
unique_measurement_concept_id_query)
unique_measurement_concept_id_total_labs = unique_measurement_concept_id_response[
'totalRows']
self.assertEqual(int(expected_total_labs),
int(actual_total_labs),
msg='Compare the total number of labs')
self.assertEqual(int(unique_measurement_concept_id_total_labs),
len(submitted_labs),
msg='Compare the number '
'of labs submitted '
'in the measurement')
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
magefile.go
|
//go:build mage
// +build mage
package main
import (
"crypto/sha1"
"errors"
"fmt"
"go/build"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"github.com/magefile/mage/mg"
"github.com/underpin-korea/livekit_server_go/version"
)
const (
goChecksumFile = ".checksumgo"
imageName = "livekit/livekit-server"
)
// Default target to run when none is specified
// If not set, running mage will list available targets
var Default = Build
var checksummer = NewChecksummer(".", goChecksumFile, ".go", ".mod")
func init() {
checksummer.IgnoredPaths = []string{
"pkg/service/wire_gen.go",
"pkg/rtc/types/typesfakes",
}
}
// explicitly reinstall all deps
func Deps() error {
return installTools(true)
}
// builds LiveKit server
func Build() error {
mg.Deps(generateWire)
if !checksummer.IsChanged() {
fmt.Println("up to date")
return nil
}
fmt.Println("building...")
if err := os.MkdirAll("bin", 0755); err != nil {
return err
}
cmd := exec.Command("go", "build", "-o", "../../bin/livekit-server")
cmd.Dir = "cmd/server"
connectStd(cmd)
if err := cmd.Run(); err != nil {
return err
}
checksummer.WriteChecksum()
return nil
}
// builds binary that runs on linux amd64
func BuildLinux() error {
mg.Deps(generateWire)
if !checksummer.IsChanged() {
fmt.Println("up to date")
return nil
}
fmt.Println("building...")
if err := os.MkdirAll("bin", 0755); err != nil {
return err
}
cmd := exec.Command("go", "build", "-o", "../../bin/livekit-server-amd64")
cmd.Env = []string{
"GOOS=linux",
"GOARCH=amd64",
"HOME=" + os.Getenv("HOME"),
"GOPATH=" + os.Getenv("GOPATH"),
}
cmd.Dir = "cmd/server"
connectStd(cmd)
if err := cmd.Run(); err != nil {
return err
}
checksummer.WriteChecksum()
return nil
}
func Deadlock() error {
if err := installTool("golang.org/x/tools/cmd/goimports", "latest", false); err != nil {
return err
}
if err := run("go get github.com/sasha-s/go-deadlock"); err != nil {
return err
}
if err := pipe("grep -rl sync.Mutex ./pkg", "xargs sed -i -e s/sync.Mutex/deadlock.Mutex/g"); err != nil {
return err
}
if err := pipe("grep -rl sync.RWMutex ./pkg", "xargs sed -i -e s/sync.RWMutex/deadlock.RWMutex/g"); err != nil {
return err
}
if err := pipe("grep -rl deadlock.Mutex\\|deadlock.RWMutex ./pkg", "xargs goimports -w"); err != nil {
return err
}
if err := run("go mod tidy"); err != nil {
return err
}
return nil
}
func Sync() error {
if err := pipe("grep -rl deadlock.Mutex ./pkg", "xargs sed -i -e s/deadlock.Mutex/sync.Mutex/g"); err != nil {
return err
}
if err := pipe("grep -rl deadlock.RWMutex ./pkg", "xargs sed -i -e s/deadlock.RWMutex/sync.RWMutex/g"); err != nil {
return err
}
if err := pipe("grep -rl sync.Mutex\\|sync.RWMutex ./pkg", "xargs goimports -w"); err != nil {
return err
}
if err := run("go mod tidy"); err != nil {
return err
}
return nil
}
// builds and publish snapshot docker image
func PublishDocker() error {
// don't publish snapshot versions as latest or minor version
if !strings.Contains(version.Version, "SNAPSHOT") {
return errors.New("Cannot publish non-snapshot versions")
}
versionImg := fmt.Sprintf("%s:v%s", imageName, version.Version)
cmd := exec.Command("docker", "buildx", "build",
"--push", "--platform", "linux/amd64,linux/arm64",
"--tag", versionImg,
".")
connectStd(cmd)
if err := cmd.Run(); err != nil {
return err
}
return nil
}
// run unit tests, skipping integration
func Test() error {
mg.Deps(generateWire, setULimit)
cmd := exec.Command("go", "test", "-short", "./...", "-count=1")
connectStd(cmd)
return cmd.Run()
}
// run all tests including integration
func TestAll() error {
mg.Deps(generateWire, setULimit)
return run("go test ./... -count=1 -timeout=4m -v")
}
// cleans up builds
func Clean() {
fmt.Println("cleaning...")
os.RemoveAll("bin")
os.Remove(goChecksumFile)
}
// regenerate code
func Generate() error {
mg.Deps(installDeps, generateWire)
fmt.Println("generating...")
return run("go generate ./...")
}
// code generation for wiring
func generateWire() error {
mg.Deps(installDeps)
if !checksummer.IsChanged() {
return nil
}
fmt.Println("wiring...")
wire, err := getToolPath("wire")
if err != nil {
return err
}
cmd := exec.Command(wire)
cmd.Dir = "pkg/service"
connectStd(cmd)
if err := cmd.Run(); err != nil {
return err
}
return nil
}
// implicitly install deps
func installDeps() error {
return installTools(false)
}
func installTools(force bool) error {
tools := map[string]string{
"github.com/google/wire/cmd/wire": "latest",
}
for t, v := range tools {
if err := installTool(t, v, force); err != nil {
return err
}
}
return nil
}
func installTool(url, version string, force bool) error {
name := filepath.Base(url)
if !force {
_, err := getToolPath(name)
if err == nil {
// already installed
return nil
}
}
fmt.Printf("installing %s %s\n", name, version)
urlWithVersion := fmt.Sprintf("%s@%s", url, version)
cmd := exec.Command("go", "install", urlWithVersion)
connectStd(cmd)
if err := cmd.Run(); err != nil {
return err
}
// check
_, err := getToolPath(name)
return err
}
// helpers
func getToolPath(name string) (string, error) {
if p, err := exec.LookPath(name); err == nil {
return p, nil
}
// check under gopath
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = build.Default.GOPATH
}
p := filepath.Join(gopath, "bin", name)
if _, err := os.Stat(p); err != nil {
return "", err
}
return p, nil
}
func connectStd(cmd *exec.Cmd) {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
// Checksummer is a helper that generates a fast, non-portable checksum over a directory of files.
// It's designed as a quick way to bypass a rebuild when none of the watched files have changed.
type Checksummer struct {
dir string
file string
checksum string
allExts bool
extMap map[string]bool
IgnoredPaths []string
}
func NewChecksummer(dir string, checksumfile string, exts ...string) *Checksummer {
c := &Checksummer{
dir: dir,
file: checksumfile,
extMap: make(map[string]bool),
}
if len(exts) == 0 {
c.allExts = true
} else {
for _, ext := range exts {
c.extMap[ext] = true
}
}
return c
}
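// Usage sketch (mirrors what Build and BuildLinux do above): create a checksummer over the
// source tree, skip work when nothing changed, and record the new checksum after a successful build:
//   c := NewChecksummer(".", ".checksumgo", ".go", ".mod")
//   if c.IsChanged() { /* build */ ; _ = c.WriteChecksum() }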
func (c *Checksummer) IsChanged() bool {
// default changed
if err := c.computeChecksum(); err != nil {
log.Println("could not compute checksum", err)
return true
}
// read
existing, err := c.ReadChecksum()
if err != nil {
// may not be there
return true
}
return existing != c.checksum
}
func (c *Checksummer) ReadChecksum() (string, error) {
b, err := ioutil.ReadFile(filepath.Join(c.dir, c.file))
if err != nil {
return "", err
}
return string(b), nil
}
func (c *Checksummer) WriteChecksum() error {
if err := c.computeChecksum(); err != nil {
return err
}
return ioutil.WriteFile(filepath.Join(c.dir, c.file), []byte(c.checksum), 0644)
}
func (c *Checksummer) computeChecksum() error {
if c.checksum != "" {
return nil
}
entries := make([]string, 0)
ignoredMap := make(map[string]bool)
for _, f := range c.IgnoredPaths {
ignoredMap[f] = true
}
err := filepath.Walk(c.dir, func(path string, info os.FileInfo, err error) error {
if path == c.dir {
return nil
}
if strings.HasPrefix(info.Name(), ".") || ignoredMap[path] {
if info.IsDir() {
return filepath.SkipDir
} else {
return nil
}
}
if info.IsDir() {
entries = append(entries, fmt.Sprintf("%s %d", path, info.ModTime().Unix()))
} else if c.allExts || c.extMap[filepath.Ext(info.Name())] {
entries = append(entries, fmt.Sprintf("%s %d %d", path, info.Size(), info.ModTime().Unix()))
}
return nil
})
if err != nil {
return err
}
sort.Strings(entries)
h := sha1.New()
for _, e := range entries {
h.Write([]byte(e))
}
c.checksum = fmt.Sprintf("%x", h.Sum(nil))
return nil
}
func run(commands ...string) error {
for _, command := range commands {
args := strings.Split(command, " ")
cmd := exec.Command(args[0], args[1:]...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return err
}
}
return nil
}
func pipe(first, second string) error {
a1 := strings.Split(first, " ")
c1 := exec.Command(a1[0], a1[1:]...)
c1.Stderr = os.Stderr
p, err := c1.StdoutPipe()
if err != nil {
return err
}
a2 := strings.Split(second, " ")
c2 := exec.Command(a2[0], a2[1:]...)
c2.Stdin = p
c2.Stdout = os.Stdout
c2.Stderr = os.Stderr
if err = c1.Start(); err != nil {
return err
}
if err = c2.Start(); err != nil {
return err
}
if err = c1.Wait(); err != nil {
return err
}
if err = c2.Wait(); err != nil {
return err
}
return nil
}
|
[
"\"HOME\"",
"\"GOPATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH",
"HOME"
] |
[]
|
["GOPATH", "HOME"]
|
go
| 2 | 0 | |
examples/mailsettings/mailsettings.go
|
package main
import (
"fmt"
"log"
"os"
"github.com/sendgrid/sendgrid-go"
)
// Retrieveallmailsettings : Retrieve all mail settings
// GET /mail_settings
func Retrieveallmailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings", host)
request.Method = "GET"
queryParams := make(map[string]string)
queryParams["limit"] = "1"
queryParams["offset"] = "1"
request.QueryParams = queryParams
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Updateaddresswhitelistmailsettings : Update address whitelist mail settings
// PATCH /mail_settings/address_whitelist
func Updateaddresswhitelistmailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/address_whitelist", host)
request.Method = "PATCH"
request.Body = []byte(` {
"enabled": true,
"list": [
"[email protected]",
"example.com"
]
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Retrieveaddresswhitelistmailsettings : Retrieve address whitelist mail settings
// GET /mail_settings/address_whitelist
func Retrieveaddresswhitelistmailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/address_whitelist", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// UpdateBCCmailsettings : Update BCC mail settings
// PATCH /mail_settings/bcc
func UpdateBCCmailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/bcc", host)
request.Method = "PATCH"
request.Body = []byte(` {
"email": "[email protected]",
"enabled": false
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// RetrieveallBCCmailsettings : Retrieve all BCC mail settings
// GET /mail_settings/bcc
func RetrieveallBCCmailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/bcc", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Updatebouncepurgemailsettings : Update bounce purge mail settings
// PATCH /mail_settings/bounce_purge
func Updatebouncepurgemailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/bounce_purge", host)
request.Method = "PATCH"
request.Body = []byte(` {
"enabled": true,
"hard_bounces": 5,
"soft_bounces": 5
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Retrievebouncepurgemailsettings : Retrieve bounce purge mail settings
// GET /mail_settings/bounce_purge
func Retrievebouncepurgemailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/bounce_purge", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Updatefootermailsettings : Update footer mail settings
// PATCH /mail_settings/footer
func Updatefootermailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/footer", host)
request.Method = "PATCH"
request.Body = []byte(` {
"enabled": true,
"html_content": "...",
"plain_content": "..."
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Retrievefootermailsettings : Retrieve footer mail settings
// GET /mail_settings/footer
func Retrievefootermailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/footer", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Updateforwardbouncemailsettings : Update forward bounce mail settings
// PATCH /mail_settings/forward_bounce
func Updateforwardbouncemailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/forward_bounce", host)
request.Method = "PATCH"
request.Body = []byte(` {
"email": "[email protected]",
"enabled": true
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Retrieveforwardbouncemailsettings : Retrieve forward bounce mail settings
// GET /mail_settings/forward_bounce
func Retrieveforwardbouncemailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/forward_bounce", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Updateforwardspammailsettings : Update forward spam mail settings
// PATCH /mail_settings/forward_spam
func Updateforwardspammailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/forward_spam", host)
request.Method = "PATCH"
request.Body = []byte(` {
"email": "",
"enabled": false
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Retrieveforwardspammailsettings : Retrieve forward spam mail settings
// GET /mail_settings/forward_spam
func Retrieveforwardspammailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/forward_spam", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Updateplaincontentmailsettings : Update plain content mail settings
// PATCH /mail_settings/plain_content
func Updateplaincontentmailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/plain_content", host)
request.Method = "PATCH"
request.Body = []byte(` {
"enabled": false
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Retrieveplaincontentmailsettings : Retrieve plain content mail settings
// GET /mail_settings/plain_content
func Retrieveplaincontentmailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/plain_content", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Updatespamcheckmailsettings : Update spam check mail settings
// PATCH /mail_settings/spam_check
func Updatespamcheckmailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/spam_check", host)
request.Method = "PATCH"
request.Body = []byte(` {
"enabled": true,
"max_score": 5,
"url": "url"
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Retrievespamcheckmailsettings : Retrieve spam check mail settings
// GET /mail_settings/spam_check
func Retrievespamcheckmailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/spam_check", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Updatetemplatemailsettings : Update template mail settings
// PATCH /mail_settings/template
func Updatetemplatemailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/template", host)
request.Method = "PATCH"
request.Body = []byte(` {
"enabled": true,
"html_content": "<% body %>"
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Retrievelegacytemplatemailsettings : Retrieve legacy template mail settings
// GET /mail_settings/template
func Retrievelegacytemplatemailsettings() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/template", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
func main() {
// add your function calls here
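// Example (illustrative; assumes SENDGRID_API_KEY is exported in the environment):
// Retrieveallmailsettings()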
}
|
[
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\""
] |
[] |
[
"SENDGRID_API_KEY"
] |
[]
|
["SENDGRID_API_KEY"]
|
go
| 1 | 0 | |
pkg/helm/helm_test.go
|
// Copyright 2020 IBM Corp.
// SPDX-License-Identifier: Apache-2.0
package helm
import (
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chart/loader"
"helm.sh/helm/v3/pkg/release"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status"
)
func buildTestChart() *chart.Chart {
testManifestWithHook := `apiVersion: v1
kind: ConfigMap
metadata:
name: test-cm
data:
key: value`
return &chart.Chart{
Metadata: &chart.Metadata{
APIVersion: "v1",
Name: "test-chart",
Type: "application",
Version: "0.1.0",
},
Templates: []*chart.File{
{Name: "templates/config.yaml", Data: []byte(testManifestWithHook)},
},
}
}
func Log(t *testing.T, label string, err error) {
if err == nil {
err = fmt.Errorf("succeeded")
}
t.Logf("%s: %s", label, err)
}
var (
kubeNamespace = "default"
releaseName = "test-install-release"
chartName = "mychart"
tagName = "0.7.0"
hostname = os.Getenv("DOCKER_HOSTNAME")
namespace = os.Getenv("DOCKER_NAMESPACE")
username = os.Getenv("DOCKER_USERNAME")
password = os.Getenv("DOCKER_PASSWORD")
insecure, _ = strconv.ParseBool(os.Getenv("DOCKER_INSECURE"))
chartRef = ChartRef(hostname, namespace, chartName, tagName)
impl = new(Impl)
)
func ChartRef(hostname, namespace, name, tagname string) string {
return fmt.Sprintf("%s/%s/%s:%s", hostname, namespace, name, tagname)
}
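// For example, ChartRef("registry.example.com", "team", "mychart", "0.7.0") yields
// "registry.example.com/team/mychart:0.7.0" (hostname and namespace here are illustrative values).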
func TestHelmRegistry(t *testing.T) {
tmpChart := os.Getenv("TMP_CHART")
var err error
// Test should only run as integration test if registry is available
if _, isSet := os.LookupEnv("DOCKER_HOSTNAME"); !isSet {
t.Skip("No integration environment found. Skipping test...")
}
tmpDir, err := ioutil.TempDir("", "test-helm-")
if err != nil {
t.Errorf("Unable to create temporary directory: %s", err)
}
defer os.RemoveAll(tmpDir)
pulledChartDestPath := path.Join(tmpDir, "pulledChartDir")
packedChartDir := path.Join(tmpDir, "packedChartDir")
err = os.Mkdir(pulledChartDestPath, 0700)
if err != nil {
t.Errorf("Unable to setup test temp charts directory: %s", err)
}
err = os.Mkdir(packedChartDir, 0700)
if err != nil {
t.Errorf("Unable to setup test temp charts directory: %s", err)
}
if username != "" && password != "" {
err = impl.RegistryLogin(hostname, username, password, insecure)
assert.Nil(t, err)
Log(t, "registry login", err)
}
err = impl.Package(tmpChart, packedChartDir, tagName)
assert.Nil(t, err)
Log(t, "package chart", err)
err = impl.Pull(chartRef, pulledChartDestPath)
assert.Nil(t, err)
Log(t, "pull chart", err)
pulledChart, err := impl.Load(chartRef, pulledChartDestPath)
assert.Nil(t, err)
Log(t, "load chart", err)
packagePath := packedChartDir + "/mychart-0.7.0.tgz"
packedChart, err := loader.Load(packagePath)
assert.Nil(t, err)
assert.Equal(t, packedChart.Metadata.Name, pulledChart.Metadata.Name, "expected loaded chart equals saved chart")
if username != "" && password != "" {
err = impl.RegistryLogout(hostname)
assert.Nil(t, err)
Log(t, "registry logout", err)
}
}
func TestHelmRelease(t *testing.T) {
// Test should only run as integration test if registry is available
if _, isSet := os.LookupEnv("DOCKER_HOSTNAME"); !isSet {
t.Skip("No integration environment found. Skipping test...")
}
var err error
origChart := buildTestChart()
_, _ = impl.Uninstall(kubeNamespace, releaseName)
vals := map[string]interface{}{
"data": map[string]interface{}{
"key": "value1",
},
}
_, err = impl.Install(origChart, kubeNamespace, releaseName, vals)
assert.Nil(t, err)
Log(t, "install", err)
_, err = impl.Upgrade(origChart, kubeNamespace, releaseName, vals)
assert.Nil(t, err)
Log(t, "upgrade", err)
var rel *release.Release
assert.Eventually(t, func() bool {
rel, err = impl.Status(kubeNamespace, releaseName)
assert.Nil(t, err)
return rel.Info.Status == release.StatusDeployed
}, time.Minute, time.Second)
Log(t, "status", err)
var resources []*unstructured.Unstructured
resources, err = impl.GetResources(kubeNamespace, releaseName)
assert.Nil(t, err)
assert.Len(t, resources, 1)
computedResult, _ := kstatus.Compute(resources[0])
assert.Equal(t, kstatus.CurrentStatus, computedResult.Status)
Log(t, "getResources", err)
_, err = impl.Uninstall(kubeNamespace, releaseName)
assert.Nil(t, err)
Log(t, "uninstall", err)
}
|
[
"\"DOCKER_HOSTNAME\"",
"\"DOCKER_NAMESPACE\"",
"\"DOCKER_USERNAME\"",
"\"DOCKER_PASSWORD\"",
"\"DOCKER_INSECURE\"",
"\"TMP_CHART\""
] |
[] |
[
"DOCKER_PASSWORD",
"DOCKER_INSECURE",
"TMP_CHART",
"DOCKER_USERNAME",
"DOCKER_HOSTNAME",
"DOCKER_NAMESPACE"
] |
[]
|
["DOCKER_PASSWORD", "DOCKER_INSECURE", "TMP_CHART", "DOCKER_USERNAME", "DOCKER_HOSTNAME", "DOCKER_NAMESPACE"]
|
go
| 6 | 0 | |
cmd/argo/commands/server.go
|
package commands
import (
"crypto/tls"
"fmt"
"os"
"time"
"github.com/argoproj/pkg/errors"
"github.com/argoproj/pkg/stats"
log "github.com/sirupsen/logrus"
"github.com/skratchdot/open-golang/open"
"github.com/spf13/cobra"
"golang.org/x/net/context"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"github.com/argoproj/argo/cmd/argo/commands/client"
wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned"
"github.com/argoproj/argo/server/apiserver"
"github.com/argoproj/argo/util/help"
)
func NewServerCommand() *cobra.Command {
var (
authMode string
configMap string
port int
baseHRef string
secure bool
htst bool
namespaced bool // --namespaced
managedNamespace string // --managed-namespace
enableOpenBrowser bool
)
var command = cobra.Command{
Use: "server",
Short: "Start the Argo Server",
Example: fmt.Sprintf(`
See %s`, help.ArgoSever),
RunE: func(c *cobra.Command, args []string) error {
stats.RegisterStackDumper()
stats.StartStatsTicker(5 * time.Minute)
config, err := client.GetConfig().ClientConfig()
if err != nil {
return err
}
config.Burst = 30
config.QPS = 20.0
namespace := client.Namespace()
kubeConfig := kubernetes.NewForConfigOrDie(config)
wfClientSet := wfclientset.NewForConfigOrDie(config)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if !namespaced && managedNamespace != "" {
log.Warn("ignoring --managed-namespace because --namespaced is false")
managedNamespace = ""
}
if namespaced && managedNamespace == "" {
managedNamespace = namespace
}
log.WithFields(log.Fields{
"authMode": authMode,
"namespace": namespace,
"managedNamespace": managedNamespace,
"baseHRef": baseHRef,
"secure": secure,
}).Info()
var tlsConfig *tls.Config
if secure {
cer, err := tls.LoadX509KeyPair("argo-server.crt", "argo-server.key")
errors.CheckError(err)
// InsecureSkipVerify will not impact the TLS listener. It is needed for the server to speak to itself for GRPC.
tlsConfig = &tls.Config{Certificates: []tls.Certificate{cer}, InsecureSkipVerify: true}
} else {
log.Warn("You are running in insecure mode. Learn how to enable transport layer security: https://github.com/argoproj/argo/blob/master/docs/tls.md")
}
opts := apiserver.ArgoServerOpts{
BaseHRef: baseHRef,
TLSConfig: tlsConfig,
HSTS: htst,
Namespace: namespace,
WfClientSet: wfClientSet,
KubeClientset: kubeConfig,
RestConfig: config,
AuthMode: authMode,
ManagedNamespace: managedNamespace,
ConfigName: configMap,
}
err = opts.ValidateOpts()
if err != nil {
return err
}
browserOpenFunc := func(url string) {}
if enableOpenBrowser {
browserOpenFunc = func(url string) {
log.Infof("Argo UI is available at %s", url)
err := open.Run(url)
if err != nil {
log.Warnf("Unable to open the browser. %v", err)
}
}
}
apiserver.NewArgoServer(opts).Run(ctx, port, browserOpenFunc)
return nil
},
}
command.Flags().IntVarP(&port, "port", "p", 2746, "Port to listen on")
defaultBaseHRef := os.Getenv("BASE_HREF")
if defaultBaseHRef == "" {
defaultBaseHRef = "/"
}
command.Flags().StringVar(&baseHRef, "basehref", defaultBaseHRef, "Value for base href in index.html. Used if the server is running behind reverse proxy under subpath different from /. Defaults to the environment variable BASE_HREF.")
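// For example, when running behind a proxy that serves the UI under /argo/ (illustrative value),
// either `--basehref /argo/` or `BASE_HREF=/argo/` produces the same base href.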
// "-e" for encrypt, like zip
command.Flags().BoolVarP(&secure, "secure", "e", false, "Whether or not we should listen on TLS.")
command.Flags().BoolVar(&htst, "hsts", true, "Whether or not we should add a HTTP Secure Transport Security header. This only has effect if secure is enabled.")
command.Flags().StringVar(&authMode, "auth-mode", "server", "API server authentication mode. One of: client|server|hybrid")
command.Flags().StringVar(&configMap, "configmap", "workflow-controller-configmap", "Name of K8s configmap to retrieve workflow controller configuration")
command.Flags().BoolVar(&namespaced, "namespaced", false, "run as namespaced mode")
command.Flags().StringVar(&managedNamespace, "managed-namespace", "", "namespace that watches, default to the installation namespace")
command.Flags().BoolVarP(&enableOpenBrowser, "browser", "b", false, "enable automatic launching of the browser [local mode]")
return &command
}
|
[
"\"BASE_HREF\""
] |
[] |
[
"BASE_HREF"
] |
[]
|
["BASE_HREF"]
|
go
| 1 | 0 | |
appium/sample-scripts/python/testdroid_android.py
|
##
## For help on setting up your machine and configuring this TestScript go to
## http://docs.testdroid.com/appium/
##
import os
import time
import unittest
import subprocess
from time import sleep
from appium import webdriver
from device_finder import DeviceFinder
from selenium.common.exceptions import WebDriverException
def log(msg):
print (time.strftime("%H:%M:%S") + ": " + msg)
class TestdroidAndroid(unittest.TestCase):
"""
Take screenshot and store files to defined location, with numbering prefix
:Args:
- name - files are stored as #_name
"""
def screenshot(self, name):
screenshot_name = str(self.screenshot_count) + "_" + name + ".png"
log("Taking screenshot: " + screenshot_name)
self.driver.save_screenshot(self.screenshot_dir + "/" + screenshot_name)
self.screenshot_count += 1
def setUp(self):
##
## IMPORTANT: Set the following parameters.
## You can set the parameters outside the script with environment variables.
## If env var is not set the string after or is used.
##
testdroid_url = os.environ.get('TESTDROID_URL') or "https://cloud.testdroid.com"
appium_url = os.environ.get('TESTDROID_APPIUM_URL') or 'http://appium.testdroid.com/wd/hub'
testdroid_apiKey = os.environ.get('TESTDROID_APIKEY') or ""
testdroid_project_name = os.environ.get('TESTDROID_PROJECT') or "Android sample project"
testdroid_testrun_name = os.environ.get('TESTDROID_TESTRUN') or "My testrun"
testdroid_app = os.environ.get('TESTDROID_APP') or ""
app_package = os.environ.get('TESTDROID_APP_PACKAGE') or 'com.bitbar.testdroid'
app_activity = os.environ.get('TESTDROID_ACTIVITY') or '.BitbarSampleApplicationActivity'
new_command_timeout = os.environ.get('TESTDROID_CMD_TIMEOUT') or '60'
testdroid_test_timeout = os.environ.get('TESTDROID_TEST_TIMEOUT') or '600'
self.screenshot_dir = os.environ.get('TESTDROID_SCREENSHOTS') or os.getcwd() + "/screenshots"
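# Illustrative shell setup (values are placeholders, not defaults of this script):
#   export TESTDROID_APIKEY=<your-api-key>
#   export TESTDROID_APP=<uploaded-app-id-or-url>
#   export TESTDROID_DEVICE="Nexus 5"   # optional; DeviceFinder picks a free device otherwise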
log ("Will save screenshots at: " + self.screenshot_dir)
self.screenshot_count = 1
# Options to select device
# 1) Set environment variable TESTDROID_DEVICE
# 2) Set device name to this python script
# 3) Do not set #1 and #2 and let DeviceFinder to find free device for you
testdroid_device = os.environ.get('TESTDROID_DEVICE') or ""
deviceFinder = DeviceFinder(url=testdroid_url)
if testdroid_device == "":
# Loop will not exit until free device is found
while testdroid_device == "":
testdroid_device = deviceFinder.available_android_device()
if "localhost" in appium_url:
self.api_level = int(subprocess.check_output(["adb", "shell", "getprop ro.build.version.sdk"]).strip())  # cast so the numeric comparison below works
else:
self.api_level = deviceFinder.device_API_level(testdroid_device)
log("Device API level is %s" % self.api_level)
log ("Starting Appium test using device '%s'" % testdroid_device)
desired_capabilities_cloud = {}
desired_capabilities_cloud['testdroid_apiKey'] = testdroid_apiKey
if self.api_level > 16:
desired_capabilities_cloud['testdroid_target'] = 'android'
desired_capabilities_cloud['automationName'] = 'android'
else:
desired_capabilities_cloud['testdroid_target'] = 'selendroid'
desired_capabilities_cloud['automationName'] = 'selendroid'
desired_capabilities_cloud['testdroid_project'] = testdroid_project_name
desired_capabilities_cloud['testdroid_testrun'] = testdroid_testrun_name
desired_capabilities_cloud['testdroid_device'] = testdroid_device
desired_capabilities_cloud['testdroid_app'] = testdroid_app
desired_capabilities_cloud['platformName'] = 'Android'
desired_capabilities_cloud['deviceName'] = 'Android Phone'
desired_capabilities_cloud['appPackage'] = app_package
desired_capabilities_cloud['appActivity'] = app_activity
desired_capabilities_cloud['newCommandTimeout'] = new_command_timeout
desired_capabilities_cloud['testdroid_testTimeout'] = testdroid_test_timeout
# set up webdriver
log ("WebDriver request initiated. Waiting for response, this typically takes 2-3 mins")
self.driver = webdriver.Remote(appium_url, desired_capabilities_cloud)
log ("WebDriver response received")
def tearDown(self):
log ("Quitting")
self.driver.quit()
def testSample(self):
log (" Getting device screen size")
log (" " + str(self.driver.get_window_size()))
isSelendroid = None
if self.api_level < 17:
isSelendroid = True
self.screenshot("app_launch")
log (" Typing in name")
elems=self.driver.find_elements_by_class_name('android.widget.EditText')
log (" info: EditText:" + `len(elems)`)
log (" Filling in name")
elems[0].send_keys("Testdroid User")
sleep(2)
self.screenshot("name_typed")
self.driver.orientation = "LANDSCAPE"
self.screenshot("landscape")
self.driver.orientation = "PORTRAIT"
self.screenshot("portrait")
try:
log (" Hiding keyboard")
self.driver.hide_keyboard()
except WebDriverException:
pass # pass exception, if keyboard isn't visible already
self.screenshot("name_typed_keyboard_hidden")
log (" Clicking element 'Buy 101 devices'")
if isSelendroid:
elem = self.driver.find_element_by_link_text('Buy 101 devices')
else:
elem = self.driver.find_element_by_name('Buy 101 devices')
elem.click()
self.screenshot("clicked_button1")
log (" Clicking Answer")
if isSelendroid:
elem = self.driver.find_element_by_link_text('Answer')
else:
elem = self.driver.find_element_by_name('Answer')
elem.click()
self.screenshot("answer")
log (" Navigating back to Activity-1")
self.driver.back()
self.screenshot("main_activity")
log (" Clicking element 'Use Testdroid Cloud'")
if isSelendroid:
elem = self.driver.find_element_by_link_text('Use Testdroid Cloud')
else:
elem = self.driver.find_element_by_name('Use Testdroid Cloud')
elem.click()
self.screenshot("clicked_button2")
log (" Clicking Answer")
if isSelendroid:
elem = self.driver.find_element_by_link_text('Answer')
else:
elem = self.driver.find_element_by_name('Answer')
elem.click()
self.screenshot("answer")
def initialize():
return TestdroidAndroid
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestdroidAndroid)
unittest.TextTestRunner(verbosity=2).run(suite)
|
[] |
[] |
[
"TESTDROID_APPIUM_URL",
"TESTDROID_APP_PACKAGE",
"TESTDROID_APIKEY",
"TESTDROID_SCREENSHOTS",
"TESTDROID_CMD_TIMEOUT",
"TESTDROID_URL",
"TESTDROID_APP",
"TESTDROID_ACTIVITY",
"TESTDROID_PROJECT",
"TESTDROID_TEST_TIMEOUT",
"TESTDROID_DEVICE",
"TESTDROID_TESTRUN"
] |
[]
|
["TESTDROID_APPIUM_URL", "TESTDROID_APP_PACKAGE", "TESTDROID_APIKEY", "TESTDROID_SCREENSHOTS", "TESTDROID_CMD_TIMEOUT", "TESTDROID_URL", "TESTDROID_APP", "TESTDROID_ACTIVITY", "TESTDROID_PROJECT", "TESTDROID_TEST_TIMEOUT", "TESTDROID_DEVICE", "TESTDROID_TESTRUN"]
|
python
| 12 | 0 | |
logger.go
|
package glog
import (
"errors"
"fmt"
"log"
"os"
"time"
)
// LogLevelDebug, LogLevelInfo, LogLevelWarn, LogLevelError define the logger level
const (
LogLevelDebug = iota
LogLevelInfo
LogLevelWarn
LogLevelError
)
// Glog is a logger writing to standard output; it lets the user print any log info they want
type Glog struct {
*log.Logger
level int
isReload bool
interval int // reload log level interval(unit: second), default/minimal: 10s
}
// New creates a new Glog
func New(isReload bool) *Glog {
logger := log.New(os.Stdout, "[LOG]", log.LstdFlags|log.Lmicroseconds)
gl := &Glog{
Logger: logger,
level: LogLevelError,
isReload: isReload,
interval: 10,
}
go gl.ReloadLogLevel()
return gl
}
// SetInterval sets the interval (in seconds) at which the log level is reloaded from the environment
func (g *Glog) SetInterval(val int) {
	// enforce the minimum allowed interval
if val < 10 {
val = 10
}
g.interval = val
}
// SetLevel sets the logger output level
func (g *Glog) SetLevel(logLevel int) error {
if logLevel < 0 || logLevel > LogLevelError {
return errors.New("Invalid Log level")
}
g.level = logLevel
if g.level == LogLevelDebug {
g.SetFlags(log.LstdFlags | log.Lshortfile | log.Lmicroseconds)
}
return nil
}
// SetReload sets the reload flag
func (g *Glog) SetReload(isReload bool) {
g.isReload = isReload
}
// ReloadLogLevel periodically reloads the log level from the LOG_LEVEL environment variable
func (g *Glog) ReloadLogLevel() {
logLevelStr := map[int]string{
0: "DEBUG",
1: "INFO",
2: "WARN",
3: "ERROR",
}
t := time.Tick(time.Second * time.Duration(g.interval))
for range t {
if g.isReload {
logLevel := os.Getenv("LOG_LEVEL")
switch logLevel {
case "DEBUG":
g.level = LogLevelDebug
case "INFO":
g.level = LogLevelInfo
case "WARN":
g.level = LogLevelWarn
case "ERROR":
g.level = LogLevelError
default:
// if LOG_LEVEL does not exist, DO NOT do any change
}
}
g.Printf("Current log level : %s", logLevelStr[g.level])
}
}
// Debugf prints at the debug log level
func (g *Glog) Debugf(format string, v ...interface{}) string {
if g.level <= LogLevelDebug {
g.SetPrefix("[LOG][DEBUG]: ")
g.Printf(format, v...)
return fmt.Sprintf("[LOG][DEBUG]: "+format, v...)
}
return ""
}
// Infof prints at the info log level
func (g *Glog) Infof(format string, v ...interface{}) string {
if g.level <= LogLevelInfo {
g.SetPrefix("[LOG][INFO]: ")
g.Printf(format, v...)
return fmt.Sprintf("[LOG][INFO]: "+format, v...)
}
return ""
}
// Warnf prints at the warn log level
func (g *Glog) Warnf(format string, v ...interface{}) string {
if g.level <= LogLevelWarn {
g.SetPrefix("[LOG][WARN]: ")
g.Printf(format, v...)
return fmt.Sprintf("[LOG][WARN]: "+format, v...)
}
return ""
}
// Errorf prints at the error log level
func (g *Glog) Errorf(format string, v ...interface{}) string {
if g.level <= LogLevelError {
g.SetPrefix("[LOG][ERROR]: ")
g.Printf(format, v...)
return fmt.Sprintf("[LOG][ERROR]: "+format, v...)
}
return ""
}
|
[
"\"LOG_LEVEL\""
] |
[] |
[
"LOG_LEVEL"
] |
[]
|
["LOG_LEVEL"]
|
go
| 1 | 0 | |
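A minimal usage sketch for the glog package above, assuming a hypothetical import path (the main package below is illustration only and not part of the original file): it shows how the leveled printers filter output and how the reload goroutine picks up LOG_LEVEL.
package main
import (
	"os"
	"time"
	glog "example.com/glog" // hypothetical import path for the package above
)
func main() {
	// Pretend the operator exported LOG_LEVEL before starting the process.
	os.Setenv("LOG_LEVEL", "DEBUG")
	logger := glog.New(true) // reload LOG_LEVEL every 10s (the default interval)
	if err := logger.SetLevel(glog.LogLevelWarn); err != nil {
		logger.Errorf("invalid level: %v", err)
	}
	logger.Infof("suppressed while the level is WARN")
	logger.Warnf("printed: warnings and above pass the filter")
	// After one reload tick the level follows LOG_LEVEL=DEBUG again.
	time.Sleep(11 * time.Second)
	logger.Debugf("printed once the reload goroutine has picked up LOG_LEVEL")
}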
zenml/core/pipelines/utils.py
|
# Copyright (c) maiot GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import datetime
import os
import re
import shutil
from distutils.core import run_setup
from tfx.utils import io_utils
def prepare_sdist():
"""
Refer to the README.md in the docs folder
"""
dist_path = os.path.join(os.getcwd(), 'dist')
if os.path.exists(dist_path) and os.path.isdir(dist_path):
print('Removing {}'.format(dist_path))
shutil.rmtree(dist_path)
else:
print('There is no dist folder.')
# FOR THE BOPA
run_setup('setup.py', script_args=['sdist'])
req_path = os.path.join(dist_path, 'requirements.txt')
io_utils.write_string_file(req_path, '')
# FOR THE BEPA
run_setup('setup.py', script_args=['sdist'])
req_path = os.path.join(dist_path, 'requirements.txt')
io_utils.write_string_file(req_path, '')
def generate_unique_name(base_name):
"""
Args:
base_name:
"""
identifier = os.getenv('CI_COMMIT_SHORT_SHA', os.getenv('USER', 'local'))
return re.sub(
r'[^0-9a-zA-Z-]+',
'-',
'{pipeline_name}-{identifier}-{ts}'.format(
pipeline_name=base_name,
identifier=identifier,
ts=int(datetime.datetime.timestamp(datetime.datetime.now()))
).lower()
)
def sanitize_name_for_ai_platform(name: str):
"""
Args:
name (str):
"""
return 'ce_' + name.replace('-', '_').lower()
def parse_yaml_beam_args(pipeline_args):
"""Converts yaml beam args to list of args TFX accepts
Args:
pipeline_args: dict specified in the config.yml
Returns:
list of strings, where each string is a beam argument
"""
return ['--{}={}'.format(key, value) for key, value in
pipeline_args.items()]
|
[] |
[] |
[
"USER",
"CI_COMMIT_SHORT_SHA"
] |
[]
|
["USER", "CI_COMMIT_SHORT_SHA"]
|
python
| 2 | 0 | |
hippoapi/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hippoapi.settings.dev')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
config.go
|
package requestgateway
import (
"context"
"log"
"os"
"strconv"
lbcf "github.com/lidstromberg/config"
)
var (
//EnvDebugOn controls verbose logging
EnvDebugOn bool
)
//preflight config checks
func preflight(ctx context.Context, bc lbcf.ConfigSetting) {
log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds | log.LUTC)
log.Println("Started Gateway preflight..")
//get the session config and apply it to the config
bc.LoadConfigMap(ctx, preflightConfigLoader())
//then check that we have everything we need
if bc.GetConfigValue(ctx, "EnvDebugOn") == "" {
log.Fatal("Could not parse environment variable EnvDebugOn")
}
if bc.GetConfigValue(ctx, "EnvGtwaySqlType") == "" {
log.Fatal("Could not parse environment variable EnvGtwaySqlType")
}
if bc.GetConfigValue(ctx, "EnvGtwaySqlConnection") == "" {
log.Fatal("Could not parse environment variable EnvGtwaySqlConnection")
}
//set the debug value
constlog, err := strconv.ParseBool(bc.GetConfigValue(ctx, "EnvDebugOn"))
if err != nil {
log.Fatal("Could not parse environment variable EnvDebugOn")
}
EnvDebugOn = constlog
log.Println("..Finished Gateway preflight.")
}
//preflightConfigLoader loads the config vars
func preflightConfigLoader() map[string]string {
cfm := make(map[string]string)
//EnvDebugOn controls verbose logging
cfm["EnvDebugOn"] = os.Getenv("GTWAYPG_DEBUGON")
//EnvGtwaySqlType is the driver type
cfm["EnvGtwaySqlType"] = os.Getenv("GTWAYPG_SQLDST")
//EnvGtwaySqlConnection is the connection string
cfm["EnvGtwaySqlConnection"] = os.Getenv("GTWAYPG_SQLCNX")
if cfm["EnvDebugOn"] == "" {
log.Fatal("Could not parse environment variable EnvDebugOn")
}
if cfm["EnvGtwaySqlType"] == "" {
log.Fatal("Could not parse environment variable EnvGtwaySqlType")
}
if cfm["EnvGtwaySqlConnection"] == "" {
log.Fatal("Could not parse environment variable EnvGtwaySqlConnection")
}
return cfm
}
|
[
"\"GTWAYPG_DEBUGON\"",
"\"GTWAYPG_SQLDST\"",
"\"GTWAYPG_SQLCNX\""
] |
[] |
[
"GTWAYPG_SQLDST",
"GTWAYPG_SQLCNX",
"GTWAYPG_DEBUGON"
] |
[]
|
["GTWAYPG_SQLDST", "GTWAYPG_SQLCNX", "GTWAYPG_DEBUGON"]
|
go
| 3 | 0 | |
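Below is a hedged, standalone sketch of the fail-fast pattern used by preflightConfigLoader above; the mustGetenv helper is invented for this example and is not part of the requestgateway package.
package main
import (
	"log"
	"os"
)
// mustGetenv mirrors the preflightConfigLoader pattern: read a variable and
// fail fast when it is empty. (Helper invented for this sketch.)
func mustGetenv(key string) string {
	v := os.Getenv(key)
	if v == "" {
		log.Fatalf("missing required environment variable %s", key)
	}
	return v
}
func main() {
	cfm := map[string]string{
		"EnvDebugOn":            mustGetenv("GTWAYPG_DEBUGON"),
		"EnvGtwaySqlType":       mustGetenv("GTWAYPG_SQLDST"),
		"EnvGtwaySqlConnection": mustGetenv("GTWAYPG_SQLCNX"),
	}
	log.Printf("loaded %d gateway settings", len(cfm))
}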
translate/train_model.py
|
import os
import cv2
from skimage import io
import sys
# import keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import argparse
from natsort import natsorted
# sys.path.append('../')
import segmentation_models_v1 as sm
from segmentation_models_v1 import Unet, Linknet, PSPNet, FPN, AtUnet, ResUnet
sm.set_framework('tf.keras')
from helper_function import plot_history_flu2, save_phase_fl_history, plot_flu_prediction, plot_set_prediction
from helper_function import save_history_for_callback, plot_history_for_callback
from helper_function import precision, recall, f1_score, calculate_psnr, calculate_pearsonr
from sklearn.metrics import confusion_matrix
def str2bool(value):
return value.lower() == 'true'
def generate_folder(folder_name):
if not os.path.exists(folder_name):
os.system('mkdir -p {}'.format(folder_name))
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=str, default = '2')
parser.add_argument("--docker", type=str2bool, default = True)
parser.add_argument("--net_type", type=str, default = 'Unet') #Unet, Linknet, PSPNet, FPN
parser.add_argument("--backbone", type=str, default = 'efficientnetb0')
parser.add_argument("--dataset", type=str, default = 'neuron_float')
parser.add_argument("--subset", type=str, default = 'train')
parser.add_argument("--epoch", type=int, default = 10)
parser.add_argument("--run", type=int, default = 1)
parser.add_argument("--dim", type=int, default = 512)
parser.add_argument("--ch_in", type=int, default = 3)
parser.add_argument("--ch_out", type=int, default = 3)
parser.add_argument("--fl_ch", type=str, default = 'fl12')
parser.add_argument("--rot", type=float, default = 0)
parser.add_argument("--scale", type=float, default = 100)
parser.add_argument("--train", type=int, default = None)
parser.add_argument("--act_fun", type=str, default = 'relu')
parser.add_argument("--loss", type=str, default = 'mse')
parser.add_argument("--batch_size", type=int, default = 6)
parser.add_argument("--lr", type=float, default = 5e-4)
parser.add_argument("--decay", type=float, default = 0.8)
parser.add_argument("--delta", type=float, default = 10)
parser.add_argument("--best_select", type=str2bool, default = True) ## cancel the selection of best model
parser.add_argument("--pre_train", type=str2bool, default = True)
args = parser.parse_args()
print(args)
model_name = 'Cor-FL1_FL2-net-{}-bone-{}-pre-{}-epoch-{}-batch-{}-lr-{}-dim-{}-train-{}-rot-{}-set-{}-subset-{}-loss-{}-act-{}-scale-{}-decay-{}-delta-{}-chi-{}-cho-{}-chf-{}-bselect-{}-run-{}'.format(args.net_type, args.backbone, args.pre_train,\
args.epoch, args.batch_size, args.lr, args.dim, args.train, args.rot, args.dataset, args.subset, args.loss, args.act_fun, args.scale, args.decay, args.delta, args.ch_in, args.ch_out, args.fl_ch, args.best_select, args.run)
print(model_name)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
DATA_DIR = '/data/datasets/{}'.format(args.dataset) if args.docker else './datasets/{}'.format(args.dataset)
train_dim = args.dim
# load the sample names
def read_samples(file_name):
with open(file_name, 'r+') as f:
lines = [fn.strip() for fn in f.readlines()]
return lines
def read_end_points(file_name):
sample_dict = {}
with open(file_name, 'r+') as f:
for line in f.readlines():
splits = line.strip().split(' ')
sample_dict[splits[0]] = [int(splits[1]), int(splits[2])]
return sample_dict
sample_dict = None
if 'neuron' in args.dataset:
sample_dict = read_end_points(os.path.join(DATA_DIR, 'range.txt'))
train_fns = read_samples(os.path.join(DATA_DIR, 'train.txt'))
test_fns = read_samples(os.path.join(DATA_DIR, 'test.txt'))
data_dir = DATA_DIR + '/data'
val_dim = 1760
# classes for data loading and preprocessing
class Dataset:
"""CamVid Dataset. Read images, apply augmentation and preprocessing transformations.
Args:
images_dir (str): path to images folder
masks_dir (str): path to segmentation masks folder
class_values (list): values of classes to extract from segmentation mask
augmentation (albumentations.Compose): data transfromation pipeline
(e.g. flip, scale, etc.)
preprocessing (albumentations.Compose): data preprocessing
(e.g. noralization, shape manipulation, etc.)
"""
def __init__(
self,
data_dir,
sample_names,
end_point_dict,
fl_ch = None,
scale = 1.0,
channels = [3,3],
augmentation=None,
preprocessing=None,
):
self.images_fps = []
self.masks1_fps = []
self.masks2_fps = []
for sn in sample_names:
sample_tag = 'T-' + sn.split('_')[3][5:]
if end_point_dict:
end1, end2 = end_point_dict[sample_tag]
else:
end1, end2 = 0, np.inf
fns = os.listdir(os.path.join(data_dir, sn, 'phase'))
for fn in fns:
if end1 <= int(fn.split('.')[0].split('-')[-1]) <= end2:
self.images_fps.append(os.path.join(data_dir, sn, 'phase', fn))
self.masks1_fps.append(os.path.join(data_dir, sn, 'fl1', fn))
self.masks2_fps.append(os.path.join(data_dir, sn, 'fl2', fn))
self.ids = self.images_fps
print('Load files: image {}, fl1: {}, fl2:{}'.format(len(self.images_fps),len(self.masks1_fps),len(self.masks2_fps)))
self.scale = scale
self.augmentation = augmentation
self.preprocessing = preprocessing
self.channels = channels
self.fl_ch = fl_ch
def __getitem__(self, i):
# load image and fl1 or fl2 or both
image = np.load(self.images_fps[i]) * 255.
if self.fl_ch == 'fl1':
mask = np.load(self.masks1_fps[i])
mask = mask * self.scale
elif self.fl_ch == 'fl2':
mask = np.load(self.masks2_fps[i])
mask = mask * self.scale
elif self.fl_ch == 'fl12':
mask1 = np.load(self.masks1_fps[i])
mask2 = np.load(self.masks2_fps[i])
mask = np.stack([mask1[:,:,1], mask2[:,:,1]], axis = -1)
mask = mask*self.scale
# decide the input and output channels
if self.channels[0] == 1:
image[:,:,0], image[:,:,2] = image[:,:,1], image[:,:,1]
elif self.channels[0] == 2:
image[:,:,2] = image[:,:,1]
if self.channels[1] == 1 and not (self.fl_ch=='fl12'):
mask = mask[:,:,1:2]
# apply augmentations
if self.augmentation:
sample = self.augmentation(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
# apply preprocessing
if self.preprocessing:
sample = self.preprocessing(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
return image, mask
def __len__(self):
return len(self.ids)
class Dataloder(tf.keras.utils.Sequence):
"""Load data from dataset and form batches
Args:
dataset: instance of Dataset class for image loading and preprocessing.
        batch_size: Integer number of images in batch.
shuffle: Boolean, if `True` shuffle image indexes each epoch.
"""
def __init__(self, dataset, batch_size=1, shuffle=False):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
self.indexes = np.arange(len(dataset))
self.on_epoch_end()
def __getitem__(self, i):
# collect batch data
start = i * self.batch_size
stop = (i + 1) * self.batch_size
data = []
for j in range(start, stop):
data.append(self.dataset[j])
# transpose list of lists
batch = [np.stack(samples, axis=0) for samples in zip(*data)]
return (batch[0], batch[1])
def __len__(self):
"""Denotes the number of batches per epoch"""
return len(self.indexes) // self.batch_size
def on_epoch_end(self):
"""Callback function to shuffle indexes each epoch"""
if self.shuffle:
self.indexes = np.random.permutation(self.indexes)
import albumentations as A
def round_clip_0_1(x, **kwargs):
return x.round().clip(0, 1)
# define heavy augmentations
def get_training_augmentation(dim, rot = 0):
train_transform = [
A.HorizontalFlip(p=0.5),
A.PadIfNeeded(min_height=dim, min_width=dim, always_apply=True, border_mode=0),
A.RandomCrop(height=dim, width=dim, always_apply=True),]
return A.Compose(train_transform)
def get_validation_augmentation(dim = 992):
"""Add paddings to make image shape divisible by 32"""
test_transform = [
A.PadIfNeeded(dim, dim)
]
return A.Compose(test_transform)
def get_preprocessing(preprocessing_fn):
"""Construct preprocessing transform
Args:
        preprocessing_fn (callable): data normalization function
(can be specific for each pretrained neural network)
Return:
transform: albumentations.Compose
"""
_transform = [
A.Lambda(image=preprocessing_fn),
]
return A.Compose(_transform)
## create models
BACKBONE = args.backbone
BATCH_SIZE = args.batch_size
LR = args.lr
EPOCHS = args.epoch
# processing configuration
preprocess_input = sm.get_preprocessing(BACKBONE)
# define network parameters
n_classes = args.ch_out if args.fl_ch == 'fl1' or args.fl_ch == 'fl2' else 2
activation = '{}'.format(args.act_fun)
#create model
net_func = globals()[args.net_type]
encoder_weights='imagenet' if args.pre_train else None
model = net_func(BACKBONE, encoder_weights=encoder_weights, classes=n_classes, activation=activation)
# define optimizer
optim = tf.keras.optimizers.Adam(LR)
if args.loss == 'mse':
loss = tf.keras.losses.MSE
elif args.loss == 'mae':
loss = tf.keras.losses.MAE
elif args.loss == 'huber':
loss = tf.keras.losses.Huber(reduction=tf.keras.losses.Reduction.NONE)
from tensorflow.keras import backend as K
def pearson(y_true, y_pred):
x = y_true
y = y_pred
mx = K.mean(x)
my = K.mean(y)
xm, ym = x-mx, y-my
r_num = K.sum(tf.multiply(xm,ym))
r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
r = r_num / r_den
return r
metrics = [sm.metrics.PSNR(max_val=args.scale), pearson]
# compile keras model with defined optimizer, loss and metrics
model.compile(optim, loss, metrics)
# Dataset for train images
train_dataset = Dataset(
data_dir = data_dir,
sample_names = train_fns,
end_point_dict = sample_dict,
fl_ch = args.fl_ch,
channels = [args.ch_in, args.ch_out],
scale = args.scale,
augmentation=get_training_augmentation(train_dim, args.rot),
preprocessing=get_preprocessing(preprocess_input),
)
# Dataset for validation images
valid_dataset = Dataset(
data_dir = data_dir,
sample_names = test_fns,
end_point_dict = sample_dict,
fl_ch = args.fl_ch,
scale = args.scale,
channels = [args.ch_in, args.ch_out],
augmentation=get_validation_augmentation(val_dim),
preprocessing=get_preprocessing(preprocess_input),
)
train_dataloader = Dataloder(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_dataloader = Dataloder(valid_dataset, batch_size=1, shuffle=False)
print(train_dataloader[0][0].shape)
print(train_dataloader[0][1].shape)
print(train_dataloader[0][1].min(), train_dataloader[0][1].max())
# check shapes for errors
assert train_dataloader[0][0].shape == (BATCH_SIZE, train_dim, train_dim, 3)
assert train_dataloader[0][1].shape == (BATCH_SIZE, train_dim, train_dim, n_classes)
model_folder = '/data/2d_models/{}/{}'.format(args.dataset, model_name) if args.docker else './2d_models/{}/{}'.format(args.dataset, model_name)
generate_folder(model_folder)
def concat_tile(im_list_2d):
return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])
def save_images(file_name, vols):
vols = vols[:,:,:,1] if vols.shape[-1] >= 2 else vols[:,:,:,0]
shp = vols.shape
ls, lx, ly = shp
sx, sy = int(lx/128), int(ly/128)
vols = vols[:,::sx,::sy]
slice_list, rows = [], []
for si in range(vols.shape[0]):
slice = vols[si,:,:]
slice[0, :] = 255
slice[:, 0] = 255
slice[:, -1] = 255
slice[-1, :] = 255
rows.append(slice)
if si%8 == 7 and not si == vols.shape[0]-1:
slice_list.append(rows)
rows = []
save_img = concat_tile(slice_list)
cv2.imwrite(file_name, save_img)
class HistoryPrintCallback(tf.keras.callbacks.Callback):
def __init__(self):
super(HistoryPrintCallback, self).__init__()
self.history = {}
def on_epoch_end(self, epoch, logs=None):
if logs:
for key in logs.keys():
if epoch == 0:
self.history[key] = []
self.history[key].append(logs[key])
if epoch%5 == 0:
plot_history_for_callback(model_folder+'/train_history.png', self.history)
save_history_for_callback(model_folder, self.history)
img_vols, gt_vols, pr_vols = [],[],[]
for i in range(0, len(valid_dataset),int(len(valid_dataset)/64)):
img_vols.append(np.load(valid_dataloader.dataset.images_fps[i]))
gt_vols.append(valid_dataloader[i][1])
pr_vols.append(self.model.predict(valid_dataloader[i]))
img_vols = np.stack(img_vols, axis = 0)
gt_vols = np.concatenate(gt_vols, axis = 0)
pr_vols = np.concatenate(pr_vols, axis = 0)
save_images(model_folder+'/epoch-{}-img.png'.format(epoch), np.uint8(img_vols))
save_images(model_folder+'/epoch-{}-gt.png'.format(epoch), gt_vols/args.scale*255)
save_images(model_folder+'/epoch-{}-pr.png'.format(epoch), pr_vols/args.scale*255)
if not args.best_select:
callbacks = [
tf.keras.callbacks.ModelCheckpoint(model_folder+'/weights_{epoch:02d}.h5', save_weights_only=True, save_best_only=False, period=5),
tf.keras.callbacks.ReduceLROnPlateau(factor=args.decay),
HistoryPrintCallback(),
]
else:
callbacks = [
tf.keras.callbacks.ModelCheckpoint(model_folder+'/best_model-{epoch:03d}.h5', monitor='val_pearson', save_weights_only=True, save_best_only=True, mode='max'),
tf.keras.callbacks.ReduceLROnPlateau(factor=args.decay),
HistoryPrintCallback(),
]
# train model
history = model.fit_generator(
train_dataloader,
steps_per_epoch=len(train_dataloader),
epochs=EPOCHS,
callbacks=callbacks,
validation_data=valid_dataloader,
validation_steps=len(valid_dataloader),
)
# evaluate model
test_dataset = Dataset(
	data_dir = data_dir,
	sample_names = test_fns,
	end_point_dict = sample_dict,
fl_ch = args.fl_ch,
channels = [args.ch_in, args.ch_out],
scale = args.scale,
augmentation=get_validation_augmentation(val_dim),
preprocessing=get_preprocessing(preprocess_input),
)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', '博客.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hamster.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/logs/input/journald/tailer.go
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-2020 Datadog, Inc.
// +build systemd
package journald
import (
"encoding/json"
"fmt"
"io"
"time"
"github.com/coreos/go-systemd/sdjournal"
"github.com/StackVista/stackstate-agent/pkg/logs/config"
"github.com/StackVista/stackstate-agent/pkg/logs/message"
"github.com/StackVista/stackstate-agent/pkg/util/log"
)
// defaultWaitDuration is how long we wait for a new journal entry before polling again
const (
defaultWaitDuration = 1 * time.Second
defaultApplicationName = "docker"
)
// Tailer collects logs from a journal.
type Tailer struct {
source *config.LogSource
outputChan chan *message.Message
journal *sdjournal.Journal
blacklist map[string]bool
stop chan struct{}
done chan struct{}
}
// NewTailer returns a new tailer.
func NewTailer(source *config.LogSource, outputChan chan *message.Message) *Tailer {
return &Tailer{
source: source,
outputChan: outputChan,
stop: make(chan struct{}, 1),
done: make(chan struct{}, 1),
}
}
// Start starts tailing the journal from a given offset.
func (t *Tailer) Start(cursor string) error {
if err := t.setup(); err != nil {
t.source.Status.Error(err)
return err
}
if err := t.seek(cursor); err != nil {
t.source.Status.Error(err)
return err
}
t.source.Status.Success()
t.source.AddInput(t.journalPath())
log.Info("Start tailing journal ", t.journalPath())
go t.tail()
return nil
}
// Stop stops the tailer
func (t *Tailer) Stop() {
log.Info("Stop tailing journal ", t.journalPath())
t.stop <- struct{}{}
t.source.RemoveInput(t.journalPath())
<-t.done
}
// setup configures the tailer
func (t *Tailer) setup() error {
config := t.source.Config
var err error
t.initializeTagger()
if config.Path == "" {
// open the default journal
t.journal, err = sdjournal.NewJournal()
} else {
t.journal, err = sdjournal.NewJournalFromDir(config.Path)
}
if err != nil {
return err
}
for _, unit := range config.IncludeUnits {
// add filters to collect only the logs of the units defined in the configuration,
// if no units are defined, collect all the logs of the journal by default.
match := sdjournal.SD_JOURNAL_FIELD_SYSTEMD_UNIT + "=" + unit
err := t.journal.AddMatch(match)
if err != nil {
return fmt.Errorf("could not add filter %s: %s", match, err)
}
}
t.blacklist = make(map[string]bool)
for _, unit := range config.ExcludeUnits {
// add filters to drop all the logs related to units to exclude.
t.blacklist[unit] = true
}
return nil
}
// seek seeks to the cursor if it is not empty or the end of the journal,
// returns an error if the operation failed.
func (t *Tailer) seek(cursor string) error {
if cursor != "" {
err := t.journal.SeekCursor(cursor)
if err != nil {
return err
}
// must skip one entry since the cursor points to the last committed one.
_, err = t.journal.NextSkip(1)
return err
}
return t.journal.SeekTail()
}
// tail tails the journal until a message stop is received.
func (t *Tailer) tail() {
defer func() {
t.journal.Close()
t.done <- struct{}{}
}()
for {
select {
case <-t.stop:
// stop tailing journal
return
default:
n, err := t.journal.Next()
if err != nil && err != io.EOF {
err := fmt.Errorf("cant't tail journal %s: %s", t.journalPath(), err)
t.source.Status.Error(err)
log.Error(err)
return
}
if n < 1 {
// no new entry
t.journal.Wait(defaultWaitDuration)
continue
}
entry, err := t.journal.GetEntry()
if err != nil {
log.Warnf("Could not retrieve journal entry: %s", err)
continue
}
if t.shouldDrop(entry) {
continue
}
t.outputChan <- t.toMessage(entry)
}
}
}
// shouldDrop returns true if the entry should be dropped,
// returns false otherwise.
func (t *Tailer) shouldDrop(entry *sdjournal.JournalEntry) bool {
unit, exists := entry.Fields[sdjournal.SD_JOURNAL_FIELD_SYSTEMD_UNIT]
if !exists {
return false
}
if _, blacklisted := t.blacklist[unit]; blacklisted {
// drop the entry
return true
}
return false
}
// toMessage transforms a journal entry into a message.
// A journal entry has different fields that may vary depending on its nature,
// for more information, see https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html.
func (t *Tailer) toMessage(entry *sdjournal.JournalEntry) *message.Message {
return message.NewMessage(t.getContent(entry), t.getOrigin(entry), t.getStatus(entry))
}
// getContent returns all the fields of the entry as a json-string,
// remapping "MESSAGE" into "message" and bundling all the other keys in a "journald" attribute.
// ex:
// * journal-entry:
// {
// "MESSAGE": "foo",
// "_SYSTEMD_UNIT": "foo",
// ...
// }
// * message-content:
// {
// "message": "foo",
// "journald": {
// "_SYSTEMD_UNIT": "foo",
// ...
// }
// }
func (t *Tailer) getContent(entry *sdjournal.JournalEntry) []byte {
payload := make(map[string]interface{})
fields := entry.Fields
if message, exists := fields[sdjournal.SD_JOURNAL_FIELD_MESSAGE]; exists {
payload["message"] = message
delete(fields, sdjournal.SD_JOURNAL_FIELD_MESSAGE)
}
payload["journald"] = fields
content, err := json.Marshal(payload)
if err != nil {
// ensure the message has some content if the json encoding failed
value, _ := entry.Fields[sdjournal.SD_JOURNAL_FIELD_MESSAGE]
content = []byte(value)
}
return content
}
// getOrigin returns the message origin computed from the journal entry
func (t *Tailer) getOrigin(entry *sdjournal.JournalEntry) *message.Origin {
origin := message.NewOrigin(t.source)
origin.Identifier = t.Identifier()
origin.Offset, _ = t.journal.GetCursor()
// set the service and the source attributes of the message,
// those values are still overridden by the integration config when defined
tags := t.getTags(entry)
applicationName := t.getApplicationName(entry, tags)
origin.SetSource(applicationName)
origin.SetService(applicationName)
origin.SetTags(tags)
return origin
}
// applicationKeys represents all the valid attributes used to extract the value of the application name of a journal entry.
var applicationKeys = []string{
sdjournal.SD_JOURNAL_FIELD_SYSLOG_IDENTIFIER, // "SYSLOG_IDENTIFIER"
sdjournal.SD_JOURNAL_FIELD_SYSTEMD_UNIT, // "_SYSTEMD_UNIT"
sdjournal.SD_JOURNAL_FIELD_COMM, // "_COMM"
}
// getApplicationName returns the name of the application from where the entry is from.
func (t *Tailer) getApplicationName(entry *sdjournal.JournalEntry, tags []string) string {
if t.isContainerEntry(entry) {
if t.source.Config.ContainerMode {
if shortName, found := getDockerImageShortName(t.getContainerID(entry), tags); found {
return shortName
}
}
return defaultApplicationName
}
for _, key := range applicationKeys {
if value, exists := entry.Fields[key]; exists {
return value
}
}
return ""
}
// getTags returns a list of tags matching with the journal entry.
func (t *Tailer) getTags(entry *sdjournal.JournalEntry) []string {
var tags []string
if t.isContainerEntry(entry) {
tags = t.getContainerTags(t.getContainerID(entry))
}
return tags
}
// priorityStatusMapping represents the 1:1 mapping between journal entry priorities and statuses.
var priorityStatusMapping = map[string]string{
"0": message.StatusEmergency,
"1": message.StatusAlert,
"2": message.StatusCritical,
"3": message.StatusError,
"4": message.StatusWarning,
"5": message.StatusNotice,
"6": message.StatusInfo,
"7": message.StatusDebug,
}
// getStatus returns the status of the journal entry,
// returns "info" by default if no valid value is found.
func (t *Tailer) getStatus(entry *sdjournal.JournalEntry) string {
priority, exists := entry.Fields[sdjournal.SD_JOURNAL_FIELD_PRIORITY]
if !exists {
return message.StatusInfo
}
status, exists := priorityStatusMapping[priority]
if !exists {
return message.StatusInfo
}
return status
}
// journaldIntegration represents the name of the integration,
// it's used to override the source of the message and as a fingerprint to store the journal cursor.
const journaldIntegration = "journald"
// Identifier returns the unique identifier of the current journal being tailed.
func (t *Tailer) Identifier() string {
return journaldIntegration + ":" + t.journalPath()
}
// journalPath returns the path of the journal
func (t *Tailer) journalPath() string {
if t.source.Config.Path != "" {
return t.source.Config.Path
}
return "default"
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
vendor/github.com/containers/podman/v3/libpod/networking_linux.go
|
// +build linux
package libpod
import (
"crypto/rand"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"syscall"
"time"
cnitypes "github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/events"
"github.com/containers/podman/v3/libpod/network"
"github.com/containers/podman/v3/pkg/errorhandling"
"github.com/containers/podman/v3/pkg/namespaces"
"github.com/containers/podman/v3/pkg/netns"
"github.com/containers/podman/v3/pkg/resolvconf"
"github.com/containers/podman/v3/pkg/rootless"
"github.com/containers/podman/v3/pkg/util"
"github.com/containers/storage/pkg/lockfile"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
)
const (
// slirp4netnsMTU the default MTU override
slirp4netnsMTU = 65520
// default slirp4ns subnet
defaultSlirp4netnsSubnet = "10.0.2.0/24"
// rootlessCNINSName is the file name for the rootless network namespace bind mount
rootlessCNINSName = "rootless-cni-ns"
// persistentCNIDir is the directory where the CNI files are stored
persistentCNIDir = "/var/lib/cni"
)
// Get an OCICNI network config
func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP, staticMAC net.HardwareAddr, netDescriptions ContainerNetworkDescriptions) ocicni.PodNetwork {
var networkKey string
if len(networks) > 0 {
// This is inconsistent for >1 ctrNetwork, but it's probably the
// best we can do.
networkKey = networks[0]
} else {
networkKey = r.netPlugin.GetDefaultNetworkName()
}
ctrNetwork := ocicni.PodNetwork{
Name: name,
Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces
ID: id,
NetNS: nsPath,
RuntimeConfig: map[string]ocicni.RuntimeConfig{
networkKey: {PortMappings: ports},
},
}
// If we have extra networks, add them
if len(networks) > 0 {
ctrNetwork.Networks = make([]ocicni.NetAttachment, len(networks))
for i, netName := range networks {
ctrNetwork.Networks[i].Name = netName
if eth, exists := netDescriptions.getInterfaceByName(netName); exists {
ctrNetwork.Networks[i].Ifname = eth
}
}
}
if staticIP != nil || staticMAC != nil {
// For static IP or MAC, we need to populate networks even if
// it's just the default.
if len(networks) == 0 {
// If len(networks) == 0 this is guaranteed to be the
// default ctrNetwork.
ctrNetwork.Networks = []ocicni.NetAttachment{{Name: networkKey}}
}
var rt ocicni.RuntimeConfig = ocicni.RuntimeConfig{PortMappings: ports}
if staticIP != nil {
rt.IP = staticIP.String()
}
if staticMAC != nil {
rt.MAC = staticMAC.String()
}
ctrNetwork.RuntimeConfig = map[string]ocicni.RuntimeConfig{
networkKey: rt,
}
}
return ctrNetwork
}
type RootlessCNI struct {
ns ns.NetNS
dir string
Lock lockfile.Locker
}
// getPath will join the given path to the rootless cni dir
func (r *RootlessCNI) getPath(path string) string {
return filepath.Join(r.dir, path)
}
// Do - run the given function in the rootless cni ns.
// It does not lock the rootlessCNI lock, the caller
// should only lock when needed, e.g. for cni operations.
func (r *RootlessCNI) Do(toRun func() error) error {
err := r.ns.Do(func(_ ns.NetNS) error {
// Before we can run the given function,
// we have to setup all mounts correctly.
// The order of the mounts is IMPORTANT.
// The idea of the extra mount ns is to make /run and /var/lib/cni writeable
// for the cni plugins but not affecting the podman user namespace.
// Because the plugins also need access to XDG_RUNTIME_DIR/netns some special setup is needed.
// The following bind mounts are needed
// 1. XDG_RUNTIME_DIR/netns -> XDG_RUNTIME_DIR/rootless-cni/XDG_RUNTIME_DIR/netns
// 2. /run/systemd -> XDG_RUNTIME_DIR/rootless-cni/run/systemd (only if it exists)
// 3. XDG_RUNTIME_DIR/rootless-cni/resolv.conf -> /etc/resolv.conf or XDG_RUNTIME_DIR/rootless-cni/run/symlink/target
		// 4. XDG_RUNTIME_DIR/rootless-cni/var/lib/cni -> /var/lib/cni (if /var/lib/cni does not exist use the parent dir)
// 5. XDG_RUNTIME_DIR/rootless-cni/run -> /run
// Create a new mount namespace,
// this must happen inside the netns thread.
err := unix.Unshare(unix.CLONE_NEWNS)
if err != nil {
return errors.Wrapf(err, "cannot create a new mount namespace")
}
netNsDir, err := netns.GetNSRunDir()
if err != nil {
return errors.Wrap(err, "could not get network namespace directory")
}
newNetNsDir := r.getPath(netNsDir)
// 1. Mount the netns into the new run to keep them accessible.
// Otherwise cni setup will fail because it cannot access the netns files.
err = unix.Mount(netNsDir, newNetNsDir, "none", unix.MS_BIND|unix.MS_SHARED|unix.MS_REC, "")
if err != nil {
return errors.Wrap(err, "failed to mount netns directory for rootless cni")
}
// 2. Also keep /run/systemd if it exists.
// Many files are symlinked into this dir, for example /dev/log.
runSystemd := "/run/systemd"
_, err = os.Stat(runSystemd)
if err == nil {
newRunSystemd := r.getPath(runSystemd)
err = unix.Mount(runSystemd, newRunSystemd, "none", unix.MS_BIND|unix.MS_REC, "")
if err != nil {
return errors.Wrap(err, "failed to mount /run/systemd directory for rootless cni")
}
}
// 3. On some distros /etc/resolv.conf is symlinked to somewhere under /run.
// Because the kernel will follow the symlink before mounting, it is not
// possible to mount a file at /etc/resolv.conf. We have to ensure that
// the link target will be available in the mount ns.
// see: https://github.com/containers/podman/issues/10855
resolvePath := "/etc/resolv.conf"
resolvePath, err = filepath.EvalSymlinks(resolvePath)
if err != nil {
return err
}
logrus.Debugf("The actual path of /etc/resolv.conf on the host is %q", resolvePath)
// When /etc/resolv.conf on the host is a symlink to /run/systemd/resolve/stub-resolv.conf,
// we have to mount an empty filesystem on /run/systemd/resolve in the child namespace,
// so as to isolate the directory from the host mount namespace.
//
// Otherwise our bind-mount for /run/systemd/resolve/stub-resolv.conf is unmounted
// when systemd-resolved unlinks and recreates /run/systemd/resolve/stub-resolv.conf on the host.
// see: https://github.com/containers/podman/issues/10929
if strings.HasPrefix(resolvePath, "/run/systemd/resolve/") {
rsr := r.getPath("/run/systemd/resolve")
err = unix.Mount("", rsr, "tmpfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV, "")
if err != nil {
return errors.Wrapf(err, "failed to mount tmpfs on %q for rootless cni", rsr)
}
}
if strings.HasPrefix(resolvePath, "/run/") {
resolvePath = r.getPath(resolvePath)
err = os.MkdirAll(filepath.Dir(resolvePath), 0700)
if err != nil {
return errors.Wrap(err, "failed to create rootless-cni resolv.conf directory")
}
// we want to bind mount on this file so we have to create the file first
_, err = os.OpenFile(resolvePath, os.O_CREATE|os.O_RDONLY, 0700)
if err != nil {
return errors.Wrap(err, "failed to create rootless-cni resolv.conf file")
}
}
// mount resolv.conf to make use of the host dns
err = unix.Mount(r.getPath("resolv.conf"), resolvePath, "none", unix.MS_BIND, "")
if err != nil {
return errors.Wrap(err, "failed to mount resolv.conf for rootless cni")
}
// 4. CNI plugins need access to /var/lib/cni and /run
varDir := ""
varTarget := persistentCNIDir
// we can only mount to a target dir which exists, check /var/lib/cni recursively
// while we could always use /var there are cases where a user might store the cni
// configs under /var/custom and this would break
for {
if _, err := os.Stat(varTarget); err == nil {
varDir = r.getPath(varTarget)
break
}
varTarget = filepath.Dir(varTarget)
if varTarget == "/" {
break
}
}
if varDir == "" {
return errors.New("failed to stat /var directory")
}
// make sure to mount var first
err = unix.Mount(varDir, varTarget, "none", unix.MS_BIND, "")
if err != nil {
return errors.Wrapf(err, "failed to mount %s for rootless cni", varTarget)
}
// 5. Mount the new prepared run dir to /run, it has to be recursive to keep the other bind mounts.
runDir := r.getPath("run")
err = unix.Mount(runDir, "/run", "none", unix.MS_BIND|unix.MS_REC, "")
if err != nil {
return errors.Wrap(err, "failed to mount /run for rootless cni")
}
// run the given function in the correct namespace
err = toRun()
return err
})
return err
}
// Cleanup the rootless cni namespace if needed.
// It checks if we have running containers with the bridge network mode.
// Cleanup() will try to lock RootlessCNI, therefore you have to call it with the lock unlocked.
func (r *RootlessCNI) Cleanup(runtime *Runtime) error {
_, err := os.Stat(r.dir)
if os.IsNotExist(err) {
		// the directory does not exist, no need for cleanup
return nil
}
r.Lock.Lock()
defer r.Lock.Unlock()
running := func(c *Container) bool {
// we cannot use c.state() because it will try to lock the container
// using c.state.State directly should be good enough for this use case
state := c.state.State
return state == define.ContainerStateRunning
}
ctrs, err := runtime.GetContainersWithoutLock(running)
if err != nil {
return err
}
cleanup := true
for _, ctr := range ctrs {
if ctr.config.NetMode.IsBridge() {
cleanup = false
}
}
if cleanup {
		// make sure the cni results (cache) dir is empty
// libpod instances with another root dir are not covered by the check above
// this allows several libpod instances to use the same rootless cni ns
contents, err := ioutil.ReadDir(r.getPath("var/lib/cni/results"))
if (err == nil && len(contents) == 0) || os.IsNotExist(err) {
logrus.Debug("Cleaning up rootless cni namespace")
err = netns.UnmountNS(r.ns)
if err != nil {
return err
}
// make the following errors not fatal
err = r.ns.Close()
if err != nil {
logrus.Error(err)
}
b, err := ioutil.ReadFile(r.getPath("rootless-cni-slirp4netns.pid"))
if err == nil {
var i int
i, err = strconv.Atoi(string(b))
if err == nil {
// kill the slirp process so we do not leak it
err = syscall.Kill(i, syscall.SIGTERM)
}
}
if err != nil {
logrus.Errorf("failed to kill slirp4netns process: %s", err)
}
err = os.RemoveAll(r.dir)
if err != nil {
logrus.Error(err)
}
} else if err != nil && !os.IsNotExist(err) {
logrus.Errorf("could not read rootless cni directory, skipping cleanup: %s", err)
}
}
return nil
}
// GetRootlessCNINetNs returns the rootless cni object. If new is set to true
// the rootless cni namespace will be created if it does not exist already.
// If called as root it always returns nil.
// On success the returned RootlessCNI lock is locked and must be unlocked by the caller.
func (r *Runtime) GetRootlessCNINetNs(new bool) (*RootlessCNI, error) {
if !rootless.IsRootless() {
return nil, nil
}
var rootlessCNINS *RootlessCNI
runDir, err := util.GetRuntimeDir()
if err != nil {
return nil, err
}
lfile := filepath.Join(runDir, "rootless-cni.lock")
lock, err := lockfile.GetLockfile(lfile)
if err != nil {
return nil, errors.Wrap(err, "failed to get rootless-cni lockfile")
}
lock.Lock()
defer func() {
// In case of an error (early exit) rootlessCNINS will be nil.
// Make sure to unlock otherwise we could deadlock.
if rootlessCNINS == nil {
lock.Unlock()
}
}()
cniDir := filepath.Join(runDir, "rootless-cni")
err = os.MkdirAll(cniDir, 0700)
if err != nil {
return nil, errors.Wrap(err, "could not create rootless-cni directory")
}
nsDir, err := netns.GetNSRunDir()
if err != nil {
return nil, err
}
path := filepath.Join(nsDir, rootlessCNINSName)
ns, err := ns.GetNS(path)
if err != nil {
if !new {
			// return an error if we could not get the namespace and should not create one
return nil, errors.Wrap(err, "error getting rootless cni network namespace")
}
// create a new namespace
logrus.Debug("creating rootless cni network namespace")
ns, err = netns.NewNSWithName(rootlessCNINSName)
if err != nil {
return nil, errors.Wrap(err, "error creating rootless cni network namespace")
}
// setup slirp4netns here
path := r.config.Engine.NetworkCmdPath
if path == "" {
var err error
path, err = exec.LookPath("slirp4netns")
if err != nil {
return nil, err
}
}
syncR, syncW, err := os.Pipe()
if err != nil {
return nil, errors.Wrapf(err, "failed to open pipe")
}
defer errorhandling.CloseQuiet(syncR)
defer errorhandling.CloseQuiet(syncW)
netOptions, err := parseSlirp4netnsNetworkOptions(r, nil)
if err != nil {
return nil, err
}
slirpFeatures, err := checkSlirpFlags(path)
if err != nil {
return nil, errors.Wrapf(err, "error checking slirp4netns binary %s: %q", path, err)
}
cmdArgs, err := createBasicSlirp4netnsCmdArgs(netOptions, slirpFeatures)
if err != nil {
return nil, err
}
// Note we do not use --exit-fd, we kill this process by pid
cmdArgs = append(cmdArgs, "-c", "-r", "3")
cmdArgs = append(cmdArgs, "--netns-type=path", ns.Path(), "tap0")
cmd := exec.Command(path, cmdArgs...)
logrus.Debugf("slirp4netns command: %s", strings.Join(cmd.Args, " "))
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
// workaround for https://github.com/rootless-containers/slirp4netns/pull/153
if !netOptions.noPivotRoot && slirpFeatures.HasEnableSandbox {
cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWNS
cmd.SysProcAttr.Unshareflags = syscall.CLONE_NEWNS
}
// Leak one end of the pipe in slirp4netns
cmd.ExtraFiles = append(cmd.ExtraFiles, syncW)
logPath := filepath.Join(r.config.Engine.TmpDir, "slirp4netns-rootless-cni.log")
logFile, err := os.Create(logPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to open slirp4netns log file %s", logPath)
}
defer logFile.Close()
// Unlink immediately the file so we won't need to worry about cleaning it up later.
// It is still accessible through the open fd logFile.
if err := os.Remove(logPath); err != nil {
return nil, errors.Wrapf(err, "delete file %s", logPath)
}
cmd.Stdout = logFile
cmd.Stderr = logFile
if err := cmd.Start(); err != nil {
return nil, errors.Wrapf(err, "failed to start slirp4netns process")
}
// create pid file for the slirp4netns process
		// this is needed to kill the process in the cleanup
pid := strconv.Itoa(cmd.Process.Pid)
err = ioutil.WriteFile(filepath.Join(cniDir, "rootless-cni-slirp4netns.pid"), []byte(pid), 0700)
if err != nil {
errors.Wrap(err, "unable to write rootless-cni slirp4netns pid file")
}
defer func() {
if err := cmd.Process.Release(); err != nil {
logrus.Errorf("unable to release command process: %q", err)
}
}()
if err := waitForSync(syncR, cmd, logFile, 1*time.Second); err != nil {
return nil, err
}
// build a new resolv.conf file which uses the slirp4netns dns server address
resolveIP, err := GetSlirp4netnsDNS(nil)
if err != nil {
return nil, errors.Wrap(err, "failed to determine default slirp4netns DNS address")
}
if netOptions.cidr != "" {
_, cidr, err := net.ParseCIDR(netOptions.cidr)
if err != nil {
return nil, errors.Wrap(err, "failed to parse slirp4netns cidr")
}
resolveIP, err = GetSlirp4netnsDNS(cidr)
if err != nil {
return nil, errors.Wrapf(err, "failed to determine slirp4netns DNS address from cidr: %s", cidr.String())
}
}
conf, err := resolvconf.Get()
if err != nil {
return nil, err
}
conf, err = resolvconf.FilterResolvDNS(conf.Content, netOptions.enableIPv6, true)
if err != nil {
return nil, err
}
searchDomains := resolvconf.GetSearchDomains(conf.Content)
dnsOptions := resolvconf.GetOptions(conf.Content)
nameServers := resolvconf.GetNameservers(conf.Content)
_, err = resolvconf.Build(filepath.Join(cniDir, "resolv.conf"), append([]string{resolveIP.String()}, nameServers...), searchDomains, dnsOptions)
if err != nil {
return nil, errors.Wrap(err, "failed to create rootless cni resolv.conf")
}
// create cni directories to store files
		// they will be bind mounted to the correct location in an extra mount ns
err = os.MkdirAll(filepath.Join(cniDir, strings.TrimPrefix(persistentCNIDir, "/")), 0700)
if err != nil {
return nil, errors.Wrap(err, "could not create rootless-cni var directory")
}
runDir := filepath.Join(cniDir, "run")
err = os.MkdirAll(runDir, 0700)
if err != nil {
return nil, errors.Wrap(err, "could not create rootless-cni run directory")
}
// relabel the new run directory to the iptables /run label
// this is important, otherwise the iptables command will fail
err = label.Relabel(runDir, "system_u:object_r:iptables_var_run_t:s0", false)
if err != nil {
return nil, errors.Wrap(err, "could not create relabel rootless-cni run directory")
}
// create systemd run directory
err = os.MkdirAll(filepath.Join(runDir, "systemd"), 0700)
if err != nil {
return nil, errors.Wrap(err, "could not create rootless-cni systemd directory")
}
// create the directory for the netns files at the same location
// relative to the rootless-cni location
err = os.MkdirAll(filepath.Join(cniDir, nsDir), 0700)
if err != nil {
return nil, errors.Wrap(err, "could not create rootless-cni netns directory")
}
}
// The CNI plugins need access to iptables in $PATH. As it turns out debian doesn't put
// /usr/sbin in $PATH for rootless users. This will break rootless cni completely.
// We might break existing users and we cannot expect everyone to change their $PATH so
	// let's add /usr/sbin to $PATH ourselves.
path = os.Getenv("PATH")
if !strings.Contains(path, "/usr/sbin") {
path = path + ":/usr/sbin"
os.Setenv("PATH", path)
}
// Important set rootlessCNINS as last step.
// Do not return any errors after this.
rootlessCNINS = &RootlessCNI{
ns: ns,
dir: cniDir,
Lock: lock,
}
return rootlessCNINS, nil
}
// setUpOCICNIPod will set up the cni networks, on error it will also tear down the cni
// networks. If rootless it will join/create the rootless cni namespace.
func (r *Runtime) setUpOCICNIPod(podNetwork ocicni.PodNetwork) ([]ocicni.NetResult, error) {
rootlessCNINS, err := r.GetRootlessCNINetNs(true)
if err != nil {
return nil, err
}
var results []ocicni.NetResult
setUpPod := func() error {
results, err = r.netPlugin.SetUpPod(podNetwork)
if err != nil {
if err2 := r.netPlugin.TearDownPod(podNetwork); err2 != nil {
logrus.Errorf("Error tearing down partially created network namespace for container %s: %v", podNetwork.ID, err2)
}
return errors.Wrapf(err, "error configuring network namespace for container %s", podNetwork.ID)
}
return nil
}
// rootlessCNINS is nil if we are root
if rootlessCNINS != nil {
// execute the cni setup in the rootless net ns
err = rootlessCNINS.Do(setUpPod)
rootlessCNINS.Lock.Unlock()
} else {
err = setUpPod()
}
return results, err
}
// getCNIPodName return the pod name (hostname) used by CNI and the dnsname plugin.
// If we are in the pod network namespace use the pod name otherwise the container name
func getCNIPodName(c *Container) string {
if c.config.NetMode.IsPod() || c.IsInfra() {
pod, err := c.runtime.GetPod(c.PodID())
if err == nil {
return pod.Name()
}
}
return c.Name()
}
// Create and configure a new network namespace for a container
func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Result, error) {
var requestedIP net.IP
if ctr.requestedIP != nil {
requestedIP = ctr.requestedIP
// cancel request for a specific IP in case the container is reused later
ctr.requestedIP = nil
} else {
requestedIP = ctr.config.StaticIP
}
var requestedMAC net.HardwareAddr
if ctr.requestedMAC != nil {
requestedMAC = ctr.requestedMAC
// cancel request for a specific MAC in case the container is reused later
ctr.requestedMAC = nil
} else {
requestedMAC = ctr.config.StaticMAC
}
podName := getCNIPodName(ctr)
networks, _, err := ctr.networks()
if err != nil {
return nil, err
}
// All networks have been removed from the container.
// This is effectively forcing net=none.
if len(networks) == 0 {
return nil, nil
}
// Update container map of interface descriptions
if err := ctr.setupNetworkDescriptions(networks); err != nil {
return nil, err
}
podNetwork := r.getPodNetwork(ctr.ID(), podName, ctrNS.Path(), networks, ctr.config.PortMappings, requestedIP, requestedMAC, ctr.state.NetInterfaceDescriptions)
aliases, err := ctr.runtime.state.GetAllNetworkAliases(ctr)
if err != nil {
return nil, err
}
if len(aliases) > 0 {
podNetwork.Aliases = aliases
}
results, err := r.setUpOCICNIPod(podNetwork)
if err != nil {
return nil, err
}
networkStatus := make([]*cnitypes.Result, 0)
for idx, r := range results {
logrus.Debugf("[%d] CNI result: %v", idx, r.Result)
resultCurrent, err := cnitypes.GetResult(r.Result)
if err != nil {
return nil, errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.Result, err)
}
networkStatus = append(networkStatus, resultCurrent)
}
return networkStatus, nil
}
// Create and configure a new network namespace for a container
func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q []*cnitypes.Result, retErr error) {
ctrNS, err := netns.NewNS()
if err != nil {
return nil, nil, errors.Wrapf(err, "error creating network namespace for container %s", ctr.ID())
}
defer func() {
if retErr != nil {
if err := netns.UnmountNS(ctrNS); err != nil {
logrus.Errorf("Error unmounting partially created network namespace for container %s: %v", ctr.ID(), err)
}
if err := ctrNS.Close(); err != nil {
logrus.Errorf("Error closing partially created network namespace for container %s: %v", ctr.ID(), err)
}
}
}()
logrus.Debugf("Made network namespace at %s for container %s", ctrNS.Path(), ctr.ID())
networkStatus := []*cnitypes.Result{}
if !ctr.config.NetMode.IsSlirp4netns() {
networkStatus, err = r.configureNetNS(ctr, ctrNS)
}
return ctrNS, networkStatus, err
}
// Configure the network namespace for a rootless container
func (r *Runtime) setupRootlessNetNS(ctr *Container) error {
if ctr.config.NetMode.IsSlirp4netns() {
return r.setupSlirp4netns(ctr)
}
networks, _, err := ctr.networks()
if err != nil {
return err
}
if len(networks) > 0 && len(ctr.config.PortMappings) > 0 {
// set up port forwarder for CNI-in-slirp4netns
netnsPath := ctr.state.NetNS.Path()
// TODO: support slirp4netns port forwarder as well
return r.setupRootlessPortMappingViaRLK(ctr, netnsPath)
}
return nil
}
// Configure the network namespace using the container process
func (r *Runtime) setupNetNS(ctr *Container) error {
nsProcess := fmt.Sprintf("/proc/%d/ns/net", ctr.state.PID)
b := make([]byte, 16)
if _, err := rand.Reader.Read(b); err != nil {
return errors.Wrapf(err, "failed to generate random netns name")
}
nsPath, err := netns.GetNSRunDir()
if err != nil {
return err
}
nsPath = filepath.Join(nsPath, fmt.Sprintf("cni-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]))
if err := os.MkdirAll(filepath.Dir(nsPath), 0711); err != nil {
return err
}
mountPointFd, err := os.Create(nsPath)
if err != nil {
return err
}
if err := mountPointFd.Close(); err != nil {
return err
}
if err := unix.Mount(nsProcess, nsPath, "none", unix.MS_BIND, ""); err != nil {
return errors.Wrapf(err, "cannot mount %s", nsPath)
}
netNS, err := ns.GetNS(nsPath)
if err != nil {
return err
}
networkStatus, err := r.configureNetNS(ctr, netNS)
// Assign NetNS attributes to container
ctr.state.NetNS = netNS
ctr.state.NetworkStatus = networkStatus
return err
}
// Join an existing network namespace
func joinNetNS(path string) (ns.NetNS, error) {
netNS, err := ns.GetNS(path)
if err != nil {
return nil, errors.Wrapf(err, "error retrieving network namespace at %s", path)
}
return netNS, nil
}
// Close a network namespace.
// Differs from teardownNetNS() in that it will not attempt to undo the setup of
// the namespace, but will instead only close the open file descriptor
func (r *Runtime) closeNetNS(ctr *Container) error {
if ctr.state.NetNS == nil {
// The container has no network namespace, we're set
return nil
}
if err := ctr.state.NetNS.Close(); err != nil {
return errors.Wrapf(err, "error closing network namespace for container %s", ctr.ID())
}
ctr.state.NetNS = nil
return nil
}
// Tear down a container's CNI network configuration and joins the
// rootless net ns as rootless user
func (r *Runtime) teardownOCICNIPod(podNetwork ocicni.PodNetwork) error {
rootlessCNINS, err := r.GetRootlessCNINetNs(false)
if err != nil {
return err
}
tearDownPod := func() error {
err := r.netPlugin.TearDownPod(podNetwork)
return errors.Wrapf(err, "error tearing down CNI namespace configuration for container %s", podNetwork.ID)
}
// rootlessCNINS is nil if we are root
if rootlessCNINS != nil {
// execute the cni setup in the rootless net ns
err = rootlessCNINS.Do(tearDownPod)
rootlessCNINS.Lock.Unlock()
if err == nil {
err = rootlessCNINS.Cleanup(r)
}
} else {
err = tearDownPod()
}
return err
}
// Tear down a container's CNI network configuration, but do not tear down the
// namespace itself.
func (r *Runtime) teardownCNI(ctr *Container) error {
if ctr.state.NetNS == nil {
// The container has no network namespace, we're set
return nil
}
logrus.Debugf("Tearing down network namespace at %s for container %s", ctr.state.NetNS.Path(), ctr.ID())
networks, _, err := ctr.networks()
if err != nil {
return err
}
if !ctr.config.NetMode.IsSlirp4netns() && len(networks) > 0 {
var requestedIP net.IP
if ctr.requestedIP != nil {
requestedIP = ctr.requestedIP
// cancel request for a specific IP in case the container is reused later
ctr.requestedIP = nil
} else {
requestedIP = ctr.config.StaticIP
}
var requestedMAC net.HardwareAddr
if ctr.requestedMAC != nil {
requestedMAC = ctr.requestedMAC
// cancel request for a specific MAC in case the container is reused later
ctr.requestedMAC = nil
} else {
requestedMAC = ctr.config.StaticMAC
}
podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), networks, ctr.config.PortMappings, requestedIP, requestedMAC, ctr.state.NetInterfaceDescriptions)
err = r.teardownOCICNIPod(podNetwork)
return err
}
return nil
}
// Tear down a network namespace, undoing all state associated with it.
func (r *Runtime) teardownNetNS(ctr *Container) error {
if err := r.teardownCNI(ctr); err != nil {
return err
}
// First unmount the namespace
if err := netns.UnmountNS(ctr.state.NetNS); err != nil {
return errors.Wrapf(err, "error unmounting network namespace for container %s", ctr.ID())
}
// Now close the open file descriptor
if err := ctr.state.NetNS.Close(); err != nil {
return errors.Wrapf(err, "error closing network namespace for container %s", ctr.ID())
}
ctr.state.NetNS = nil
return nil
}
func getContainerNetNS(ctr *Container) (string, error) {
if ctr.state.NetNS != nil {
return ctr.state.NetNS.Path(), nil
}
if ctr.config.NetNsCtr != "" {
c, err := ctr.runtime.GetContainer(ctr.config.NetNsCtr)
if err != nil {
return "", err
}
if err = c.syncContainer(); err != nil {
return "", err
}
return getContainerNetNS(c)
}
return "", nil
}
// isBridgeNetMode checks if the given network mode is bridge.
// It returns nil when it is set to bridge and an error otherwise.
func isBridgeNetMode(n namespaces.NetworkMode) error {
if !n.IsBridge() {
return errors.Wrapf(define.ErrNetworkModeInvalid, "%q is not supported", n)
}
return nil
}
// Reload only works with containers with a configured network.
// It will tear down, and then reconfigure, the network of the container.
// This is mainly used when a reload of firewall rules wipes out existing
// firewall configuration.
// Efforts will be made to preserve MAC and IP addresses, but this only works if
// the container only joined a single CNI network, and was only assigned a
// single MAC or IP.
// Only works on root containers at present, though in the future we could
// extend this to stop + restart slirp4netns
func (r *Runtime) reloadContainerNetwork(ctr *Container) ([]*cnitypes.Result, error) {
if ctr.state.NetNS == nil {
return nil, errors.Wrapf(define.ErrCtrStateInvalid, "container %s network is not configured, refusing to reload", ctr.ID())
}
if err := isBridgeNetMode(ctr.config.NetMode); err != nil {
return nil, err
}
logrus.Infof("Going to reload container %s network", ctr.ID())
var requestedIP net.IP
var requestedMAC net.HardwareAddr
// Set requested IP and MAC address, if possible.
if len(ctr.state.NetworkStatus) == 1 {
result := ctr.state.NetworkStatus[0]
if len(result.IPs) == 1 {
resIP := result.IPs[0]
requestedIP = resIP.Address.IP
ctr.requestedIP = requestedIP
logrus.Debugf("Going to preserve container %s IP address %s", ctr.ID(), ctr.requestedIP.String())
if resIP.Interface != nil && *resIP.Interface < len(result.Interfaces) && *resIP.Interface >= 0 {
var err error
requestedMAC, err = net.ParseMAC(result.Interfaces[*resIP.Interface].Mac)
if err != nil {
return nil, errors.Wrapf(err, "error parsing container %s MAC address %s", ctr.ID(), result.Interfaces[*resIP.Interface].Mac)
}
ctr.requestedMAC = requestedMAC
logrus.Debugf("Going to preserve container %s MAC address %s", ctr.ID(), ctr.requestedMAC.String())
}
}
}
err := r.teardownCNI(ctr)
if err != nil {
// teardownCNI will error if the iptables rules do not exist, which is expected after
// a firewall reload. The purpose of a network reload is to recreate the rules if they do
// not exist, so we should not log this specific error at the error level; doing so would only confuse users.
// iptables-legacy and iptables-nft produce different errors, so make sure to match both.
b, rerr := regexp.MatchString("Couldn't load target `CNI-[a-f0-9]{24}':No such file or directory|Chain 'CNI-[a-f0-9]{24}' does not exist", err.Error())
if rerr == nil && !b {
logrus.Error(err)
} else {
logrus.Info(err)
}
}
// teardownCNI will clean the requested IP and MAC so we need to set them again
ctr.requestedIP = requestedIP
ctr.requestedMAC = requestedMAC
return r.configureNetNS(ctr, ctr.state.NetNS)
}
func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) {
var netStats *netlink.LinkStatistics
// With slirp4netns, we can't collect statistics at present.
// For now, we allow stats to at least run by returning nil
if rootless.IsRootless() || ctr.config.NetMode.IsSlirp4netns() {
return netStats, nil
}
netNSPath, netPathErr := getContainerNetNS(ctr)
if netPathErr != nil {
return nil, netPathErr
}
if netNSPath == "" {
// If netNSPath is empty, it was set as none, and no netNS was set up
// this is a valid state and thus return no error, nor any statistics
return nil, nil
}
err := ns.WithNetNSPath(netNSPath, func(_ ns.NetNS) error {
link, err := netlink.LinkByName(ocicni.DefaultInterfaceName)
if err != nil {
return err
}
netStats = link.Attrs().Statistics
return nil
})
return netStats, err
}
// Produce an InspectNetworkSettings containing information on the container
// network.
func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, error) {
if c.config.NetNsCtr != "" {
netNsCtr, err := c.runtime.GetContainer(c.config.NetNsCtr)
if err != nil {
return nil, err
}
// see https://github.com/containers/podman/issues/10090
// the container has to be locked for syncContainer()
netNsCtr.lock.Lock()
defer netNsCtr.lock.Unlock()
// Have to sync to ensure that state is populated
if err := netNsCtr.syncContainer(); err != nil {
return nil, err
}
logrus.Debugf("Container %s shares network namespace, retrieving network info of container %s", c.ID(), c.config.NetNsCtr)
return netNsCtr.getContainerNetworkInfo()
}
settings := new(define.InspectNetworkSettings)
settings.Ports = makeInspectPortBindings(c.config.PortMappings)
networks, isDefault, err := c.networks()
if err != nil {
return nil, err
}
// We can't do more if the network is down.
if c.state.NetNS == nil {
// We still want to make dummy configurations for each CNI net
// the container joined.
if len(networks) > 0 {
settings.Networks = make(map[string]*define.InspectAdditionalNetwork, len(networks))
for _, net := range networks {
cniNet := new(define.InspectAdditionalNetwork)
cniNet.NetworkID = net
settings.Networks[net] = cniNet
}
}
return settings, nil
}
// Set network namespace path
settings.SandboxKey = c.state.NetNS.Path()
// If this is empty, we're probably slirp4netns
if len(c.state.NetworkStatus) == 0 {
return settings, nil
}
// If we have CNI networks - handle that here
if len(networks) > 0 {
if len(networks) != len(c.state.NetworkStatus) {
return nil, errors.Wrapf(define.ErrInternal, "network inspection mismatch: asked to join %d CNI network(s) %v, but have information on %d network(s)", len(networks), networks, len(c.state.NetworkStatus))
}
settings.Networks = make(map[string]*define.InspectAdditionalNetwork)
// CNI results should be in the same order as the list of
// networks we pass into CNI.
for index, name := range networks {
cniResult := c.state.NetworkStatus[index]
addedNet := new(define.InspectAdditionalNetwork)
addedNet.NetworkID = name
basicConfig, err := resultToBasicNetworkConfig(cniResult)
if err != nil {
return nil, err
}
aliases, err := c.runtime.state.GetNetworkAliases(c, name)
if err != nil {
return nil, err
}
addedNet.Aliases = aliases
addedNet.InspectBasicNetworkConfig = basicConfig
settings.Networks[name] = addedNet
}
if !isDefault {
return settings, nil
}
}
// If not joining networks, we should have at most 1 result
if len(c.state.NetworkStatus) > 1 {
return nil, errors.Wrapf(define.ErrInternal, "should have at most 1 CNI result if not joining networks, instead got %d", len(c.state.NetworkStatus))
}
if len(c.state.NetworkStatus) == 1 {
basicConfig, err := resultToBasicNetworkConfig(c.state.NetworkStatus[0])
if err != nil {
return nil, err
}
settings.InspectBasicNetworkConfig = basicConfig
}
return settings, nil
}
// setupNetworkDescriptions adds networks and eth values to the container's
// network descriptions
func (c *Container) setupNetworkDescriptions(networks []string) error {
// if the map is nil and we have networks
if c.state.NetInterfaceDescriptions == nil && len(networks) > 0 {
c.state.NetInterfaceDescriptions = make(ContainerNetworkDescriptions)
}
origLen := len(c.state.NetInterfaceDescriptions)
for _, n := range networks {
// if the network is not in the map, add it
if _, exists := c.state.NetInterfaceDescriptions[n]; !exists {
c.state.NetInterfaceDescriptions.add(n)
}
}
// if the map changed, we need to save the container state
if origLen != len(c.state.NetInterfaceDescriptions) {
if err := c.save(); err != nil {
return err
}
}
return nil
}
// resultToBasicNetworkConfig produces an InspectBasicNetworkConfig from a CNI
// result
func resultToBasicNetworkConfig(result *cnitypes.Result) (define.InspectBasicNetworkConfig, error) {
config := define.InspectBasicNetworkConfig{}
for _, ctrIP := range result.IPs {
size, _ := ctrIP.Address.Mask.Size()
switch {
case ctrIP.Version == "4" && config.IPAddress == "":
config.IPAddress = ctrIP.Address.IP.String()
config.IPPrefixLen = size
config.Gateway = ctrIP.Gateway.String()
if ctrIP.Interface != nil && *ctrIP.Interface < len(result.Interfaces) && *ctrIP.Interface >= 0 {
config.MacAddress = result.Interfaces[*ctrIP.Interface].Mac
}
case ctrIP.Version == "4" && config.IPAddress != "":
config.SecondaryIPAddresses = append(config.SecondaryIPAddresses, ctrIP.Address.String())
if ctrIP.Interface != nil && *ctrIP.Interface < len(result.Interfaces) && *ctrIP.Interface >= 0 {
config.AdditionalMacAddresses = append(config.AdditionalMacAddresses, result.Interfaces[*ctrIP.Interface].Mac)
}
case ctrIP.Version == "6" && config.IPAddress == "":
config.GlobalIPv6Address = ctrIP.Address.IP.String()
config.GlobalIPv6PrefixLen = size
config.IPv6Gateway = ctrIP.Gateway.String()
case ctrIP.Version == "6" && config.IPAddress != "":
config.SecondaryIPv6Addresses = append(config.SecondaryIPv6Addresses, ctrIP.Address.String())
default:
return config, errors.Wrapf(define.ErrInternal, "unrecognized IP version %q", ctrIP.Version)
}
}
return config, nil
}
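// Worked example (illustrative only, not taken from the upstream sources): a
// CNI result with a single IPv4 entry "10.88.0.5/16", gateway "10.88.0.1" and
// a matching interface would produce IPAddress "10.88.0.5", IPPrefixLen 16,
// Gateway "10.88.0.1" and MacAddress set to that interface's MAC; any further
// IPv4 entries land in SecondaryIPAddresses, while IPv6 entries fill the
// GlobalIPv6* fields instead.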
// This is a horrible hack, necessary because CNI does not properly clean up
// after itself on an unclean reboot. Return what we're pretty sure is the path
// to CNI's internal files (it's not really exposed to us).
func getCNINetworksDir() (string, error) {
return filepath.Join(persistentCNIDir, "networks"), nil
}
type logrusDebugWriter struct {
prefix string
}
func (w *logrusDebugWriter) Write(p []byte) (int, error) {
logrus.Debugf("%s%s", w.prefix, string(p))
return len(p), nil
}
// NetworkDisconnect removes a container from the network
func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) error {
// only the bridge mode supports cni networks
if err := isBridgeNetMode(c.config.NetMode); err != nil {
return err
}
networks, err := c.networksByNameIndex()
if err != nil {
return err
}
// Check if the network exists; if the input is an ID, resolve it to the name.
// ocicni only uses names, so it is important that we only use the name.
netName, err = network.NormalizeName(c.runtime.config, netName)
if err != nil {
return err
}
index, nameExists := networks[netName]
if !nameExists && len(networks) > 0 {
return errors.Errorf("container %s is not connected to network %s", nameOrID, netName)
}
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return err
}
if err := c.runtime.state.NetworkDisconnect(c, netName); err != nil {
return err
}
c.newNetworkEvent(events.NetworkDisconnect, netName)
if !c.ensureState(define.ContainerStateRunning, define.ContainerStateCreated) {
return nil
}
if c.state.NetNS == nil {
return errors.Wrapf(define.ErrNoNetwork, "unable to disconnect %s from %s", nameOrID, netName)
}
podConfig := c.runtime.getPodNetwork(c.ID(), c.Name(), c.state.NetNS.Path(), []string{netName}, c.config.PortMappings, nil, nil, c.state.NetInterfaceDescriptions)
if err := c.runtime.teardownOCICNIPod(podConfig); err != nil {
return err
}
// update the container's stored network status
networkStatus := c.state.NetworkStatus
// clip out the index of the network
tmpNetworkStatus := make([]*cnitypes.Result, 0, len(networkStatus)-1)
for k, v := range networkStatus {
if index != k {
tmpNetworkStatus = append(tmpNetworkStatus, v)
}
}
c.state.NetworkStatus = tmpNetworkStatus
return c.save()
}
// ConnectNetwork connects a container to a given network
func (c *Container) NetworkConnect(nameOrID, netName string, aliases []string) error {
// only the bridge mode supports cni networks
if err := isBridgeNetMode(c.config.NetMode); err != nil {
return err
}
networks, err := c.networksByNameIndex()
if err != nil {
return err
}
// Check if the network exists; if the input is an ID, resolve it to the name.
// ocicni only uses names, so it is important that we only use the name.
netName, err = network.NormalizeName(c.runtime.config, netName)
if err != nil {
return err
}
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return err
}
if err := c.runtime.state.NetworkConnect(c, netName, aliases); err != nil {
return err
}
c.newNetworkEvent(events.NetworkConnect, netName)
if !c.ensureState(define.ContainerStateRunning, define.ContainerStateCreated) {
return nil
}
if c.state.NetNS == nil {
return errors.Wrapf(define.ErrNoNetwork, "unable to connect %s to %s", nameOrID, netName)
}
ctrNetworks, _, err := c.networks()
if err != nil {
return err
}
// Update network descriptions
if err := c.setupNetworkDescriptions(ctrNetworks); err != nil {
return err
}
podConfig := c.runtime.getPodNetwork(c.ID(), c.Name(), c.state.NetNS.Path(), []string{netName}, c.config.PortMappings, nil, nil, c.state.NetInterfaceDescriptions)
podConfig.Aliases = make(map[string][]string, 1)
podConfig.Aliases[netName] = aliases
results, err := c.runtime.setUpOCICNIPod(podConfig)
if err != nil {
return err
}
if len(results) != 1 {
return errors.New("when adding aliases, results must be of length 1")
}
networkResults := make([]*cnitypes.Result, 0)
for _, r := range results {
resultCurrent, err := cnitypes.GetResult(r.Result)
if err != nil {
return errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.Result, err)
}
networkResults = append(networkResults, resultCurrent)
}
// update network status
networkStatus := c.state.NetworkStatus
// if the container has no previous network status, simply append the
// new results; otherwise splice the new result in at its sorted position
if len(networkStatus) == 0 {
c.state.NetworkStatus = append(c.state.NetworkStatus, networkResults...)
} else {
// build a list of network names so we can sort and
// get the new name's index
var networkNames []string
for name := range networks {
networkNames = append(networkNames, name)
}
networkNames = append(networkNames, netName)
// sort
sort.Strings(networkNames)
// get index of new network name
index := sort.SearchStrings(networkNames, netName)
// Append a zero value to the slice
networkStatus = append(networkStatus, &cnitypes.Result{})
// populate network status
copy(networkStatus[index+1:], networkStatus[index:])
networkStatus[index] = networkResults[0]
c.state.NetworkStatus = networkStatus
}
return c.save()
}
// DisconnectContainerFromNetwork removes a container from its CNI network
func (r *Runtime) DisconnectContainerFromNetwork(nameOrID, netName string, force bool) error {
ctr, err := r.LookupContainer(nameOrID)
if err != nil {
return err
}
return ctr.NetworkDisconnect(nameOrID, netName, force)
}
// ConnectContainerToNetwork connects a container to a CNI network
func (r *Runtime) ConnectContainerToNetwork(nameOrID, netName string, aliases []string) error {
ctr, err := r.LookupContainer(nameOrID)
if err != nil {
return err
}
return ctr.NetworkConnect(nameOrID, netName, aliases)
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
vendor/github.com/prometheus/tsdb/querier.go
|
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"fmt"
"sort"
"strings"
"github.com/pkg/errors"
"github.com/prometheus/tsdb/chunkenc"
"github.com/prometheus/tsdb/chunks"
"github.com/prometheus/tsdb/index"
"github.com/prometheus/tsdb/labels"
)
// Querier provides querying access over time series data of a fixed
// time range.
type Querier interface {
// Select returns a set of series that matches the given label matchers.
Select(...labels.Matcher) (SeriesSet, error)
// LabelValues returns all potential values for a label name.
LabelValues(string) ([]string, error)
// LabelValuesFor returns all potential values for a label name
// under the constraint of another label.
LabelValuesFor(string, labels.Label) ([]string, error)
// LabelNames returns all the unique label names present in the block in sorted order.
LabelNames() ([]string, error)
// Close releases the resources of the Querier.
Close() error
}
// Series exposes a single time series.
type Series interface {
// Labels returns the complete set of labels identifying the series.
Labels() labels.Labels
// Iterator returns a new iterator of the data of the series.
Iterator() SeriesIterator
}
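// Usage sketch (added for illustration; the variable "db" and the matcher
// values are assumptions, not part of this file): a caller typically obtains a
// Querier for a time range and iterates the selected series.
//
//	q, _ := db.Querier(mint, maxt)
//	defer q.Close()
//	set, _ := q.Select(labels.NewEqualMatcher("job", "api"))
//	for set.Next() {
//		it := set.At().Iterator()
//		for it.Next() {
//			t, v := it.At()
//			_, _ = t, v
//		}
//	}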
// querier aggregates querying results from time blocks within
// a single partition.
type querier struct {
blocks []Querier
}
func (q *querier) LabelValues(n string) ([]string, error) {
return q.lvals(q.blocks, n)
}
// LabelNames returns all the unique label names present in the querier's blocks.
func (q *querier) LabelNames() ([]string, error) {
labelNamesMap := make(map[string]struct{})
for _, b := range q.blocks {
names, err := b.LabelNames()
if err != nil {
return nil, errors.Wrap(err, "LabelNames() from Querier")
}
for _, name := range names {
labelNamesMap[name] = struct{}{}
}
}
labelNames := make([]string, 0, len(labelNamesMap))
for name := range labelNamesMap {
labelNames = append(labelNames, name)
}
sort.Strings(labelNames)
return labelNames, nil
}
func (q *querier) lvals(qs []Querier, n string) ([]string, error) {
if len(qs) == 0 {
return nil, nil
}
if len(qs) == 1 {
return qs[0].LabelValues(n)
}
l := len(qs) / 2
s1, err := q.lvals(qs[:l], n)
if err != nil {
return nil, err
}
s2, err := q.lvals(qs[l:], n)
if err != nil {
return nil, err
}
return mergeStrings(s1, s2), nil
}
func (q *querier) LabelValuesFor(string, labels.Label) ([]string, error) {
return nil, fmt.Errorf("not implemented")
}
func (q *querier) Select(ms ...labels.Matcher) (SeriesSet, error) {
return q.sel(q.blocks, ms)
}
func (q *querier) sel(qs []Querier, ms []labels.Matcher) (SeriesSet, error) {
if len(qs) == 0 {
return EmptySeriesSet(), nil
}
if len(qs) == 1 {
return qs[0].Select(ms...)
}
l := len(qs) / 2
a, err := q.sel(qs[:l], ms)
if err != nil {
return nil, err
}
b, err := q.sel(qs[l:], ms)
if err != nil {
return nil, err
}
return newMergedSeriesSet(a, b), nil
}
func (q *querier) Close() error {
var merr MultiError
for _, bq := range q.blocks {
merr.Add(bq.Close())
}
return merr.Err()
}
// NewBlockQuerier returns a querier against the reader.
func NewBlockQuerier(b BlockReader, mint, maxt int64) (Querier, error) {
indexr, err := b.Index()
if err != nil {
return nil, errors.Wrapf(err, "open index reader")
}
chunkr, err := b.Chunks()
if err != nil {
indexr.Close()
return nil, errors.Wrapf(err, "open chunk reader")
}
tombsr, err := b.Tombstones()
if err != nil {
indexr.Close()
chunkr.Close()
return nil, errors.Wrapf(err, "open tombstone reader")
}
return &blockQuerier{
mint: mint,
maxt: maxt,
index: indexr,
chunks: chunkr,
tombstones: tombsr,
}, nil
}
// blockQuerier provides querying access to a single block database.
type blockQuerier struct {
index IndexReader
chunks ChunkReader
tombstones TombstoneReader
mint, maxt int64
}
func (q *blockQuerier) Select(ms ...labels.Matcher) (SeriesSet, error) {
base, err := LookupChunkSeries(q.index, q.tombstones, ms...)
if err != nil {
return nil, err
}
return &blockSeriesSet{
set: &populatedChunkSeries{
set: base,
chunks: q.chunks,
mint: q.mint,
maxt: q.maxt,
},
mint: q.mint,
maxt: q.maxt,
}, nil
}
func (q *blockQuerier) LabelValues(name string) ([]string, error) {
tpls, err := q.index.LabelValues(name)
if err != nil {
return nil, err
}
res := make([]string, 0, tpls.Len())
for i := 0; i < tpls.Len(); i++ {
vals, err := tpls.At(i)
if err != nil {
return nil, err
}
res = append(res, vals[0])
}
return res, nil
}
func (q *blockQuerier) LabelNames() ([]string, error) {
return q.index.LabelNames()
}
func (q *blockQuerier) LabelValuesFor(string, labels.Label) ([]string, error) {
return nil, fmt.Errorf("not implemented")
}
func (q *blockQuerier) Close() error {
var merr MultiError
merr.Add(q.index.Close())
merr.Add(q.chunks.Close())
merr.Add(q.tombstones.Close())
return merr.Err()
}
// PostingsForMatchers assembles a single postings iterator against the index reader
// based on the given matchers and returns it in sorted order.
func PostingsForMatchers(ix IndexReader, ms ...labels.Matcher) (index.Postings, error) {
var its []index.Postings
for _, m := range ms {
it, err := postingsForMatcher(ix, m)
if err != nil {
return nil, err
}
its = append(its, it)
}
return ix.SortedPostings(index.Intersect(its...)), nil
}
func postingsForMatcher(ix IndexReader, m labels.Matcher) (index.Postings, error) {
// If the matcher selects an empty value, it selects all the series which don't
// have the label name set too. See: https://github.com/prometheus/prometheus/issues/3575
// and https://github.com/prometheus/prometheus/pull/3578#issuecomment-351653555
if m.Matches("") {
return postingsForUnsetLabelMatcher(ix, m)
}
// Fast-path for equal matching.
if em, ok := m.(*labels.EqualMatcher); ok {
it, err := ix.Postings(em.Name(), em.Value())
if err != nil {
return nil, err
}
return it, nil
}
tpls, err := ix.LabelValues(m.Name())
if err != nil {
return nil, err
}
var res []string
for i := 0; i < tpls.Len(); i++ {
vals, err := tpls.At(i)
if err != nil {
return nil, err
}
if m.Matches(vals[0]) {
res = append(res, vals[0])
}
}
if len(res) == 0 {
return index.EmptyPostings(), nil
}
var rit []index.Postings
for _, v := range res {
it, err := ix.Postings(m.Name(), v)
if err != nil {
return nil, err
}
rit = append(rit, it)
}
return index.Merge(rit...), nil
}
func postingsForUnsetLabelMatcher(ix IndexReader, m labels.Matcher) (index.Postings, error) {
tpls, err := ix.LabelValues(m.Name())
if err != nil {
return nil, err
}
var res []string
for i := 0; i < tpls.Len(); i++ {
vals, err := tpls.At(i)
if err != nil {
return nil, err
}
if !m.Matches(vals[0]) {
res = append(res, vals[0])
}
}
var rit []index.Postings
for _, v := range res {
it, err := ix.Postings(m.Name(), v)
if err != nil {
return nil, err
}
rit = append(rit, it)
}
allPostings, err := ix.Postings(index.AllPostingsKey())
if err != nil {
return nil, err
}
return index.Without(allPostings, index.Merge(rit...)), nil
}
func mergeStrings(a, b []string) []string {
maxl := len(a)
if len(b) > len(a) {
maxl = len(b)
}
res := make([]string, 0, maxl*10/9)
for len(a) > 0 && len(b) > 0 {
d := strings.Compare(a[0], b[0])
if d == 0 {
res = append(res, a[0])
a, b = a[1:], b[1:]
} else if d < 0 {
res = append(res, a[0])
a = a[1:]
} else if d > 0 {
res = append(res, b[0])
b = b[1:]
}
}
// Append all remaining elements.
res = append(res, a...)
res = append(res, b...)
return res
}
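// Worked example (not part of the original file): with sorted, de-duplicated
// inputs the merge stays sorted and values present in both slices appear once:
//
//	mergeStrings([]string{"a", "c", "d"}, []string{"b", "c"})
//	// -> ["a", "b", "c", "d"]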
// SeriesSet contains a set of series.
type SeriesSet interface {
Next() bool
At() Series
Err() error
}
var emptySeriesSet = errSeriesSet{}
// EmptySeriesSet returns a series set that's always empty.
func EmptySeriesSet() SeriesSet {
return emptySeriesSet
}
// mergedSeriesSet merges two series sets into a single series set. The input series sets
// must be sorted and sequential in time, i.e. if they have the same label set,
// the datapoints of a must be before the datapoints of b.
type mergedSeriesSet struct {
a, b SeriesSet
cur Series
adone, bdone bool
}
// NewMergedSeriesSet merges two series sets into a single series set. The input series sets
// must be sorted and sequential in time, i.e. if they have the same label set,
// the datapoints of a must be before the datapoints of b.
func NewMergedSeriesSet(a, b SeriesSet) SeriesSet {
return newMergedSeriesSet(a, b)
}
func newMergedSeriesSet(a, b SeriesSet) *mergedSeriesSet {
s := &mergedSeriesSet{a: a, b: b}
// Initialize first elements of both sets as Next() needs
// one element look-ahead.
s.adone = !s.a.Next()
s.bdone = !s.b.Next()
return s
}
func (s *mergedSeriesSet) At() Series {
return s.cur
}
func (s *mergedSeriesSet) Err() error {
if s.a.Err() != nil {
return s.a.Err()
}
return s.b.Err()
}
func (s *mergedSeriesSet) compare() int {
if s.adone {
return 1
}
if s.bdone {
return -1
}
return labels.Compare(s.a.At().Labels(), s.b.At().Labels())
}
func (s *mergedSeriesSet) Next() bool {
if s.adone && s.bdone || s.Err() != nil {
return false
}
d := s.compare()
// Both sets contain the current series. Chain them into a single one.
if d > 0 {
s.cur = s.b.At()
s.bdone = !s.b.Next()
} else if d < 0 {
s.cur = s.a.At()
s.adone = !s.a.Next()
} else {
s.cur = &chainedSeries{series: []Series{s.a.At(), s.b.At()}}
s.adone = !s.a.Next()
s.bdone = !s.b.Next()
}
return true
}
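// Example of the merge order (illustrative only): if set a holds series {x="1"}
// and {x="2"} and set b holds {x="2"} and {x="3"}, iteration yields {x="1"}
// from a, then a chainedSeries combining both {x="2"} entries (a's samples
// first, per the ordering contract above), then {x="3"} from b.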
// ChunkSeriesSet exposes the chunks and intervals of a series instead of the
// actual series itself.
type ChunkSeriesSet interface {
Next() bool
At() (labels.Labels, []chunks.Meta, Intervals)
Err() error
}
// baseChunkSeries loads the label set and chunk references for a postings
// list from an index. It filters out series that have labels set that should be unset.
type baseChunkSeries struct {
p index.Postings
index IndexReader
tombstones TombstoneReader
lset labels.Labels
chks []chunks.Meta
intervals Intervals
err error
}
// LookupChunkSeries retrieves all series for the given matchers and returns a ChunkSeriesSet
// over them. It drops chunks based on tombstones in the given reader.
func LookupChunkSeries(ir IndexReader, tr TombstoneReader, ms ...labels.Matcher) (ChunkSeriesSet, error) {
if tr == nil {
tr = newMemTombstones()
}
p, err := PostingsForMatchers(ir, ms...)
if err != nil {
return nil, err
}
return &baseChunkSeries{
p: p,
index: ir,
tombstones: tr,
}, nil
}
func (s *baseChunkSeries) At() (labels.Labels, []chunks.Meta, Intervals) {
return s.lset, s.chks, s.intervals
}
func (s *baseChunkSeries) Err() error { return s.err }
func (s *baseChunkSeries) Next() bool {
var (
lset = make(labels.Labels, len(s.lset))
chkMetas = make([]chunks.Meta, len(s.chks))
err error
)
for s.p.Next() {
ref := s.p.At()
if err := s.index.Series(ref, &lset, &chkMetas); err != nil {
// Postings may be stale. Skip if no underlying series exists.
if errors.Cause(err) == ErrNotFound {
continue
}
s.err = err
return false
}
s.lset = lset
s.chks = chkMetas
s.intervals, err = s.tombstones.Get(s.p.At())
if err != nil {
s.err = errors.Wrap(err, "get tombstones")
return false
}
if len(s.intervals) > 0 {
// Only those chunks that are not entirely deleted.
chks := make([]chunks.Meta, 0, len(s.chks))
for _, chk := range s.chks {
if !(Interval{chk.MinTime, chk.MaxTime}.isSubrange(s.intervals)) {
chks = append(chks, chk)
}
}
s.chks = chks
}
return true
}
if err := s.p.Err(); err != nil {
s.err = err
}
return false
}
// populatedChunkSeries loads chunk data from a store for a set of series
// with known chunk references. It filters out chunks that do not fit the
// given time range.
type populatedChunkSeries struct {
set ChunkSeriesSet
chunks ChunkReader
mint, maxt int64
err error
chks []chunks.Meta
lset labels.Labels
intervals Intervals
}
func (s *populatedChunkSeries) At() (labels.Labels, []chunks.Meta, Intervals) {
return s.lset, s.chks, s.intervals
}
func (s *populatedChunkSeries) Err() error { return s.err }
func (s *populatedChunkSeries) Next() bool {
for s.set.Next() {
lset, chks, dranges := s.set.At()
for len(chks) > 0 {
if chks[0].MaxTime >= s.mint {
break
}
chks = chks[1:]
}
// This is to delete in place while iterating.
for i, rlen := 0, len(chks); i < rlen; i++ {
j := i - (rlen - len(chks))
c := &chks[j]
// Break out at the first chunk that has no overlap with mint, maxt.
if c.MinTime > s.maxt {
chks = chks[:j]
break
}
c.Chunk, s.err = s.chunks.Chunk(c.Ref)
if s.err != nil {
// This means that the chunk has been garbage collected. Remove it from the list.
if s.err == ErrNotFound {
s.err = nil
// Delete in-place.
s.chks = append(chks[:j], chks[j+1:]...)
}
return false
}
}
if len(chks) == 0 {
continue
}
s.lset = lset
s.chks = chks
s.intervals = dranges
return true
}
if err := s.set.Err(); err != nil {
s.err = err
}
return false
}
// blockSeriesSet is a set of series from an inverted index query.
type blockSeriesSet struct {
set ChunkSeriesSet
err error
cur Series
mint, maxt int64
}
func (s *blockSeriesSet) Next() bool {
for s.set.Next() {
lset, chunks, dranges := s.set.At()
s.cur = &chunkSeries{
labels: lset,
chunks: chunks,
mint: s.mint,
maxt: s.maxt,
intervals: dranges,
}
return true
}
if s.set.Err() != nil {
s.err = s.set.Err()
}
return false
}
func (s *blockSeriesSet) At() Series { return s.cur }
func (s *blockSeriesSet) Err() error { return s.err }
// chunkSeries is a series that is backed by a sequence of chunks holding
// time series data.
type chunkSeries struct {
labels labels.Labels
chunks []chunks.Meta // in-order chunk refs
mint, maxt int64
intervals Intervals
}
func (s *chunkSeries) Labels() labels.Labels {
return s.labels
}
func (s *chunkSeries) Iterator() SeriesIterator {
return newChunkSeriesIterator(s.chunks, s.intervals, s.mint, s.maxt)
}
// SeriesIterator iterates over the data of a time series.
type SeriesIterator interface {
// Seek advances the iterator forward to the given timestamp.
// If there's no value exactly at t, it advances to the first value
// after t.
Seek(t int64) bool
// At returns the current timestamp/value pair.
At() (t int64, v float64)
// Next advances the iterator by one.
Next() bool
// Err returns the current error.
Err() error
}
// chainedSeries implements a series for a list of time-sorted series.
// They all must have the same labels.
type chainedSeries struct {
series []Series
}
func (s *chainedSeries) Labels() labels.Labels {
return s.series[0].Labels()
}
func (s *chainedSeries) Iterator() SeriesIterator {
return newChainedSeriesIterator(s.series...)
}
// chainedSeriesIterator implements a series iterator over a list
// of time-sorted, non-overlapping iterators.
type chainedSeriesIterator struct {
series []Series // series in time order
i int
cur SeriesIterator
}
func newChainedSeriesIterator(s ...Series) *chainedSeriesIterator {
return &chainedSeriesIterator{
series: s,
i: 0,
cur: s[0].Iterator(),
}
}
func (it *chainedSeriesIterator) Seek(t int64) bool {
// We just scan the chained series sequentially as they are already
// pre-selected by relevant time and should be accessed sequentially anyway.
for i, s := range it.series[it.i:] {
cur := s.Iterator()
if !cur.Seek(t) {
continue
}
it.cur = cur
it.i += i
return true
}
return false
}
func (it *chainedSeriesIterator) Next() bool {
if it.cur.Next() {
return true
}
if err := it.cur.Err(); err != nil {
return false
}
if it.i == len(it.series)-1 {
return false
}
it.i++
it.cur = it.series[it.i].Iterator()
return it.Next()
}
func (it *chainedSeriesIterator) At() (t int64, v float64) {
return it.cur.At()
}
func (it *chainedSeriesIterator) Err() error {
return it.cur.Err()
}
// chunkSeriesIterator implements a series iterator on top
// of a list of time-sorted, non-overlapping chunks.
type chunkSeriesIterator struct {
chunks []chunks.Meta
i int
cur chunkenc.Iterator
maxt, mint int64
intervals Intervals
}
func newChunkSeriesIterator(cs []chunks.Meta, dranges Intervals, mint, maxt int64) *chunkSeriesIterator {
it := cs[0].Chunk.Iterator()
if len(dranges) > 0 {
it = &deletedIterator{it: it, intervals: dranges}
}
return &chunkSeriesIterator{
chunks: cs,
i: 0,
cur: it,
mint: mint,
maxt: maxt,
intervals: dranges,
}
}
func (it *chunkSeriesIterator) Seek(t int64) (ok bool) {
if t > it.maxt {
return false
}
// Seek to the first valid value after t.
if t < it.mint {
t = it.mint
}
for ; it.chunks[it.i].MaxTime < t; it.i++ {
if it.i == len(it.chunks)-1 {
return false
}
}
it.cur = it.chunks[it.i].Chunk.Iterator()
if len(it.intervals) > 0 {
it.cur = &deletedIterator{it: it.cur, intervals: it.intervals}
}
for it.cur.Next() {
t0, _ := it.cur.At()
if t0 >= t {
return true
}
}
return false
}
func (it *chunkSeriesIterator) At() (t int64, v float64) {
return it.cur.At()
}
func (it *chunkSeriesIterator) Next() bool {
if it.cur.Next() {
t, _ := it.cur.At()
if t < it.mint {
if !it.Seek(it.mint) {
return false
}
t, _ = it.At()
return t <= it.maxt
}
if t > it.maxt {
return false
}
return true
}
if err := it.cur.Err(); err != nil {
return false
}
if it.i == len(it.chunks)-1 {
return false
}
it.i++
it.cur = it.chunks[it.i].Chunk.Iterator()
if len(it.intervals) > 0 {
it.cur = &deletedIterator{it: it.cur, intervals: it.intervals}
}
return it.Next()
}
func (it *chunkSeriesIterator) Err() error {
return it.cur.Err()
}
// deletedIterator wraps an Iterator and makes sure any deleted metrics are not
// returned.
type deletedIterator struct {
it chunkenc.Iterator
intervals Intervals
}
func (it *deletedIterator) At() (int64, float64) {
return it.it.At()
}
func (it *deletedIterator) Next() bool {
Outer:
for it.it.Next() {
ts, _ := it.it.At()
for _, tr := range it.intervals {
if tr.inBounds(ts) {
continue Outer
}
if ts > tr.Maxt {
it.intervals = it.intervals[1:]
continue
}
return true
}
return true
}
return false
}
func (it *deletedIterator) Err() error {
return it.it.Err()
}
type errSeriesSet struct {
err error
}
func (s errSeriesSet) Next() bool { return false }
func (s errSeriesSet) At() Series { return nil }
func (s errSeriesSet) Err() error { return s.err }
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
lib/exec_command_test.go
|
package lib
import (
"fmt"
"os"
"os/exec"
"strings"
"testing"
)
func fakeExecCommand(command string, args ...string) *exec.Cmd {
cs := []string{"-test.run=TestHelperProcess", "--", command}
cs = append(cs, args...)
cmd := exec.Command(os.Args[0], cs...)
cmd.Env = []string{
"GO_WANT_HELPER_PROCESS=1",
"GOT_COMMAND=" + strings.Join(append([]string{command}, args...), " "),
}
return cmd
}
func TestHelperProcess(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
fmt.Printf("Received command: %s", os.Getenv("GOT_COMMAND"))
os.Exit(0)
}
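// Example use of the helper-process pattern above (an illustrative sketch, not
// part of the original test file): production code would normally swap its
// exec.Command variable for fakeExecCommand; here we only check that the
// helper reports back the command line it was given.
func TestFakeExecCommandExample(t *testing.T) {
	out, err := fakeExecCommand("echo", "hello").CombinedOutput()
	if err != nil {
		t.Fatalf("helper process failed: %v", err)
	}
	if !strings.Contains(string(out), "echo hello") {
		t.Errorf("unexpected helper output: %q", string(out))
	}
}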
|
[
"\"GO_WANT_HELPER_PROCESS\"",
"\"GOT_COMMAND\""
] |
[] |
[
"GO_WANT_HELPER_PROCESS",
"GOT_COMMAND"
] |
[]
|
["GO_WANT_HELPER_PROCESS", "GOT_COMMAND"]
|
go
| 2 | 0 | |
tests/constants.py
|
import getpass
import os
try:
import yaml
except ImportError:
yaml = None
class Constants(object):
CONF_FILE = 'settings-test.yaml'
_url = None
_username = 'admin'
def __init__(self, conf=None):
if conf:
self.CONF_FILE = conf
try:
if yaml:
with open(self.CONF_FILE, 'r') as f:
self._settings = yaml.load(f, Loader=yaml.FullLoader)
else:
self._settings = {}
except:
self._settings = {}
try:
# Attempt to pull from environment
self._settings['ssc_url'] = os.environ['PYSSC_URL']
self._settings['username'] = os.environ['PYSSC_USERNAME']
self._settings['password'] = os.environ['PYSSC_PASSWORD']
self._settings['token'] = os.environ['PYSSC_TOKEN']
except:
pass
@property
def password(self):
if 'password' in self._settings:
self._password = self._settings['password']
try:
with open('.password','r') as f:
self._password = f.read().strip()
except:
pass
if not hasattr(self, '_password'):
self._password = getpass.getpass('\nPassword: ')
return self._password
@property
def username(self):
if 'username' in self._settings:
return self._settings['username']
return self._username
@property
def credentials(self):
return (self.username, self.password)
@property
def token(self):
if 'token' in self._settings:
return self._settings['token']
return None
@property
def url(self):
if 'ssc_url' in self._settings:
return self._settings['ssc_url']
return self._url
@property
def proxies(self):
if 'proxies' in self._settings:
return self._settings['proxies']
return None
def setup_proxy(self, client):
if self.proxies:
client._api.proxies = self.proxies
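# Usage sketch (illustrative only, not part of the original module): the test
# suite can either read settings-test.yaml or fall back to the PYSSC_*
# environment variables, e.g.
#
#   constants = Constants()
#   url = constants.url                      # from ssc_url / PYSSC_URL
#   user, password = constants.credentials   # prompts for a password if none is configured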
|
[] |
[] |
[
"PYSSC_USERNAME",
"PYSSC_PASSWORD",
"PYSSC_URL",
"PYSSC_TOKEN"
] |
[]
|
["PYSSC_USERNAME", "PYSSC_PASSWORD", "PYSSC_URL", "PYSSC_TOKEN"]
|
python
| 4 | 0 | |
submit_jobs.py
|
'''
Python script that takes arguments (hyperparameters of RNN) and automatically writes job files for submission on WashU cluster
written in Python 3.8.3
@ Elham
'''
import os
import sys
import argparse
import subprocess
import itertools
import json
#'g':1.2, 'pg':0.1, 'n_train':1000, 'encoding':[0.5,1.5,1], 'seed':0, 'init_dist':'Gauss'
parser = argparse.ArgumentParser()
parser.add_argument('-g', nargs='+', type=float)
parser.add_argument('-pg', nargs='+', type=float)
parser.add_argument('-fb', '--fb_var', nargs='+', type=float)
parser.add_argument('-in','--input_var', nargs='+', type=float)
parser.add_argument('-n','--n_train', nargs='+', type=int)
parser.add_argument('-e', '--encoding', nargs='+', type=float)
parser.add_argument('-s', '--seed', nargs='+', type=int)
parser.add_argument('-i', '--init_dist', nargs='+', type=str)
arg = parser.parse_args()
sbatchpath = './'
scratchpath = '/scratch/elham/results3500c/' #outputfile
#print('path', scratchpath)
def write_jobfiles(cmd, jobname, sbatchpath, scratchpath, nodes=1, ppn=1, gpus=0, mem=32):
jobfile = os.path.join(sbatchpath, jobname + '.pbs')
logname = os.path.join('log', jobname)
with open(jobfile, 'w') as f:
f.write(
'#! /bin/bash\n'
+ '\n'
+ '#PBS -N {}\n'.format(jobname)
#+ '#PBS -M [email protected]\n'
#+ '#PBS -m abe\n'
+ '#PBS -l nodes={}:ppn={},mem={}gb,walltime=23:30:00\n'.format(nodes, ppn, mem)
+ '#PBS -o {}{}.o\n'.format(scratchpath, jobname)
+ '#PBS -e {}{}.e\n'.format(scratchpath, jobname)
+ 'cd ./rnn\n'
+ 'export PATH=/export/Anaconda3-2020.02/bin:$PATH\n'
+ 'source activate myenv\n'
+ '{}\n'.format(cmd)
+ '{} >> {}.o 2>&1\n'.format(cmd, 'all_logs')
+ 'pwd\n'
+ 'echo $PATH\n'
+ 'echo {} >> {}.log 2>&1\n'.format(jobname,'all_names' )
+ 'exit 0;\n'
)
return jobfile
def get_params(**kwargs):
all = list()
kwargs['encoding']=[kwargs['encoding']]
keys = list(kwargs)
for values in itertools.product(*map(kwargs.get, keys)):
all_param = dict(zip(keys, values))
all.append(all_param)
return all
all = get_params(**vars(arg))
for param in all:
jobname = '_'.join(['{}'.format(val) if type(val) != list else '{}'.format(''.join([str(s) for s in val])) for key, val in param.items()])
jparam = json.dumps(param)
cmd = 'python train_posthoc_clst.py -d ' + '\''+ str(jparam) + '\''
jobfile = write_jobfiles(cmd, jobname, sbatchpath, scratchpath, gpus=0)
subprocess.call(['qsub', jobfile])
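# Example invocation (illustrative; the concrete hyperparameter values are
# assumptions based on the defaults noted at the top of this script):
#
#   python submit_jobs.py -g 1.2 -pg 0.1 -n 1000 -e 0.5 1.5 1 -s 0 1 2 -i Gauss
#
# get_params() expands the cartesian product of all supplied values, and one
# .pbs job file is written and submitted via qsub for every combination.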
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
apiclient_test.go
|
package darksky_test
import (
"os"
"testing"
"time"
"github.com/sxg/DarkSky"
)
func TestNewAPIClient(t *testing.T) {
var client = darksky.NewAPIClient(os.Getenv("FORECAST_IO_API_KEY"))
if client == nil {
t.Error("couldn't create a new API client")
}
}
func TestAPIClientSetUnits(t *testing.T) {
var client = darksky.NewAPIClient(os.Getenv("FORECAST_IO_API_KEY"))
client.Units = darksky.UnitsSI
if client.Units != darksky.UnitsSI {
t.Error("couldn't set units on API client")
}
}
func TestAPIClientSetLanguage(t *testing.T) {
var client = darksky.NewAPIClient(os.Getenv("FORECAST_IO_API_KEY"))
client.Language = darksky.LanguageFrench
if client.Language != darksky.LanguageFrench {
t.Error("couldn't set language on API client")
}
}
func TestGetForecast(t *testing.T) {
var client = darksky.NewAPIClient(os.Getenv("FORECAST_IO_API_KEY"))
_, err := client.GetForecast(30, 30)
if err != nil {
t.Error(err)
}
}
func TestGetForecastAtTime(t *testing.T) {
var client = darksky.NewAPIClient(os.Getenv("FORECAST_IO_API_KEY"))
_, err := client.GetForecastAtTime(30, 30, time.Now())
if err != nil {
t.Error(err)
}
}
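// Note (added for clarity, not part of the original file): every test above
// reads the API key from the FORECAST_IO_API_KEY environment variable and
// performs live requests, so the suite would typically be run as, for example:
//
//	FORECAST_IO_API_KEY=<your key> go test ./...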
|
[
"\"FORECAST_IO_API_KEY\"",
"\"FORECAST_IO_API_KEY\"",
"\"FORECAST_IO_API_KEY\"",
"\"FORECAST_IO_API_KEY\"",
"\"FORECAST_IO_API_KEY\""
] |
[] |
[
"FORECAST_IO_API_KEY"
] |
[]
|
["FORECAST_IO_API_KEY"]
|
go
| 1 | 0 | |
python/aswfdocker/builder.py
|
# Copyright (c) Contributors to the aswf-docker Project. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
CI Image and Package Builder
"""
import logging
import subprocess
import json
import os
import tempfile
import typing
from aswfdocker import constants, aswfinfo, utils, groupinfo, index
logger = logging.getLogger(__name__)
class Builder:
"""Builder generates a "docker buildx bake" json file to drive the parallel builds of Docker images."""
def __init__(
self,
build_info: aswfinfo.ASWFInfo,
group_info: groupinfo.GroupInfo,
push: bool = False,
use_conan: bool = False,
):
self.push = push
self.build_info = build_info
self.group_info = group_info
self.use_conan = use_conan
self.index = index.Index()
def make_bake_dict(self) -> typing.Dict[str, dict]:
root: typing.Dict[str, dict] = {}
root["target"] = {}
versions_to_bake = set()
for image, version in self.group_info.iter_images_versions():
use_conan = self.group_info.type == constants.ImageType.PACKAGE and (
self.use_conan
or self.index.is_conan_only_package(image.replace("ci-package-", ""))
)
major_version = utils.get_major_version(version)
version_info = self.index.version_info(major_version)
if self.group_info.type == constants.ImageType.PACKAGE:
image_base = image.replace("ci-package-", "")
group = self.index.get_group_from_image(
self.group_info.type, image_base
)
if use_conan:
if version in versions_to_bake:
# Only one version per image needed
continue
if version_info.ci_common_version != major_version:
# Only bake images for ci_common!
version = version_info.ci_common_version
major_version = utils.get_major_version(version)
versions_to_bake.add(version)
tags = list(
map(
lambda tag: f"{constants.DOCKER_REGISTRY}/{self.build_info.docker_org}"
+ f"/ci-centos7-gl-conan:{tag}",
[version, major_version],
)
)
docker_file = "packages/common/Dockerfile"
else:
tags = version_info.get_tags(
version,
self.build_info.docker_org,
image,
extra_suffix=version_info.package_versions.get(
"ASWF_"
+ image.replace("ci-package-", "").upper()
+ "_VERSION"
),
)
docker_file = f"packages/{group}/Dockerfile"
else:
tags = version_info.get_tags(version, self.build_info.docker_org, image)
docker_file = f"{image}/Dockerfile"
if version_info.ci_common_version == major_version:
channel = f"ci_common{major_version}"
else:
channel = f"vfx{version_info.major_version}"
args = {
"ASWF_ORG": self.build_info.docker_org,
"ASWF_PKG_ORG": self.build_info.package_org,
"ASWF_VERSION": version,
"CI_COMMON_VERSION": version_info.ci_common_version,
"ASWF_CONAN_CHANNEL": channel,
}
args.update(version_info.all_package_versions)
target_dict = {
"context": ".",
"dockerfile": docker_file,
"args": args,
"labels": {
"org.opencontainers.image.created": self.build_info.build_date,
"org.opencontainers.image.revision": self.build_info.vcs_ref,
},
"tags": tags,
"output": ["type=registry,push=true" if self.push else "type=docker"],
}
if self.group_info.type == constants.ImageType.PACKAGE:
if use_conan:
target_dict["target"] = "ci-centos7-gl-conan"
else:
target_dict["target"] = image
root["target"][f"{image}-{major_version}"] = target_dict
root["group"] = {"default": {"targets": list(root["target"].keys())}}
return root
def make_bake_jsonfile(self) -> typing.Optional[str]:
d = self.make_bake_dict()
if not d["group"]["default"]["targets"]:
return None
groups = "-".join(self.group_info.names)
versions = "-".join(self.group_info.versions)
path = os.path.join(
tempfile.gettempdir(),
f"docker-bake-{self.group_info.type.name}-{groups}-{versions}.json",
)
with open(path, "w", encoding="utf-8") as f:
json.dump(d, f, indent=4, sort_keys=True)
return path
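# Shape of the generated bake file (an illustrative sketch derived from
# make_bake_dict above; the target name, tags and argument values shown here
# are assumptions, not real output):
#
#   {
#     "group": {"default": {"targets": ["ci-common-2021"]}},
#     "target": {
#       "ci-common-2021": {
#         "context": ".",
#         "dockerfile": "ci-common/Dockerfile",
#         "args": {"ASWF_ORG": "<org>", "ASWF_VERSION": "2021.1", ...},
#         "labels": {"org.opencontainers.image.created": "...", ...},
#         "tags": ["<registry>/<org>/ci-common:2021.1", "<registry>/<org>/ci-common:2021"],
#         "output": ["type=docker"]
#       }
#     }
#   }
#
# "docker buildx bake -f <file>" (see build() below) then builds every target
# in parallel.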
def _run(self, cmd: str, dry_run: bool):
if dry_run:
logger.info("Would run: '%s'", cmd)
else:
logger.info("Building: '%s'", cmd)
subprocess.run(cmd, shell=True, check=True, cwd=self.build_info.repo_root)
def _run_in_docker(self, base_cmd, cmd, dry_run):
self._run(
" ".join(base_cmd + cmd),
dry_run=dry_run,
)
def _get_conan_env_vars(self, version_info):
envs = {
"CONAN_USER_HOME": constants.CONAN_USER_HOME,
"CCACHE_DIR": "/tmp/ccache",
"CONAN_NON_INTERACTIVE": "1",
}
if "CONAN_LOGIN_USERNAME" in os.environ:
envs["CONAN_LOGIN_USERNAME"] = os.environ["CONAN_PASSWORD"]
if "ARTIFACTORY_USER" in os.environ:
envs["CONAN_LOGIN_USERNAME"] = os.environ["ARTIFACTORY_USER"]
if "CONAN_PASSWORD" in os.environ:
envs["CONAN_PASSWORD"] = os.environ["CONAN_PASSWORD"]
if "ARTIFACTORY_TOKEN" in os.environ:
envs["CONAN_PASSWORD"] = os.environ["ARTIFACTORY_TOKEN"]
for name, value in version_info.all_package_versions.items():
envs[name] = value
return envs
def _get_conan_vols(self):
conan_base = os.path.join(utils.get_git_top_level(), "packages", "conan")
vols = {
os.path.join(conan_base, "settings"): os.path.join(
constants.CONAN_USER_HOME, ".conan"
),
os.path.join(conan_base, "data"): os.path.join(
constants.CONAN_USER_HOME, "d"
),
os.path.join(conan_base, "recipes"): os.path.join(
constants.CONAN_USER_HOME, "recipes"
),
os.path.join(conan_base, "ccache"): "/tmp/ccache",
}
return vols
def _get_conan_base_cmd(self, version_info):
base_cmd = ["docker", "run"]
for name, value in self._get_conan_env_vars(version_info).items():
base_cmd.append("-e")
base_cmd.append(f"{name}={value}")
for name, value in self._get_conan_vols().items():
base_cmd.append("-v")
base_cmd.append(f"{name}:{value}")
tag = (
f"{constants.DOCKER_REGISTRY}/{self.build_info.docker_org}"
+ f"/ci-centos7-gl-conan:{version_info.ci_common_version}"
)
base_cmd.append(tag)
return base_cmd
def _build_conan_package(
self,
image,
version,
dry_run,
keep_source,
keep_build,
conan_login,
build_missing,
):
major_version = utils.get_major_version(version)
version_info = self.index.version_info(major_version)
base_cmd = self._get_conan_base_cmd(version_info)
if conan_login:
self._run_in_docker(
base_cmd,
[
"conan",
"user",
"-p",
"-r",
self.build_info.docker_org,
],
dry_run,
)
self._run_in_docker(
base_cmd,
[
"conan",
"config",
"set",
f"general.default_profile={version_info.conan_profile}",
],
dry_run,
)
full_version = version_info.package_versions.get(
"ASWF_" + image.upper() + "_VERSION"
)
conan_version = (
f"{image}/{full_version}"
f"@{self.build_info.docker_org}/{version_info.conan_profile}"
)
build_cmd = [
"conan",
"create",
os.path.join(constants.CONAN_USER_HOME, "recipes", image),
conan_version,
]
if keep_source:
build_cmd.append("--keep-source")
if keep_build:
build_cmd.append("--keep-build")
if build_missing:
build_cmd.append("--build=missing")
self._run_in_docker(
base_cmd,
build_cmd,
dry_run,
)
alias_version = (
f"{image}/latest"
f"@{self.build_info.docker_org}/{version_info.conan_profile}"
)
self._run_in_docker(
base_cmd,
[
"conan",
"alias",
alias_version,
conan_version,
],
dry_run,
)
if self.push:
self._run_in_docker(
base_cmd,
[
"conan",
"upload",
"--all",
"-r",
self.build_info.docker_org,
conan_version,
],
dry_run,
)
self._run_in_docker(
base_cmd,
[
"conan",
"upload",
"--all",
"-r",
self.build_info.docker_org,
alias_version,
],
dry_run,
)
def build(
self,
dry_run: bool = False,
progress: str = "",
keep_source=False,
keep_build=False,
conan_login=False,
build_missing=False,
) -> None:
images_and_versions = []
for image, version in self.group_info.iter_images_versions(get_image=True):
if (
self.group_info.type == constants.ImageType.PACKAGE
and not self.use_conan
and self.index.is_conan_only_package(image)
):
logger.warning("Skipping %s as it is a conan-only package!", image)
continue
images_and_versions.append((image, version))
if not images_and_versions:
return
path = self.make_bake_jsonfile()
if path:
self._run(
f"docker buildx bake -f {path} --progress {progress}", dry_run=dry_run
)
if not self.use_conan or self.group_info.type == constants.ImageType.IMAGE:
return
conan_base = os.path.join(utils.get_git_top_level(), "packages", "conan")
for image, version in images_and_versions:
recipe_path = os.path.join(conan_base, "recipes", image)
if not os.path.exists(recipe_path):
logger.warning("Recipe for %s not found: skipping!", image)
continue
self._build_conan_package(
image,
version,
dry_run,
keep_source,
keep_build,
conan_login,
build_missing,
)
|
[] |
[] |
[
"CONAN_PASSWORD",
"ARTIFACTORY_USER",
"ARTIFACTORY_TOKEN"
] |
[]
|
["CONAN_PASSWORD", "ARTIFACTORY_USER", "ARTIFACTORY_TOKEN"]
|
python
| 3 | 0 | |
pkg/diskmaker/discovery/discovery_test.go
|
package discovery
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"testing"
"github.com/openshift/local-storage-operator/pkg/apis/local/v1alpha1"
"github.com/openshift/local-storage-operator/pkg/diskmaker"
"github.com/openshift/local-storage-operator/pkg/internal"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/api/resource"
)
func TestHelperProcess(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
fmt.Fprintf(os.Stdout, os.Getenv("STDOUT"))
os.Exit(0)
}
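// Note (added for clarity; behaviour inferred from the test cases below):
// TestHelperProcess is not a real test. internal.FakeExecCommand re-executes
// the test binary with GO_WANT_HELPER_PROCESS=1 so that this function can
// stand in for external commands such as lsblk, echoing the canned output the
// tests place in internal.ExecResult (passed to the subprocess via the STDOUT
// environment variable).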
func TestDiscoverDevices(t *testing.T) {
testcases := []struct {
label string
deviceDiscovery *DeviceDiscovery
fakeExecCmdOutput string
fakeGlobfunc func(string) ([]string, error)
errMessage error
}{
{
label: "Case 1",
deviceDiscovery: getFakeDeviceDiscovery(),
fakeExecCmdOutput: `NAME="sda" KNAME="sda" ROTA="1" TYPE="disk" SIZE="62914560000" MODEL="VBOX HARDDISK" VENDOR="ATA" RO="0" RM="0" STATE="running" FSTYPE="" SERIAL=""` + "\n" +
`NAME="sda1" KNAME="sda1" ROTA="1" TYPE="part" SIZE="62913494528" MODEL="" VENDOR="" RO="0" RM="0" STATE="" FSTYPE="" SERIAL=""`,
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"removable", "subsytem", "sda"}, nil
},
errMessage: nil,
},
}
for _, tc := range testcases {
internal.ExecResult = tc.fakeExecCmdOutput
internal.ExecCommand = internal.FakeExecCommand
internal.FilePathGlob = tc.fakeGlobfunc
defer func() {
internal.FilePathGlob = filepath.Glob
internal.ExecCommand = exec.Command
}()
err := tc.deviceDiscovery.discoverDevices()
assert.NoError(t, err)
}
}
func TestDiscoverDevicesFail(t *testing.T) {
testcases := []struct {
label string
deviceDiscovery *DeviceDiscovery
mockClient *diskmaker.MockAPIUpdater
fakeExecCmdOutput string
fakeGlobfunc func(string) ([]string, error)
errMessage error
}{
{
label: "Case 1",
deviceDiscovery: getFakeDeviceDiscovery(),
mockClient: &diskmaker.MockAPIUpdater{
MockUpdateDiscoveryResultStatus: func(lvdr *v1alpha1.LocalVolumeDiscoveryResult) error {
return fmt.Errorf("failed to update status")
},
},
fakeExecCmdOutput: `NAME="sda" KNAME="sda" ROTA="1" TYPE="disk" SIZE="62914560000" MODEL="VBOX HARDDISK" VENDOR="ATA" RO="1" RM="0" STATE="running" FSTYPE="" SERIAL=""` + "\n" +
`NAME="sda1" KNAME="sda1" ROTA="1" TYPE="part" SIZE="62913494528" MODEL="" VENDOR="" RO="0" RM="0" STATE="" FSTYPE="" SERIAL=""`,
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"removable", "subsytem"}, nil
},
errMessage: nil,
},
}
for _, tc := range testcases {
internal.ExecResult = tc.fakeExecCmdOutput
internal.ExecCommand = internal.FakeExecCommand
internal.FilePathGlob = tc.fakeGlobfunc
defer func() {
internal.FilePathGlob = filepath.Glob
internal.ExecCommand = exec.Command
}()
tc.deviceDiscovery.apiClient = tc.mockClient
err := tc.deviceDiscovery.discoverDevices()
assert.Error(t, err)
}
}
func TestIgnoreDevices(t *testing.T) {
testcases := []struct {
label string
blockDevice internal.BlockDevice
fakeGlobfunc func(string) ([]string, error)
expected bool
errMessage error
}{
{
label: "Case 1",
blockDevice: internal.BlockDevice{
Name: "sdb",
KName: "sdb",
ReadOnly: "0",
State: "running",
Type: "disk",
},
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"removable", "subsytem"}, nil
},
expected: false,
errMessage: fmt.Errorf("ignored wrong device"),
},
{
label: "Case 2",
blockDevice: internal.BlockDevice{
Name: "sdb",
KName: "sdb",
ReadOnly: "1",
State: "running",
Type: "disk",
},
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"removable", "subsytem"}, nil
},
expected: true,
errMessage: fmt.Errorf("failed to ignore read only device"),
},
{
label: "Case 3",
blockDevice: internal.BlockDevice{
Name: "sdb",
KName: "sdb",
ReadOnly: "0",
State: "suspended",
Type: "disk",
},
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"removable", "subsytem"}, nil
},
expected: true,
errMessage: fmt.Errorf("ignored wrong suspended device"),
},
{
label: "Case 4",
blockDevice: internal.BlockDevice{
Name: "sdb",
KName: "sdb",
ReadOnly: "0",
State: "running",
Type: "disk",
},
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"removable", "subsytem", "sdb"}, nil
},
expected: true,
errMessage: fmt.Errorf("failed to ignore root device with children"),
},
}
for _, tc := range testcases {
internal.FilePathGlob = tc.fakeGlobfunc
defer func() {
internal.FilePathGlob = filepath.Glob
}()
actual := ignoreDevices(tc.blockDevice)
assert.Equalf(t, tc.expected, actual, "[%s]: %s", tc.label, tc.errMessage)
}
}
func TestValidBlockDevices(t *testing.T) {
testcases := []struct {
label string
blockDevices []internal.BlockDevice
fakeExecCmdOutput string
fakeGlobfunc func(string) ([]string, error)
expectedDiscoveredDeviceSize int
errMessage error
}{
{
label: "Case 1",
fakeExecCmdOutput: `NAME="sda" KNAME="sda" ROTA="1" TYPE="disk" SIZE="62914560000" MODEL="VBOX HARDDISK" VENDOR="ATA" RO="1" RM="0" STATE="running" FSTYPE="" SERIAL=""` + "\n" +
`NAME="sda1" KNAME="sda1" ROTA="1" TYPE="part" SIZE="62913494528" MODEL="" VENDOR="" RO="0" RM="0" STATE="" FSTYPE="" SERIAL=""`,
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"removable", "subsytem"}, nil
},
expectedDiscoveredDeviceSize: 1,
errMessage: fmt.Errorf("failed to ignore readonly device sda"),
},
{
label: "Case 2",
fakeExecCmdOutput: `NAME="sda" KNAME="sda" ROTA="1" TYPE="disk" SIZE="62914560000" MODEL="VBOX HARDDISK" VENDOR="ATA" RO="0" RM="0" STATE="running" FSTYPE="" SERIAL=""` + "\n" +
`NAME="sda1" KNAME="sda1" ROTA="1" TYPE="part" SIZE="62913494528" MODEL="" VENDOR="" RO="0" RM="0" STATE="" FSTYPE="" SERIAL=""`,
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"removable", "subsytem", "sda"}, nil
},
expectedDiscoveredDeviceSize: 1,
errMessage: fmt.Errorf("failed to ignore root device sda with partition"),
},
{
label: "Case 3",
fakeExecCmdOutput: `NAME="sda" KNAME="sda" ROTA="1" TYPE="loop" SIZE="62914560000" MODEL="VBOX HARDDISK" VENDOR="ATA" RO="0" RM="0" STATE="running" FSTYPE="" SERIAL=""` + "\n" +
`NAME="sda1" KNAME="sda1" ROTA="1" TYPE="part" SIZE="62913494528" MODEL="" VENDOR="" RO="0" RM="0" STATE="" FSTYPE="" SERIAL=""`,
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"removable", "subsytem"}, nil
},
expectedDiscoveredDeviceSize: 1,
errMessage: fmt.Errorf("failed to ignore device sda with type loop"),
},
{
label: "Case 4",
fakeExecCmdOutput: `NAME="sda" KNAME="sda" ROTA="1" TYPE="disk" SIZE="62914560000" MODEL="VBOX HARDDISK" VENDOR="ATA" RO="0" RM="0" STATE="running" FSTYPE="" SERIAL=""` + "\n" +
`NAME="sda1" KNAME="sda1" ROTA="1" TYPE="part" SIZE="62913494528" MODEL="" VENDOR="" RO="0" RM="0" STATE="suspended" FSTYPE="" SERIAL=""`,
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"removable", "subsytem"}, nil
},
expectedDiscoveredDeviceSize: 1,
errMessage: fmt.Errorf("failed to ignore child device sda1 in suspended state"),
},
}
for _, tc := range testcases {
internal.ExecResult = tc.fakeExecCmdOutput
internal.ExecCommand = internal.FakeExecCommand
internal.FilePathGlob = tc.fakeGlobfunc
defer func() {
internal.FilePathGlob = filepath.Glob
internal.ExecCommand = exec.Command
}()
actual, err := getValidBlockDevices()
assert.NoError(t, err)
assert.Equalf(t, tc.expectedDiscoveredDeviceSize, len(actual), "[%s]: %s", tc.label, tc.errMessage)
}
}
func TestGetDiscoveredDevices(t *testing.T) {
testcases := []struct {
label string
blockDevices []internal.BlockDevice
expected []v1alpha1.DiscoveredDevice
fakeGlobfunc func(string) ([]string, error)
fakeEvalSymlinkfunc func(string) (string, error)
}{
{
label: "Case 1",
blockDevices: []internal.BlockDevice{
{
Name: "sdb",
KName: "sdb",
FSType: "ext4",
Type: "disk",
Size: "62914560000",
Model: "VBOX HARDDISK",
Vendor: "ATA",
Serial: "DEVICE_SERIAL_NUMBER",
Rotational: "1",
ReadOnly: "0",
Removable: "0",
State: "running",
},
},
expected: []v1alpha1.DiscoveredDevice{
{
DeviceID: "/dev/disk/by-id/sdb",
Path: "/dev/sdb",
Model: "VBOX HARDDISK",
Type: "disk",
Vendor: "ATA",
Serial: "DEVICE_SERIAL_NUMBER",
Size: resource.MustParse("62914560000"),
Property: "Rotational",
FSType: "ext4",
Status: v1alpha1.DeviceStatus{State: "NotAvailable"},
},
},
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"/dev/disk/by-id/sdb"}, nil
},
fakeEvalSymlinkfunc: func(path string) (string, error) {
return "/dev/disk/by-id/sdb", nil
},
},
{
label: "Case 2",
blockDevices: []internal.BlockDevice{
{
Name: "sda1",
KName: "sda1",
FSType: "ext4",
Type: "part",
Size: "62913494528",
Model: "",
Vendor: "",
Serial: "",
Rotational: "0",
ReadOnly: "0",
Removable: "0",
State: "running",
},
},
expected: []v1alpha1.DiscoveredDevice{
{
DeviceID: "/dev/disk/by-id/sda1",
Path: "/dev/sda1",
Model: "",
Type: "part",
Vendor: "",
Serial: "",
Size: resource.MustParse("62913494528"),
Property: "NonRotational",
FSType: "ext4",
Status: v1alpha1.DeviceStatus{State: "NotAvailable"},
},
},
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"/dev/disk/by-id/sda1"}, nil
},
fakeEvalSymlinkfunc: func(path string) (string, error) {
return "/dev/disk/by-id/sda1", nil
},
},
{
label: "Case 3",
blockDevices: []internal.BlockDevice{
{
Name: "sda1",
KName: "sda1",
FSType: "",
Type: "part",
Size: "62913494528",
Model: "",
Vendor: "",
Serial: "",
Rotational: "0",
ReadOnly: "0",
Removable: "0",
State: "running",
PartLabel: "BIOS-BOOT",
},
},
expected: []v1alpha1.DiscoveredDevice{
{
DeviceID: "/dev/disk/by-id/sda1",
Path: "/dev/sda1",
Model: "",
Type: "part",
Vendor: "",
Serial: "",
Size: resource.MustParse("62913494528"),
Property: "NonRotational",
FSType: "",
Status: v1alpha1.DeviceStatus{State: "NotAvailable"},
},
},
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"/dev/disk/by-id/sda1"}, nil
},
fakeEvalSymlinkfunc: func(path string) (string, error) {
return "/dev/disk/by-id/sda1", nil
},
},
{
label: "Case 4",
blockDevices: []internal.BlockDevice{
{
Name: "sda1",
KName: "sda1",
FSType: "",
Type: "part",
Size: "62913494528",
Model: "",
Vendor: "",
Serial: "",
Rotational: "0",
ReadOnly: "0",
Removable: "0",
State: "running",
PartLabel: "EFI-SYSTEM",
},
},
expected: []v1alpha1.DiscoveredDevice{
{
DeviceID: "/dev/disk/by-id/sda1",
Path: "/dev/sda1",
Model: "",
Type: "part",
Vendor: "",
Serial: "",
Size: resource.MustParse("62913494528"),
Property: "NonRotational",
FSType: "",
Status: v1alpha1.DeviceStatus{State: "NotAvailable"},
},
},
fakeGlobfunc: func(name string) ([]string, error) {
return []string{"/dev/disk/by-id/sda1"}, nil
},
fakeEvalSymlinkfunc: func(path string) (string, error) {
return "/dev/disk/by-id/sda1", nil
},
},
}
for _, tc := range testcases {
internal.FilePathGlob = tc.fakeGlobfunc
internal.FilePathEvalSymLinks = tc.fakeEvalSymlinkfunc
defer func() {
internal.FilePathGlob = filepath.Glob
internal.FilePathEvalSymLinks = filepath.EvalSymlinks
}()
actual := getDiscoverdDevices(tc.blockDevices)
for i := 0; i < len(tc.expected); i++ {
assert.Equalf(t, tc.expected[i].DeviceID, actual[i].DeviceID, "[%s: Discovered Device: %d]: invalid device ID", tc.label, i+1)
assert.Equalf(t, tc.expected[i].Path, actual[i].Path, "[%s: Discovered Device: %d]: invalid device path", tc.label, i+1)
assert.Equalf(t, tc.expected[i].Model, actual[i].Model, "[%s: Discovered Device: %d]: invalid device model", tc.label, i+1)
assert.Equalf(t, tc.expected[i].Type, actual[i].Type, "[%s: Discovered Device: %d]: invalid device type", tc.label, i+1)
assert.Equalf(t, tc.expected[i].Vendor, actual[i].Vendor, "[%s: Discovered Device: %d]: invalid device vendor", tc.label, i+1)
assert.Equalf(t, tc.expected[i].Serial, actual[i].Serial, "[%s: Discovered Device: %d]: invalid device serial", tc.label, i+1)
assert.Equalf(t, tc.expected[i].Size, actual[i].Size, "[%s: Discovered Device: %d]: invalid device size", tc.label, i+1)
assert.Equalf(t, tc.expected[i].Property, actual[i].Property, "[%s: Discovered Device: %d]: invalid device property", tc.label, i+1)
assert.Equalf(t, tc.expected[i].FSType, actual[i].FSType, "[%s: Discovered Device: %d]: invalid device filesystem", tc.label, i+1)
assert.Equalf(t, tc.expected[i].Status, actual[i].Status, "[%s: Discovered Device: %d]: invalid device status", tc.label, i+1)
}
}
}
func TestParseDeviceType(t *testing.T) {
testcases := []struct {
label string
input string
expected v1alpha1.DeviceType
}{
{
label: "Case 1",
input: "disk",
expected: v1alpha1.RawDisk,
},
{
label: "Case 2",
input: "part",
expected: v1alpha1.Partition,
},
{
label: "Case 3",
input: "loop",
expected: "",
},
}
for _, tc := range testcases {
actual := parseDeviceType(tc.input)
assert.Equalf(t, tc.expected, actual, "[%s]: failed to parse device type", tc.label)
}
}
func TestParseDeviceProperty(t *testing.T) {
testcases := []struct {
label string
input string
expected v1alpha1.DeviceMechanicalProperty
}{
{
label: "Case 1",
input: "1",
expected: v1alpha1.Rotational,
},
{
label: "Case 2",
input: "0",
expected: v1alpha1.NonRotational,
},
{
label: "Case 3",
input: "2",
expected: "",
},
}
for _, tc := range testcases {
actual := parseDeviceProperty(tc.input)
assert.Equalf(t, tc.expected, actual, "[%s]: failed to parse device mechanical property", tc.label)
}
}
func getFakeDeviceDiscovery() *DeviceDiscovery {
dd := &DeviceDiscovery{}
dd.apiClient = &diskmaker.MockAPIUpdater{}
dd.eventSync = diskmaker.NewEventReporter(dd.apiClient)
dd.disks = []v1alpha1.DiscoveredDevice{}
dd.localVolumeDiscovery = &v1alpha1.LocalVolumeDiscovery{}
return dd
}
func setEnv() {
os.Setenv("MY_NODE_NAME", "node1")
os.Setenv("WATCH_NAMESPACE", "ns")
os.Setenv("UID", "uid")
os.Setenv("POD_NAME", "pod123")
}
func unsetEnv() {
os.Unsetenv("MY_NODE_NAME")
os.Unsetenv("WATCH_NAMESPACE")
os.Unsetenv("UID")
os.Unsetenv("POD_NAME")
}
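// The fake exec wiring used above (internal.ExecCommand / internal.ExecResult) follows the
// common Go helper-process pattern. The sketch below is an assumption of how such a fake is
// typically implemented, not necessarily this repo's exact code: the command is rerouted to
// the test binary with GO_WANT_HELPER_PROCESS=1, and the canned lsblk output travels through
// the STDOUT environment variable.
var execResultSketch string
func fakeExecCommandSketch(command string, args ...string) *exec.Cmd {
cs := append([]string{"-test.run=TestHelperProcessSketch", "--", command}, args...)
cmd := exec.Command(os.Args[0], cs...)
cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1", "STDOUT="+execResultSketch)
return cmd
}
func TestHelperProcessSketch(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
// Echo the canned output so the parent process reads it as the command's stdout.
fmt.Fprint(os.Stdout, os.Getenv("STDOUT"))
os.Exit(0)
}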
|
[
"\"GO_WANT_HELPER_PROCESS\"",
"\"STDOUT\""
] |
[] |
[
"STDOUT",
"GO_WANT_HELPER_PROCESS"
] |
[]
|
["STDOUT", "GO_WANT_HELPER_PROCESS"]
|
go
| 2 | 0 | |
vendor/github.com/code-ready/crc/pkg/crc/constants/constants.go
|
package constants
import (
"fmt"
"os"
"path/filepath"
"runtime"
"github.com/code-ready/crc/pkg/crc/version"
)
const (
DefaultName = "crc"
DefaultCPUs = 4
DefaultMemory = 9216
DefaultSSHPort = 22
DefaultSSHUser = "core"
CrcEnvPrefix = "CRC"
DefaultWebConsoleURL = "https://console-openshift-console.apps-crc.testing"
DefaultAPIURL = "https://api.crc.testing:6443"
DefaultDiskImage = "crc.disk"
DefaultLogLevel = "info"
ConfigFile = "crc.json"
LogFile = "crc.log"
DaemonLogFile = "crcd.log"
GlobalStateFile = "globalstate.json"
CrcLandingPageURL = "https://cloud.redhat.com/openshift/install/crc/installer-provisioned" // #nosec G101
PullSecretFile = "pullsecret.json"
DefaultOcUrlBase = "https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest"
DefaultPodmanUrlBase = "https://storage.googleapis.com/libpod-master-releases"
CrcTrayDownloadURL = "https://github.com/code-ready/tray-macos/releases/download/v%s/crc-tray-macos.tar.gz"
DefaultContext = "admin"
)
var ocUrlForOs = map[string]string{
"darwin": fmt.Sprintf("%s/%s", DefaultOcUrlBase, "macosx/oc.tar.gz"),
"linux": fmt.Sprintf("%s/%s", DefaultOcUrlBase, "linux/oc.tar.gz"),
"windows": fmt.Sprintf("%s/%s", DefaultOcUrlBase, "windows/oc.zip"),
}
func GetOcUrlForOs(os string) string {
return ocUrlForOs[os]
}
func GetOcUrl() string {
return GetOcUrlForOs(runtime.GOOS)
}
var podmanUrlForOs = map[string]string{
"darwin": fmt.Sprintf("%s/%s", DefaultPodmanUrlBase, "podman-remote-latest-master-darwin-amd64.zip"),
"linux": fmt.Sprintf("%s/%s", DefaultPodmanUrlBase, "podman-remote-latest-master-linux---amd64.zip"),
"windows": fmt.Sprintf("%s/%s", DefaultPodmanUrlBase, "podman-remote-latest-master-windows-amd64.zip"),
}
func GetPodmanUrlForOs(os string) string {
return podmanUrlForOs[os]
}
func GetPodmanUrl() string {
return podmanUrlForOs[runtime.GOOS]
}
var defaultBundleForOs = map[string]string{
"darwin": fmt.Sprintf("crc_hyperkit_%s.crcbundle", version.GetBundleVersion()),
"linux": fmt.Sprintf("crc_libvirt_%s.crcbundle", version.GetBundleVersion()),
"windows": fmt.Sprintf("crc_hyperv_%s.crcbundle", version.GetBundleVersion()),
}
func GetDefaultBundleForOs(os string) string {
return defaultBundleForOs[os]
}
func GetDefaultBundle() string {
return GetDefaultBundleForOs(runtime.GOOS)
}
var (
CrcBaseDir = filepath.Join(GetHomeDir(), ".crc")
CrcBinDir = filepath.Join(CrcBaseDir, "bin")
ConfigPath = filepath.Join(CrcBaseDir, ConfigFile)
LogFilePath = filepath.Join(CrcBaseDir, LogFile)
DaemonLogFilePath = filepath.Join(CrcBaseDir, DaemonLogFile)
MachineBaseDir = CrcBaseDir
MachineCertsDir = filepath.Join(MachineBaseDir, "certs")
MachineCacheDir = filepath.Join(MachineBaseDir, "cache")
MachineInstanceDir = filepath.Join(MachineBaseDir, "machines")
GlobalStatePath = filepath.Join(CrcBaseDir, GlobalStateFile)
DefaultBundlePath = filepath.Join(MachineCacheDir, GetDefaultBundle())
bundleEmbedded = "false"
DaemonSocketPath = filepath.Join(CrcBaseDir, "crc.sock")
)
// GetHomeDir returns the home directory for the current user
func GetHomeDir() string {
if runtime.GOOS == "windows" {
if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
homeDir := filepath.Join(homeDrive, homePath)
if _, err := os.Stat(homeDir); err == nil {
return homeDir
}
}
if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 {
if _, err := os.Stat(userProfile); err == nil {
return userProfile
}
}
}
return os.Getenv("HOME")
}
// EnsureBaseDirExists creates the ~/.crc dir if it's not there
func EnsureBaseDirExists() error {
_, err := os.Stat(CrcBaseDir)
if err != nil {
return os.Mkdir(CrcBaseDir, 0750)
}
return nil
}
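// bundleEmbedded above is expected to be flipped at build time through the linker; an
// illustrative (assumed, not taken from this repo's build scripts) invocation would be:
//   go build -ldflags "-X github.com/code-ready/crc/pkg/crc/constants.bundleEmbedded=true" ./cmd/crc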
// BundleEmbedded returns true if the binary was compiled to contain the bundle
func BundleEmbedded() bool {
return bundleEmbedded == "true"
}
func GetPublicKeyPath() string {
return filepath.Join(MachineInstanceDir, DefaultName, "id_rsa.pub")
}
func GetPrivateKeyPath() string {
return filepath.Join(MachineInstanceDir, DefaultName, "id_rsa")
}
func GetCrcTrayDownloadURL() string {
return fmt.Sprintf(CrcTrayDownloadURL, version.GetCRCTrayVersion())
}
|
[
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"HOME\""
] |
[] |
[
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
] |
[]
|
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
|
go
| 4 | 0 | |
handlers.go
|
//
// This file is part of arduino-connector
//
// Copyright (C) 2017-2018 Arduino AG (http://www.arduino.cc/)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/eclipse/paho.mqtt.golang"
"github.com/kardianos/osext"
"github.com/kr/pty"
"github.com/nats-io/go-nats"
"github.com/pkg/errors"
"golang.org/x/crypto/ssh/terminal"
)
// StatusEvent replies with the current status of the arduino-connector
func (status *Status) StatusEvent(client mqtt.Client, msg mqtt.Message) {
status.Publish()
}
// UpdateEvent handles the connector autoupdate
// Any URL must be signed with Arduino private key
func (status *Status) UpdateEvent(client mqtt.Client, msg mqtt.Message) {
var info struct {
URL string `json:"url"`
Signature string `json:"signature"`
Token string `json:"token"`
}
err := json.Unmarshal(msg.Payload(), &info)
if err != nil {
status.Error("/update", errors.Wrapf(err, "unmarshal %s", msg.Payload()))
return
}
executablePath, _ := os.Executable()
name := filepath.Join(os.TempDir(), filepath.Base(executablePath))
err = downloadFile(name, info.URL, info.Token)
if err != nil {
status.Error("/update", errors.Wrap(err, "download binary "+info.URL))
return
}
err = downloadFile(name+".sig", info.URL+".sig", info.Token)
if err != nil {
status.Error("/update", errors.Wrap(err, "no signature file "+info.URL+".sig"))
return
}
// check the signature
err = checkGPGSig(name, name+".sig")
if err != nil {
status.Error("/update", errors.Wrap(err, "wrong signature "+info.URL+".sig"))
return
}
// chmod it
err = os.Chmod(name, 0755)
if err != nil {
status.Error("/update", errors.Wrapf(err, "chmod 755 %s", name))
return
}
os.Rename(executablePath, executablePath+".old")
// copy it over existing binary
err = copyFileAndRemoveOriginal(name, executablePath)
if err != nil {
// rollback
os.Rename(executablePath+".old", executablePath)
status.Error("/update", errors.Wrap(err, "error copying itself from "+name+" to "+executablePath))
return
}
os.Chmod(executablePath, 0755)
os.Remove(executablePath + ".old")
// leap of faith: kill itself, systemd should respawn the process
os.Exit(0)
}
// UploadEvent receives the url and name of the sketch binary, then it
// - downloads the binary,
// - chmods +x it
// - executes it, redirecting stdout and stderr to a proper logger
func (status *Status) UploadEvent(client mqtt.Client, msg mqtt.Message) {
var info struct {
ID string `json:"id"`
URL string `json:"url"`
Name string `json:"name"`
Token string `json:"token"`
}
err := json.Unmarshal(msg.Payload(), &info)
if err != nil {
status.Error("/upload", errors.Wrapf(err, "unmarshal %s", msg.Payload()))
return
}
if info.ID == "" {
info.ID = info.Name
}
// Stop and delete if existing
var sketch SketchStatus
if sketch, ok := status.Sketches[info.ID]; ok {
err = applyAction(sketch, "STOP", status)
if err != nil {
status.Error("/upload", errors.Wrapf(err, "stop pid %d", sketch.PID))
return
}
sketchFolder, err := getSketchFolder(status)
sketchPath := filepath.Join(sketchFolder, sketch.Name)
if _, err = os.Stat(sketchPath); !os.IsNotExist(err) {
err = os.Remove(sketchPath)
if err != nil {
status.Error("/upload", errors.Wrapf(err, "remove %d", sketch.Name))
return
}
}
}
folder, err := getSketchFolder(status)
if err != nil {
status.Error("/upload", errors.Wrapf(err, "create sketch folder %s", info.ID))
return
}
// download the binary
name := filepath.Join(folder, info.Name)
err = downloadFile(name, info.URL, info.Token)
if err != nil {
status.Error("/upload", errors.Wrapf(err, "download file %s", info.URL))
return
}
// download the binary sig
sigName := filepath.Join(folder, info.Name+".sig")
err = downloadFile(sigName, info.URL+".sig", info.Token)
if err != nil {
status.Error("/upload", errors.Wrapf(err, "download file signature %s", info.URL+".sig"))
return
}
sigFile, err := ioutil.ReadFile(sigName)
if err != nil {
status.Error("/upload", errors.Wrapf(err, "open file signature %s", info.URL))
return
}
binFile, err := ioutil.ReadFile(name)
if err != nil {
status.Error("/upload", errors.Wrapf(err, "open file for file signature %s", info.URL))
return
}
err = verifyBinary(binFile, sigFile, status.config.SignatureKey)
if err != nil {
status.Error("/upload", errors.Wrapf(err, "signature do not match %s", info.URL))
return
}
// chmod it
err = os.Chmod(name, 0700)
if err != nil {
status.Error("/upload", errors.Wrapf(err, "chmod 700 %s", name))
return
}
sketch.ID = info.ID
sketch.Name = info.Name
// save ID-Name to a sort of DB
insertSketchInDB(sketch.Name, sketch.ID, status)
// spawn process
pid, _, _, err := spawnProcess(name, &sketch, status)
if err != nil {
status.Error("/upload", errors.Wrapf(err, "spawn %s", name))
return
}
status.Info("/upload", "Sketch started with PID "+strconv.Itoa(pid))
sketch.PID = pid
sketch.Status = "RUNNING"
status.Set(info.ID, &sketch)
status.Publish()
// go func(stdout io.ReadCloser) {
// in := bufio.NewScanner(stdout)
// for {
// for in.Scan() {
// fmt.Printf(in.Text()) // write each line to your log, or anything you need
// }
// }
// }(stdout)
}
func getSketchFolder(status *Status) (string, error) {
// create folder if it doesn't exist
folder, err := osext.ExecutableFolder()
if status.config.SketchesPath != "" {
folder = status.config.SketchesPath
}
folder = filepath.Join(folder, "sketches")
if _, err := os.Stat(folder); os.IsNotExist(err) {
err = os.Mkdir(folder, 0700)
}
return folder, err
}
func getSketchDBFolder(status *Status) (string, error) {
// create folder if it doesn't exist
folder, err := getSketchFolder(status)
folder = filepath.Join(folder, "db")
if _, err := os.Stat(folder); os.IsNotExist(err) {
err = os.Mkdir(folder, 0700)
}
return folder, err
}
func getSketchDB(status *Status) (string, error) {
// create folder if it doesn't exist
folder, err := getSketchDBFolder(status)
if err != nil {
return "", err
}
db := filepath.Join(folder, "db")
return db, err
}
func insertSketchInDB(name string, id string, status *Status) {
// create folder if it doesn't exist
db, err := getSketchDB(status)
if err != nil {
return
}
var c []SketchBinding
raw, err := ioutil.ReadFile(db)
json.Unmarshal(raw, &c)
for _, element := range c {
if element.ID == id && element.Name == name {
return
}
}
c = append(c, SketchBinding{ID: id, Name: name})
data, _ := json.Marshal(c)
ioutil.WriteFile(db, data, 0600)
}
func getSketchIDFromDB(name string, status *Status) (string, error) {
// create folder if it doesn't exist
db, err := getSketchDB(status)
if err != nil {
return "", errors.New("Can't open DB")
}
var c []SketchBinding
raw, err := ioutil.ReadFile(db)
json.Unmarshal(raw, &c)
for _, element := range c {
if element.Name == name {
return element.ID, nil
}
}
return "", errors.New("No matching sketch")
}
// SketchEvent listens to commands to start and stop sketches
func (status *Status) SketchEvent(client mqtt.Client, msg mqtt.Message) {
var info struct {
ID string
Name string
Action string
}
err := json.Unmarshal(msg.Payload(), &info)
if err != nil {
status.Error("/sketch", errors.Wrapf(err, "unmarshal %s", msg.Payload()))
return
}
if info.ID == "" {
info.ID = info.Name
}
if sketch, ok := status.Sketches[info.ID]; ok {
err := applyAction(sketch, info.Action, status)
if err != nil {
status.Error("/sketch", errors.Wrapf(err, "applying %s to %s", info.Action, info.Name))
return
}
status.Info("/sketch", "successfully performed "+info.Action+" on sketch "+info.ID)
status.Set(info.ID, sketch)
status.Publish()
return
}
status.Error("/sketch", errors.New("sketch "+info.ID+" not found"))
}
func natsCloudCB(s *Status) nats.MsgHandler {
return func(m *nats.Msg) {
thingName := strings.TrimPrefix(m.Subject, "$arduino.cloud.")
updateMessage := fmt.Sprintf("{\"state\": {\"reported\": { \"%s\": %s}}}", thingName, string(m.Data))
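// Soft rate limiting (descriptive of the block below, assuming messagesSent counts messages
// sent since startup): past 1000 messages, each publish is delayed by messagesSent/1000
// seconds, capped at 20 seconds.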
if s.messagesSent > 1000 {
fmt.Println("rate limiting: " + strconv.Itoa(s.messagesSent))
introducedDelay := time.Duration(s.messagesSent/1000) * time.Second
if introducedDelay > 20*time.Second {
introducedDelay = 20 * time.Second
}
time.Sleep(introducedDelay)
}
s.messagesSent++
s.mqttClient.Publish("$aws/things/"+s.id+"/shadow/update", 1, false, updateMessage)
if debugMqtt {
fmt.Println("MQTT OUT: $aws/things/"+s.id+"/shadow/update", updateMessage)
}
}
}
// downloadFile replaces the file at filepath with content downloaded from url
func downloadFile(filepath, url, token string) error {
// Create the file - remove the existing one if it exists
if _, err := os.Stat(filepath); err == nil {
err := os.Remove(filepath)
if err != nil {
return errors.Wrap(err, "remove "+filepath)
}
}
out, err := os.Create(filepath)
if err != nil {
return errors.Wrap(err, "create "+filepath)
}
defer out.Close()
// Get the data
client := http.Client{}
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
if token != "" {
req.Header.Set("Authorization", "Bearer "+token)
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return errors.New("Expected OK, got " + resp.Status)
}
// Write the body to file
_, err = io.Copy(out, resp.Body)
if err != nil {
return err
}
return nil
}
func logSketchStdoutStderr(cmd *exec.Cmd, stdout io.ReadCloser, stderr io.ReadCloser, sketch *SketchStatus) {
stdoutCopy := bufio.NewScanner(stdout)
stderrCopy := bufio.NewScanner(stderr)
stdoutCopy.Split(bufio.ScanLines)
stderrCopy.Split(bufio.ScanLines)
go func() {
fmt.Println("started scanning stdout")
for stdoutCopy.Scan() {
fmt.Printf(stdoutCopy.Text())
}
}()
go func() {
fmt.Println("started scanning stderr")
for stderrCopy.Scan() {
fmt.Printf(stderrCopy.Text())
}
}()
}
func stdInCB(pty *os.File, status *Status) mqtt.MessageHandler {
return func(client mqtt.Client, msg mqtt.Message) {
if len(msg.Payload()) > 0 {
pty.Write(msg.Payload())
}
}
}
type dylibMap struct {
Name string `json:"Name"`
Provides []string `json:"Provides"`
URL string `json:"URL"`
Help string `json:"Help"`
}
func (d *dylibMap) Download(path string) {
for _, element := range d.Provides {
resp, err := http.Get(d.URL + "/" + element)
if err != nil {
continue
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
filePath := filepath.Join(path, element)
ioutil.WriteFile(filePath, body, 0600)
}
}
func (d *dylibMap) Contains(match string) bool {
for _, element := range d.Provides {
if strings.Contains(element, match) {
return true
}
}
return false
}
func downloadDylibDependencies(library string, status *Status) error {
resp, err := http.Get("https://downloads.arduino.cc/libArduino/dylib_dependencies.txt")
if err != nil {
return errors.New("Can't download dylibs registry")
}
defer resp.Body.Close()
if resp.StatusCode == 200 { // OK
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return errors.New("Can't read dylibs registry")
}
var v []dylibMap
err = json.Unmarshal(bodyBytes, &v)
if err != nil {
return err
}
for _, element := range v {
if element.Contains(library) {
folder, _ := getSketchFolder(status)
fmt.Println(element.Help)
if element.Help != "" {
// TODO: remove and replace with a status.Info()
return errors.New(element.Help)
}
element.Download(filepath.Join(folder, "lib"))
}
}
return errors.New("Can't find a provider for " + library)
}
return nil
}
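// extractLibrary pulls the missing shared-object name out of a dynamic loader error.
// Illustrative input/output (hypothetical error text): given
//   "./sketch: error while loading shared libraries: libcurl.so.4: cannot open shared object file"
// it returns "libcurl.so".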
func extractLibrary(errorString string) string {
fields := strings.Fields(errorString)
for _, subStr := range fields {
if strings.Contains(subStr, ".so") {
subStr = strings.TrimRight(subStr, ":")
libName := strings.Split(subStr, ".")
if len(libName) >= 2 {
return libName[0] + "." + libName[1]
}
}
}
return ""
}
func checkForLibrariesMissingError(filepath string, sketch *SketchStatus, status *Status, err string) {
if strings.Contains(err, "error while loading shared libraries") {
// download dependencies and retry
// if the error persists, bail out
addIntelLibrariesToLdPath()
fmt.Println("Missing library!")
library := extractLibrary(err)
status.Info("/upload", "Downloading needed libraries")
if err := downloadDylibDependencies(library, status); err != nil {
status.Error("/upload", err)
}
status.Error("/upload", errors.New("Missing libraries, install them and relaunch the sketch"))
}
}
func checkSketchForMissingDisplayEnvVariable(errorString string, filepath string, sketch *SketchStatus, status *Status) {
if strings.Contains(errorString, "Can't open display") || strings.Contains(errorString, "cannot open display") {
if os.Getenv("DISPLAY") == "NULL" {
os.Setenv("DISPLAY", ":0")
return
}
err := setupDisplay(true)
if err != nil {
setupDisplay(false)
}
spawnProcess(filepath, sketch, status)
sketch.Status = "RUNNING"
}
}
func setupDisplay(usermode bool) error {
// Blindly set DISPLAY env variable to default
i := 0
for {
os.Setenv("DISPLAY", ":"+strconv.Itoa(i))
fmt.Println("Exporting DISPLAY as " + ":" + strconv.Itoa(i))
// Unlock xorg session for localhost connections
// TODO: find a way to automatically remove -nolisten tcp
cmd := exec.Command("xhost", "+localhost")
if usermode {
cmd.SysProcAttr = &syscall.SysProcAttr{}
cmd.SysProcAttr.Credential = &syscall.Credential{Uid: 1000, Gid: 1000}
}
out, errXhost := cmd.CombinedOutput()
fmt.Println(string(out))
// Also try xrandr
cmd = exec.Command("xrandr")
out, errXrandr := cmd.CombinedOutput()
fmt.Println(string(out))
if errXhost != nil || errXrandr != nil {
if i > 2 {
fmt.Println("Xorg server unavailable, make sure you have a display attached and a user logged in")
fmt.Println("If it's already ok, try setting up Xorg to accept incoming connection (-listen tcp)")
fmt.Println("On Ubuntu, add \n\n[SeatDefaults]\nxserver-allow-tcp=true\n\nto /etc/lightdm/lightdm.conf")
os.Setenv("DISPLAY", "NULL")
return errors.New("Unable to open display")
}
} else {
return nil
}
i++
}
}
// spawnProcess creates a new process from a file
func spawnProcess(filepath string, sketch *SketchStatus, status *Status) (int, io.ReadCloser, io.ReadCloser, error) {
cmd := exec.Command(filepath)
stdout, err := cmd.StdoutPipe()
stderr, err := cmd.StderrPipe()
var stderrBuf bytes.Buffer
cmd.Stderr = &stderrBuf
f, err := pty.Start(cmd)
terminal.MakeRaw(int(f.Fd()))
if err != nil {
fmt.Println(fmt.Sprint(err) + ": " + stderrBuf.String())
return 0, stdout, stderr, err
}
sketch.pty = f
if status.mqttClient != nil {
go status.mqttClient.Subscribe("$aws/things/"+status.id+"/stdin", 1, stdInCB(f, status))
}
go func() {
for {
temp := make([]byte, 1000)
len, err := f.Read(temp)
if err != nil {
break
}
if len > 0 {
//fmt.Println(string(temp[:len]))
status.Raw("/stdout", string(temp[:len]))
checkForLibrariesMissingError(filepath, sketch, status, string(temp))
checkSketchForMissingDisplayEnvVariable(string(temp), filepath, sketch, status)
}
}
}()
//logSketchStdoutStderr(cmd, stdout, stderr, sketch)
// keep track of sketch life (and signal if it ends abruptly)
go func() {
err := cmd.Wait()
// if we get here, signal that the sketch has died
applyAction(sketch, "STOP", status)
if err != nil {
fmt.Println(fmt.Sprint(err) + ": " + stderrBuf.String())
}
fmt.Println("sketch exited ")
}()
return cmd.Process.Pid, stdout, stderr, err
}
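// applyAction drives a sketch's lifecycle: START resumes a paused process (SIGCONT) or spawns
// it if it is not running, STOP kills the process, PAUSE sends SIGTSTP, and DELETE stops the
// sketch and removes its binary from the sketch folder.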
func applyAction(sketch *SketchStatus, action string, status *Status) error {
process, err := os.FindProcess(sketch.PID)
if err != nil && sketch.PID != 0 {
fmt.Println("exit because of error")
return err
}
switch action {
case "START":
if sketch.PID != 0 {
err = process.Signal(syscall.SIGCONT)
} else {
folder, err := getSketchFolder(status)
if err != nil {
return err
}
name := filepath.Join(folder, sketch.Name)
sketch.PID, _, _, err = spawnProcess(name, sketch, status)
}
if err != nil {
return err
}
sketch.Status = "RUNNING"
break
case "STOP":
fmt.Println("stop called")
if sketch.PID != 0 && err == nil && process.Pid != 0 {
fmt.Println("kill called")
err = process.Kill()
} else {
err = nil
}
sketch.PID = 0
sketch.Status = "STOPPED"
break
case "DELETE":
applyAction(sketch, "STOP", status)
fmt.Println("delete called")
sketchFolder, err := getSketchFolder(status)
err = os.Remove(filepath.Join(sketchFolder, sketch.Name))
if err != nil {
fmt.Println("error deleting sketch")
}
status.Sketches[sketch.ID] = nil
break
case "PAUSE":
err = process.Signal(syscall.SIGTSTP)
sketch.Status = "PAUSED"
break
}
return err
}
|
[
"\"DISPLAY\""
] |
[] |
[
"DISPLAY"
] |
[]
|
["DISPLAY"]
|
go
| 1 | 0 | |
main_test.go
|
package main
import (
"bytes"
"encoding/json"
"log"
"net/http"
"net/http/httptest"
"os"
"strconv"
"testing"
)
var a App
const tableCreationQuery = `CREATE TABLE IF NOT EXISTS services
(
id SERIAL,
name TEXT NOT NULL,
endpoint TEXT NOT NULL,
command TEXT NOT NULL,
CONSTRAINT services_pkey PRIMARY KEY (id)
)`
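// TestMain wires the app to a test database whose credentials come from the environment;
// an illustrative local run (assumed values) would be:
//   TEST_DB_USERNAME=postgres TEST_DB_PASSWORD=postgres TEST_DB_NAME=services_test go test ./...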
func TestMain(m *testing.M) {
a = App{}
a.Initialise(
os.Getenv("TEST_DB_USERNAME"),
os.Getenv("TEST_DB_PASSWORD"),
os.Getenv("TEST_DB_NAME"),
)
ensureTableExists()
code := m.Run()
clearTable()
os.Exit(code)
}
func ensureTableExists() {
if _, err := a.DB.Exec(tableCreationQuery); err != nil {
log.Fatal(err)
}
}
func clearTable() {
a.DB.Exec("DELETE FROM services")
a.DB.Exec("ALTER SEQUENCE services_id_seq RESTART WITH 1")
}
func TestEmptyTable(t *testing.T) {
clearTable()
req, _ := http.NewRequest("GET", "/services", nil)
req.Header.Set("APIKEY", "APIKEY")
response := executeRequest(req)
checkResponseCode(t, http.StatusOK, response.Code)
if body := response.Body.String(); body != "[]" {
t.Errorf("Expected an empty array. Got %s", body)
}
}
func executeRequest(req *http.Request) *httptest.ResponseRecorder {
rr := httptest.NewRecorder()
a.Router.ServeHTTP(rr, req)
return rr
}
func checkResponseCode(t *testing.T, expected, actual int) {
if expected != actual {
t.Errorf("Expected response code %d. Got %d\n", expected, actual)
}
}
func TestGetNonExistentService(t *testing.T) {
clearTable()
req, _ := http.NewRequest("GET", "/service/11", nil)
req.Header.Set("APIKEY", "APIKEY")
response := executeRequest(req)
checkResponseCode(t, http.StatusNotFound, response.Code)
var m map[string]string
json.Unmarshal(response.Body.Bytes(), &m)
if m["error"] != "Service not found" {
t.Errorf("Expected the 'error' key of the response to be set to 'Service not found'. Got '%s'", m["error"])
}
}
func TestCreateService(t *testing.T) {
clearTable()
payload := []byte(`{"name":"test_service","endpoint":"some_webhook","command":"ls -lah"}`)
req, _ := http.NewRequest("POST", "/service", bytes.NewBuffer(payload))
req.Header.Set("APIKEY", "APIKEY")
response := executeRequest(req)
checkResponseCode(t, http.StatusCreated, response.Code)
var m map[string]interface{}
json.Unmarshal(response.Body.Bytes(), &m)
if m["name"] != "test_service" {
t.Errorf("Expected service name to be 'test_service'. Got '%v'", m["name"])
}
if m["endpoint"] != "some_webhook" {
t.Errorf("Expected service endpoint to be 'some_webhook'. Got '%v'", m["endpoint"])
}
if m["command"] != "ls -lah" {
t.Errorf("Expected service command to be 'ls -lah'. Got '%v'", m["command"])
}
// the id is compared to 1.0 because JSON unmarshaling converts numbers to
// floats, when the target is a map[string]interface{}
if m["id"] != 1.0 {
t.Errorf("Expected service ID to be '1'. Got '%v'", m["id"])
}
}
func TestGetService(t *testing.T) {
clearTable()
addServices(1)
req, _ := http.NewRequest("GET", "/service/1", nil)
req.Header.Set("APIKEY", "APIKEY")
response := executeRequest(req)
checkResponseCode(t, http.StatusOK, response.Code)
}
func addServices(count int) {
if count < 1 {
count = 1
}
for i := 0; i < count; i++ {
a.DB.Exec("INSERT INTO services(name, endpoint, command) VALUES($1, $2, $3)", "Service "+strconv.Itoa(i), "some_endpoint", "some_command")
}
}
func TestUpdateService(t *testing.T) {
clearTable()
addServices(1)
req, _ := http.NewRequest("GET", "/service/1", nil)
req.Header.Set("APIKEY", "APIKEY")
response := executeRequest(req)
var originalService map[string]interface{}
json.Unmarshal(response.Body.Bytes(), &originalService)
//println(response.Body.String())
payload := []byte(`{"name":"test_service_UPDATE","endpoint":"some_webhook","command":"ls -lah"}`)
req, _ = http.NewRequest("PUT", "/service/1", bytes.NewBuffer(payload))
req.Header.Set("APIKEY", "APIKEY")
response = executeRequest(req)
checkResponseCode(t, http.StatusOK, response.Code)
var m map[string]interface{}
json.Unmarshal(response.Body.Bytes(), &m)
//println(response.Body.String())
if m["id"] != originalService["id"] {
t.Errorf("Expected the id to remain the same (%v). Got %v", originalService["id"], m["id"])
}
if m["name"] == originalService["name"] {
t.Errorf("Expected the name to change from '%v' to '%v'. Got '%v'", originalService["name"], m["name"], m["name"])
}
if m["endpoint"] == originalService["endpoint"] {
t.Errorf("Expected the endpoint to change from '%v' to '%v'. Got '%v'", originalService["endpoint"], m["endpoint"], m["endpoint"])
}
if m["command"] == originalService["command"] {
t.Errorf("Expected the endpoint to change from '%v' to '%v'. Got '%v'", originalService["command"], m["command"], m["command"])
}
}
func TestDeleteService(t *testing.T) {
clearTable()
addServices(1)
req, _ := http.NewRequest("GET", "/service/1", nil)
req.Header.Set("APIKEY", "APIKEY")
response := executeRequest(req)
//println(response.Body.String())
checkResponseCode(t, http.StatusOK, response.Code)
req, _ = http.NewRequest("DELETE", "/service/1", nil)
req.Header.Set("APIKEY", "APIKEY")
response = executeRequest(req)
//println(response.Body.String())
checkResponseCode(t, http.StatusOK, response.Code)
req, _ = http.NewRequest("GET", "/service/1", nil)
req.Header.Set("APIKEY", "APIKEY")
response = executeRequest(req)
//print(response.Body.String())
checkResponseCode(t, http.StatusNotFound, response.Code)
}
|
[
"\"TEST_DB_USERNAME\"",
"\"TEST_DB_PASSWORD\"",
"\"TEST_DB_NAME\""
] |
[] |
[
"TEST_DB_NAME",
"TEST_DB_PASSWORD",
"TEST_DB_USERNAME"
] |
[]
|
["TEST_DB_NAME", "TEST_DB_PASSWORD", "TEST_DB_USERNAME"]
|
go
| 3 | 0 | |
train.py
|
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from datetime import date
import os
import sys
import tensorflow as tf
# import keras
# import keras.preprocessing.image
# import keras.backend as K
# from keras.optimizers import Adam, SGD
from tensorflow import keras
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam, SGD
from augmentor.color import VisualEffect
from augmentor.misc import MiscEffect
from model import efficientdet
from losses import smooth_l1, focal, smooth_l1_quad
from efficientnet import BASE_WEIGHTS_PATH, WEIGHTS_HASHES
def makedirs(path):
# Intended behavior: try to create the directory,
# pass if the directory exists already, fail otherwise.
# Meant for Python 2.7/3.n compatibility.
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def get_session():
"""
Construct a modified tf session.
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def create_callbacks(training_model, prediction_model, validation_generator, args):
"""
Creates the callbacks to use during training.
Args
training_model: The model that is used for training.
prediction_model: The model that should be used for validation.
validation_generator: The generator for creating validation data.
args: parsed argparse args object.
Returns:
A list of callbacks used for training.
"""
callbacks = []
tensorboard_callback = None
if args.tensorboard_dir:
tensorboard_callback = keras.callbacks.TensorBoard(
log_dir=args.tensorboard_dir,
histogram_freq=0,
batch_size=args.batch_size,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None
)
callbacks.append(tensorboard_callback)
if args.evaluation and validation_generator:
if args.dataset_type == 'coco':
from eval.coco import Evaluate
# use prediction model for evaluation
evaluation = Evaluate(validation_generator, prediction_model, tensorboard=tensorboard_callback)
else:
from eval.pascal import Evaluate
evaluation = Evaluate(validation_generator, prediction_model, tensorboard=tensorboard_callback)
callbacks.append(evaluation)
# save the model
if args.snapshots:
# ensure directory created first; otherwise h5py will error after epoch.
makedirs(args.snapshot_path)
checkpoint = keras.callbacks.ModelCheckpoint(
os.path.join(
args.snapshot_path,
f'{args.dataset_type}_{{epoch:02d}}_{{loss:.4f}}_{{val_loss:.4f}}.h5' if args.compute_val_loss
else f'{args.dataset_type}_{{epoch:02d}}_{{loss:.4f}}.h5'
),
verbose=1,
# save_best_only=True,
# monitor="mAP",
# mode='max'
)
callbacks.append(checkpoint)
# callbacks.append(keras.callbacks.ReduceLROnPlateau(
# monitor='loss',
# factor=0.1,
# patience=2,
# verbose=1,
# mode='auto',
# min_delta=0.0001,
# cooldown=0,
# min_lr=0
# ))
return callbacks
def create_generators(args):
"""
Create generators for training and validation.
Args
args: parsed argparse object containing configuration for generators.
"""
common_args = {
'batch_size': args.batch_size,
'phi': args.phi,
'detect_text': args.detect_text,
'detect_quadrangle': args.detect_quadrangle
}
# create random transform generator for augmenting training data
if args.random_transform:
misc_effect = MiscEffect()
visual_effect = VisualEffect()
else:
misc_effect = None
visual_effect = None
if args.dataset_type == 'pascal':
from generators.pascal import PascalVocGenerator
train_generator = PascalVocGenerator(
args.pascal_path,
'trainval',
skip_difficult=True,
misc_effect=misc_effect,
visual_effect=visual_effect,
**common_args
)
validation_generator = PascalVocGenerator(
args.pascal_path,
'val',
skip_difficult=True,
shuffle_groups=False,
**common_args
)
elif args.dataset_type == 'csv':
from generators.csv_ import CSVGenerator
train_generator = CSVGenerator(
args.annotations_path,
args.classes_path,
misc_effect=misc_effect,
visual_effect=visual_effect,
**common_args
)
if args.val_annotations_path:
validation_generator = CSVGenerator(
args.val_annotations_path,
args.classes_path,
shuffle_groups=False,
**common_args
)
else:
validation_generator = None
elif args.dataset_type == 'coco':
# import here to prevent unnecessary dependency on cocoapi
from generators.coco import CocoGenerator
train_generator = CocoGenerator(
args.coco_path,
'train2017',
misc_effect=misc_effect,
visual_effect=visual_effect,
group_method='random',
**common_args
)
validation_generator = CocoGenerator(
args.coco_path,
'val2017',
shuffle_groups=False,
**common_args
)
else:
raise ValueError('Invalid data type received: {}'.format(args.dataset_type))
return train_generator, validation_generator
def check_args(parsed_args):
"""
Function to check for inherent contradictions within parsed arguments.
For example, batch_size < num_gpus
Intended to raise errors prior to backend initialisation.
Args
parsed_args: parser.parse_args()
Returns
parsed_args
"""
if parsed_args.gpu and parsed_args.batch_size < len(parsed_args.gpu.split(',')):
raise ValueError(
"Batch size ({}) must be equal to or higher than the number of GPUs ({})".format(parsed_args.batch_size,
len(parsed_args.gpu.split(
','))))
return parsed_args
def parse_args(args):
"""
Parse the arguments.
"""
today = str(date.today())
parser = argparse.ArgumentParser(description='Simple training script for training an EfficientDet network.')
subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
subparsers.required = True
coco_parser = subparsers.add_parser('coco')
coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')
pascal_parser = subparsers.add_parser('pascal')
pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')
csv_parser = subparsers.add_parser('csv')
csv_parser.add_argument('annotations_path', help='Path to CSV file containing annotations for training.')
csv_parser.add_argument('classes_path', help='Path to a CSV file containing class label mapping.')
csv_parser.add_argument('--val-annotations-path',
help='Path to CSV file containing annotations for validation (optional).')
parser.add_argument('--detect-quadrangle', help='If to detect quadrangle.', action='store_true', default=False)
parser.add_argument('--detect-text', help='If is text detection task.', action='store_true', default=False)
parser.add_argument('--snapshot', help='Resume training from a snapshot.')
parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')
parser.add_argument('--freeze-bn', help='Freeze training of BatchNormalization layers.', action='store_true')
parser.add_argument('--weighted-bifpn', help='Use weighted BiFPN', action='store_true')
parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)
parser.add_argument('--phi', help='Hyper parameter phi', default=0, type=int, choices=(0, 1, 2, 3, 4, 5, 6))
parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=50)
parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000)
parser.add_argument('--snapshot-path',
help='Path to store snapshots of models during training',
default='checkpoints/{}'.format(today))
parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output',
default='logs/{}'.format(today))
parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')
parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation',
action='store_false')
parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
parser.add_argument('--compute-val-loss', help='Compute validation loss during training', dest='compute_val_loss',
action='store_true')
parser.add_argument('--learning-rate', help='Learning rate of training.', type=float, default=1e-3)
# Fit generator arguments
parser.add_argument('--multiprocessing', help='Use multiprocessing in fit_generator.', action='store_true')
parser.add_argument('--workers', help='Number of generator workers.', type=int, default=1)
parser.add_argument('--max-queue-size', help='Queue length for multiprocessing workers in fit_generator.', type=int,
default=10)
print(vars(parser.parse_args(args)))
return check_args(parser.parse_args(args))
def main(args=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# create the generators
train_generator, validation_generator = create_generators(args)
num_classes = train_generator.num_classes()
num_anchors = train_generator.num_anchors
# optionally choose specific GPU
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
K.set_session(get_session())
model, prediction_model = efficientdet(args.phi,
num_classes=num_classes,
num_anchors=num_anchors,
weighted_bifpn=args.weighted_bifpn,
freeze_bn=args.freeze_bn,
detect_quadrangle=args.detect_quadrangle
)
# load pretrained weights
if args.snapshot:
if args.snapshot == 'imagenet':
model_name = 'efficientnet-b{}'.format(args.phi)
file_name = '{}_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'.format(model_name)
file_hash = WEIGHTS_HASHES[model_name][1]
weights_path = keras.utils.get_file(file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
model.load_weights(weights_path, by_name=True)
else:
print('Loading model, this may take a second...')
model.load_weights(args.snapshot, by_name=True)
# freeze backbone layers
if args.freeze_backbone:
# 227, 329, 329, 374, 464, 566, 656
for i in range(1, [227, 329, 329, 374, 464, 566, 656][args.phi]):
model.layers[i].trainable = False
if args.gpu and len(args.gpu.split(',')) > 1:
model = keras.utils.multi_gpu_model(model, gpus=list(map(int, args.gpu.split(','))))
# compile model
model.compile(optimizer=Adam(lr=args.learning_rate), loss={
'regression': smooth_l1_quad() if args.detect_quadrangle else smooth_l1(),
'classification': focal()
}, )
# print(model.summary())
# create the callbacks
callbacks = create_callbacks(
model,
prediction_model,
validation_generator,
args,
)
if not args.compute_val_loss:
validation_generator = None
elif args.compute_val_loss and validation_generator is None:
raise ValueError('When you have no validation data, you should not specify --compute-val-loss.')
# start training
return model.fit_generator(
generator=train_generator,
steps_per_epoch=args.steps,
initial_epoch=0,
epochs=args.epochs,
verbose=1,
callbacks=callbacks,
workers=args.workers,
use_multiprocessing=args.multiprocessing,
max_queue_size=args.max_queue_size,
validation_data=validation_generator
)
if __name__ == '__main__':
main()
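# Illustrative invocation (hypothetical paths; global options go before the dataset
# subcommand, dataset-specific options after it):
#   python train.py --snapshot imagenet --phi 0 --gpu 0 --batch-size 8 --steps 1000 \
#       --random-transform --compute-val-loss \
#       csv /data/annotations.csv /data/classes.csv --val-annotations-path /data/val_annotations.csv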
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
test_models.py
|
import os
import unittest
from unittest import mock
from scrapy import Item
from scrapy.item import Field
from jedeschule.items import School
from jedeschule.pipelines.school_pipeline import SchoolPipelineItem
from jedeschule.pipelines.db_pipeline import School as DBSchool, get_session
class TestSchoolItem(Item):
name = Field()
nr = Field()
@mock.patch.dict(os.environ, {'DATABASE_URL': 'sqlite://'})
class TestSchool(unittest.TestCase):
def test_import_new(self):
# Arrange
info = School(name='Test Schule', id='NDS-1')
item = dict(name='Test Schule', nr=1)
school_item: SchoolPipelineItem = SchoolPipelineItem(info=info, item=item)
db_item = DBSchool.update_or_create(school_item)
session = get_session()
session.add(db_item)
session.commit()
# Act
count = session.query(DBSchool).count()
# Assert
self.assertEqual(count, 1)
def test_import_existing(self):
# This test requires the previous one to have run already so that the item
# exists in the database
# Arrange
info = School(name='Test Schule (updated)', id='NDS-1')
item = dict(name='Test Schule', nr=1)
school_item: SchoolPipelineItem = SchoolPipelineItem(info=info, item=item)
db_item = DBSchool.update_or_create(school_item)
session = get_session()
session.add(db_item)
session.commit()
# Act
count = session.query(DBSchool).count()
db_school = session.query(DBSchool).first()
# Assert
self.assertEqual(count, 1)
self.assertEqual(db_school.name, "Test Schule (updated)")
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
value_float32.go
|
package conf
// Float32Var defines a float32 flag and environment variable with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the flag and/or environment variable.
func (c *Configurator) Float32Var(p *float32, name string, value float32, usage string) {
c.env().Float32Var(p, name, value, usage)
c.flag().Float32Var(p, name, value, usage)
}
// Float32 defines a float32 flag and environment variable with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the flag and/or environment variable.
func (c *Configurator) Float32(name string, value float32, usage string) *float32 {
p := new(float32)
c.Float32Var(p, name, value, usage)
return p
}
// Float32VarE defines a float32 environment variable with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the environment variable.
func (c *Configurator) Float32VarE(p *float32, name string, value float32, usage string) {
c.env().Float32Var(p, name, value, usage)
}
// Float32E defines a float32 environment variable with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the environment variable.
func (c *Configurator) Float32E(name string, value float32, usage string) *float32 {
p := new(float32)
c.Float32VarE(p, name, value, usage)
return p
}
// Float32VarF defines a float32 flag with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the flag.
func (c *Configurator) Float32VarF(p *float32, name string, value float32, usage string) {
c.flag().Float32Var(p, name, value, usage)
}
// Float32F defines a float32 flag with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the flag.
func (c *Configurator) Float32F(name string, value float32, usage string) *float32 {
p := new(float32)
c.Float32VarF(p, name, value, usage)
return p
}
// Float32Var defines a float32 flag and environment variable with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the flag and/or environment variable.
func Float32Var(p *float32, name string, value float32, usage string) {
Global.Float32Var(p, name, value, usage)
}
// Float32 defines a float32 flag and environment variable with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the flag and/or environment variable.
func Float32(name string, value float32, usage string) *float32 {
return Global.Float32(name, value, usage)
}
// Float32VarE defines a float32 environment variable with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the environment variable.
func Float32VarE(p *float32, name string, value float32, usage string) {
Global.Float32VarE(p, name, value, usage)
}
// Float32E defines a float32 environment variable with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the environment variable.
func Float32E(name string, value float32, usage string) *float32 {
return Global.Float32E(name, value, usage)
}
// Float32VarF defines a float32 flag with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the flag.
func Float32VarF(p *float32, name string, value float32, usage string) {
Global.Float32VarF(p, name, value, usage)
}
// Float32F defines a float32 flag with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the flag.
func Float32F(name string, value float32, usage string) *float32 {
return Global.Float32F(name, value, usage)
}
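// Example usage (illustrative; the package's actual parse entry point is assumed and may be
// named differently):
//   rate := conf.Float32("sample-rate", 0.25, "fraction of requests to sample")
//   // after the package parses flags and the environment, *rate holds the configured value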
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
src/test/java/software/amazon/awssdk/crt/test/ShutdownTest.java
|
package software.amazon.awssdk.crt.test;
import java.net.URI;
import java.util.concurrent.CompletableFuture;
import org.junit.Assume;
import org.junit.Test;
import software.amazon.awssdk.crt.http.HttpClientConnection;
import software.amazon.awssdk.crt.http.HttpClientConnectionManager;
import software.amazon.awssdk.crt.http.HttpClientConnectionManagerOptions;
import software.amazon.awssdk.crt.io.ClientBootstrap;
import software.amazon.awssdk.crt.io.EventLoopGroup;
import software.amazon.awssdk.crt.io.HostResolver;
import software.amazon.awssdk.crt.io.SocketOptions;
import software.amazon.awssdk.crt.io.TlsContext;
import software.amazon.awssdk.crt.io.TlsContextOptions;
/*
A temporary test that sets up an asynchronously-acquired resource (HttpClientConnection) and then immediately
exits in order to try and trigger a JVM shutdown while acquisition is still outstanding in native code, checking
the safety of the callback if the acquisition completes after JVM shutdown and before process exit.
This is temporary and only has a chance of hitting the condition if it's the only test ran (which is why
we filter it with an environment variable and run it as a single test in a separate CI step).
Long term, we want to switch this to an mvn-executable utility that supports triggering JVM shutdown at controllable
points, in the same way that the Python CRT checks sloppy shutdown.
*/
public class ShutdownTest {
private static String SHUTDOWN_TEST_ENABLED = System.getenv("AWS_CRT_SHUTDOWN_TESTING");
private static boolean doShutdownTest() {
return SHUTDOWN_TEST_ENABLED != null;
}
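// To exercise this test in isolation, the CI step sets the gating variable before invoking
// Maven; an illustrative (assumed) local equivalent:
//   AWS_CRT_SHUTDOWN_TESTING=1 mvn test -Dtest=ShutdownTest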
private HttpClientConnectionManager createConnectionManager(URI uri) {
try (EventLoopGroup eventLoopGroup = new EventLoopGroup(1);
HostResolver resolver = new HostResolver(eventLoopGroup);
ClientBootstrap bootstrap = new ClientBootstrap(eventLoopGroup, resolver);
SocketOptions sockOpts = new SocketOptions();
TlsContextOptions tlsOpts = TlsContextOptions.createDefaultClient();
TlsContext tlsContext = new TlsContext(tlsOpts)) {
HttpClientConnectionManagerOptions options = new HttpClientConnectionManagerOptions();
options.withClientBootstrap(bootstrap)
.withSocketOptions(sockOpts)
.withTlsContext(tlsContext)
.withUri(uri)
.withMaxConnections(1);
return HttpClientConnectionManager.create(options);
}
}
@Test
public void testShutdownDuringAcquire() throws Exception {
Assume.assumeTrue(doShutdownTest());
HttpClientConnectionManager manager = createConnectionManager(new URI("https://aws-crt-test-stuff.s3.amazonaws.com"));
CompletableFuture<HttpClientConnection> connection = manager.acquireConnection();
}
}
|
[
"\"AWS_CRT_SHUTDOWN_TESTING\""
] |
[] |
[
"AWS_CRT_SHUTDOWN_TESTING"
] |
[]
|
["AWS_CRT_SHUTDOWN_TESTING"]
|
java
| 1 | 0 | |
tests/conftest.py
|
import pytest
import os
def str_to_bool(val):
try:
val = val.lower()
except AttributeError:
val = str(val).lower()
if val == 'true':
return True
elif val == 'false':
return False
else:
raise ValueError("Invalid input value: %s" % val)
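# Examples: str_to_bool('True') -> True, str_to_bool(False) -> False,
# and any other value such as str_to_bool('yes') raises ValueError.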
@pytest.fixture(scope="module")
def setup(host):
cluster_address = ""
container_binary = ""
osd_ids = []
osds = []
ansible_vars = host.ansible.get_variables()
ansible_facts = host.ansible("setup")
docker = ansible_vars.get("docker")
osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
group_names = ansible_vars["group_names"]
fsid = ansible_vars.get("fsid")
ansible_distribution = ansible_facts["ansible_facts"]["ansible_distribution"]
if ansible_distribution == "CentOS":
public_interface = "eth1"
cluster_interface = "eth2"
else:
public_interface = "ens6"
cluster_interface = "ens7"
subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
num_mons = len(ansible_vars["groups"]["mons"])
if osd_auto_discovery:
num_osds = 3
else:
num_osds = len(ansible_vars.get("devices", []))
if not num_osds:
num_osds = len(ansible_vars.get("lvm_volumes", []))
osds_per_device = ansible_vars.get("osds_per_device", 1)
num_osds = num_osds * osds_per_device
# If number of devices doesn't map to number of OSDs, allow tests to define
# that custom number, defaulting it to ``num_devices``
num_osds = ansible_vars.get('num_osds', num_osds)
cluster_name = ansible_vars.get("cluster", "ceph")
conf_path = "/etc/ceph/{}.conf".format(cluster_name)
if "osds" in group_names:
cluster_address = host.interface(cluster_interface).addresses[0]
cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
if cmd.rc == 0:
osd_ids = cmd.stdout.rstrip("\n").split("\n")
osds = osd_ids
address = host.interface(public_interface).addresses[0]
if docker:
container_binary = "docker"
if docker and str_to_bool(os.environ.get('IS_PODMAN', False)): # noqa E501
container_binary = "podman"
data = dict(
cluster_name=cluster_name,
subnet=subnet,
osd_ids=osd_ids,
num_mons=num_mons,
num_osds=num_osds,
address=address,
osds=osds,
conf_path=conf_path,
public_interface=public_interface,
cluster_interface=cluster_interface,
cluster_address=cluster_address,
container_binary=container_binary)
return data
@pytest.fixture()
def node(host, request):
"""
This fixture represents a single node in the ceph cluster. Using the
host.ansible fixture provided by testinfra it can access all the ansible
variables provided to it by the specific test scenario being ran.
You must include this fixture on any tests that operate on specific type
of node because it contains the logic to manage which tests a node
should run.
"""
ansible_vars = host.ansible.get_variables()
# tox will pass in these environment variables. We need to do it this way
# because testinfra does not collect and provide ansible config passed in
# from using --extra-vars
ceph_stable_release = os.environ.get("CEPH_STABLE_RELEASE", "luminous")
rolling_update = os.environ.get("ROLLING_UPDATE", "False")
group_names = ansible_vars["group_names"]
docker = ansible_vars.get("docker")
radosgw_num_instances = ansible_vars.get("radosgw_num_instances", 1)
ceph_release_num = {
'jewel': 10,
'kraken': 11,
'luminous': 12,
'mimic': 13,
'nautilus': 14,
'octopus': 15,
'dev': 99
}
# capture the initial/default state
test_is_applicable = False
for marker in request.node.iter_markers():
if marker.name in group_names or marker.name == 'all':
test_is_applicable = True
break
# Check if any markers on the test method exist in the nodes group_names.
# If they do not, this test is not valid for the node being tested.
if not test_is_applicable:
reason = "%s: Not a valid test for node type: %s" % (
request.function, group_names)
pytest.skip(reason)
if request.node.get_closest_marker("no_docker") and docker:
pytest.skip(
"Not a valid test for containerized deployments or atomic hosts")
if request.node.get_closest_marker("docker") and not docker:
pytest.skip(
"Not a valid test for non-containerized deployments or atomic hosts") # noqa E501
data = dict(
vars=ansible_vars,
docker=docker,
ceph_stable_release=ceph_stable_release,
ceph_release_num=ceph_release_num,
rolling_update=rolling_update,
radosgw_num_instances=radosgw_num_instances,
)
return data
def pytest_collection_modifyitems(session, config, items):
for item in items:
test_path = item.location[0]
if "mon" in test_path:
item.add_marker(pytest.mark.mons)
elif "osd" in test_path:
item.add_marker(pytest.mark.osds)
elif "mds" in test_path:
item.add_marker(pytest.mark.mdss)
elif "mgr" in test_path:
item.add_marker(pytest.mark.mgrs)
elif "rbd-mirror" in test_path:
item.add_marker(pytest.mark.rbdmirrors)
elif "rgw" in test_path:
item.add_marker(pytest.mark.rgws)
elif "nfs" in test_path:
item.add_marker(pytest.mark.nfss)
elif "iscsi" in test_path:
item.add_marker(pytest.mark.iscsigws)
else:
item.add_marker(pytest.mark.all)
if "journal_collocation" in test_path:
item.add_marker(pytest.mark.journal_collocation)
|
[] |
[] |
[
"IS_PODMAN",
"CEPH_STABLE_RELEASE",
"ROLLING_UPDATE"
] |
[]
|
["IS_PODMAN", "CEPH_STABLE_RELEASE", "ROLLING_UPDATE"]
|
python
| 3 | 0 | |
sdk/communication/azure-communication-sms/samples/send_sms_to_single_recipient_sample.py
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: send_sms_to_single_recipient_sample.py
DESCRIPTION:
This sample demonstrates sending an SMS message to a single recipient. The SMS client is
authenticated using a connection string.
USAGE:
python send_sms_to_single_recipient_sample.py
    Set the environment variables with your own values before running the sample:
    1) COMMUNICATION_SAMPLES_CONNECTION_STRING - the connection string in your ACS account
    2) AZURE_COMMUNICATION_SERVICE_PHONE_NUMBER - an SMS-enabled phone number acquired in your
       ACS account (used as both sender and recipient in this sample)
"""
import os
import sys
from azure.communication.sms import SmsClient
sys.path.append("..")
class SmsSingleRecipientSample(object):
connection_string = os.getenv("COMMUNICATION_SAMPLES_CONNECTION_STRING")
phone_number = os.getenv("AZURE_COMMUNICATION_SERVICE_PHONE_NUMBER")
def send_sms_to_single_recipient(self):
sms_client = SmsClient.from_connection_string(self.connection_string)
# calling send() with sms values
sms_responses = sms_client.send(
from_=self.phone_number,
to=self.phone_number,
message="Hello World via SMS",
enable_delivery_report=True, # optional property
tag="custom-tag") # optional property
sms_response = sms_responses[0]
        if sms_response.successful:
            print("Message with message id {} was successfully sent to {}"
                  .format(sms_response.message_id, sms_response.to))
else:
print("Message failed to send to {} with the status code {} and error: {}"
.format(sms_response.to, sms_response.http_status_code, sms_response.error_message))
if __name__ == '__main__':
sample = SmsSingleRecipientSample()
sample.send_sms_to_single_recipient()
|
[] |
[] |
[
"AZURE_COMMUNICATION_SERVICE_PHONE_NUMBER",
"COMMUNICATION_SAMPLES_CONNECTION_STRING"
] |
[]
|
["AZURE_COMMUNICATION_SERVICE_PHONE_NUMBER", "COMMUNICATION_SAMPLES_CONNECTION_STRING"]
|
python
| 2 | 0 | |
tools/c7n_org/c7n_org/cli.py
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""Run a custodian policy across an organization's accounts
"""
import csv
from collections import Counter
import logging
import os
import time
import subprocess
import sys
import multiprocessing
from concurrent.futures import (
ProcessPoolExecutor,
as_completed)
import yaml
from botocore.compat import OrderedDict
from botocore.exceptions import ClientError
import click
import jsonschema
from c7n.credentials import assumed_session, SessionFactory
from c7n.executor import MainThreadExecutor
from c7n.config import Config
from c7n.policy import PolicyCollection
from c7n.provider import get_resource_class
from c7n.reports.csvout import Formatter, fs_record_set
from c7n.resources import load_available
from c7n.utils import CONN_CACHE, dumps, filter_empty
from c7n_org.utils import environ, account_tags
log = logging.getLogger('c7n_org')
# Workaround OSX issue, note this exists for py2 but there
# isn't anything we can do in that case.
# https://bugs.python.org/issue33725
if sys.platform == 'darwin' and sys.version_info >= (3, 5):
    multiprocessing.set_start_method('spawn')
WORKER_COUNT = int(
os.environ.get('C7N_ORG_PARALLEL', multiprocessing.cpu_count() * 4))
CONFIG_SCHEMA = {
'$schema': 'http://json-schema.org/draft-07/schema',
'id': 'http://schema.cloudcustodian.io/v0/orgrunner.json',
'definitions': {
'account': {
'type': 'object',
'additionalProperties': True,
'anyOf': [
{'required': ['role', 'account_id']},
{'required': ['profile', 'account_id']}
],
'properties': {
'name': {'type': 'string'},
'display_name': {'type': 'string'},
'org_id': {'type': 'string'},
'email': {'type': 'string'},
'account_id': {
'type': 'string',
'pattern': '^[0-9]{12}$',
'minLength': 12, 'maxLength': 12},
'profile': {'type': 'string', 'minLength': 3},
'tags': {'type': 'array', 'items': {'type': 'string'}},
'regions': {'type': 'array', 'items': {'type': 'string'}},
'role': {'oneOf': [
{'type': 'array', 'items': {'type': 'string'}},
{'type': 'string', 'minLength': 3}]},
'external_id': {'type': 'string'},
'vars': {'type': 'object'},
}
},
'subscription': {
'type': 'object',
'additionalProperties': False,
'required': ['subscription_id'],
'properties': {
'subscription_id': {'type': 'string'},
'tags': {'type': 'array', 'items': {'type': 'string'}},
'name': {'type': 'string'},
'vars': {'type': 'object'},
}
},
'project': {
'type': 'object',
'additionalProperties': False,
'required': ['project_id'],
'properties': {
'project_id': {'type': 'string'},
'tags': {'type': 'array', 'items': {'type': 'string'}},
'name': {'type': 'string'},
'vars': {'type': 'object'},
}
},
},
'type': 'object',
'additionalProperties': False,
'oneOf': [
{'required': ['accounts']},
{'required': ['projects']},
{'required': ['subscriptions']}
],
'properties': {
'vars': {'type': 'object'},
'accounts': {
'type': 'array',
'items': {'$ref': '#/definitions/account'}
},
'subscriptions': {
'type': 'array',
'items': {'$ref': '#/definitions/subscription'}
},
'projects': {
'type': 'array',
'items': {'$ref': '#/definitions/project'}
}
}
}
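# Illustrative sketch (not part of the original source): a minimal accounts file
# that validates against CONFIG_SCHEMA above. The account name, id, role and tag
# are placeholder values.
#
#   accounts:
#     - name: dev
#       account_id: "123456789012"
#       role: arn:aws:iam::123456789012:role/CustodianReadOnly
#       regions:
#         - us-east-1
#       tags:
#         - env:dev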
@click.group()
def cli():
"""custodian organization multi-account runner."""
class LogFilter:
"""We want to keep the main c7n-org cli output to be readable.
We previously did so via squelching custodian's log output via
level filter on the logger, however doing that meant that log
outputs stored to output locations were also squelched.
We effectively want differential handling at the top level logger
stream handler, ie. we want `custodian` log messages to propagate
to the root logger based on level, but we also want them to go the
custodian logger's directly attached handlers on debug level.
"""
def filter(self, r):
if not r.name.startswith('custodian'):
return 1
elif r.levelno >= logging.WARNING:
return 1
return 0
def init(config, use, debug, verbose, accounts, tags, policies, resource=None, policy_tags=()):
level = verbose and logging.DEBUG or logging.INFO
logging.basicConfig(
level=level,
format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
logging.getLogger().setLevel(level)
logging.getLogger('botocore').setLevel(logging.ERROR)
logging.getLogger('s3transfer').setLevel(logging.WARNING)
logging.getLogger('custodian.s3').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.WARNING)
# Filter out custodian log messages on console output if not
# at warning level or higher, see LogFilter docs and #2674
for h in logging.getLogger().handlers:
if isinstance(h, logging.StreamHandler):
h.addFilter(LogFilter())
with open(config, 'rb') as fh:
accounts_config = yaml.safe_load(fh.read())
jsonschema.validate(accounts_config, CONFIG_SCHEMA)
if use:
with open(use) as fh:
custodian_config = yaml.safe_load(fh.read())
else:
custodian_config = {}
accounts_config['accounts'] = list(accounts_iterator(accounts_config))
filter_policies(custodian_config, policy_tags, policies, resource)
filter_accounts(accounts_config, tags, accounts)
load_available()
MainThreadExecutor.c7n_async = False
executor = debug and MainThreadExecutor or ProcessPoolExecutor
return accounts_config, custodian_config, executor
def resolve_regions(regions, account):
if 'all' in regions:
session = get_session(account, 'c7n-org', "us-east-1")
client = session.client('ec2')
return [region['RegionName'] for region in client.describe_regions()['Regions']]
if not regions:
return ('us-east-1', 'us-west-2')
return regions
def get_session(account, session_name, region):
if account.get('provider') != 'aws':
return None
if account.get('role'):
roles = account['role']
if isinstance(roles, str):
roles = [roles]
s = None
for r in roles:
try:
s = assumed_session(
r, session_name, region=region,
external_id=account.get('external_id'),
session=s)
except ClientError as e:
log.error(
"unable to obtain credentials for account:%s role:%s error:%s",
account['name'], r, e)
raise
return s
elif account.get('profile'):
return SessionFactory(region, account['profile'])()
else:
raise ValueError(
"No profile or role assume specified for account %s" % account)
def filter_accounts(accounts_config, tags, accounts, not_accounts=None):
filtered_accounts = []
for a in accounts_config.get('accounts', ()):
if not_accounts and a['name'] in not_accounts:
continue
account_id = a.get('account_id') or a.get('project_id') or a.get('subscription_id') or ''
if accounts and a['name'] not in accounts and account_id not in accounts:
continue
if tags:
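            # every requested tag must be present on the account (AND semantics)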
found = set()
for t in tags:
if t in a.get('tags', ()):
found.add(t)
if not found == set(tags):
continue
filtered_accounts.append(a)
accounts_config['accounts'] = filtered_accounts
def filter_policies(policies_config, tags, policies, resource, not_policies=None):
filtered_policies = []
for p in policies_config.get('policies', ()):
if not_policies and p['name'] in not_policies:
continue
if policies and p['name'] not in policies:
continue
if resource and p['resource'] != resource:
continue
if tags:
found = set()
for t in tags:
if t in p.get('tags', ()):
found.add(t)
if not found == set(tags):
continue
filtered_policies.append(p)
policies_config['policies'] = filtered_policies
def report_account(account, region, policies_config, output_path, cache_path, debug):
output_path = os.path.join(output_path, account['name'], region)
cache_path = os.path.join(cache_path, "%s-%s.cache" % (account['name'], region))
load_available()
config = Config.empty(
region=region,
output_dir=output_path,
account_id=account['account_id'], metrics_enabled=False,
cache=cache_path, log_group=None, profile=None, external_id=None)
if account.get('role'):
config['assume_role'] = account['role']
config['external_id'] = account.get('external_id')
elif account.get('profile'):
config['profile'] = account['profile']
policies = PolicyCollection.from_data(policies_config, config)
records = []
for p in policies:
        # initialize policy execution context for output access
p.ctx.initialize()
log.debug(
"Report policy:%s account:%s region:%s path:%s",
p.name, account['name'], region, output_path)
policy_records = fs_record_set(p.ctx.log_dir, p.name)
for r in policy_records:
r['policy'] = p.name
r['region'] = p.options.region
r['account'] = account['name']
for t in account.get('tags', ()):
if ':' in t:
k, v = t.split(':', 1)
r[k] = v
records.extend(policy_records)
return records
@cli.command()
@click.option('-c', '--config', required=True, help="Accounts config file")
@click.option('-f', '--output', type=click.File('w'), default='-', help="Output File")
@click.option('-u', '--use', required=True)
@click.option('-s', '--output-dir', required=True, type=click.Path())
@click.option('-a', '--accounts', multiple=True, default=None)
@click.option('--field', multiple=True)
@click.option('--no-default-fields', default=False, is_flag=True)
@click.option('-t', '--tags', multiple=True, default=None, help="Account tag filter")
@click.option('-r', '--region', default=None, multiple=True)
@click.option('--debug', default=False, is_flag=True)
@click.option('-v', '--verbose', default=False, help="Verbose", is_flag=True)
@click.option('-p', '--policy', multiple=True)
@click.option('-l', '--policytags', 'policy_tags',
multiple=True, default=None, help="Policy tag filter")
@click.option('--format', default='csv', type=click.Choice(['csv', 'json']))
@click.option('--resource', default=None)
@click.option('--cache-path', required=False, type=click.Path(), default="~/.cache/c7n-org")
def report(config, output, use, output_dir, accounts,
field, no_default_fields, tags, region, debug, verbose,
policy, policy_tags, format, resource, cache_path):
"""report on a cross account policy execution."""
accounts_config, custodian_config, executor = init(
config, use, debug, verbose, accounts, tags, policy,
resource=resource, policy_tags=policy_tags)
resource_types = set()
for p in custodian_config.get('policies'):
resource_types.add(p['resource'])
if len(resource_types) > 1:
raise ValueError("can only report on one resource type at a time")
elif not len(custodian_config['policies']) > 0:
raise ValueError("no matching policies found")
records = []
with executor(max_workers=WORKER_COUNT) as w:
futures = {}
for a in accounts_config.get('accounts', ()):
for r in resolve_regions(region or a.get('regions', ()), a):
futures[w.submit(
report_account,
a, r,
custodian_config,
output_dir,
cache_path,
debug)] = (a, r)
for f in as_completed(futures):
a, r = futures[f]
if f.exception():
if debug:
raise
log.warning(
"Error running policy in %s @ %s exception: %s",
a['name'], r, f.exception())
records.extend(f.result())
log.debug(
"Found %d records across %d accounts and %d policies",
len(records), len(accounts_config['accounts']),
len(custodian_config['policies']))
if format == 'json':
dumps(records, output, indent=2)
return
prefix_fields = OrderedDict(
(('Account', 'account'), ('Region', 'region'), ('Policy', 'policy')))
config = Config.empty()
factory = get_resource_class(list(resource_types)[0])
formatter = Formatter(
factory.resource_type,
extra_fields=field,
include_default_fields=not(no_default_fields),
include_region=False,
include_policy=False,
fields=prefix_fields)
rows = formatter.to_csv(records, unique=False)
writer = csv.writer(output, formatter.headers())
writer.writerow(formatter.headers())
writer.writerows(rows)
def _get_env_creds(account, session, region):
env = {}
if account["provider"] == 'aws':
creds = session._session.get_credentials()
env['AWS_ACCESS_KEY_ID'] = creds.access_key
env['AWS_SECRET_ACCESS_KEY'] = creds.secret_key
env['AWS_SESSION_TOKEN'] = creds.token
env['AWS_DEFAULT_REGION'] = region
env['AWS_ACCOUNT_ID'] = account["account_id"]
elif account["provider"] == 'azure':
env['AZURE_SUBSCRIPTION_ID'] = account["account_id"]
elif account["provider"] == 'gcp':
env['GOOGLE_CLOUD_PROJECT'] = account["account_id"]
env['CLOUDSDK_CORE_PROJECT'] = account["account_id"]
return filter_empty(env)
def run_account_script(account, region, output_dir, debug, script_args):
try:
session = get_session(account, "org-script", region)
except ClientError:
return 1
env = os.environ.copy()
env.update(_get_env_creds(account, session, region))
log.info("running script on account:%s region:%s script: `%s`",
account['name'], region, " ".join(script_args))
if debug:
subprocess.check_call(args=script_args, env=env)
return 0
output_dir = os.path.join(output_dir, account['name'], region)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(os.path.join(output_dir, 'stdout'), 'wb') as stdout:
with open(os.path.join(output_dir, 'stderr'), 'wb') as stderr:
return subprocess.call(
args=script_args, env=env, stdout=stdout, stderr=stderr)
@cli.command(name='run-script', context_settings=dict(ignore_unknown_options=True))
@click.option('-c', '--config', required=True, help="Accounts config file")
@click.option('-s', '--output-dir', required=True, type=click.Path())
@click.option('-a', '--accounts', multiple=True, default=None)
@click.option('-t', '--tags', multiple=True, default=None, help="Account tag filter")
@click.option('-r', '--region', default=None, multiple=True)
@click.option('--echo', default=False, is_flag=True)
@click.option('--serial', default=False, is_flag=True)
@click.argument('script_args', nargs=-1, type=click.UNPROCESSED)
def run_script(config, output_dir, accounts, tags, region, echo, serial, script_args):
"""run an aws/azure/gcp script across accounts"""
# TODO count up on success / error / error list by account
accounts_config, custodian_config, executor = init(
config, None, serial, True, accounts, tags, (), ())
if echo:
print("command to run: `%s`" % (" ".join(script_args)))
return
# Support fully quoted scripts, which are common to avoid parameter
# overlap with c7n-org run-script.
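    # e.g. (hypothetical invocation): c7n-org run-script -c accounts.yml -s out "aws s3 ls"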
if len(script_args) == 1 and " " in script_args[0]:
script_args = script_args[0].split()
success = True
with executor(max_workers=WORKER_COUNT) as w:
futures = {}
for a in accounts_config.get('accounts', ()):
for r in resolve_regions(region or a.get('regions', ()), a):
futures[
w.submit(run_account_script, a, r, output_dir,
serial, script_args)] = (a, r)
for f in as_completed(futures):
a, r = futures[f]
if f.exception():
if serial:
raise
log.warning(
"Error running script in %s @ %s exception: %s",
a['name'], r, f.exception())
success = False
exit_code = f.result()
if exit_code == 0:
log.info(
"ran script on account:%s region:%s script: `%s`",
a['name'], r, " ".join(script_args))
else:
log.info(
"error running script on account:%s region:%s script: `%s`",
a['name'], r, " ".join(script_args))
success = False
if not success:
sys.exit(1)
def accounts_iterator(config):
for a in config.get('accounts', ()):
if 'role' in a:
if isinstance(a['role'], str) and not a['role'].startswith('arn'):
a['role'] = "arn:aws:iam::{}:role/{}".format(
a['account_id'], a['role'])
yield {**a, **{'provider': 'aws'}}
for a in config.get('subscriptions', ()):
d = {'account_id': a['subscription_id'],
'name': a.get('name', a['subscription_id']),
'regions': ['global'],
'provider': 'azure',
'tags': a.get('tags', ()),
'vars': a.get('vars', {})}
yield d
for a in config.get('projects', ()):
d = {'account_id': a['project_id'],
'name': a.get('name', a['project_id']),
'regions': ['global'],
'provider': 'gcp',
'tags': a.get('tags', ()),
'vars': a.get('vars', {})}
yield d
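# Illustrative sketch (assumption, not in the original source): a GCP entry such as
#   {"project_id": "my-project", "name": "analytics"}
# is normalized by accounts_iterator into
#   {"account_id": "my-project", "name": "analytics", "regions": ["global"],
#    "provider": "gcp", "tags": (), "vars": {}}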
def run_account(account, region, policies_config, output_path,
cache_period, cache_path, metrics, dryrun, debug):
"""Execute a set of policies on an account.
"""
logging.getLogger('custodian.output').setLevel(logging.ERROR + 1)
CONN_CACHE.session = None
CONN_CACHE.time = None
load_available()
# allow users to specify interpolated output paths
if '{' not in output_path:
output_path = os.path.join(output_path, account['name'], region)
cache_path = os.path.join(cache_path, "%s-%s.cache" % (account['account_id'], region))
config = Config.empty(
region=region, cache=cache_path,
cache_period=cache_period, dryrun=dryrun, output_dir=output_path,
account_id=account['account_id'], metrics_enabled=metrics,
log_group=None, profile=None, external_id=None)
env_vars = account_tags(account)
if account.get('role'):
if isinstance(account['role'], str):
config['assume_role'] = account['role']
config['external_id'] = account.get('external_id')
else:
env_vars.update(
_get_env_creds(account, get_session(account, 'custodian', region), region))
elif account.get('profile'):
config['profile'] = account['profile']
policies = PolicyCollection.from_data(policies_config, config)
policy_counts = {}
success = True
st = time.time()
with environ(**env_vars):
for p in policies:
# Extend policy execution conditions with account information
p.conditions.env_vars['account'] = account
# Variable expansion and non schema validation (not optional)
p.expand_variables(p.get_variables(account.get('vars', {})))
p.validate()
log.debug(
"Running policy:%s account:%s region:%s",
p.name, account['name'], region)
try:
resources = p.run()
policy_counts[p.name] = resources and len(resources) or 0
if not resources:
continue
if not config.dryrun and p.execution_mode != 'pull':
log.info("Ran account:%s region:%s policy:%s provisioned time:%0.2f",
account['name'], region, p.name, time.time() - st)
continue
log.info(
"Ran account:%s region:%s policy:%s matched:%d time:%0.2f",
account['name'], region, p.name, len(resources),
time.time() - st)
except ClientError as e:
success = False
if e.response['Error']['Code'] == 'AccessDenied':
log.warning('Access denied api:%s policy:%s account:%s region:%s',
e.operation_name, p.name, account['name'], region)
return policy_counts, success
log.error(
"Exception running policy:%s account:%s region:%s error:%s",
p.name, account['name'], region, e)
continue
except Exception as e:
success = False
log.error(
"Exception running policy:%s account:%s region:%s error:%s",
p.name, account['name'], region, e)
if not debug:
continue
import traceback, pdb, sys
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
raise
return policy_counts, success
@cli.command(name='run')
@click.option('-c', '--config', required=True, help="Accounts config file")
@click.option("-u", "--use", required=True)
@click.option('-s', '--output-dir', required=True, type=click.Path())
@click.option('-a', '--accounts', multiple=True, default=None)
@click.option('-t', '--tags', multiple=True, default=None, help="Account tag filter")
@click.option('-r', '--region', default=None, multiple=True)
@click.option('-p', '--policy', multiple=True)
@click.option('-l', '--policytags', 'policy_tags',
multiple=True, default=None, help="Policy tag filter")
@click.option('--cache-period', default=15, type=int)
@click.option('--cache-path', required=False,
type=click.Path(
writable=True, readable=True, exists=True,
resolve_path=True, allow_dash=False,
file_okay=False, dir_okay=True),
default=None)
@click.option("--metrics", default=False, is_flag=True)
@click.option("--metrics-uri", default=None, help="Configure provider metrics target")
@click.option("--dryrun", default=False, is_flag=True)
@click.option('--debug', default=False, is_flag=True)
@click.option('-v', '--verbose', default=False, help="Verbose", is_flag=True)
def run(config, use, output_dir, accounts, tags, region,
policy, policy_tags, cache_period, cache_path, metrics,
dryrun, debug, verbose, metrics_uri):
"""run a custodian policy across accounts"""
accounts_config, custodian_config, executor = init(
config, use, debug, verbose, accounts, tags, policy, policy_tags=policy_tags)
policy_counts = Counter()
success = True
if metrics_uri:
metrics = metrics_uri
if not cache_path:
cache_path = os.path.expanduser("~/.cache/c7n-org")
if not os.path.exists(cache_path):
os.makedirs(cache_path)
with executor(max_workers=WORKER_COUNT) as w:
futures = {}
for a in accounts_config['accounts']:
for r in resolve_regions(region or a.get('regions', ()), a):
futures[w.submit(
run_account,
a, r,
custodian_config,
output_dir,
cache_period,
cache_path,
metrics,
dryrun,
debug)] = (a, r)
for f in as_completed(futures):
a, r = futures[f]
if f.exception():
if debug:
raise
log.warning(
"Error running policy in %s @ %s exception: %s",
a['name'], r, f.exception())
continue
account_region_pcounts, account_region_success = f.result()
for p in account_region_pcounts:
policy_counts[p] += account_region_pcounts[p]
if not account_region_success:
success = False
log.info("Policy resource counts %s" % policy_counts)
if not success:
sys.exit(1)
|
[] |
[] |
[
"C7N_ORG_PARALLEL"
] |
[]
|
["C7N_ORG_PARALLEL"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"context"
"log"
"net"
"os"
"time"
"github.com/joho/godotenv"
"github.com/nats-io/nats.go"
otgrpc "github.com/opentracing-contrib/go-grpc"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
"github.com/uber/jaeger-client-go"
"github.com/uber/jaeger-client-go/config"
"github.com/wisdommatt/ecommerce-microservice-user-service/grpc/proto"
servers "github.com/wisdommatt/ecommerce-microservice-user-service/grpc/service-servers"
"github.com/wisdommatt/ecommerce-microservice-user-service/internal/users"
"github.com/wisdommatt/ecommerce-microservice-user-service/services"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"google.golang.org/grpc"
)
func main() {
log := logrus.New()
log.SetFormatter(&logrus.JSONFormatter{PrettyPrint: true})
log.SetReportCaller(true)
log.SetOutput(os.Stdout)
mustLoadDotenv(log)
natsConn, err := nats.Connect(os.Getenv("NATS_URI"))
if err != nil {
log.WithField("nats_uri", os.Getenv("NATS_URI")).WithError(err).
Error("an error occured while connecting to nats")
}
defer natsConn.Close()
serviceTracer := initTracer("user-service")
opentracing.SetGlobalTracer(serviceTracer)
port := os.Getenv("PORT")
if port == "" {
port = "2020"
}
lis, err := net.Listen("tcp", ":"+port)
if err != nil {
log.WithError(err).Fatal("TCP conn error")
}
mongoDBClient := mustConnectMongoDB(log)
userRepository := users.NewRepository(mongoDBClient, initTracer("mongodb"))
userService := services.NewUserService(userRepository, initTracer("user.ServiceHandler"), natsConn)
grpcServer := grpc.NewServer(
grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(serviceTracer)),
grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(serviceTracer)),
)
proto.RegisterUserServiceServer(grpcServer, servers.NewUserServiceServer(userService))
log.WithField("nats_uri", os.Getenv("NATS_URI")).Info("Server running on port: ", port)
grpcServer.Serve(lis)
}
func mustConnectMongoDB(log *logrus.Logger) *mongo.Database {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
client, err := mongo.Connect(ctx, options.Client().ApplyURI(os.Getenv("MONGODB_URI")))
if err != nil {
log.WithError(err).Fatal("Unable to connect to mongodb")
}
return client.Database(os.Getenv("MONGODB_DATABASE_NAME"))
}
func mustLoadDotenv(log *logrus.Logger) {
err := godotenv.Load(".env", ".env-defaults")
if err != nil {
log.WithError(err).Fatal("Unable to load env files")
}
}
func initTracer(serviceName string) opentracing.Tracer {
return initJaegerTracer(serviceName)
}
func initJaegerTracer(serviceName string) opentracing.Tracer {
cfg := &config.Configuration{
ServiceName: serviceName,
Sampler: &config.SamplerConfig{
Type: "const",
Param: 1,
},
}
tracer, _, err := cfg.NewTracer(config.Logger(jaeger.StdLogger))
if err != nil {
log.Fatal("ERROR: cannot init Jaeger", err)
}
return tracer
}
|
[
"\"NATS_URI\"",
"\"NATS_URI\"",
"\"PORT\"",
"\"NATS_URI\"",
"\"MONGODB_URI\"",
"\"MONGODB_DATABASE_NAME\""
] |
[] |
[
"PORT",
"NATS_URI",
"MONGODB_DATABASE_NAME",
"MONGODB_URI"
] |
[]
|
["PORT", "NATS_URI", "MONGODB_DATABASE_NAME", "MONGODB_URI"]
|
go
| 4 | 0 | |
runlog/wsgi.py
|
"""
WSGI config for runlog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'runlog.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
vendor/github.com/spf13/cobra/doc/man_docs.go
|
// Copyright 2015 Red Hat Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package doc
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/cpuguy83/go-md2man/v2/md2man"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// GenManTree will generate a man page for this command and all descendants
// in the directory given. The header may be nil. This function may not work
// correctly if your command names have `-` in them. If you have `cmd` with two
// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third`
// it is undefined which help output will be in the file `cmd-sub-third.1`.
func GenManTree(cmd *cobra.Command, header *GenManHeader, dir string) error {
return GenManTreeFromOpts(cmd, GenManTreeOptions{
Header: header,
Path: dir,
CommandSeparator: "-",
})
}
// GenManTreeFromOpts generates a man page for the command and all descendants.
// The pages are written to the opts.Path directory.
func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error {
header := opts.Header
if header == nil {
header = &GenManHeader{}
}
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
continue
}
if err := GenManTreeFromOpts(c, opts); err != nil {
return err
}
}
section := "1"
if header.Section != "" {
section = header.Section
}
separator := "_"
if opts.CommandSeparator != "" {
separator = opts.CommandSeparator
}
basename := strings.Replace(cmd.CommandPath(), " ", separator, -1)
filename := filepath.Join(opts.Path, basename+"."+section)
f, err := os.Create(filename)
if err != nil {
return err
}
defer f.Close()
headerCopy := *header
return GenMan(cmd, &headerCopy, f)
}
// GenManTreeOptions is the options for generating the man pages.
// Used only in GenManTreeFromOpts.
type GenManTreeOptions struct {
Header *GenManHeader
Path string
CommandSeparator string
}
// GenManHeader is a lot like the .TH header at the start of man pages. These
// include the title, section, date, source, and manual. We will use the
// current time if Date is unset and will use "Auto generated by spf13/cobra"
// if the Source is unset.
type GenManHeader struct {
Title string
Section string
Date *time.Time
date string
Source string
Manual string
}
// GenMan will generate a man page for the given command and write it to
// w. The header argument may be nil, however obviously w may not.
func GenMan(cmd *cobra.Command, header *GenManHeader, w io.Writer) error {
if header == nil {
header = &GenManHeader{}
}
if err := fillHeader(header, cmd.CommandPath(), cmd.DisableAutoGenTag); err != nil {
return err
}
b := genMan(cmd, header)
_, err := w.Write(md2man.Render(b))
return err
}
func fillHeader(header *GenManHeader, name string, disableAutoGen bool) error {
if header.Title == "" {
header.Title = strings.ToUpper(strings.Replace(name, " ", "\\-", -1))
}
if header.Section == "" {
header.Section = "1"
}
if header.Date == nil {
now := time.Now()
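		// SOURCE_DATE_EPOCH (seconds since the Unix epoch) lets reproducible
		// builds pin the generated man page date instead of using time.Now().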
if epoch := os.Getenv("SOURCE_DATE_EPOCH"); epoch != "" {
unixEpoch, err := strconv.ParseInt(epoch, 10, 64)
if err != nil {
return fmt.Errorf("invalid SOURCE_DATE_EPOCH: %v", err)
}
now = time.Unix(unixEpoch, 0)
}
header.Date = &now
}
header.date = (*header.Date).Format("Jan 2006")
if header.Source == "" && !disableAutoGen {
header.Source = "Auto generated by spf13/cobra"
}
return nil
}
func manPreamble(buf io.StringWriter, header *GenManHeader, cmd *cobra.Command, dashedName string) {
description := cmd.Long
if len(description) == 0 {
description = cmd.Short
}
cobra.WriteStringAndCheck(buf, fmt.Sprintf(`%% "%s" "%s" "%s" "%s" "%s"
# NAME
`, header.Title, header.Section, header.date, header.Source, header.Manual))
cobra.WriteStringAndCheck(buf, fmt.Sprintf("%s \\- %s\n\n", dashedName, cmd.Short))
cobra.WriteStringAndCheck(buf, "# SYNOPSIS\n")
cobra.WriteStringAndCheck(buf, fmt.Sprintf("**%s**\n\n", cmd.UseLine()))
cobra.WriteStringAndCheck(buf, "# DESCRIPTION\n")
cobra.WriteStringAndCheck(buf, description+"\n\n")
}
func manPrintFlags(buf io.StringWriter, flags *pflag.FlagSet) {
flags.VisitAll(func(flag *pflag.Flag) {
if len(flag.Deprecated) > 0 || flag.Hidden {
return
}
format := ""
if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
format = fmt.Sprintf("**-%s**, **--%s**", flag.Shorthand, flag.Name)
} else {
format = fmt.Sprintf("**--%s**", flag.Name)
}
if len(flag.NoOptDefVal) > 0 {
format += "["
}
if flag.Value.Type() == "string" {
// put quotes on the value
format += "=%q"
} else {
format += "=%s"
}
if len(flag.NoOptDefVal) > 0 {
format += "]"
}
format += "\n\t%s\n\n"
cobra.WriteStringAndCheck(buf, fmt.Sprintf(format, flag.DefValue, flag.Usage))
})
}
func manPrintOptions(buf io.StringWriter, command *cobra.Command) {
flags := command.NonInheritedFlags()
if flags.HasAvailableFlags() {
cobra.WriteStringAndCheck(buf, "# OPTIONS\n")
manPrintFlags(buf, flags)
cobra.WriteStringAndCheck(buf, "\n")
}
flags = command.InheritedFlags()
if flags.HasAvailableFlags() {
cobra.WriteStringAndCheck(buf, "# OPTIONS INHERITED FROM PARENT COMMANDS\n")
manPrintFlags(buf, flags)
cobra.WriteStringAndCheck(buf, "\n")
}
}
func genMan(cmd *cobra.Command, header *GenManHeader) []byte {
cmd.InitDefaultHelpCmd()
cmd.InitDefaultHelpFlag()
// something like `rootcmd-subcmd1-subcmd2`
dashCommandName := strings.Replace(cmd.CommandPath(), " ", "-", -1)
buf := new(bytes.Buffer)
manPreamble(buf, header, cmd, dashCommandName)
manPrintOptions(buf, cmd)
if len(cmd.Example) > 0 {
buf.WriteString("# EXAMPLE\n")
buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example))
}
if hasSeeAlso(cmd) {
buf.WriteString("# SEE ALSO\n")
seealsos := make([]string, 0)
if cmd.HasParent() {
parentPath := cmd.Parent().CommandPath()
dashParentPath := strings.Replace(parentPath, " ", "-", -1)
seealso := fmt.Sprintf("**%s(%s)**", dashParentPath, header.Section)
seealsos = append(seealsos, seealso)
cmd.VisitParents(func(c *cobra.Command) {
if c.DisableAutoGenTag {
cmd.DisableAutoGenTag = c.DisableAutoGenTag
}
})
}
children := cmd.Commands()
sort.Sort(byName(children))
for _, c := range children {
if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
continue
}
seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section)
seealsos = append(seealsos, seealso)
}
buf.WriteString(strings.Join(seealsos, ", ") + "\n")
}
if !cmd.DisableAutoGenTag {
buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006")))
}
return buf.Bytes()
}
|
[
"\"SOURCE_DATE_EPOCH\""
] |
[] |
[
"SOURCE_DATE_EPOCH"
] |
[]
|
["SOURCE_DATE_EPOCH"]
|
go
| 1 | 0 | |
main.py
|
import discord
from discord.ext import commands
import os
import cogs
from myutils import MyUtils
TOKEN = os.getenv("BOT_TOKEN")
PREFIX = "&"
bot = commands.Bot(command_prefix=PREFIX, description="Bot de l'ASTUS")
@bot.event
async def on_ready():
await bot.change_presence(activity=discord.Game(name="Help"))
print(f'{bot.user} has connected to Discord!')
@bot.event
async def on_message(message):
if not message.author.bot:
if message.content.lower() == "ping":
await message.channel.send("pong")
await bot.change_presence(activity=discord.Game(name="Ping-Pong"))
if message.content.lower().replace(" ", "") in ["astusbot", "botastus", ] or \
bot.user in message.mentions:
await message.channel.send("Le bot de l'astus pour te servir, tu as besoin de savoir ce que tu peux "
"me demander ? tape ``" + PREFIX + "help `` pour avoir une liste de ce que"
"je sais faire. \n Sinon ``" + PREFIX +
"help [sujet]`` te permet "
"d'avoir de l'aide sur un sujet en particulier :wink:")
await bot.process_commands(message)
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.CommandNotFound):
await ctx.send("Mmmmmmh, j'ai bien l'impression que cette commande n'existe pas :/")
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Il manque un argument.")
if isinstance(error, commands.CheckFailure) or isinstance(error, commands.MissingPermissions):
await ctx.send("Oups tu ne peux pas utiliser cette commande.")
if isinstance(error, discord.Forbidden):
await ctx.send("Oups, je n'ai pas les permissions nécessaires pour faire cette commmande")
@bot.event
async def on_raw_reaction_add(payload):
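    # Reaction-role handler: reactions on the hard-coded role-selection message
    # (ID 726611125252128768) map emojis to guild roles below.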
messageID = payload.message_id
if messageID == 726611125252128768:
await bot.change_presence(activity=discord.Game(name="Give some roles"))
guildID = payload.guild_id
guild = discord.utils.find(lambda g: g.id == guildID, bot.guilds)
if payload.emoji.name == '3️⃣':
# print("3TC")
await payload.member.add_roles(MyUtils(guild).get3TCRole(),
MyUtils(guild).getStudentRole())
elif payload.emoji.name == '4️⃣':
# print("4TC")
await payload.member.add_roles(MyUtils(guild).get4TCRole(),
MyUtils(guild).getStudentRole())
elif payload.emoji.name == '5️⃣':
# print("5TC")
await payload.member.add_roles(MyUtils(guild).get5TCRole(),
MyUtils(guild).getStudentRole())
elif payload.emoji.name == '🇦':
# print("TCA")
await payload.member.add_roles(MyUtils(guild).getTCARole())
elif payload.emoji.name == '👨🏫':
# print("Prof")
await payload.member.add_roles(MyUtils(guild).getProfRole())
elif payload.emoji.name == '🎓':
# print("Diplomes")
await payload.member.add_roles(MyUtils(guild).getDiplomesRole())
elif payload.emoji.name == '🆕':
# print("Futur TC")
await payload.member.add_roles(MyUtils(guild).getFuturTCRole())
elif payload.emoji.name == "💼":
await payload.member.add_roles(MyUtils(guild).getEntrepriseRole())
@bot.event
async def on_member_update(before, after):
if len(before.roles) < len(after.roles):
new_role = next(role for role in after.roles if role not in before.roles)
chan = await after.create_dm()
if new_role.name == "3 TC":
await chan.send("\nSalut, tu as le rôle 3TC sur le serveur de l'astus, ce rôle te permet de voir la "
"catégorie 3TC et de discuter avec tes camarades")
if new_role.name == "4 TC":
await chan.send("\nSalut, tu as le rôle 4TC sur le serveur de l'astus, ce rôle te permet de voir la "
"catégorie 4TC et de discuter avec tes camarades")
if new_role.name == "5 TC":
await chan.send("\nSalut, tu as le rôle 5TC sur le serveur de l'astus, ce rôle te permet de voir la "
"catégorie 5TC et de discuter avec tes camarades")
if new_role.name == "Futur TC":
await chan.send(
"\nSalut, et bienvenue à toi Futur TC. Tu as accès à la categorie Integration du serveur :wave: \n\n "
"Le bureau de l'Astus est prêt à t'accueillir et à te faire passer une intégration que tu n'oublieras "
"pas , crois nous ! ( tout dans le respect des gestes barrières :man_pouting: :left_right_arrow: "
":deaf_person: , le gel hydroalcoolique sera notre meilleur ami ). \n"
":arrow_forward: Point intégration : La rentrée est initialement prévue le 14 septembre 2020, mais une "
"rumeur de Covid complique les choses. Donc pour le moment on se base dessus, et on prévoie de vous "
"organiser une inté à partir du jeudi 10 au soir. \n"
":arrow_forward: Si ce n'est pas déjà le cas, on t'invite à rejoindre le groupe Facebook de la promo, "
"où des informations tourneront aussi par rapport aux activités en journée"
" www.facebook.com/groups/tc2023/ \n "
":arrow_forward: Questions réponses : Ce chanel est dédié à répondre à toutes vos questions sur"
" l'intégration, que ce soit d'un point de vue logistique ou même sur l'organisation globale de "
"celle- ci. N'hésite pas, nous serons nombreux à pouvoir te répondre ! \n")
if new_role.name == "Student":
await chan.send("\n:wave: Bienvenue sur le serveur de l'ASTUS, tu trouveras plusieurs categories sur le "
"serveur. \n \n"
"``Général`` ici des annonces de l'ASTUS seront faites. Vous pouvez voir un channel \n"
"``gestion-music`` qui permet d'utiliser l'enceinte de l'ASTUS \n"
"``Que deviens- tu ? `` Tu as envie de parler de ton expérience à l'issu de ton parcours "
"TC? Des conseils à donner aux futurs diplômés? Cet espace est fait pour toi ! "
" :man_technologist: \n"
"Au contraire, tu es un étudiant concerné par ce que deviennent les anciens diplômés,"
" c'est aussi ici que tu peux t'exprimer ! \n"
"``Section Astus `` ( Accès attribués aux étudiants ): "
"Alors là, vous faites ce que vous voulez, quand vous voulez. Le chanel modélise le local "
"de l'asso, donc on modère uniquement en cas de propos haineux, racistes, ou toute la liste"
" qui suit. C'est votre espace détente donc lâchez vous, ça compte aussi pour les "
"futurs 3TC ! \n"
"``Section intégration `` Le bureau de l'Astus est prêt à t'accueillir et à te faire passer"
" une intégration que tu n'oublieras pas , crois nous ! ( tout dans le respect des gestes "
"barrières :man_pouting: :left_right_arrow: :deaf_person: , le gel hydroalcoolique sera "
"notre meilleur ami ). \n "
":arrow_forward: Point intégration : La rentrée est initialement prévue le 14 septembre"
" 2020, mais une rumeur de Covid complique les choses. Donc pour le moment on se base "
"dessus, et on prévoie de vous organiser une inté à partir du jeudi 10 au soir. \n"
":arrow_forward: Si ce n'est pas déjà le cas, on t'invite à rejoindre le groupe "
"Facebook de la promo, où des informations tourneront aussi par rapport aux activités "
"en journée www.facebook.com/groups/tc2023/ \n "
":arrow_forward: Questions réponses : Ce chanel est dédié à répondre à toutes "
"vos questions sur l'intégration, que ce soit d'un point de vue logistique ou même sur "
"l'organisation globale de celle- ci. N'hésite pas, nous serons nombreux "
"à pouvoir te répondre ! \n"
)
if new_role.name in ["Prof", "Diplômés"]:
await chan.send("\n:wave:Madame, Monsieur, \n"
"Bienvenue sur le serveur de l'ASTUS, vous trouverez plusieurs categories sur le "
"serveur. :speaking_head: \n \n"
":arrow_forward: ``Général`` ici des annonces de l'ASTUS seront faites. \n"
":arrow_forward: ``gestion-music`` qui permet d'utiliser l'enceinte de l'ASTUS \n"
":arrow_forward: ``Un Boulot / Stage`` , permet de mettre en relation des dipômés avec "
"les TC actuels "
"afin de trouver un stage ou un emploi pour les 5TC qui vont avoir leur diplôme \n"
" :arrow_forward: Garder le contact, permet de discuter avec des diplômés de leur"
" parcours\n "
)
if new_role.name == "Admin Groupe de Travail":
await chan.send("\nTu es un admin des groupes de travail mis en place par l'ASTUS, tu peux créer, "
"supprimer des channels dans la categorie groupe de travail afin de les animer "
"au mieux. May the force be with you ! :man_technologist: \n"
)
if new_role.name == "ASTUS":
await chan.send("\nBienvenue à l'ASTUS ! \n"
"Tout d'abord, félicitation à toi pour avoir intégré l'ASTUS :wink: \n"
"Tu as maintenant accés à la categorie ASTUS, tu retrouveras un channel général pour "
"parler avec tous tes p'tits potes de l'ASTUS. Il y a aussi channel passation pour "
"parler avec l'ancien G4 de la gestion de l'ASTUS quand la fameuse heure viendra."
" En fonction de ton rôle, tu ne vois pas certains"
"channel, je t'explique tout cela rapidement :wink:\n")
if new_role.name == "G4":
await chan.send("\nUn grand pouvoir inplique de grandes responsabilités. C'est grâce à toi que l'astus "
"peut tourner. Tu as accès à quelques commandes de gestion du serveur (plus d'info "
"avec ``" + PREFIX + "help`` sur le serveur\n")
if new_role.name == "Team Event":
await chan.send("\nC'est toi qui va nous régaler avec tout pleins d'Event. Un channel dans la catégorie "
"ASTUS t'es réservé\n")
if new_role.name == "Resp Team Event":
await chan.send("\nTu gères la Team Event, pour cela tu as accès à un channel dédié avec ta team\n")
if new_role.name == "Team Entreprise":
await chan.send("\nC'est toi qui va nous régaler avec tout pleins de rencontre avec des entreprises."
"Un channel dans la catégorie ASTUS t'es réservé\n")
if new_role.name == "Resp Team Entreprise":
await chan.send("\nTu gères la Team Entreprise, pour cela tu as accès à un channel dédié avec ta team\n")
if new_role.name == "Resp Site International":
await chan.send("\nResp du site ! \n"
"C'est grâce à toi que le site peut évoluer, demande à ce qu'on t'ajoute au "
"repo GitHub :wink:\n")
if new_role.name == "Resp Comm":
await chan.send("\nResp comm ! \n"
"L'ASTUS compte sur toi pour un max de communication. Tu géres la page FB de l'astus. "
"Tu fais les annonces et les affiches pour tous les events\n ")
if new_role.name == "Entreprise":
await chan.send("\n:wave:Madame, Monsieur, \n"
"Bienvenue sur le serveur de l'ASTUS, vous trouverez plusieurs categories sur le "
"serveur. :speaking_head: \n \n"
":arrow_forward: ``Général`` ici tous le monde peut parler (profs, élèves, entreprises, "
"diplômés). \n"
":arrow_forward: ``Un Boulot / Stage`` , permet de mettre en relation des étudiants avec "
"des entrepries. \n"
)
@bot.command()
async def load(ctx, name=None):
if name and MyUtils(ctx.guild).getAdminRole() in ctx.message.author.roles:
try:
bot.load_extension(name)
await ctx.send(name + "load")
except:
await ctx.send(name + " has has already up")
else:
raise discord.ext.commands.CheckFailure
@bot.command()
async def unload(ctx, name=None):
if name and MyUtils(ctx.guild).getAdminRole() in ctx.message.author.roles:
try:
bot.unload_extension(name)
await ctx.send(name + " unload")
except:
await ctx.send(name + " has has already down")
else:
raise discord.ext.commands.CheckFailure
@bot.command()
async def reload(ctx, name=None):
if name and MyUtils(ctx.guild).getAdminRole() in ctx.message.author.roles:
try:
bot.reload_extension(name)
await ctx.send(name + " reload")
except:
bot.load_extension(name)
await ctx.send(name + " load")
else:
raise discord.ext.commands.CheckFailure
if __name__ == '__main__':
# Remove default help command
bot.remove_command("help")
# cogs
bot.add_cog(cogs.CogPassation(bot, PREFIX))
bot.load_extension("cogs.newyear")
bot.load_extension("cogs.help")
bot.load_extension("cogs.videoDiplomes")
bot.load_extension("cogs.invitation")
bot.load_extension("cogs.infoFromIP")
bot.load_extension("cogs.NSandSOALookup")
bot.load_extension("cogs.international")
bot.load_extension("cogs.vendrediChill")
bot.load_extension("cogs.calendar.cog")
bot.load_extension("cogs.pot_tres_confine")
bot.load_extension("cogs.tgg.cog")
bot.load_extension("cogs.bastos")
bot.run(TOKEN)
|
[] |
[] |
[
"BOT_TOKEN"
] |
[]
|
["BOT_TOKEN"]
|
python
| 1 | 0 | |
orderer/common/server/main.go
|
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package server
import (
"bytes"
"context"
"fmt"
"github.com/hyperledger/fabric/orderer/common/channelparticipation"
"io/ioutil"
"net"
"net/http"
_ "net/http/pprof" // This is essentially the main package for the orderer
"os"
"os/signal"
"sync"
"syscall"
"time"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric-lib-go/healthz"
cb "github.com/hyperledger/fabric-protos-go/common"
ab "github.com/hyperledger/fabric-protos-go/orderer"
"github.com/hyperledger/fabric/bccsp"
"github.com/hyperledger/fabric/bccsp/factory"
"github.com/hyperledger/fabric/common/channelconfig"
"github.com/hyperledger/fabric/common/crypto"
"github.com/hyperledger/fabric/common/flogging"
floggingmetrics "github.com/hyperledger/fabric/common/flogging/metrics"
"github.com/hyperledger/fabric/common/grpclogging"
"github.com/hyperledger/fabric/common/grpcmetrics"
"github.com/hyperledger/fabric/common/ledger/blockledger"
"github.com/hyperledger/fabric/common/metrics"
"github.com/hyperledger/fabric/common/metrics/disabled"
"github.com/hyperledger/fabric/common/tools/protolator"
"github.com/hyperledger/fabric/core/operations"
"github.com/hyperledger/fabric/internal/pkg/comm"
"github.com/hyperledger/fabric/internal/pkg/identity"
"github.com/hyperledger/fabric/msp"
"github.com/hyperledger/fabric/orderer/common/bootstrap/file"
"github.com/hyperledger/fabric/orderer/common/cluster"
"github.com/hyperledger/fabric/orderer/common/localconfig"
"github.com/hyperledger/fabric/orderer/common/metadata"
"github.com/hyperledger/fabric/orderer/common/multichannel"
"github.com/hyperledger/fabric/orderer/consensus"
"github.com/hyperledger/fabric/orderer/consensus/etcdraft"
"github.com/hyperledger/fabric/orderer/consensus/kafka"
"github.com/hyperledger/fabric/orderer/consensus/solo"
"github.com/hyperledger/fabric/protoutil"
"go.uber.org/zap/zapcore"
"google.golang.org/grpc"
"gopkg.in/alecthomas/kingpin.v2"
)
var logger = flogging.MustGetLogger("orderer.common.server")
// command line flags
var (
app = kingpin.New("orderer", "Hyperledger Fabric orderer node")
_ = app.Command("start", "Start the orderer node").Default() // preserved for cli compatibility
version = app.Command("version", "Show version information")
clusterTypes = map[string]struct{}{"etcdraft": {}}
)
// Main is the entry point of orderer process
func Main() {
fullCmd := kingpin.MustParse(app.Parse(os.Args[1:]))
// "version" command
if fullCmd == version.FullCommand() {
fmt.Println(metadata.GetVersionInfo())
return
}
conf, err := localconfig.Load()
if err != nil {
logger.Error("failed to parse config: ", err)
os.Exit(1)
}
initializeLogging()
prettyPrintStruct(conf)
cryptoProvider := factory.GetDefault()
signer, signErr := loadLocalMSP(conf).GetDefaultSigningIdentity()
if signErr != nil {
logger.Panicf("Failed to get local MSP identity: %s", signErr)
}
opsSystem := newOperationsSystem(conf.Operations, conf.Metrics)
metricsProvider := opsSystem.Provider
logObserver := floggingmetrics.NewObserver(metricsProvider)
flogging.SetObserver(logObserver)
serverConfig := initializeServerConfig(conf, metricsProvider)
grpcServer := initializeGrpcServer(conf, serverConfig)
caMgr := &caManager{
appRootCAsByChain: make(map[string][][]byte),
ordererRootCAsByChain: make(map[string][][]byte),
clientRootCAs: serverConfig.SecOpts.ClientRootCAs,
}
lf, _, err := createLedgerFactory(conf, metricsProvider)
if err != nil {
logger.Panicf("Failed to create ledger factory: %v", err)
}
var clusterBootBlock *cb.Block
// configure following artifacts properly if orderer is of cluster type
var r *replicationInitiator
clusterServerConfig := serverConfig
clusterGRPCServer := grpcServer // by default, cluster shares the same grpc server
var clusterClientConfig comm.ClientConfig
var clusterDialer *cluster.PredicateDialer
var clusterType, reuseGrpcListener bool
var serversToUpdate []*comm.GRPCServer
bootstrapMethod := conf.General.BootstrapMethod
if bootstrapMethod == "file" || bootstrapMethod == "none" {
bootstrapBlock := extractBootstrapBlock(conf)
if bootstrapBlock == nil {
bootstrapBlock = extractSystemChannel(lf, cryptoProvider)
}
if bootstrapBlock != nil {
if err := ValidateBootstrapBlock(bootstrapBlock, cryptoProvider); err != nil {
logger.Panicf("Failed validating bootstrap block: %v", err)
}
sysChanLastConfigBlock := extractSysChanLastConfig(lf, bootstrapBlock)
clusterBootBlock = selectClusterBootBlock(bootstrapBlock, sysChanLastConfigBlock)
typ := consensusType(bootstrapBlock, cryptoProvider)
clusterType = isClusterType(clusterBootBlock, cryptoProvider)
if clusterType {
logger.Infof("Setting up cluster for orderer type %s", typ)
clusterClientConfig = initializeClusterClientConfig(conf)
clusterDialer = &cluster.PredicateDialer{
Config: clusterClientConfig,
}
r = createReplicator(lf, bootstrapBlock, conf, clusterClientConfig.SecOpts, signer, cryptoProvider)
// Only clusters that are equipped with a recent config block can replicate.
if conf.General.BootstrapMethod == "file" {
r.replicateIfNeeded(bootstrapBlock)
}
if reuseGrpcListener = reuseListener(conf, typ); !reuseGrpcListener {
clusterServerConfig, clusterGRPCServer = configureClusterListener(conf, serverConfig, ioutil.ReadFile)
}
// If we have a separate gRPC server for the cluster,
// we need to update its TLS CA certificate pool.
serversToUpdate = append(serversToUpdate, clusterGRPCServer)
}
// Are we bootstrapping?
if len(lf.ChannelIDs()) == 0 {
initializeBootstrapChannel(clusterBootBlock, lf)
} else {
logger.Info("Not bootstrapping because of existing channels")
}
}
}
identityBytes, err := signer.Serialize()
if err != nil {
logger.Panicf("Failed serializing signing identity: %v", err)
}
expirationLogger := flogging.MustGetLogger("certmonitor")
crypto.TrackExpiration(
serverConfig.SecOpts.UseTLS,
serverConfig.SecOpts.Certificate,
[][]byte{clusterClientConfig.SecOpts.Certificate},
identityBytes,
expirationLogger.Warnf, // This can be used to piggyback a metric event in the future
time.Now(),
time.AfterFunc)
// if cluster is reusing client-facing server, then it is already
// appended to serversToUpdate at this point.
if grpcServer.MutualTLSRequired() && !reuseGrpcListener {
serversToUpdate = append(serversToUpdate, grpcServer)
}
tlsCallback := func(bundle *channelconfig.Bundle) {
logger.Debug("Executing callback to update root CAs")
caMgr.updateTrustedRoots(bundle, serversToUpdate...)
if clusterType {
caMgr.updateClusterDialer(
clusterDialer,
clusterClientConfig.SecOpts.ServerRootCAs,
)
}
}
manager := initializeMultichannelRegistrar(
clusterBootBlock,
r,
clusterDialer,
clusterServerConfig,
clusterGRPCServer,
conf,
signer,
metricsProvider,
opsSystem,
lf,
cryptoProvider,
tlsCallback,
)
opsSystem.RegisterHandler(
channelparticipation.URLBaseV1,
channelparticipation.NewHTTPHandler(conf.ChannelParticipation, manager),
)
if err = opsSystem.Start(); err != nil {
logger.Panicf("failed to start operations subsystem: %s", err)
}
defer opsSystem.Stop()
mutualTLS := serverConfig.SecOpts.UseTLS && serverConfig.SecOpts.RequireClientCert
server := NewServer(
manager,
metricsProvider,
&conf.Debug,
conf.General.Authentication.TimeWindow,
mutualTLS,
conf.General.Authentication.NoExpirationChecks,
)
logger.Infof("Starting %s", metadata.GetVersionInfo())
handleSignals(addPlatformSignals(map[os.Signal]func(){
syscall.SIGTERM: func() {
grpcServer.Stop()
if clusterGRPCServer != grpcServer {
clusterGRPCServer.Stop()
}
},
}))
if !reuseGrpcListener && clusterType {
logger.Info("Starting cluster listener on", clusterGRPCServer.Address())
go clusterGRPCServer.Start()
}
if conf.General.Profile.Enabled {
go initializeProfilingService(conf)
}
ab.RegisterAtomicBroadcastServer(grpcServer.Server(), server)
logger.Info("Beginning to serve requests")
if err := grpcServer.Start(); err != nil {
logger.Fatalf("Atomic Broadcast gRPC server has terminated while serving requests due to: %v", err)
}
}
func reuseListener(conf *localconfig.TopLevel, typ string) bool {
clusterConf := conf.General.Cluster
// If listen address is not configured, and the TLS certificate isn't configured,
// it means we use the general listener of the node.
if clusterConf.ListenPort == 0 && clusterConf.ServerCertificate == "" && clusterConf.ListenAddress == "" && clusterConf.ServerPrivateKey == "" {
logger.Info("Cluster listener is not configured, defaulting to use the general listener on port", conf.General.ListenPort)
if !conf.General.TLS.Enabled {
logger.Panicf("TLS is required for running ordering nodes of type %s.", typ)
}
return true
}
// Else, one of the above is defined, so all 4 properties should be defined.
if clusterConf.ListenPort == 0 || clusterConf.ServerCertificate == "" || clusterConf.ListenAddress == "" || clusterConf.ServerPrivateKey == "" {
logger.Panic("Options: General.Cluster.ListenPort, General.Cluster.ListenAddress, General.Cluster.ServerCertificate," +
" General.Cluster.ServerPrivateKey, should be defined altogether.")
}
return false
}
// Extract system channel last config block
func extractSysChanLastConfig(lf blockledger.Factory, bootstrapBlock *cb.Block) *cb.Block {
// Are we bootstrapping?
channelCount := len(lf.ChannelIDs())
if channelCount == 0 {
logger.Info("Bootstrapping because no existing channels")
return nil
}
logger.Infof("Not bootstrapping because of %d existing channels", channelCount)
systemChannelName, err := protoutil.GetChannelIDFromBlock(bootstrapBlock)
if err != nil {
logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
}
systemChannelLedger, err := lf.GetOrCreate(systemChannelName)
if err != nil {
logger.Panicf("Failed getting system channel ledger: %v", err)
}
height := systemChannelLedger.Height()
lastConfigBlock := multichannel.ConfigBlock(systemChannelLedger)
logger.Infof("System channel: name=%s, height=%d, last config block number=%d",
systemChannelName, height, lastConfigBlock.Header.Number)
return lastConfigBlock
}
// extractSystemChannel loops through all channels, and return the last
// config block for the system channel. Returns nil if no system channel
// was found.
func extractSystemChannel(lf blockledger.Factory, bccsp bccsp.BCCSP) *cb.Block {
for _, cID := range lf.ChannelIDs() {
channelLedger, err := lf.GetOrCreate(cID)
if err != nil {
logger.Panicf("Failed getting channel %v's ledger: %v", cID, err)
}
channelConfigBlock := multichannel.ConfigBlock(channelLedger)
err = ValidateBootstrapBlock(channelConfigBlock, bccsp)
if err == nil {
return channelConfigBlock
}
}
return nil
}
// Select cluster boot block
func selectClusterBootBlock(bootstrapBlock, sysChanLastConfig *cb.Block) *cb.Block {
if sysChanLastConfig == nil {
logger.Debug("Selected bootstrap block, because system channel last config block is nil")
return bootstrapBlock
}
if sysChanLastConfig.Header.Number > bootstrapBlock.Header.Number {
logger.Infof("Cluster boot block is system channel last config block; Blocks Header.Number system-channel=%d, bootstrap=%d",
sysChanLastConfig.Header.Number, bootstrapBlock.Header.Number)
return sysChanLastConfig
}
logger.Infof("Cluster boot block is bootstrap (genesis) block; Blocks Header.Number system-channel=%d, bootstrap=%d",
sysChanLastConfig.Header.Number, bootstrapBlock.Header.Number)
return bootstrapBlock
}
func createReplicator(
lf blockledger.Factory,
bootstrapBlock *cb.Block,
conf *localconfig.TopLevel,
secOpts comm.SecureOptions,
signer identity.SignerSerializer,
bccsp bccsp.BCCSP,
) *replicationInitiator {
logger := flogging.MustGetLogger("orderer.common.cluster")
vl := &verifierLoader{
verifierFactory: &cluster.BlockVerifierAssembler{Logger: logger, BCCSP: bccsp},
onFailure: func(block *cb.Block) {
protolator.DeepMarshalJSON(os.Stdout, block)
},
ledgerFactory: lf,
logger: logger,
}
systemChannelName, err := protoutil.GetChannelIDFromBlock(bootstrapBlock)
if err != nil {
logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
}
// System channel is not verified because we trust the bootstrap block
// and use backward hash chain verification.
verifiersByChannel := vl.loadVerifiers()
verifiersByChannel[systemChannelName] = &cluster.NoopBlockVerifier{}
vr := &cluster.VerificationRegistry{
LoadVerifier: vl.loadVerifier,
Logger: logger,
VerifiersByChannel: verifiersByChannel,
VerifierFactory: &cluster.BlockVerifierAssembler{Logger: logger, BCCSP: bccsp},
}
ledgerFactory := &ledgerFactory{
Factory: lf,
onBlockCommit: vr.BlockCommitted,
}
return &replicationInitiator{
registerChain: vr.RegisterVerifier,
verifierRetriever: vr,
logger: logger,
secOpts: secOpts,
conf: conf,
lf: ledgerFactory,
signer: signer,
cryptoProvider: bccsp,
}
}
func initializeLogging() {
loggingSpec := os.Getenv("FABRIC_LOGGING_SPEC")
loggingFormat := os.Getenv("FABRIC_LOGGING_FORMAT")
flogging.Init(flogging.Config{
Format: loggingFormat,
Writer: os.Stderr,
LogSpec: loggingSpec,
})
}
// Start the profiling service if enabled.
func initializeProfilingService(conf *localconfig.TopLevel) {
logger.Info("Starting Go pprof profiling service on:", conf.General.Profile.Address)
// The ListenAndServe() call does not return unless an error occurs.
logger.Panic("Go pprof service failed:", http.ListenAndServe(conf.General.Profile.Address, nil))
}
func handleSignals(handlers map[os.Signal]func()) {
var signals []os.Signal
for sig := range handlers {
signals = append(signals, sig)
}
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, signals...)
go func() {
for sig := range signalChan {
logger.Infof("Received signal: %d (%s)", sig, sig)
handlers[sig]()
}
}()
}
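// A minimal usage sketch: handleSignals takes a map from signal to handler and
// installs the handlers in a background goroutine. The SIGUSR1 handler below is
// purely illustrative and assumes the "syscall" package is imported.
//
//	handleSignals(map[os.Signal]func(){
//		syscall.SIGUSR1: func() { logger.Infof("received SIGUSR1") },
//	})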
type loadPEMFunc func(string) ([]byte, error)
// configureClusterListener returns a new ServerConfig and a new gRPC server (with its own TLS listener).
func configureClusterListener(conf *localconfig.TopLevel, generalConf comm.ServerConfig, loadPEM loadPEMFunc) (comm.ServerConfig, *comm.GRPCServer) {
clusterConf := conf.General.Cluster
cert, err := loadPEM(clusterConf.ServerCertificate)
if err != nil {
logger.Panicf("Failed to load cluster server certificate from '%s' (%s)", clusterConf.ServerCertificate, err)
}
key, err := loadPEM(clusterConf.ServerPrivateKey)
if err != nil {
logger.Panicf("Failed to load cluster server key from '%s' (%s)", clusterConf.ServerPrivateKey, err)
}
port := fmt.Sprintf("%d", clusterConf.ListenPort)
bindAddr := net.JoinHostPort(clusterConf.ListenAddress, port)
var clientRootCAs [][]byte
for _, serverRoot := range conf.General.Cluster.RootCAs {
rootCACert, err := loadPEM(serverRoot)
if err != nil {
logger.Panicf("Failed to load CA cert file '%s' (%s)", serverRoot, err)
}
clientRootCAs = append(clientRootCAs, rootCACert)
}
serverConf := comm.ServerConfig{
StreamInterceptors: generalConf.StreamInterceptors,
UnaryInterceptors: generalConf.UnaryInterceptors,
ConnectionTimeout: generalConf.ConnectionTimeout,
ServerStatsHandler: generalConf.ServerStatsHandler,
Logger: generalConf.Logger,
KaOpts: generalConf.KaOpts,
SecOpts: comm.SecureOptions{
TimeShift: conf.General.Cluster.TLSHandshakeTimeShift,
CipherSuites: comm.DefaultTLSCipherSuites,
ClientRootCAs: clientRootCAs,
RequireClientCert: true,
Certificate: cert,
UseTLS: true,
Key: key,
},
}
srv, err := comm.NewGRPCServer(bindAddr, serverConf)
if err != nil {
logger.Panicf("Failed creating gRPC server on %s:%d due to %v", clusterConf.ListenAddress, clusterConf.ListenPort, err)
}
return serverConf, srv
}
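// A minimal sketch of how configureClusterListener can be invoked: loadPEM is any
// func(string) ([]byte, error), so ioutil.ReadFile satisfies it directly. The
// variable names below are illustrative.
//
//	clusterServerConfig, clusterGRPCServer := configureClusterListener(conf, generalConfig, ioutil.ReadFile)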
func initializeClusterClientConfig(conf *localconfig.TopLevel) comm.ClientConfig {
cc := comm.ClientConfig{
AsyncConnect: true,
KaOpts: comm.DefaultKeepaliveOptions,
Timeout: conf.General.Cluster.DialTimeout,
SecOpts: comm.SecureOptions{},
}
if conf.General.Cluster.ClientCertificate == "" {
return cc
}
certFile := conf.General.Cluster.ClientCertificate
certBytes, err := ioutil.ReadFile(certFile)
if err != nil {
logger.Fatalf("Failed to load client TLS certificate file '%s' (%s)", certFile, err)
}
keyFile := conf.General.Cluster.ClientPrivateKey
keyBytes, err := ioutil.ReadFile(keyFile)
if err != nil {
logger.Fatalf("Failed to load client TLS key file '%s' (%s)", keyFile, err)
}
var serverRootCAs [][]byte
for _, serverRoot := range conf.General.Cluster.RootCAs {
rootCACert, err := ioutil.ReadFile(serverRoot)
if err != nil {
logger.Fatalf("Failed to load ServerRootCAs file '%s' (%s)", serverRoot, err)
}
serverRootCAs = append(serverRootCAs, rootCACert)
}
cc.SecOpts = comm.SecureOptions{
TimeShift: conf.General.Cluster.TLSHandshakeTimeShift,
RequireClientCert: true,
CipherSuites: comm.DefaultTLSCipherSuites,
ServerRootCAs: serverRootCAs,
Certificate: certBytes,
Key: keyBytes,
UseTLS: true,
}
return cc
}
func initializeServerConfig(conf *localconfig.TopLevel, metricsProvider metrics.Provider) comm.ServerConfig {
// secure server config
secureOpts := comm.SecureOptions{
UseTLS: conf.General.TLS.Enabled,
RequireClientCert: conf.General.TLS.ClientAuthRequired,
}
// check to see if TLS is enabled
if secureOpts.UseTLS {
msg := "TLS"
// load crypto material from files
serverCertificate, err := ioutil.ReadFile(conf.General.TLS.Certificate)
if err != nil {
logger.Fatalf("Failed to load server Certificate file '%s' (%s)",
conf.General.TLS.Certificate, err)
}
serverKey, err := ioutil.ReadFile(conf.General.TLS.PrivateKey)
if err != nil {
logger.Fatalf("Failed to load PrivateKey file '%s' (%s)",
conf.General.TLS.PrivateKey, err)
}
var serverRootCAs, clientRootCAs [][]byte
for _, serverRoot := range conf.General.TLS.RootCAs {
root, err := ioutil.ReadFile(serverRoot)
if err != nil {
logger.Fatalf("Failed to load ServerRootCAs file '%s' (%s)",
err, serverRoot)
}
serverRootCAs = append(serverRootCAs, root)
}
if secureOpts.RequireClientCert {
for _, clientRoot := range conf.General.TLS.ClientRootCAs {
root, err := ioutil.ReadFile(clientRoot)
if err != nil {
logger.Fatalf("Failed to load ClientRootCAs file '%s' (%s)",
err, clientRoot)
}
clientRootCAs = append(clientRootCAs, root)
}
msg = "mutual TLS"
}
secureOpts.Key = serverKey
secureOpts.Certificate = serverCertificate
secureOpts.ServerRootCAs = serverRootCAs
secureOpts.ClientRootCAs = clientRootCAs
logger.Infof("Starting orderer with %s enabled", msg)
}
kaOpts := comm.DefaultKeepaliveOptions
// keepalive settings
// ServerMinInterval must be greater than 0
if conf.General.Keepalive.ServerMinInterval > time.Duration(0) {
kaOpts.ServerMinInterval = conf.General.Keepalive.ServerMinInterval
}
kaOpts.ServerInterval = conf.General.Keepalive.ServerInterval
kaOpts.ServerTimeout = conf.General.Keepalive.ServerTimeout
commLogger := flogging.MustGetLogger("core.comm").With("server", "Orderer")
if metricsProvider == nil {
metricsProvider = &disabled.Provider{}
}
return comm.ServerConfig{
SecOpts: secureOpts,
KaOpts: kaOpts,
Logger: commLogger,
ServerStatsHandler: comm.NewServerStatsHandler(metricsProvider),
ConnectionTimeout: conf.General.ConnectionTimeout,
StreamInterceptors: []grpc.StreamServerInterceptor{
grpcmetrics.StreamServerInterceptor(grpcmetrics.NewStreamMetrics(metricsProvider)),
grpclogging.StreamServerInterceptor(flogging.MustGetLogger("comm.grpc.server").Zap()),
},
UnaryInterceptors: []grpc.UnaryServerInterceptor{
grpcmetrics.UnaryServerInterceptor(grpcmetrics.NewUnaryMetrics(metricsProvider)),
grpclogging.UnaryServerInterceptor(
flogging.MustGetLogger("comm.grpc.server").Zap(),
grpclogging.WithLeveler(grpclogging.LevelerFunc(grpcLeveler)),
),
},
}
}
func grpcLeveler(ctx context.Context, fullMethod string) zapcore.Level {
switch fullMethod {
case "/orderer.Cluster/Step":
return flogging.DisabledLevel
default:
return zapcore.InfoLevel
}
}
func extractBootstrapBlock(conf *localconfig.TopLevel) *cb.Block {
var bootstrapBlock *cb.Block
// Select the bootstrapping mechanism
switch conf.General.BootstrapMethod {
case "file": // For now, "file" is the only supported genesis method
bootstrapBlock = file.New(conf.General.BootstrapFile).GenesisBlock()
case "none": // simply honor the configuration value
return nil
default:
logger.Panic("Unknown genesis method:", conf.General.BootstrapMethod)
}
return bootstrapBlock
}
func initializeBootstrapChannel(genesisBlock *cb.Block, lf blockledger.Factory) {
channelID, err := protoutil.GetChannelIDFromBlock(genesisBlock)
if err != nil {
logger.Fatal("Failed to parse channel ID from genesis block:", err)
}
gl, err := lf.GetOrCreate(channelID)
if err != nil {
logger.Fatal("Failed to create the system channel:", err)
}
if err := gl.Append(genesisBlock); err != nil {
logger.Fatal("Could not write genesis block to ledger:", err)
}
}
func isClusterType(genesisBlock *cb.Block, bccsp bccsp.BCCSP) bool {
_, exists := clusterTypes[consensusType(genesisBlock, bccsp)]
return exists
}
func consensusType(genesisBlock *cb.Block, bccsp bccsp.BCCSP) string {
if genesisBlock == nil || genesisBlock.Data == nil || len(genesisBlock.Data.Data) == 0 {
logger.Fatalf("Empty genesis block")
}
env := &cb.Envelope{}
if err := proto.Unmarshal(genesisBlock.Data.Data[0], env); err != nil {
logger.Fatalf("Failed to unmarshal the genesis block's envelope: %v", err)
}
bundle, err := channelconfig.NewBundleFromEnvelope(env, bccsp)
if err != nil {
logger.Fatalf("Failed creating bundle from the genesis block: %v", err)
}
ordConf, exists := bundle.OrdererConfig()
if !exists {
logger.Fatalf("Orderer config doesn't exist in bundle derived from genesis block")
}
return ordConf.ConsensusType()
}
func initializeGrpcServer(conf *localconfig.TopLevel, serverConfig comm.ServerConfig) *comm.GRPCServer {
lis, err := net.Listen("tcp", fmt.Sprintf("%s:%d", conf.General.ListenAddress, conf.General.ListenPort))
if err != nil {
logger.Fatal("Failed to listen:", err)
}
// Create GRPC server - fail fatally if an error occurs
grpcServer, err := comm.NewGRPCServerFromListener(lis, serverConfig)
if err != nil {
logger.Fatal("Failed to return new GRPC server:", err)
}
return grpcServer
}
func loadLocalMSP(conf *localconfig.TopLevel) msp.MSP {
// MUST call GetLocalMspConfig first, so that default BCCSP is properly
// initialized prior to LoadByType.
mspConfig, err := msp.GetLocalMspConfig(conf.General.LocalMSPDir, conf.General.BCCSP, conf.General.LocalMSPID)
if err != nil {
logger.Panicf("Failed to get local msp config: %v", err)
}
typ := msp.ProviderTypeToString(msp.FABRIC)
opts, found := msp.Options[typ]
if !found {
logger.Panicf("MSP option for type %s is not found", typ)
}
localmsp, err := msp.New(opts, factory.GetDefault())
if err != nil {
logger.Panicf("Failed to load local MSP: %v", err)
}
if err = localmsp.Setup(mspConfig); err != nil {
logger.Panicf("Failed to setup local msp with config: %v", err)
}
return localmsp
}
//go:generate counterfeiter -o mocks/health_checker.go -fake-name HealthChecker . healthChecker
// healthChecker defines the contract for a health checker.
type healthChecker interface {
RegisterChecker(component string, checker healthz.HealthChecker) error
}
func initializeMultichannelRegistrar(
bootstrapBlock *cb.Block,
ri *replicationInitiator,
clusterDialer *cluster.PredicateDialer,
srvConf comm.ServerConfig,
srv *comm.GRPCServer,
conf *localconfig.TopLevel,
signer identity.SignerSerializer,
metricsProvider metrics.Provider,
healthChecker healthChecker,
lf blockledger.Factory,
bccsp bccsp.BCCSP,
callbacks ...channelconfig.BundleActor,
) *multichannel.Registrar {
registrar := multichannel.NewRegistrar(*conf, lf, signer, metricsProvider, bccsp, callbacks...)
consenters := map[string]consensus.Consenter{}
var icr etcdraft.InactiveChainRegistry
if conf.General.BootstrapMethod == "file" || conf.General.BootstrapMethod == "none" {
if bootstrapBlock != nil && isClusterType(bootstrapBlock, bccsp) {
etcdConsenter := initializeEtcdraftConsenter(consenters, conf, lf, clusterDialer, bootstrapBlock, ri, srvConf, srv, registrar, metricsProvider, bccsp)
icr = etcdConsenter.InactiveChainRegistry
}
}
consenters["solo"] = solo.New()
var kafkaMetrics *kafka.Metrics
consenters["kafka"], kafkaMetrics = kafka.New(conf.Kafka, metricsProvider, healthChecker, icr, registrar.CreateChain)
// Note: we pass a nil channel here; we could pass a channel that
// closes if we wished to clean up this routine on exit.
go kafkaMetrics.PollGoMetricsUntilStop(time.Minute, nil)
registrar.Initialize(consenters)
return registrar
}
func initializeEtcdraftConsenter(
consenters map[string]consensus.Consenter,
conf *localconfig.TopLevel,
lf blockledger.Factory,
clusterDialer *cluster.PredicateDialer,
bootstrapBlock *cb.Block,
ri *replicationInitiator,
srvConf comm.ServerConfig,
srv *comm.GRPCServer,
registrar *multichannel.Registrar,
metricsProvider metrics.Provider,
bccsp bccsp.BCCSP,
) *etcdraft.Consenter {
replicationRefreshInterval := conf.General.Cluster.ReplicationBackgroundRefreshInterval
if replicationRefreshInterval == 0 {
replicationRefreshInterval = defaultReplicationBackgroundRefreshInterval
}
systemChannelName, err := protoutil.GetChannelIDFromBlock(bootstrapBlock)
if err != nil {
ri.logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
}
systemLedger, err := lf.GetOrCreate(systemChannelName)
if err != nil {
ri.logger.Panicf("Failed obtaining system channel (%s) ledger: %v", systemChannelName, err)
}
getConfigBlock := func() *cb.Block {
return multichannel.ConfigBlock(systemLedger)
}
exponentialSleep := exponentialDurationSeries(replicationBackgroundInitialRefreshInterval, replicationRefreshInterval)
ticker := newTicker(exponentialSleep)
icr := &inactiveChainReplicator{
logger: logger,
scheduleChan: ticker.C,
quitChan: make(chan struct{}),
replicator: ri,
chains2CreationCallbacks: make(map[string]chainCreation),
retrieveLastSysChannelConfigBlock: getConfigBlock,
registerChain: ri.registerChain,
}
// Use the inactiveChainReplicator as a channel lister, since it has knowledge
// of all inactive chains.
// This is to prevent us from pulling the entire system chain when attempting to enumerate
// the channels in the system.
ri.channelLister = icr
go icr.run()
raftConsenter := etcdraft.New(clusterDialer, conf, srvConf, srv, registrar, icr, metricsProvider, bccsp)
consenters["etcdraft"] = raftConsenter
return raftConsenter
}
func newOperationsSystem(ops localconfig.Operations, metrics localconfig.Metrics) *operations.System {
return operations.NewSystem(operations.Options{
Logger: flogging.MustGetLogger("orderer.operations"),
ListenAddress: ops.ListenAddress,
Metrics: operations.MetricsOptions{
Provider: metrics.Provider,
Statsd: &operations.Statsd{
Network: metrics.Statsd.Network,
Address: metrics.Statsd.Address,
WriteInterval: metrics.Statsd.WriteInterval,
Prefix: metrics.Statsd.Prefix,
},
},
TLS: operations.TLS{
Enabled: ops.TLS.Enabled,
CertFile: ops.TLS.Certificate,
KeyFile: ops.TLS.PrivateKey,
ClientCertRequired: ops.TLS.ClientAuthRequired,
ClientCACertFiles: ops.TLS.ClientRootCAs,
},
Version: metadata.Version,
})
}
// caManager manages certificate authorities scoped by channel.
type caManager struct {
sync.Mutex
appRootCAsByChain map[string][][]byte
ordererRootCAsByChain map[string][][]byte
clientRootCAs [][]byte
}
func (mgr *caManager) updateTrustedRoots(
cm channelconfig.Resources,
servers ...*comm.GRPCServer,
) {
mgr.Lock()
defer mgr.Unlock()
appRootCAs := [][]byte{}
ordererRootCAs := [][]byte{}
appOrgMSPs := make(map[string]struct{})
ordOrgMSPs := make(map[string]struct{})
if ac, ok := cm.ApplicationConfig(); ok {
//loop through app orgs and build map of MSPIDs
for _, appOrg := range ac.Organizations() {
appOrgMSPs[appOrg.MSPID()] = struct{}{}
}
}
if ac, ok := cm.OrdererConfig(); ok {
//loop through orderer orgs and build map of MSPIDs
for _, ordOrg := range ac.Organizations() {
ordOrgMSPs[ordOrg.MSPID()] = struct{}{}
}
}
if cc, ok := cm.ConsortiumsConfig(); ok {
for _, consortium := range cc.Consortiums() {
//loop through consortium orgs and build map of MSPIDs
for _, consortiumOrg := range consortium.Organizations() {
appOrgMSPs[consortiumOrg.MSPID()] = struct{}{}
}
}
}
cid := cm.ConfigtxValidator().ChannelID()
logger.Debugf("updating root CAs for channel [%s]", cid)
msps, err := cm.MSPManager().GetMSPs()
if err != nil {
logger.Errorf("Error getting root CAs for channel %s (%s)", cid, err)
return
}
for k, v := range msps {
// check to see if this is a FABRIC MSP
if v.GetType() == msp.FABRIC {
for _, root := range v.GetTLSRootCerts() {
// check to see if this is an app org MSP
if _, ok := appOrgMSPs[k]; ok {
logger.Debugf("adding app root CAs for MSP [%s]", k)
appRootCAs = append(appRootCAs, root)
}
// check to see if this is an orderer org MSP
if _, ok := ordOrgMSPs[k]; ok {
logger.Debugf("adding orderer root CAs for MSP [%s]", k)
ordererRootCAs = append(ordererRootCAs, root)
}
}
for _, intermediate := range v.GetTLSIntermediateCerts() {
// check to see if this is an app org MSP
if _, ok := appOrgMSPs[k]; ok {
logger.Debugf("adding app root CAs for MSP [%s]", k)
appRootCAs = append(appRootCAs, intermediate)
}
// check to see if this is an orderer org MSP
if _, ok := ordOrgMSPs[k]; ok {
logger.Debugf("adding orderer root CAs for MSP [%s]", k)
ordererRootCAs = append(ordererRootCAs, intermediate)
}
}
}
}
mgr.appRootCAsByChain[cid] = appRootCAs
mgr.ordererRootCAsByChain[cid] = ordererRootCAs
// now iterate over all roots for all app and orderer chains
trustedRoots := [][]byte{}
for _, roots := range mgr.appRootCAsByChain {
trustedRoots = append(trustedRoots, roots...)
}
for _, roots := range mgr.ordererRootCAsByChain {
trustedRoots = append(trustedRoots, roots...)
}
// also need to append statically configured root certs
if len(mgr.clientRootCAs) > 0 {
trustedRoots = append(trustedRoots, mgr.clientRootCAs...)
}
// now update the client roots for the gRPC server
for _, srv := range servers {
err = srv.SetClientRootCAs(trustedRoots)
if err != nil {
msg := "Failed to update trusted roots for orderer from latest config " +
"block. This orderer may not be able to communicate " +
"with members of channel %s (%s)"
logger.Warningf(msg, cm.ConfigtxValidator().ChannelID(), err)
}
}
}
func (mgr *caManager) updateClusterDialer(
clusterDialer *cluster.PredicateDialer,
localClusterRootCAs [][]byte,
) {
mgr.Lock()
defer mgr.Unlock()
// Iterate over all orderer root CAs for all chains and add them
// to the root CAs
var clusterRootCAs [][]byte
for _, roots := range mgr.ordererRootCAsByChain {
clusterRootCAs = append(clusterRootCAs, roots...)
}
// Add the local root CAs too
clusterRootCAs = append(clusterRootCAs, localClusterRootCAs...)
// Update the cluster config with the new root CAs
clusterDialer.UpdateRootCAs(clusterRootCAs)
}
func prettyPrintStruct(i interface{}) {
params := localconfig.Flatten(i)
var buffer bytes.Buffer
for i := range params {
buffer.WriteString("\n\t")
buffer.WriteString(params[i])
}
logger.Infof("Orderer config values:%s\n", buffer.String())
}
|
[
"\"FABRIC_LOGGING_SPEC\"",
"\"FABRIC_LOGGING_FORMAT\""
] |
[] |
[
"FABRIC_LOGGING_SPEC",
"FABRIC_LOGGING_FORMAT"
] |
[]
|
["FABRIC_LOGGING_SPEC", "FABRIC_LOGGING_FORMAT"]
|
go
| 2 | 0 | |
bot/mcbot.go
|
// Package bot implements a simple Minecraft client that can join a server
// or just ping it for getting information.
//
// A runnable example can be found at cmd/ .
package bot
import (
"fmt"
"net"
"strconv"
"github.com/Tnze/go-mc/data"
mcnet "github.com/Tnze/go-mc/net"
pk "github.com/Tnze/go-mc/net/packet"
)
// ProtocolVersion is the protocol version number of the Minecraft network protocol.
const ProtocolVersion = 753
// JoinServer connects to a Minecraft server for playing the game.
func (c *Client) JoinServer(addr string, port int) (err error) {
return c.JoinServerWithDialer(&net.Dialer{}, fmt.Sprintf("%s:%d", addr, port))
}
// JoinServerWithDialer is similar to JoinServer but uses the given Dialer.
func (c *Client) JoinServerWithDialer(d Dialer, addr string) (err error) {
return c.join(d, addr)
}
func (c *Client) join(d Dialer, addr string) (err error) {
conn, err := d.Dial("tcp", addr)
if err != nil {
err = fmt.Errorf("bot: connect server fail: %v", err)
return err
}
//Set Conn
c.conn = mcnet.WrapConn(conn)
//Get Host and Port
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
err = fmt.Errorf("bot: connect server fail: %v", err)
return err
}
port, err := strconv.Atoi(portStr)
if err != nil {
err = fmt.Errorf("bot: connect server fail: %v", err)
return err
}
//Handshake
err = c.conn.WritePacket(
//Handshake Packet
pk.Marshal(
0x00, //Handshake packet ID
pk.VarInt(ProtocolVersion), //Protocol version
pk.String(host), //Server's address
pk.UnsignedShort(port), //Server's port
pk.Byte(2), //Next state: 2 for login
))
if err != nil {
err = fmt.Errorf("bot: send handshake packect fail: %v", err)
return
}
//Login
err = c.conn.WritePacket(
//LoginStart Packet
pk.Marshal(0, pk.String(c.Name)))
if err != nil {
err = fmt.Errorf("bot: send login start packect fail: %v", err)
return
}
for {
//Receive Packet
var pack pk.Packet
pack, err = c.conn.ReadPacket()
if err != nil {
err = fmt.Errorf("bot: recv packet for Login fail: %v", err)
return
}
//Handle Packet
switch pack.ID {
case 0x00: //Disconnect
var reason pk.String
err = pack.Scan(&reason)
if err != nil {
err = fmt.Errorf("bot: read Disconnect message fail: %v", err)
} else {
err = fmt.Errorf("bot: connect disconnected by server: %s", reason)
}
return
case 0x01: //Encryption Request
if err := handleEncryptionRequest(c, pack); err != nil {
return fmt.Errorf("bot: encryption fail: %v", err)
}
case 0x02: //Login Success
// uuid, l := pk.UnpackString(pack.Data)
// name, _ := unpackString(pack.Data[l:])
return nil
case 0x03: //Set Compression
var threshold pk.VarInt
if err := pack.Scan(&threshold); err != nil {
return fmt.Errorf("bot: set compression fail: %v", err)
}
c.conn.SetThreshold(int(threshold))
case 0x04: //Login Plugin Request
if err := handlePluginPacket(c, pack); err != nil {
return fmt.Errorf("bot: handle plugin packet fail: %v", err)
}
}
}
}
// A Dialer is a means to establish a connection.
type Dialer interface {
// Dial connects to the given address via the proxy.
Dial(network, addr string) (c net.Conn, err error)
}
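// A minimal sketch: any value with a matching Dial method satisfies Dialer, so a
// *net.Dialer with a timeout can be passed straight to JoinServerWithDialer. The
// timeout below is illustrative and assumes the "time" package is imported.
//
//	d := &net.Dialer{Timeout: 30 * time.Second}
//	err := c.JoinServerWithDialer(d, "localhost:25565")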
// Conn returns the MCConn of the Client.
// Use it only when you want to handle the packets yourself.
func (c *Client) Conn() *mcnet.Conn {
return c.conn
}
// SendMessage sends a chat message.
func (c *Client) SendMessage(msg string) error {
return c.conn.WritePacket(
pk.Marshal(
data.ChatServerbound,
pk.String(msg),
),
)
}
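// A minimal end-to-end sketch (illustrative values; error handling elided). It
// assumes the package's client constructor, commonly NewClient(), which is not
// shown in this file.
//
//	c := bot.NewClient()
//	c.Name = "Steve"
//	if err := c.JoinServer("localhost", 25565); err != nil {
//		panic(err)
//	}
//	_ = c.SendMessage("hello from go-mc")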
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
Tool_Functions/twitter-sentiment-analysis/Stanford_Tokenize.py
|
import json
import pandas as pd
import string
import nltk
from nltk.tokenize.stanford import StanfordTokenizer
from nltk.tag import StanfordPOSTagger
from nltk import word_tokenize
import os
# stop words to remove from text
nltk.download("stopwords")
# also removing @ in this case since Stanford Tokenizer tokenizes them
useless_ones = nltk.corpus.stopwords.words("english") + list(string.punctuation) + ['@']
# workaround for adding environment variable for tagger
jar = 'stanford-postagger.jar'
model = 'english-bidirectional-distsim.tagger'
pos_tagger = StanfordPOSTagger(model, jar, encoding='utf8')
# set java path in environment
java_path = 'C:/Program Files/Java/jdk-13/bin/java.exe'
os.environ['JAVAHOME'] = java_path
def tokenizeTweets(tweetList):
retTweetList = []
for tweet in tweetList:
wordlist = [word for word in pos_tagger.tag(word_tokenize(tweet)) if word[0] not in useless_ones]
retTweetList.append(wordlist)
return retTweetList
def tokenizeTweet(tweet):
wordlist = [word[0] for word in pos_tagger.tag(word_tokenize(tweet)) if word[0] not in useless_ones]
return wordlist
def main(twtInfo:object):
clean_data_tweets = pd.read_json(twtInfo, orient="records")
tweets = clean_data_tweets["text"]
data_id = clean_data_tweets["id"]
data_tc_tweets = []
for tweet in tweets:
data_tc_tweets.append(tokenizeTweet(tweet))
ret = []
for i in range(len(data_tc_tweets)):
ret.append({})
ret[i]["text"] = data_tc_tweets[i]
ret[i]["id"] = data_id[i]
return pd.Series(ret).to_json(orient="records")
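# A minimal usage sketch (illustrative data; assumes stanford-postagger.jar, the
# tagger model, and the configured Java path are all present):
#
#   sample = pd.Series([{"text": "Just landed! @united was great", "id": 1}]).to_json(orient="records")
#   tokenized_json = main(sample)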
|
[] |
[] |
[
"JAVAHOME"
] |
[]
|
["JAVAHOME"]
|
python
| 1 | 0 | |
approve-authorization/server/go/server.go
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"github.com/joho/godotenv"
"github.com/stripe/stripe-go/v72"
"github.com/stripe/stripe-go/v72/issuing/authorization"
"github.com/stripe/stripe-go/v72/webhook"
)
func main() {
err := godotenv.Load()
if err != nil {
log.Fatal("Error loading .env file")
}
// Set your secret key. Remember to switch to your live secret key in production!
// See your keys here: https://dashboard.stripe.com/account/apikeys
stripe.Key = os.Getenv("STRIPE_SECRET_KEY")
// For sample support and debugging, not required for production:
stripe.SetAppInfo(&stripe.AppInfo{
Name: "stripe-samples/issuing/approve-authorization",
Version: "0.0.1",
URL: "https://github.com/stripe-samples",
})
http.HandleFunc("/webhook", handleWebhook)
addr := "localhost:4242"
log.Printf("Listening on %s ...", addr)
log.Fatal(http.ListenAndServe(addr, nil))
}
func handleWebhook(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
return
}
b, err := ioutil.ReadAll(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
// Verify webhook signature and extract the event.
event, err := webhook.ConstructEvent(b, r.Header.Get("Stripe-Signature"), os.Getenv("STRIPE_WEBHOOK_SECRET"))
if err != nil {
fmt.Fprintf(os.Stderr, "Error verifying webhook signature: %v\n", err)
w.WriteHeader(http.StatusBadRequest)
return
}
if event.Type == "issuing_authorization.request" {
var auth stripe.IssuingAuthorization
err := json.Unmarshal(event.Data.Raw, &auth)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing webhook JSON: %v\n", err)
w.WriteHeader(http.StatusBadRequest)
return
}
handleAuthorizationRequest(auth)
}
w.WriteHeader(http.StatusOK)
}
func handleAuthorizationRequest(auth stripe.IssuingAuthorization) {
// Authorize the transaction.
_, _ = authorization.Approve(auth.ID, &stripe.IssuingAuthorizationApproveParams{})
fmt.Println("Approved 🎉")
}
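// A minimal sketch of the alternative path (not part of this sample): declining an
// authorization instead of approving it. Treat the exact params struct as an
// assumption against stripe-go v72's issuing/authorization package.
//
//	_, _ = authorization.Decline(auth.ID, &stripe.IssuingAuthorizationDeclineParams{})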
|
[
"\"STRIPE_SECRET_KEY\"",
"\"STRIPE_WEBHOOK_SECRET\""
] |
[] |
[
"STRIPE_SECRET_KEY",
"STRIPE_WEBHOOK_SECRET"
] |
[]
|
["STRIPE_SECRET_KEY", "STRIPE_WEBHOOK_SECRET"]
|
go
| 2 | 0 | |
gopls/internal/regtest/runner.go
|
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package regtest
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"runtime/pprof"
"strings"
"sync"
"testing"
"time"
"golang.org/x/tools/gopls/internal/hooks"
"golang.org/x/tools/internal/jsonrpc2"
"golang.org/x/tools/internal/jsonrpc2/servertest"
"golang.org/x/tools/internal/lsp/cache"
"golang.org/x/tools/internal/lsp/debug"
"golang.org/x/tools/internal/lsp/fake"
"golang.org/x/tools/internal/lsp/lsprpc"
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
)
// Mode is a bitmask that defines for which execution modes a test should run.
type Mode int
const (
// Singleton mode uses a separate in-process gopls instance for each test,
// and communicates over pipes to mimic the gopls sidecar execution mode,
// which communicates over stdin/stderr.
Singleton Mode = 1 << iota
// Forwarded forwards connections to a shared in-process gopls instance.
Forwarded
// SeparateProcess forwards connection to a shared separate gopls process.
SeparateProcess
// Experimental enables all of the experimental configurations that are
// being developed. Currently, it enables the workspace module.
Experimental
// NormalModes are the global default execution modes, when unmodified by
// test flags or by individual test options.
NormalModes = Singleton | Experimental
)
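// A minimal sketch of how the bitmask is used: modes are combined with | and
// tested with &. For example, restricting a test to the two in-process modes
// (values below are illustrative):
//
//	r.Run(t, files, test, Modes(Singleton|Forwarded)) // skipped where config.modes&tc.mode == 0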
// A Runner runs tests in gopls execution environments, as specified by its
// modes. For modes that share state (for example, a shared cache or common
// remote), any tests that execute on the same Runner will share the same
// state.
type Runner struct {
DefaultModes Mode
Timeout time.Duration
GoplsPath string
PrintGoroutinesOnFailure bool
TempDir string
SkipCleanup bool
mu sync.Mutex
ts *servertest.TCPServer
socketDir string
// closers is a queue of clean-up functions to run at the end of the entire
// test suite.
closers []io.Closer
}
type runConfig struct {
editor fake.EditorConfig
sandbox fake.SandboxConfig
modes Mode
timeout time.Duration
debugAddr string
skipLogs bool
skipHooks bool
nestWorkdir bool
}
func (r *Runner) defaultConfig() *runConfig {
return &runConfig{
modes: r.DefaultModes,
timeout: r.Timeout,
}
}
// A RunOption augments the behavior of the test runner.
type RunOption interface {
set(*runConfig)
}
type optionSetter func(*runConfig)
func (f optionSetter) set(opts *runConfig) {
f(opts)
}
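// A minimal sketch of defining a new option with this functional-option pattern:
// wrap a func(*runConfig) in optionSetter. The option name below is hypothetical.
//
//	func WithoutLogBuffering() RunOption {
//		return optionSetter(func(opts *runConfig) { opts.skipLogs = true })
//	}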
// Timeout configures a custom timeout for this test run.
func Timeout(d time.Duration) RunOption {
return optionSetter(func(opts *runConfig) {
opts.timeout = d
})
}
// ProxyFiles configures a file proxy using the given txtar-encoded string.
func ProxyFiles(txt string) RunOption {
return optionSetter(func(opts *runConfig) {
opts.sandbox.ProxyFiles = txt
})
}
// Modes configures the execution modes that the test should run in.
func Modes(modes Mode) RunOption {
return optionSetter(func(opts *runConfig) {
opts.modes = modes
})
}
func SendPID() RunOption {
return optionSetter(func(opts *runConfig) {
opts.editor.SendPID = true
})
}
// EditorConfig is a RunOption that configures the regtest editor.
type EditorConfig fake.EditorConfig
func (c EditorConfig) set(opts *runConfig) {
opts.editor = fake.EditorConfig(c)
}
// WithoutWorkspaceFolders prevents workspace folders from being sent as part
// of the sandbox's initialization. It is used to simulate opening a single
// file in the editor, without a workspace root. In that case, the client sends
// neither workspace folders nor a root URI.
func WithoutWorkspaceFolders() RunOption {
return optionSetter(func(opts *runConfig) {
opts.editor.WithoutWorkspaceFolders = true
})
}
// RootPath specifies the rootURI of the workspace folder opened in the
// editor. By default, the sandbox opens the top-level directory, but some
// tests need to check other cases.
func RootPath(path string) RunOption {
return optionSetter(func(opts *runConfig) {
opts.editor.WorkspaceRoot = path
})
}
// InGOPATH configures the workspace working directory to be GOPATH, rather
// than a separate working directory for use with modules.
func InGOPATH() RunOption {
return optionSetter(func(opts *runConfig) {
opts.sandbox.InGoPath = true
})
}
// DebugAddress configures a debug server bound to addr. This option is
// currently only supported when executing in Singleton mode. It is intended to
// be used for long-running stress tests.
func DebugAddress(addr string) RunOption {
return optionSetter(func(opts *runConfig) {
opts.debugAddr = addr
})
}
// SkipLogs skips the buffering of logs during test execution. It is intended
// for long-running stress tests.
func SkipLogs() RunOption {
return optionSetter(func(opts *runConfig) {
opts.skipLogs = true
})
}
// InExistingDir runs the test in a pre-existing directory. If set, no initial
// files may be passed to the runner. It is intended for long-running stress
// tests.
func InExistingDir(dir string) RunOption {
return optionSetter(func(opts *runConfig) {
opts.sandbox.Workdir = dir
})
}
// SkipHooks allows for disabling the test runner's client hooks that are used
// for instrumenting expectations (tracking diagnostics, logs, work done,
// etc.). It is intended for performance-sensitive stress tests or benchmarks.
func SkipHooks(skip bool) RunOption {
return optionSetter(func(opts *runConfig) {
opts.skipHooks = skip
})
}
// GOPROXY configures the test environment to have an explicit proxy value.
// This is intended for stress tests -- to ensure their isolation, regtests
// should instead use WithProxyFiles.
func GOPROXY(goproxy string) RunOption {
return optionSetter(func(opts *runConfig) {
opts.sandbox.GOPROXY = goproxy
})
}
// LimitWorkspaceScope sets the LimitWorkspaceScope configuration.
func LimitWorkspaceScope() RunOption {
return optionSetter(func(opts *runConfig) {
opts.editor.LimitWorkspaceScope = true
})
}
// NestWorkdir inserts the sandbox working directory in a subdirectory of the
// editor workspace.
func NestWorkdir() RunOption {
return optionSetter(func(opts *runConfig) {
opts.nestWorkdir = true
})
}
type TestFunc func(t *testing.T, env *Env)
// Run executes the test function in the default configured gopls execution
// modes. For each a test run, a new workspace is created containing the
// un-txtared files specified by filedata.
func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOption) {
t.Helper()
checkBuilder(t)
tests := []struct {
name string
mode Mode
getServer func(context.Context, *testing.T) jsonrpc2.StreamServer
}{
{"singleton", Singleton, singletonServer},
{"forwarded", Forwarded, r.forwardedServer},
{"separate_process", SeparateProcess, r.separateProcessServer},
{"experimental_workspace_module", Experimental, experimentalWorkspaceModule},
}
for _, tc := range tests {
tc := tc
config := r.defaultConfig()
for _, opt := range opts {
opt.set(config)
}
if config.modes&tc.mode == 0 {
continue
}
if config.debugAddr != "" && tc.mode != Singleton {
// Debugging is useful for running stress tests, but since the daemon has
// likely already been started, it would be too late to debug.
t.Fatalf("debugging regtest servers only works in Singleton mode, "+
"got debug addr %q and mode %v", config.debugAddr, tc.mode)
}
t.Run(tc.name, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), config.timeout)
defer cancel()
ctx = debug.WithInstance(ctx, "", "off")
if config.debugAddr != "" {
di := debug.GetInstance(ctx)
di.DebugAddress = config.debugAddr
di.Serve(ctx)
di.MonitorMemory(ctx)
}
rootDir := filepath.Join(r.TempDir, filepath.FromSlash(t.Name()))
if err := os.MkdirAll(rootDir, 0755); err != nil {
t.Fatal(err)
}
if config.nestWorkdir {
config.sandbox.Workdir = "work/nested"
}
config.sandbox.Files = files
config.sandbox.RootDir = rootDir
sandbox, err := fake.NewSandbox(&config.sandbox)
if err != nil {
t.Fatal(err)
}
workdir := sandbox.Workdir.RootURI().SpanURI().Filename()
if config.nestWorkdir {
// Now that we know the actual workdir, set our workspace to be the
// parent directory.
config.editor.WorkspaceRoot = filepath.Clean(filepath.Join(workdir, ".."))
}
// Deferring the closure of ws until the end of the entire test suite
// has, in testing, given the LSP server time to properly shutdown and
// release any file locks held in workspace, which is a problem on
// Windows. This may still be flaky however, and in the future we need a
// better solution to ensure that all Go processes started by gopls have
// exited before we clean up.
r.AddCloser(sandbox)
ss := tc.getServer(ctx, t)
framer := jsonrpc2.NewRawStream
ls := &loggingFramer{}
if !config.skipLogs {
framer = ls.framer(jsonrpc2.NewRawStream)
}
ts := servertest.NewPipeServer(ctx, ss, framer)
env := NewEnv(ctx, t, sandbox, ts, config.editor, !config.skipHooks)
defer func() {
if t.Failed() && r.PrintGoroutinesOnFailure {
pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
}
if t.Failed() || testing.Verbose() {
ls.printBuffers(t.Name(), os.Stderr)
}
env.CloseEditor()
}()
// Always await the initial workspace load.
env.Await(InitialWorkspaceLoad)
test(t, env)
})
}
}
// longBuilders maps builders that are skipped when -short is set to a
// (possibly empty) justification.
var longBuilders = map[string]string{
"openbsd-amd64-64": "golang.org/issues/42789",
"openbsd-386-64": "golang.org/issues/42789",
"openbsd-386-68": "golang.org/issues/42789",
"openbsd-amd64-68": "golang.org/issues/42789",
"linux-arm": "golang.org/issues/43355",
"darwin-amd64-10_12": "",
"freebsd-amd64-race": "",
"illumos-amd64": "",
"netbsd-arm-bsiegert": "",
"solaris-amd64-oraclerel": "",
"windows-arm-zx2c4": "",
}
func checkBuilder(t *testing.T) {
t.Helper()
builder := os.Getenv("GO_BUILDER_NAME")
if reason, ok := longBuilders[builder]; ok && testing.Short() {
if reason != "" {
t.Skipf("Skipping %s with -short due to %s", builder, reason)
} else {
t.Skipf("Skipping %s with -short", builder)
}
}
}
type loggingFramer struct {
mu sync.Mutex
buf *safeBuffer
}
// safeBuffer is a threadsafe buffer for logs.
type safeBuffer struct {
mu sync.Mutex
buf bytes.Buffer
}
func (b *safeBuffer) Write(p []byte) (int, error) {
b.mu.Lock()
defer b.mu.Unlock()
return b.buf.Write(p)
}
func (s *loggingFramer) framer(f jsonrpc2.Framer) jsonrpc2.Framer {
return func(nc net.Conn) jsonrpc2.Stream {
s.mu.Lock()
framed := false
if s.buf == nil {
s.buf = &safeBuffer{buf: bytes.Buffer{}}
framed = true
}
s.mu.Unlock()
stream := f(nc)
if framed {
return protocol.LoggingStream(stream, s.buf)
}
return stream
}
}
func (s *loggingFramer) printBuffers(testname string, w io.Writer) {
s.mu.Lock()
defer s.mu.Unlock()
if s.buf == nil {
return
}
fmt.Fprintf(os.Stderr, "#### Start Gopls Test Logs for %q\n", testname)
s.buf.mu.Lock()
io.Copy(w, &s.buf.buf)
s.buf.mu.Unlock()
fmt.Fprintf(os.Stderr, "#### End Gopls Test Logs for %q\n", testname)
}
func singletonServer(ctx context.Context, t *testing.T) jsonrpc2.StreamServer {
return lsprpc.NewStreamServer(cache.New(ctx, hooks.Options), false)
}
func experimentalWorkspaceModule(ctx context.Context, t *testing.T) jsonrpc2.StreamServer {
options := func(o *source.Options) {
hooks.Options(o)
o.ExperimentalWorkspaceModule = true
}
return lsprpc.NewStreamServer(cache.New(ctx, options), false)
}
func (r *Runner) forwardedServer(ctx context.Context, t *testing.T) jsonrpc2.StreamServer {
ts := r.getTestServer()
return lsprpc.NewForwarder("tcp", ts.Addr)
}
// getTestServer gets the shared test server instance to connect to, or creates
// one if it doesn't exist.
func (r *Runner) getTestServer() *servertest.TCPServer {
r.mu.Lock()
defer r.mu.Unlock()
if r.ts == nil {
ctx := context.Background()
ctx = debug.WithInstance(ctx, "", "off")
ss := lsprpc.NewStreamServer(cache.New(ctx, hooks.Options), false)
r.ts = servertest.NewTCPServer(ctx, ss, nil)
}
return r.ts
}
func (r *Runner) separateProcessServer(ctx context.Context, t *testing.T) jsonrpc2.StreamServer {
// TODO(rfindley): can we use the autostart behavior here, instead of
// pre-starting the remote?
socket := r.getRemoteSocket(t)
return lsprpc.NewForwarder("unix", socket)
}
// runTestAsGoplsEnvvar triggers TestMain to run gopls instead of running
// tests. It's a trick to allow tests to find a binary to use to start a gopls
// subprocess.
const runTestAsGoplsEnvvar = "_GOPLS_TEST_BINARY_RUN_AS_GOPLS"
func (r *Runner) getRemoteSocket(t *testing.T) string {
t.Helper()
r.mu.Lock()
defer r.mu.Unlock()
const daemonFile = "gopls-test-daemon"
if r.socketDir != "" {
return filepath.Join(r.socketDir, daemonFile)
}
if r.GoplsPath == "" {
t.Fatal("cannot run tests with a separate process unless a path to a gopls binary is configured")
}
var err error
r.socketDir, err = ioutil.TempDir(r.TempDir, "gopls-regtest-socket")
if err != nil {
t.Fatalf("creating tempdir: %v", err)
}
socket := filepath.Join(r.socketDir, daemonFile)
args := []string{"serve", "-listen", "unix;" + socket, "-listen.timeout", "10s"}
cmd := exec.Command(r.GoplsPath, args...)
cmd.Env = append(os.Environ(), runTestAsGoplsEnvvar+"=true")
var stderr bytes.Buffer
cmd.Stderr = &stderr
go func() {
if err := cmd.Run(); err != nil {
panic(fmt.Sprintf("error running external gopls: %v\nstderr:\n%s", err, stderr.String()))
}
}()
return socket
}
// AddCloser schedules a closer to be closed at the end of the test run. This
// is useful for Windows in particular, as resources may still be locked by
// gopls subprocesses if they are closed too early.
func (r *Runner) AddCloser(closer io.Closer) {
r.mu.Lock()
defer r.mu.Unlock()
r.closers = append(r.closers, closer)
}
// Close cleans up resources that have been allocated to this workspace.
func (r *Runner) Close() error {
r.mu.Lock()
defer r.mu.Unlock()
var errmsgs []string
if r.ts != nil {
if err := r.ts.Close(); err != nil {
errmsgs = append(errmsgs, err.Error())
}
}
if r.socketDir != "" {
if err := os.RemoveAll(r.socketDir); err != nil {
errmsgs = append(errmsgs, err.Error())
}
}
if !r.SkipCleanup {
for _, closer := range r.closers {
if err := closer.Close(); err != nil {
errmsgs = append(errmsgs, err.Error())
}
}
if err := os.RemoveAll(r.TempDir); err != nil {
errmsgs = append(errmsgs, err.Error())
}
}
if len(errmsgs) > 0 {
return fmt.Errorf("errors closing the test runner:\n\t%s", strings.Join(errmsgs, "\n\t"))
}
return nil
}
|
[
"\"GO_BUILDER_NAME\""
] |
[] |
[
"GO_BUILDER_NAME"
] |
[]
|
["GO_BUILDER_NAME"]
|
go
| 1 | 0 | |
parallelization/collect.py
|
import sys
import os
import subprocess
import re
import time
from dataclasses import dataclass
from typing import List
import pandas
time_reg = re.compile(r"Checkpoint \d: ([\d.]+)")
def run_cmd(cmd):
print(f"Running {cmd}")
proc = subprocess.run(cmd, shell=True, capture_output=True)
stdout = proc.stdout.decode()
stderr = proc.stderr.decode()
return stdout, stderr
@dataclass
class Result:
program: str
checkpoints: List[float]
threads: int
filesize: float
@property
def encoding_time(self):
return self.checkpoints[2]
@property
def decoding_time(self):
return self.checkpoints[4]
def asdict(self):
d = self.__dict__
d['encoding_time'] = self.encoding_time
d['decoding_time'] = self.decoding_time
del d['checkpoints']
return d
if __name__ == "__main__":
in_dir = "../../inputs"
inputs = sorted(os.listdir(in_dir))
program = ["mpi.sh", "baseline", "baseline-8ecc", "omp", "omp-8ecc"]
results = []
for p in program:
for i in inputs:
if "7.txt" in i and "mpi" in p:
continue
for threads in range(1,17):
if "baseline" in p and threads > 1:
break
if p == "omp":
os.environ['OMP_NUM_THREADS'] = str(threads)
infile = os.path.join(in_dir,i)
filesize = os.stat(infile).st_size / 1000000
count = f" {threads}" if "mpi" in p else ""
stdout, stderr = run_cmd(f"./{p} {infile}{count}")
checkpoint_times = [float(x) for x in time_reg.findall(stdout)]
results.append(Result(p, checkpoint_times, threads, filesize))
if "mpi" in p:
for threads in [32,48,64,96]:
infile = os.path.join(in_dir,i)
filesize = os.stat(infile).st_size / 1000000
count = f" {threads}" if "mpi" in p else ""
stdout, stderr = run_cmd(f"./{p} {infile}{count}")
checkpoint_times = [float(x) for x in time_reg.findall(stdout)]
results.append(Result(p, checkpoint_times, threads, filesize))
df = pandas.DataFrame([x.asdict() for x in results])
df.to_csv("results.csv")
print(df)
|
[] |
[] |
[
"OMP_NUM_THREADS"
] |
[]
|
["OMP_NUM_THREADS"]
|
python
| 1 | 0 | |
dataset_06/greentea_brew_tool.py
|
import os, sys, inspect
import h5py
import numpy as np
import random
import math
import multiprocessing
from Crypto.Random.random import randint
import gc
import resource
# Visualization
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
# from mayavi import mlab
# from mayavi.core.ui.mayavi_scene import MayaviScene
# import volume_slicer
# Load the configuration file
import config
from numpy import float32, int32, uint8, dtype
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.append(cmd_folder)
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], config.caffe_path + "/python")))
if cmd_subfolder not in sys.path:
sys.path.append(cmd_subfolder)
sys.path.append(config.caffe_path + "/python")
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "../../malis")))
if cmd_subfolder not in sys.path:
sys.path.append(cmd_subfolder)
# Ensure correct compilation of Caffe and Pycaffe
if config.library_compile:
cpus = multiprocessing.cpu_count()
cwd = os.getcwd()
os.chdir(config.caffe_path)
result = os.system("make all -j %s" % cpus)
if result != 0:
sys.exit(result)
result = os.system("make pycaffe -j %s" % cpus)
if result != 0:
sys.exit(result)
os.chdir(cwd)
# Import pycaffe
import caffe
import malis as malis
# Import visualization and display
# import visualizer
# Fix up OpenCL variables. Can interfere with the
# frame buffer if the GPU is also a display driver
os.environ["GPU_MAX_ALLOC_PERCENT"] = "100"
os.environ["GPU_SINGLE_ALLOC_PERCENT"] = "100"
os.environ["GPU_MAX_HEAP_SIZE"] = "100"
os.environ["GPU_FORCE_64BIT_PTR"] = "1"
dims = len(config.output_dims)
def normalize(dataset, newmin=-1, newmax=1):
maxval = dataset
while len(maxval.shape) > 0:
maxval = maxval.max(0)
minval = dataset
while len(minval.shape) > 0:
minval = minval.min(0)
return ((dataset - minval) / (maxval - minval)) * (newmax - newmin) + newmin
def error_scale(data, factor_low, factor_high):
scale = np.add((data >= 0.5) * factor_high, (data < 0.5) * factor_low)
return scale
def count_affinity(dataset):
aff_high = np.sum(dataset >= 0.5)
aff_low = np.sum(dataset < 0.5)
return aff_high, aff_low
def border_reflect(dataset, border):
return np.pad(dataset,((border, border)),'reflect')
def inspect_2D_hdf5(hdf5_file):
print 'HDF5 keys: %s' % hdf5_file.keys()
dset = hdf5_file[hdf5_file.keys()[0]]
print 'HDF5 shape: X: %s Y: %s' % dset.shape
print 'HDF5 data type: %s' % dset.dtype
print 'Max/Min: %s' % [np.asarray(dset).max(0).max(0), np.asarray(dset).min(0).min(0)]
def inspect_3D_hdf5(hdf5_file):
print 'HDF5 keys: %s' % hdf5_file.keys()
dset = hdf5_file[hdf5_file.keys()[0]]
print 'HDF5 shape: X: %s Y: %s Z: %s' % dset.shape
print 'HDF5 data type: %s' % dset.dtype
print 'Max/Min: %s' % [np.asarray(dset).max(0).max(0).max(0), np.asarray(dset).min(0).min(0).min(0)]
def inspect_4D_hdf5(hdf5_file):
print 'HDF5 keys: %s' % hdf5_file.keys()
dset = hdf5_file[hdf5_file.keys()[0]]
print 'HDF5 shape: T: %s X: %s Y: %s Z: %s' % dset.shape
print 'HDF5 data type: %s' % dset.dtype
print 'Max/Min: %s' % [np.asarray(dset).max(0).max(0).max(0).max(0), np.asarray(dset).min(0).min(0).min(0).min(0)]
def display_raw(raw_ds, index):
slice = raw_ds[0:raw_ds.shape[0], 0:raw_ds.shape[1], index]
minval = np.min(np.min(slice, axis=1), axis=0)
maxval = np.max(np.max(slice, axis=1), axis=0)
img = Image.fromarray((slice - minval) / (maxval - minval) * 255)
img.show()
def display_con(con_ds, index):
slice = con_ds[0:con_ds.shape[0], 0:con_ds.shape[1], index]
rgbArray = np.zeros((con_ds.shape[0], con_ds.shape[1], 3), 'uint8')
rgbArray[..., 0] = colorsr[slice] * 256
rgbArray[..., 1] = colorsg[slice] * 256
rgbArray[..., 2] = colorsb[slice] * 256
img = Image.fromarray(rgbArray, 'RGB')
img.show()
def display_aff(aff_ds, index):
sliceX = aff_ds[0, 0:520, 0:520, index]
sliceY = aff_ds[1, 0:520, 0:520, index]
sliceZ = aff_ds[2, 0:520, 0:520, index]
img = Image.fromarray((sliceX & sliceY & sliceZ) * 255)
img.show()
def display_binary(bin_ds, index):
slice = bin_ds[0:bin_ds.shape[0], 0:bin_ds.shape[1], index]
img = Image.fromarray(np.uint8(slice * 255))
img.show()
def slice_data(data, offsets, sizes):
if (len(offsets) == 1):
return data[offsets[0]:offsets[0] + sizes[0]]
if (len(offsets) == 2):
return data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1]]
if (len(offsets) == 3):
return data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1], offsets[2]:offsets[2] + sizes[2]]
if (len(offsets) == 4):
return data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1], offsets[2]:offsets[2] + sizes[2], offsets[3]:offsets[3] + sizes[3]]
def set_slice_data(data, insert_data, offsets, sizes):
if (len(offsets) == 1):
data[offsets[0]:offsets[0] + sizes[0]] = insert_data
if (len(offsets) == 2):
data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1]] = insert_data
if (len(offsets) == 3):
data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1], offsets[2]:offsets[2] + sizes[2]] = insert_data
if (len(offsets) == 4):
data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1], offsets[2]:offsets[2] + sizes[2], offsets[3]:offsets[3] + sizes[3]] = insert_data
def sanity_check_net_blobs(net):
for key in net.blobs.keys():
dst = net.blobs[key]
data = np.ndarray.flatten(dst.data[0].copy())
print 'Blob: %s; %s' % (key, data.shape)
failure = False
first = -1
for i in range(0,data.shape[0]):
if abs(data[i]) > 100000:
failure = True
if first == -1:
first = i
print 'Failure, location %d; objective %d' % (i, data[i])
print 'Failure: %s, first at %d' % (failure,first)
if failure:
break
def process(net, data_arrays, output_folder):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
dst = net.blobs['prob']
dummy_slice = [0]
for i in range(0, len(data_arrays)):
data_array = data_arrays[i]
dims = len(data_array.shape)
offsets = []
in_dims = []
out_dims = []
for d in range(0, dims):
offsets += [0]
in_dims += [data_array.shape[d]]
out_dims += [data_array.shape[d] - config.input_padding[d]]
pred_array = np.zeros(tuple([3] + out_dims))
while(True):
data_slice = slice_data(data_array, offsets, [config.output_dims[di] + config.input_padding[di] for di in range(0, dims)])
net.set_input_arrays(0, np.ascontiguousarray(data_slice[None, None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
net.forward()
output = dst.data[0].copy()
print offsets
# while(True):
# blob = raw_input('Blob:')
# fmap = int(raw_input('Enter the feature map:'))
# m = volume_slicer.VolumeSlicer(data=np.squeeze(net.blobs[blob].data[0])[fmap,:,:])
# m.configure_traits()
set_slice_data(pred_array, output, [0] + offsets, [3] + config.output_dims)
incremented = False
for d in range(0, dims):
if (offsets[dims - 1 - d] == out_dims[dims - 1 - d] - config.output_dims[dims - 1 - d]):
# Reset direction
offsets[dims - 1 - d] = 0
else:
# Increment direction
offsets[dims - 1 - d] = min(offsets[dims - 1 - d] + config.output_dims[dims - 1 - d], out_dims[dims - 1 - d] - config.output_dims[dims - 1 - d])
incremented = True
break
# Processed the whole input block
if not incremented:
break
# Save the output
outhdf5 = h5py.File(output_folder+'/'+str(i)+'.h5', 'w')
outdset = outhdf5.create_dataset('main', tuple([3]+out_dims), np.float32, data=pred_array)
outdset.attrs['edges'] = np.string_('-1,0,0;0,-1,0;0,0,-1')
outhdf5.close()
def train(solver, data_arrays, label_arrays, mode='malis'):
losses = []
net = solver.net
if mode == 'malis':
nhood = malis.mknhood3d()
if mode == 'euclid':
nhood = malis.mknhood3d()
if mode == 'malis_aniso':
nhood = malis.mknhood3d_aniso()
if mode == 'euclid_aniso':
nhood = malis.mknhood3d_aniso()
data_slice_cont = np.zeros((1,1,132,132,132), dtype=float32)
label_slice_cont = np.zeros((1,1,44,44,44), dtype=float32)
aff_slice_cont = np.zeros((1,3,44,44,44), dtype=float32)
nhood_cont = np.zeros((1,1,3,3), dtype=float32)
error_scale_cont = np.zeros((1,1,44,44,44), dtype=float32)
dummy_slice = np.ascontiguousarray([0]).astype(float32)
# Loop from current iteration to last iteration
for i in range(solver.iter, solver.max_iter):
# First pick the dataset to train with
dataset = randint(0, len(data_arrays) - 1)
data_array = data_arrays[dataset]
label_array = label_arrays[dataset]
# affinity_array = affinity_arrays[dataset]
offsets = []
for j in range(0, dims):
offsets.append(randint(0, data_array.shape[j] - (config.output_dims[j] + config.input_padding[j])))
# These are the raw data elements
data_slice = slice_data(data_array, offsets, [config.output_dims[di] + config.input_padding[di] for di in range(0, dims)])
# These are the labels (connected components)
label_slice = slice_data(label_array, [offsets[di] + int(math.ceil(config.input_padding[di] / float(2))) for di in range(0, dims)], config.output_dims)
# These are the affinity edge values
# Also recomputing the corresponding labels (connected components)
aff_slice = malis.seg_to_affgraph(label_slice,nhood)
label_slice,ccSizes = malis.connected_components_affgraph(aff_slice,nhood)
print (data_slice[None, None, :]).shape
print (label_slice[None, None, :]).shape
print (aff_slice[None, :]).shape
print (nhood).shape
if mode == 'malis':
np.copyto(data_slice_cont, np.ascontiguousarray(data_slice[None, None, :]).astype(float32))
np.copyto(label_slice_cont, np.ascontiguousarray(label_slice[None, None, :]).astype(float32))
np.copyto(aff_slice_cont, np.ascontiguousarray(aff_slice[None, :]).astype(float32))
np.copyto(nhood_cont, np.ascontiguousarray(nhood[None, None, :]).astype(float32))
net.set_input_arrays(0, data_slice_cont, dummy_slice)
net.set_input_arrays(1, label_slice_cont, dummy_slice)
net.set_input_arrays(2, aff_slice_cont, dummy_slice)
net.set_input_arrays(3, nhood_cont, dummy_slice)
# We pass the raw and affinity array only
if mode == 'euclid':
net.set_input_arrays(0, np.ascontiguousarray(data_slice[None, None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
net.set_input_arrays(1, np.ascontiguousarray(aff_slice[None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
net.set_input_arrays(2, np.ascontiguousarray(error_scale(aff_slice[None, :],1.0,0.045)).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
if mode == 'softmax':
net.set_input_arrays(0, np.ascontiguousarray(data_slice[None, None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
net.set_input_arrays(1, np.ascontiguousarray(label_slice[None, None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
# Single step
loss = solver.step(1)
# Memory clean up and report
print("Memory usage (before GC): %d MiB" % ((resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / (1024)))
while gc.collect():
pass
print("Memory usage (after GC): %d MiB" % ((resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / (1024)))
# m = volume_slicer.VolumeSlicer(data=np.squeeze((net.blobs['Convolution18'].data[0])[0,:,:]))
# m.configure_traits()
print("Loss: %s" % loss)
losses += [loss]
hdf5_raw_file = 'fibsem_medulla_7col/tstvol-520-1-h5/img_normalized.h5'
hdf5_gt_file = 'fibsem_medulla_7col/tstvol-520-1-h5/groundtruth_seg.h5'
# hdf5_aff_file = 'fibsem_medulla_7col/tstvol-520-1-h5/groundtruth_aff.h5'
#hdf5_raw_file = 'zebrafish_friedrich/raw.hdf5'
#hdf5_gt_file = 'zebrafish_friedrich/labels_2.hdf5'
hdf5_raw = h5py.File(hdf5_raw_file, 'r')
hdf5_gt = h5py.File(hdf5_gt_file, 'r')
# hdf5_aff = h5py.File(hdf5_aff_file, 'r')
#inspect_3D_hdf5(hdf5_raw)
#inspect_3D_hdf5(hdf5_gt)
#inspect_4D_hdf5(hdf5_aff)
# Make the dataset ready for the network
hdf5_raw_ds = normalize(np.asarray(hdf5_raw[hdf5_raw.keys()[0]]).astype(float32), -1, 1)
hdf5_gt_ds = np.asarray(hdf5_gt[hdf5_gt.keys()[0]]).astype(float32)
# hdf5_aff_ds = np.asarray(hdf5_aff[hdf5_aff.keys()[0]])
#display_aff(hdf5_aff_ds, 1)
#display_con(hdf5_gt_ds, 0)
#display_raw(hdf5_raw_ds, 0)
#display_binary(hdf5_gt_ds, 0)
#Initialize caffe
caffe.set_mode_gpu()
caffe.set_device(config.device_id)
if(config.mode == "train"):
solver = caffe.get_solver_from_file(config.solver_proto)
#solver.restore("net__iter_8000.solverstate")
net = solver.net
train(solver, [normalize(hdf5_raw_ds)], [hdf5_gt_ds])
if(config.mode == "process"):
net = caffe.Net(config.test_net, config.trained_model, caffe.TEST)
process(net, [normalize(hdf5_raw_ds)], config.output_folder)
|
[] |
[] |
[
"GPU_MAX_ALLOC_PERCENT",
"GPU_SINGLE_ALLOC_PERCENT",
"GPU_MAX_HEAP_SIZE",
"GPU_FORCE_64BIT_PTR"
] |
[]
|
["GPU_MAX_ALLOC_PERCENT", "GPU_SINGLE_ALLOC_PERCENT", "GPU_MAX_HEAP_SIZE", "GPU_FORCE_64BIT_PTR"]
|
python
| 4 | 0 | |
internal/cmd/config.go
|
package cmd
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"net/http"
"os"
"os/exec"
"os/user"
"path/filepath"
"reflect"
"regexp"
"runtime"
"runtime/pprof"
"sort"
"strconv"
"strings"
"text/template"
"time"
"unicode"
"github.com/Masterminds/sprig/v3"
"github.com/coreos/go-semver/semver"
gogit "github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing/format/diff"
"github.com/google/gops/agent"
"github.com/gregjones/httpcache"
"github.com/gregjones/httpcache/diskcache"
"github.com/mitchellh/mapstructure"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/afero"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/twpayne/go-shell"
"github.com/twpayne/go-vfs/v4"
"github.com/twpayne/go-xdg/v6"
"go.uber.org/multierr"
"golang.org/x/term"
"github.com/twpayne/chezmoi/v2/assets/templates"
"github.com/twpayne/chezmoi/v2/internal/chezmoi"
"github.com/twpayne/chezmoi/v2/internal/git"
)
const (
logComponentKey = "component"
logComponentValueEncryption = "encryption"
logComponentValuePersistentState = "persistentState"
logComponentValueSourceState = "sourceState"
logComponentValueSystem = "system"
)
type purgeOptions struct {
binary bool
}
type templateConfig struct {
Options []string `mapstructure:"options"`
}
// A Config represents a configuration.
type Config struct {
// Global configuration, settable in the config file.
CacheDirAbsPath chezmoi.AbsPath `mapstructure:"cacheDir"`
Color autoBool `mapstructure:"color"`
Data map[string]interface{} `mapstructure:"data"`
DestDirAbsPath chezmoi.AbsPath `mapstructure:"destDir"`
Interpreters map[string]*chezmoi.Interpreter `mapstructure:"interpreters"`
Mode chezmoi.Mode `mapstructure:"mode"`
Pager string `mapstructure:"pager"`
PINEntry pinEntryConfig `mapstructure:"pinentry"`
Safe bool `mapstructure:"safe"`
SourceDirAbsPath chezmoi.AbsPath `mapstructure:"sourceDir"`
Template templateConfig `mapstructure:"template"`
Umask fs.FileMode `mapstructure:"umask"`
UseBuiltinAge autoBool `mapstructure:"useBuiltinAge"`
UseBuiltinGit autoBool `mapstructure:"useBuiltinGit"`
Verbose bool `mapstructure:"verbose"`
WorkingTreeAbsPath chezmoi.AbsPath `mapstructure:"workingTree"`
// Global configuration, not settable in the config file.
configFormat readDataFormat
cpuProfile chezmoi.AbsPath
debug bool
dryRun bool
force bool
gops bool
homeDir string
keepGoing bool
noPager bool
noTTY bool
outputAbsPath chezmoi.AbsPath
refreshExternals bool
sourcePath bool
templateFuncs template.FuncMap
// Password manager configurations, settable in the config file.
Bitwarden bitwardenConfig `mapstructure:"bitwarden"`
Gopass gopassConfig `mapstructure:"gopass"`
Keepassxc keepassxcConfig `mapstructure:"keepassxc"`
Lastpass lastpassConfig `mapstructure:"lastpass"`
Onepassword onepasswordConfig `mapstructure:"onepassword"`
Pass passConfig `mapstructure:"pass"`
Secret secretConfig `mapstructure:"secret"`
Vault vaultConfig `mapstructure:"vault"`
// Encryption configurations, settable in the config file.
Encryption string `mapstructure:"encryption"`
Age chezmoi.AgeEncryption `mapstructure:"age"`
GPG chezmoi.GPGEncryption `mapstructure:"gpg"`
// Password manager data.
gitHub gitHubData
keyring keyringData
// Command configurations, settable in the config file.
Add addCmdConfig `mapstructure:"add"`
CD cdCmdConfig `mapstructure:"cd"`
Diff diffCmdConfig `mapstructure:"diff"`
Docs docsCmdConfig `mapstructure:"docs"`
Edit editCmdConfig `mapstructure:"edit"`
Git gitCmdConfig `mapstructure:"git"`
Merge mergeCmdConfig `mapstructure:"merge"`
// Command configurations, not settable in the config file.
apply applyCmdConfig
archive archiveCmdConfig
data dataCmdConfig
dump dumpCmdConfig
executeTemplate executeTemplateCmdConfig
_import importCmdConfig
init initCmdConfig
managed managedCmdConfig
mergeAll mergeAllCmdConfig
purge purgeCmdConfig
reAdd reAddCmdConfig
remove removeCmdConfig
secretKeyring secretKeyringCmdConfig
state stateCmdConfig
status statusCmdConfig
update updateCmdConfig
upgrade upgradeCmdConfig
verify verifyCmdConfig
// Version information.
version semver.Version
versionInfo VersionInfo
versionStr string
// Configuration.
fileSystem vfs.FS
bds *xdg.BaseDirectorySpecification
configFileAbsPath chezmoi.AbsPath
baseSystem chezmoi.System
sourceSystem chezmoi.System
destSystem chezmoi.System
persistentStateAbsPath chezmoi.AbsPath
persistentState chezmoi.PersistentState
httpClient *http.Client
logger *zerolog.Logger
// Computed configuration.
homeDirAbsPath chezmoi.AbsPath
encryption chezmoi.Encryption
stdin io.Reader
stdout io.Writer
stderr io.Writer
tempDirs map[string]chezmoi.AbsPath
ioregData ioregData
}
// A configOption sets an option on a Config.
type configOption func(*Config) error
type configState struct {
ConfigTemplateContentsSHA256 chezmoi.HexBytes `json:"configTemplateContentsSHA256" yaml:"configTemplateContentsSHA256"` //nolint:lll,tagliatelle
}
var (
chezmoiRelPath = chezmoi.NewRelPath("chezmoi")
persistentStateFileRelPath = chezmoi.NewRelPath("chezmoistate.boltdb")
httpCacheDirRelPath = chezmoi.NewRelPath("httpcache")
configStateKey = []byte("configState")
defaultAgeEncryptionConfig = chezmoi.AgeEncryption{
Command: "age",
Suffix: ".age",
}
defaultGPGEncryptionConfig = chezmoi.GPGEncryption{
Command: "gpg",
Suffix: ".asc",
}
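// identifierRx matches a template data identifier: a Unicode letter or
// underscore followed by letters, digits, or underscores, e.g. "email" and
// "work_Laptop2" match, while "2fast" and "foo-bar" do not.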
identifierRx = regexp.MustCompile(`\A[\pL_][\pL\p{Nd}_]*\z`)
whitespaceRx = regexp.MustCompile(`\s+`)
viperDecodeConfigOptions = []viper.DecoderConfigOption{
viper.DecodeHook(
mapstructure.ComposeDecodeHookFunc(
mapstructure.StringToTimeDurationHookFunc(),
mapstructure.StringToSliceHookFunc(","),
chezmoi.StringSliceToEntryTypeSetHookFunc(),
chezmoi.StringToAbsPathHookFunc(),
StringOrBoolToAutoBoolHookFunc(),
),
),
}
)
// newConfig creates a new Config with the given options.
func newConfig(options ...configOption) (*Config, error) {
userHomeDir, err := os.UserHomeDir()
if err != nil {
return nil, err
}
homeDirAbsPath, err := chezmoi.NormalizePath(userHomeDir)
if err != nil {
return nil, err
}
bds, err := xdg.NewBaseDirectorySpecification()
if err != nil {
return nil, err
}
cacheDirAbsPath := chezmoi.NewAbsPath(bds.CacheHome).Join(chezmoiRelPath)
c := &Config{
// Global configuration, settable in the config file.
CacheDirAbsPath: cacheDirAbsPath,
Color: autoBool{
auto: true,
},
Interpreters: defaultInterpreters,
Pager: os.Getenv("PAGER"),
PINEntry: pinEntryConfig{
Options: pinEntryDefaultOptions,
},
Safe: true,
Template: templateConfig{
Options: chezmoi.DefaultTemplateOptions,
},
Umask: chezmoi.Umask,
UseBuiltinAge: autoBool{
auto: true,
},
UseBuiltinGit: autoBool{
auto: true,
},
// Global configuration, not settable in the config file.
homeDir: userHomeDir,
templateFuncs: sprig.TxtFuncMap(),
// Password manager configurations, settable in the config file.
Bitwarden: bitwardenConfig{
Command: "bw",
},
Gopass: gopassConfig{
Command: "gopass",
},
Keepassxc: keepassxcConfig{
Command: "keepassxc-cli",
},
Lastpass: lastpassConfig{
Command: "lpass",
},
Onepassword: onepasswordConfig{
Command: "op",
},
Pass: passConfig{
Command: "pass",
},
Vault: vaultConfig{
Command: "vault",
},
// Encryption configurations, settable in the config file.
Age: defaultAgeEncryptionConfig,
GPG: defaultGPGEncryptionConfig,
// Password manager data.
// Command configurations, settable in the config file.
Add: addCmdConfig{
exclude: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
recursive: true,
},
Diff: diffCmdConfig{
Exclude: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
},
Docs: docsCmdConfig{
MaxWidth: 80,
},
Edit: editCmdConfig{
Hardlink: true,
MinDuration: 1 * time.Second,
exclude: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
include: chezmoi.NewEntryTypeSet(
chezmoi.EntryTypeDirs | chezmoi.EntryTypeFiles | chezmoi.EntryTypeSymlinks | chezmoi.EntryTypeEncrypted,
),
},
Git: gitCmdConfig{
Command: "git",
},
Merge: mergeCmdConfig{
Command: "vimdiff",
},
// Command configurations, not settable in the config file.
apply: applyCmdConfig{
exclude: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
recursive: true,
},
archive: archiveCmdConfig{
exclude: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
recursive: true,
},
data: dataCmdConfig{
format: defaultWriteDataFormat,
},
dump: dumpCmdConfig{
exclude: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
format: defaultWriteDataFormat,
include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
recursive: true,
},
executeTemplate: executeTemplateCmdConfig{
stdinIsATTY: true,
},
_import: importCmdConfig{
destination: homeDirAbsPath,
exclude: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
},
init: initCmdConfig{
data: true,
exclude: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
},
managed: managedCmdConfig{
exclude: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
include: chezmoi.NewEntryTypeSet(
chezmoi.EntryTypeDirs | chezmoi.EntryTypeFiles | chezmoi.EntryTypeSymlinks | chezmoi.EntryTypeEncrypted,
),
},
mergeAll: mergeAllCmdConfig{
recursive: true,
},
reAdd: reAddCmdConfig{
exclude: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
recursive: true,
},
state: stateCmdConfig{
data: stateDataCmdConfig{
format: defaultWriteDataFormat,
},
dump: stateDumpCmdConfig{
format: defaultWriteDataFormat,
},
getBucket: stateGetBucketCmdConfig{
format: defaultWriteDataFormat,
},
},
status: statusCmdConfig{
exclude: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
recursive: true,
},
update: updateCmdConfig{
apply: true,
exclude: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
recursive: true,
},
upgrade: upgradeCmdConfig{
owner: "twpayne",
repo: "chezmoi",
},
verify: verifyCmdConfig{
exclude: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll &^ chezmoi.EntryTypeScripts),
recursive: true,
},
// Configuration.
fileSystem: vfs.OSFS,
bds: bds,
// Computed configuration.
homeDirAbsPath: homeDirAbsPath,
tempDirs: make(map[string]chezmoi.AbsPath),
stdin: os.Stdin,
stdout: os.Stdout,
stderr: os.Stderr,
}
for key, value := range map[string]interface{}{
"bitwarden": c.bitwardenTemplateFunc,
"bitwardenAttachment": c.bitwardenAttachmentTemplateFunc,
"bitwardenFields": c.bitwardenFieldsTemplateFunc,
"decrypt": c.decryptTemplateFunc,
"encrypt": c.encryptTemplateFunc,
"fromYaml": c.fromYamlTemplateFunc,
"gitHubKeys": c.gitHubKeysTemplateFunc,
"gitHubLatestRelease": c.gitHubLatestReleaseTemplateFunc,
"gopass": c.gopassTemplateFunc,
"gopassRaw": c.gopassRawTemplateFunc,
"include": c.includeTemplateFunc,
"ioreg": c.ioregTemplateFunc,
"joinPath": c.joinPathTemplateFunc,
"keepassxc": c.keepassxcTemplateFunc,
"keepassxcAttribute": c.keepassxcAttributeTemplateFunc,
"keyring": c.keyringTemplateFunc,
"lastpass": c.lastpassTemplateFunc,
"lastpassRaw": c.lastpassRawTemplateFunc,
"lookPath": c.lookPathTemplateFunc,
"mozillaInstallHash": c.mozillaInstallHashTemplateFunc,
"onepassword": c.onepasswordTemplateFunc,
"onepasswordDetailsFields": c.onepasswordDetailsFieldsTemplateFunc,
"onepasswordDocument": c.onepasswordDocumentTemplateFunc,
"onepasswordItemFields": c.onepasswordItemFieldsTemplateFunc,
"output": c.outputTemplateFunc,
"pass": c.passTemplateFunc,
"passRaw": c.passRawTemplateFunc,
"secret": c.secretTemplateFunc,
"secretJSON": c.secretJSONTemplateFunc,
"stat": c.statTemplateFunc,
"toYaml": c.toYamlTemplateFunc,
"vault": c.vaultTemplateFunc,
} {
c.addTemplateFunc(key, value)
}
for _, option := range options {
if err := option(c); err != nil {
return nil, err
}
}
c.homeDirAbsPath, err = chezmoi.NormalizePath(c.homeDir)
if err != nil {
return nil, err
}
c.configFileAbsPath, err = c.defaultConfigFile(c.fileSystem, c.bds)
if err != nil {
return nil, err
}
c.SourceDirAbsPath, err = c.defaultSourceDir(c.fileSystem, c.bds)
if err != nil {
return nil, err
}
c.DestDirAbsPath = c.homeDirAbsPath
c._import.destination = c.homeDirAbsPath
return c, nil
}
// addTemplateFunc adds the template function with the key key and value value
// to c. It panics if there is already an existing template function with the
// same key.
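//
// Illustrative example (not from the original source): after
// c.addTemplateFunc("myFunc", func() string { return "x" }), registering
// "myFunc" a second time panics with "myFunc: already defined".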
func (c *Config) addTemplateFunc(key string, value interface{}) {
if _, ok := c.templateFuncs[key]; ok {
panic(fmt.Sprintf("%s: already defined", key))
}
c.templateFuncs[key] = value
}
type applyArgsOptions struct {
include *chezmoi.EntryTypeSet
init bool
exclude *chezmoi.EntryTypeSet
recursive bool
umask fs.FileMode
preApplyFunc chezmoi.PreApplyFunc
}
// applyArgs is the core of all commands that make changes to a target system.
// It checks config file freshness, reads the source state, and then applies the
// source state for each target entry in args. If args is empty then the source
// state is applied to all target entries.
func (c *Config) applyArgs(
ctx context.Context, targetSystem chezmoi.System, targetDirAbsPath chezmoi.AbsPath, args []string,
options applyArgsOptions,
) error {
if options.init {
if err := c.createAndReloadConfigFile(); err != nil {
return err
}
}
var currentConfigTemplateContentsSHA256 []byte
configTemplateRelPath, _, configTemplateContents, err := c.findFirstConfigTemplate()
if err != nil {
return err
}
if configTemplateRelPath != chezmoi.EmptyRelPath {
currentConfigTemplateContentsSHA256 = chezmoi.SHA256Sum(configTemplateContents)
}
var previousConfigTemplateContentsSHA256 []byte
if configStateData, err := c.persistentState.Get(chezmoi.ConfigStateBucket, configStateKey); err != nil {
return err
} else if configStateData != nil {
var configState configState
if err := json.Unmarshal(configStateData, &configState); err != nil {
return err
}
previousConfigTemplateContentsSHA256 = []byte(configState.ConfigTemplateContentsSHA256)
}
configTemplatesEmpty := currentConfigTemplateContentsSHA256 == nil && previousConfigTemplateContentsSHA256 == nil
configTemplateContentsUnchanged := configTemplatesEmpty ||
bytes.Equal(currentConfigTemplateContentsSHA256, previousConfigTemplateContentsSHA256)
if !configTemplateContentsUnchanged {
if c.force {
if configTemplateRelPath == chezmoi.EmptyRelPath {
if err := c.persistentState.Delete(chezmoi.ConfigStateBucket, configStateKey); err != nil {
return err
}
} else {
configStateValue, err := json.Marshal(configState{
ConfigTemplateContentsSHA256: chezmoi.HexBytes(currentConfigTemplateContentsSHA256),
})
if err != nil {
return err
}
if err := c.persistentState.Set(chezmoi.ConfigStateBucket, configStateKey, configStateValue); err != nil {
return err
}
}
} else {
c.errorf("warning: config file template has changed, run chezmoi init to regenerate config file\n")
}
}
sourceState, err := c.newSourceState(ctx)
if err != nil {
return err
}
var targetRelPaths chezmoi.RelPaths
switch {
case len(args) == 0:
targetRelPaths = sourceState.TargetRelPaths()
case c.sourcePath:
targetRelPaths, err = c.targetRelPathsBySourcePath(sourceState, args)
if err != nil {
return err
}
default:
targetRelPaths, err = c.targetRelPaths(sourceState, args, targetRelPathsOptions{
mustBeInSourceState: true,
recursive: options.recursive,
})
if err != nil {
return err
}
}
applyOptions := chezmoi.ApplyOptions{
Include: options.include.Sub(options.exclude),
PreApplyFunc: options.preApplyFunc,
Umask: options.umask,
}
keptGoingAfterErr := false
for _, targetRelPath := range targetRelPaths {
switch err := sourceState.Apply(
targetSystem, c.destSystem, c.persistentState, targetDirAbsPath, targetRelPath, applyOptions,
); {
case errors.Is(err, chezmoi.Skip):
continue
case err != nil && c.keepGoing:
c.errorf("%v\n", err)
keptGoingAfterErr = true
case err != nil:
return err
}
}
if keptGoingAfterErr {
return ExitCodeError(1)
}
return nil
}
// close closes resources associated with c.
func (c *Config) close() error {
var err error
for _, tempDirAbsPath := range c.tempDirs {
err2 := os.RemoveAll(tempDirAbsPath.String())
c.logger.Err(err2).
Stringer("tempDir", tempDirAbsPath).
Msg("RemoveAll")
err = multierr.Append(err, err2)
}
pprof.StopCPUProfile()
agent.Close()
return err
}
// cmdOutput returns the output of running the command name with args in dirAbsPath.
func (c *Config) cmdOutput(dirAbsPath chezmoi.AbsPath, name string, args []string) ([]byte, error) {
cmd := exec.Command(name, args...)
if !dirAbsPath.Empty() {
dirRawAbsPath, err := c.baseSystem.RawPath(dirAbsPath)
if err != nil {
return nil, err
}
cmd.Dir = dirRawAbsPath.String()
}
return c.baseSystem.IdempotentCmdOutput(cmd)
}
// colorAutoFunc detects whether color should be used.
func (c *Config) colorAutoFunc() bool {
if _, ok := os.LookupEnv("NO_COLOR"); ok {
return false
}
if stdout, ok := c.stdout.(*os.File); ok {
return term.IsTerminal(int(stdout.Fd()))
}
return false
}
// createAndReloadConfigFile creates a config file if there is a config file
// template and reloads it.
func (c *Config) createAndReloadConfigFile() error {
// Find config template, execute it, and create config file.
configTemplateRelPath, ext, configTemplateContents, err := c.findFirstConfigTemplate()
if err != nil {
return err
}
var configFileContents []byte
if configTemplateRelPath == chezmoi.EmptyRelPath {
if err := c.persistentState.Delete(chezmoi.ConfigStateBucket, configStateKey); err != nil {
return err
}
} else {
configFileContents, err = c.createConfigFile(configTemplateRelPath, configTemplateContents)
if err != nil {
return err
}
// Validate the config.
v := viper.New()
v.SetConfigType(ext)
if err := v.ReadConfig(bytes.NewBuffer(configFileContents)); err != nil {
return err
}
if err := v.Unmarshal(&Config{}, viperDecodeConfigOptions...); err != nil {
return err
}
// Write the config.
configPath := c.init.configPath
if c.init.configPath.Empty() {
configPath = chezmoi.NewAbsPath(c.bds.ConfigHome).Join(chezmoiRelPath, configTemplateRelPath)
}
if err := chezmoi.MkdirAll(c.baseSystem, configPath.Dir(), 0o777); err != nil {
return err
}
if err := c.baseSystem.WriteFile(configPath, configFileContents, 0o600); err != nil {
return err
}
configStateValue, err := json.Marshal(configState{
ConfigTemplateContentsSHA256: chezmoi.HexBytes(chezmoi.SHA256Sum(configTemplateContents)),
})
if err != nil {
return err
}
if err := c.persistentState.Set(chezmoi.ConfigStateBucket, configStateKey, configStateValue); err != nil {
return err
}
}
// Reload config if it was created.
if configTemplateRelPath != chezmoi.EmptyRelPath {
viper.SetConfigType(ext)
if err := viper.ReadConfig(bytes.NewBuffer(configFileContents)); err != nil {
return err
}
if err := viper.Unmarshal(c, viperDecodeConfigOptions...); err != nil {
return err
}
}
return nil
}
// createConfigFile creates a config file using a template and returns its
// contents.
func (c *Config) createConfigFile(filename chezmoi.RelPath, data []byte) ([]byte, error) {
funcMap := make(template.FuncMap)
chezmoi.RecursiveMerge(funcMap, c.templateFuncs)
chezmoi.RecursiveMerge(funcMap, map[string]interface{}{
"promptBool": c.promptBool,
"promptInt": c.promptInt,
"promptString": c.promptString,
"stdinIsATTY": c.stdinIsATTY,
"writeToStdout": c.writeToStdout,
})
t, err := template.New(filename.String()).Funcs(funcMap).Parse(string(data))
if err != nil {
return nil, err
}
builder := strings.Builder{}
templateData := c.defaultTemplateData()
if c.init.data {
chezmoi.RecursiveMerge(templateData, c.Data)
}
if err = t.Execute(&builder, templateData); err != nil {
return nil, err
}
return []byte(builder.String()), nil
}
// defaultConfigFile returns the default config file according to the XDG Base
// Directory Specification.
func (c *Config) defaultConfigFile(
fileSystem vfs.Stater, bds *xdg.BaseDirectorySpecification,
) (chezmoi.AbsPath, error) {
// Search XDG Base Directory Specification config directories first.
for _, configDir := range bds.ConfigDirs {
configDirAbsPath, err := chezmoi.NewAbsPathFromExtPath(configDir, c.homeDirAbsPath)
if err != nil {
return chezmoi.EmptyAbsPath, err
}
for _, extension := range viper.SupportedExts {
configFileAbsPath := configDirAbsPath.JoinString("chezmoi", "chezmoi."+extension)
if _, err := fileSystem.Stat(configFileAbsPath.String()); err == nil {
return configFileAbsPath, nil
}
}
}
// Fallback to XDG Base Directory Specification default.
configHomeAbsPath, err := chezmoi.NewAbsPathFromExtPath(bds.ConfigHome, c.homeDirAbsPath)
if err != nil {
return chezmoi.EmptyAbsPath, err
}
return configHomeAbsPath.JoinString("chezmoi", "chezmoi.toml"), nil
}
// defaultPreApplyFunc is the default pre-apply function. If the target entry
// has changed since chezmoi last wrote it then it prompts the user for the
// action to take.
func (c *Config) defaultPreApplyFunc(
targetRelPath chezmoi.RelPath, targetEntryState, lastWrittenEntryState, actualEntryState *chezmoi.EntryState,
) error {
c.logger.Info().
Stringer("targetRelPath", targetRelPath).
Object("targetEntryState", targetEntryState).
Object("lastWrittenEntryState", lastWrittenEntryState).
Object("actualEntryState", actualEntryState).
Msg("defaultPreApplyFunc")
switch {
case targetEntryState.Overwrite():
return nil
case targetEntryState.Type == chezmoi.EntryStateTypeScript:
return nil
case c.force:
return nil
case lastWrittenEntryState == nil:
return nil
case lastWrittenEntryState.Equivalent(actualEntryState):
return nil
}
prompt := fmt.Sprintf("%s has changed since chezmoi last wrote it", targetRelPath)
var choices []string
actualContents := actualEntryState.Contents()
targetContents := targetEntryState.Contents()
if actualContents != nil || targetContents != nil {
choices = append(choices, "diff")
}
choices = append(choices, "overwrite", "all-overwrite", "skip", "quit")
for {
switch choice, err := c.promptChoice(prompt, choices); {
case err != nil:
return err
case choice == "diff":
if err := c.diffFile(
targetRelPath,
actualContents, actualEntryState.Mode,
targetContents, targetEntryState.Mode,
); err != nil {
return err
}
case choice == "overwrite":
return nil
case choice == "all-overwrite":
c.force = true
return nil
case choice == "skip":
return chezmoi.Skip
case choice == "quit":
return ExitCodeError(1)
default:
return nil
}
}
}
// defaultSourceDir returns the default source directory according to the XDG
// Base Directory Specification.
func (c *Config) defaultSourceDir(fileSystem vfs.Stater, bds *xdg.BaseDirectorySpecification) (chezmoi.AbsPath, error) {
// Check for XDG Base Directory Specification data directories first.
for _, dataDir := range bds.DataDirs {
dataDirAbsPath, err := chezmoi.NewAbsPathFromExtPath(dataDir, c.homeDirAbsPath)
if err != nil {
return chezmoi.EmptyAbsPath, err
}
sourceDirAbsPath := dataDirAbsPath.Join(chezmoiRelPath)
if _, err := fileSystem.Stat(sourceDirAbsPath.String()); err == nil {
return sourceDirAbsPath, nil
}
}
// Fallback to XDG Base Directory Specification default.
dataHomeAbsPath, err := chezmoi.NewAbsPathFromExtPath(bds.DataHome, c.homeDirAbsPath)
if err != nil {
return chezmoi.EmptyAbsPath, err
}
return dataHomeAbsPath.Join(chezmoiRelPath), nil
}
// defaultTemplateData returns the default template data.
func (c *Config) defaultTemplateData() map[string]interface{} {
// Determine the user's username and group, if possible.
//
// user.Current and user.LookupGroupId in Go's standard library are
// generally unreliable, so work around errors if possible, or ignore them.
//
// If CGO is disabled, then the Go standard library falls back to parsing
// /etc/passwd and /etc/group, which will return incorrect results without
// error if the system uses an alternative password database such as NIS or
// LDAP.
//
// If CGO is enabled then user.Current and user.LookupGroupId will use the
// underlying libc functions, namely getpwuid_r and getgrnam_r. If linked
// with glibc this will return the correct result. If linked with musl then
// they will use musl's implementation which, like Go's non-CGO
// implementation, also only parses /etc/passwd and /etc/group and so also
// returns incorrect results without error if NIS or LDAP are being used.
//
// On Windows, the user's group ID returned by user.Current() is an SID and
// no further useful lookup is possible with Go's standard library.
//
// Since neither the username nor the group are likely widely used in
// templates, leave these variables unset if their values cannot be
// determined. Unset variables will trigger template errors if used,
// alerting the user to the problem and allowing them to find alternative
// solutions.
var username, group string
if currentUser, err := user.Current(); err == nil {
username = currentUser.Username
if runtime.GOOS != "windows" {
if rawGroup, err := user.LookupGroupId(currentUser.Gid); err == nil {
group = rawGroup.Name
} else {
c.logger.Info().
Str("gid", currentUser.Gid).
Err(err).
Msg("user.LookupGroupId")
}
}
} else {
c.logger.Info().
Err(err).
Msg("user.Current")
var ok bool
username, ok = os.LookupEnv("USER")
if !ok {
c.logger.Info().
Str("key", "USER").
Bool("ok", ok).
Msg("os.LookupEnv")
}
}
fqdnHostname := chezmoi.FQDNHostname(c.fileSystem)
var hostname string
if rawHostname, err := os.Hostname(); err == nil {
hostname = strings.SplitN(rawHostname, ".", 2)[0]
} else {
c.logger.Info().
Err(err).
Msg("os.Hostname")
}
kernel, err := chezmoi.Kernel(c.fileSystem)
if err != nil {
c.logger.Info().
Err(err).
Msg("chezmoi.Kernel")
}
var osRelease map[string]interface{}
if rawOSRelease, err := chezmoi.OSRelease(c.baseSystem); err == nil {
osRelease = upperSnakeCaseToCamelCaseMap(rawOSRelease)
} else {
c.logger.Info().
Err(err).
Msg("chezmoi.OSRelease")
}
executable, _ := os.Executable()
return map[string]interface{}{
"chezmoi": map[string]interface{}{
"arch": runtime.GOARCH,
"args": os.Args,
"configFile": c.configFileAbsPath.String(),
"executable": executable,
"fqdnHostname": fqdnHostname,
"group": group,
"homeDir": c.homeDir,
"hostname": hostname,
"kernel": kernel,
"os": runtime.GOOS,
"osRelease": osRelease,
"sourceDir": c.SourceDirAbsPath.String(),
"username": username,
"version": map[string]interface{}{
"builtBy": c.versionInfo.BuiltBy,
"commit": c.versionInfo.Commit,
"date": c.versionInfo.Date,
"version": c.versionInfo.Version,
},
"workingTree": c.WorkingTreeAbsPath.String(),
},
}
}
type destAbsPathInfosOptions struct {
follow bool
ignoreNotExist bool
recursive bool
}
// destAbsPathInfos returns the fs.FileInfos for each destination entry in
// args, recursing into subdirectories and following symlinks if configured in
// options.
func (c *Config) destAbsPathInfos(
sourceState *chezmoi.SourceState, args []string, options destAbsPathInfosOptions,
) (map[chezmoi.AbsPath]fs.FileInfo, error) {
destAbsPathInfos := make(map[chezmoi.AbsPath]fs.FileInfo)
for _, arg := range args {
arg = filepath.Clean(arg)
destAbsPath, err := chezmoi.NewAbsPathFromExtPath(arg, c.homeDirAbsPath)
if err != nil {
return nil, err
}
if _, err := destAbsPath.TrimDirPrefix(c.DestDirAbsPath); err != nil {
return nil, err
}
if options.recursive {
walkFunc := func(destAbsPath chezmoi.AbsPath, fileInfo fs.FileInfo, err error) error {
switch {
case options.ignoreNotExist && errors.Is(err, fs.ErrNotExist):
return nil
case err != nil:
return err
}
if options.follow && fileInfo.Mode().Type() == fs.ModeSymlink {
fileInfo, err = c.destSystem.Stat(destAbsPath)
if err != nil {
return err
}
}
return sourceState.AddDestAbsPathInfos(destAbsPathInfos, c.destSystem, destAbsPath, fileInfo)
}
if err := chezmoi.Walk(c.destSystem, destAbsPath, walkFunc); err != nil {
return nil, err
}
} else {
var fileInfo fs.FileInfo
if options.follow {
fileInfo, err = c.destSystem.Stat(destAbsPath)
} else {
fileInfo, err = c.destSystem.Lstat(destAbsPath)
}
switch {
case options.ignoreNotExist && errors.Is(err, fs.ErrNotExist):
continue
case err != nil:
return nil, err
}
if err := sourceState.AddDestAbsPathInfos(destAbsPathInfos, c.destSystem, destAbsPath, fileInfo); err != nil {
return nil, err
}
}
}
return destAbsPathInfos, nil
}
// diffFile outputs the diff between fromData/fromMode and toData/toMode at path.
func (c *Config) diffFile(
path chezmoi.RelPath,
fromData []byte, fromMode fs.FileMode,
toData []byte, toMode fs.FileMode,
) error {
builder := strings.Builder{}
unifiedEncoder := diff.NewUnifiedEncoder(&builder, diff.DefaultContextLines)
color := c.Color.Value(c.colorAutoFunc)
if color {
unifiedEncoder.SetColor(diff.NewColorConfig())
}
diffPatch, err := chezmoi.DiffPatch(path, fromData, fromMode, toData, toMode)
if err != nil {
return err
}
if err := unifiedEncoder.Encode(diffPatch); err != nil {
return err
}
return c.pageOutputString(builder.String(), c.Diff.Pager)
}
// editor returns the path to the user's editor and any extra arguments.
func (c *Config) editor(args []string) (string, []string) {
// If the user has set an edit command then use it.
if c.Edit.Command != "" {
return c.Edit.Command, append(c.Edit.Args, args...)
}
// Prefer $VISUAL over $EDITOR and fall back to the OS's default editor.
editor := firstNonEmptyString(
os.Getenv("VISUAL"),
os.Getenv("EDITOR"),
defaultEditor,
)
// If editor is found, return it.
if path, err := exec.LookPath(editor); err == nil {
return path, args
}
// Otherwise, if editor contains spaces, then assume that the first word is
// the editor and the rest are arguments.
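// For example (illustrative), with $VISUAL unset and $EDITOR set to
// "code --wait", the lookup of "code --wait" fails, so "code" is resolved on
// $PATH and "--wait" is prepended to args.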
components := whitespaceRx.Split(editor, -1)
if len(components) > 1 {
if path, err := exec.LookPath(components[0]); err == nil {
return path, append(components[1:], args...)
}
}
// Fallback to editor only.
return editor, args
}
// errorf writes an error to stderr.
func (c *Config) errorf(format string, args ...interface{}) {
fmt.Fprintf(c.stderr, "chezmoi: "+format, args...)
}
// execute creates a new root command and executes it with args.
func (c *Config) execute(args []string) error {
rootCmd, err := c.newRootCmd()
if err != nil {
return err
}
rootCmd.SetArgs(args)
return rootCmd.Execute()
}
// filterInput reads from args (or the standard input if args is empty),
// transforms it with f, and writes the output.
func (c *Config) filterInput(args []string, f func([]byte) ([]byte, error)) error {
if len(args) == 0 {
input, err := io.ReadAll(c.stdin)
if err != nil {
return err
}
output, err := f(input)
if err != nil {
return err
}
return c.writeOutput(output)
}
for _, arg := range args {
argAbsPath, err := chezmoi.NewAbsPathFromExtPath(arg, c.homeDirAbsPath)
if err != nil {
return err
}
input, err := c.baseSystem.ReadFile(argAbsPath)
if err != nil {
return err
}
output, err := f(input)
if err != nil {
return err
}
if err := c.writeOutput(output); err != nil {
return err
}
}
return nil
}
// findFirstConfigTemplate searches for a config template, returning the path,
// format, and contents of the first one that it finds.
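//
// As a sketch: assuming chezmoi.Prefix is ".chezmoi" and chezmoi.TemplateSuffix
// is ".tmpl", this looks for files such as .chezmoi.toml.tmpl or
// .chezmoi.yaml.tmpl in the source directory, in the order of
// viper.SupportedExts.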
func (c *Config) findFirstConfigTemplate() (chezmoi.RelPath, string, []byte, error) {
sourceDirAbsPath, err := c.sourceDirAbsPath()
if err != nil {
return chezmoi.EmptyRelPath, "", nil, err
}
for _, ext := range viper.SupportedExts {
filename := chezmoi.NewRelPath(chezmoi.Prefix + "." + ext + chezmoi.TemplateSuffix)
contents, err := c.baseSystem.ReadFile(sourceDirAbsPath.Join(filename))
switch {
case errors.Is(err, fs.ErrNotExist):
continue
case err != nil:
return chezmoi.EmptyRelPath, "", nil, err
}
return chezmoi.NewRelPath("chezmoi." + ext), ext, contents, nil
}
return chezmoi.EmptyRelPath, "", nil, nil
}
func (c *Config) getHTTPClient() (*http.Client, error) {
if c.httpClient != nil {
return c.httpClient, nil
}
httpCacheBasePath, err := c.baseSystem.RawPath(c.CacheDirAbsPath.Join(httpCacheDirRelPath))
if err != nil {
return nil, err
}
httpCache := diskcache.New(httpCacheBasePath.String())
httpTransport := httpcache.NewTransport(httpCache)
c.httpClient = httpTransport.Client()
return c.httpClient, nil
}
// gitAutoAdd adds all changes to the git index and returns the new git status.
func (c *Config) gitAutoAdd() (*git.Status, error) {
if err := c.run(c.WorkingTreeAbsPath, c.Git.Command, []string{"add", "."}); err != nil {
return nil, err
}
output, err := c.cmdOutput(c.WorkingTreeAbsPath, c.Git.Command, []string{"status", "--porcelain=v2"})
if err != nil {
return nil, err
}
return git.ParseStatusPorcelainV2(output)
}
// gitAutoCommit commits all changes in the git index, including generating a
// commit message from status.
func (c *Config) gitAutoCommit(status *git.Status) error {
if status.Empty() {
return nil
}
commitMessageTemplate, err := templates.FS.ReadFile("COMMIT_MESSAGE.tmpl")
if err != nil {
return err
}
commitMessageTmpl, err := template.New("commit_message").Funcs(c.templateFuncs).Parse(string(commitMessageTemplate))
if err != nil {
return err
}
commitMessage := strings.Builder{}
if err := commitMessageTmpl.Execute(&commitMessage, status); err != nil {
return err
}
return c.run(c.WorkingTreeAbsPath, c.Git.Command, []string{"commit", "--message", commitMessage.String()})
}
// gitAutoPush pushes all changes to the remote if status is not empty.
func (c *Config) gitAutoPush(status *git.Status) error {
if status.Empty() {
return nil
}
return c.run(c.WorkingTreeAbsPath, c.Git.Command, []string{"push"})
}
// makeRunEWithSourceState returns a function for
// github.com/spf13/cobra.Command.RunE that includes reading the source state.
func (c *Config) makeRunEWithSourceState(
runE func(*cobra.Command, []string, *chezmoi.SourceState) error,
) func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
sourceState, err := c.newSourceState(cmd.Context())
if err != nil {
return err
}
return runE(cmd, args, sourceState)
}
}
// marshal formats data in dataFormat and writes it to the standard output.
func (c *Config) marshal(dataFormat writeDataFormat, data interface{}) error {
var format chezmoi.Format
switch dataFormat {
case writeDataFormatJSON:
format = chezmoi.FormatJSON
case writeDataFormatYAML:
format = chezmoi.FormatYAML
default:
return fmt.Errorf("%s: unknown format", dataFormat)
}
marshaledData, err := format.Marshal(data)
if err != nil {
return err
}
return c.writeOutput(marshaledData)
}
// newRootCmd returns a new root github.com/spf13/cobra.Command.
func (c *Config) newRootCmd() (*cobra.Command, error) {
rootCmd := &cobra.Command{
Use: "chezmoi",
Short: "Manage your dotfiles across multiple diverse machines, securely",
Version: c.versionStr,
PersistentPreRunE: c.persistentPreRunRootE,
PersistentPostRunE: c.persistentPostRunRootE,
SilenceErrors: true,
SilenceUsage: true,
}
persistentFlags := rootCmd.PersistentFlags()
persistentFlags.Var(&c.Color, "color", "Colorize output")
persistentFlags.VarP(&c.DestDirAbsPath, "destination", "D", "Set destination directory")
persistentFlags.Var(&c.Mode, "mode", "Mode")
persistentFlags.Var(&c.persistentStateAbsPath, "persistent-state", "Set persistent state file")
persistentFlags.BoolVar(&c.Safe, "safe", c.Safe, "Safely replace files and symlinks")
persistentFlags.VarP(&c.SourceDirAbsPath, "source", "S", "Set source directory")
persistentFlags.Var(&c.UseBuiltinAge, "use-builtin-age", "Use builtin age")
persistentFlags.Var(&c.UseBuiltinGit, "use-builtin-git", "Use builtin git")
persistentFlags.BoolVarP(&c.Verbose, "verbose", "v", c.Verbose, "Make output more verbose")
persistentFlags.VarP(&c.WorkingTreeAbsPath, "working-tree", "W", "Set working tree directory")
for viperKey, key := range map[string]string{
"color": "color",
"destDir": "destination",
"persistentState": "persistent-state",
"mode": "mode",
"safe": "safe",
"sourceDir": "source",
"useBuiltinAge": "use-builtin-age",
"useBuiltinGit": "use-builtin-git",
"verbose": "verbose",
"workingTree": "working-tree",
} {
if err := viper.BindPFlag(viperKey, persistentFlags.Lookup(key)); err != nil {
return nil, err
}
}
persistentFlags.VarP(&c.configFileAbsPath, "config", "c", "Set config file")
persistentFlags.Var(&c.configFormat, "config-format", "Set config file format")
persistentFlags.Var(&c.cpuProfile, "cpu-profile", "Write a CPU profile to path")
persistentFlags.BoolVar(&c.debug, "debug", c.debug, "Include debug information in output")
persistentFlags.BoolVarP(
&c.dryRun, "dry-run", "n", c.dryRun, "Do not make any modifications to the destination directory",
)
persistentFlags.BoolVar(&c.force, "force", c.force, "Make all changes without prompting")
persistentFlags.BoolVar(&c.gops, "gops", c.gops, "Enable gops agent")
persistentFlags.BoolVarP(&c.keepGoing, "keep-going", "k", c.keepGoing, "Keep going as far as possible after an error")
persistentFlags.BoolVar(&c.noPager, "no-pager", c.noPager, "Do not use the pager")
persistentFlags.BoolVar(&c.noTTY, "no-tty", c.noTTY, "Do not attempt to get a TTY for reading passwords")
persistentFlags.VarP(&c.outputAbsPath, "output", "o", "Write output to path instead of stdout")
persistentFlags.BoolVarP(&c.refreshExternals, "refresh-externals", "R", c.refreshExternals, "Refresh external cache")
persistentFlags.BoolVar(&c.sourcePath, "source-path", c.sourcePath, "Specify targets by source path")
for _, err := range []error{
rootCmd.MarkPersistentFlagFilename("config"),
rootCmd.MarkPersistentFlagFilename("cpu-profile"),
persistentFlags.MarkHidden("cpu-profile"),
rootCmd.MarkPersistentFlagDirname("destination"),
persistentFlags.MarkHidden("gops"),
rootCmd.MarkPersistentFlagFilename("output"),
persistentFlags.MarkHidden("safe"),
rootCmd.MarkPersistentFlagDirname("source"),
} {
if err != nil {
return nil, err
}
}
rootCmd.SetHelpCommand(c.newHelpCmd())
for _, cmd := range []*cobra.Command{
c.newAddCmd(),
c.newApplyCmd(),
c.newArchiveCmd(),
c.newCatCmd(),
c.newCDCmd(),
c.newChattrCmd(),
c.newCompletionCmd(),
c.newDataCmd(),
c.newDecryptCommand(),
c.newDiffCmd(),
c.newDocsCmd(),
c.newDoctorCmd(),
c.newDumpCmd(),
c.newEditCmd(),
c.newEditConfigCmd(),
c.newEncryptCommand(),
c.newExecuteTemplateCmd(),
c.newForgetCmd(),
c.newGitCmd(),
c.newImportCmd(),
c.newInitCmd(),
c.newInternalTestCmd(),
c.newManagedCmd(),
c.newMergeCmd(),
c.newMergeAllCmd(),
c.newPurgeCmd(),
c.newReAddCmd(),
c.newRemoveCmd(),
c.newSecretCmd(),
c.newSourcePathCmd(),
c.newStateCmd(),
c.newStatusCmd(),
c.newUnmanagedCmd(),
c.newUpdateCmd(),
c.newUpgradeCmd(),
c.newVerifyCmd(),
} {
if cmd != nil {
rootCmd.AddCommand(cmd)
}
}
return rootCmd, nil
}
// newSourceState returns a new SourceState with options.
func (c *Config) newSourceState(
ctx context.Context, options ...chezmoi.SourceStateOption,
) (*chezmoi.SourceState, error) {
httpClient, err := c.getHTTPClient()
if err != nil {
return nil, err
}
sourceStateLogger := c.logger.With().Str(logComponentKey, logComponentValueSourceState).Logger()
versionAbsPath := c.SourceDirAbsPath.JoinString(chezmoi.VersionName)
switch data, err := c.baseSystem.ReadFile(versionAbsPath); {
case errors.Is(err, fs.ErrNotExist):
case err != nil:
return nil, err
default:
minVersion, err := semver.NewVersion(strings.TrimSpace(string(data)))
if err != nil {
return nil, fmt.Errorf("%s: %w", versionAbsPath, err)
}
var zeroVersion semver.Version
if c.version != zeroVersion && c.version.LessThan(*minVersion) {
return nil, &chezmoi.TooOldError{
Need: *minVersion,
Have: c.version,
}
}
}
c.SourceDirAbsPath, err = c.sourceDirAbsPath()
if err != nil {
return nil, err
}
s := chezmoi.NewSourceState(append([]chezmoi.SourceStateOption{
chezmoi.WithBaseSystem(c.baseSystem),
chezmoi.WithCacheDir(c.CacheDirAbsPath),
chezmoi.WithDefaultTemplateDataFunc(c.defaultTemplateData),
chezmoi.WithDestDir(c.DestDirAbsPath),
chezmoi.WithEncryption(c.encryption),
chezmoi.WithHTTPClient(httpClient),
chezmoi.WithInterpreters(c.Interpreters),
chezmoi.WithLogger(&sourceStateLogger),
chezmoi.WithMode(c.Mode),
chezmoi.WithPriorityTemplateData(c.Data),
chezmoi.WithSourceDir(c.SourceDirAbsPath),
chezmoi.WithSystem(c.sourceSystem),
chezmoi.WithTemplateFuncs(c.templateFuncs),
chezmoi.WithTemplateOptions(c.Template.Options),
chezmoi.WithVersion(c.version),
}, options...)...)
if err := s.Read(ctx, &chezmoi.ReadOptions{
RefreshExternals: c.refreshExternals,
}); err != nil {
return nil, err
}
return s, nil
}
// persistentPostRunRootE performs post-run actions for the root command.
func (c *Config) persistentPostRunRootE(cmd *cobra.Command, args []string) error {
if err := c.persistentState.Close(); err != nil {
return err
}
if boolAnnotation(cmd, modifiesConfigFile) {
// Warn the user of any errors reading the config file.
v := viper.New()
v.SetFs(afero.FromIOFS{FS: c.fileSystem})
v.SetConfigFile(c.configFileAbsPath.String())
err := v.ReadInConfig()
if err == nil {
err = v.Unmarshal(&Config{}, viperDecodeConfigOptions...)
}
if err != nil {
c.errorf("warning: %s: %v\n", c.configFileAbsPath, err)
}
}
if boolAnnotation(cmd, modifiesSourceDirectory) {
var status *git.Status
if c.Git.AutoAdd || c.Git.AutoCommit || c.Git.AutoPush {
var err error
status, err = c.gitAutoAdd()
if err != nil {
return err
}
}
if c.Git.AutoCommit || c.Git.AutoPush {
if err := c.gitAutoCommit(status); err != nil {
return err
}
}
if c.Git.AutoPush {
if err := c.gitAutoPush(status); err != nil {
return err
}
}
}
return nil
}
// pageOutputString writes output using cmdPager as the pager command.
func (c *Config) pageOutputString(output, cmdPager string) error {
pager := firstNonEmptyString(cmdPager, c.Pager)
if c.noPager || pager == "" {
return c.writeOutputString(output)
}
// If the pager command contains any spaces, assume that it is a full
// shell command to be executed via the user's shell. Otherwise, execute
// it directly.
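// For example (illustrative), with the pager set to "less -R" and an
// sh-compatible user shell, the command executed is `<shell> -c "less -R"`,
// with the output supplied on its standard input.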
var pagerCmd *exec.Cmd
if strings.IndexFunc(pager, unicode.IsSpace) != -1 {
shell, _ := shell.CurrentUserShell()
pagerCmd = exec.Command(shell, "-c", pager)
} else {
pagerCmd = exec.Command(pager)
}
pagerCmd.Stdin = bytes.NewBufferString(output)
pagerCmd.Stdout = c.stdout
pagerCmd.Stderr = c.stderr
return pagerCmd.Run()
}
// persistentPreRunRootE performs pre-run actions for the root command.
func (c *Config) persistentPreRunRootE(cmd *cobra.Command, args []string) error {
// Enable CPU profiling if configured.
if !c.cpuProfile.Empty() {
f, err := os.Create(c.cpuProfile.String())
if err != nil {
return err
}
if err := pprof.StartCPUProfile(f); err != nil {
return err
}
}
// Enable gops if configured.
if c.gops {
if err := agent.Listen(agent.Options{}); err != nil {
return err
}
}
// Read the config file.
if err := c.readConfig(); err != nil {
if !boolAnnotation(cmd, doesNotRequireValidConfig) {
return fmt.Errorf("invalid config: %s: %w", c.configFileAbsPath, err)
}
c.errorf("warning: %s: %v\n", c.configFileAbsPath, err)
}
// Determine whether color should be used.
color := c.Color.Value(c.colorAutoFunc)
if color {
if err := enableVirtualTerminalProcessing(c.stdout); err != nil {
return err
}
}
// Configure the logger.
log.Logger = log.Output(zerolog.NewConsoleWriter(
func(w *zerolog.ConsoleWriter) {
w.Out = c.stderr
w.NoColor = !color
w.TimeFormat = time.RFC3339
},
))
if c.debug {
zerolog.SetGlobalLevel(zerolog.InfoLevel)
} else {
zerolog.SetGlobalLevel(zerolog.Disabled)
}
c.logger = &log.Logger
// Log basic information.
c.logger.Info().
Object("version", c.versionInfo).
Strs("args", args).
Str("goVersion", runtime.Version()).
Msg("persistentPreRunRootE")
c.baseSystem = chezmoi.NewRealSystem(c.fileSystem,
chezmoi.RealSystemWithSafe(c.Safe),
)
if c.debug {
systemLogger := c.logger.With().Str(logComponentKey, logComponentValueSystem).Logger()
c.baseSystem = chezmoi.NewDebugSystem(c.baseSystem, &systemLogger)
}
// Set up the persistent state.
switch {
case cmd.Annotations[persistentStateMode] == persistentStateModeEmpty:
c.persistentState = chezmoi.NewMockPersistentState()
case cmd.Annotations[persistentStateMode] == persistentStateModeReadOnly:
persistentStateFileAbsPath, err := c.persistentStateFile()
if err != nil {
return err
}
c.persistentState, err = chezmoi.NewBoltPersistentState(
c.baseSystem, persistentStateFileAbsPath, chezmoi.BoltPersistentStateReadOnly,
)
if err != nil {
return err
}
case cmd.Annotations[persistentStateMode] == persistentStateModeReadMockWrite:
fallthrough
case cmd.Annotations[persistentStateMode] == persistentStateModeReadWrite && c.dryRun:
persistentStateFileAbsPath, err := c.persistentStateFile()
if err != nil {
return err
}
persistentState, err := chezmoi.NewBoltPersistentState(
c.baseSystem, persistentStateFileAbsPath, chezmoi.BoltPersistentStateReadOnly,
)
if err != nil {
return err
}
dryRunPersistentState := chezmoi.NewMockPersistentState()
if err := persistentState.CopyTo(dryRunPersistentState); err != nil {
return err
}
if err := persistentState.Close(); err != nil {
return err
}
c.persistentState = dryRunPersistentState
case cmd.Annotations[persistentStateMode] == persistentStateModeReadWrite:
persistentStateFileAbsPath, err := c.persistentStateFile()
if err != nil {
return err
}
c.persistentState, err = chezmoi.NewBoltPersistentState(
c.baseSystem, persistentStateFileAbsPath, chezmoi.BoltPersistentStateReadWrite,
)
if err != nil {
return err
}
default:
c.persistentState = chezmoi.NullPersistentState{}
}
if c.debug && c.persistentState != nil {
persistentStateLogger := c.logger.With().Str(logComponentKey, logComponentValuePersistentState).Logger()
c.persistentState = chezmoi.NewDebugPersistentState(c.persistentState, &persistentStateLogger)
}
// Set up the source and destination systems.
c.sourceSystem = c.baseSystem
c.destSystem = c.baseSystem
if !boolAnnotation(cmd, modifiesDestinationDirectory) {
c.destSystem = chezmoi.NewReadOnlySystem(c.destSystem)
}
if !boolAnnotation(cmd, modifiesSourceDirectory) {
c.sourceSystem = chezmoi.NewReadOnlySystem(c.sourceSystem)
}
if c.dryRun {
c.sourceSystem = chezmoi.NewDryRunSystem(c.sourceSystem)
c.destSystem = chezmoi.NewDryRunSystem(c.destSystem)
}
if c.Verbose {
c.sourceSystem = chezmoi.NewGitDiffSystem(c.sourceSystem, c.stdout, c.SourceDirAbsPath, &chezmoi.GitDiffSystemOptions{
Color: color,
Include: c.Diff.include.Sub(c.Diff.Exclude),
})
c.destSystem = chezmoi.NewGitDiffSystem(c.destSystem, c.stdout, c.DestDirAbsPath, &chezmoi.GitDiffSystemOptions{
Color: color,
Include: c.Diff.include.Sub(c.Diff.Exclude),
})
}
// Set up encryption.
switch c.Encryption {
case "age":
// Only use builtin age encryption if age encryption is explicitly
// specified. Otherwise, chezmoi would fall back to using age encryption
// (rather than no encryption) if age is not in $PATH, which leads to
// error messages from the builtin age instead of error messages about
// encryption not being configured.
c.Age.UseBuiltin = c.UseBuiltinAge.Value(c.useBuiltinAgeAutoFunc)
c.encryption = &c.Age
case "gpg":
c.encryption = &c.GPG
case "":
// Detect encryption if any non-default configuration is set, preferring
// gpg for backwards compatibility.
switch {
case !reflect.DeepEqual(c.GPG, defaultGPGEncryptionConfig):
c.encryption = &c.GPG
case !reflect.DeepEqual(c.Age, defaultAgeEncryptionConfig):
c.encryption = &c.Age
default:
c.encryption = chezmoi.NoEncryption{}
}
default:
return fmt.Errorf("%s: unknown encryption", c.Encryption)
}
if c.debug {
encryptionLogger := c.logger.With().Str(logComponentKey, logComponentValueEncryption).Logger()
c.encryption = chezmoi.NewDebugEncryption(c.encryption, &encryptionLogger)
}
// Create the config directory if needed.
if boolAnnotation(cmd, requiresConfigDirectory) {
if err := chezmoi.MkdirAll(c.baseSystem, c.configFileAbsPath.Dir(), 0o777); err != nil {
return err
}
}
// Create the source directory if needed.
if boolAnnotation(cmd, requiresSourceDirectory) {
if err := chezmoi.MkdirAll(c.baseSystem, c.SourceDirAbsPath, 0o777); err != nil {
return err
}
}
// Create the runtime directory if needed.
if boolAnnotation(cmd, runsCommands) {
if runtime.GOOS == "linux" && c.bds.RuntimeDir != "" {
// Snap sets the $XDG_RUNTIME_DIR environment variable to
// /run/user/$uid/snap.$snap_name, but does not create this
// directory. Consequently, any spawned processes that need
// $XDG_RUNTIME_DIR will fail. As a work-around, create the directory
// if it does not exist. See
// https://forum.snapcraft.io/t/wayland-dconf-and-xdg-runtime-dir/186/13.
if err := chezmoi.MkdirAll(c.baseSystem, chezmoi.NewAbsPath(c.bds.RuntimeDir), 0o700); err != nil {
return err
}
}
}
// Determine the working tree directory if it is not configured.
if c.WorkingTreeAbsPath.Empty() {
workingTreeAbsPath := c.SourceDirAbsPath
FOR:
for {
gitDirAbsPath := workingTreeAbsPath.JoinString(gogit.GitDirName)
if fileInfo, err := c.baseSystem.Stat(gitDirAbsPath); err == nil && fileInfo.IsDir() {
c.WorkingTreeAbsPath = workingTreeAbsPath
break FOR
}
prevWorkingTreeDirAbsPath := workingTreeAbsPath
workingTreeAbsPath = workingTreeAbsPath.Dir()
if workingTreeAbsPath == c.homeDirAbsPath || workingTreeAbsPath.Len() >= prevWorkingTreeDirAbsPath.Len() {
c.WorkingTreeAbsPath = c.SourceDirAbsPath
break FOR
}
}
}
// Create the working tree directory if needed.
if boolAnnotation(cmd, requiresWorkingTree) {
if _, err := c.SourceDirAbsPath.TrimDirPrefix(c.WorkingTreeAbsPath); err != nil {
return err
}
if err := chezmoi.MkdirAll(c.baseSystem, c.WorkingTreeAbsPath, 0o777); err != nil {
return err
}
}
return nil
}
// persistentStateFile returns the absolute path to the persistent state file,
// returning the first persistent state file found and falling back to the
// default location if none is found.
func (c *Config) persistentStateFile() (chezmoi.AbsPath, error) {
if !c.persistentStateAbsPath.Empty() {
return c.persistentStateAbsPath, nil
}
if !c.configFileAbsPath.Empty() {
return c.configFileAbsPath.Dir().Join(persistentStateFileRelPath), nil
}
for _, configDir := range c.bds.ConfigDirs {
configDirAbsPath, err := chezmoi.NewAbsPathFromExtPath(configDir, c.homeDirAbsPath)
if err != nil {
return chezmoi.EmptyAbsPath, err
}
persistentStateFile := configDirAbsPath.Join(chezmoiRelPath, persistentStateFileRelPath)
if _, err := os.Stat(persistentStateFile.String()); err == nil {
return persistentStateFile, nil
}
}
defaultConfigFileAbsPath, err := c.defaultConfigFile(c.fileSystem, c.bds)
if err != nil {
return chezmoi.EmptyAbsPath, err
}
return defaultConfigFileAbsPath.Dir().Join(persistentStateFileRelPath), nil
}
// promptChoice prompts the user for one of choices until a valid choice is made.
func (c *Config) promptChoice(prompt string, choices []string) (string, error) {
promptWithChoices := fmt.Sprintf("%s [%s]? ", prompt, strings.Join(choices, ","))
abbreviations := uniqueAbbreviations(choices)
for {
line, err := c.readLine(promptWithChoices)
if err != nil {
return "", err
}
if value, ok := abbreviations[strings.TrimSpace(line)]; ok {
return value, nil
}
}
}
// readConfig reads the config file, if it exists.
func (c *Config) readConfig() error {
viper.SetConfigFile(c.configFileAbsPath.String())
if c.configFormat != "" {
viper.SetConfigType(c.configFormat.String())
}
viper.SetFs(afero.FromIOFS{FS: c.fileSystem})
switch err := viper.ReadInConfig(); {
case errors.Is(err, fs.ErrNotExist):
return nil
case err != nil:
return err
}
if err := viper.Unmarshal(c, viperDecodeConfigOptions...); err != nil {
return err
}
return c.validateData()
}
// readLine reads a line from stdin, trimming leading and trailing whitespace.
func (c *Config) readLine(prompt string) (string, error) {
_, err := c.stdout.Write([]byte(prompt))
if err != nil {
return "", err
}
line, err := bufio.NewReader(c.stdin).ReadString('\n')
if err != nil {
return "", err
}
return strings.TrimSpace(line), nil
}
// run runs name with args in dir.
func (c *Config) run(dir chezmoi.AbsPath, name string, args []string) error {
cmd := exec.Command(name, args...)
if !dir.Empty() {
dirRawAbsPath, err := c.baseSystem.RawPath(dir)
if err != nil {
return err
}
cmd.Dir = dirRawAbsPath.String()
}
cmd.Stdin = c.stdin
cmd.Stdout = c.stdout
cmd.Stderr = c.stderr
return c.baseSystem.RunCmd(cmd)
}
// runEditor runs the configured editor with args.
func (c *Config) runEditor(args []string) error {
if err := c.persistentState.Close(); err != nil {
return err
}
editor, editorArgs := c.editor(args)
start := time.Now()
err := c.run(chezmoi.EmptyAbsPath, editor, editorArgs)
if runtime.GOOS != "windows" && c.Edit.MinDuration != 0 {
if duration := time.Since(start); duration < c.Edit.MinDuration {
c.errorf("warning: %s: returned in less than %s\n", shellQuoteCommand(editor, editorArgs), c.Edit.MinDuration)
}
}
return err
}
// sourceAbsPaths returns the source absolute paths for each target path in
// args.
func (c *Config) sourceAbsPaths(sourceState *chezmoi.SourceState, args []string) ([]chezmoi.AbsPath, error) {
targetRelPaths, err := c.targetRelPaths(sourceState, args, targetRelPathsOptions{
mustBeInSourceState: true,
})
if err != nil {
return nil, err
}
sourceAbsPaths := make([]chezmoi.AbsPath, 0, len(targetRelPaths))
for _, targetRelPath := range targetRelPaths {
sourceAbsPath := c.SourceDirAbsPath.Join(sourceState.MustEntry(targetRelPath).SourceRelPath().RelPath())
sourceAbsPaths = append(sourceAbsPaths, sourceAbsPath)
}
return sourceAbsPaths, nil
}
// sourceDirAbsPath returns the source directory, using .chezmoiroot if it
// exists.
func (c *Config) sourceDirAbsPath() (chezmoi.AbsPath, error) {
switch data, err := c.sourceSystem.ReadFile(c.SourceDirAbsPath.JoinString(chezmoi.RootName)); {
case errors.Is(err, fs.ErrNotExist):
return c.SourceDirAbsPath, nil
case err != nil:
return chezmoi.EmptyAbsPath, err
default:
return c.SourceDirAbsPath.JoinString(string(bytes.TrimSpace(data))), nil
}
}
type targetRelPathsOptions struct {
mustBeInSourceState bool
recursive bool
}
// targetRelPaths returns the target relative paths for each target path in
// args. The returned paths are sorted and de-duplicated.
func (c *Config) targetRelPaths(
sourceState *chezmoi.SourceState, args []string, options targetRelPathsOptions,
) (chezmoi.RelPaths, error) {
targetRelPaths := make(chezmoi.RelPaths, 0, len(args))
for _, arg := range args {
argAbsPath, err := chezmoi.NewAbsPathFromExtPath(arg, c.homeDirAbsPath)
if err != nil {
return nil, err
}
targetRelPath, err := argAbsPath.TrimDirPrefix(c.DestDirAbsPath)
if err != nil {
return nil, err
}
if options.mustBeInSourceState {
if !sourceState.Contains(targetRelPath) {
return nil, fmt.Errorf("%s: not in source state", arg)
}
}
targetRelPaths = append(targetRelPaths, targetRelPath)
if options.recursive {
parentRelPath := targetRelPath
// FIXME we should not call s.TargetRelPaths() here - risk of
// accidentally quadratic
for _, targetRelPath := range sourceState.TargetRelPaths() {
if _, err := targetRelPath.TrimDirPrefix(parentRelPath); err == nil {
targetRelPaths = append(targetRelPaths, targetRelPath)
}
}
}
}
if len(targetRelPaths) == 0 {
return nil, nil
}
// Sort and de-duplicate targetRelPaths in place.
sort.Sort(targetRelPaths)
n := 1
for i := 1; i < len(targetRelPaths); i++ {
if targetRelPaths[i] != targetRelPaths[i-1] {
targetRelPaths[n] = targetRelPaths[i]
n++
}
}
return targetRelPaths[:n], nil
}
// targetRelPathsBySourcePath returns the target relative paths for each arg in
// args.
func (c *Config) targetRelPathsBySourcePath(
sourceState *chezmoi.SourceState, args []string,
) ([]chezmoi.RelPath, error) {
targetRelPaths := make([]chezmoi.RelPath, 0, len(args))
targetRelPathsBySourceRelPath := make(map[chezmoi.RelPath]chezmoi.RelPath)
_ = sourceState.ForEach(func(targetRelPath chezmoi.RelPath, sourceStateEntry chezmoi.SourceStateEntry) error {
sourceRelPath := sourceStateEntry.SourceRelPath().RelPath()
targetRelPathsBySourceRelPath[sourceRelPath] = targetRelPath
return nil
})
for _, arg := range args {
argAbsPath, err := chezmoi.NewAbsPathFromExtPath(arg, c.homeDirAbsPath)
if err != nil {
return nil, err
}
sourceRelPath, err := argAbsPath.TrimDirPrefix(c.SourceDirAbsPath)
if err != nil {
return nil, err
}
targetRelPath, ok := targetRelPathsBySourceRelPath[sourceRelPath]
if !ok {
return nil, fmt.Errorf("%s: not in source state", arg)
}
targetRelPaths = append(targetRelPaths, targetRelPath)
}
return targetRelPaths, nil
}
// tempDir returns the temporary directory for the given key, creating it if
// needed.
func (c *Config) tempDir(key string) (chezmoi.AbsPath, error) {
if tempDirAbsPath, ok := c.tempDirs[key]; ok {
return tempDirAbsPath, nil
}
tempDir, err := os.MkdirTemp("", key)
c.logger.Err(err).
Str("tempDir", tempDir).
Msg("MkdirTemp")
if err != nil {
return chezmoi.EmptyAbsPath, err
}
tempDirAbsPath := chezmoi.NewAbsPath(tempDir)
c.tempDirs[key] = tempDirAbsPath
if runtime.GOOS != "windows" {
if err := os.Chmod(tempDir, 0o700); err != nil {
return chezmoi.EmptyAbsPath, err
}
}
return tempDirAbsPath, nil
}
// useBuiltinAgeAutoFunc detects whether the builtin age should be used.
func (c *Config) useBuiltinAgeAutoFunc() bool {
if _, err := exec.LookPath(c.Age.Command); err == nil {
return false
}
return true
}
// useBuiltinGitAutoFunc detects whether the builtin git should be used.
func (c *Config) useBuiltinGitAutoFunc() bool {
// useBuiltinGit is false by default on Solaris as it uses the unavailable
// flock function.
if runtime.GOOS == "solaris" {
return false
}
if _, err := exec.LookPath(c.Git.Command); err == nil {
return false
}
return true
}
// validateData validates that the config data does not contain any invalid keys.
func (c *Config) validateData() error {
return validateKeys(c.Data, identifierRx)
}
// writeOutput writes data to the configured output.
func (c *Config) writeOutput(data []byte) error {
if c.outputAbsPath.Empty() || c.outputAbsPath == chezmoi.NewAbsPath("-") {
_, err := c.stdout.Write(data)
return err
}
return c.baseSystem.WriteFile(c.outputAbsPath, data, 0o666)
}
// writeOutputString writes data to the configured output.
func (c *Config) writeOutputString(data string) error {
return c.writeOutput([]byte(data))
}
// withVersionInfo sets the version information.
func withVersionInfo(versionInfo VersionInfo) configOption {
return func(c *Config) error {
var version *semver.Version
var versionElems []string
if versionInfo.Version != "" {
var err error
version, err = semver.NewVersion(strings.TrimPrefix(versionInfo.Version, "v"))
if err != nil {
return err
}
versionElems = append(versionElems, "v"+version.String())
} else {
versionElems = append(versionElems, "dev")
}
if versionInfo.Commit != "" {
versionElems = append(versionElems, "commit "+versionInfo.Commit)
}
if versionInfo.Date != "" {
date := versionInfo.Date
if sec, err := strconv.ParseInt(date, 10, 64); err == nil {
date = time.Unix(sec, 0).UTC().Format(time.RFC3339)
}
versionElems = append(versionElems, "built at "+date)
}
if versionInfo.BuiltBy != "" {
versionElems = append(versionElems, "built by "+versionInfo.BuiltBy)
}
if version != nil {
c.version = *version
}
c.versionInfo = versionInfo
c.versionStr = strings.Join(versionElems, ", ")
return nil
}
}
|
[
"\"PAGER\"",
"\"VISUAL\"",
"\"EDITOR\""
] |
[] |
[
"PAGER",
"VISUAL",
"EDITOR"
] |
[]
|
["PAGER", "VISUAL", "EDITOR"]
|
go
| 3 | 0 | |
setup_and_seed.py
|
import django, os, sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "techtest.settings")
sys.path.append(os.path.join(os.path.realpath(os.path.dirname(__file__)), "..", ".."))
django.setup()
from techtest.articles.models import Article
from techtest.regions.models import Region
from django.core import management
# Migrate
management.call_command("migrate", no_input=True)
# Seed
Article.objects.create(title="Fake Article", content="Fake Content").regions.set(
[
Region.objects.create(code="AL", name="Albania"),
Region.objects.create(code="UK", name="United Kingdom"),
]
)
Article.objects.create(title="Fake Article", content="Fake Content")
Article.objects.create(title="Fake Article", content="Fake Content")
Article.objects.create(title="Fake Article", content="Fake Content")
Article.objects.create(title="Fake Article", content="Fake Content").regions.set(
[
Region.objects.create(code="AU", name="Austria"),
Region.objects.create(code="US", name="United States of America"),
]
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
common/common.go
|
package common
import (
"bytes"
"errors"
"fmt"
"os"
"regexp"
"io/ioutil"
"strconv"
"strings"
"path/filepath"
"encoding/json"
)
const (
TEMPLATED_STRING_REGEXP = `\{\{\.[[:alnum:][:punct:][:print:]]+?\}\}`
INTERPOLATED_STRING_REGEXP = `%(?:[#v]|[%EGUTXbcdefgopqstvx])`
)
var templatedStringRegexp, interpolatedStringRegexp *regexp.Regexp
func ParseStringList(stringList string, delimiter string) []string {
stringArray := strings.Split(stringList, delimiter)
var parsedStrings []string
for _, aString := range stringArray {
if aString != "" {
parsedStrings = append(parsedStrings, strings.Trim(strings.Trim(aString, " "), "\""))
}
}
return parsedStrings
}
func CreateTmpFile(content string) (*os.File, error) {
tmpFile, err := ioutil.TempFile("", "")
if err != nil {
return nil, err
}
ioutil.WriteFile(tmpFile.Name(), []byte(content), 0666)
return tmpFile, nil
}
func CheckFile(fileName string) (string, string, error) {
fileInfo, err := os.Stat(fileName)
if err != nil {
return "", "", err
}
if !fileInfo.Mode().IsRegular() {
return "", "", fmt.Errorf("i18n4go: Non-regular source file %s (%s)\n", fileInfo.Name(), fileInfo.Mode().String())
}
return filepath.Base(fileName), filepath.Dir(fileName), nil
}
func CopyFileContents(src, dst string) error {
err := CreateOutputDirsIfNeeded(filepath.Dir(dst))
if err != nil {
return err
}
byteArray, err := ioutil.ReadFile(src)
if err != nil {
return err
}
return ioutil.WriteFile(dst, byteArray, 0644)
}
func GetAbsFileInfo(fileNamePath string) (os.FileInfo, error) {
var absFilePath = fileNamePath
if !filepath.IsAbs(absFilePath) {
absFilePath = filepath.Join(os.Getenv("PWD"), absFilePath)
}
file, err := os.OpenFile(absFilePath, os.O_RDONLY, 0)
defer file.Close()
if err != nil {
return nil, err
}
return file.Stat()
}
func FindFilePath(filename string) (string, error) {
fileInfo, err := os.Stat(filename)
if err != nil {
return "", err
}
path := filename[0 : len(filename)-len(fileInfo.Name())]
return path, nil
}
func CreateOutputDirsIfNeeded(outputDirname string) error {
_, err := os.Stat(outputDirname)
if os.IsNotExist(err) {
err = os.MkdirAll(outputDirname, 0755)
if err != nil {
return err
}
}
return nil
}
func UnescapeHTML(byteArray []byte) []byte {
byteArray = bytes.Replace(byteArray, []byte("\\u003c"), []byte("<"), -1)
byteArray = bytes.Replace(byteArray, []byte("\\u003e"), []byte(">"), -1)
byteArray = bytes.Replace(byteArray, []byte("\\u0026"), []byte("&"), -1)
return byteArray
}
func SaveStrings(printer PrinterInterface, options Options, stringInfos map[string]StringInfo, outputDirname string, fileName string) error {
if !options.DryRunFlag {
err := CreateOutputDirsIfNeeded(outputDirname)
if err != nil {
printer.Println(err)
return err
}
}
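	// Convert the string-info map into i18n entries; both ID and Translation start out as the extracted string value.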
i18nStringInfos := make([]I18nStringInfo, len(stringInfos))
i := 0
for _, stringInfo := range stringInfos {
i18nStringInfos[i] = I18nStringInfo{ID: stringInfo.Value, Translation: stringInfo.Value}
i++
}
jsonData, err := json.MarshalIndent(i18nStringInfos, "", " ")
if err != nil {
printer.Println(err)
return err
}
jsonData = UnescapeHTML(jsonData)
outputFilename := filepath.Join(outputDirname, fileName[strings.LastIndex(fileName, string(os.PathSeparator))+1:len(fileName)])
if len(stringInfos) != 0 {
printer.Println("Saving extracted i18n strings to file:", outputFilename)
}
if !options.DryRunFlag && len(i18nStringInfos) != 0 {
file, err := os.Create(outputFilename)
defer file.Close()
if err != nil {
printer.Println(err)
return err
}
file.Write(jsonData)
}
return nil
}
func SaveStringsInPo(printer PrinterInterface, options Options, stringInfos map[string]StringInfo, outputDirname string, fileName string) error {
if len(stringInfos) != 0 {
printer.Println("Creating and saving i18n strings to .po file:", fileName)
}
if !options.DryRunFlag && len(stringInfos) != 0 {
err := CreateOutputDirsIfNeeded(outputDirname)
if err != nil {
printer.Println(err)
return err
}
file, err := os.Create(filepath.Join(outputDirname, fileName[strings.LastIndex(fileName, string(os.PathSeparator))+1:len(fileName)]))
defer file.Close()
if err != nil {
printer.Println(err)
return err
}
for _, stringInfo := range stringInfos {
file.Write([]byte("# filename: " + strings.Split(fileName, ".en.po")[0] +
", offset: " + strconv.Itoa(stringInfo.Offset) +
", line: " + strconv.Itoa(stringInfo.Line) +
", column: " + strconv.Itoa(stringInfo.Column) + "\n"))
file.Write([]byte("msgid " + strconv.Quote(stringInfo.Value) + "\n"))
file.Write([]byte("msgstr " + strconv.Quote(stringInfo.Value) + "\n"))
file.Write([]byte("\n"))
}
}
return nil
}
func SaveI18nStringsInPo(printer PrinterInterface, options Options, i18nStrings []I18nStringInfo, fileName string) error {
printer.Println("i18n4go: creating and saving i18n strings to .po file:", fileName)
if !options.DryRunFlag && len(i18nStrings) != 0 {
file, err := os.Create(fileName)
defer file.Close()
if err != nil {
printer.Println(err)
return err
}
for _, stringInfo := range i18nStrings {
file.Write([]byte("msgid " + strconv.Quote(stringInfo.ID) + "\n"))
file.Write([]byte("msgstr " + strconv.Quote(stringInfo.Translation) + "\n"))
file.Write([]byte("\n"))
}
}
return nil
}
func SaveI18nStringInfos(printer PrinterInterface, options Options, i18nStringInfos []I18nStringInfo, fileName string) error {
jsonData, err := json.MarshalIndent(i18nStringInfos, "", " ")
if err != nil {
printer.Println(err)
return err
}
jsonData = UnescapeHTML(jsonData)
if !options.DryRunFlag && len(i18nStringInfos) != 0 {
err := ioutil.WriteFile(fileName, jsonData, 0644)
if err != nil {
printer.Println(err)
return err
}
}
return nil
}
func LoadI18nStringInfos(fileName string) ([]I18nStringInfo, error) {
_, err := os.Stat(fileName)
if os.IsNotExist(err) {
return nil, err
}
content, err := ioutil.ReadFile(fileName)
if err != nil {
return nil, err
}
var i18nStringInfos []I18nStringInfo
err = json.Unmarshal(content, &i18nStringInfos)
if err != nil {
return nil, err
}
return i18nStringInfos, nil
}
func CreateI18nStringInfoMap(i18nStringInfos []I18nStringInfo) (map[string]I18nStringInfo, error) {
inputMap := make(map[string]I18nStringInfo, len(i18nStringInfos))
for _, i18nStringInfo := range i18nStringInfos {
if _, ok := inputMap[i18nStringInfo.ID]; !ok {
inputMap[i18nStringInfo.ID] = i18nStringInfo
} else {
return nil, errors.New("Duplicated key found: " + i18nStringInfo.ID)
}
}
return inputMap, nil
}
func CopyI18nStringInfoMap(i18nStringInfoMap map[string]I18nStringInfo) map[string]I18nStringInfo {
copyMap := make(map[string]I18nStringInfo, len(i18nStringInfoMap))
for key, value := range i18nStringInfoMap {
copyMap[key] = value
}
return copyMap
}
func GetTemplatedStringArgs(aString string) []string {
re, err := getTemplatedStringRegexp()
if err != nil {
fmt.Printf("i18n4go: Error compiling templated string Regexp: %s\n", err.Error())
return []string{}
}
matches := re.FindAllStringSubmatch(aString, -1)
var stringMatches []string
for _, match := range matches {
stringMatch := match[0]
stringMatch = stringMatch[3 : len(stringMatch)-2]
stringMatches = append(stringMatches, stringMatch)
}
return stringMatches
}
func IsTemplatedString(aString string) bool {
re, err := getTemplatedStringRegexp()
if err != nil {
fmt.Printf("i18n4go: Error compiling templated string Regexp: %s\n", err.Error())
return false
}
return re.Match([]byte(aString))
}
func IsInterpolatedString(aString string) bool {
re, err := getInterpolatedStringRegexp()
if err != nil {
fmt.Printf("i18n4go: Error compiling interpolated string Regexp: %s\n", err.Error())
return false
}
return re.Match([]byte(aString))
}
func ConvertToTemplatedString(aString string) string {
if !IsInterpolatedString(aString) {
return aString
}
re, err := getInterpolatedStringRegexp()
if err != nil {
fmt.Printf("i18n4go: Error compiling interpolated string Regexp: %s\n", err.Error())
return ""
}
matches := re.FindAllStringSubmatch(aString, -1)
templatedString := aString
for i, match := range matches {
argName := "{{.Arg" + strconv.Itoa(i) + "}}"
templatedString = strings.Replace(templatedString, match[0], argName, 1)
}
return templatedString
}
func I18nStringInfoMapValues2Array(i18nStringInfosMap map[string]I18nStringInfo) []I18nStringInfo {
var i18nStringInfos []I18nStringInfo
for _, i18nStringInfo := range i18nStringInfosMap {
i18nStringInfos = append(i18nStringInfos, i18nStringInfo)
}
return i18nStringInfos
}
// Private
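// getTemplatedStringRegexp lazily compiles and caches the templated-string regexp on first use.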
func getTemplatedStringRegexp() (*regexp.Regexp, error) {
var err error
if templatedStringRegexp == nil {
templatedStringRegexp, err = regexp.Compile(TEMPLATED_STRING_REGEXP)
}
return templatedStringRegexp, err
}
func getInterpolatedStringRegexp() (*regexp.Regexp, error) {
var err error
if interpolatedStringRegexp == nil {
interpolatedStringRegexp, err = regexp.Compile(INTERPOLATED_STRING_REGEXP)
}
return interpolatedStringRegexp, err
}
|
[
"\"PWD\""
] |
[] |
[
"PWD"
] |
[]
|
["PWD"]
|
go
| 1 | 0 | |
tests/utils/interactive_python.py
|
"""
A script that simulates a Python shell and accepts arbitrary commands to
execute. For use by service tests.
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import os
os.environ["FIFTYONE_DISABLE_SERVICES"] = "1"
from fiftyone.service.ipc import IPCServer
env = {}
def handle_message(message):
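    # Compile as an expression first so its value can be returned; fall back to exec-mode compilation for statements.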
try:
code = compile(message, "", "eval")
except SyntaxError:
code = compile(message, "", "exec")
return eval(code, env)
IPCServer(handle_message).serve_forever()
|
[] |
[] |
[
"FIFTYONE_DISABLE_SERVICES"
] |
[]
|
["FIFTYONE_DISABLE_SERVICES"]
|
python
| 1 | 0 | |
src/run_joint_confidence_cdcOriginalGan.py
|
##############################################
# This code is based on samples from pytorch #
##############################################
# Writer: Kimin Lee
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import data_loader
import numpy as np
import torchvision.utils as vutils
import models
from torchvision import datasets, transforms
from torch.autograd import Variable
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "5"
# Training settings
parser = argparse.ArgumentParser(description='Training code - joint confidence')
parser.add_argument('--batch-size', type=int, default=128, help='input batch size for training')
parser.add_argument('--epochs', type=int, default=100, help='number of epochs to train')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--log-interval', type=int, default=100,
help='how many batches to wait before logging training status')
parser.add_argument('--dataset', default='mnist', help='cifar10 | svhn')
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--imageSize', type=int, default=32, help='the height / width of the input image to network')
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--wd', type=float, default=0.0, help='weight decay')
parser.add_argument('--droprate', type=float, default=0.1, help='learning rate decay')
parser.add_argument('--decreasing_lr', default='60', help='decreasing strategy')
parser.add_argument('--num_classes', type=int, default=10, help='the # of classes')
parser.add_argument('--beta', type=float, default=1, help='penalty parameter for KL term')
args = parser.parse_args()
if args.dataset == 'cifar10':
args.beta = 0.1
args.batch_size = 64
print(args)
args.cuda = not args.no_cuda and torch.cuda.is_available()
print("Random Seed: ", args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
print('load data: ', args.dataset)
if args.dataset=='mnist':
transform = transforms.Compose([
transforms.Scale(32),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('data', train=True, download=True, transform=transform),
batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('data', train=False, download=True, transform=transform),
batch_size=128, shuffle=True)
else:
train_loader, test_loader = data_loader.getTargetDataSet(args.dataset, args.batch_size, args.imageSize, args.dataroot)
transform = transforms.Compose([
transforms.Scale(32),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
train_loader_mnist = torch.utils.data.DataLoader(
datasets.MNIST('data', train=True, download=True, transform=transform),
batch_size=128, shuffle=True)
print('Load model')
model = models.vgg13()
print(model)
print('load GAN')
nz = 100
G = models.cdcOriginalGenerator(1, nz, 64, 3) # ngpu, nz, ngf, nc
D = models.cdcOriginalDiscriminator(1, 3, 64) # ngpu, nc, ndf
G.weight_init(mean=0.0, std=0.02)
D.weight_init(mean=0.0, std=0.02)
# Initial setup for GAN
real_label = 1
fake_label = 0
criterion = nn.BCELoss()
nz = 100
print('Setup optimizer')
lr = 0.0002
batch_size = 128
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
G_optimizer = optim.Adam(G.parameters(), lr=lr, betas=(0.5, 0.999))
D_optimizer = optim.Adam(D.parameters(), lr=lr, betas=(0.5, 0.999))
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
onehot = torch.zeros(10, 10).cuda()
onehot = onehot.scatter_(1, torch.cuda.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).view(10, 1), 1).view(10, 10, 1, 1)
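# onehot maps a class index to a (10, 1, 1) one-hot label tensor used as the generator's conditioning input.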
img_size = 32
num_labels = 10
fraction = 1
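# fill maps a class index to per-class label planes (ones in that class's channel, zeros elsewhere) used to condition the discriminator.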
fill = torch.zeros([num_labels, num_labels, img_size // fraction, img_size // fraction]).cuda()
for i in range(num_labels):
fill[i, i, :, :] = 1
fill = fill.cuda()
# os.environ["CUDA_LAUNCH_BLOCKING"]="1"
# Binary Cross Entropy loss
BCE_loss = nn.BCELoss()
# fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1)
fixed_noise = torch.randn((64, 100)).view(-1, 100, 1, 1)
fixed_label = None
if args.cuda:
model.cuda()
D.cuda()
G.cuda()
criterion.cuda()
fixed_noise = fixed_noise.cuda()
first = True
def train(epoch):
model.train()
# D_train_loss = 0
# G_train_loss = 3
trg = 0
trd = 0
i = 0
for batch_idx, (data, y_labels) in enumerate(train_loader):
uniform_dist = torch.Tensor(data.size(0), args.num_classes).fill_((1. / args.num_classes)).cuda()
x_ = data.cuda()
assert x_[0, :, :, :].shape == (3, 32, 32)
global first
if first:
global fixed_noise
global fixed_label
first = False
fixed_label = onehot[y_labels.squeeze()[:64]]
print("saving fixed_label!")
vutils.save_image(data[:64],
'{}/{}jointConfidencerealReference{}.png'.format(args.outf, args.dataset, epoch),
normalize=True)
# train discriminator D
D.zero_grad()
y_ = y_labels
mini_batch = x_.size()[0]
y_real_ = torch.ones(mini_batch)
y_fake_ = torch.zeros(mini_batch)
y_real_, y_fake_ = Variable(y_real_.cuda()), Variable(y_fake_.cuda())
y_fill_ = fill[y_.squeeze().tolist()]
# y_fill_ = fill[y_]
assert y_fill_[0, y_.squeeze().tolist()[0], :, :].sum() == (img_size / fraction) ** 2
assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch
x_, y_fill_ = Variable(x_.cuda()), Variable(y_fill_.cuda())
D_result = D(x_, y_fill_).squeeze()
D_real_loss = BCE_loss(D_result, y_real_)
z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1)
y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.LongTensor).squeeze()
y_label_ = onehot[y_]
y_fill_ = fill[y_]
assert y_label_[0, y_[0]] == 1
assert y_label_.shape == (mini_batch, 10, 1, 1)
assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2
assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch
z_, y_label_, y_fill_ = Variable(z_.cuda()), Variable(y_label_.cuda()), Variable(y_fill_.cuda())
G_result = G(z_, y_label_)
D_result = D(G_result, y_fill_).squeeze()
D_fake_loss = BCE_loss(D_result, y_fake_)
D_fake_score = D_result.data.mean()
D_train_loss = D_real_loss + D_fake_loss
trg += 1
if D_train_loss > .1:
trd += 1
D_train_loss.backward()
D_optimizer.step()
# D_losses.append(D_train_loss.item())
# train generator G
G.zero_grad()
z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1)
y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.LongTensor).squeeze()
y_label_ = onehot[y_]
y_fill_ = fill[y_]
z_, y_label_, y_fill_ = Variable(z_.cuda()), Variable(y_label_.cuda()), Variable(y_fill_.cuda())
assert y_label_[0, y_[0]] == 1
assert y_label_.shape == (mini_batch, 10, 1, 1)
assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2
assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch
G_result = G(z_, y_label_)
D_result = D(G_result, y_fill_).squeeze()
G_train_loss = BCE_loss(D_result, y_real_)
# minimize the true distribution
KL_fake_output = F.log_softmax(model(G_result))
errG_KL = F.kl_div(KL_fake_output, uniform_dist) * args.num_classes
generator_loss = G_train_loss + args.beta * errG_KL # 12.0, .65, 0e-8
generator_loss.backward()
G_optimizer.step()
# G_losses.append(G_train_loss.item())
###########################
# (3) Update classifier #
###########################
# cross entropy loss
optimizer.zero_grad()
x_ = Variable(x_)
output = F.log_softmax(model(x_))
loss = F.nll_loss(output.cuda(), y_labels.type(torch.cuda.LongTensor).squeeze())
# KL divergence
####
z_ = torch.randn((data.shape[0], 100)).view(-1, 100, 1, 1).cuda()
y_ = (torch.rand(data.shape[0], 1) * num_labels).type(torch.LongTensor).squeeze().cuda()
y_label_ = onehot[y_]
y_fill_ = fill[y_]
assert y_label_[0, y_[0]] == 1
assert y_label_.shape == (data.shape[0], 10, 1, 1)
assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2
assert y_fill_.sum() == (img_size / fraction) ** 2 * data.shape[0]
G_result = G(z_, y_label_)
# !!!#D_result = D(G_result, y_fill_).squeeze()
####
KL_fake_output = F.log_softmax(model(G_result))
KL_loss_fake = F.kl_div(KL_fake_output, uniform_dist) * args.num_classes
total_loss = loss + args.beta * KL_loss_fake
# total_loss = loss
total_loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print(
"Epoch {} , Descriminator loss {:.6f} Generator loss {:.6f} traingenerator {:.6f} traindiscriminator {:.6f}".format(
epoch, D_train_loss, G_train_loss, trg, trd))
print('Classification Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, KL fake Loss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data.item(), KL_loss_fake.data.item()))
# print('Classification Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, KL fake Loss: {:.6f}'.format(
# epoch, batch_idx * len(data), len(train_loader.dataset),
# 100. * batch_idx / len(train_loader), loss.data.item(), KL_loss_fake.data.item()))
fake = G(fixed_noise.cuda(), fixed_label)
vutils.save_image(fake.data, '%s/MNISTcDCgan_samples_epoch_%03d.png' % (args.outf, epoch), normalize=True)
def test(epoch):
model.eval()
test_loss = 0
correct = 0
total = 0
for data, target in test_loader:
total += data.size(0)
if args.cuda:
data, target = data.cuda(), target.cuda()
# data, target = Variable(data, volatile=True), Variable(target)
output = F.log_softmax(model(data))
target = target.type(
torch.LongTensor) # https://discuss.pytorch.org/t/runtimeerror-multi-target-not-supported-newbie/10216/4
if args.cuda:
output = output.cuda()
target = target.cuda()
target = torch.squeeze(target)
test_loss += F.nll_loss(output, target).data.item()
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data).cpu().sum()
test_loss = test_loss
test_loss /= len(test_loader) # loss function already averages over batch size
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, total,
100. * correct / total))
for epoch in range(1, args.epochs + 1):
train(epoch)
test(epoch)
if epoch in decreasing_lr:
G_optimizer.param_groups[0]['lr'] *= args.droprate
D_optimizer.param_groups[0]['lr'] *= args.droprate
optimizer.param_groups[0]['lr'] *= args.droprate
if epoch % 20 == 0:
# do checkpointing
torch.save(G.state_dict(), '%s/netG_epoch_%d.pth' % (args.outf, epoch))
torch.save(D.state_dict(), '%s/netD_epoch_%d.pth' % (args.outf, epoch))
torch.save(model.state_dict(), '%s/model_epoch_%d.pth' % (args.outf, epoch))
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES",
"CUDA_LAUNCH_BLOCKING"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES", "CUDA_LAUNCH_BLOCKING"]
|
python
| 3 | 0 | |
src/bin/asr_recog.py
|
#!/usr/bin/env python
# encoding: utf-8
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import logging
import os
import random
import sys
import numpy as np
def main():
parser = argparse.ArgumentParser()
# general configuration
parser.add_argument('--ngpu', default=0, type=int,
help='Number of GPUs')
parser.add_argument('--backend', default='chainer', type=str,
choices=['chainer', 'pytorch'],
help='Backend library')
parser.add_argument('--debugmode', default=1, type=int,
help='Debugmode')
parser.add_argument('--seed', default=1, type=int,
help='Random seed')
parser.add_argument('--verbose', '-V', default=1, type=int,
help='Verbose option')
parser.add_argument('--batchsize', default=1, type=int,
help='Batch size for beam search')
# task related
parser.add_argument('--recog-json', type=str,
help='Filename of recognition data (json)')
parser.add_argument('--result-label', type=str, required=True,
help='Filename of result label data (json)')
# model (parameter) related
parser.add_argument('--model', type=str, required=True,
help='Model file parameters to read')
parser.add_argument('--model-conf', type=str, default=None,
help='Model config file')
# search related
parser.add_argument('--nbest', type=int, default=1,
help='Output N-best hypotheses')
parser.add_argument('--beam-size', type=int, default=1,
help='Beam size')
parser.add_argument('--penalty', default=0.0, type=float,
                        help='Insertion penalty')
parser.add_argument('--maxlenratio', default=0.0, type=float,
help="""Input length ratio to obtain max output length.
                        If maxlenratio=0.0 (default), it uses an end-detect function
to automatically find maximum hypothesis lengths""")
parser.add_argument('--minlenratio', default=0.0, type=float,
help='Input length ratio to obtain min output length')
parser.add_argument('--ctc-weight', default=0.0, type=float,
help='CTC weight in joint decoding')
# rnnlm related
parser.add_argument('--rnnlm', type=str, default=None,
help='RNNLM model file to read')
parser.add_argument('--rnnlm-conf', type=str, default=None,
help='RNNLM model config file to read')
parser.add_argument('--word-rnnlm', type=str, default=None,
help='Word RNNLM model file to read')
parser.add_argument('--word-rnnlm-conf', type=str, default=None,
help='Word RNNLM model config file to read')
parser.add_argument('--word-dict', type=str, default=None,
help='Word list to read')
parser.add_argument('--lm-weight', default=0.1, type=float,
help='RNNLM weight.')
args = parser.parse_args()
# logging info
if args.verbose == 1:
logging.basicConfig(
level=logging.INFO, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
else:
logging.basicConfig(
level=logging.WARN, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
logging.warning("Skip DEBUG/INFO messages")
# check CUDA_VISIBLE_DEVICES
if args.ngpu > 0:
cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
if cvd is None:
logging.warn("CUDA_VISIBLE_DEVICES is not set.")
elif args.ngpu != len(cvd.split(",")):
logging.error("#gpus is not matched with CUDA_VISIBLE_DEVICES.")
sys.exit(1)
# TODO(mn5k): support of multiple GPUs
if args.ngpu > 1:
logging.error("The program only supports ngpu=1.")
sys.exit(1)
# display PYTHONPATH
logging.info('python path = ' + os.environ['PYTHONPATH'])
# seed setting
random.seed(args.seed)
np.random.seed(args.seed)
logging.info('set random seed = %d' % args.seed)
# recog
logging.info('backend = ' + args.backend)
if args.backend == "chainer":
from asr_chainer import recog
recog(args)
elif args.backend == "pytorch":
from asr_pytorch import recog
recog(args)
else:
raise ValueError("chainer and pytorch are only supported.")
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"PYTHONPATH"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "PYTHONPATH"]
|
python
| 2 | 0 | |
src/backend/qweb/qweb/wsgi.py
|
"""
WSGI config for qweb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'qweb.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
integration-cli/docker_api_swarm_test.go
|
// +build !windows
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/cloudflare/cfssl/csr"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/initca"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/integration-cli/daemon"
"github.com/docker/swarmkit/ca"
"github.com/go-check/check"
)
var defaultReconciliationTimeout = 30 * time.Second
func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
// todo: should find a better way to verify that components are running than /info
d1 := s.AddDaemon(c, true, true)
info, err := d1.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.ControlAvailable, checker.True)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
c.Assert(info.Cluster.RootRotationInProgress, checker.False)
d2 := s.AddDaemon(c, true, false)
info, err = d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.ControlAvailable, checker.False)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
// Leaving cluster
c.Assert(d2.Leave(false), checker.IsNil)
info, err = d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.ControlAvailable, checker.False)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.JoinTokens(c).Worker, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
info, err = d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.ControlAvailable, checker.False)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
// Current state restoring after restarts
d1.Stop(c)
d2.Stop(c)
d1.Start(c)
d2.Start(c)
info, err = d1.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.ControlAvailable, checker.True)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
info, err = d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.ControlAvailable, checker.False)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
}
func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
d1 := s.AddDaemon(c, false, false)
c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
	// todo: error message differs depending on whether some components of the token are valid
d2 := s.AddDaemon(c, false, false)
err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}})
c.Assert(err, checker.NotNil)
c.Assert(err.Error(), checker.Contains, "join token is necessary")
info, err := d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.ListenAddr}})
c.Assert(err, checker.NotNil)
c.Assert(err.Error(), checker.Contains, "invalid join token")
info, err = d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
workerToken := d1.JoinTokens(c).Worker
c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
info, err = d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
c.Assert(d2.Leave(false), checker.IsNil)
info, err = d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
// change tokens
d1.RotateTokens(c)
err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}})
c.Assert(err, checker.NotNil)
c.Assert(err.Error(), checker.Contains, "join token is necessary")
info, err = d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
workerToken = d1.JoinTokens(c).Worker
c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
info, err = d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
c.Assert(d2.Leave(false), checker.IsNil)
info, err = d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
// change spec, don't change tokens
d1.UpdateSwarm(c, func(s *swarm.Spec) {})
err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}})
c.Assert(err, checker.NotNil)
c.Assert(err.Error(), checker.Contains, "join token is necessary")
info, err = d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
info, err = d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
c.Assert(d2.Leave(false), checker.IsNil)
info, err = d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
}
func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) {
d1 := s.AddDaemon(c, false, false)
c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
d1.UpdateSwarm(c, func(s *swarm.Spec) {
s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
{
Protocol: swarm.ExternalCAProtocolCFSSL,
URL: "https://thishasnoca.org",
},
{
Protocol: swarm.ExternalCAProtocolCFSSL,
URL: "https://thishasacacert.org",
CACert: "cacert",
},
}
})
info, err := d1.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 2)
c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "")
c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, "cacert")
}
func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) {
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, false, false)
splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
replacementToken := strings.Join(splitToken, "-")
err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.ListenAddr}})
c.Assert(err, checker.NotNil)
c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint")
}
func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) {
d1 := s.AddDaemon(c, false, false)
c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
d2 := s.AddDaemon(c, true, false)
info, err := d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.ControlAvailable, checker.False)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
n.Spec.Role = swarm.NodeRoleManager
})
waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
n.Spec.Role = swarm.NodeRoleWorker
})
waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False)
// Wait for the role to change to worker in the cert. This is partially
// done because it's something worth testing in its own right, and
// partially because changing the role from manager to worker and then
	// back to manager quickly might cause the node to pause for a while
// while waiting for the role to change to worker, and the test can
// time out during this interval.
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
if err != nil {
return "", check.Commentf("error: %v", err)
}
certs, err := helpers.ParseCertificatesPEM(certBytes)
if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
return certs[0].Subject.OrganizationalUnit[0], nil
}
return "", check.Commentf("could not get organizational unit from certificate")
}, checker.Equals, "swarm-worker")
// Demoting last node should fail
node := d1.GetNode(c, d1.NodeID)
node.Spec.Role = swarm.NodeRoleWorker
url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
status, out, err := d1.SockRequest("POST", url, node.Spec)
c.Assert(err, checker.IsNil)
c.Assert(status, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(out)))
// The warning specific to demoting the last manager is best-effort and
// won't appear until the Role field of the demoted manager has been
// updated.
// Yes, I know this looks silly, but checker.Matches is broken, since
// it anchors the regexp contrary to the documentation, and this makes
// it impossible to match something that includes a line break.
if !strings.Contains(string(out), "last manager of the swarm") {
c.Assert(string(out), checker.Contains, "this would result in a loss of quorum")
}
info, err = d1.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
c.Assert(info.ControlAvailable, checker.True)
// Promote already demoted node
d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
n.Spec.Role = swarm.NodeRoleManager
})
waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
}
func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) {
// add three managers, one of these is leader
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, true, true)
d3 := s.AddDaemon(c, true, true)
// start a service by hitting each of the 3 managers
d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
s.Spec.Name = "test1"
})
d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
s.Spec.Name = "test2"
})
d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
s.Spec.Name = "test3"
})
// 3 services should be started now, because the requests were proxied to leader
// query each node and make sure it returns 3 services
for _, d := range []*daemon.Swarm{d1, d2, d3} {
services := d.ListServices(c)
c.Assert(services, checker.HasLen, 3)
}
}
func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
// Create 3 nodes
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, true, true)
d3 := s.AddDaemon(c, true, true)
// assert that the first node we made is the leader, and the other two are followers
c.Assert(d1.GetNode(c, d1.NodeID).ManagerStatus.Leader, checker.True)
c.Assert(d1.GetNode(c, d2.NodeID).ManagerStatus.Leader, checker.False)
c.Assert(d1.GetNode(c, d3.NodeID).ManagerStatus.Leader, checker.False)
d1.Stop(c)
var (
leader *daemon.Swarm // keep track of leader
followers []*daemon.Swarm // keep track of followers
)
checkLeader := func(nodes ...*daemon.Swarm) checkF {
return func(c *check.C) (interface{}, check.CommentInterface) {
// clear these out before each run
leader = nil
followers = nil
for _, d := range nodes {
if d.GetNode(c, d.NodeID).ManagerStatus.Leader {
leader = d
} else {
followers = append(followers, d)
}
}
if leader == nil {
return false, check.Commentf("no leader elected")
}
return true, check.Commentf("elected %v", leader.ID())
}
}
// wait for an election to occur
waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True)
// assert that we have a new leader
c.Assert(leader, checker.NotNil)
// Keep track of the current leader, since we want that to be chosen.
stableleader := leader
// add the d1, the initial leader, back
d1.Start(c)
// TODO(stevvooe): may need to wait for rejoin here
// wait for possible election
waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True)
// pick out the leader and the followers again
// verify that we still only have 1 leader and 2 followers
c.Assert(leader, checker.NotNil)
c.Assert(followers, checker.HasLen, 2)
// and that after we added d1 back, the leader hasn't changed
c.Assert(leader.NodeID, checker.Equals, stableleader.NodeID)
}
func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, true, true)
d3 := s.AddDaemon(c, true, true)
d1.CreateService(c, simpleTestService)
d2.Stop(c)
// make sure there is a leader
waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
s.Spec.Name = "top1"
})
d3.Stop(c)
var service swarm.Service
simpleTestService(&service)
service.Spec.Name = "top2"
status, out, err := d1.SockRequest("POST", "/services/create", service.Spec)
c.Assert(err, checker.IsNil)
	c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("deadline exceeded: %s", string(out)))
d2.Start(c)
// make sure there is a leader
waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
s.Spec.Name = "top3"
})
}
func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) {
d := s.AddDaemon(c, true, true)
instances := 2
d.CreateService(c, simpleTestService, setInstances(instances))
id, err := d.Cmd("run", "-d", "busybox", "top")
c.Assert(err, checker.IsNil)
id = strings.TrimSpace(id)
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1)
c.Assert(d.Leave(false), checker.NotNil)
c.Assert(d.Leave(true), checker.IsNil)
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
id2, err := d.Cmd("ps", "-q")
c.Assert(err, checker.IsNil)
c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
}
// #23629
func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) {
testRequires(c, Network)
s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, false, false)
id, err := d2.Cmd("run", "-d", "busybox", "top")
c.Assert(err, checker.IsNil)
id = strings.TrimSpace(id)
err = d2.Join(swarm.JoinRequest{
RemoteAddrs: []string{"123.123.123.123:1234"},
})
c.Assert(err, check.NotNil)
c.Assert(err.Error(), checker.Contains, "Timeout was reached")
info, err := d2.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
c.Assert(d2.Leave(true), checker.IsNil)
waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1)
id2, err := d2.Cmd("ps", "-q")
c.Assert(err, checker.IsNil)
c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
}
// #23705
func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {
testRequires(c, Network)
d := s.AddDaemon(c, false, false)
err := d.Join(swarm.JoinRequest{
RemoteAddrs: []string{"123.123.123.123:1234"},
})
c.Assert(err, check.NotNil)
c.Assert(err.Error(), checker.Contains, "Timeout was reached")
waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
d.Stop(c)
d.Start(c)
info, err := d.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
}
func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) {
d1 := s.AddDaemon(c, true, true)
instances := 2
id := d1.CreateService(c, simpleTestService, setInstances(instances))
d1.GetService(c, id)
d1.Stop(c)
d1.Start(c)
d1.GetService(c, id)
d2 := s.AddDaemon(c, true, true)
d2.GetService(c, id)
d2.Stop(c)
d2.Start(c)
d2.GetService(c, id)
d3 := s.AddDaemon(c, true, true)
d3.GetService(c, id)
d3.Stop(c)
d3.Start(c)
d3.GetService(c, id)
d3.Kill()
time.Sleep(1 * time.Second) // time to handle signal
d3.Start(c)
d3.GetService(c, id)
}
func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) {
d := s.AddDaemon(c, true, true)
instances := 2
id := d.CreateService(c, simpleTestService, setInstances(instances))
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
containers := d.ActiveContainers()
instances = 4
d.UpdateService(c, d.GetService(c, id), setInstances(instances))
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
containers2 := d.ActiveContainers()
loop0:
for _, c1 := range containers {
for _, c2 := range containers2 {
if c1 == c2 {
continue loop0
}
}
c.Errorf("container %v not found in new set %#v", c1, containers2)
}
}
func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) {
d := s.AddDaemon(c, false, false)
req := swarm.InitRequest{
ListenAddr: "",
}
status, _, err := d.SockRequest("POST", "/swarm/init", req)
c.Assert(err, checker.IsNil)
c.Assert(status, checker.Equals, http.StatusBadRequest)
req2 := swarm.JoinRequest{
ListenAddr: "0.0.0.0:2377",
RemoteAddrs: []string{""},
}
status, _, err = d.SockRequest("POST", "/swarm/join", req2)
c.Assert(err, checker.IsNil)
c.Assert(status, checker.Equals, http.StatusBadRequest)
}
func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, true, true)
instances := 2
id := d1.CreateService(c, simpleTestService, setInstances(instances))
waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances)
// drain d2, all containers should move to d1
d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
n.Spec.Availability = swarm.NodeAvailabilityDrain
})
waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0)
d2.Stop(c)
c.Assert(d1.Init(swarm.InitRequest{
ForceNewCluster: true,
Spec: swarm.Spec{},
}), checker.IsNil)
waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
d3 := s.AddDaemon(c, true, true)
info, err := d3.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.ControlAvailable, checker.True)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
instances = 4
d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))
waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
}
func simpleTestService(s *swarm.Service) {
ureplicas := uint64(1)
restartDelay := time.Duration(100 * time.Millisecond)
s.Spec = swarm.ServiceSpec{
TaskTemplate: swarm.TaskSpec{
ContainerSpec: swarm.ContainerSpec{
Image: "busybox:latest",
Command: []string{"/bin/top"},
},
RestartPolicy: &swarm.RestartPolicy{
Delay: &restartDelay,
},
},
Mode: swarm.ServiceMode{
Replicated: &swarm.ReplicatedService{
Replicas: &ureplicas,
},
},
}
s.Spec.Name = "top"
}
func serviceForUpdate(s *swarm.Service) {
ureplicas := uint64(1)
restartDelay := time.Duration(100 * time.Millisecond)
s.Spec = swarm.ServiceSpec{
TaskTemplate: swarm.TaskSpec{
ContainerSpec: swarm.ContainerSpec{
Image: "busybox:latest",
Command: []string{"/bin/top"},
},
RestartPolicy: &swarm.RestartPolicy{
Delay: &restartDelay,
},
},
Mode: swarm.ServiceMode{
Replicated: &swarm.ReplicatedService{
Replicas: &ureplicas,
},
},
UpdateConfig: &swarm.UpdateConfig{
Parallelism: 2,
Delay: 4 * time.Second,
FailureAction: swarm.UpdateFailureActionContinue,
},
RollbackConfig: &swarm.UpdateConfig{
Parallelism: 3,
Delay: 4 * time.Second,
FailureAction: swarm.UpdateFailureActionContinue,
},
}
s.Spec.Name = "updatetest"
}
func setInstances(replicas int) daemon.ServiceConstructor {
ureplicas := uint64(replicas)
return func(s *swarm.Service) {
s.Spec.Mode = swarm.ServiceMode{
Replicated: &swarm.ReplicatedService{
Replicas: &ureplicas,
},
}
}
}
func setUpdateOrder(order string) daemon.ServiceConstructor {
return func(s *swarm.Service) {
if s.Spec.UpdateConfig == nil {
s.Spec.UpdateConfig = &swarm.UpdateConfig{}
}
s.Spec.UpdateConfig.Order = order
}
}
func setRollbackOrder(order string) daemon.ServiceConstructor {
return func(s *swarm.Service) {
if s.Spec.RollbackConfig == nil {
s.Spec.RollbackConfig = &swarm.UpdateConfig{}
}
s.Spec.RollbackConfig.Order = order
}
}
func setImage(image string) daemon.ServiceConstructor {
return func(s *swarm.Service) {
s.Spec.TaskTemplate.ContainerSpec.Image = image
}
}
func setFailureAction(failureAction string) daemon.ServiceConstructor {
return func(s *swarm.Service) {
s.Spec.UpdateConfig.FailureAction = failureAction
}
}
func setMaxFailureRatio(maxFailureRatio float32) daemon.ServiceConstructor {
return func(s *swarm.Service) {
s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio
}
}
func setParallelism(parallelism uint64) daemon.ServiceConstructor {
return func(s *swarm.Service) {
s.Spec.UpdateConfig.Parallelism = parallelism
}
}
func setConstraints(constraints []string) daemon.ServiceConstructor {
return func(s *swarm.Service) {
if s.Spec.TaskTemplate.Placement == nil {
s.Spec.TaskTemplate.Placement = &swarm.Placement{}
}
s.Spec.TaskTemplate.Placement.Constraints = constraints
}
}
func setPlacementPrefs(prefs []swarm.PlacementPreference) daemon.ServiceConstructor {
return func(s *swarm.Service) {
if s.Spec.TaskTemplate.Placement == nil {
s.Spec.TaskTemplate.Placement = &swarm.Placement{}
}
s.Spec.TaskTemplate.Placement.Preferences = prefs
}
}
func setGlobalMode(s *swarm.Service) {
s.Spec.Mode = swarm.ServiceMode{
Global: &swarm.GlobalService{},
}
}
func checkClusterHealth(c *check.C, cl []*daemon.Swarm, managerCount, workerCount int) {
var totalMCount, totalWCount int
for _, d := range cl {
var (
info swarm.Info
err error
)
// check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error
checkInfo := func(c *check.C) (interface{}, check.CommentInterface) {
info, err = d.SwarmInfo()
return err, check.Commentf("cluster not ready in time")
}
waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil)
if !info.ControlAvailable {
totalWCount++
continue
}
var leaderFound bool
totalMCount++
var mCount, wCount int
for _, n := range d.ListNodes(c) {
waitReady := func(c *check.C) (interface{}, check.CommentInterface) {
if n.Status.State == swarm.NodeStateReady {
return true, nil
}
nn := d.GetNode(c, n.ID)
n = *nn
return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID)
}
waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True)
waitActive := func(c *check.C) (interface{}, check.CommentInterface) {
if n.Spec.Availability == swarm.NodeAvailabilityActive {
return true, nil
}
nn := d.GetNode(c, n.ID)
n = *nn
return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID)
}
waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True)
if n.Spec.Role == swarm.NodeRoleManager {
c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.Info.NodeID))
if n.ManagerStatus.Leader {
leaderFound = true
}
mCount++
} else {
c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.Info.NodeID))
wCount++
}
}
c.Assert(leaderFound, checker.True, check.Commentf("lack of leader reported by node %s", info.NodeID))
c.Assert(mCount, checker.Equals, managerCount, check.Commentf("managers count reported by node %s", info.NodeID))
c.Assert(wCount, checker.Equals, workerCount, check.Commentf("workers count reported by node %s", info.NodeID))
}
c.Assert(totalMCount, checker.Equals, managerCount)
c.Assert(totalWCount, checker.Equals, workerCount)
}
func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
mCount, wCount := 5, 1
var nodes []*daemon.Swarm
for i := 0; i < mCount; i++ {
manager := s.AddDaemon(c, true, true)
info, err := manager.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.ControlAvailable, checker.True)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
nodes = append(nodes, manager)
}
for i := 0; i < wCount; i++ {
worker := s.AddDaemon(c, true, false)
info, err := worker.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.ControlAvailable, checker.False)
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
nodes = append(nodes, worker)
}
// stop whole cluster
{
var wg sync.WaitGroup
wg.Add(len(nodes))
errs := make(chan error, len(nodes))
for _, d := range nodes {
go func(daemon *daemon.Swarm) {
defer wg.Done()
if err := daemon.StopWithError(); err != nil {
errs <- err
}
// FIXME(vdemeester) This is duplicated…
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
daemon.Root = filepath.Dir(daemon.Root)
}
}(d)
}
wg.Wait()
close(errs)
for err := range errs {
c.Assert(err, check.IsNil)
}
}
// start whole cluster
{
var wg sync.WaitGroup
wg.Add(len(nodes))
errs := make(chan error, len(nodes))
for _, d := range nodes {
go func(daemon *daemon.Swarm) {
defer wg.Done()
if err := daemon.StartWithError("--iptables=false"); err != nil {
errs <- err
}
}(d)
}
wg.Wait()
close(errs)
for err := range errs {
c.Assert(err, check.IsNil)
}
}
checkClusterHealth(c, nodes, mCount, wCount)
}
func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) {
d := s.AddDaemon(c, true, true)
instances := 2
id := d.CreateService(c, simpleTestService, setInstances(instances))
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
service := d.GetService(c, id)
instances = 5
setInstances(instances)(service)
url := fmt.Sprintf("/services/%s/update?version=%d", service.Spec.Name, service.Version.Index)
status, out, err := d.SockRequest("POST", url, service.Spec)
c.Assert(err, checker.IsNil)
c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
}
// Unlocking an unlocked swarm results in an error
func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *check.C) {
d := s.AddDaemon(c, true, true)
err := d.Unlock(swarm.UnlockRequest{UnlockKey: "wrong-key"})
c.Assert(err, checker.NotNil)
c.Assert(err.Error(), checker.Contains, "swarm is not locked")
}
// #29885
func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *check.C) {
ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort))
c.Assert(err, checker.IsNil)
defer ln.Close()
d := s.AddDaemon(c, false, false)
err = d.Init(swarm.InitRequest{})
c.Assert(err, checker.NotNil)
c.Assert(err.Error(), checker.Contains, "address already in use")
}
// Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`,
// caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`.
// This test makes sure the fixes correctly output scopes instead.
func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) {
d := s.AddDaemon(c, true, true)
name := "foo"
networkCreateRequest := types.NetworkCreateRequest{
Name: name,
NetworkCreate: types.NetworkCreate{
CheckDuplicate: false,
},
}
var n1 types.NetworkCreateResponse
networkCreateRequest.NetworkCreate.Driver = "bridge"
status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest)
c.Assert(err, checker.IsNil, check.Commentf(string(out)))
c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out)))
c.Assert(json.Unmarshal(out, &n1), checker.IsNil)
var n2 types.NetworkCreateResponse
networkCreateRequest.NetworkCreate.Driver = "overlay"
status, out, err = d.SockRequest("POST", "/networks/create", networkCreateRequest)
c.Assert(err, checker.IsNil, check.Commentf(string(out)))
c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out)))
c.Assert(json.Unmarshal(out, &n2), checker.IsNil)
var r1 types.NetworkResource
status, out, err = d.SockRequest("GET", "/networks/"+n1.ID, nil)
c.Assert(err, checker.IsNil, check.Commentf(string(out)))
c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out)))
c.Assert(json.Unmarshal(out, &r1), checker.IsNil)
c.Assert(r1.Scope, checker.Equals, "local")
var r2 types.NetworkResource
status, out, err = d.SockRequest("GET", "/networks/"+n2.ID, nil)
c.Assert(err, checker.IsNil, check.Commentf(string(out)))
c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out)))
c.Assert(json.Unmarshal(out, &r2), checker.IsNil)
c.Assert(r2.Scope, checker.Equals, "swarm")
}
// Test case for 30178
func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) {
d := s.AddDaemon(c, true, true)
out, err := d.Cmd("network", "create", "-d", "overlay", "lb")
c.Assert(err, checker.IsNil, check.Commentf(out))
instances := 1
d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{}
s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{
{Target: "lb"},
}
})
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
containers := d.ActiveContainers()
out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top")
c.Assert(err, checker.IsNil, check.Commentf(out))
}
func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *check.C) {
m := s.AddDaemon(c, true, true)
w := s.AddDaemon(c, true, false)
info, err := m.SwarmInfo()
c.Assert(err, checker.IsNil)
currentTrustRoot := info.Cluster.TLSInfo.TrustRoot
// rotate multiple times
for i := 0; i < 4; i++ {
var cert, key []byte
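		// Odd iterations rotate to an explicitly generated external root CA; even iterations leave cert/key empty so swarm generates the new root itself.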
if i%2 != 0 {
cert, _, key, err = initca.New(&csr.CertificateRequest{
CN: "newRoot",
KeyRequest: csr.NewBasicKeyRequest(),
CA: &csr.CAConfig{Expiry: ca.RootCAExpiration},
})
c.Assert(err, checker.IsNil)
}
expectedCert := string(cert)
m.UpdateSwarm(c, func(s *swarm.Spec) {
s.CAConfig.SigningCACert = expectedCert
s.CAConfig.SigningCAKey = string(key)
s.CAConfig.ForceRotate++
})
// poll to make sure update succeeds
var clusterTLSInfo swarm.TLSInfo
for j := 0; j < 18; j++ {
info, err := m.SwarmInfo()
c.Assert(err, checker.IsNil)
			// the desired CA cert and key are always redacted
c.Assert(info.Cluster.Spec.CAConfig.SigningCAKey, checker.Equals, "")
c.Assert(info.Cluster.Spec.CAConfig.SigningCACert, checker.Equals, "")
clusterTLSInfo = info.Cluster.TLSInfo
// if root rotation is done and the trust root has changed, we don't have to poll anymore
if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot {
break
}
// root rotation not done
time.Sleep(250 * time.Millisecond)
}
if cert != nil {
c.Assert(clusterTLSInfo.TrustRoot, checker.Equals, expectedCert)
}
		// could take another second or two for the nodes to trust the new roots after they've all gotten
// new TLS certificates
for j := 0; j < 18; j++ {
mInfo := m.GetNode(c, m.NodeID).Description.TLSInfo
wInfo := m.GetNode(c, w.NodeID).Description.TLSInfo
if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot {
break
}
// nodes don't trust root certs yet
time.Sleep(250 * time.Millisecond)
}
c.Assert(m.GetNode(c, m.NodeID).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo)
c.Assert(m.GetNode(c, w.NodeID).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo)
currentTrustRoot = clusterTLSInfo.TrustRoot
}
}
func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *check.C) {
d := s.AddDaemon(c, true, true)
name := "foo"
networkCreateRequest := types.NetworkCreateRequest{
Name: name,
}
var n types.NetworkCreateResponse
networkCreateRequest.NetworkCreate.Driver = "overlay"
status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest)
c.Assert(err, checker.IsNil, check.Commentf(string(out)))
c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out)))
c.Assert(json.Unmarshal(out, &n), checker.IsNil)
var r types.NetworkResource
status, body, err := d.SockRequest("GET", "/networks/"+name, nil)
c.Assert(err, checker.IsNil, check.Commentf(string(out)))
c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out)))
c.Assert(json.Unmarshal(body, &r), checker.IsNil)
c.Assert(r.Scope, checker.Equals, "swarm")
c.Assert(r.ID, checker.Equals, n.ID)
v := url.Values{}
v.Set("scope", "local")
status, body, err = d.SockRequest("GET", "/networks/"+name+"?"+v.Encode(), nil)
c.Assert(err, checker.IsNil, check.Commentf(string(out)))
c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf(string(out)))
}
|
[
"\"DOCKER_REMAP_ROOT\""
] |
[] |
[
"DOCKER_REMAP_ROOT"
] |
[]
|
["DOCKER_REMAP_ROOT"]
|
go
| 1 | 0 | |
main_vicreg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import argparse
import json
import math
import os
import sys
import time
import torch
import torch.nn.functional as F
from torch import nn, optim
import torch.distributed as dist
import torchvision.datasets as datasets
import augmentations as aug
from distributed import init_distributed_mode
import resnet
def get_arguments():
parser = argparse.ArgumentParser(description="Pretrain a resnet model with VICReg", add_help=False)
# Data
parser.add_argument("--data-dir", type=Path, default="/path/to/imagenet", required=True,
help='Path to the image net dataset')
# Checkpoints
parser.add_argument("--exp-dir", type=Path, default="./exp",
help='Path to the experiment folder, where all logs/checkpoints will be stored')
parser.add_argument("--log-freq-time", type=int, default=60,
help='Print logs to the stats.txt file every [log-freq-time] seconds')
# Model
parser.add_argument("--arch", type=str, default="resnet50",
help='Architecture of the backbone encoder network')
parser.add_argument("--mlp", default="8192-8192-8192",
help='Size and number of layers of the MLP expander head')
# Optim
parser.add_argument("--epochs", type=int, default=100,
help='Number of epochs')
parser.add_argument("--batch-size", type=int, default=2048,
help='Effective batch size (per worker batch size is [batch-size] / world-size)')
parser.add_argument("--base-lr", type=float, default=0.2,
help='Base learning rate, effective learning after warmup is [base-lr] * [batch-size] / 256')
parser.add_argument("--wd", type=float, default=1e-6,
help='Weight decay')
# Loss
parser.add_argument("--sim-coeff", type=float, default=25.0,
help='Invariance regularization loss coefficient')
parser.add_argument("--std-coeff", type=float, default=25.0,
help='Variance regularization loss coefficient')
parser.add_argument("--cov-coeff", type=float, default=1.0,
help='Covariance regularization loss coefficient')
# Running
parser.add_argument("--num-workers", type=int, default=10)
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
# Distributed
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist-url', default='env://',
help='url used to set up distributed training')
return parser
def main(args):
torch.backends.cudnn.benchmark = True
init_distributed_mode(args)
print(args)
gpu = torch.device(args.device)
if args.rank == 0:
args.exp_dir.mkdir(parents=True, exist_ok=True)
stats_file = open(args.exp_dir / "stats.txt", "a", buffering=1)
print(" ".join(sys.argv))
print(" ".join(sys.argv), file=stats_file)
transforms = aug.TrainTransform()
dataset = datasets.ImageFolder(args.data_dir / "train", transforms)
sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=True)
assert args.batch_size % args.world_size == 0
per_device_batch_size = args.batch_size // args.world_size
loader = torch.utils.data.DataLoader(
dataset,
batch_size=per_device_batch_size,
num_workers=args.num_workers,
pin_memory=True,
sampler=sampler,
)
model = VICReg(args).cuda(gpu)
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
optimizer = LARS(
model.parameters(),
lr=0,
weight_decay=args.wd,
weight_decay_filter=exclude_bias_and_norm,
lars_adaptation_filter=exclude_bias_and_norm,
)
if (args.exp_dir / "model.pth").is_file():
if args.rank == 0:
print("resuming from checkpoint")
ckpt = torch.load(args.exp_dir / "model.pth", map_location="cpu")
start_epoch = ckpt["epoch"]
model.load_state_dict(ckpt["model"])
optimizer.load_state_dict(ckpt["optimizer"])
else:
start_epoch = 0
start_time = last_logging = time.time()
scaler = torch.cuda.amp.GradScaler()
for epoch in range(start_epoch, args.epochs):
sampler.set_epoch(epoch)
for step, ((x, y), _) in enumerate(loader, start=epoch * len(loader)):
x = x.cuda(gpu, non_blocking=True)
y = y.cuda(gpu, non_blocking=True)
lr = adjust_learning_rate(args, optimizer, loader, step)
optimizer.zero_grad()
with torch.cuda.amp.autocast():
loss = model.forward(x, y)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
current_time = time.time()
if args.rank == 0 and current_time - last_logging > args.log_freq_time:
stats = dict(
epoch=epoch,
step=step,
loss=loss.item(),
time=int(current_time - start_time),
lr=lr,
)
print(json.dumps(stats))
print(json.dumps(stats), file=stats_file)
last_logging = current_time
if args.rank == 0:
state = dict(
epoch=epoch + 1,
model=model.state_dict(),
optimizer=optimizer.state_dict(),
)
torch.save(state, args.exp_dir / "model.pth")
if args.rank == 0:
torch.save(model.module.backbone.state_dict(), args.exp_dir / "resnet50.pth")
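# Note: adjust_learning_rate applies a linear warmup followed by a cosine decay. The
# effective base LR is base_lr * batch_size / 256 (for instance 0.2 * 2048 / 256 = 1.6
# with the defaults), ramped up linearly over the first 10 epochs of steps and then
# decayed towards 0.1% of that value.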
def adjust_learning_rate(args, optimizer, loader, step):
max_steps = args.epochs * len(loader)
warmup_steps = 10 * len(loader)
base_lr = args.base_lr * args.batch_size / 256
if step < warmup_steps:
lr = base_lr * step / warmup_steps
else:
step -= warmup_steps
max_steps -= warmup_steps
q = 0.5 * (1 + math.cos(math.pi * step / max_steps))
end_lr = base_lr * 0.001
lr = base_lr * q + end_lr * (1 - q)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
return lr
class VICReg(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
self.num_features = int(args.mlp.split("-")[-1])
self.backbone, self.embedding = resnet.__dict__[args.arch](
zero_init_residual=True
)
self.projector = Projector(args, self.embedding)
def forward(self, x, y):
x = self.projector(self.backbone(x))
y = self.projector(self.backbone(y))
repr_loss = F.mse_loss(x, y)
x = torch.cat(FullGatherLayer.apply(x), dim=0)
y = torch.cat(FullGatherLayer.apply(y), dim=0)
x = x - x.mean(dim=0)
y = y - y.mean(dim=0)
std_x = torch.sqrt(x.var(dim=0) + 0.0001)
std_y = torch.sqrt(y.var(dim=0) + 0.0001)
std_loss = torch.mean(F.relu(1 - std_x)) / 2 + torch.mean(F.relu(1 - std_y)) / 2
cov_x = (x.T @ x) / (self.args.batch_size - 1)
cov_y = (y.T @ y) / (self.args.batch_size - 1)
cov_loss = off_diagonal(cov_x).pow_(2).sum().div(
self.num_features
) + off_diagonal(cov_y).pow_(2).sum().div(self.num_features)
loss = (
self.args.sim_coeff * repr_loss
+ self.args.std_coeff * std_loss
+ self.args.cov_coeff * cov_loss
)
return loss
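# Note: Projector builds the MLP expander head from the "-"-separated spec. Assuming a
# backbone embedding of 2048 (as with ResNet-50) and the default --mlp 8192-8192-8192,
# it produces Linear(2048, 8192) + BN + ReLU, Linear(8192, 8192) + BN + ReLU, and a
# final Linear(8192, 8192, bias=False).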
def Projector(args, embedding):
mlp_spec = f"{embedding}-{args.mlp}"
layers = []
f = list(map(int, mlp_spec.split("-")))
for i in range(len(f) - 2):
layers.append(nn.Linear(f[i], f[i + 1]))
layers.append(nn.BatchNorm1d(f[i + 1]))
layers.append(nn.ReLU(True))
layers.append(nn.Linear(f[-2], f[-1], bias=False))
return nn.Sequential(*layers)
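# Note: one-dimensional parameters are the biases and the (Sync)BatchNorm weights, so
# the filter below exempts them from weight decay and from the trust-ratio adaptation
# in the LARS optimizer defined further down.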
def exclude_bias_and_norm(p):
return p.ndim == 1
def off_diagonal(x):
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
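# Note: off_diagonal drops the diagonal of a square matrix with a reshaping trick. For
# a 3x3 matrix [[a, b, c], [d, e, f], [g, h, i]], flatten() gives [a b c d e f g h i],
# [:-1] drops the trailing diagonal element i, and view(n - 1, n + 1) lines the
# remaining diagonal entries (a, e) up in column 0, so [:, 1:] keeps exactly the
# off-diagonal values [b, c, d, f, g, h].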
class LARS(optim.Optimizer):
def __init__(
self,
params,
lr,
weight_decay=0,
momentum=0.9,
eta=0.001,
weight_decay_filter=None,
lars_adaptation_filter=None,
):
defaults = dict(
lr=lr,
weight_decay=weight_decay,
momentum=momentum,
eta=eta,
weight_decay_filter=weight_decay_filter,
lars_adaptation_filter=lars_adaptation_filter,
)
super().__init__(params, defaults)
@torch.no_grad()
def step(self):
for g in self.param_groups:
for p in g["params"]:
dp = p.grad
if dp is None:
continue
if g["weight_decay_filter"] is None or not g["weight_decay_filter"](p):
dp = dp.add(p, alpha=g["weight_decay"])
if g["lars_adaptation_filter"] is None or not g[
"lars_adaptation_filter"
](p):
param_norm = torch.norm(p)
update_norm = torch.norm(dp)
one = torch.ones_like(param_norm)
q = torch.where(
param_norm > 0.0,
torch.where(
update_norm > 0, (g["eta"] * param_norm / update_norm), one
),
one,
)
dp = dp.mul(q)
param_state = self.state[p]
if "mu" not in param_state:
param_state["mu"] = torch.zeros_like(p)
mu = param_state["mu"]
mu.mul_(g["momentum"]).add_(dp)
p.add_(mu, alpha=-g["lr"])
def batch_all_gather(x):
x_list = FullGatherLayer.apply(x)
return torch.cat(x_list, dim=0)
class FullGatherLayer(torch.autograd.Function):
"""
    Gather tensors from all processes and support backward propagation
for the gradients across processes.
"""
@staticmethod
def forward(ctx, x):
output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
dist.all_reduce(all_gradients)
return all_gradients[dist.get_rank()]
def handle_sigusr1(signum, frame):
os.system(f'scontrol requeue {os.environ["SLURM_JOB_ID"]}')
exit()
def handle_sigterm(signum, frame):
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser('VICReg training script', parents=[get_arguments()])
args = parser.parse_args()
main(args)
|
[] |
[] |
[
"SLURM_JOB_ID"
] |
[]
|
["SLURM_JOB_ID"]
|
python
| 1 | 0 | |
Tests/test_Prank_tool.py
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unittests for Bio.Align.Applications interface for PRANK."""
import sys
import os
import unittest
from Bio.Application import _escape_filename
from Bio import AlignIO
from Bio import SeqIO
from Bio import MissingExternalDependencyError
from Bio.Align.Applications import PrankCommandline
from Bio.Nexus.Nexus import NexusError
# Try to avoid problems when the OS is in another language
os.environ["LANG"] = "C"
prank_exe = None
if sys.platform == "win32":
try:
# This can vary depending on the Windows language.
prog_files = os.environ["PROGRAMFILES"]
except KeyError:
prog_files = r"C:\Program Files"
# For Windows, PRANK just comes as a zip file which contains the
# prank.exe file which the user could put anywhere. We'll try a few
# sensible locations under Program Files... and then the full path.
likely_dirs = [
"", # Current dir
prog_files,
os.path.join(prog_files, "Prank"),
] + sys.path
for folder in likely_dirs:
if os.path.isdir(folder):
if os.path.isfile(os.path.join(folder, "prank.exe")):
prank_exe = os.path.join(folder, "prank.exe")
break
if prank_exe:
break
else:
from subprocess import getoutput
output = getoutput("prank")
if "not found" not in output and "not recognized" not in output:
if "prank" in output.lower():
prank_exe = "prank"
if not prank_exe:
raise MissingExternalDependencyError(
"Install PRANK if you want to use the Bio.Align.Applications wrapper."
)
class PrankApplication(unittest.TestCase):
def setUp(self):
self.infile1 = "Fasta/fa01"
def tearDown(self):
"""Remove generated files.
output.1.dnd output.1.fas output.1.xml output.2.dnd output.2.fas output.2.xml
"""
if os.path.isfile("output.1.dnd"):
os.remove("output.1.dnd")
if os.path.isfile("output.1.fas"):
os.remove("output.1.fas")
if os.path.isfile("output.1.xml"):
os.remove("output.1.xml")
if os.path.isfile("output.2.dnd"):
os.remove("output.2.dnd")
if os.path.isfile("output.2.fas"):
os.remove("output.2.fas")
if os.path.isfile("output.2.xml"):
os.remove("output.2.xml")
if os.path.isfile("output.1.nex"):
os.remove("output.1.nex")
if os.path.isfile("output.2.nex"):
os.remove("output.2.nex")
def test_Prank_simple(self):
"""Simple round-trip through app with infile.
output.?.??? files written to cwd - no way to redirect
"""
cmdline = PrankCommandline(prank_exe)
cmdline.set_parameter("d", self.infile1)
self.assertEqual(str(cmdline), _escape_filename(prank_exe) + " -d=Fasta/fa01")
self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
output, error = cmdline()
self.assertEqual(error, "")
self.assertIn("Total time", output)
def test_Prank_simple_with_NEXUS_output(self):
"""Simple round-trip through app with infile, output in NEXUS.
output.?.??? files written to cwd - no way to redirect
"""
records = list(SeqIO.parse(self.infile1, "fasta"))
# Try using keyword argument,
cmdline = PrankCommandline(prank_exe, d=self.infile1)
# Try using a property,
cmdline.d = self.infile1
cmdline.f = 17 # NEXUS format
cmdline.set_parameter("dots", True)
self.assertEqual(
str(cmdline), _escape_filename(prank_exe) + " -d=Fasta/fa01 -f=17 -dots"
)
self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
stdout, stderr = cmdline()
self.assertIn("Total time", stdout)
self.assertEqual(stderr, "")
try:
if os.path.isfile("output.best.nex"):
# Prank v.130820 and perhaps earlier use ".best.*" output names
nex_fname = "output.best.nex"
elif os.path.isfile("output.2.nex"):
# Older Prank versions use ".2.*" output names
nex_fname = "output.2.nex"
else:
raise RuntimeError("Can't find PRANK's NEXUS output (*.nex)")
align = AlignIO.read(nex_fname, "nexus")
for old, new in zip(records, align):
# Old versions of Prank reduced name to 9 chars
self.assertTrue(old.id == new.id or old.id[:9] == new.id)
# infile1 has alignment gaps in it
self.assertEqual(
str(new.seq).replace("-", ""), str(old.seq).replace("-", "")
)
except NexusError:
# See bug 3119,
# Bio.Nexus can't parse output from prank v100701 (1 July 2010)
pass
def test_Prank_complex_command_line(self):
"""Round-trip with complex command line."""
cmdline = PrankCommandline(prank_exe)
cmdline.set_parameter("d", self.infile1)
cmdline.set_parameter("-gaprate", 0.321)
cmdline.set_parameter("gapext", 0.6)
cmdline.set_parameter("-dots", 1) # i.e. True
# Try using a property:
cmdline.kappa = 3
cmdline.skipins = True
cmdline.set_parameter("-once", True)
cmdline.realbranches = True
self.assertEqual(
str(cmdline),
_escape_filename(prank_exe)
+ " -d=Fasta/fa01"
+ " -dots -gaprate=0.321 -gapext=0.6 -kappa=3"
+ " -once -skipins -realbranches",
)
self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
stdout, stderr = cmdline()
self.assertIn("Total time", stdout)
class PrankConversion(unittest.TestCase):
def setUp(self):
# As these reads are all 36, it can be seen as pre-aligned:
self.input = "Quality/example.fasta"
self.output = "temp with space" # prefix, PRANK will pick extensions
def conversion(self, prank_number, prank_ext, format):
"""Get PRANK to do a conversion, and check it with SeqIO."""
filename = "%s.%s" % (self.output, prank_ext)
if os.path.isfile(filename):
os.remove(filename)
cmdline = PrankCommandline(
prank_exe,
d=self.input,
convert=True,
f=prank_number,
o='"%s"' % self.output,
)
self.assertEqual(
str(cmdline),
_escape_filename(prank_exe)
+ " -d=%s" % self.input
+ ' -o="%s"' % self.output
+ " -f=%i" % prank_number
+ " -convert",
)
self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
message, error = cmdline()
self.assertIn("PRANK", message)
self.assertIn(
("converting '%s' to '%s'" % (self.input, filename)), message, message
)
self.assertEqual(error, "")
self.assertTrue(os.path.isfile(filename))
old = AlignIO.read(self.input, "fasta")
# Hack...
if format == "phylip":
for record in old:
record.id = record.id[:10]
new = AlignIO.read(filename, format)
self.assertEqual(len(old), len(new))
for old_r, new_r in zip(old, new):
self.assertEqual(old_r.id, new_r.id)
self.assertEqual(old_r.seq, new_r.seq)
os.remove(filename)
def test_convert_to_fasta(self):
"""Convert FASTA to FASTA format."""
self.conversion(8, "fas", "fasta")
# Prank v.100701 seems to output an invalid file here...
# def test_convert_to_phylip32(self):
# """Convert FASTA to PHYLIP 3.2 format."""
# self.conversion(11, "phy", "phylip")
def test_convert_to_phylip(self):
"""Convert FASTA to PHYLIP format."""
self.conversion(12, "phy", "phylip")
# PRANK truncated the record names in the matrix block. An error?
# def test_convert_to_paup_nexus(self):
# """Convert FASTA to PAUP/NEXUS."""
# self.conversion(17, "nex", "nexus")
# We don't support format 18, PAML
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
[] |
[] |
[
"LANG",
"PROGRAMFILES"
] |
[]
|
["LANG", "PROGRAMFILES"]
|
python
| 2 | 0 | |
test/functional/test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'feature_block.py',
'rpc_fundrawtransaction.py',
'p2p_compactblocks.py',
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_accounts.py',
'p2p_segwit.py',
'wallet_dump.py',
'rpc_listtransactions.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'wallet_address_types.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'wallet_txn_clone.py',
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --usecli',
'interface_http.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
'mining_prioritisetransaction.py',
'p2p_invalid_block.py',
'p2p_invalid_tx.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'rpc_signmessage.py',
'feature_nulldummy.py',
'wallet_import_rescan.py',
'mining_basic.py',
'wallet_bumpfee.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'wallet_encryption.py',
'feature_dersig.py',
'feature_cltv.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'wallet_fallbackfee.py',
'feature_minchainwork.py',
'p2p_fingerprint.py',
'feature_uacomment.py',
'p2p_unrequested_blocks.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_config_args.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
# vv Tests less than 20m vv
'feature_fee_estimation.py',
# vv Tests less than 5m vv
'feature_maxuploadtarget.py',
'mempool_packages.py',
'feature_dbcrash.py',
# vv Tests less than 2m vv
'feature_bip68_sequence.py',
'mining_getblocktemplate_longpoll.py',
'p2p_timeouts.py',
# vv Tests less than 60s vv
'feature_bip9_softforks.py',
'p2p_feefilter.py',
'rpc_bind.py',
# vv Tests less than 30s vv
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/bitcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [re.sub("\.py$", "", t) + ".py" for t in tests]
test_list = []
for t in tests:
if t in ALL_SCRIPTS:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
else:
# No individual tests have been specified.
# Run all base tests, and optionally run extended tests.
test_list = BASE_SCRIPTS
if args.extended:
# place the EXTENDED_SCRIPTS first since the three longest ones
# are there and the list is shorter
test_list = EXTENDED_SCRIPTS + test_list
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub("\.py$", "", t) + ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[], combined_logs_len=0):
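    """Run the given test scripts in parallel via TestHandler, print per-test and
    summary results, optionally collect RPC coverage data, and exit non-zero if any
    test failed."""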
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "bitcoind"]) is not None:
print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = build_dir + '/src/bitcoind' + exeext
os.environ["BITCOINCLI"] = build_dir + '/src/bitcoin-cli' + exeext
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
time0 = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
print_results(test_results, max_len_name, (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
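        """Start queued tests (up to num_jobs in parallel) and block until one of the
        running subprocesses finishes, returning its (TestResult, testdir, stdout,
        stderr)."""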
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((t,
time.time(),
subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, testdir, log_out, log_err) = j
if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
# In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
                    # providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(j)
return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr
print('.', end='', flush=True)
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that test scripts start with one of the allowed name prefixes."""
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if bad_script_names:
print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
raise AssertionError("Some tests are not following naming convention!")
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test-framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
|
[] |
[] |
[
"BITCOINCLI",
"TRAVIS",
"BITCOIND"
] |
[]
|
["BITCOINCLI", "TRAVIS", "BITCOIND"]
|
python
| 3 | 0 | |
src/common/text/position.go
|
// Copyright 2018 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package text
import (
"fmt"
"os"
"strings"
"github.com/strawberryssg/strawberry-v0/common/terminal"
)
// Positioner represents a thing that knows its position in a text file or stream,
// typically an error.
type Positioner interface {
Position() Position
}
// Position holds a source position in a text file or stream.
type Position struct {
Filename string // filename, if any
Offset int // byte offset, starting at 0. It's set to -1 if not provided.
LineNumber int // line number, starting at 1
ColumnNumber int // column number, starting at 1 (character count per line)
}
func (pos Position) String() string {
if pos.Filename == "" {
pos.Filename = "<stream>"
}
return positionStringFormatfunc(pos)
}
// IsValid returns true if line number is > 0.
func (pos Position) IsValid() bool {
return pos.LineNumber > 0
}
var positionStringFormatfunc func(p Position) string
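// createPositionStringFormatter builds the formatting function used by Position.String.
// The placeholders :file, :line and :col in formatStr are replaced with the position's
// filename, line number and column number; for instance the (hypothetical) format
// ":file::line::col" would render a position in config.toml at line 3, column 7 as
// "config.toml:3:7".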
func createPositionStringFormatter(formatStr string) func(p Position) string {
if formatStr == "" {
formatStr = "\":file::line::col\""
}
identifiers := []string{":file", ":line", ":col"}
var identifiersFound []string
for i := range formatStr {
for _, id := range identifiers {
if strings.HasPrefix(formatStr[i:], id) {
identifiersFound = append(identifiersFound, id)
}
}
}
replacer := strings.NewReplacer(":file", "%s", ":line", "%d", ":col", "%d")
format := replacer.Replace(formatStr)
f := func(pos Position) string {
args := make([]interface{}, len(identifiersFound))
for i, id := range identifiersFound {
switch id {
case ":file":
args[i] = pos.Filename
case ":line":
args[i] = pos.LineNumber
case ":col":
args[i] = pos.ColumnNumber
}
}
msg := fmt.Sprintf(format, args...)
if terminal.IsTerminal(os.Stdout) {
return terminal.Notice(msg)
}
return msg
}
return f
}
func init() {
positionStringFormatfunc = createPositionStringFormatter(os.Getenv("HUGO_FILE_LOG_FORMAT"))
}
|
[
"\"HUGO_FILE_LOG_FORMAT\""
] |
[] |
[
"HUGO_FILE_LOG_FORMAT"
] |
[]
|
["HUGO_FILE_LOG_FORMAT"]
|
go
| 1 | 0 | |
cmd/algorand-indexer/daemon.go
|
package main
import (
"context"
"fmt"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/algorand/go-algorand/rpcs"
"github.com/algorand/indexer/api"
"github.com/algorand/indexer/api/generated/v2"
"github.com/algorand/indexer/config"
"github.com/algorand/indexer/fetcher"
"github.com/algorand/indexer/idb"
"github.com/algorand/indexer/importer"
"github.com/algorand/indexer/util/metrics"
)
var (
algodDataDir string
algodAddr string
algodToken string
daemonServerAddr string
noAlgod bool
developerMode bool
allowMigration bool
metricsMode string
tokenString string
writeTimeout time.Duration
readTimeout time.Duration
maxConn uint32
maxAPIResourcesPerAccount uint32
maxTransactionsLimit uint32
defaultTransactionsLimit uint32
maxAccountsLimit uint32
defaultAccountsLimit uint32
maxAssetsLimit uint32
defaultAssetsLimit uint32
maxBalancesLimit uint32
defaultBalancesLimit uint32
maxApplicationsLimit uint32
defaultApplicationsLimit uint32
enableAllParameters bool
)
const paramConfigEnableFlag = false
var daemonCmd = &cobra.Command{
Use: "daemon",
Short: "run indexer daemon",
Long: "run indexer daemon. Serve api on HTTP.",
//Args:
Run: func(cmd *cobra.Command, args []string) {
var err error
config.BindFlags(cmd)
err = configureLogger()
if err != nil {
fmt.Fprintf(os.Stderr, "failed to configure logger: %v", err)
os.Exit(1)
}
// If someone supplied a configuration file but also said to enable all parameters,
// that's an error
if suppliedAPIConfigFile != "" && enableAllParameters {
fmt.Fprint(os.Stderr, "not allowed to supply an api config file and enable all parameters")
os.Exit(1)
}
if algodDataDir == "" {
algodDataDir = os.Getenv("ALGORAND_DATA")
}
ctx, cf := context.WithCancel(context.Background())
defer cf()
{
cancelCh := make(chan os.Signal, 1)
signal.Notify(cancelCh, syscall.SIGTERM, syscall.SIGINT)
go func() {
<-cancelCh
logger.Println("Stopping Indexer.")
cf()
}()
}
var bot fetcher.Fetcher
if noAlgod {
logger.Info("algod block following disabled")
} else if algodAddr != "" && algodToken != "" {
bot, err = fetcher.ForNetAndToken(algodAddr, algodToken, logger)
maybeFail(err, "fetcher setup, %v", err)
} else if algodDataDir != "" {
bot, err = fetcher.ForDataDir(algodDataDir, logger)
maybeFail(err, "fetcher setup, %v", err)
} else {
// no algod was found
noAlgod = true
}
opts := idb.IndexerDbOptions{}
if noAlgod && !allowMigration {
opts.ReadOnly = true
}
opts.MaxConn = maxConn
db, availableCh := indexerDbFromFlags(opts)
defer db.Close()
var wg sync.WaitGroup
if bot != nil {
wg.Add(1)
go func() {
defer wg.Done()
// Wait until the database is available.
<-availableCh
// Initial import if needed.
genesisReader := importer.GetGenesisFile(genesisJSONPath, bot.Algod(), logger)
_, err := importer.EnsureInitialImport(db, genesisReader, logger)
maybeFail(err, "importer.EnsureInitialImport() error")
logger.Info("Initializing block import handler.")
nextRound, err := db.GetNextRoundToAccount()
maybeFail(err, "failed to get next round, %v", err)
bot.SetNextRound(nextRound)
imp := importer.NewImporter(db)
handler := blockHandler(imp, 1*time.Second)
bot.SetBlockHandler(handler)
logger.Info("Starting block importer.")
err = bot.Run(ctx)
if err != nil {
// If context is not expired.
if ctx.Err() == nil {
logger.WithError(err).Errorf("fetcher exited with error")
os.Exit(1)
}
}
}()
} else {
logger.Info("No block importer configured.")
}
fmt.Printf("serving on %s\n", daemonServerAddr)
logger.Infof("serving on %s", daemonServerAddr)
options := makeOptions()
api.Serve(ctx, daemonServerAddr, db, bot, logger, options)
wg.Wait()
},
}
func init() {
daemonCmd.Flags().StringVarP(&algodDataDir, "algod", "d", "", "path to algod data dir, or $ALGORAND_DATA")
daemonCmd.Flags().StringVarP(&algodAddr, "algod-net", "", "", "host:port of algod")
daemonCmd.Flags().StringVarP(&algodToken, "algod-token", "", "", "api access token for algod")
daemonCmd.Flags().StringVarP(&genesisJSONPath, "genesis", "g", "", "path to genesis.json (defaults to genesis.json in algod data dir if that was set)")
daemonCmd.Flags().StringVarP(&daemonServerAddr, "server", "S", ":8980", "host:port to serve API on (default :8980)")
daemonCmd.Flags().BoolVarP(&noAlgod, "no-algod", "", false, "disable connecting to algod for block following")
daemonCmd.Flags().StringVarP(&tokenString, "token", "t", "", "an optional auth token, when set REST calls must use this token in a bearer format, or in a 'X-Indexer-API-Token' header")
daemonCmd.Flags().BoolVarP(&developerMode, "dev-mode", "", false, "allow performance intensive operations like searching for accounts at a particular round")
daemonCmd.Flags().BoolVarP(&allowMigration, "allow-migration", "", false, "allow migrations to happen even when no algod connected")
daemonCmd.Flags().StringVarP(&metricsMode, "metrics-mode", "", "OFF", "configure the /metrics endpoint to [ON, OFF, VERBOSE]")
daemonCmd.Flags().DurationVarP(&writeTimeout, "write-timeout", "", 30*time.Second, "set the maximum duration to wait before timing out writes to a http response, breaking connection")
daemonCmd.Flags().DurationVarP(&readTimeout, "read-timeout", "", 5*time.Second, "set the maximum duration for reading the entire request")
daemonCmd.Flags().Uint32VarP(&maxConn, "max-conn", "", 0, "set the maximum connections allowed in the connection pool, if the maximum is reached subsequent connections will wait until a connection becomes available, or timeout according to the read-timeout setting")
daemonCmd.Flags().StringVar(&suppliedAPIConfigFile, "api-config-file", "", "supply an API config file to enable/disable parameters")
daemonCmd.Flags().BoolVar(&enableAllParameters, "enable-all-parameters", false, "override default configuration and enable all parameters. Can't be used with --api-config-file")
if !paramConfigEnableFlag {
daemonCmd.Flags().MarkHidden("api-config-file")
daemonCmd.Flags().MarkHidden("enable-all-parameters")
}
daemonCmd.Flags().Uint32VarP(&maxAPIResourcesPerAccount, "max-api-resources-per-account", "", 1000, "set the maximum total number of resources (created assets, created apps, asset holdings, and application local state) per account that will be allowed in REST API lookupAccountByID and searchForAccounts responses before returning a 400 Bad Request. Set zero for no limit")
daemonCmd.Flags().Uint32VarP(&maxTransactionsLimit, "max-transactions-limit", "", 10000, "set the maximum allowed Limit parameter for querying transactions")
daemonCmd.Flags().Uint32VarP(&defaultTransactionsLimit, "default-transactions-limit", "", 1000, "set the default Limit parameter for querying transactions, if none is provided")
daemonCmd.Flags().Uint32VarP(&maxAccountsLimit, "max-accounts-limit", "", 1000, "set the maximum allowed Limit parameter for querying accounts")
daemonCmd.Flags().Uint32VarP(&defaultAccountsLimit, "default-accounts-limit", "", 100, "set the default Limit parameter for querying accounts, if none is provided")
daemonCmd.Flags().Uint32VarP(&maxAssetsLimit, "max-assets-limit", "", 1000, "set the maximum allowed Limit parameter for querying assets")
daemonCmd.Flags().Uint32VarP(&defaultAssetsLimit, "default-assets-limit", "", 100, "set the default Limit parameter for querying assets, if none is provided")
daemonCmd.Flags().Uint32VarP(&maxBalancesLimit, "max-balances-limit", "", 10000, "set the maximum allowed Limit parameter for querying balances")
daemonCmd.Flags().Uint32VarP(&defaultBalancesLimit, "default-balances-limit", "", 1000, "set the default Limit parameter for querying balances, if none is provided")
daemonCmd.Flags().Uint32VarP(&maxApplicationsLimit, "max-applications-limit", "", 1000, "set the maximum allowed Limit parameter for querying applications")
daemonCmd.Flags().Uint32VarP(&defaultApplicationsLimit, "default-applications-limit", "", 100, "set the default Limit parameter for querying applications, if none is provided")
viper.RegisterAlias("algod", "algod-data-dir")
viper.RegisterAlias("algod-net", "algod-address")
viper.RegisterAlias("server", "server-address")
viper.RegisterAlias("token", "api-token")
}
// makeOptions converts CLI options to server options
func makeOptions() (options api.ExtraOptions) {
options.DeveloperMode = developerMode
if tokenString != "" {
options.Tokens = append(options.Tokens, tokenString)
}
switch strings.ToUpper(metricsMode) {
case "OFF":
options.MetricsEndpoint = false
options.MetricsEndpointVerbose = false
case "ON":
options.MetricsEndpoint = true
options.MetricsEndpointVerbose = false
case "VERBOSE":
options.MetricsEndpoint = true
options.MetricsEndpointVerbose = true
}
options.WriteTimeout = writeTimeout
options.ReadTimeout = readTimeout
options.MaxAPIResourcesPerAccount = uint64(maxAPIResourcesPerAccount)
options.MaxTransactionsLimit = uint64(maxTransactionsLimit)
options.DefaultTransactionsLimit = uint64(defaultTransactionsLimit)
options.MaxAccountsLimit = uint64(maxAccountsLimit)
options.DefaultAccountsLimit = uint64(defaultAccountsLimit)
options.MaxAssetsLimit = uint64(maxAssetsLimit)
options.DefaultAssetsLimit = uint64(defaultAssetsLimit)
options.MaxBalancesLimit = uint64(maxBalancesLimit)
options.DefaultBalancesLimit = uint64(defaultBalancesLimit)
options.MaxApplicationsLimit = uint64(maxApplicationsLimit)
options.DefaultApplicationsLimit = uint64(defaultApplicationsLimit)
if paramConfigEnableFlag {
if enableAllParameters {
options.DisabledMapConfig = api.MakeDisabledMapConfig()
} else {
options.DisabledMapConfig = api.GetDefaultDisabledMapConfigForPostgres()
}
if suppliedAPIConfigFile != "" {
swag, err := generated.GetSwagger()
if err != nil {
fmt.Fprintf(os.Stderr, "failed to get swagger: %v", err)
os.Exit(1)
}
logger.Infof("supplied api configuration file located at: %s", suppliedAPIConfigFile)
potentialDisabledMapConfig, err := api.MakeDisabledMapConfigFromFile(swag, suppliedAPIConfigFile)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to created disabled map config from file: %v", err)
os.Exit(1)
}
options.DisabledMapConfig = potentialDisabledMapConfig
}
}
return
}
// blockHandler creates a handler complying to the fetcher block handler interface. In case of a failure it keeps
// attempting to add the block until the fetcher shuts down.
func blockHandler(imp importer.Importer, retryDelay time.Duration) func(context.Context, *rpcs.EncodedBlockCert) error {
return func(ctx context.Context, block *rpcs.EncodedBlockCert) error {
for {
err := handleBlock(block, imp)
if err == nil {
// return on success.
return nil
}
// Delay or terminate before next attempt.
select {
case <-ctx.Done():
return err
case <-time.After(retryDelay):
break
}
}
}
}
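// handleBlock imports a single block into the database and records import metrics,
// returning a wrapped error on failure so that blockHandler can retry the block.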
func handleBlock(block *rpcs.EncodedBlockCert, imp importer.Importer) error {
start := time.Now()
err := imp.ImportBlock(block)
if err != nil {
logger.WithError(err).Errorf(
"adding block %d to database failed", block.Block.Round())
return fmt.Errorf("handleBlock() err: %w", err)
}
dt := time.Since(start)
// Ignore round 0 (which is empty).
if block.Block.Round() > 0 {
metrics.BlockImportTimeSeconds.Observe(dt.Seconds())
metrics.ImportedTxnsPerBlock.Observe(float64(len(block.Block.Payset)))
metrics.ImportedRoundGauge.Set(float64(block.Block.Round()))
txnCountByType := make(map[string]int)
for _, txn := range block.Block.Payset {
txnCountByType[string(txn.Txn.Type)]++
}
for k, v := range txnCountByType {
metrics.ImportedTxns.WithLabelValues(k).Set(float64(v))
}
}
logger.Infof("round r=%d (%d txn) imported in %s", block.Block.Round(), len(block.Block.Payset), dt.String())
return nil
}
|
[
"\"ALGORAND_DATA\""
] |
[] |
[
"ALGORAND_DATA"
] |
[]
|
["ALGORAND_DATA"]
|
go
| 1 | 0 | |
vendor/gopkg.in/alecthomas/kingpin.v3-unstable/i18n_init.go
|
package kingpin
//go:generate go run ./cmd/embedi18n/main.go en-AU
//go:generate go run ./cmd/embedi18n/main.go fr
import (
"bytes"
"compress/gzip"
"io/ioutil"
"os"
"github.com/nicksnyder/go-i18n/i18n"
)
type tError struct {
msg string
args []interface{}
}
// TError is an error that translates itself.
//
// It has the same signature and usage as T().
func TError(msg string, args ...interface{}) error { return &tError{msg: msg, args: args} }
func (i *tError) Error() string { return T(i.msg, i.args...) }
// T is a translation function.
var T = initI18N()
func initI18N() i18n.TranslateFunc {
// Initialise translations.
i18n.ParseTranslationFileBytes("i18n/en-AU.all.json", decompressLang(i18n_en_AU))
i18n.ParseTranslationFileBytes("i18n/fr.all.json", decompressLang(i18n_fr))
// Detect language.
lang := os.Getenv("LANG")
t, err := i18n.Tfunc(lang, "en")
if err != nil {
panic(err)
}
return t
}
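// decompressLang gunzips one of the embedded, gzip-compressed translation files
// (produced by the go:generate directives above) and returns the raw JSON bytes.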
func decompressLang(data []byte) []byte {
r := bytes.NewReader(data)
gr, err := gzip.NewReader(r)
if err != nil {
panic(err)
}
out, err := ioutil.ReadAll(gr)
if err != nil {
panic(err)
}
return out
}
// SetLanguage sets the language for Kingpin.
func SetLanguage(lang string, others ...string) error {
t, err := i18n.Tfunc(lang, others...)
if err != nil {
return err
}
T = t
return nil
}
// V is a convenience alias for translation function variables.
// eg. T("Something {{.Arg0}}", V{"Arg0": "moo"})
type V map[string]interface{}
|
[
"\"LANG\""
] |
[] |
[
"LANG"
] |
[]
|
["LANG"]
|
go
| 1 | 0 | |
ui/utils.go
|
package ui
import (
"errors"
"io/ioutil"
"os"
)
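// TempFile creates an anonymous temporary file of the requested size inside
// XDG_RUNTIME_DIR: the file is truncated to size and then unlinked, so only the
// returned *os.File descriptor keeps it alive (suitable for sharing buffer memory,
// e.g. with a Wayland compositor).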
func TempFile(size int64) (*os.File, error) {
dir := os.Getenv("XDG_RUNTIME_DIR")
if dir == "" {
return nil, errors.New("XDG_RUNTIME_DIR is not defined in env")
}
file, err := ioutil.TempFile(dir, "go-wayland-shared")
if err != nil {
return nil, err
}
err = file.Truncate(size)
if err != nil {
return nil, err
}
err = os.Remove(file.Name())
if err != nil {
return nil, err
}
return file, nil
}
|
[
"\"XDG_RUNTIME_DIR\""
] |
[] |
[
"XDG_RUNTIME_DIR"
] |
[]
|
["XDG_RUNTIME_DIR"]
|
go
| 1 | 0 | |
src/pagure/hooks/files/git_multimail_upstream.py
|
#!/usr/bin/env python
__version__ = "1.4.0"
# Copyright (c) 2015-2016 Matthieu Moy and others
# Copyright (c) 2012-2014 Michael Haggerty and others
# Derived from contrib/hooks/post-receive-email, which is
# Copyright (c) 2007 Andy Parkins
# and also includes contributions by other authors.
#
# This file is part of git-multimail.
#
# git-multimail is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License version
# 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
# Sources: https://github.com/git-multimail/git-multimail/
"""Generate notification emails for pushes to a git repository.
This hook sends emails describing changes introduced by pushes to a
git repository. For each reference that was changed, it emits one
ReferenceChange email summarizing how the reference was changed,
followed by one Revision email for each new commit that was introduced
by the reference change.
Each commit is announced in exactly one Revision email. If the same
commit is merged into another branch in the same or a later push, then
the ReferenceChange email will list the commit's SHA1 and its one-line
summary, but no new Revision email will be generated.
This script is designed to be used as a "post-receive" hook in a git
repository (see githooks(5)). It can also be used as an "update"
script, but this usage is not completely reliable and is deprecated.
To help with debugging, this script accepts a --stdout option, which
causes the emails to be written to standard output rather than sent
using sendmail.
See the accompanying README file for the complete documentation.
"""
import sys
import os
import re
import bisect
import socket
import subprocess
import shlex
import optparse
import logging
import smtplib
try:
import ssl
except ImportError:
    # Python < 2.6 does not have ssl, but that's OK if we don't use it.
pass
import time
import cgi
PYTHON3 = sys.version_info >= (3, 0)
if sys.version_info <= (2, 5):
def all(iterable):
for element in iterable:
if not element:
return False
return True
def is_ascii(s):
return all(ord(c) < 128 and ord(c) > 0 for c in s)
if PYTHON3:
def is_string(s):
return isinstance(s, str)
def str_to_bytes(s):
return s.encode(ENCODING)
def bytes_to_str(s, errors="strict"):
return s.decode(ENCODING, errors)
unicode = str
def write_str(f, msg):
        # Try outputting with the default encoding. If it fails,
# try UTF-8.
try:
f.buffer.write(msg.encode(sys.getdefaultencoding()))
except UnicodeEncodeError:
f.buffer.write(msg.encode(ENCODING))
def read_line(f):
# Try reading with the default encoding. If it fails,
# try UTF-8.
out = f.buffer.readline()
try:
return out.decode(sys.getdefaultencoding())
        except UnicodeDecodeError:
return out.decode(ENCODING)
else:
def is_string(s):
try:
return isinstance(s, basestring)
except NameError: # Silence Pyflakes warning
raise
def str_to_bytes(s):
return s
def bytes_to_str(s, errors="strict"):
return s
def write_str(f, msg):
f.write(msg)
def read_line(f):
return f.readline()
def next(it):
return it.next()
try:
from email.charset import Charset
from email.utils import make_msgid
from email.utils import getaddresses
from email.utils import formataddr
from email.utils import formatdate
from email.header import Header
except ImportError:
# Prior to Python 2.5, the email module used different names:
from email.Charset import Charset
from email.Utils import make_msgid
from email.Utils import getaddresses
from email.Utils import formataddr
from email.Utils import formatdate
from email.Header import Header
DEBUG = False
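# When DEBUG is true, expand_header_lines() logs a warning whenever a header
# template line references an unknown variable (the line is skipped either way).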
ZEROS = "0" * 40
LOGBEGIN = (
"- Log -----------------------------------------------------------------\n"
)
LOGEND = (
"-----------------------------------------------------------------------\n"
)
ADDR_HEADERS = set(["from", "to", "cc", "bcc", "reply-to", "sender"])
# It is assumed in many places that the encoding is uniformly UTF-8,
# so changing these constants is unsupported. But define them here
# anyway, to make it easier to find (at least most of) the places
# where the encoding is important.
(ENCODING, CHARSET) = ("UTF-8", "utf-8")
REF_CREATED_SUBJECT_TEMPLATE = (
"%(emailprefix)s%(refname_type)s %(short_refname)s created"
" (now %(newrev_short)s)"
)
REF_UPDATED_SUBJECT_TEMPLATE = (
"%(emailprefix)s%(refname_type)s %(short_refname)s updated"
" (%(oldrev_short)s -> %(newrev_short)s)"
)
REF_DELETED_SUBJECT_TEMPLATE = (
"%(emailprefix)s%(refname_type)s %(short_refname)s deleted"
" (was %(oldrev_short)s)"
)
COMBINED_REFCHANGE_REVISION_SUBJECT_TEMPLATE = (
"%(emailprefix)s%(refname_type)s %(short_refname)s updated: %(oneline)s"
)
REFCHANGE_HEADER_TEMPLATE = """\
Date: %(send_date)s
To: %(recipients)s
Subject: %(subject)s
MIME-Version: 1.0
Content-Type: text/%(contenttype)s; charset=%(charset)s
Content-Transfer-Encoding: 8bit
Message-ID: %(msgid)s
From: %(fromaddr)s
Reply-To: %(reply_to)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
X-Git-Reftype: %(refname_type)s
X-Git-Oldrev: %(oldrev)s
X-Git-Newrev: %(newrev)s
X-Git-NotificationType: ref_changed
X-Git-Multimail-Version: %(multimail_version)s
Auto-Submitted: auto-generated
"""
REFCHANGE_INTRO_TEMPLATE = """\
This is an automated email from the git hooks/post-receive script.
%(pusher)s pushed a change to %(refname_type)s %(short_refname)s
in repository %(repo_shortname)s.
"""
FOOTER_TEMPLATE = """\
-- \n\
To stop receiving notification emails like this one, please contact
%(administrator)s.
"""
REWIND_ONLY_TEMPLATE = """\
This update removed existing revisions from the reference, leaving the
reference pointing at a previous point in the repository history.
* -- * -- N %(refname)s (%(newrev_short)s)
\\
O -- O -- O (%(oldrev_short)s)
Any revisions marked "omit" are not gone; other references still
refer to them. Any revisions marked "discard" are gone forever.
"""
NON_FF_TEMPLATE = """\
This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
%(refname_type)s are not in the new version. This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:
* -- * -- B -- O -- O -- O (%(oldrev_short)s)
\\
N -- N -- N %(refname)s (%(newrev_short)s)
You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.
Any revisions marked "omit" are not gone; other references still
refer to them. Any revisions marked "discard" are gone forever.
"""
NO_NEW_REVISIONS_TEMPLATE = """\
No new revisions were added by this update.
"""
DISCARDED_REVISIONS_TEMPLATE = """\
This change permanently discards the following revisions:
"""
NO_DISCARDED_REVISIONS_TEMPLATE = """\
The revisions that were on this %(refname_type)s are still contained in
other references; therefore, this change does not discard any commits
from the repository.
"""
NEW_REVISIONS_TEMPLATE = """\
The %(tot)s revisions listed above as "new" are entirely new to this
repository and will be described in separate emails. The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
"""
TAG_CREATED_TEMPLATE = """\
at %(newrev_short)-8s (%(newrev_type)s)
"""
TAG_UPDATED_TEMPLATE = """\
*** WARNING: tag %(short_refname)s was modified! ***
from %(oldrev_short)-8s (%(oldrev_type)s)
to %(newrev_short)-8s (%(newrev_type)s)
"""
TAG_DELETED_TEMPLATE = """\
*** WARNING: tag %(short_refname)s was deleted! ***
"""
# The template used in summary tables. It looks best if this uses the
# same alignment as TAG_CREATED_TEMPLATE and TAG_UPDATED_TEMPLATE.
BRIEF_SUMMARY_TEMPLATE = """\
%(action)8s %(rev_short)-8s %(text)s
"""
NON_COMMIT_UPDATE_TEMPLATE = """\
This is an unusual reference change because the reference did not
refer to a commit either before or after the change. We do not know
how to provide full information about this reference change.
"""
REVISION_HEADER_TEMPLATE = """\
Date: %(send_date)s
To: %(recipients)s
Cc: %(cc_recipients)s
Subject: %(emailprefix)s%(num)02d/%(tot)02d: %(oneline)s
MIME-Version: 1.0
Content-Type: text/%(contenttype)s; charset=%(charset)s
Content-Transfer-Encoding: 8bit
From: %(fromaddr)s
Reply-To: %(reply_to)s
In-Reply-To: %(reply_to_msgid)s
References: %(reply_to_msgid)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
X-Git-Reftype: %(refname_type)s
X-Git-Rev: %(rev)s
X-Git-NotificationType: diff
X-Git-Multimail-Version: %(multimail_version)s
Auto-Submitted: auto-generated
"""
REVISION_INTRO_TEMPLATE = """\
This is an automated email from the git hooks/post-receive script.
%(pusher)s pushed a commit to %(refname_type)s %(short_refname)s
in repository %(repo_shortname)s.
"""
LINK_TEXT_TEMPLATE = """\
View the commit online:
%(browse_url)s
"""
LINK_HTML_TEMPLATE = """\
<p><a href="%(browse_url)s">View the commit online</a>.</p>
"""
REVISION_FOOTER_TEMPLATE = FOOTER_TEMPLATE
# Combined, meaning refchange+revision email (for single-commit additions)
COMBINED_HEADER_TEMPLATE = """\
Date: %(send_date)s
To: %(recipients)s
Subject: %(subject)s
MIME-Version: 1.0
Content-Type: text/%(contenttype)s; charset=%(charset)s
Content-Transfer-Encoding: 8bit
Message-ID: %(msgid)s
From: %(fromaddr)s
Reply-To: %(reply_to)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
X-Git-Reftype: %(refname_type)s
X-Git-Oldrev: %(oldrev)s
X-Git-Newrev: %(newrev)s
X-Git-Rev: %(rev)s
X-Git-NotificationType: ref_changed_plus_diff
X-Git-Multimail-Version: %(multimail_version)s
Auto-Submitted: auto-generated
"""
COMBINED_INTRO_TEMPLATE = """\
This is an automated email from the git hooks/post-receive script.
%(pusher)s pushed a commit to %(refname_type)s %(short_refname)s
in repository %(repo_shortname)s.
"""
COMBINED_FOOTER_TEMPLATE = FOOTER_TEMPLATE
class CommandError(Exception):
def __init__(self, cmd, retcode):
self.cmd = cmd
self.retcode = retcode
Exception.__init__(
self,
'Command "%s" failed with retcode %s' % (" ".join(cmd), retcode),
)
class ConfigurationException(Exception):
pass
# The "git" program (this could be changed to include a full path):
GIT_EXECUTABLE = "git"
# How "git" should be invoked (including global arguments), as a list
# of words. This variable is usually initialized automatically by
# read_git_output() via choose_git_command(), but if a value is set
# here then it will be used unconditionally.
GIT_CMD = None
def choose_git_command():
"""Decide how to invoke git, and record the choice in GIT_CMD."""
global GIT_CMD
if GIT_CMD is None:
try:
# Check to see whether the "-c" option is accepted (it was
# only added in Git 1.7.2). We don't actually use the
# output of "git --version", though if we needed more
# specific version information this would be the place to
# do it.
cmd = [GIT_EXECUTABLE, "-c", "foo.bar=baz", "--version"]
read_output(cmd)
GIT_CMD = [
GIT_EXECUTABLE,
"-c",
"i18n.logoutputencoding=%s" % (ENCODING,),
]
except CommandError:
GIT_CMD = [GIT_EXECUTABLE]
def read_git_output(args, input=None, keepends=False, **kw):
"""Read the output of a Git command."""
if GIT_CMD is None:
choose_git_command()
return read_output(GIT_CMD + args, input=input, keepends=keepends, **kw)
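# Example: read_git_output(["rev-parse", "HEAD"]) returns the current commit's
# SHA-1 as a string with the trailing newline stripped.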
def read_output(cmd, input=None, keepends=False, **kw):
if input:
stdin = subprocess.PIPE
input = str_to_bytes(input)
else:
stdin = None
errors = "strict"
if "errors" in kw:
errors = kw["errors"]
del kw["errors"]
p = subprocess.Popen(
tuple(str_to_bytes(w) for w in cmd),
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kw
)
(out, err) = p.communicate(input)
out = bytes_to_str(out, errors=errors)
retcode = p.wait()
if retcode:
raise CommandError(cmd, retcode)
if not keepends:
out = out.rstrip("\n\r")
return out
def read_git_lines(args, keepends=False, **kw):
"""Return the lines output by Git command.
Return as single lines, with newlines stripped off."""
return read_git_output(args, keepends=True, **kw).splitlines(keepends)
def git_rev_list_ish(cmd, spec, args=None, **kw):
"""Common functionality for invoking a 'git rev-list'-like command.
Parameters:
* cmd is the Git command to run, e.g., 'rev-list' or 'log'.
* spec is a list of revision arguments to pass to the named
command. If None, this function returns an empty list.
* args is a list of extra arguments passed to the named command.
* All other keyword arguments (if any) are passed to the
underlying read_git_lines() function.
Return the output of the Git command in the form of a list, one
entry per output line.
"""
if spec is None:
return []
if args is None:
args = []
args = [cmd, "--stdin"] + args
spec_stdin = "".join(s + "\n" for s in spec)
return read_git_lines(args, input=spec_stdin, **kw)
def git_rev_list(spec, **kw):
"""Run 'git rev-list' with the given list of revision arguments.
See git_rev_list_ish() for parameter and return value
documentation.
"""
return git_rev_list_ish("rev-list", spec, **kw)
def git_log(spec, **kw):
"""Run 'git log' with the given list of revision arguments.
See git_rev_list_ish() for parameter and return value
documentation.
"""
return git_rev_list_ish("log", spec, **kw)
def header_encode(text, header_name=None):
"""Encode and line-wrap the value of an email header field."""
# Convert to unicode, if required.
if not isinstance(text, unicode):
text = unicode(text, "utf-8")
if is_ascii(text):
charset = "ascii"
else:
charset = "utf-8"
return Header(
text, header_name=header_name, charset=Charset(charset)
).encode()
def addr_header_encode(text, header_name=None):
"""Encode and line-wrap the value of an email header field containing
email addresses."""
# Convert to unicode, if required.
if not isinstance(text, unicode):
text = unicode(text, "utf-8")
text = ", ".join(
formataddr((header_encode(name), emailaddr))
for name, emailaddr in getaddresses([text])
)
if is_ascii(text):
charset = "ascii"
else:
charset = "utf-8"
return Header(
text, header_name=header_name, charset=Charset(charset)
).encode()
class Config(object):
def __init__(self, section, git_config=None):
"""Represent a section of the git configuration.
If git_config is specified, it is passed to "git config" in
the GIT_CONFIG environment variable, meaning that "git config"
will read the specified path rather than the Git default
config paths."""
self.section = section
if git_config:
self.env = os.environ.copy()
self.env["GIT_CONFIG"] = git_config
else:
self.env = None
@staticmethod
def _split(s):
"""Split NUL-terminated values."""
words = s.split("\0")
assert words[-1] == ""
return words[:-1]
@staticmethod
def add_config_parameters(c):
"""Add configuration parameters to Git.
c is either an str or a list of str, each element being of the
form 'var=val' or 'var', with the same syntax and meaning as
the argument of 'git -c var=val'.
"""
if isinstance(c, str):
c = (c,)
parameters = os.environ.get("GIT_CONFIG_PARAMETERS", "")
if parameters:
parameters += " "
# git expects GIT_CONFIG_PARAMETERS to be of the form
# "'name1=value1' 'name2=value2' 'name3=value3'"
# including everything inside the double quotes (but not the double
# quotes themselves). Spacing is critical. Also, if a value contains
# a literal single quote that quote must be represented using the
# four character sequence: '\''
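        # For example, c = ["user.name=O'Brien"] becomes the single-quoted
        # word 'user.name=O'\''Brien' inside GIT_CONFIG_PARAMETERS.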
parameters += " ".join("'" + x.replace("'", "'\\''") + "'" for x in c)
os.environ["GIT_CONFIG_PARAMETERS"] = parameters
def get(self, name, default=None):
try:
values = self._split(
read_git_output(
[
"config",
"--get",
"--null",
"%s.%s" % (self.section, name),
],
env=self.env,
keepends=True,
)
)
assert len(values) == 1
return values[0]
except CommandError:
return default
def get_bool(self, name, default=None):
try:
value = read_git_output(
["config", "--get", "--bool", "%s.%s" % (self.section, name)],
env=self.env,
)
except CommandError:
return default
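        # "git config --bool" canonicalizes values such as 1/yes/on to the
        # literal strings "true"/"false", so a plain string comparison works.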
return value == "true"
def get_all(self, name, default=None):
"""Read a (possibly multivalued) setting from the configuration.
Return the result as a list of values, or default if the name
is unset."""
try:
return self._split(
read_git_output(
[
"config",
"--get-all",
"--null",
"%s.%s" % (self.section, name),
],
env=self.env,
keepends=True,
)
)
except CommandError:
t, e, traceback = sys.exc_info()
if e.retcode == 1:
# "the section or key is invalid"; i.e., there is no
# value for the specified key.
return default
else:
raise
def set(self, name, value):
read_git_output(
["config", "%s.%s" % (self.section, name), value], env=self.env
)
def add(self, name, value):
read_git_output(
["config", "--add", "%s.%s" % (self.section, name), value],
env=self.env,
)
def __contains__(self, name):
return self.get_all(name, default=None) is not None
# We don't use this method anymore internally, but keep it here in
# case somebody is calling it from their own code:
def has_key(self, name):
return name in self
def unset_all(self, name):
try:
read_git_output(
["config", "--unset-all", "%s.%s" % (self.section, name)],
env=self.env,
)
except CommandError:
t, e, traceback = sys.exc_info()
if e.retcode == 5:
# The name doesn't exist, which is what we wanted anyway...
pass
else:
raise
def set_recipients(self, name, value):
self.unset_all(name)
for pair in getaddresses([value]):
self.add(name, formataddr(pair))
def generate_summaries(*log_args):
"""Generate a brief summary for each revision requested.
log_args are strings that will be passed directly to "git log" as
revision selectors. Iterate over (sha1_short, subject) for each
commit specified by log_args (subject is the first line of the
commit message as a string without EOLs)."""
cmd = ["log", "--abbrev", "--format=%h %s"] + list(log_args) + ["--"]
for line in read_git_lines(cmd):
yield tuple(line.split(" ", 1))
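# Example: generate_summaries("--no-walk", sha1) yields a single
# (short_sha1, subject) pair for that one commit, which is how
# GitObject.get_summary() uses it below.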
def limit_lines(lines, max_lines):
for (index, line) in enumerate(lines):
if index < max_lines:
yield line
if index >= max_lines:
yield "... %d lines suppressed ...\n" % (index + 1 - max_lines,)
def limit_linelength(lines, max_linelength):
for line in lines:
# Don't forget that lines always include a trailing newline.
if len(line) > max_linelength + 1:
line = line[: max_linelength - 7] + " [...]\n"
yield line
class CommitSet(object):
"""A (constant) set of object names.
The set should be initialized with full SHA1 object names. The
__contains__() method returns True iff its argument is an
    abbreviation of any of the names in the set."""
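    # The names are stored sorted so __contains__ can use bisect for an
    # O(log n) prefix lookup.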
def __init__(self, names):
self._names = sorted(names)
def __len__(self):
return len(self._names)
def __contains__(self, sha1_abbrev):
"""Return True iff this set contains sha1_abbrev (which might be abbreviated)."""
i = bisect.bisect_left(self._names, sha1_abbrev)
return i < len(self) and self._names[i].startswith(sha1_abbrev)
class GitObject(object):
def __init__(self, sha1, type=None):
if sha1 == ZEROS:
self.sha1 = self.type = self.commit_sha1 = None
else:
self.sha1 = sha1
self.type = type or read_git_output(["cat-file", "-t", self.sha1])
if self.type == "commit":
self.commit_sha1 = self.sha1
elif self.type == "tag":
try:
self.commit_sha1 = read_git_output(
["rev-parse", "--verify", "%s^0" % (self.sha1,)]
)
except CommandError:
# Cannot deref tag to determine commit_sha1
self.commit_sha1 = None
else:
self.commit_sha1 = None
self.short = read_git_output(["rev-parse", "--short", sha1])
def get_summary(self):
"""Return (sha1_short, subject) for this commit."""
if not self.sha1:
raise ValueError("Empty commit has no summary")
return next(iter(generate_summaries("--no-walk", self.sha1)))
def __eq__(self, other):
return isinstance(other, GitObject) and self.sha1 == other.sha1
def __hash__(self):
return hash(self.sha1)
def __nonzero__(self):
return bool(self.sha1)
def __bool__(self):
"""Python 2 backward compatibility"""
return self.__nonzero__()
def __str__(self):
return self.sha1 or ZEROS
class Change(object):
"""A Change that has been made to the Git repository.
Abstract class from which both Revisions and ReferenceChanges are
derived. A Change knows how to generate a notification email
describing itself."""
def __init__(self, environment):
self.environment = environment
self._values = None
self._contains_html_diff = False
def _contains_diff(self):
# We do contain a diff, should it be rendered in HTML?
if self.environment.commit_email_format == "html":
self._contains_html_diff = True
def _compute_values(self):
"""Return a dictionary {keyword: expansion} for this Change.
Derived classes overload this method to add more entries to
the return value. This method is used internally by
get_values(). The return value should always be a new
dictionary."""
values = self.environment.get_values()
fromaddr = self.environment.get_fromaddr(change=self)
if fromaddr is not None:
values["fromaddr"] = fromaddr
values["multimail_version"] = get_version()
return values
# Aliases usable in template strings. Tuple of pairs (destination,
# source).
VALUES_ALIAS = (("id", "newrev"),)
def get_values(self, **extra_values):
"""Return a dictionary {keyword: expansion} for this Change.
Return a dictionary mapping keywords to the values that they
should be expanded to for this Change (used when interpolating
template strings). If any keyword arguments are supplied, add
those to the return value as well. The return value is always
a new dictionary."""
if self._values is None:
self._values = self._compute_values()
values = self._values.copy()
if extra_values:
values.update(extra_values)
for alias, val in self.VALUES_ALIAS:
values[alias] = values[val]
return values
def expand(self, template, **extra_values):
"""Expand template.
Expand the template (which should be a string) using string
interpolation of the values for this Change. If any keyword
arguments are provided, also include those in the keywords
available for interpolation."""
return template % self.get_values(**extra_values)
def expand_lines(self, template, html_escape_val=False, **extra_values):
"""Break template into lines and expand each line."""
values = self.get_values(**extra_values)
if html_escape_val:
for k in values:
if is_string(values[k]):
values[k] = cgi.escape(values[k], True)
for line in template.splitlines(True):
yield line % values
def expand_header_lines(self, template, **extra_values):
"""Break template into lines and expand each line as an RFC 2822 header.
Encode values and split up lines that are too long. Silently
skip lines that contain references to unknown variables."""
values = self.get_values(**extra_values)
if self._contains_html_diff:
self._content_type = "html"
else:
self._content_type = "plain"
values["contenttype"] = self._content_type
for line in template.splitlines():
(name, value) = line.split(": ", 1)
try:
value = value % values
except KeyError:
t, e, traceback = sys.exc_info()
if DEBUG:
self.environment.log_warning(
"Warning: unknown variable %r in the following line; line skipped:\n"
" %s\n" % (e.args[0], line)
)
else:
if name.lower() in ADDR_HEADERS:
value = addr_header_encode(value, name)
else:
value = header_encode(value, name)
for splitline in ("%s: %s\n" % (name, value)).splitlines(True):
yield splitline
def generate_email_header(self):
"""Generate the RFC 2822 email headers for this Change, a line at a time.
The output should not include the trailing blank line."""
raise NotImplementedError()
def generate_browse_link(self, base_url):
"""Generate a link to an online repository browser."""
return iter(())
def generate_email_intro(self, html_escape_val=False):
"""Generate the email intro for this Change, a line at a time.
The output will be used as the standard boilerplate at the top
of the email body."""
raise NotImplementedError()
def generate_email_body(self):
"""Generate the main part of the email body, a line at a time.
The text in the body might be truncated after a specified
number of lines (see multimailhook.emailmaxlines)."""
raise NotImplementedError()
def generate_email_footer(self, html_escape_val):
"""Generate the footer of the email, a line at a time.
The footer is always included, irrespective of
multimailhook.emailmaxlines."""
raise NotImplementedError()
def _wrap_for_html(self, lines):
"""Wrap the lines in HTML <pre> tag when using HTML format.
Escape special HTML characters and add <pre> and </pre> tags around
the given lines if we should be generating HTML as indicated by
self._contains_html_diff being set to true.
"""
if self._contains_html_diff:
yield "<pre style='margin:0'>\n"
for line in lines:
yield cgi.escape(line)
yield "</pre>\n"
else:
for line in lines:
yield line
def generate_email(self, push, body_filter=None, extra_header_values={}):
"""Generate an email describing this change.
Iterate over the lines (including the header lines) of an
email describing this change. If body_filter is not None,
then use it to filter the lines that are intended for the
email body.
The extra_header_values field is received as a dict and not as
**kwargs, to allow passing other keyword arguments in the
        future (e.g. passing extra values to generate_email_intro())."""
for line in self.generate_email_header(**extra_header_values):
yield line
yield "\n"
html_escape_val = (
self.environment.html_in_intro and self._contains_html_diff
)
intro = self.generate_email_intro(html_escape_val)
if not self.environment.html_in_intro:
intro = self._wrap_for_html(intro)
for line in intro:
yield line
if self.environment.commitBrowseURL:
for line in self.generate_browse_link(
self.environment.commitBrowseURL
):
yield line
body = self.generate_email_body(push)
if body_filter is not None:
body = body_filter(body)
diff_started = False
if self._contains_html_diff:
# "white-space: pre" is the default, but we need to
# specify it again in case the message is viewed in a
# webmail which wraps it in an element setting white-space
# to something else (Zimbra does this and sets
# white-space: pre-line).
yield '<pre style="white-space: pre; background: #F8F8F8">'
for line in body:
if self._contains_html_diff:
# This is very, very naive. It would be much better to really
                # parse the diff, i.e. look at how many lines we have in
# the hunk headers instead of blindly highlighting everything
# that looks like it might be part of a diff.
bgcolor = ""
fgcolor = ""
if line.startswith("--- a/"):
diff_started = True
bgcolor = "e0e0ff"
elif line.startswith("diff ") or line.startswith("index "):
diff_started = True
fgcolor = "808080"
elif diff_started:
if line.startswith("+++ "):
bgcolor = "e0e0ff"
elif line.startswith("@@"):
bgcolor = "e0e0e0"
elif line.startswith("+"):
bgcolor = "e0ffe0"
elif line.startswith("-"):
bgcolor = "ffe0e0"
elif line.startswith("commit "):
fgcolor = "808000"
elif line.startswith(" "):
fgcolor = "404040"
# Chop the trailing LF, we don't want it inside <pre>.
line = cgi.escape(line[:-1])
if bgcolor or fgcolor:
style = "display:block; white-space:pre;"
if bgcolor:
style += "background:#" + bgcolor + ";"
if fgcolor:
style += "color:#" + fgcolor + ";"
# Use a <span style='display:block> to color the
# whole line. The newline must be inside the span
# to display properly both in Firefox and in
# text-based browser.
line = "<span style='%s'>%s\n</span>" % (style, line)
else:
line = line + "\n"
yield line
if self._contains_html_diff:
yield "</pre>"
html_escape_val = (
self.environment.html_in_footer and self._contains_html_diff
)
footer = self.generate_email_footer(html_escape_val)
if not self.environment.html_in_footer:
footer = self._wrap_for_html(footer)
for line in footer:
yield line
def get_specific_fromaddr(self):
"""For kinds of Changes which specify it, return the kind-specific
From address to use."""
return None
class Revision(Change):
"""A Change consisting of a single git commit."""
CC_RE = re.compile(r"^\s*C[Cc]:\s*(?P<to>[^#]+@[^\s#]*)\s*(#.*)?$")
def __init__(self, reference_change, rev, num, tot):
Change.__init__(self, reference_change.environment)
self.reference_change = reference_change
self.rev = rev
self.change_type = self.reference_change.change_type
self.refname = self.reference_change.refname
self.num = num
self.tot = tot
self.author = read_git_output(
["log", "--no-walk", "--format=%aN <%aE>", self.rev.sha1]
)
self.recipients = self.environment.get_revision_recipients(self)
self.cc_recipients = ""
if self.environment.get_scancommitforcc():
self.cc_recipients = ", ".join(
to.strip() for to in self._cc_recipients()
)
if self.cc_recipients:
self.environment.log_msg(
"Add %s to CC for %s" % (self.cc_recipients, self.rev.sha1)
)
def _cc_recipients(self):
cc_recipients = []
message = read_git_output(
["log", "--no-walk", "--format=%b", self.rev.sha1]
)
lines = message.strip().split("\n")
for line in lines:
m = re.match(self.CC_RE, line)
if m:
cc_recipients.append(m.group("to"))
return cc_recipients
def _compute_values(self):
values = Change._compute_values(self)
oneline = read_git_output(
["log", "--format=%s", "--no-walk", self.rev.sha1]
)
max_subject_length = self.environment.get_max_subject_length()
if max_subject_length > 0 and len(oneline) > max_subject_length:
oneline = oneline[: max_subject_length - 6] + " [...]"
values["rev"] = self.rev.sha1
values["rev_short"] = self.rev.short
values["change_type"] = self.change_type
values["refname"] = self.refname
values["newrev"] = self.rev.sha1
values["short_refname"] = self.reference_change.short_refname
values["refname_type"] = self.reference_change.refname_type
values["reply_to_msgid"] = self.reference_change.msgid
values["num"] = self.num
values["tot"] = self.tot
values["recipients"] = self.recipients
if self.cc_recipients:
values["cc_recipients"] = self.cc_recipients
values["oneline"] = oneline
values["author"] = self.author
reply_to = self.environment.get_reply_to_commit(self)
if reply_to:
values["reply_to"] = reply_to
return values
def generate_email_header(self, **extra_values):
for line in self.expand_header_lines(
REVISION_HEADER_TEMPLATE, **extra_values
):
yield line
def generate_browse_link(self, base_url):
if "%(" not in base_url:
base_url += "%(id)s"
url = "".join(self.expand_lines(base_url))
if self._content_type == "html":
for line in self.expand_lines(
LINK_HTML_TEMPLATE, html_escape_val=True, browse_url=url
):
yield line
elif self._content_type == "plain":
for line in self.expand_lines(
LINK_TEXT_TEMPLATE, html_escape_val=False, browse_url=url
):
yield line
else:
            raise NotImplementedError(
                "Content-type %s unsupported. Please report it as a bug."
                % (self._content_type,)
            )
def generate_email_intro(self, html_escape_val=False):
for line in self.expand_lines(
REVISION_INTRO_TEMPLATE, html_escape_val=html_escape_val
):
yield line
def generate_email_body(self, push):
"""Show this revision."""
for line in read_git_lines(
["log"] + self.environment.commitlogopts + ["-1", self.rev.sha1],
keepends=True,
errors="replace",
):
if (
line.startswith("Date: ")
and self.environment.date_substitute
):
yield self.environment.date_substitute + line[
len("Date: ") :
]
else:
yield line
def generate_email_footer(self, html_escape_val):
return self.expand_lines(
REVISION_FOOTER_TEMPLATE, html_escape_val=html_escape_val
)
def generate_email(self, push, body_filter=None, extra_header_values={}):
self._contains_diff()
return Change.generate_email(
self, push, body_filter, extra_header_values
)
def get_specific_fromaddr(self):
return self.environment.from_commit
class ReferenceChange(Change):
"""A Change to a Git reference.
An abstract class representing a create, update, or delete of a
Git reference. Derived classes handle specific types of reference
(e.g., tags vs. branches). These classes generate the main
reference change email summarizing the reference change and
    whether it caused any commits to be added or removed.
ReferenceChange objects are usually created using the static
create() method, which has the logic to decide which derived class
to instantiate."""
REF_RE = re.compile(r"^refs\/(?P<area>[^\/]+)\/(?P<shortname>.*)$")
@staticmethod
def create(environment, oldrev, newrev, refname):
"""Return a ReferenceChange object representing the change.
Return an object that represents the type of change that is being
made. oldrev and newrev should be SHA1s or ZEROS."""
old = GitObject(oldrev)
new = GitObject(newrev)
rev = new or old
# The revision type tells us what type the commit is, combined with
# the location of the ref we can decide between
# - working branch
# - tracking branch
# - unannotated tag
# - annotated tag
m = ReferenceChange.REF_RE.match(refname)
if m:
area = m.group("area")
short_refname = m.group("shortname")
else:
area = ""
short_refname = refname
if rev.type == "tag":
# Annotated tag:
klass = AnnotatedTagChange
elif rev.type == "commit":
if area == "tags":
# Non-annotated tag:
klass = NonAnnotatedTagChange
elif area == "heads":
# Branch:
klass = BranchChange
elif area == "remotes":
# Tracking branch:
environment.log_warning(
"*** Push-update of tracking branch %r\n"
"*** - incomplete email generated." % (refname,)
)
klass = OtherReferenceChange
else:
# Some other reference namespace:
environment.log_warning(
"*** Push-update of strange reference %r\n"
"*** - incomplete email generated." % (refname,)
)
klass = OtherReferenceChange
else:
# Anything else (is there anything else?)
environment.log_warning(
"*** Unknown type of update to %r (%s)\n"
"*** - incomplete email generated." % (refname, rev.type)
)
klass = OtherReferenceChange
return klass(
environment,
refname=refname,
short_refname=short_refname,
old=old,
new=new,
rev=rev,
)
def __init__(self, environment, refname, short_refname, old, new, rev):
Change.__init__(self, environment)
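        # Classify the change by which end of the update exists:
        # (no old, new) -> "create", (old, new) -> "update",
        # (old, no new) -> "delete".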
self.change_type = {
(False, True): "create",
(True, True): "update",
(True, False): "delete",
}[bool(old), bool(new)]
self.refname = refname
self.short_refname = short_refname
self.old = old
self.new = new
self.rev = rev
self.msgid = make_msgid()
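        # Message-ID of this refchange email; individual Revision emails put it
        # in their In-Reply-To/References headers so they thread underneath it.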
self.diffopts = environment.diffopts
self.graphopts = environment.graphopts
self.logopts = environment.logopts
self.commitlogopts = environment.commitlogopts
self.showgraph = environment.refchange_showgraph
self.showlog = environment.refchange_showlog
self.header_template = REFCHANGE_HEADER_TEMPLATE
self.intro_template = REFCHANGE_INTRO_TEMPLATE
self.footer_template = FOOTER_TEMPLATE
def _compute_values(self):
values = Change._compute_values(self)
values["change_type"] = self.change_type
values["refname_type"] = self.refname_type
values["refname"] = self.refname
values["short_refname"] = self.short_refname
values["msgid"] = self.msgid
values["recipients"] = self.recipients
values["oldrev"] = str(self.old)
values["oldrev_short"] = self.old.short
values["newrev"] = str(self.new)
values["newrev_short"] = self.new.short
if self.old:
values["oldrev_type"] = self.old.type
if self.new:
values["newrev_type"] = self.new.type
reply_to = self.environment.get_reply_to_refchange(self)
if reply_to:
values["reply_to"] = reply_to
return values
def send_single_combined_email(self, known_added_sha1s):
"""Determine if a combined refchange/revision email should be sent
If there is only a single new (non-merge) commit added by a
change, it is useful to combine the ReferenceChange and
Revision emails into one. In such a case, return the single
revision; otherwise, return None.
This method is overridden in BranchChange."""
return None
def generate_combined_email(
self, push, revision, body_filter=None, extra_header_values={}
):
"""Generate an email describing this change AND specified revision.
Iterate over the lines (including the header lines) of an
email describing this change. If body_filter is not None,
then use it to filter the lines that are intended for the
email body.
The extra_header_values field is received as a dict and not as
**kwargs, to allow passing other keyword arguments in the
        future (e.g. passing extra values to generate_email_intro()).
This method is overridden in BranchChange."""
raise NotImplementedError
def get_subject(self):
template = {
"create": REF_CREATED_SUBJECT_TEMPLATE,
"update": REF_UPDATED_SUBJECT_TEMPLATE,
"delete": REF_DELETED_SUBJECT_TEMPLATE,
}[self.change_type]
return self.expand(template)
def generate_email_header(self, **extra_values):
if "subject" not in extra_values:
extra_values["subject"] = self.get_subject()
for line in self.expand_header_lines(
self.header_template, **extra_values
):
yield line
def generate_email_intro(self, html_escape_val=False):
for line in self.expand_lines(
self.intro_template, html_escape_val=html_escape_val
):
yield line
def generate_email_body(self, push):
"""Call the appropriate body-generation routine.
Call one of generate_create_summary() /
generate_update_summary() / generate_delete_summary()."""
change_summary = {
"create": self.generate_create_summary,
"delete": self.generate_delete_summary,
"update": self.generate_update_summary,
}[self.change_type](push)
for line in change_summary:
yield line
for line in self.generate_revision_change_summary(push):
yield line
def generate_email_footer(self, html_escape_val):
return self.expand_lines(
self.footer_template, html_escape_val=html_escape_val
)
def generate_revision_change_graph(self, push):
if self.showgraph:
args = ["--graph"] + self.graphopts
for newold in ("new", "old"):
has_newold = False
spec = push.get_commits_spec(newold, self)
for line in git_log(spec, args=args, keepends=True):
if not has_newold:
has_newold = True
yield "\n"
yield "Graph of %s commits:\n\n" % (
{"new": "new", "old": "discarded"}[newold],
)
yield " " + line
if has_newold:
yield "\n"
def generate_revision_change_log(self, new_commits_list):
if self.showlog:
yield "\n"
yield "Detailed log of new commits:\n\n"
for line in read_git_lines(
["log", "--no-walk"]
+ self.logopts
+ new_commits_list
+ ["--"],
keepends=True,
):
yield line
def generate_new_revision_summary(self, tot, new_commits_list, push):
for line in self.expand_lines(NEW_REVISIONS_TEMPLATE, tot=tot):
yield line
for line in self.generate_revision_change_graph(push):
yield line
for line in self.generate_revision_change_log(new_commits_list):
yield line
def generate_revision_change_summary(self, push):
"""Generate a summary of the revisions added/removed by this change."""
if self.new.commit_sha1 and not self.old.commit_sha1:
# A new reference was created. List the new revisions
# brought by the new reference (i.e., those revisions that
# were not in the repository before this reference
# change).
sha1s = list(push.get_new_commits(self))
sha1s.reverse()
tot = len(sha1s)
new_revisions = [
Revision(self, GitObject(sha1), num=i + 1, tot=tot)
for (i, sha1) in enumerate(sha1s)
]
if new_revisions:
yield self.expand(
"This %(refname_type)s includes the following new commits:\n"
)
yield "\n"
for r in new_revisions:
(sha1, subject) = r.rev.get_summary()
yield r.expand(
BRIEF_SUMMARY_TEMPLATE, action="new", text=subject
)
yield "\n"
for line in self.generate_new_revision_summary(
tot, [r.rev.sha1 for r in new_revisions], push
):
yield line
else:
for line in self.expand_lines(NO_NEW_REVISIONS_TEMPLATE):
yield line
elif self.new.commit_sha1 and self.old.commit_sha1:
# A reference was changed to point at a different commit.
# List the revisions that were removed and/or added *from
# that reference* by this reference change, along with a
# diff between the trees for its old and new values.
# List of the revisions that were added to the branch by
# this update. Note this list can include revisions that
# have already had notification emails; we want such
# revisions in the summary even though we will not send
# new notification emails for them.
adds = list(
generate_summaries(
"--topo-order",
"--reverse",
"%s..%s" % (self.old.commit_sha1, self.new.commit_sha1),
)
)
# List of the revisions that were removed from the branch
# by this update. This will be empty except for
# non-fast-forward updates.
discards = list(
generate_summaries(
"%s..%s" % (self.new.commit_sha1, self.old.commit_sha1)
)
)
if adds:
new_commits_list = push.get_new_commits(self)
else:
new_commits_list = []
new_commits = CommitSet(new_commits_list)
if discards:
discarded_commits = CommitSet(push.get_discarded_commits(self))
else:
discarded_commits = CommitSet([])
if discards and adds:
for (sha1, subject) in discards:
if sha1 in discarded_commits:
action = "discard"
else:
action = "omit"
yield self.expand(
BRIEF_SUMMARY_TEMPLATE,
action=action,
rev_short=sha1,
text=subject,
)
for (sha1, subject) in adds:
if sha1 in new_commits:
action = "new"
else:
action = "add"
yield self.expand(
BRIEF_SUMMARY_TEMPLATE,
action=action,
rev_short=sha1,
text=subject,
)
yield "\n"
for line in self.expand_lines(NON_FF_TEMPLATE):
yield line
elif discards:
for (sha1, subject) in discards:
if sha1 in discarded_commits:
action = "discard"
else:
action = "omit"
yield self.expand(
BRIEF_SUMMARY_TEMPLATE,
action=action,
rev_short=sha1,
text=subject,
)
yield "\n"
for line in self.expand_lines(REWIND_ONLY_TEMPLATE):
yield line
elif adds:
(sha1, subject) = self.old.get_summary()
yield self.expand(
BRIEF_SUMMARY_TEMPLATE,
action="from",
rev_short=sha1,
text=subject,
)
for (sha1, subject) in adds:
if sha1 in new_commits:
action = "new"
else:
action = "add"
yield self.expand(
BRIEF_SUMMARY_TEMPLATE,
action=action,
rev_short=sha1,
text=subject,
)
yield "\n"
if new_commits:
for line in self.generate_new_revision_summary(
len(new_commits), new_commits_list, push
):
yield line
else:
for line in self.expand_lines(NO_NEW_REVISIONS_TEMPLATE):
yield line
for line in self.generate_revision_change_graph(push):
yield line
# The diffstat is shown from the old revision to the new
# revision. This is to show the truth of what happened in
# this change. There's no point showing the stat from the
# base to the new revision because the base is effectively a
# random revision at this point - the user will be interested
# in what this revision changed - including the undoing of
# previous revisions in the case of non-fast-forward updates.
yield "\n"
yield "Summary of changes:\n"
for line in read_git_lines(
["diff-tree"]
+ self.diffopts
+ ["%s..%s" % (self.old.commit_sha1, self.new.commit_sha1)],
keepends=True,
):
yield line
elif self.old.commit_sha1 and not self.new.commit_sha1:
# A reference was deleted. List the revisions that were
# removed from the repository by this reference change.
sha1s = list(push.get_discarded_commits(self))
tot = len(sha1s)
discarded_revisions = [
Revision(self, GitObject(sha1), num=i + 1, tot=tot)
for (i, sha1) in enumerate(sha1s)
]
if discarded_revisions:
for line in self.expand_lines(DISCARDED_REVISIONS_TEMPLATE):
yield line
yield "\n"
for r in discarded_revisions:
(sha1, subject) = r.rev.get_summary()
yield r.expand(
BRIEF_SUMMARY_TEMPLATE, action="discard", text=subject
)
for line in self.generate_revision_change_graph(push):
yield line
else:
for line in self.expand_lines(NO_DISCARDED_REVISIONS_TEMPLATE):
yield line
elif not self.old.commit_sha1 and not self.new.commit_sha1:
for line in self.expand_lines(NON_COMMIT_UPDATE_TEMPLATE):
yield line
def generate_create_summary(self, push):
"""Called for the creation of a reference."""
# This is a new reference and so oldrev is not valid
(sha1, subject) = self.new.get_summary()
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action="at", rev_short=sha1, text=subject
)
yield "\n"
def generate_update_summary(self, push):
"""Called for the change of a pre-existing branch."""
return iter([])
def generate_delete_summary(self, push):
"""Called for the deletion of any type of reference."""
(sha1, subject) = self.old.get_summary()
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action="was", rev_short=sha1, text=subject
)
yield "\n"
def get_specific_fromaddr(self):
return self.environment.from_refchange
class BranchChange(ReferenceChange):
refname_type = "branch"
def __init__(self, environment, refname, short_refname, old, new, rev):
ReferenceChange.__init__(
self,
environment,
refname=refname,
short_refname=short_refname,
old=old,
new=new,
rev=rev,
)
self.recipients = environment.get_refchange_recipients(self)
self._single_revision = None
def send_single_combined_email(self, known_added_sha1s):
if not self.environment.combine_when_single_commit:
return None
# In the sadly-all-too-frequent usecase of people pushing only
# one of their commits at a time to a repository, users feel
# the reference change summary emails are noise rather than
# important signal. This is because, in this particular
# usecase, there is a reference change summary email for each
# new commit, and all these summaries do is point out that
# there is one new commit (which can readily be inferred by
# the existence of the individual revision email that is also
# sent). In such cases, our users prefer there to be a combined
# reference change summary/new revision email.
#
# So, if the change is an update and it doesn't discard any
# commits, and it adds exactly one non-merge commit (gerrit
# forces a workflow where every commit is individually merged
# and the git-multimail hook fired off for just this one
# change), then we send a combined refchange/revision email.
try:
# If this change is a reference update that doesn't discard
# any commits...
if self.change_type != "update":
return None
if read_git_lines(
["merge-base", self.old.sha1, self.new.sha1]
) != [self.old.sha1]:
return None
# Check if this update introduced exactly one non-merge
# commit:
def split_line(line):
"""Split line into (sha1, [parent,...])."""
words = line.split()
return (words[0], words[1:])
# Get the new commits introduced by the push as a list of
# (sha1, [parent,...])
new_commits = [
split_line(line)
for line in read_git_lines(
[
"log",
"-3",
"--format=%H %P",
"%s..%s" % (self.old.sha1, self.new.sha1),
]
)
]
if not new_commits:
return None
# If the newest commit is a merge, save it for a later check
# but otherwise ignore it
merge = None
tot = len(new_commits)
if len(new_commits[0][1]) > 1:
merge = new_commits[0][0]
del new_commits[0]
# Our primary check: we can't combine if more than one commit
# is introduced. We also currently only combine if the new
# commit is a non-merge commit, though it may make sense to
# combine if it is a merge as well.
if not (
len(new_commits) == 1
and len(new_commits[0][1]) == 1
and new_commits[0][0] in known_added_sha1s
):
return None
# We do not want to combine revision and refchange emails if
# those go to separate locations.
rev = Revision(self, GitObject(new_commits[0][0]), 1, tot)
if rev.recipients != self.recipients:
return None
# We ignored the newest commit if it was just a merge of the one
# commit being introduced. But we don't want to ignore that
            # merge commit if it involved conflict resolutions. Check that.
if merge and merge != read_git_output(
["diff-tree", "--cc", merge]
):
return None
# We can combine the refchange and one new revision emails
# into one. Return the Revision that a combined email should
# be sent about.
return rev
except CommandError:
# Cannot determine number of commits in old..new or new..old;
# don't combine reference/revision emails:
return None
def generate_combined_email(
self, push, revision, body_filter=None, extra_header_values={}
):
values = revision.get_values()
if extra_header_values:
values.update(extra_header_values)
if "subject" not in extra_header_values:
values["subject"] = self.expand(
COMBINED_REFCHANGE_REVISION_SUBJECT_TEMPLATE, **values
)
self._single_revision = revision
self._contains_diff()
self.header_template = COMBINED_HEADER_TEMPLATE
self.intro_template = COMBINED_INTRO_TEMPLATE
self.footer_template = COMBINED_FOOTER_TEMPLATE
def revision_gen_link(base_url):
# revision is used only to generate the body, and
# _content_type is set while generating headers. Get it
# from the BranchChange object.
revision._content_type = self._content_type
return revision.generate_browse_link(base_url)
self.generate_browse_link = revision_gen_link
for line in self.generate_email(push, body_filter, values):
yield line
def generate_email_body(self, push):
"""Call the appropriate body generation routine.
If this is a combined refchange/revision email, the special logic
for handling this combined email comes from this function. For
other cases, we just use the normal handling."""
        # If self._single_revision isn't set, don't override.
if not self._single_revision:
for line in super(BranchChange, self).generate_email_body(push):
yield line
return
# This is a combined refchange/revision email; we first provide
# some info from the refchange portion, and then call the revision
# generate_email_body function to handle the revision portion.
adds = list(
generate_summaries(
"--topo-order",
"--reverse",
"%s..%s" % (self.old.commit_sha1, self.new.commit_sha1),
)
)
yield self.expand(
"The following commit(s) were added to %(refname)s by this push:\n"
)
for (sha1, subject) in adds:
yield self.expand(
BRIEF_SUMMARY_TEMPLATE,
action="new",
rev_short=sha1,
text=subject,
)
yield self._single_revision.rev.short + " is described below\n"
yield "\n"
for line in self._single_revision.generate_email_body(push):
yield line
class AnnotatedTagChange(ReferenceChange):
refname_type = "annotated tag"
def __init__(self, environment, refname, short_refname, old, new, rev):
ReferenceChange.__init__(
self,
environment,
refname=refname,
short_refname=short_refname,
old=old,
new=new,
rev=rev,
)
self.recipients = environment.get_announce_recipients(self)
self.show_shortlog = environment.announce_show_shortlog
ANNOTATED_TAG_FORMAT = (
"%(*objectname)\n" "%(*objecttype)\n" "%(taggername)\n" "%(taggerdate)"
)
def describe_tag(self, push):
"""Describe the new value of an annotated tag."""
# Use git for-each-ref to pull out the individual fields from
# the tag
[tagobject, tagtype, tagger, tagged] = read_git_lines(
[
"for-each-ref",
"--format=%s" % (self.ANNOTATED_TAG_FORMAT,),
self.refname,
]
)
yield self.expand(
BRIEF_SUMMARY_TEMPLATE,
action="tagging",
rev_short=tagobject,
text="(%s)" % (tagtype,),
)
if tagtype == "commit":
# If the tagged object is a commit, then we assume this is a
# release, and so we calculate which tag this tag is
# replacing
try:
prevtag = read_git_output(
["describe", "--abbrev=0", "%s^" % (self.new,)]
)
except CommandError:
prevtag = None
if prevtag:
yield " replaces %s\n" % (prevtag,)
else:
prevtag = None
yield " length %s bytes\n" % (
read_git_output(["cat-file", "-s", tagobject]),
)
yield " by %s\n" % (tagger,)
yield " on %s\n" % (tagged,)
yield "\n"
# Show the content of the tag message; this might contain a
# change log or release notes so is worth displaying.
yield LOGBEGIN
contents = list(
read_git_lines(["cat-file", "tag", self.new.sha1], keepends=True)
)
contents = contents[contents.index("\n") + 1 :]
if contents and contents[-1][-1:] != "\n":
contents.append("\n")
for line in contents:
yield line
if self.show_shortlog and tagtype == "commit":
# Only commit tags make sense to have rev-list operations
# performed on them
yield "\n"
if prevtag:
# Show changes since the previous release
revlist = read_git_output(
[
"rev-list",
"--pretty=short",
"%s..%s" % (prevtag, self.new),
],
keepends=True,
)
else:
# No previous tag, show all the changes since time
# began
revlist = read_git_output(
["rev-list", "--pretty=short", "%s" % (self.new,)],
keepends=True,
)
for line in read_git_lines(
["shortlog"], input=revlist, keepends=True
):
yield line
yield LOGEND
yield "\n"
def generate_create_summary(self, push):
"""Called for the creation of an annotated tag."""
for line in self.expand_lines(TAG_CREATED_TEMPLATE):
yield line
for line in self.describe_tag(push):
yield line
def generate_update_summary(self, push):
"""Called for the update of an annotated tag.
This is probably a rare event and may not even be allowed."""
for line in self.expand_lines(TAG_UPDATED_TEMPLATE):
yield line
for line in self.describe_tag(push):
yield line
def generate_delete_summary(self, push):
"""Called when a non-annotated reference is updated."""
for line in self.expand_lines(TAG_DELETED_TEMPLATE):
yield line
yield self.expand(" tag was %(oldrev_short)s\n")
yield "\n"
class NonAnnotatedTagChange(ReferenceChange):
refname_type = "tag"
def __init__(self, environment, refname, short_refname, old, new, rev):
ReferenceChange.__init__(
self,
environment,
refname=refname,
short_refname=short_refname,
old=old,
new=new,
rev=rev,
)
self.recipients = environment.get_refchange_recipients(self)
def generate_create_summary(self, push):
"""Called for the creation of an annotated tag."""
for line in self.expand_lines(TAG_CREATED_TEMPLATE):
yield line
def generate_update_summary(self, push):
"""Called when a non-annotated reference is updated."""
for line in self.expand_lines(TAG_UPDATED_TEMPLATE):
yield line
def generate_delete_summary(self, push):
"""Called when a non-annotated reference is updated."""
for line in self.expand_lines(TAG_DELETED_TEMPLATE):
yield line
for line in ReferenceChange.generate_delete_summary(self, push):
yield line
class OtherReferenceChange(ReferenceChange):
refname_type = "reference"
def __init__(self, environment, refname, short_refname, old, new, rev):
# We use the full refname as short_refname, because otherwise
# the full name of the reference would not be obvious from the
# text of the email.
ReferenceChange.__init__(
self,
environment,
refname=refname,
short_refname=refname,
old=old,
new=new,
rev=rev,
)
self.recipients = environment.get_refchange_recipients(self)
class Mailer(object):
"""An object that can send emails."""
def __init__(self, environment):
self.environment = environment
def send(self, lines, to_addrs):
"""Send an email consisting of lines.
lines must be an iterable over the lines constituting the
header and body of the email. to_addrs is a list of recipient
        addresses (may be needed even if lines already contain a
"To:" field). It can be either a string (comma-separated list
of email addresses) or a Python list of individual email
addresses.
"""
raise NotImplementedError()
class SendMailer(Mailer):
"""Send emails using 'sendmail -oi -t'."""
SENDMAIL_CANDIDATES = ["/usr/sbin/sendmail", "/usr/lib/sendmail"]
@staticmethod
def find_sendmail():
for path in SendMailer.SENDMAIL_CANDIDATES:
if os.access(path, os.X_OK):
return path
else:
raise ConfigurationException(
"No sendmail executable found. "
"Try setting multimailhook.sendmailCommand."
)
def __init__(self, environment, command=None, envelopesender=None):
"""Construct a SendMailer instance.
command should be the command and arguments used to invoke
sendmail, as a list of strings. If an envelopesender is
provided, it will also be passed to the command, via '-f
envelopesender'."""
super(SendMailer, self).__init__(environment)
if command:
self.command = command[:]
else:
self.command = [self.find_sendmail(), "-oi", "-t"]
if envelopesender:
self.command.extend(["-f", envelopesender])
def send(self, lines, to_addrs):
try:
p = subprocess.Popen(self.command, stdin=subprocess.PIPE)
except OSError:
self.environment.get_logger().error(
"*** Cannot execute command: %s\n" % " ".join(self.command)
+ "*** %s\n" % sys.exc_info()[1]
+ '*** Try setting multimailhook.mailer to "smtp"\n'
+ "*** to send emails without using the sendmail command.\n"
)
sys.exit(1)
try:
lines = (str_to_bytes(line) for line in lines)
p.stdin.writelines(lines)
except Exception:
self.environment.get_logger().error(
"*** Error while generating commit email\n"
"*** - mail sending aborted.\n"
)
if hasattr(p, "terminate"):
# subprocess.terminate() is not available in Python 2.4
p.terminate()
else:
import signal
os.kill(p.pid, signal.SIGTERM)
raise
else:
p.stdin.close()
retcode = p.wait()
if retcode:
raise CommandError(self.command, retcode)
class SMTPMailer(Mailer):
"""Send emails using Python's smtplib."""
def __init__(
self,
environment,
envelopesender,
smtpserver,
smtpservertimeout=10.0,
smtpserverdebuglevel=0,
smtpencryption="none",
smtpuser="",
smtppass="",
smtpcacerts="",
):
super(SMTPMailer, self).__init__(environment)
if not envelopesender:
self.environment.get_logger().error(
"fatal: git_multimail: cannot use SMTPMailer without a sender address.\n"
"please set either multimailhook.envelopeSender or user.email\n"
)
sys.exit(1)
if smtpencryption == "ssl" and not (smtpuser and smtppass):
raise ConfigurationException(
"Cannot use SMTPMailer with security option ssl "
"without options username and password."
)
self.envelopesender = envelopesender
self.smtpserver = smtpserver
self.smtpservertimeout = smtpservertimeout
self.smtpserverdebuglevel = smtpserverdebuglevel
self.security = smtpencryption
self.username = smtpuser
self.password = smtppass
self.smtpcacerts = smtpcacerts
try:
def call(klass, server, timeout):
try:
return klass(server, timeout=timeout)
except TypeError:
# Old Python versions do not have timeout= argument.
return klass(server)
if self.security == "none":
self.smtp = call(
smtplib.SMTP,
self.smtpserver,
timeout=self.smtpservertimeout,
)
elif self.security == "ssl":
if self.smtpcacerts:
raise smtplib.SMTPException(
"Checking certificate is not supported for ssl, prefer starttls"
)
self.smtp = call(
smtplib.SMTP_SSL,
self.smtpserver,
timeout=self.smtpservertimeout,
)
elif self.security == "tls":
if "ssl" not in sys.modules:
self.environment.get_logger().error(
"*** Your Python version does not have the ssl library installed\n"
"*** smtpEncryption=tls is not available.\n"
"*** Either upgrade Python to 2.6 or later\n"
" or use git_multimail.py version 1.2.\n"
)
if ":" not in self.smtpserver:
self.smtpserver += ":587" # default port for TLS
self.smtp = call(
smtplib.SMTP,
self.smtpserver,
timeout=self.smtpservertimeout,
)
# start: ehlo + starttls
# equivalent to
# self.smtp.ehlo()
# self.smtp.starttls()
                # with access to the ssl layer
self.smtp.ehlo()
if not self.smtp.has_extn("starttls"):
raise smtplib.SMTPException(
"STARTTLS extension not supported by server"
)
resp, reply = self.smtp.docmd("STARTTLS")
if resp != 220:
raise smtplib.SMTPException(
"Wrong answer to the STARTTLS command"
)
if self.smtpcacerts:
self.smtp.sock = ssl.wrap_socket(
self.smtp.sock,
ca_certs=self.smtpcacerts,
cert_reqs=ssl.CERT_REQUIRED,
)
else:
self.smtp.sock = ssl.wrap_socket(
self.smtp.sock, cert_reqs=ssl.CERT_NONE
)
self.environment.get_logger().error(
"*** Warning, the server certificat is not verified (smtp) ***\n"
"*** set the option smtpCACerts ***\n"
)
if not hasattr(self.smtp.sock, "read"):
# using httplib.FakeSocket with Python 2.5.x or earlier
self.smtp.sock.read = self.smtp.sock.recv
self.smtp.file = smtplib.SSLFakeFile(self.smtp.sock)
self.smtp.helo_resp = None
self.smtp.ehlo_resp = None
self.smtp.esmtp_features = {}
self.smtp.does_esmtp = 0
# end: ehlo + starttls
self.smtp.ehlo()
else:
sys.stdout.write(
"*** Error: Control reached an invalid option. ***"
)
sys.exit(1)
if self.smtpserverdebuglevel > 0:
sys.stdout.write(
"*** Setting debug on for SMTP server connection (%s) ***\n"
% self.smtpserverdebuglevel
)
self.smtp.set_debuglevel(self.smtpserverdebuglevel)
except Exception:
self.environment.get_logger().error(
"*** Error establishing SMTP connection to %s ***\n"
"*** %s\n" % (self.smtpserver, sys.exc_info()[1])
)
sys.exit(1)
def __del__(self):
if hasattr(self, "smtp"):
self.smtp.quit()
del self.smtp
def send(self, lines, to_addrs):
try:
if self.username or self.password:
self.smtp.login(self.username, self.password)
msg = "".join(lines)
# turn comma-separated list into Python list if needed.
if is_string(to_addrs):
to_addrs = [
email for (name, email) in getaddresses([to_addrs])
]
self.smtp.sendmail(self.envelopesender, to_addrs, msg)
except smtplib.SMTPResponseException:
err = sys.exc_info()[1]
self.environment.get_logger().error(
"*** Error sending email ***\n"
"*** Error %d: %s\n"
% (err.smtp_code, bytes_to_str(err.smtp_error))
)
try:
smtp = self.smtp
# delete the field before quit() so that in case of
# error, self.smtp is deleted anyway.
del self.smtp
smtp.quit()
except:
self.environment.get_logger().error(
"*** Error closing the SMTP connection ***\n"
"*** Exiting anyway ... ***\n"
"*** %s\n" % sys.exc_info()[1]
)
sys.exit(1)
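# Illustrative sketch (hypothetical values, not taken from this script's docs):
# an SMTPMailer is normally built by choose_mailer() further below from
# git-config values. A configuration along these lines would exercise the
# "tls" branch of the constructor above:
#
#   git config multimailhook.mailer smtp
#   git config multimailhook.smtpServer smtp.example.com:587
#   git config multimailhook.smtpEncryption tls
#   git config multimailhook.smtpUser alice
#   git config multimailhook.smtpPass secret
#   git config multimailhook.smtpCACerts /etc/ssl/certs/ca-certificates.crt
#
# Leaving smtpCACerts empty keeps the connection unverified, which triggers
# the warning printed above.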
class OutputMailer(Mailer):
"""Write emails to an output stream, bracketed by lines of '=' characters.
This is intended for debugging purposes."""
SEPARATOR = "=" * 75 + "\n"
def __init__(self, f):
self.f = f
def send(self, lines, to_addrs):
write_str(self.f, self.SEPARATOR)
for line in lines:
write_str(self.f, line)
write_str(self.f, self.SEPARATOR)
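# Minimal usage sketch (hypothetical, for illustration only): OutputMailer
# simply echoes the raw message between separator lines, which is what the
# --stdout option relies on.
#
#   OutputMailer(sys.stdout).send(
#       ["Subject: example\n", "\n", "example body\n"], to_addrs=[]
#   )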
def get_git_dir():
"""Determine GIT_DIR.
Determine GIT_DIR either from the GIT_DIR environment variable or
from the working directory, using Git's usual rules."""
try:
return read_git_output(["rev-parse", "--git-dir"])
except CommandError:
sys.stderr.write("fatal: git_multimail: not in a git directory\n")
sys.exit(1)
class Environment(object):
"""Describes the environment in which the push is occurring.
An Environment object encapsulates information about the local
environment. For example, it knows how to determine:
* the name of the repository to which the push occurred
* what user did the push
* what users want to be informed about various types of changes.
An Environment object is expected to have the following methods:
get_repo_shortname()
Return a short name for the repository, for display
purposes.
get_repo_path()
Return the absolute path to the Git repository.
get_emailprefix()
Return a string that will be prefixed to every email's
subject.
get_pusher()
Return the username of the person who pushed the changes.
This value is used in the email body to indicate who
pushed the change.
get_pusher_email() (may return None)
Return the email address of the person who pushed the
changes. The value should be a single RFC 2822 email
address as a string; e.g., "Joe User <[email protected]>"
if available, otherwise "[email protected]". If set, the
value is used as the Reply-To address for refchange
emails. If it is impossible to determine the pusher's
email, this attribute should be set to None (in which case
no Reply-To header will be output).
get_sender()
Return the address to be used as the 'From' email address
in the email envelope.
get_fromaddr(change=None)
Return the 'From' email address used in the email 'From:'
headers. If the change is known when this function is
called, it is passed in as the 'change' parameter. (May
be a full RFC 2822 email address like 'Joe User
<[email protected]>'.)
get_administrator()
Return the name and/or email of the repository
administrator. This value is used in the footer as the
person to whom requests to be removed from the
notification list should be sent. Ideally, it should
include a valid email address.
get_reply_to_refchange()
get_reply_to_commit()
Return the address to use in the email "Reply-To" header,
as a string. These can be an RFC 2822 email address, or
None to omit the "Reply-To" header.
get_reply_to_refchange() is used for refchange emails;
get_reply_to_commit() is used for individual commit
emails.
get_ref_filter_regex()
Return a tuple -- a compiled regex, and a boolean indicating
whether the regex picks refs to include (if False, the regex
matches on refs to exclude).
get_default_ref_ignore_regex()
            Return a regex matching refs that should be ignored, both when
            deciding which emails to send and when computing which commits
            are considered new to the repository. Default is "^refs/notes/".
get_max_subject_length()
Return an int giving the maximal length for the subject
(git log --oneline).
They should also define the following attributes:
announce_show_shortlog (bool)
True iff announce emails should include a shortlog.
commit_email_format (string)
If "html", generate commit emails in HTML instead of plain text
used by default.
html_in_intro (bool)
html_in_footer (bool)
When generating HTML emails, the introduction (respectively,
the footer) will be HTML-escaped iff html_in_intro (respectively,
            html_in_footer) is true. When false, only the values used to expand
the template are escaped.
refchange_showgraph (bool)
True iff refchanges emails should include a detailed graph.
refchange_showlog (bool)
True iff refchanges emails should include a detailed log.
diffopts (list of strings)
The options that should be passed to 'git diff' for the
summary email. The value should be a list of strings
representing words to be passed to the command.
graphopts (list of strings)
Analogous to diffopts, but contains options passed to
'git log --graph' when generating the detailed graph for
a set of commits (see refchange_showgraph)
logopts (list of strings)
Analogous to diffopts, but contains options passed to
'git log' when generating the detailed log for a set of
commits (see refchange_showlog)
commitlogopts (list of strings)
The options that should be passed to 'git log' for each
commit mail. The value should be a list of strings
representing words to be passed to the command.
date_substitute (string)
String to be used in substitution for 'Date:' at start of
line in the output of 'git log'.
quiet (bool)
On success do not write to stderr
stdout (bool)
Write email to stdout rather than emailing. Useful for debugging
combine_when_single_commit (bool)
True if a combined email should be produced when a single
new commit is pushed to a branch, False otherwise.
from_refchange, from_commit (strings)
Addresses to use for the From: field for refchange emails
and commit emails respectively. Set from
multimailhook.fromRefchange and multimailhook.fromCommit
by ConfigEnvironmentMixin.
log_file, error_log_file, debug_log_file (string)
Name of a file to which logs should be sent.
verbose (int)
How verbose the system should be.
- 0 (default): show info, errors, ...
- 1 : show basic debug info
"""
REPO_NAME_RE = re.compile(r"^(?P<name>.+?)(?:\.git)$")
def __init__(self, osenv=None):
self.osenv = osenv or os.environ
self.announce_show_shortlog = False
self.commit_email_format = "text"
self.html_in_intro = False
self.html_in_footer = False
self.commitBrowseURL = None
self.maxcommitemails = 500
self.diffopts = ["--stat", "--summary", "--find-copies-harder"]
self.graphopts = ["--oneline", "--decorate"]
self.logopts = []
self.refchange_showgraph = False
self.refchange_showlog = False
self.commitlogopts = ["-C", "--stat", "-p", "--cc"]
self.date_substitute = "AuthorDate: "
self.quiet = False
self.stdout = False
self.combine_when_single_commit = True
self.logger = None
self.COMPUTED_KEYS = [
"administrator",
"charset",
"emailprefix",
"pusher",
"pusher_email",
"repo_path",
"repo_shortname",
"sender",
]
self._values = None
def get_logger(self):
"""Get (possibly creates) the logger associated to this environment."""
if self.logger is None:
self.logger = Logger(self)
return self.logger
def get_repo_shortname(self):
"""Use the last part of the repo path, with ".git" stripped off if present."""
basename = os.path.basename(os.path.abspath(self.get_repo_path()))
m = self.REPO_NAME_RE.match(basename)
if m:
return m.group("name")
else:
return basename
def get_pusher(self):
raise NotImplementedError()
def get_pusher_email(self):
return None
def get_fromaddr(self, change=None):
config = Config("user")
fromname = config.get("name", default="")
fromemail = config.get("email", default="")
if fromemail:
return formataddr([fromname, fromemail])
return self.get_sender()
def get_administrator(self):
return "the administrator of this repository"
def get_emailprefix(self):
return ""
def get_repo_path(self):
if read_git_output(["rev-parse", "--is-bare-repository"]) == "true":
path = get_git_dir()
else:
path = read_git_output(["rev-parse", "--show-toplevel"])
return os.path.abspath(path)
def get_charset(self):
return CHARSET
def get_values(self):
"""Return a dictionary {keyword: expansion} for this Environment.
This method is called by Change._compute_values(). The keys
in the returned dictionary are available to be used in any of
the templates. The dictionary is created by calling
self.get_NAME() for each of the attributes named in
COMPUTED_KEYS and recording those that do not return None.
The return value is always a new dictionary."""
if self._values is None:
values = {"": ""} # %()s expands to the empty string.
for key in self.COMPUTED_KEYS:
value = getattr(self, "get_%s" % (key,))()
if value is not None:
values[key] = value
self._values = values
return self._values.copy()
def get_refchange_recipients(self, refchange):
"""Return the recipients for notifications about refchange.
Return the list of email addresses to which notifications
about the specified ReferenceChange should be sent."""
raise NotImplementedError()
def get_announce_recipients(self, annotated_tag_change):
"""Return the recipients for notifications about annotated_tag_change.
Return the list of email addresses to which notifications
about the specified AnnotatedTagChange should be sent."""
raise NotImplementedError()
def get_reply_to_refchange(self, refchange):
return self.get_pusher_email()
def get_revision_recipients(self, revision):
"""Return the recipients for messages about revision.
Return the list of email addresses to which notifications
about the specified Revision should be sent. This method
could be overridden, for example, to take into account the
contents of the revision when deciding whom to notify about
it. For example, there could be a scheme for users to express
interest in particular files or subdirectories, and only
        receive notification emails for revisions affecting those
files."""
raise NotImplementedError()
def get_reply_to_commit(self, revision):
return revision.author
def get_default_ref_ignore_regex(self):
# The commit messages of git notes are essentially meaningless
# and "filenames" in git notes commits are an implementational
# detail that might surprise users at first. As such, we
# would need a completely different method for handling emails
# of git notes in order for them to be of benefit for users,
# which we simply do not have right now.
return "^refs/notes/"
def get_max_subject_length(self):
"""Return the maximal subject line (git log --oneline) length.
Longer subject lines will be truncated."""
raise NotImplementedError()
def filter_body(self, lines):
"""Filter the lines intended for an email body.
lines is an iterable over the lines that would go into the
email body. Filter it (e.g., limit the number of lines, the
line length, character set, etc.), returning another iterable.
See FilterLinesEnvironmentMixin and MaxlinesEnvironmentMixin
for classes implementing this functionality."""
return lines
def log_msg(self, msg):
"""Write the string msg on a log file or on stderr.
Sends the text to stderr by default, override to change the behavior."""
self.get_logger().info(msg)
def log_warning(self, msg):
"""Write the string msg on a log file or on stderr.
Sends the text to stderr by default, override to change the behavior."""
self.get_logger().warning(msg)
def log_error(self, msg):
"""Write the string msg on a log file or on stderr.
Sends the text to stderr by default, override to change the behavior."""
self.get_logger().error(msg)
def check(self):
pass
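# Hedged sketch (hypothetical, not defined anywhere in this script): a minimal
# concrete Environment only needs to supply the methods above that raise
# NotImplementedError, e.g.:
#
#   class ExampleEnvironment(Environment):
#       def get_pusher(self):
#           return "example-user"
#       def get_refchange_recipients(self, refchange):
#           return "[email protected]"
#       def get_announce_recipients(self, annotated_tag_change):
#           return "[email protected]"
#       def get_revision_recipients(self, revision):
#           return "[email protected]"
#       def get_max_subject_length(self):
#           return 500
#
# In practice the concrete classes are assembled from the mixins defined below.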
class ConfigEnvironmentMixin(Environment):
"""A mixin that sets self.config to its constructor's config argument.
This class's constructor consumes the "config" argument.
Mixins that need to inspect the config should inherit from this
class (1) to make sure that "config" is still in the constructor
    arguments when its own constructor runs and/or (2) to be sure that
self.config is set after construction."""
def __init__(self, config, **kw):
super(ConfigEnvironmentMixin, self).__init__(**kw)
self.config = config
class ConfigOptionsEnvironmentMixin(ConfigEnvironmentMixin):
"""An Environment that reads most of its information from "git config"."""
@staticmethod
def forbid_field_values(name, value, forbidden):
for forbidden_val in forbidden:
            if value is not None and value.lower() == forbidden_val:
raise ConfigurationException(
'"%s" is not an allowed setting for %s' % (value, name)
)
def __init__(self, config, **kw):
super(ConfigOptionsEnvironmentMixin, self).__init__(
config=config, **kw
)
for var, cfg in (
("announce_show_shortlog", "announceshortlog"),
("refchange_showgraph", "refchangeShowGraph"),
("refchange_showlog", "refchangeshowlog"),
("quiet", "quiet"),
("stdout", "stdout"),
):
val = config.get_bool(cfg)
if val is not None:
setattr(self, var, val)
commit_email_format = config.get("commitEmailFormat")
if commit_email_format is not None:
if commit_email_format != "html" and commit_email_format != "text":
self.log_warning(
"*** Unknown value for multimailhook.commitEmailFormat: %s\n"
% commit_email_format
+ '*** Expected either "text" or "html". Ignoring.\n'
)
else:
self.commit_email_format = commit_email_format
html_in_intro = config.get_bool("htmlInIntro")
if html_in_intro is not None:
self.html_in_intro = html_in_intro
html_in_footer = config.get_bool("htmlInFooter")
if html_in_footer is not None:
self.html_in_footer = html_in_footer
self.commitBrowseURL = config.get("commitBrowseURL")
maxcommitemails = config.get("maxcommitemails")
if maxcommitemails is not None:
try:
self.maxcommitemails = int(maxcommitemails)
except ValueError:
self.log_warning(
"*** Malformed value for multimailhook.maxCommitEmails: %s\n"
% maxcommitemails
+ "*** Expected a number. Ignoring.\n"
)
diffopts = config.get("diffopts")
if diffopts is not None:
self.diffopts = shlex.split(diffopts)
graphopts = config.get("graphOpts")
if graphopts is not None:
self.graphopts = shlex.split(graphopts)
logopts = config.get("logopts")
if logopts is not None:
self.logopts = shlex.split(logopts)
commitlogopts = config.get("commitlogopts")
if commitlogopts is not None:
self.commitlogopts = shlex.split(commitlogopts)
date_substitute = config.get("dateSubstitute")
if date_substitute == "none":
self.date_substitute = None
elif date_substitute is not None:
self.date_substitute = date_substitute
reply_to = config.get("replyTo")
self.__reply_to_refchange = config.get(
"replyToRefchange", default=reply_to
)
self.forbid_field_values(
"replyToRefchange", self.__reply_to_refchange, ["author"]
)
self.__reply_to_commit = config.get("replyToCommit", default=reply_to)
self.from_refchange = config.get("fromRefchange")
self.forbid_field_values(
"fromRefchange", self.from_refchange, ["author", "none"]
)
self.from_commit = config.get("fromCommit")
self.forbid_field_values("fromCommit", self.from_commit, ["none"])
combine = config.get_bool("combineWhenSingleCommit")
if combine is not None:
self.combine_when_single_commit = combine
self.log_file = config.get("logFile", default=None)
self.error_log_file = config.get("errorLogFile", default=None)
self.debug_log_file = config.get("debugLogFile", default=None)
if config.get_bool("Verbose", default=False):
self.verbose = 1
else:
self.verbose = 0
def get_administrator(self):
return (
self.config.get("administrator")
or self.get_sender()
or super(ConfigOptionsEnvironmentMixin, self).get_administrator()
)
def get_repo_shortname(self):
return (
self.config.get("reponame")
or super(ConfigOptionsEnvironmentMixin, self).get_repo_shortname()
)
def get_emailprefix(self):
emailprefix = self.config.get("emailprefix")
if emailprefix is not None:
emailprefix = emailprefix.strip()
if emailprefix:
emailprefix += " "
else:
emailprefix = "[%(repo_shortname)s] "
short_name = self.get_repo_shortname()
try:
return emailprefix % {"repo_shortname": short_name}
except:
self.get_logger().error(
"*** Invalid multimailhook.emailPrefix: %s\n" % emailprefix
+ "*** %s\n" % sys.exc_info()[1]
+ "*** Only the '%(repo_shortname)s' placeholder is allowed\n"
)
raise ConfigurationException(
'"%s" is not an allowed setting for emailPrefix' % emailprefix
)
def get_sender(self):
return self.config.get("envelopesender")
def process_addr(self, addr, change):
if addr.lower() == "author":
if hasattr(change, "author"):
return change.author
else:
return None
elif addr.lower() == "pusher":
return self.get_pusher_email()
elif addr.lower() == "none":
return None
else:
return addr
def get_fromaddr(self, change=None):
fromaddr = self.config.get("from")
if change:
specific_fromaddr = change.get_specific_fromaddr()
if specific_fromaddr:
fromaddr = specific_fromaddr
if fromaddr:
fromaddr = self.process_addr(fromaddr, change)
if fromaddr:
return fromaddr
return super(ConfigOptionsEnvironmentMixin, self).get_fromaddr(change)
def get_reply_to_refchange(self, refchange):
if self.__reply_to_refchange is None:
return super(
ConfigOptionsEnvironmentMixin, self
).get_reply_to_refchange(refchange)
else:
return self.process_addr(self.__reply_to_refchange, refchange)
def get_reply_to_commit(self, revision):
if self.__reply_to_commit is None:
return super(
ConfigOptionsEnvironmentMixin, self
).get_reply_to_commit(revision)
else:
return self.process_addr(self.__reply_to_commit, revision)
def get_scancommitforcc(self):
return self.config.get("scancommitforcc")
class FilterLinesEnvironmentMixin(Environment):
"""Handle encoding and maximum line length of body lines.
email_max_line_length (int or None)
The maximum length of any single line in the email body.
Longer lines are truncated at that length with ' [...]'
appended.
strict_utf8 (bool)
If this field is set to True, then the email body text is
expected to be UTF-8. Any invalid characters are
converted to U+FFFD, the Unicode replacement character
(encoded as UTF-8, of course).
"""
def __init__(
self,
strict_utf8=True,
email_max_line_length=500,
max_subject_length=500,
**kw
):
super(FilterLinesEnvironmentMixin, self).__init__(**kw)
self.__strict_utf8 = strict_utf8
self.__email_max_line_length = email_max_line_length
self.__max_subject_length = max_subject_length
def filter_body(self, lines):
lines = super(FilterLinesEnvironmentMixin, self).filter_body(lines)
if self.__strict_utf8:
if not PYTHON3:
lines = (line.decode(ENCODING, "replace") for line in lines)
# Limit the line length in Unicode-space to avoid
# splitting characters:
if self.__email_max_line_length > 0:
lines = limit_linelength(lines, self.__email_max_line_length)
if not PYTHON3:
lines = (line.encode(ENCODING, "replace") for line in lines)
elif self.__email_max_line_length:
lines = limit_linelength(lines, self.__email_max_line_length)
return lines
def get_max_subject_length(self):
return self.__max_subject_length
class ConfigFilterLinesEnvironmentMixin(
ConfigEnvironmentMixin, FilterLinesEnvironmentMixin
):
"""Handle encoding and maximum line length based on config."""
def __init__(self, config, **kw):
strict_utf8 = config.get_bool("emailstrictutf8", default=None)
if strict_utf8 is not None:
kw["strict_utf8"] = strict_utf8
email_max_line_length = config.get("emailmaxlinelength")
if email_max_line_length is not None:
kw["email_max_line_length"] = int(email_max_line_length)
max_subject_length = config.get(
"subjectMaxLength", default=email_max_line_length
)
if max_subject_length is not None:
kw["max_subject_length"] = int(max_subject_length)
super(ConfigFilterLinesEnvironmentMixin, self).__init__(
config=config, **kw
)
class MaxlinesEnvironmentMixin(Environment):
"""Limit the email body to a specified number of lines."""
def __init__(self, emailmaxlines, **kw):
super(MaxlinesEnvironmentMixin, self).__init__(**kw)
self.__emailmaxlines = emailmaxlines
def filter_body(self, lines):
lines = super(MaxlinesEnvironmentMixin, self).filter_body(lines)
if self.__emailmaxlines > 0:
lines = limit_lines(lines, self.__emailmaxlines)
return lines
class ConfigMaxlinesEnvironmentMixin(
ConfigEnvironmentMixin, MaxlinesEnvironmentMixin
):
"""Limit the email body to the number of lines specified in config."""
def __init__(self, config, **kw):
emailmaxlines = int(config.get("emailmaxlines", default="0"))
super(ConfigMaxlinesEnvironmentMixin, self).__init__(
config=config, emailmaxlines=emailmaxlines, **kw
)
class FQDNEnvironmentMixin(Environment):
"""A mixin that sets the host's FQDN to its constructor argument."""
def __init__(self, fqdn, **kw):
super(FQDNEnvironmentMixin, self).__init__(**kw)
self.COMPUTED_KEYS += ["fqdn"]
self.__fqdn = fqdn
def get_fqdn(self):
"""Return the fully-qualified domain name for this host.
Return None if it is unavailable or unwanted."""
return self.__fqdn
class ConfigFQDNEnvironmentMixin(ConfigEnvironmentMixin, FQDNEnvironmentMixin):
"""Read the FQDN from the config."""
def __init__(self, config, **kw):
fqdn = config.get("fqdn")
super(ConfigFQDNEnvironmentMixin, self).__init__(
config=config, fqdn=fqdn, **kw
)
class ComputeFQDNEnvironmentMixin(FQDNEnvironmentMixin):
"""Get the FQDN by calling socket.getfqdn()."""
def __init__(self, **kw):
super(ComputeFQDNEnvironmentMixin, self).__init__(
fqdn=socket.getfqdn(), **kw
)
class PusherDomainEnvironmentMixin(ConfigEnvironmentMixin):
"""Deduce pusher_email from pusher by appending an emaildomain."""
def __init__(self, **kw):
super(PusherDomainEnvironmentMixin, self).__init__(**kw)
self.__emaildomain = self.config.get("emaildomain")
def get_pusher_email(self):
if self.__emaildomain:
# Derive the pusher's full email address in the default way:
return "%s@%s" % (self.get_pusher(), self.__emaildomain)
else:
return super(PusherDomainEnvironmentMixin, self).get_pusher_email()
class StaticRecipientsEnvironmentMixin(Environment):
"""Set recipients statically based on constructor parameters."""
def __init__(
self,
refchange_recipients,
announce_recipients,
revision_recipients,
scancommitforcc,
**kw
):
super(StaticRecipientsEnvironmentMixin, self).__init__(**kw)
# The recipients for various types of notification emails, as
# RFC 2822 email addresses separated by commas (or the empty
# string if no recipients are configured). Although there is
        # a mechanism to choose the recipient lists based on the
# actual *contents* of the change being reported, we only
# choose based on the *type* of the change. Therefore we can
# compute them once and for all:
self.__refchange_recipients = refchange_recipients
self.__announce_recipients = announce_recipients
self.__revision_recipients = revision_recipients
def check(self):
if not (
self.get_refchange_recipients(None)
or self.get_announce_recipients(None)
or self.get_revision_recipients(None)
or self.get_scancommitforcc()
):
raise ConfigurationException("No email recipients configured!")
super(StaticRecipientsEnvironmentMixin, self).check()
def get_refchange_recipients(self, refchange):
if self.__refchange_recipients is None:
return super(
StaticRecipientsEnvironmentMixin, self
).get_refchange_recipients(refchange)
return self.__refchange_recipients
def get_announce_recipients(self, annotated_tag_change):
if self.__announce_recipients is None:
return super(
StaticRecipientsEnvironmentMixin, self
            ).get_announce_recipients(annotated_tag_change)
return self.__announce_recipients
def get_revision_recipients(self, revision):
if self.__revision_recipients is None:
return super(
StaticRecipientsEnvironmentMixin, self
            ).get_revision_recipients(revision)
return self.__revision_recipients
class CLIRecipientsEnvironmentMixin(Environment):
"""Mixin storing recipients information comming from the
command-line."""
def __init__(self, cli_recipients=None, **kw):
super(CLIRecipientsEnvironmentMixin, self).__init__(**kw)
self.__cli_recipients = cli_recipients
def get_refchange_recipients(self, refchange):
if self.__cli_recipients is None:
return super(
CLIRecipientsEnvironmentMixin, self
).get_refchange_recipients(refchange)
return self.__cli_recipients
def get_announce_recipients(self, annotated_tag_change):
if self.__cli_recipients is None:
return super(
CLIRecipientsEnvironmentMixin, self
).get_announce_recipients(annotated_tag_change)
return self.__cli_recipients
def get_revision_recipients(self, revision):
if self.__cli_recipients is None:
return super(
CLIRecipientsEnvironmentMixin, self
).get_revision_recipients(revision)
return self.__cli_recipients
class ConfigRecipientsEnvironmentMixin(
ConfigEnvironmentMixin, StaticRecipientsEnvironmentMixin
):
"""Determine recipients statically based on config."""
def __init__(self, config, **kw):
super(ConfigRecipientsEnvironmentMixin, self).__init__(
config=config,
refchange_recipients=self._get_recipients(
config, "refchangelist", "mailinglist"
),
announce_recipients=self._get_recipients(
config, "announcelist", "refchangelist", "mailinglist"
),
revision_recipients=self._get_recipients(
config, "commitlist", "mailinglist"
),
scancommitforcc=config.get("scancommitforcc"),
**kw
)
def _get_recipients(self, config, *names):
"""Return the recipients for a particular type of message.
Return the list of email addresses to which a particular type
of notification email should be sent, by looking at the config
value for "multimailhook.$name" for each of names. Use the
value from the first name that is configured. The return
value is a (possibly empty) string containing RFC 2822 email
addresses separated by commas. If no configuration could be
found, raise a ConfigurationException."""
for name in names:
lines = config.get_all(name)
if lines is not None:
lines = [line.strip() for line in lines]
# Single "none" is a special value equivalen to empty string.
if lines == ["none"]:
lines = [""]
return ", ".join(lines)
else:
return ""
class StaticRefFilterEnvironmentMixin(Environment):
"""Set branch filter statically based on constructor parameters."""
def __init__(
self,
ref_filter_incl_regex,
ref_filter_excl_regex,
ref_filter_do_send_regex,
ref_filter_dont_send_regex,
**kw
):
super(StaticRefFilterEnvironmentMixin, self).__init__(**kw)
if ref_filter_incl_regex and ref_filter_excl_regex:
raise ConfigurationException(
"Cannot specify both a ref inclusion and exclusion regex."
)
self.__is_inclusion_filter = bool(ref_filter_incl_regex)
default_exclude = self.get_default_ref_ignore_regex()
if ref_filter_incl_regex:
ref_filter_regex = ref_filter_incl_regex
elif ref_filter_excl_regex:
ref_filter_regex = ref_filter_excl_regex + "|" + default_exclude
else:
ref_filter_regex = default_exclude
try:
self.__compiled_regex = re.compile(ref_filter_regex)
except Exception:
raise ConfigurationException(
'Invalid Ref Filter Regex "%s": %s'
% (ref_filter_regex, sys.exc_info()[1])
)
if ref_filter_do_send_regex and ref_filter_dont_send_regex:
raise ConfigurationException(
"Cannot specify both a ref doSend and dontSend regex."
)
self.__is_do_send_filter = bool(ref_filter_do_send_regex)
if ref_filter_do_send_regex:
ref_filter_send_regex = ref_filter_do_send_regex
elif ref_filter_dont_send_regex:
ref_filter_send_regex = ref_filter_dont_send_regex
else:
ref_filter_send_regex = ".*"
self.__is_do_send_filter = True
try:
self.__send_compiled_regex = re.compile(ref_filter_send_regex)
except Exception:
raise ConfigurationException(
'Invalid Ref Filter Regex "%s": %s'
% (ref_filter_send_regex, sys.exc_info()[1])
)
def get_ref_filter_regex(self, send_filter=False):
if send_filter:
return self.__send_compiled_regex, self.__is_do_send_filter
else:
return self.__compiled_regex, self.__is_inclusion_filter
class ConfigRefFilterEnvironmentMixin(
ConfigEnvironmentMixin, StaticRefFilterEnvironmentMixin
):
"""Determine branch filtering statically based on config."""
def _get_regex(self, config, key):
"""Get a list of whitespace-separated regex. The refFilter* config
variables are multivalued (hence the use of get_all), and we
allow each entry to be a whitespace-separated list (hence the
split on each line). The whole thing is glued into a single regex."""
values = config.get_all(key)
if values is None:
return values
items = []
for line in values:
for i in line.split():
items.append(i)
if items == []:
return None
return "|".join(items)
def __init__(self, config, **kw):
super(ConfigRefFilterEnvironmentMixin, self).__init__(
config=config,
ref_filter_incl_regex=self._get_regex(
config, "refFilterInclusionRegex"
),
ref_filter_excl_regex=self._get_regex(
config, "refFilterExclusionRegex"
),
ref_filter_do_send_regex=self._get_regex(
config, "refFilterDoSendRegex"
),
ref_filter_dont_send_regex=self._get_regex(
config, "refFilterDontSendRegex"
),
**kw
)
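# Example (hypothetical patterns) of how _get_regex() glues entries together:
#
#   git config --add multimailhook.refFilterExclusionRegex "^refs/tags/"
#   git config --add multimailhook.refFilterExclusionRegex "^refs/wip/ ^refs/sandbox/"
#
# yields the single exclusion regex "^refs/tags/|^refs/wip/|^refs/sandbox/",
# which StaticRefFilterEnvironmentMixin above then ORs with the default
# "^refs/notes/" ignore regex.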
class ProjectdescEnvironmentMixin(Environment):
"""Make a "projectdesc" value available for templates.
By default, it is set to the first line of $GIT_DIR/description
(if that file is present and appears to be set meaningfully)."""
def __init__(self, **kw):
super(ProjectdescEnvironmentMixin, self).__init__(**kw)
self.COMPUTED_KEYS += ["projectdesc"]
def get_projectdesc(self):
"""Return a one-line descripition of the project."""
git_dir = get_git_dir()
try:
projectdesc = (
open(os.path.join(git_dir, "description")).readline().strip()
)
if projectdesc and not projectdesc.startswith(
"Unnamed repository"
):
return projectdesc
except IOError:
pass
return "UNNAMED PROJECT"
class GenericEnvironmentMixin(Environment):
def get_pusher(self):
return self.osenv.get(
"USER", self.osenv.get("USERNAME", "unknown user")
)
class GitoliteEnvironmentHighPrecMixin(Environment):
def get_pusher(self):
return self.osenv.get("GL_USER", "unknown user")
class GitoliteEnvironmentLowPrecMixin(Environment):
def get_repo_shortname(self):
# The gitolite environment variable $GL_REPO is a pretty good
# repo_shortname (though it's probably not as good as a value
# the user might have explicitly put in his config).
return (
self.osenv.get("GL_REPO", None)
or super(
GitoliteEnvironmentLowPrecMixin, self
).get_repo_shortname()
)
def get_fromaddr(self, change=None):
GL_USER = self.osenv.get("GL_USER")
if GL_USER is not None:
# Find the path to gitolite.conf. Note that gitolite v3
# did away with the GL_ADMINDIR and GL_CONF environment
# variables (they are now hard-coded).
GL_ADMINDIR = self.osenv.get(
"GL_ADMINDIR",
os.path.expanduser(os.path.join("~", ".gitolite")),
)
GL_CONF = self.osenv.get(
"GL_CONF", os.path.join(GL_ADMINDIR, "conf", "gitolite.conf")
)
if os.path.isfile(GL_CONF):
f = open(GL_CONF, "rU")
try:
in_user_emails_section = False
re_template = r"^\s*#\s*%s\s*$"
re_begin, re_user, re_end = (
re.compile(re_template % x)
for x in (
r"BEGIN\s+USER\s+EMAILS",
re.escape(GL_USER) + r"\s+(.*)",
r"END\s+USER\s+EMAILS",
)
)
for l in f:
l = l.rstrip("\n")
if not in_user_emails_section:
if re_begin.match(l):
in_user_emails_section = True
continue
if re_end.match(l):
break
m = re_user.match(l)
if m:
return m.group(1)
finally:
f.close()
return super(GitoliteEnvironmentLowPrecMixin, self).get_fromaddr(
change
)
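# Sketch of the gitolite.conf block that get_fromaddr() above looks for; the
# regexes expect a commented section of the form (names and addresses are
# examples only):
#
#   # BEGIN USER EMAILS
#   # alice Alice Example <[email protected]>
#   # END USER EMAILS
#
# If GL_USER is "alice", the address on her line becomes the From: address.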
class IncrementalDateTime(object):
"""Simple wrapper to give incremental date/times.
Each call will result in a date/time a second later than the
previous call. This can be used to falsify email headers, to
increase the likelihood that email clients sort the emails
correctly."""
def __init__(self):
self.time = time.time()
self.next = self.__next__ # Python 2 backward compatibility
def __next__(self):
formatted = formatdate(self.time, True)
self.time += 1
return formatted
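# Usage sketch: successive calls return RFC 2822 dates one second apart, e.g.
#
#   send_date = IncrementalDateTime()
#   first = next(send_date)   # e.g. "Mon, 01 Jan 2024 00:00:00 +0000"
#   second = next(send_date)  # one second later than `first`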
class StashEnvironmentHighPrecMixin(Environment):
def __init__(self, user=None, repo=None, **kw):
super(StashEnvironmentHighPrecMixin, self).__init__(
user=user, repo=repo, **kw
)
self.__user = user
self.__repo = repo
def get_pusher(self):
return re.match("(.*?)\s*<", self.__user).group(1)
def get_pusher_email(self):
return self.__user
class StashEnvironmentLowPrecMixin(Environment):
def __init__(self, user=None, repo=None, **kw):
super(StashEnvironmentLowPrecMixin, self).__init__(**kw)
self.__repo = repo
self.__user = user
def get_repo_shortname(self):
return self.__repo
def get_fromaddr(self, change=None):
return self.__user
class GerritEnvironmentHighPrecMixin(Environment):
def __init__(self, project=None, submitter=None, update_method=None, **kw):
super(GerritEnvironmentHighPrecMixin, self).__init__(
submitter=submitter, project=project, **kw
)
self.__project = project
self.__submitter = submitter
self.__update_method = update_method
"Make an 'update_method' value available for templates."
self.COMPUTED_KEYS += ["update_method"]
def get_pusher(self):
if self.__submitter:
if self.__submitter.find("<") != -1:
# Submitter has a configured email, we transformed
# __submitter into an RFC 2822 string already.
return re.match("(.*?)\s*<", self.__submitter).group(1)
else:
# Submitter has no configured email, it's just his name.
return self.__submitter
else:
# If we arrive here, this means someone pushed "Submit" from
# the gerrit web UI for the CR (or used one of the programmatic
# APIs to do the same, such as gerrit review) and the
# merge/push was done by the Gerrit user. It was technically
# triggered by someone else, but sadly we have no way of
# determining who that someone else is at this point.
return "Gerrit" # 'unknown user'?
def get_pusher_email(self):
if self.__submitter:
return self.__submitter
else:
return super(
GerritEnvironmentHighPrecMixin, self
).get_pusher_email()
def get_default_ref_ignore_regex(self):
default = super(
GerritEnvironmentHighPrecMixin, self
).get_default_ref_ignore_regex()
return default + "|^refs/changes/|^refs/cache-automerge/|^refs/meta/"
def get_revision_recipients(self, revision):
# Merge commits created by Gerrit when users hit "Submit this patchset"
# in the Web UI (or do equivalently with REST APIs or the gerrit review
# command) are not something users want to see an individual email for.
# Filter them out.
committer = read_git_output(
["log", "--no-walk", "--format=%cN", revision.rev.sha1]
)
if committer == "Gerrit Code Review":
return []
else:
return super(
GerritEnvironmentHighPrecMixin, self
).get_revision_recipients(revision)
def get_update_method(self):
return self.__update_method
class GerritEnvironmentLowPrecMixin(Environment):
def __init__(self, project=None, submitter=None, **kw):
super(GerritEnvironmentLowPrecMixin, self).__init__(**kw)
self.__project = project
self.__submitter = submitter
def get_repo_shortname(self):
return self.__project
def get_fromaddr(self, change=None):
if self.__submitter and self.__submitter.find("<") != -1:
return self.__submitter
else:
return super(GerritEnvironmentLowPrecMixin, self).get_fromaddr(
change
)
class Push(object):
"""Represent an entire push (i.e., a group of ReferenceChanges).
It is easy to figure out what commits were added to a *branch* by
a Reference change:
git rev-list change.old..change.new
or removed from a *branch*:
git rev-list change.new..change.old
But it is not quite so trivial to determine which entirely new
commits were added to the *repository* by a push and which old
commits were discarded by a push. A big part of the job of this
class is to figure out these things, and to make sure that new
commits are only detailed once even if they were added to multiple
references.
The first step is to determine the "other" references--those
unaffected by the current push. They are computed by listing all
references then removing any affected by this push. The results
are stored in Push._other_ref_sha1s.
The commits contained in the repository before this push were
git rev-list other1 other2 other3 ... change1.old change2.old ...
Where "changeN.old" is the old value of one of the references
affected by this push.
The commits contained in the repository after this push are
git rev-list other1 other2 other3 ... change1.new change2.new ...
The commits added by this push are the difference between these
two sets, which can be written
git rev-list \
^other1 ^other2 ... \
^change1.old ^change2.old ... \
change1.new change2.new ...
The commits removed by this push can be computed by
git rev-list \
^other1 ^other2 ... \
^change1.new ^change2.new ... \
change1.old change2.old ...
The last point is that it is possible that other pushes are
occurring simultaneously to this one, so reference values can
change at any time. It is impossible to eliminate all race
conditions, but we reduce the window of time during which problems
can occur by translating reference names to SHA1s as soon as
possible and working with SHA1s thereafter (because SHA1s are
immutable)."""
# A map {(changeclass, changetype): integer} specifying the order
# that reference changes will be processed if multiple reference
# changes are included in a single push. The order is significant
# mostly because new commit notifications are threaded together
# with the first reference change that includes the commit. The
# following order thus causes commits to be grouped with branch
# changes (as opposed to tag changes) if possible.
SORT_ORDER = dict(
(value, i)
for (i, value) in enumerate(
[
(BranchChange, "update"),
(BranchChange, "create"),
(AnnotatedTagChange, "update"),
(AnnotatedTagChange, "create"),
(NonAnnotatedTagChange, "update"),
(NonAnnotatedTagChange, "create"),
(BranchChange, "delete"),
(AnnotatedTagChange, "delete"),
(NonAnnotatedTagChange, "delete"),
(OtherReferenceChange, "update"),
(OtherReferenceChange, "create"),
(OtherReferenceChange, "delete"),
]
)
)
def __init__(self, environment, changes, ignore_other_refs=False):
self.changes = sorted(changes, key=self._sort_key)
self.__other_ref_sha1s = None
self.__cached_commits_spec = {}
self.environment = environment
if ignore_other_refs:
self.__other_ref_sha1s = set()
@classmethod
def _sort_key(klass, change):
return (
klass.SORT_ORDER[change.__class__, change.change_type],
change.refname,
)
@property
def _other_ref_sha1s(self):
"""The GitObjects referred to by references unaffected by this push."""
if self.__other_ref_sha1s is None:
# The refnames being changed by this push:
updated_refs = set(change.refname for change in self.changes)
# The SHA-1s of commits referred to by all references in this
# repository *except* updated_refs:
sha1s = set()
fmt = (
"%(objectname) %(objecttype) %(refname)\n"
"%(*objectname) %(*objecttype) %(refname)"
)
(
ref_filter_regex,
is_inclusion_filter,
) = self.environment.get_ref_filter_regex()
for line in read_git_lines(
["for-each-ref", "--format=%s" % (fmt,)]
):
(sha1, type, name) = line.split(" ", 2)
if (
sha1
and type == "commit"
and name not in updated_refs
and include_ref(
name, ref_filter_regex, is_inclusion_filter
)
):
sha1s.add(sha1)
self.__other_ref_sha1s = sha1s
return self.__other_ref_sha1s
def _get_commits_spec_incl(self, new_or_old, reference_change=None):
"""Get new or old SHA-1 from one or each of the changed refs.
Return a list of SHA-1 commit identifier strings suitable as
arguments to 'git rev-list' (or 'git log' or ...). The
returned identifiers are either the old or new values from one
or all of the changed references, depending on the values of
new_or_old and reference_change.
new_or_old is either the string 'new' or the string 'old'. If
'new', the returned SHA-1 identifiers are the new values from
each changed reference. If 'old', the SHA-1 identifiers are
the old values from each changed reference.
If reference_change is specified and not None, only the new or
old reference from the specified reference is included in the
return value.
This function returns None if there are no matching revisions
(e.g., because a branch was deleted and new_or_old is 'new').
"""
if not reference_change:
incl_spec = sorted(
getattr(change, new_or_old).sha1
for change in self.changes
if getattr(change, new_or_old)
)
if not incl_spec:
incl_spec = None
elif not getattr(reference_change, new_or_old).commit_sha1:
incl_spec = None
else:
incl_spec = [getattr(reference_change, new_or_old).commit_sha1]
return incl_spec
def _get_commits_spec_excl(self, new_or_old):
"""Get exclusion revisions for determining new or discarded commits.
Return a list of strings suitable as arguments to 'git
rev-list' (or 'git log' or ...) that will exclude all
commits that, depending on the value of new_or_old, were
either previously in the repository (useful for determining
which commits are new to the repository) or currently in the
repository (useful for determining which commits were
discarded from the repository).
new_or_old is either the string 'new' or the string 'old'. If
'new', the commits to be excluded are those that were in the
repository before the push. If 'old', the commits to be
excluded are those that are currently in the repository."""
old_or_new = {"old": "new", "new": "old"}[new_or_old]
excl_revs = self._other_ref_sha1s.union(
getattr(change, old_or_new).sha1
for change in self.changes
if getattr(change, old_or_new).type in ["commit", "tag"]
)
return ["^" + sha1 for sha1 in sorted(excl_revs)]
def get_commits_spec(self, new_or_old, reference_change=None):
"""Get rev-list arguments for added or discarded commits.
Return a list of strings suitable as arguments to 'git
rev-list' (or 'git log' or ...) that select those commits
that, depending on the value of new_or_old, are either new to
the repository or were discarded from the repository.
new_or_old is either the string 'new' or the string 'old'. If
'new', the returned list is used to select commits that are
new to the repository. If 'old', the returned value is used
to select the commits that have been discarded from the
repository.
If reference_change is specified and not None, the new or
discarded commits are limited to those that are reachable from
the new or old value of the specified reference.
This function returns None if there are no added (or discarded)
revisions.
"""
key = (new_or_old, reference_change)
if key not in self.__cached_commits_spec:
ret = self._get_commits_spec_incl(new_or_old, reference_change)
if ret is not None:
ret.extend(self._get_commits_spec_excl(new_or_old))
self.__cached_commits_spec[key] = ret
return self.__cached_commits_spec[key]
def get_new_commits(self, reference_change=None):
"""Return a list of commits added by this push.
Return a list of the object names of commits that were added
by the part of this push represented by reference_change. If
reference_change is None, then return a list of *all* commits
added by this push."""
spec = self.get_commits_spec("new", reference_change)
return git_rev_list(spec)
def get_discarded_commits(self, reference_change):
"""Return a list of commits discarded by this push.
Return a list of the object names of commits that were
entirely discarded from the repository by the part of this
push represented by reference_change."""
spec = self.get_commits_spec("old", reference_change)
return git_rev_list(spec)
def send_emails(self, mailer, body_filter=None):
"""Use send all of the notification emails needed for this push.
Use send all of the notification emails (including reference
change emails and commit emails) needed for this push. Send
the emails using mailer. If body_filter is not None, then use
it to filter the lines that are intended for the email
body."""
# The sha1s of commits that were introduced by this push.
# They will be removed from this set as they are processed, to
# guarantee that one (and only one) email is generated for
# each new commit.
unhandled_sha1s = set(self.get_new_commits())
send_date = IncrementalDateTime()
for change in self.changes:
sha1s = []
for sha1 in reversed(list(self.get_new_commits(change))):
if sha1 in unhandled_sha1s:
sha1s.append(sha1)
unhandled_sha1s.remove(sha1)
# Check if we've got anyone to send to
if not change.recipients:
change.environment.log_warning(
"*** no recipients configured so no email will be sent\n"
"*** for %r update %s->%s"
% (change.refname, change.old.sha1, change.new.sha1)
)
else:
if not change.environment.quiet:
change.environment.log_msg(
"Sending notification emails to: %s"
% (change.recipients,)
)
extra_values = {"send_date": next(send_date)}
rev = change.send_single_combined_email(sha1s)
if rev:
mailer.send(
change.generate_combined_email(
self, rev, body_filter, extra_values
),
rev.recipients,
)
# This change is now fully handled; no need to handle
# individual revisions any further.
continue
else:
mailer.send(
change.generate_email(self, body_filter, extra_values),
change.recipients,
)
max_emails = change.environment.maxcommitemails
if max_emails and len(sha1s) > max_emails:
change.environment.log_warning(
"*** Too many new commits (%d), not sending commit emails.\n"
% len(sha1s)
+ "*** Try setting multimailhook.maxCommitEmails to a greater value\n"
+ "*** Currently, multimailhook.maxCommitEmails=%d"
% max_emails
)
return
for (num, sha1) in enumerate(sha1s):
rev = Revision(
change, GitObject(sha1), num=num + 1, tot=len(sha1s)
)
if not rev.recipients and rev.cc_recipients:
change.environment.log_msg("*** Replacing Cc: with To:")
rev.recipients = rev.cc_recipients
rev.cc_recipients = None
if rev.recipients:
extra_values = {"send_date": next(send_date)}
mailer.send(
rev.generate_email(self, body_filter, extra_values),
rev.recipients,
)
# Consistency check:
if unhandled_sha1s:
change.environment.log_error(
"ERROR: No emails were sent for the following new commits:\n"
" %s" % ("\n ".join(sorted(unhandled_sha1s)),)
)
def include_ref(refname, ref_filter_regex, is_inclusion_filter):
does_match = bool(ref_filter_regex.search(refname))
if is_inclusion_filter:
return does_match
else: # exclusion filter -- we include the ref if the regex doesn't match
return not does_match
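# Quick illustration of include_ref() with an exclusion filter such as the
# default "^refs/notes/" (values are examples only):
#
#   regex = re.compile("^refs/notes/")
#   include_ref("refs/heads/main", regex, is_inclusion_filter=False)    # True
#   include_ref("refs/notes/commits", regex, is_inclusion_filter=False) # False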
def run_as_post_receive_hook(environment, mailer):
environment.check()
(
send_filter_regex,
send_is_inclusion_filter,
) = environment.get_ref_filter_regex(True)
ref_filter_regex, is_inclusion_filter = environment.get_ref_filter_regex(
False
)
changes = []
while True:
line = read_line(sys.stdin)
if line == "":
break
(oldrev, newrev, refname) = line.strip().split(" ", 2)
environment.get_logger().debug(
"run_as_post_receive_hook: oldrev=%s, newrev=%s, refname=%s"
% (oldrev, newrev, refname)
)
if not include_ref(refname, ref_filter_regex, is_inclusion_filter):
continue
if not include_ref(
refname, send_filter_regex, send_is_inclusion_filter
):
continue
changes.append(
ReferenceChange.create(environment, oldrev, newrev, refname)
)
if changes:
push = Push(environment, changes)
push.send_emails(mailer, body_filter=environment.filter_body)
if hasattr(mailer, "__del__"):
mailer.__del__()
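# As a post-receive hook the function above reads one line per updated ref
# from stdin, in git's standard "<old-sha1> <new-sha1> <refname>" format,
# which is what line.strip().split(" ", 2) expects.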
def run_as_update_hook(
environment, mailer, refname, oldrev, newrev, force_send=False
):
environment.check()
(
send_filter_regex,
send_is_inclusion_filter,
) = environment.get_ref_filter_regex(True)
ref_filter_regex, is_inclusion_filter = environment.get_ref_filter_regex(
False
)
if not include_ref(refname, ref_filter_regex, is_inclusion_filter):
return
if not include_ref(refname, send_filter_regex, send_is_inclusion_filter):
return
changes = [
ReferenceChange.create(
environment,
read_git_output(["rev-parse", "--verify", oldrev]),
read_git_output(["rev-parse", "--verify", newrev]),
refname,
)
]
push = Push(environment, changes, force_send)
push.send_emails(mailer, body_filter=environment.filter_body)
if hasattr(mailer, "__del__"):
mailer.__del__()
def check_ref_filter(environment):
send_filter_regex, send_is_inclusion = environment.get_ref_filter_regex(
True
)
ref_filter_regex, ref_is_inclusion = environment.get_ref_filter_regex(
False
)
def inc_exc_lusion(b):
if b:
return "inclusion"
else:
return "exclusion"
if send_filter_regex:
sys.stdout.write(
"DoSend/DontSend filter regex ("
+ (inc_exc_lusion(send_is_inclusion))
+ "): "
+ send_filter_regex.pattern
+ "\n"
)
    if ref_filter_regex:
sys.stdout.write(
"Include/Exclude filter regex ("
+ (inc_exc_lusion(ref_is_inclusion))
+ "): "
+ ref_filter_regex.pattern
+ "\n"
)
sys.stdout.write(os.linesep)
sys.stdout.write(
"Refs marked as EXCLUDE are excluded by either refFilterInclusionRegex\n"
"or refFilterExclusionRegex. No emails will be sent for commits included\n"
"in these refs.\n"
"Refs marked as DONT-SEND are excluded by either refFilterDoSendRegex or\n"
"refFilterDontSendRegex, but not by either refFilterInclusionRegex or\n"
"refFilterExclusionRegex. Emails will be sent for commits included in these\n"
"refs only when the commit reaches a ref which isn't excluded.\n"
"Refs marked as DO-SEND are not excluded by any filter. Emails will\n"
"be sent normally for commits included in these refs.\n"
)
sys.stdout.write(os.linesep)
for refname in read_git_lines(["for-each-ref", "--format", "%(refname)"]):
sys.stdout.write(refname)
if not include_ref(refname, ref_filter_regex, ref_is_inclusion):
sys.stdout.write(" EXCLUDE")
elif not include_ref(refname, send_filter_regex, send_is_inclusion):
sys.stdout.write(" DONT-SEND")
else:
sys.stdout.write(" DO-SEND")
sys.stdout.write(os.linesep)
def show_env(environment, out):
out.write("Environment values:\n")
for (k, v) in sorted(environment.get_values().items()):
if k: # Don't show the {'' : ''} pair.
out.write(" %s : %r\n" % (k, v))
out.write("\n")
# Flush to avoid interleaving with further log output
out.flush()
def check_setup(environment):
environment.check()
show_env(environment, sys.stdout)
sys.stdout.write(
"Now, checking that git-multimail's standard input "
"is properly set ..." + os.linesep
)
sys.stdout.write(
"Please type some text and then press Return" + os.linesep
)
stdin = sys.stdin.readline()
sys.stdout.write("You have just entered:" + os.linesep)
sys.stdout.write(stdin)
sys.stdout.write("git-multimail seems properly set up." + os.linesep)
def choose_mailer(config, environment):
mailer = config.get("mailer", default="sendmail")
if mailer == "smtp":
smtpserver = config.get("smtpserver", default="localhost")
smtpservertimeout = float(
config.get("smtpservertimeout", default=10.0)
)
smtpserverdebuglevel = int(
config.get("smtpserverdebuglevel", default=0)
)
smtpencryption = config.get("smtpencryption", default="none")
smtpuser = config.get("smtpuser", default="")
smtppass = config.get("smtppass", default="")
smtpcacerts = config.get("smtpcacerts", default="")
mailer = SMTPMailer(
environment,
envelopesender=(
environment.get_sender() or environment.get_fromaddr()
),
smtpserver=smtpserver,
smtpservertimeout=smtpservertimeout,
smtpserverdebuglevel=smtpserverdebuglevel,
smtpencryption=smtpencryption,
smtpuser=smtpuser,
smtppass=smtppass,
smtpcacerts=smtpcacerts,
)
elif mailer == "sendmail":
command = config.get("sendmailcommand")
if command:
command = shlex.split(command)
mailer = SendMailer(
environment,
command=command,
envelopesender=environment.get_sender(),
)
else:
environment.log_error(
'fatal: multimailhook.mailer is set to an incorrect value: "%s"\n'
% mailer
+ 'please use one of "smtp" or "sendmail".'
)
sys.exit(1)
return mailer
KNOWN_ENVIRONMENTS = {
"generic": {"highprec": GenericEnvironmentMixin},
"gitolite": {
"highprec": GitoliteEnvironmentHighPrecMixin,
"lowprec": GitoliteEnvironmentLowPrecMixin,
},
"stash": {
"highprec": StashEnvironmentHighPrecMixin,
"lowprec": StashEnvironmentLowPrecMixin,
},
"gerrit": {
"highprec": GerritEnvironmentHighPrecMixin,
"lowprec": GerritEnvironmentLowPrecMixin,
},
}
def choose_environment(
config, osenv=None, env=None, recipients=None, hook_info=None
):
env_name = choose_environment_name(config, env, osenv)
environment_klass = build_environment_klass(env_name)
env = build_environment(
environment_klass, env_name, config, osenv, recipients, hook_info
)
return env
def choose_environment_name(config, env, osenv):
if not osenv:
osenv = os.environ
if not env:
env = config.get("environment")
if not env:
if "GL_USER" in osenv and "GL_REPO" in osenv:
env = "gitolite"
else:
env = "generic"
return env
COMMON_ENVIRONMENT_MIXINS = [
ConfigRecipientsEnvironmentMixin,
CLIRecipientsEnvironmentMixin,
ConfigRefFilterEnvironmentMixin,
ProjectdescEnvironmentMixin,
ConfigMaxlinesEnvironmentMixin,
ComputeFQDNEnvironmentMixin,
ConfigFilterLinesEnvironmentMixin,
PusherDomainEnvironmentMixin,
ConfigOptionsEnvironmentMixin,
]
def build_environment_klass(env_name):
if "class" in KNOWN_ENVIRONMENTS[env_name]:
return KNOWN_ENVIRONMENTS[env_name]["class"]
environment_mixins = []
known_env = KNOWN_ENVIRONMENTS[env_name]
if "highprec" in known_env:
high_prec_mixin = known_env["highprec"]
environment_mixins.append(high_prec_mixin)
environment_mixins = environment_mixins + COMMON_ENVIRONMENT_MIXINS
if "lowprec" in known_env:
low_prec_mixin = known_env["lowprec"]
environment_mixins.append(low_prec_mixin)
environment_mixins.append(Environment)
    klass_name = env_name.capitalize() + "Environment"
environment_klass = type(klass_name, tuple(environment_mixins), {})
KNOWN_ENVIRONMENTS[env_name]["class"] = environment_klass
return environment_klass
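# Composition sketch: for env_name "gitolite" the generated class is roughly
# equivalent to (illustration only; the real class is built dynamically above)
#
#   class GitoliteEnvironment(GitoliteEnvironmentHighPrecMixin,
#                             *COMMON_ENVIRONMENT_MIXINS,
#                             GitoliteEnvironmentLowPrecMixin,
#                             Environment):
#       pass
#
# so high-precedence mixins override the shared config mixins, which in turn
# override the low-precedence defaults.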
GerritEnvironment = build_environment_klass("gerrit")
StashEnvironment = build_environment_klass("stash")
GitoliteEnvironment = build_environment_klass("gitolite")
GenericEnvironment = build_environment_klass("generic")
def build_environment(
environment_klass, env, config, osenv, recipients, hook_info
):
environment_kw = {"osenv": osenv, "config": config}
if env == "stash":
environment_kw["user"] = hook_info["stash_user"]
environment_kw["repo"] = hook_info["stash_repo"]
elif env == "gerrit":
environment_kw["project"] = hook_info["project"]
environment_kw["submitter"] = hook_info["submitter"]
environment_kw["update_method"] = hook_info["update_method"]
environment_kw["cli_recipients"] = recipients
return environment_klass(**environment_kw)
def get_version():
oldcwd = os.getcwd()
try:
try:
os.chdir(os.path.dirname(os.path.realpath(__file__)))
git_version = read_git_output(["describe", "--tags", "HEAD"])
if git_version == __version__:
return git_version
else:
return "%s (%s)" % (__version__, git_version)
except:
pass
finally:
os.chdir(oldcwd)
return __version__
def compute_gerrit_options(
options, args, required_gerrit_options, raw_refname
):
if None in required_gerrit_options:
raise SystemExit(
"Error: Specify all of --oldrev, --newrev, --refname, "
"and --project; or none of them."
)
if options.environment not in (None, "gerrit"):
raise SystemExit(
"Non-gerrit environments incompatible with --oldrev, "
"--newrev, --refname, and --project"
)
options.environment = "gerrit"
if args:
raise SystemExit(
"Error: Positional parameters not allowed with "
"--oldrev, --newrev, and --refname."
)
# Gerrit oddly omits 'refs/heads/' in the refname when calling
# ref-updated hook; put it back.
git_dir = get_git_dir()
if not os.path.exists(
os.path.join(git_dir, raw_refname)
) and os.path.exists(os.path.join(git_dir, "refs", "heads", raw_refname)):
options.refname = "refs/heads/" + options.refname
# New revisions can appear in a gerrit repository either due to someone
# pushing directly (in which case options.submitter will be set), or they
# can press "Submit this patchset" in the web UI for some CR (in which
# case options.submitter will not be set and gerrit will not have provided
# us the information about who pressed the button).
#
# Note for the nit-picky: I'm lumping in REST API calls and the ssh
# gerrit review command in with "Submit this patchset" button, since they
# have the same effect.
if options.submitter:
update_method = "pushed"
# The submitter argument is almost an RFC 2822 email address; change it
        # from 'User Name (email@domain)' to 'User Name <email@domain>' so it is one.
options.submitter = options.submitter.replace("(", "<").replace(
")", ">"
)
else:
update_method = "submitted"
# Gerrit knew who submitted this patchset, but threw that information
# away when it invoked this hook. However, *IF* Gerrit created a
# merge to bring the patchset in (project 'Submit Type' is either
# "Always Merge", or is "Merge if Necessary" and happens to be
# necessary for this particular CR), then it will have the committer
# of that merge be 'Gerrit Code Review' and the author will be the
# person who requested the submission of the CR. Since this is fairly
# likely for most gerrit installations (of a reasonable size), it's
# worth the extra effort to try to determine the actual submitter.
rev_info = read_git_lines(
[
"log",
"--no-walk",
"--merges",
"--format=%cN%n%aN <%aE>",
options.newrev,
]
)
if rev_info and rev_info[0] == "Gerrit Code Review":
options.submitter = rev_info[1]
# We pass back refname, oldrev, newrev as args because then the
# gerrit ref-updated hook is much like the git update hook
return (
options,
[options.refname, options.oldrev, options.newrev],
{
"project": options.project,
"submitter": options.submitter,
"update_method": update_method,
},
)
def check_hook_specific_args(options, args):
raw_refname = options.refname
    # Convert each string option to unicode for Python 3.
if PYTHON3:
opts = [
"environment",
"recipients",
"oldrev",
"newrev",
"refname",
"project",
"submitter",
"stash_user",
"stash_repo",
]
for opt in opts:
if not hasattr(options, opt):
continue
obj = getattr(options, opt)
if obj:
enc = obj.encode("utf-8", "surrogateescape")
dec = enc.decode("utf-8", "replace")
setattr(options, opt, dec)
# First check for stash arguments
if (options.stash_user is None) != (options.stash_repo is None):
raise SystemExit(
"Error: Specify both of --stash-user and "
"--stash-repo or neither."
)
if options.stash_user:
options.environment = "stash"
return (
options,
args,
{
"stash_user": options.stash_user,
"stash_repo": options.stash_repo,
},
)
# Finally, check for gerrit specific arguments
required_gerrit_options = (
options.oldrev,
options.newrev,
options.refname,
options.project,
)
if required_gerrit_options != (None,) * 4:
return compute_gerrit_options(
options, args, required_gerrit_options, raw_refname
)
# No special options in use, just return what we started with
return options, args, {}
class Logger(object):
def parse_verbose(self, verbose):
if verbose > 0:
return logging.DEBUG
else:
return logging.INFO
def create_log_file(self, environment, name, path, verbosity):
log_file = logging.getLogger(name)
file_handler = logging.FileHandler(path)
log_fmt = logging.Formatter(
"%(asctime)s [%(levelname)-5.5s] %(message)s"
)
file_handler.setFormatter(log_fmt)
log_file.addHandler(file_handler)
log_file.setLevel(verbosity)
return log_file
def __init__(self, environment):
self.environment = environment
self.loggers = []
stderr_log = logging.getLogger("git_multimail.stderr")
class EncodedStderr(object):
def write(self, x):
write_str(sys.stderr, x)
def flush(self):
sys.stderr.flush()
stderr_handler = logging.StreamHandler(EncodedStderr())
stderr_log.addHandler(stderr_handler)
stderr_log.setLevel(self.parse_verbose(environment.verbose))
self.loggers.append(stderr_log)
if environment.debug_log_file is not None:
debug_log_file = self.create_log_file(
environment,
"git_multimail.debug",
environment.debug_log_file,
logging.DEBUG,
)
self.loggers.append(debug_log_file)
if environment.log_file is not None:
log_file = self.create_log_file(
environment,
"git_multimail.file",
environment.log_file,
logging.INFO,
)
self.loggers.append(log_file)
if environment.error_log_file is not None:
error_log_file = self.create_log_file(
environment,
"git_multimail.error",
environment.error_log_file,
logging.ERROR,
)
self.loggers.append(error_log_file)
def info(self, msg):
for l in self.loggers:
l.info(msg)
def debug(self, msg):
for l in self.loggers:
l.debug(msg)
def warning(self, msg):
for l in self.loggers:
l.warning(msg)
def error(self, msg):
for l in self.loggers:
l.error(msg)
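# A minimal usage sketch for Logger (assuming an 'environment' object exposing
# the 'verbose', 'debug_log_file', 'log_file' and 'error_log_file' attributes
# read in __init__ above):
#
#   logger = Logger(environment)
#   logger.info("sending refchange email")
#   logger.error("failed to send email")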
def main(args):
parser = optparse.OptionParser(
description=__doc__,
usage="%prog [OPTIONS]\n or: %prog [OPTIONS] REFNAME OLDREV NEWREV",
)
parser.add_option(
"--environment",
"--env",
action="store",
type="choice",
choices=list(KNOWN_ENVIRONMENTS.keys()),
default=None,
help=(
"Choose type of environment is in use. Default is taken from "
'multimailhook.environment if set; otherwise "generic".'
),
)
parser.add_option(
"--stdout",
action="store_true",
default=False,
help="Output emails to stdout rather than sending them.",
)
parser.add_option(
"--recipients",
action="store",
default=None,
help="Set list of email recipients for all types of emails.",
)
parser.add_option(
"--show-env",
action="store_true",
default=False,
help=(
"Write to stderr the values determined for the environment "
"(intended for debugging purposes), then proceed normally."
),
)
parser.add_option(
"--force-send",
action="store_true",
default=False,
help=(
"Force sending refchange email when using as an update hook. "
"This is useful to work around the unreliable new commits "
"detection in this mode."
),
)
parser.add_option(
"-c",
metavar="<name>=<value>",
action="append",
help=(
"Pass a configuration parameter through to git. The value given "
"will override values from configuration files. See the -c option "
"of git(1) for more details. (Only works with git >= 1.7.3)"
),
)
parser.add_option(
"--version",
"-v",
action="store_true",
default=False,
help=("Display git-multimail's version"),
)
parser.add_option(
"--python-version",
action="store_true",
default=False,
help=("Display the version of Python used by git-multimail"),
)
parser.add_option(
"--check-ref-filter",
action="store_true",
default=False,
help=(
"List refs and show information on how git-multimail "
"will process them."
),
)
# The following options permit this script to be run as a gerrit
# ref-updated hook. See e.g.
# code.google.com/p/gerrit/source/browse/Documentation/config-hooks.txt
# We suppress help for these items, since these are specific to gerrit,
# and we don't want users directly using them any way other than how the
# gerrit ref-updated hook is called.
parser.add_option("--oldrev", action="store", help=optparse.SUPPRESS_HELP)
parser.add_option("--newrev", action="store", help=optparse.SUPPRESS_HELP)
parser.add_option("--refname", action="store", help=optparse.SUPPRESS_HELP)
parser.add_option("--project", action="store", help=optparse.SUPPRESS_HELP)
parser.add_option(
"--submitter", action="store", help=optparse.SUPPRESS_HELP
)
# The following allow this to be run as a stash asynchronous post-receive
# hook (almost identical to a git post-receive hook but triggered also for
# merges of pull requests from the UI). We suppress help for these items,
# since these are specific to stash.
parser.add_option(
"--stash-user", action="store", help=optparse.SUPPRESS_HELP
)
parser.add_option(
"--stash-repo", action="store", help=optparse.SUPPRESS_HELP
)
(options, args) = parser.parse_args(args)
(options, args, hook_info) = check_hook_specific_args(options, args)
if options.version:
sys.stdout.write("git-multimail version " + get_version() + "\n")
return
if options.python_version:
sys.stdout.write("Python version " + sys.version + "\n")
return
if options.c:
Config.add_config_parameters(options.c)
config = Config("multimailhook")
environment = None
try:
environment = choose_environment(
config,
osenv=os.environ,
env=options.environment,
recipients=options.recipients,
hook_info=hook_info,
)
if options.show_env:
show_env(environment, sys.stderr)
if options.stdout or environment.stdout:
mailer = OutputMailer(sys.stdout)
else:
mailer = choose_mailer(config, environment)
must_check_setup = os.environ.get("GIT_MULTIMAIL_CHECK_SETUP")
if must_check_setup == "":
must_check_setup = False
if options.check_ref_filter:
check_ref_filter(environment)
elif must_check_setup:
check_setup(environment)
# Dual mode: if arguments were specified on the command line, run
# like an update hook; otherwise, run as a post-receive hook.
elif args:
if len(args) != 3:
parser.error("Need zero or three non-option arguments")
(refname, oldrev, newrev) = args
environment.get_logger().debug(
"run_as_update_hook: refname=%s, oldrev=%s, newrev=%s, force_send=%s"
% (refname, oldrev, newrev, options.force_send)
)
run_as_update_hook(
environment,
mailer,
refname,
oldrev,
newrev,
options.force_send,
)
else:
run_as_post_receive_hook(environment, mailer)
except ConfigurationException:
sys.exit(sys.exc_info()[1])
except SystemExit:
raise
except Exception:
t, e, tb = sys.exc_info()
import traceback
sys.stderr.write("\n") # Avoid mixing message with previous output
msg = (
"Exception '"
+ t.__name__
+ "' raised. Please report this as a bug to\n"
"https://github.com/git-multimail/git-multimail/issues\n"
"with the information below:\n\n"
"git-multimail version " + get_version() + "\n"
"Python version " + sys.version + "\n" + traceback.format_exc()
)
try:
environment.get_logger().error(msg)
except:
sys.stderr.write(msg)
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
| [] | [] | ["GIT_CONFIG_PARAMETERS", "GIT_MULTIMAIL_CHECK_SETUP"] | [] | ["GIT_CONFIG_PARAMETERS", "GIT_MULTIMAIL_CHECK_SETUP"] | python | 2 | 0 |
eventstream/memorystream/doc.go
|
// Package memorystream provides an in-memory implementation of
// eventstream.Stream.
package memorystream
| [] | [] | [] | [] | [] | go | null | null | null |
pkg/mod/golang.org/x/[email protected]/internal/lsp/cache/view.go
|
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cache implements the caching layer for gopls.
package cache
import (
"context"
"encoding/json"
"fmt"
"go/ast"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"strings"
"sync"
"time"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/event/keys"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/imports"
"golang.org/x/tools/internal/lsp/debug/tag"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/memoize"
"golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/xcontext"
errors "golang.org/x/xerrors"
)
type View struct {
session *Session
id string
optionsMu sync.Mutex
options source.Options
// mu protects most mutable state of the view.
mu sync.Mutex
// baseCtx is the context handed to NewView. This is the parent of all
// background contexts created for this view.
baseCtx context.Context
// backgroundCtx is the current context used by background tasks initiated
// by the view.
backgroundCtx context.Context
// cancel is called when all action being performed by the current view
// should be stopped.
cancel context.CancelFunc
// Name is the user visible name of this view.
name string
// Folder is the root of this view.
folder span.URI
// importsMu guards imports-related state, particularly the ProcessEnv.
importsMu sync.Mutex
// process is the process env for this view.
// Note: this contains cached module and filesystem state.
//
// TODO(suzmue): the state cached in the process env is specific to each view,
// however, there is state that can be shared between views that is not currently
// cached, like the module cache.
processEnv *imports.ProcessEnv
cacheRefreshDuration time.Duration
cacheRefreshTimer *time.Timer
cachedModFileVersion source.FileIdentity
// keep track of files by uri and by basename, a single file may be mapped
// to multiple uris, and the same basename may map to multiple files
filesByURI map[span.URI]*fileBase
filesByBase map[string][]*fileBase
snapshotMu sync.Mutex
snapshot *snapshot
// ignoredURIs is the set of URIs of files that we ignore.
ignoredURIsMu sync.Mutex
ignoredURIs map[span.URI]struct{}
// initialized is closed when the view has been fully initialized.
// On initialization, the view's workspace packages are loaded.
// All of the fields below are set as part of initialization.
// If we failed to load, we don't re-try to avoid too many go/packages calls.
initializeOnce sync.Once
initialized chan struct{}
// initializedErr needs no mutex, since any access to it happens after it
// has been set.
initializedErr error
// builtin pins the AST and package for builtin.go in memory.
builtin *builtinPackageHandle
// True if the view is either in GOPATH, a module, or some other
// non go command build system.
hasValidBuildConfiguration bool
// The real and temporary go.mod files that are attributed to a view.
// The temporary go.mod is for use with the Go command's -modfile flag.
realMod, tempMod span.URI
// goCommand indicates if the user is using the go command or some other
// build system.
goCommand bool
// `go env` variables that need to be tracked.
gopath, gocache, goprivate string
// gocmdRunner guards go command calls from concurrency errors.
gocmdRunner *gocommand.Runner
}
type builtinPackageHandle struct {
handle *memoize.Handle
file source.ParseGoHandle
}
type builtinPackageData struct {
memoize.NoCopy
pkg *ast.Package
err error
}
// fileBase holds the common functionality for all files.
// It is intended to be embedded in the file implementations
type fileBase struct {
uris []span.URI
fname string
view *View
}
func (f *fileBase) URI() span.URI {
return f.uris[0]
}
func (f *fileBase) filename() string {
return f.fname
}
func (f *fileBase) addURI(uri span.URI) int {
f.uris = append(f.uris, uri)
return len(f.uris)
}
func (v *View) ID() string { return v.id }
func (v *View) ValidBuildConfiguration() bool {
return v.hasValidBuildConfiguration
}
func (v *View) ModFiles() (span.URI, span.URI) {
return v.realMod, v.tempMod
}
func (v *View) Session() source.Session {
return v.session
}
// Name returns the user visible name of this view.
func (v *View) Name() string {
return v.name
}
// Folder returns the root of this view.
func (v *View) Folder() span.URI {
return v.folder
}
func (v *View) Options() source.Options {
v.optionsMu.Lock()
defer v.optionsMu.Unlock()
return v.options
}
func minorOptionsChange(a, b source.Options) bool {
// Check if any of the settings that modify our understanding of files have been changed
if !reflect.DeepEqual(a.Env, b.Env) {
return false
}
if !reflect.DeepEqual(a.BuildFlags, b.BuildFlags) {
return false
}
// the rest of the options are benign
return true
}
func (v *View) SetOptions(ctx context.Context, options source.Options) (source.View, error) {
// no need to rebuild the view if the options were not materially changed
v.optionsMu.Lock()
if minorOptionsChange(v.options, options) {
v.options = options
v.optionsMu.Unlock()
return v, nil
}
v.optionsMu.Unlock()
newView, _, err := v.session.updateView(ctx, v, options)
return newView, err
}
func (v *View) Rebuild(ctx context.Context) (source.Snapshot, error) {
_, snapshot, err := v.session.updateView(ctx, v, v.Options())
return snapshot, err
}
func (v *View) LookupBuiltin(ctx context.Context, name string) (*ast.Object, error) {
v.awaitInitialized(ctx)
if v.builtin == nil {
return nil, errors.Errorf("no builtin package for view %s", v.name)
}
data := v.builtin.handle.Get(ctx)
if ctx.Err() != nil {
return nil, ctx.Err()
}
if data == nil {
return nil, errors.Errorf("unexpected nil builtin package")
}
d, ok := data.(*builtinPackageData)
if !ok {
return nil, errors.Errorf("unexpected type %T", data)
}
if d.err != nil {
return nil, d.err
}
if d.pkg == nil || d.pkg.Scope == nil {
return nil, errors.Errorf("no builtin package")
}
astObj := d.pkg.Scope.Lookup(name)
if astObj == nil {
return nil, errors.Errorf("no builtin object for %s", name)
}
return astObj, nil
}
func (v *View) buildBuiltinPackage(ctx context.Context, goFiles []string) error {
if len(goFiles) != 1 {
return errors.Errorf("only expected 1 file, got %v", len(goFiles))
}
uri := span.URIFromPath(goFiles[0])
v.addIgnoredFile(uri) // to avoid showing diagnostics for builtin.go
// Get the FileHandle through the cache to avoid adding it to the snapshot
// and to get the file content from disk.
pgh := v.session.cache.ParseGoHandle(v.session.cache.GetFile(uri), source.ParseFull)
fset := v.session.cache.fset
h := v.session.cache.store.Bind(pgh.File().Identity(), func(ctx context.Context) interface{} {
data := &builtinPackageData{}
file, _, _, _, err := pgh.Parse(ctx)
if err != nil {
data.err = err
return data
}
data.pkg, data.err = ast.NewPackage(fset, map[string]*ast.File{
pgh.File().Identity().URI.Filename(): file,
}, nil, nil)
return data
})
v.builtin = &builtinPackageHandle{
handle: h,
file: pgh,
}
return nil
}
func (v *View) WriteEnv(ctx context.Context, w io.Writer) error {
v.optionsMu.Lock()
env, buildFlags := v.envLocked()
v.optionsMu.Unlock()
// TODO(rstambler): We could probably avoid running this by saving the
// output on original create, but I'm not sure if it's worth it.
inv := gocommand.Invocation{
Verb: "env",
Env: env,
WorkingDir: v.Folder().Filename(),
}
stdout, err := v.gocmdRunner.Run(ctx, inv)
if err != nil {
return err
}
fmt.Fprintf(w, "go env for %v\n(valid build configuration = %v)\n(build flags: %v)\n", v.folder.Filename(), v.hasValidBuildConfiguration, buildFlags)
fmt.Fprint(w, stdout)
return nil
}
func (v *View) RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error) error {
v.importsMu.Lock()
defer v.importsMu.Unlock()
if v.processEnv == nil {
var err error
if v.processEnv, err = v.buildProcessEnv(ctx); err != nil {
return err
}
}
// In module mode, check if the mod file has changed.
if v.realMod != "" {
if mod := v.session.cache.GetFile(v.realMod); mod.Identity() != v.cachedModFileVersion {
v.processEnv.GetResolver().(*imports.ModuleResolver).ClearForNewMod()
v.cachedModFileVersion = mod.Identity()
}
}
// Run the user function.
opts := &imports.Options{
// Defaults.
AllErrors: true,
Comments: true,
Fragment: true,
FormatOnly: false,
TabIndent: true,
TabWidth: 8,
Env: v.processEnv,
}
if err := fn(opts); err != nil {
return err
}
if v.cacheRefreshTimer == nil {
// Don't refresh more than twice per minute.
delay := 30 * time.Second
// Don't spend more than a couple percent of the time refreshing.
if adaptive := 50 * v.cacheRefreshDuration; adaptive > delay {
delay = adaptive
}
v.cacheRefreshTimer = time.AfterFunc(delay, v.refreshProcessEnv)
}
return nil
}
func (v *View) refreshProcessEnv() {
start := time.Now()
v.importsMu.Lock()
env := v.processEnv
env.GetResolver().ClearForNewScan()
v.importsMu.Unlock()
// We don't have a context handy to use for logging, so use the stdlib for now.
event.Log(v.baseCtx, "background imports cache refresh starting")
err := imports.PrimeCache(context.Background(), env)
if err == nil {
event.Log(v.baseCtx, fmt.Sprintf("background refresh finished after %v", time.Since(start)))
} else {
event.Log(v.baseCtx, fmt.Sprintf("background refresh finished after %v", time.Since(start)), keys.Err.Of(err))
}
v.importsMu.Lock()
v.cacheRefreshDuration = time.Since(start)
v.cacheRefreshTimer = nil
v.importsMu.Unlock()
}
func (v *View) buildProcessEnv(ctx context.Context) (*imports.ProcessEnv, error) {
v.optionsMu.Lock()
env, buildFlags := v.envLocked()
localPrefix, verboseOutput := v.options.LocalPrefix, v.options.VerboseOutput
v.optionsMu.Unlock()
processEnv := &imports.ProcessEnv{
WorkingDir: v.folder.Filename(),
BuildFlags: buildFlags,
LocalPrefix: localPrefix,
GocmdRunner: v.gocmdRunner,
}
if verboseOutput {
processEnv.Logf = func(format string, args ...interface{}) {
event.Log(ctx, fmt.Sprintf(format, args...))
}
}
for _, kv := range env {
split := strings.SplitN(kv, "=", 2)
if len(split) < 2 {
continue
}
switch split[0] {
case "GOPATH":
processEnv.GOPATH = split[1]
case "GOROOT":
processEnv.GOROOT = split[1]
case "GO111MODULE":
processEnv.GO111MODULE = split[1]
case "GOPROXY":
processEnv.GOPROXY = split[1]
case "GOFLAGS":
processEnv.GOFLAGS = split[1]
case "GOSUMDB":
processEnv.GOSUMDB = split[1]
}
}
if processEnv.GOPATH == "" {
return nil, fmt.Errorf("no GOPATH for view %s", v.folder)
}
return processEnv, nil
}
func (v *View) envLocked() ([]string, []string) {
// We want to run the go commands with the -modfile flag if the version of go
// that we are using supports it.
buildFlags := v.options.BuildFlags
if v.tempMod != "" {
buildFlags = append(buildFlags, fmt.Sprintf("-modfile=%s", v.tempMod.Filename()))
}
env := []string{fmt.Sprintf("GOPATH=%s", v.gopath)}
env = append(env, v.options.Env...)
return env, buildFlags
}
func (v *View) contains(uri span.URI) bool {
return strings.HasPrefix(string(uri), string(v.folder))
}
func (v *View) mapFile(uri span.URI, f *fileBase) {
v.filesByURI[uri] = f
if f.addURI(uri) == 1 {
basename := basename(f.filename())
v.filesByBase[basename] = append(v.filesByBase[basename], f)
}
}
func basename(filename string) string {
return strings.ToLower(filepath.Base(filename))
}
func (v *View) relevantChange(c source.FileModification) bool {
// If the file is known to the view, the change is relevant.
known := v.knownFile(c.URI)
// If the file is not known to the view, and the change is only on-disk,
// we should not invalidate the snapshot. This is necessary because Emacs
// sends didChangeWatchedFiles events for temp files.
if !known && c.OnDisk && (c.Action == source.Change || c.Action == source.Delete) {
return false
}
return v.contains(c.URI) || known
}
func (v *View) knownFile(uri span.URI) bool {
v.mu.Lock()
defer v.mu.Unlock()
f, err := v.findFile(uri)
return f != nil && err == nil
}
// getFile returns a file for the given URI. It will always succeed because it
// adds the file to the managed set if needed.
func (v *View) getFile(uri span.URI) (*fileBase, error) {
v.mu.Lock()
defer v.mu.Unlock()
f, err := v.findFile(uri)
if err != nil {
return nil, err
} else if f != nil {
return f, nil
}
f = &fileBase{
view: v,
fname: uri.Filename(),
}
v.mapFile(uri, f)
return f, nil
}
// findFile checks the cache for any file matching the given uri.
//
// An error is only returned for an irreparable failure, for example, if the
// filename in question does not exist.
func (v *View) findFile(uri span.URI) (*fileBase, error) {
if f := v.filesByURI[uri]; f != nil {
// a perfect match
return f, nil
}
// no exact match stored, time to do some real work
// check for any files with the same basename
fname := uri.Filename()
basename := basename(fname)
if candidates := v.filesByBase[basename]; candidates != nil {
pathStat, err := os.Stat(fname)
if os.IsNotExist(err) {
return nil, err
}
if err != nil {
return nil, nil // the file may exist, return without an error
}
for _, c := range candidates {
if cStat, err := os.Stat(c.filename()); err == nil {
if os.SameFile(pathStat, cStat) {
// same file, map it
v.mapFile(uri, c)
return c, nil
}
}
}
}
// no file with a matching name was found, it wasn't in our cache
return nil, nil
}
func (v *View) Shutdown(ctx context.Context) {
v.session.removeView(ctx, v)
}
func (v *View) shutdown(ctx context.Context) {
// TODO: Cancel the view's initialization.
v.mu.Lock()
defer v.mu.Unlock()
if v.cancel != nil {
v.cancel()
v.cancel = nil
}
if v.tempMod != "" {
os.Remove(v.tempMod.Filename())
os.Remove(tempSumFile(v.tempMod.Filename()))
}
}
// Ignore checks if the given URI is a URI we ignore.
// As of right now, we only ignore files in the "builtin" package.
func (v *View) Ignore(uri span.URI) bool {
v.ignoredURIsMu.Lock()
defer v.ignoredURIsMu.Unlock()
_, ok := v.ignoredURIs[uri]
// Files with _ prefixes are always ignored.
if !ok && strings.HasPrefix(filepath.Base(uri.Filename()), "_") {
v.ignoredURIs[uri] = struct{}{}
return true
}
return ok
}
func (v *View) addIgnoredFile(uri span.URI) {
v.ignoredURIsMu.Lock()
defer v.ignoredURIsMu.Unlock()
v.ignoredURIs[uri] = struct{}{}
}
func (v *View) BackgroundContext() context.Context {
v.mu.Lock()
defer v.mu.Unlock()
return v.backgroundCtx
}
func (v *View) Snapshot() source.Snapshot {
return v.getSnapshot()
}
func (v *View) getSnapshot() *snapshot {
v.snapshotMu.Lock()
defer v.snapshotMu.Unlock()
return v.snapshot
}
func (v *View) initialize(ctx context.Context, s *snapshot) {
v.initializeOnce.Do(func() {
defer close(v.initialized)
if err := s.load(ctx, viewLoadScope("LOAD_VIEW"), packagePath("builtin")); err != nil {
v.initializedErr = err
event.Error(ctx, "initial workspace load failed", err)
}
})
}
func (v *View) awaitInitialized(ctx context.Context) {
select {
case <-ctx.Done():
case <-v.initialized:
}
}
// invalidateContent invalidates the content of a Go file,
// including any position and type information that depends on it.
// It returns true if we were already tracking the given file, false otherwise.
func (v *View) invalidateContent(ctx context.Context, uris map[span.URI]source.FileHandle, forceReloadMetadata bool) source.Snapshot {
// Detach the context so that content invalidation cannot be canceled.
ctx = xcontext.Detach(ctx)
// Cancel all still-running previous requests, since they would be
// operating on stale data.
v.cancelBackground()
// Do not clone a snapshot until its view has finished initializing.
v.awaitInitialized(ctx)
// This should be the only time we hold the view's snapshot lock for any period of time.
v.snapshotMu.Lock()
defer v.snapshotMu.Unlock()
v.snapshot = v.snapshot.clone(ctx, uris, forceReloadMetadata)
return v.snapshot
}
func (v *View) cancelBackground() {
v.mu.Lock()
defer v.mu.Unlock()
if v.cancel == nil {
// this can happen during shutdown
return
}
v.cancel()
v.backgroundCtx, v.cancel = context.WithCancel(v.baseCtx)
}
func (v *View) setBuildInformation(ctx context.Context, folder span.URI, env []string, modfileFlagEnabled bool) error {
if err := checkPathCase(folder.Filename()); err != nil {
return fmt.Errorf("invalid workspace configuration: %w", err)
}
// Make sure to get the `go env` before continuing with initialization.
gomod, err := v.getGoEnv(ctx, env)
if err != nil {
return err
}
modFile := strings.TrimSpace(gomod)
if modFile == os.DevNull {
return nil
}
v.realMod = span.URIFromPath(modFile)
// Now that we have set all required fields,
// check if the view has a valid build configuration.
v.hasValidBuildConfiguration = checkBuildConfiguration(v.goCommand, v.realMod, v.folder, v.gopath)
// The user has disabled the use of the -modfile flag or has no go.mod file.
if !modfileFlagEnabled || v.realMod == "" {
return nil
}
if modfileFlag, err := v.modfileFlagExists(ctx, v.Options().Env); err != nil {
return err
} else if !modfileFlag {
return nil
}
// Copy the current go.mod file into the temporary go.mod file.
// The file's name will be of the format go.directory.1234.mod.
// Its temporary go.sum file should have the corresponding format of go.directory.1234.sum.
tmpPattern := fmt.Sprintf("go.%s.*.mod", filepath.Base(folder.Filename()))
tempModFile, err := ioutil.TempFile("", tmpPattern)
if err != nil {
return err
}
defer tempModFile.Close()
origFile, err := os.Open(modFile)
if err != nil {
return err
}
defer origFile.Close()
if _, err := io.Copy(tempModFile, origFile); err != nil {
return err
}
v.tempMod = span.URIFromPath(tempModFile.Name())
// Copy go.sum file as well (if there is one).
sumFile := filepath.Join(filepath.Dir(modFile), "go.sum")
stat, err := os.Stat(sumFile)
if err != nil || !stat.Mode().IsRegular() {
return nil
}
contents, err := ioutil.ReadFile(sumFile)
if err != nil {
return err
}
if err := ioutil.WriteFile(tempSumFile(tempModFile.Name()), contents, stat.Mode()); err != nil {
return err
}
return nil
}
// OS-specific path case check, for case-insensitive filesystems.
var checkPathCase = defaultCheckPathCase
func defaultCheckPathCase(path string) error {
return nil
}
func checkBuildConfiguration(goCommand bool, mod, folder span.URI, gopath string) bool {
// Since we only really understand the `go` command, if the user is not
// using the go command, assume that their configuration is valid.
if !goCommand {
return true
}
// Check if the user is working within a module.
if mod != "" {
return true
}
// The user may have multiple directories in their GOPATH.
// Check if the workspace is within any of them.
for _, gp := range filepath.SplitList(gopath) {
if isSubdirectory(filepath.Join(gp, "src"), folder.Filename()) {
return true
}
}
return false
}
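// isSubdirectory reports whether leaf lies inside root. For example (paths
// purely illustrative), isSubdirectory("/home/user/go/src", "/home/user/go/src/proj")
// is true, while isSubdirectory("/home/user/go/src", "/home/user/other") is false.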
func isSubdirectory(root, leaf string) bool {
rel, err := filepath.Rel(root, leaf)
return err == nil && !strings.HasPrefix(rel, "..")
}
// getGoEnv sets the view's build information's GOPATH, GOCACHE, GOPRIVATE, and
// GOPACKAGESDRIVER values. It also returns the view's GOMOD value, which need
// not be cached.
func (v *View) getGoEnv(ctx context.Context, env []string) (string, error) {
var gocache, gopath, gopackagesdriver, goprivate bool
isGoCommand := func(gopackagesdriver string) bool {
return gopackagesdriver == "" || gopackagesdriver == "off"
}
for _, e := range env {
split := strings.Split(e, "=")
if len(split) != 2 {
continue
}
switch split[0] {
case "GOCACHE":
v.gocache = split[1]
gocache = true
case "GOPATH":
v.gopath = split[1]
gopath = true
case "GOPRIVATE":
v.goprivate = split[1]
goprivate = true
case "GOPACKAGESDRIVER":
v.goCommand = isGoCommand(split[1])
gopackagesdriver = true
}
}
inv := gocommand.Invocation{
Verb: "env",
Args: []string{"-json"},
Env: env,
WorkingDir: v.Folder().Filename(),
}
stdout, err := v.gocmdRunner.Run(ctx, inv)
if err != nil {
return "", err
}
envMap := make(map[string]string)
decoder := json.NewDecoder(stdout)
if err := decoder.Decode(&envMap); err != nil {
return "", err
}
if !gopath {
if gopath, ok := envMap["GOPATH"]; ok {
v.gopath = gopath
} else {
return "", errors.New("unable to determine GOPATH")
}
}
if !gocache {
if gocache, ok := envMap["GOCACHE"]; ok {
v.gocache = gocache
} else {
return "", errors.New("unable to determine GOCACHE")
}
}
if !goprivate {
if goprivate, ok := envMap["GOPRIVATE"]; ok {
v.goprivate = goprivate
}
// No error here: GOPRIVATE is not essential.
}
// The value of GOPACKAGESDRIVER is not returned through the go command.
if !gopackagesdriver {
v.goCommand = isGoCommand(os.Getenv("GOPACKAGESDRIVER"))
}
if gomod, ok := envMap["GOMOD"]; ok {
return gomod, nil
}
return "", nil
}
func (v *View) IsGoPrivatePath(target string) bool {
return globsMatchPath(v.goprivate, target)
}
// Copied from
// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/str/path.go;l=58;drc=2910c5b4a01a573ebc97744890a07c1a3122c67a
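// A sketch of the expected behaviour, following the documented GOPRIVATE
// semantics (values illustrative):
//   globsMatchPath("*.corp.example.com,rsc.io/private", "git.corp.example.com/repo") // true
//   globsMatchPath("*.corp.example.com,rsc.io/private", "rsc.io/public")             // false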
func globsMatchPath(globs, target string) bool {
for globs != "" {
// Extract next non-empty glob in comma-separated list.
var glob string
if i := strings.Index(globs, ","); i >= 0 {
glob, globs = globs[:i], globs[i+1:]
} else {
glob, globs = globs, ""
}
if glob == "" {
continue
}
// A glob with N+1 path elements (N slashes) needs to be matched
// against the first N+1 path elements of target,
// which end just before the N+1'th slash.
n := strings.Count(glob, "/")
prefix := target
// Walk target, counting slashes, truncating at the N+1'th slash.
for i := 0; i < len(target); i++ {
if target[i] == '/' {
if n == 0 {
prefix = target[:i]
break
}
n--
}
}
if n > 0 {
// Not enough prefix elements.
continue
}
matched, _ := path.Match(glob, prefix)
if matched {
return true
}
}
return false
}
// This function will return the main go.mod file for this folder if it exists and whether the -modfile
// flag exists for this version of go.
func (v *View) modfileFlagExists(ctx context.Context, env []string) (bool, error) {
// Check the go version by running "go list" with modules off.
// Borrowed from internal/imports/mod.go:620.
const format = `{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}`
folder := v.folder.Filename()
inv := gocommand.Invocation{
Verb: "list",
Args: []string{"-e", "-f", format},
Env: append(env, "GO111MODULE=off"),
WorkingDir: v.Folder().Filename(),
}
stdout, err := v.gocmdRunner.Run(ctx, inv)
if err != nil {
return false, err
}
// If the output is not go1.14 or an empty string, then it could be an error.
lines := strings.Split(stdout.String(), "\n")
if len(lines) < 2 && stdout.String() != "" {
event.Error(ctx, "unexpected stdout when checking for go1.14", errors.Errorf("%q", stdout), tag.Directory.Of(folder))
return false, nil
}
return lines[0] == "go1.14", nil
}
// tempSumFile returns the path to the copied temporary go.sum file.
// It simply replaces the extension of the temporary go.mod file with "sum".
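// For example, "/tmp/go.project.1234.mod" becomes "/tmp/go.project.1234.sum"
// (path shown for illustration only).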
func tempSumFile(filename string) string {
if filename == "" {
return ""
}
return filename[:len(filename)-len("mod")] + "sum"
}
| ["\"GOPACKAGESDRIVER\""] | [] | ["GOPACKAGESDRIVER"] | [] | ["GOPACKAGESDRIVER"] | go | 1 | 0 |
dltb/thirdparty/datasource/widerface.py
|
"""The WiderFace dataset.
"""
# standard imports
import os
import logging
# third party imports
import numpy as np
# toolbox imports
from dltb.base.data import Data
from dltb.base.image import BoundingBox, Region, Landmarks
from dltb.tool.classifier import ClassScheme
from dltb.datasource import Imagesource, Sectioned, DataDirectory
# logging
LOG = logging.getLogger(__name__)
class WiderfaceScheme(ClassScheme):
"""The WiderFace dataset divides its data into
62 classes (actually just 61, as class 60 is missing).
Class labels can be obtained from directory names in the
data directories.
"""
def __init__(self) -> None:
"""Iniitalization of the :py:class:`WiderfaceScheme`.
"""
# The WIDER face dataset has 62 classes (but it seems
# that only 61 are used - class '60' is missing).
super().__init__(length=62, key='widerface')
@property
def prepared(self) -> bool:
"""Check if the :py:class:`WiderfaceScheme` has been initialized.
"""
return 'text' in self._labels
def prepare(self) -> None:
"""Prepare the labels for the Widerface dataset.
The labels will be read in from the directory names
in the WIDERFACE_DATA directory.
"""
if self.prepared:
return # nothing to do ...
widerface_data = os.getenv('WIDERFACE_DATA')
train_dir = os.path.join(widerface_data, 'WIDER_train', 'images')
text = [''] * len(self)
for dirname in os.listdir(train_dir):
number, label = dirname.split('--', maxsplit=1)
text[int(number)] = label
self.add_labels(text, 'text')
WiderfaceScheme()
class WiderFace(DataDirectory, Imagesource, Sectioned,
sections={'train', 'val', 'test'}):
# pylint: disable=too-many-ancestors
"""
http://shuoyang1213.me/WIDERFACE/
"Wider Face" is A face detection benchmark consisting of 32,203
images with 393,703 labeled faces.
The faces have wide variability in scale, pose, occlusion.
Images are categorized into 61 event classes.
From each class, train/validation/test splits were created
in the ratio 40%/10%/50%.
Attributes
----------
blur: Tuple[str]
expression: Tuple[str]
illumination: Tuple[str]
occlusion: Tuple[str]
invalid: Tuple[str]
"""
blur = ('clear', 'normal blur', 'heavy blur')
expression = ('typical expression', 'exaggerate expression')
illumination = ('normal illumination', 'extreme illumination')
occlusion = ('no occlusion', 'partial occlusion', 'heavy occlusion')
pose = ('typical pose', 'atypical pose')
invalid = ('valid image', 'invalid image')
def __init__(self, section: str = 'train',
key: str = None, **kwargs) -> None:
"""Initialize the WIDER Face Datasource.
"""
self._widerface_data = os.getenv('WIDERFACE_DATA', '.')
self._section = section
scheme = ClassScheme['widerface']
directory = os.path.join(self._widerface_data,
'WIDER_' + self._section, 'images')
super().__init__(key=key or f"wider-faces-{section}",
section=section, directory=directory, scheme=scheme,
description=f"WIDER Faces", **kwargs)
self._annotations = None
def __str__(self):
return f'WIDER Faces ({self._section})'
#
# Preparation
#
def _prepare(self, **kwargs) -> None:
# pylint: disable=arguments-differ
"""Prepare the WIDER Face dataset. This will provide in a list of
all images provided by the dataset, either by reading in a
prepared file, or by traversing the directory.
"""
LOG.info("Preparing WiderFace[%r]: %s",
self.preparable, self.directory)
cache = f"widerface_{self._section}_filelist.p"
super()._prepare(filenames_cache=cache, **kwargs)
self._scheme.prepare()
self._prepare_annotations()
def _unprepare(self):
"""Prepare the WIDER Face dataset. This will provide in a list of
all images provided by the dataset, either by reading in a
prepared file, or by traversing the directory.
"""
self._annotations = None
super()._unprepare()
def _prepare_annotations(self):
"""Load the annotations for the training images.
The annotations are stored in a single large text file
('wider_face_train_bbx_gt.txt'), with a multi-line entry per file.
An entry has the following structure: The first line contains
the filename of the training image. The second line contains
the number of faces in that image. Then follows one line for
each face, consisting of a bounding box (x,y,w,h) and attributes
(blur, expression, illumination, invalid, occlusion, pose)
encoded numerically. In these lines, all numbers are separated
by spaces. Example:
0--Parade/0_Parade_marchingband_1_95.jpg
5
828 209 56 76 0 0 0 0 0 0
661 258 49 65 0 0 0 0 0 0
503 253 48 66 0 0 1 0 0 0
366 181 51 74 0 0 1 0 0 0
148 176 54 68 0 0 1 0 0 0
"""
self._annotations = {}
# check if annotations file exists
filename = None
if self._widerface_data is not None:
filename = os.path.join(self._widerface_data, 'wider_face_split',
'wider_face_train_bbx_gt.txt')
if not os.path.isfile(filename):
return # file not found
# load the annotations
try:
with open(filename, "r") as file:
for filename in file:
filename = filename.rstrip()
lines = int(file.readline())
faces = []
for line_number in range(lines):
# x1, y1, w, h, blur, expression, illumination,
# invalid, occlusion, pose
attributes = tuple(int(a)
for a in file.readline().split())
if len(attributes) == 10:
faces.append(attributes)
else:
LOG.warning("bad annotation for '%s', line %d/%d':"
"got %d instead of 10 values",
filename, line_number,
lines, len(attributes))
if lines == 0:
# images with 0 faces nevertheless have one
# line with dummy attributes -> just ignore that line
file.readline()
# Store all faces for the current file
self._annotations[filename] = faces
except FileNotFoundError:
self._annotations = {}
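# After parsing, self._annotations maps relative image paths to lists of
# 10-tuples; using the entry from the docstring above (illustrative):
#   self._annotations['0--Parade/0_Parade_marchingband_1_95.jpg'][0] ==
#       (828, 209, 56, 76, 0, 0, 0, 0, 0, 0)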
#
# Data
#
def _get_meta(self, data: Data, **kwargs) -> None:
data.add_attribute('label', batch=True)
super()._get_meta(data, **kwargs)
def _get_data_from_file(self, data, filename: str) -> str:
"""
Arguments
---------
filename: str
The relative filename.
"""
super()._get_data_from_file(data, filename)
regions = []
for (pos_x, pos_y, width, height, blur, expression, illumination,
invalid, occlusion, pose) in self._annotations[filename]:
region = Region(BoundingBox(x=pos_x, y=pos_y,
width=width, height=height),
blur=blur, expression=expression,
illumination=illumination,
invalid=invalid, occlusion=occlusion,
pose=pose)
regions.append(region)
data.label = regions
# FIXME[todo]
class W300(DataDirectory, Imagesource):
"""The 300 Faces In-the-Wild Challenge (300-W), form the ICCV 2013.
The challenge targets facial landmark detection, using a 68 point
annotation scheme.
Besides 300-W, there are several other datasets annotated in the
same scheme: AFW, FRGC, HELEN, IBUG, LPFW, and XM2VTS.
For more information visit:
https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@staticmethod
def _load_annotation(filename: str) -> Landmarks:
"""Parse the landmark annotation file. Each image of the dataset is
accompanied by a file with the same name and the suffix '.pts'
providing the positions of the 68 points.
"""
# The file has the following format:
#
# version: 1
# n_points: 68
# {
# 403.167108 479.842932
# 407.333804 542.927159
# ...
# 625.877482 717.615332
# }
#
with open(filename) as file:
_ = file.readline().split(':')[1] # version
n_points = int(file.readline().split(':')[1])
points = np.ndarray((n_points, 2))
_ = file.readline() # '{'
for i in range(n_points):
pos_x, pos_y = file.readline().rstrip().split(' ')
points[i] = float(pos_x), float(pos_y)
return Landmarks(points)
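# A minimal usage sketch (the filename is hypothetical):
#   landmarks = W300._load_annotation('some_image.pts')
# This returns a Landmarks object wrapping the n_points (typically 68)
# (x, y) coordinates read from the annotation file.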
| [] | [] | ["WIDERFACE_DATA"] | [] | ["WIDERFACE_DATA"] | python | 1 | 0 |