| filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129, nullable) | variableargcount (float64, 0, nullable) | sentence (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|
LottoHub/settings.py
|
"""
Django settings for LottoHub project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import dj_database_url
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "38ylz*z8!!z6cma!9v!ahwa_g^cf(fmzxq0y6@say!^%v8d=uh"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
RECAPTCHA_PUBLIC_KEY = '6LdGSp8UAAAAAJTlN8Gm-zI8V14QcyAskpZn0v90'
RECAPTCHA_PRIVATE_KEY = '6LdGSp8UAAAAANnClmI72qLW0h7g5bziy597SPPF'
RECAPTCHA_DOMAIN = 'www.recaptcha.net'
INSTALLED_APPS = [
'captcha',
'grappelli',
'corsheaders',
'dal',
'dal_select2',
'django.contrib.admin',
'django.contrib.auth',
# 'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
# Disable Django's own staticfiles handling in favour of WhiteNoise, for
# greater consistency between gunicorn and `./manage.py runserver`. See:
# http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
# 'LottoWebCore.apps.LottoHubConfig',
'LottoHub',
'LottoWebCore',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'LottoHub.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'LottoHub.wsgi.application'
CORS_ORIGIN_ALLOW_ALL = True
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(conn_max_age=500, ssl_require=True))
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
HEROKU = False
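# Heroku sets the DYNO environment variable on every dyno, so its presence indicates a Heroku deployment.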
if 'DYNO' in os.environ:
SITE_ID = 2
HEROKU = True
# EMAIL_HOST_USER = os.environ['SENDGRID_USERNAME']
# EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# EMAIL_HOST_PASSWORD = os.environ['SENDGRID_PASSWORD']
else:
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Activate Django-Heroku.
django_heroku.settings(locals())
# Accounts REDIRECTS
LOGIN_REDIRECT_URL = '/dashboard'
LOGOUT_REDIRECT_URL = '/'
| [] | [] | ["SENDGRID_PASSWORD", "SENDGRID_USERNAME"] | [] | ["SENDGRID_PASSWORD", "SENDGRID_USERNAME"] | python | 2 | 0 | |
quantity/quantity_test.go
|
package quantity
import (
"fmt"
"math"
"os"
"sort"
"testing"
"time"
)
func TestPanic(t *testing.T) {
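// Runs only when GOUNITSPANIC=1: Add on incompatible units should panic, and the deferred
// recover treats that panic as success; reaching t.Error means it never panicked.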
enablePanic := os.Getenv("GOUNITSPANIC") == "1"
if enablePanic {
fmt.Println("Panic if working with incompatible units")
defer func() {
if r := recover(); r != nil {
fmt.Println("TestPanic OK")
}
}()
Add(Q(10, "kph"), Q(20, "V"))
t.Error("TestPanic didn't work as expected")
}
}
func TestInvalid(t *testing.T) {
defer func() {
recover()
}()
m := Q(0, "bla")
t.Error(m.Inspect())
}
func TestIn(t *testing.T) {
data := []struct {
val float64
sym string
val1 string
sym1 string
fail bool
}{
{454.8, "kph", "245.5724", "kn", false},
{454.8, "kph", "-1", "kn", true},
{1500, "m", "0.9321", "mi", false},
{0.9320568, "mi", "1500.0000", "m", false},
{1, "m/s", "3.6000", "kph", false},
{1, "m/s", "1", "m", true},
{-1, "m/s", "-1.0000", "m/s", false},
{34, "¤/m", "51.00", "$", true},
{1000, "$", "1000.0000", "USD", false},
{3.1, "us gal", "11.7348", "L", false},
{7, "L/100km", "0.0700", "mm2", false},
{3, "N", "3.0000", "kg.m/s2", false},
{1, "psi", "0.0689", "bar", false},
{6894.757, "Pa", "1.0000", "lbf.in-2", false},
}
for _, d := range data {
m1 := Q(d.val, d.sym)
if m1.Invalid() {
if !d.fail {
t.Error("source unit not found:", d.sym)
}
continue
}
if m2, ok := m1.ConvertTo(d.sym1); ok {
v, s := m2.Split()
vs := fmt.Sprintf("%.4f", v)
mismatch := vs != d.val1 || s != d.sym1
if mismatch && !d.fail || !mismatch && d.fail {
if d.fail {
t.Error("expected to fail:", d.val, d.sym, "->", d.val1, d.sym1)
} else {
t.Error("expected:", d.val1, d.sym1, "; actual:", vs, s)
}
}
} else {
if !d.fail {
t.Error("not expected to fail:", d.val, d.sym, "->", d.sym1)
}
}
}
}
func TestString(t *testing.T) {
data := []struct {
input Quantity
expected string
}{
{Q(12.3456, "kn"), "12.3456 kn"},
{Q(0, "kn"), "0.0000 kn"},
{Q(-14.581699, "mph"), "-14.5817 mph"},
{Q(0.00001, "m"), "0.0000 m"},
}
for _, d := range data {
s := d.input.String()
if s != d.expected {
t.Error("expected:", d.expected, "actual:", s)
}
}
DefaultFormat = "%.0f%s"
if Q(500.9999, "mph").String() != "501mph" {
t.Error("setting default format failed")
}
DefaultFormat = "%.4f %s"
a := Q(123.5, "NZD")
if a.String() != "123.5000 NZD" {
t.Error("currency formatting failed", a)
}
}
func TestCalc1(t *testing.T) {
q := Q
data := []struct {
op string
x, y Quantity
expected string
}{
{"+", q(10, "m"), q(8, "m"), "18.0000 m"},
{"+", q(15, "km"), q(2, "mi"), "18218.6880 m"},
{"-", q(5.301, "kg"), q(302, "g"), "4.9990 kg"},
{"-", q(1.4, "mph"), q(3.0, "kn"), "-0.9175 m.s-1"},
{"*", q(2, "kg"), q(15, "m"), "30.0000 m.kg"},
{"/", q(9, "km"), q(2, "h"), "1.2500 m.s-1"},
{"1/", q(100, "m/s"), Quantity{}, "0.0100 m-1.s"},
{"1/", q(8.0, "m"), Quantity{}, "0.1250 m-1"},
}
for _, d := range data {
var result Quantity
switch d.op {
case "+":
result = Add(d.x, d.y)
case "-":
result = Subtract(d.x, d.y)
case "*":
result = Mult(d.x, d.y)
case "/":
result = Div(d.x, d.y)
case "1/":
result = Reciprocal(d.x)
}
if result.String() != d.expected {
t.Error("expected:", d.expected, "actual:", result)
}
}
}
func TestCalc2(t *testing.T) {
q := Q
data := []struct {
op string
q Quantity
f float64
expected string
}{
{"*", q(100, "m/s"), 1.2, "120.0000 m/s"},
{"/", q(100, "g"), 4.0, "25.0000 g"},
{"^", q(2.0, "m"), 3, "8.0000 m3"},
{"^", q(8.4, "m"), -3, "0.0017 m-3"},
}
for _, d := range data {
var result Quantity
switch d.op {
case "*":
result = MultFac(d.q, d.f)
case "/":
result = DivFac(d.q, d.f)
case "^":
result = Power(d.q, int8(d.f))
}
if result.String() != d.expected {
t.Error("expected:", d.expected, "actual:", result)
}
}
}
func TestCalc3(t *testing.T) {
result := Sum(Q(5.1, "Pa"), Q(0.3, "N.m-2"), Q(0.11, "m-2.N"))
expected := "5.5100 m-1.kg.s-2"
if result.String() != expected {
t.Error("expected:", expected, "actual:", result.String())
}
result = Diff(Q(100, "kph"), Q(7, "mph"), Q(1, "kn"))
expected = "24.1341 m.s-1"
if result.String() != expected {
t.Error("expected:", expected, "actual:", result.String())
}
}
func TestMixedUnits(t *testing.T) {
p1 := Q(7, "N.m-2")
p2 := Q(8, "Pa")
if AreCompatible(p1, p2) {
p3 := Add(p1, p2)
const result = "15.0000 m-1.kg.s-2"
if p3.String() != result {
t.Error("expected:", result, "actual:", p3)
}
} else {
t.Error("not same unit: ", p1.Symbol(), p2.Symbol())
}
}
func TestPer(t *testing.T) {
p1 := Q(1, "km/h")
p2 := Q(2, "kph")
p3 := Q(3, "m/s")
if !AreCompatible(p1, p2) {
t.Error("incompatible:", p1, "<>", p2)
}
if !AreCompatible(p2, p3) {
t.Error("incompatible:", p2, "<>", p3)
}
p4 := Q(4, "kg.m/s2")
p5 := Q(5, "N")
if !AreCompatible(p4, p5) {
t.Error("incompatible:", p4, "<>", p5)
}
p6 := Q(6, "W")
p7 := Q(7, "J/s")
if !AreCompatible(p6, p7) {
t.Error("same unit:", p6, p7)
}
p8 := Subtract(Q(8.8, "N.m/s"), Q(8.8, "W"))
if p8.String() != "0.0000 m2.kg.s-3" {
t.Error()
}
}
func TestEqual(t *testing.T) {
p1 := Q(999, "m")
p2 := Q(1, "km")
if !Equal(p1, p2, Q(2, "m")) {
t.Error("not equal: ", p1, p2)
}
if Equal(p1, p2, Q(1, "m")) {
t.Error("false equality:", p1, p2)
}
}
func TestNormalize(t *testing.T) {
p1 := Q(1.2, "mph")
if p1.Value() != 1.2 || p1.Symbol() != "mph" {
t.Error("unit initialization error", p1)
}
p1.Normalize()
if fmt.Sprintf("%.4f", p1.Value()) != "0.5364" || p1.Symbol() != "m.s-1" {
t.Error("unit initialization error", p1)
}
}
func TestParse(t *testing.T) {
p1 := Q(12.4, "km.s-2")
p2, err := Parse("12.4 km/s2")
if err != nil {
t.Error(err)
} else if !Equal(p1, p2, Q(0.01, "m.s-2")) {
t.Error("not equal", p1, "<>", p2)
}
p3 := Q(3894829.88, "sq in")
p4, err := Parse(" 3,894,829.88sq in ")
if err != nil {
t.Error(err)
} else if !Equal(p3, p4, Q(0.001, "sq in")) {
t.Error("not equal", p3, "<>", p4)
}
}
func TestParse2(t *testing.T) {
data := []struct {
s string
fail bool
}{
{"38J", false},
{" -15.5 K ", false},
{"1,000 kW/sr", false},
{"foo", true},
{"/12309.8m", true},
{"12,058,884.881 N/m2", false},
{"5 chickens/m2", true},
{"1.1 sq in", false},
{"5.5.6 m", true},
}
for _, d := range data {
_, err := Parse(d.s)
if err != nil && !d.fail {
t.Error("failed but shouldn't: [", d.s, "]")
} else if err == nil && d.fail {
t.Error("should fail but didn't: [", d.s, "]")
}
}
}
func TestSort(t *testing.T) {
arr := Quantities{
Q(0.2, "M"),
Q(-3, "m"),
Q(-1.5, "m"),
Q(0.1, "cm"),
Q(0.1, "mm"),
Q(4, "ft"),
}
sort.Sort(arr)
sa := fmt.Sprintf("%v", arr)
if sa != "[-3.0000 m -1.5000 m 0.1000 mm 0.1000 cm 4.0000 ft 0.2000 M]" {
t.Error("sort error", sa)
}
}
func TestDuration(t *testing.T) {
var t1 Quantity
t1 = Q(1.5, "d")
var t2 time.Duration
t2, err := Duration(t1)
if err != nil {
t.Error(err)
}
if t2.Hours() != 36 {
t.Error("expected:", 36, "actual:", t2.Hours())
}
}
//func TestPrefix(t *testing.T) {
// m1 := Q(25*Centi, "m")
// m2 := Q(25, "cm")
// if !Equal(m1, m2, Q(1e-6, "m")) {
// t.Error("not equal:", m1, m2)
// }
// m3 := Q(7*Cubic(Deci), "m3")
// m4 := Q(7, "L")
// if !AreCompatible(m3, m4) || !Equal(m3, m4, Q(1e-6, "m")) {
// t.Error("not equal:", m3, m4)
// }
//}
func TestKFC(t *testing.T) {
var k Quantity
k = Q(239.5, "K")
c, err := KtoC(k)
if err != nil {
t.Error(err)
}
if math.Abs(c - -33.65) > 1e-6 {
t.Error("expected: -33.65, actual:", c)
}
f, err := KtoF(k)
if err != nil {
t.Error(err)
}
if math.Abs(f - -28.57) > 1e-6 {
t.Error("expected: -28.57, actual:", f)
}
f = CtoF(91.833)
if math.Abs(f-197.2994) > 1e-6 {
t.Error("expected: 197.2994, actual:", f)
}
k = CtoK(38.27112)
if math.Abs(k.Value()-311.42112) > 1e-6 {
t.Error("expected: 311.42112, actual:", k)
}
k = FtoK(-1)
if math.Abs(k.Value()-254.816667) > 1e-6 {
t.Error("expected: 254.817, actual:", k)
}
}
func TestPrefix(t *testing.T) {
const shouldFail = 0 // magic value
data := []struct {
symbol string
factor float64
}{
{"km/s2", 1e3},
{"$/dam", 0.1},
{"Gs", 1e9},
{"nJ/ns", 1},
{"uA", 1e-6}, // micro-Ampere: micro "µ" -> "u"
{"mg", 1e-6},
{"dg", 1e-4},
{"dz", shouldFail}, // deci-z unknown unit z
{"kV", 1e3},
{"cm", 0.01},
{"mm-3.kg", 1e9},
{"mm3", 1e-9},
{"kHz", 1e3},
{"ccd", 0.01},
{"egg", shouldFail}, // unknown
{"kg/ft2", 10.763910},
{"um", 1e-6}, // micrometer: micro "µ" -> "u"
{"uft", shouldFail}, // microfeet not SI
{"km2", 1e6},
{"daN", 10},
{"hPa", 100},
{"aC", 1e-18},
{"mmi", shouldFail}, // millimile not SI
{"mbar", 100},
}
for _, x := range data {
q, err := ParseSymbol(x.symbol)
if (err == nil) == (x.factor == shouldFail) {
t.Errorf("should fail %s: %v", x.symbol, err)
}
if err == nil {
si := q.ToSI()
if fmt.Sprintf("%.4f", si.Value()) != fmt.Sprintf("%.4f", x.factor) {
t.Errorf("%s: %v", x.symbol, si.Value())
}
//fmt.Println(q.Inspect())
}
}
}
| ["\"GOUNITSPANIC\""] | [] | ["GOUNITSPANIC"] | [] | ["GOUNITSPANIC"] | go | 1 | 0 | |
cli.py
|
import os
import re
import csv
import sys
import json
from datetime import datetime, timedelta
import logging
from operator import itemgetter
from logging.config import dictConfig
from collections import defaultdict
import click
import tushare
from keysersoze.data import (
QiemanExporter,
EastMoneyFundExporter,
)
from keysersoze.models import (
DATABASE,
Deal,
Asset,
AssetMarketHistory,
AccountHistory,
AccountAssetsHistory,
QiemanAsset,
)
from keysersoze.utils import (
get_code_suffix,
update_account_assets_history,
compute_account_history,
)
LOGGER = logging.getLogger(__name__)
dictConfig({
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(filename)s:%(lineno)s: %(message)s',
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
"stream": "ext://sys.stdout",
},
},
'loggers': {
'__main__': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'keysersoze': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
}
}
})
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
def main():
pass
@main.command("export-qieman-orders")
@click.option("-c", "--config-file", required=True)
@click.option("-o", "--outfile", required=True)
@click.option("-n", "--asset-name", required=True)
def export_qieman_orders(config_file, asset_name, outfile):
"""导出且慢订单记录"""
asset = QiemanAsset.get_or_none(name=asset_name)
if asset is None:
LOGGER.warning("could not find Qieman asset with name `%s`", asset_name)
return
with open(config_file) as f:
config = json.load(f)
exporter = QiemanExporter(**config)
orders = exporter.list_orders(asset.asset_id)
with open(outfile, 'w') as fout:
for order in orders:
line = json.dumps(order, ensure_ascii=False, sort_keys=True)
print(line, file=fout)
@main.command("parse-qieman")
@click.option("-i", "--infile", required=True)
@click.option("-o", "--outfile", required=True)
@click.option("--add-transfer", is_flag=True, help="是否在买入时自动产生一笔等额资金转入")
def parse_qieman_orders(infile, outfile, add_transfer):
"""解析且慢订单记录为 csv 格式"""
results = []
with open(infile) as fin:
pattern = re.compile(r'再投资份额(\d+\.\d+)份')
unknown_buyings, transfer_in = [], defaultdict(float)
for line in fin:
item = json.loads(line)
account = item['umaName']
sub_account = item['capitalAccountName']
if item['capitalAccountName'] == '货币三佳':
pass
elif item['hasDetail']:
if item['orderStatus'] != 'SUCCESS':
continue
for order in item['compositionOrders']:
value = order['nav']
fee = order['fee']
order_time = datetime.fromtimestamp(order['acceptTime'] / 1000)
count = order['uiShare']
money = order['uiAmount']
action = 'unknown'
if order['payStatus'] == '2':
action = 'buy'
elif order['payStatus'] == '0':
action = 'sell'
fund_code = order['fund']['fundCode']
fund_name = order['fund']['fundName']
if fund_name.find('广发钱袋子') >= 0:  # FIXME: should decide by fund type instead
continue
if 'destFund' in order:
money -= fee
unknown_buyings.append([
account, sub_account, order_time,
order['destFund']['fundCode'], order['destFund']['fundName'],
money
])
elif add_transfer and action == 'buy':
transfer_in[(account, str(order_time.date()))] += money
results.append([
account, sub_account, order_time, fund_code, fund_name,
action, count, value, money, fee
])
elif item['uiOrderDesc'].find('再投资') >= 0:
fee = 0
order_time = datetime.fromtimestamp(item['acceptTime'] / 1000)
count = float(pattern.findall(item['uiOrderDesc'])[0])
money = item['uiAmount']
value = round(float(money) / float(count), 4)
action = 'reinvest'
fund_code = item['fund']['fundCode']
fund_name = item['fund']['fundName']
# In Qieman records the dividend-reinvestment date is the date the shares arrived,
# not the date the reinvestment actually happened; search backwards by NAV to recover the real date.
fund = Asset.get_or_none(code=f'{fund_code}.OF')
if fund:
search = fund.history.where(AssetMarketHistory.date < order_time.date())
search = search.where(
AssetMarketHistory.date >= order_time.date() - timedelta(days=10)
)
search = search.order_by(AssetMarketHistory.date.desc())
candidates = []
for record in search[:3]:
candidates.append((record, abs(record.nav - value)))
record, nav_diff = min(candidates, key=itemgetter(1))
LOGGER.info(
"correct reinvestment time of `%s` from `%s` to `%s`(nav diff: %f)",
fund_code, order_time, record.date, nav_diff
)
value = record.nav
order_time = datetime.strptime(f'{record.date} 08:00:00', '%Y-%m-%d %H:%M:%S')
else:
LOGGER.warning(
"can not guess real order time of reinvestment(code: %s;time: %s; nav: %s)",
fund_code, order_time, value
)
results.append([
account, sub_account, order_time, fund_code, fund_name,
action, count, value, money, fee
])
elif item['uiOrderCodeName'].find('现金分红') >= 0:
order_time = datetime.fromtimestamp(item['acceptTime'] / 1000)
results.append([
account, sub_account, order_time,
item['fund']['fundCode'], item['fund']['fundName'],
'bonus', item['uiAmount'], 1.0, item['uiAmount'], 0.0
])
for (account, date), money in transfer_in.items():
order_time = datetime.strptime(f'{date} 08:00:00', '%Y-%m-%d %H:%M:%S')
results.append([
account, '', order_time, 'CASH', '现金',
'transfer_in', money, 1.0, money, 0.0
])
for account, sub_account, order_time, code, name, money in unknown_buyings:
fund = Asset.get_or_none(zs_code=f'{code}.OF')
if not fund:
LOGGER.warning(
"fund `%s` is not found in database, add it with `update-fund`",
code
)
continue
close_time = datetime.strptime(f'{order_time.date()} 15:00:00', '%Y-%m-%d %H:%M:%S')
if order_time > close_time:
history_date = (order_time + timedelta(days=1)).date()
else:
history_date = order_time.date()
history_records = list(fund.history.where(AssetMarketHistory.date == history_date))
if not history_records:
LOGGER.warning(
"history data of fund `%s` is not found in database, try `update-fund`",
code
)
continue
value = history_records[0].nav
count = round(money / value, 2)
results.append([
account, sub_account, order_time, code, name,
'buy', count, value, money, 0.0
])
results.sort(key=itemgetter(2, 0, 1, 3, 5))
with open(outfile, 'w') as fout:
for row in results:
if row[3] != 'CASH':
row[3] = row[3] + '.OF'
line = '\t'.join([
'\t'.join(map(str, row[:6])),
f'{row[6]:0.2f}', f'{row[7]:0.4f}',
'\t'.join([f'{r:.2f}' for r in row[8:]]),
])
print(line, file=fout)
@main.command("parse-pingan")
@click.option("-i", "--infile", required=True)
@click.option("-o", "--outfile", required=True)
def parse_pingan(infile, outfile):
"""解析平安证券的交易记录"""
action_mappings = {
'证券买入': 'buy',
'证券卖出': 'sell',
'银证转入': 'transfer_in',
'银证转出': 'transfer_out',
'利息归本': 'reinvest',
}
results = []
with open(infile) as fin:
reader = csv.DictReader(fin)
for row in reader:
if row['操作'] not in action_mappings:
LOGGER.warning("unsupported action: %s", row['操作'])
continue
order_time = datetime.strptime(f'{row["成交日期"]} {row["成交时间"]}', '%Y%m%d %H:%M:%S')
action = action_mappings[row['操作']]
code, name = row['证券代码'], row['证券名称']
count, price = float(row['成交数量']), float(row['成交均价'])
money = float(row['发生金额'].lstrip('-'))
fee = float(row["手续费"]) + float(row["印花税"])
if action.startswith('transfer') or action == 'reinvest':
code, name, count, price = 'CASH', '现金', money, 1.0
if code != 'CASH':
suffix = get_code_suffix(code)
code = f'{code}.{suffix}'
results.append([
'平安证券', '平安证券', order_time, code, name,
action, count, price, money, fee
])
results.sort(key=itemgetter(2, 3, 5))
with open(outfile, 'w') as fout:
for row in results:
line = '\t'.join([
'\t'.join(map(str, row[:6])),
f'{row[6]:0.2f}', f'{row[7]:0.4f}',
'\t'.join([f'{r:0.2f}' for r in row[8:]]),
])
print(line, file=fout)
@main.command("parse-huabao")
@click.option("-i", "--infile", required=True)
@click.option("-o", "--outfile", required=True)
def parse_huabao(infile, outfile):
"""解析华宝证券的交易记录"""
ignore_actions = set(['中签通知', '配号'])
action_mappings = {
'买入': 'buy',
'卖出': 'sell',
'中签扣款': 'buy',
}
data = []
stagging_data = []
with open(infile) as fin:
reader = csv.DictReader(fin)
for row in reader:
if row['委托类别'] in ignore_actions:
continue
if row['委托类别'] not in action_mappings:
# Record the IPO stock/bond subscription deductions and custody-transfer rows separately for later processing
if row['委托类别'] in ('托管转入', '托管转出'):
stagging_data.append(row)
continue
else:
LOGGER.warning("unsupported action: %s", row)
continue
order_time = datetime.strptime(f'{row["成交日期"]} {row["成交时间"]}', '%Y%m%d %H:%M:%S')
action = action_mappings[row['委托类别']]
money, fee = float(row['发生金额']), float(row['佣金']) + float(row['印花税'])
if action == 'buy':
money += fee
elif action == 'sell':
money -= fee
# Some instruments are quoted in lots ("手"); if the traded amount is roughly 10x count*price, convert the count to shares ("股")
count, price = float(row['成交数量']), float(row['成交价格'])
if abs(money / (float(count) * float(price)) - 10) < 0.5:
count = float(count) * 10
code, name = row['证券代码'], row['证券名称']
if row['委托类别'] != '中签扣款':
suffix = get_code_suffix(code)
code = f'{code}.{suffix}'
data.append((
'华宝证券', '华宝证券', order_time, code, name,
action, count, price, money, fee
))
name2codes = defaultdict(dict)
for row in stagging_data:
if not row['证券名称'].strip():
continue
if row['委托类别'] == '托管转出' and row['成交编号'] == '清理过期数据':
name2codes[row['证券名称']]['origin'] = row['证券代码']
elif row['委托类别'] == '托管转入':
suffix = get_code_suffix(row['证券代码'])
name2codes[row['证券名称']]['new'] = row['证券代码'] + f'.{suffix}'
code_mappings = {}
for codes in name2codes.values():
code_mappings[codes['origin']] = codes['new']
data.sort(key=itemgetter(2, 3, 5))
with open(outfile, 'w') as fout:
for row in data:
row = list(row)
if row[5] == 'buy' and row[3] in code_mappings:
LOGGER.info("convert code from `%s` to `%s`", row[4], code_mappings[row[3]])
row[3] = code_mappings[row[3]]
line = '\t'.join([
'\t'.join(map(str, row[:6])),
f'{row[6]:0.2f}', f'{row[7]:0.4f}',
'\t'.join([f'{r:0.2f}' for r in row[8:]]),
])
print(line, file=fout)
@main.command("create-db")
def create_db():
"""创建资产相关的数据库"""
DATABASE.connect()
DATABASE.create_tables([
Asset,
Deal,
AssetMarketHistory,
AccountHistory,
AccountAssetsHistory,
QiemanAsset,
])
DATABASE.close()
@main.command('add-asset')
@click.option('--zs-code', required=True)
@click.option('--code', required=True)
@click.option('--name', required=True)
@click.option('--category', required=True)
def add_asset(zs_code, code, name, category):
"""添加资产品种到数据库"""
_, created = Asset.get_or_create(
zs_code=zs_code,
code=code,
name=name,
category=category,
)
if created:
LOGGER.info('created asset in database successfully')
else:
LOGGER.warning('asset is already in database')
@main.command('init-assets')
def init_assets():
"""获取市场资产列表写入到数据库"""
token = os.environ.get('TS_TOKEN')
if not token:
LOGGER.warning('environment `TS_TOKEN` is empty!')
return -1
client = tushare.pro_api(token)
created_cnt, total = 0, 0
for _, row in client.stock_basic(list_status='L', fields='ts_code,name').iterrows():
_, created = Asset.get_or_create(
zs_code=row['ts_code'],
code=row['ts_code'][:6],
name=row['name'],
category='stock',
)
created_cnt += created
total += 1
LOGGER.info('got %d stocks and created %d new in database', total, created_cnt)
created_cnt, total = 0, 0
for _, row in client.cb_basic(fields='ts_code,bond_short_name').iterrows():
_, created = Asset.get_or_create(
zs_code=row['ts_code'],
code=row['ts_code'][:6],
name=row['bond_short_name'],
category='bond',
)
created_cnt += created
total += 1
LOGGER.info('got %d bonds and created %d new in database', total, created_cnt)
for market in 'EO':
created_cnt, total = 0, 0
funds = client.fund_basic(market=market, status='L')
for _, row in funds.iterrows():
zs_code = row['ts_code']
if zs_code[0] not in '0123456789':
LOGGER.warning('invalid fund code: %s', zs_code)
total += 1
continue
_, created = Asset.get_or_create(
zs_code=zs_code,
code=zs_code[:6],
name=row['name'],
category='fund',
)
created_cnt += created
total += 1
if market == 'E':
zs_code = zs_code[:6] + '.OF'
_, created = Asset.get_or_create(
zs_code=zs_code,
code=zs_code[:6],
name=row['name'],
category='fund',
)
created_cnt += created
total += 1
LOGGER.info(
'got %d funds(market:%s) and created %d new in database',
total, market, created_cnt
)
@main.command('update-prices')
@click.option('--category', type=click.Choice(['index', 'stock', 'fund', 'bond']))
@click.option('--codes')
@click.option('--start-date')
def update_prices(category, codes, start_date):
'''Update price history for assets referenced by the deal records.'''
token = os.environ.get('TS_TOKEN')
if not token:
LOGGER.warning('environment `TS_TOKEN` is empty!')
return -1
assets = []
if codes:
for code in codes.split(','):
asset = Asset.get_or_none(zs_code=code)
if asset is None:
LOGGER.warning("code `%s` is not found in database", code)
continue
assets.append(asset)
else:
categories = set(['index', 'stock', 'bond', 'fund'])
if category:
categories = categories & set([category])
assets = [
deal.asset for deal in Deal.select(Deal.asset).distinct()
if deal.asset.category in categories
]
if 'index' in categories:
assets.extend(list(Asset.select().where(Asset.category == 'index')))
now = datetime.now()
if start_date is None:
start_date = (now - timedelta(days=10)).date()
else:
start_date = datetime.strptime(start_date, '%Y%m%d').date()
if now.hour >= 15:
end_date = now.date()
else:
end_date = (now - timedelta(days=1)).date()
api = EastMoneyFundExporter()
client = tushare.pro_api(token)
methods = {
'stock': client.daily,
'bond': client.cb_daily,
'fund': client.fund_daily,
'index': client.index_daily
}
for asset in assets:
created_cnt = 0
if asset.category in ('stock', 'bond', 'index') or \
(asset.category == 'fund' and not asset.zs_code.endswith('OF')):
days = (end_date - start_date).days + 1
method = methods[asset.category]
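# Fetch history in windows of at most 1000 days per request, presumably to stay within the API's per-call row limit.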
for offset in range(0, days, 1000):
cur_start_date = start_date + timedelta(days=offset)
cur_end_date = min(cur_start_date + timedelta(days=1000), end_date)
data = method(
ts_code=asset.zs_code,
start_date=cur_start_date.strftime('%Y%m%d'),
end_date=cur_end_date.strftime('%Y%m%d')
)
for _, row in data.iterrows():
_, created = AssetMarketHistory.get_or_create(
date=datetime.strptime(row['trade_date'], '%Y%m%d').date(),
open_price=row['open'],
close_price=row['close'],
pre_close=row['pre_close'],
change=row['change'],
pct_change=row['pct_chg'],
vol=row['vol'],
amount=row['amount'],
high_price=row['high'],
low_price=row['low'],
asset=asset
)
created_cnt += created
elif asset.category == 'fund':
fund_data = api.get_fund_data(asset.code)
if fund_data is None:
LOGGER.warning('no data for fund: %s', asset.zs_code)
continue
history = defaultdict(dict)
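# Each Data_netWorthTrend entry carries a millisecond timestamp in 'x', the NAV in 'y',
# and an optional 'unitMoney' text describing a dividend ('分红') or a split ('拆分').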
for nav in fund_data['Data_netWorthTrend']:
date = str(datetime.fromtimestamp(nav['x'] / 1000).date())
history[date]['nav'] = nav['y']
if nav.get('unitMoney'):
bonus_text = nav['unitMoney']
action, value = 'unknown', None
if bonus_text.startswith('分红'):
action = 'bonus'
value = float(re.findall(r'派现金(\d\.\d+)元', bonus_text)[0])
elif bonus_text.startswith('拆分'):
action = 'spin_off'
value = float(re.findall(r'折算(\d\.\d+)份', bonus_text)[0])
else:
LOGGER.warning("unknown bonus text: %s", bonus_text)
if action != 'unknown':
history[date]['bonus_action'] = action
history[date]['bonus_value'] = value
for auv in fund_data['Data_ACWorthTrend']:
date = str(datetime.fromtimestamp(auv[0] / 1000).date())
history[date]['auv'] = auv[1]
for date, info in history.items():
if 'nav' not in info:
LOGGER.warning("invalid history data: %s(%s)", info, date)
continue
_, created = AssetMarketHistory.get_or_create(
date=datetime.strptime(date, '%Y-%m-%d').date(),
nav=info['nav'],
auv=info.get('auv'),
bonus_action=info.get('bonus_action'),
bonus_value=info.get('bonus_value'),
asset=asset
)
created_cnt += created
LOGGER.info('created %d history records for %s(%s)', created_cnt, asset.name, asset.zs_code)
@main.command()
@click.option("-i", "--infile", required=True)
def import_deals(infile):
"""从文件中批量导入交易"""
with open(infile) as fin:
reader = csv.reader(fin, delimiter='\t')
cnt, total = 0, 0
for row in reader:
if len(row) != 10:
LOGGER.warning('column number is not 10: %s', row)
continue
asset = Asset.get_or_none(Asset.zs_code == row[3])
if asset is None:
LOGGER.warning('no asset found for code: %s', row[3])
continue
if asset.zs_code == 'CASH' and row[6] != row[8]:
LOGGER.error('cash record is not balanced: %s', row)
return
if row[5] == 'buy':
try:
diff = abs(float(row[6]) * float(row[7]) + float(row[9]) - float(row[8]))
assert diff < 0.001
except AssertionError:
LOGGER.warning("record is not balanced: %s", row)
print(row)
elif row[5] == 'sell':
try:
diff = abs(float(row[6]) * float(row[7]) - float(row[9]) - float(row[8]))
assert diff < 0.001
except AssertionError:
LOGGER.warning("record is not balanced: %s", row)
_, created = Deal.get_or_create(
account=row[0],
sub_account=row[1],
time=datetime.strptime(row[2], '%Y-%m-%d %H:%M:%S'),
asset=asset,
action=row[5],
amount=row[6],
price=row[7],
money=row[8],
fee=row[9]
)
total += 1
if created:
cnt += 1
if cnt != total:
LOGGER.warning("%d records are already in database", total - cnt)
LOGGER.info("created %d records in database", cnt)
@main.command()
def validate_deals():
"""检查交易记录是否有缺失(如分红/拆分)或错误"""
deals = defaultdict(list)
for record in Deal.select().order_by(Deal.time):
deals[record.asset.zs_code].append(record)
for code, records in deals.items():
asset = records[0].asset
bonus_history = list(
asset.history.where(
AssetMarketHistory.bonus_action.is_null(False)
).where(
AssetMarketHistory.date >= records[0].time.date()
)
)
if not bonus_history:
continue
for bonus_record in bonus_history:
matched = False
for deal in records:
if deal.time.date() == bonus_record.date:
matched = True
break
if not matched:
LOGGER.warning(
"bonus is missing in deals - fund: %s(%s), "
"date: %s, action: %s, value: %s",
asset.name, asset.zs_code, bonus_record.date,
bonus_record.bonus_action, bonus_record.bonus_value
)
@main.command()
@click.option('--accounts')
def update_accounts(accounts):
"""更新账户持仓和收益数据"""
if not accounts:
accounts = set([
deal.account
for deal in Deal.select(Deal.account).distinct()
])
else:
accounts = set(accounts.split(','))
for account in accounts:
update_account_assets_history(account)
for account in accounts:
created_cnt, update_cnt = 0, 0
for item in compute_account_history(account):
record = AccountHistory.get_or_none(account=account, date=item[0])
if not record:
AccountHistory.create(
account=account,
date=item[0],
amount=item[1],
money=item[2],
nav=item[3],
cash=item[4],
position=item[5],
)
created_cnt += 1
elif record.amount != item[1] or record.money != item[2]:
record.amount = item[1]
record.money = item[2]
record.nav = item[3]
record.cash = item[4]
record.position = item[5]
record.save()
update_cnt += 1
LOGGER.info(
'created %d new history and update %d record for account %s',
created_cnt, update_cnt, account
)
@main.command("price2bean")
@click.option("-o", "--outdir", required=True)
def price2bean(outdir):
"""将价格历史输出为 beancount 格式"""
if not os.path.exists(outdir):
os.makedirs(outdir)
for deal in Deal.select(Deal.asset).distinct():
asset = deal.asset
if asset.category not in ('stock', 'fund', 'bond'):
continue
code, suffix = asset.zs_code.split('.')
name = f'{suffix}{code}'
with open(os.path.join(outdir, f'{name}.bean'), 'w') as fout:
for record in asset.history.order_by(AssetMarketHistory.date):
if suffix == 'OF':
price = record.nav
else:
price = record.close_price
print(f'{record.date} price {name} {price:0.4f} CNY', file=fout)
@main.command("to-bean")
@click.option("-a", "--account", required=True)
@click.option("-o", "--outfile", required=True)
@click.option("--asset-prefix")
def to_beancount(account, outfile, asset_prefix):
"""将交易记录输出为 beancount 格式"""
search = Deal.select().where(Deal.account == account).order_by(Deal.time)
records = list(search)
if not records:
return
if asset_prefix:
account_prefix = ':'.join(['Assets', asset_prefix, f'{account}'])
else:
account_prefix = ':'.join(['Assets', f'{account}'])
with open(outfile, 'w') as fout:
for item in records:
code, suffix = None, None
if item.asset.category != 'other':
code, suffix = item.asset.zs_code.split('.')
if item.action == 'transfer_in':
text = '\n'.join([
f'{item.time.date()} * "转账"',
f' {account_prefix}:CASH {item.money:0.2f} CNY',
' Equity:Opening-Balances',
])
print(text + '\n', file=fout)
elif item.action == 'transfer_out':
text = '\n'.join([
f'{item.time.date()} * "转出"',
f' {account_prefix}:CASH -{item.money:0.2f} CNY',
' Equity:Opening-Balances',
])
print(text + '\n', file=fout)
elif item.action == 'buy':
text = '\n'.join([
f'{item.time.date()} * "买入{item.asset.name}"',
f' {account_prefix}:持仓 {item.amount} {suffix}{code} @@ {item.money:0.2f} CNY',
f' {account_prefix}:CASH -{item.money:0.2f} CNY',
])
print(text + '\n', file=fout)
elif item.action == 'sell':
text = '\n'.join([
f'{item.time.date()} * "卖出{item.asset.name}"',
f' {account_prefix}:持仓 -{item.amount} {suffix}{code} @@ {item.money:0.2f} CNY',
f' {account_prefix}:CASH {item.money:0.2f} CNY',
])
print(text + '\n', file=fout)
elif item.action in ('reinvest', 'fix_cash') and item.asset.zs_code == 'CASH':
text = '\n'.join([
f'{item.time.date()} * "现金收益"',
f' {account_prefix}:CASH {item.money:0.2f} CNY',
' Income:现金收益',
])
print(text + '\n', file=fout)
elif item.action == 'bonus':
text = '\n'.join([
f'{item.time.date()} * "{item.asset.name}分红"',
f' {account_prefix}:CASH {item.money:0.2f} CNY',
' Income:分红',
])
print(text + '\n', file=fout)
elif item.action == 'reinvest':
text = '\n'.join([
f'{item.time.date()} * "{item.asset.name}分红"',
f' {account_prefix}:CASH {item.money:0.2f} CNY',
' Income:分红',
])
print(text + '\n', file=fout)
text = '\n'.join([
f'{item.time.date()} * "买入{item.asset.name}"',
f' {account_prefix}:持仓 {item.amount} {suffix}{code} @@ {item.money:0.2f} CNY',
f' {account_prefix}:CASH -{item.money:0.2f} CNY',
])
print(text + '\n', file=fout)
elif item.action == 'spin_off':
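# A fund split is modelled as selling the previously held amount and buying
# the post-split amount, both valued at the NAV recorded on the split date.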
price = item.asset.history.\
where(AssetMarketHistory.date == item.time.date()).\
first().nav
money = round(item.amount * price, 2)
search = item.asset.assets_history.where(AccountAssetsHistory.account == account)
search = search.where(AccountAssetsHistory.date < item.time.date())
search = search.order_by(AccountAssetsHistory.date.desc())
record = search.first()
text = '\n'.join([
f'{item.time.date()} * "卖出{item.asset.name}"',
f' {account_prefix}:持仓 -{record.amount} {suffix}{code} @@ {money:0.2f} CNY',
f' {account_prefix}:CASH {money:0.2f} CNY',
])
print(text + '\n', file=fout)
text = '\n'.join([
f'{item.time.date()} * "买入{item.asset.name}"',
f' {account_prefix}:持仓 {item.amount} {suffix}{code} @@ {money:0.2f} CNY',
f' {account_prefix}:CASH -{money:0.2f} CNY',
])
print(text + '\n', file=fout)
@main.command()
@click.option("--zs-code", required=True)
@click.option("--start-date", required=True)
@click.option("--end-date", required=True)
@click.option("--price", type=float, required=True)
def set_prices(zs_code, start_date, end_date, price):
"""为指定品种设置历史价格(仅支持可转债)"""
asset = Asset.get_or_none(zs_code=zs_code)
if asset is None:
LOGGER.warning("code `%s` is not found in database", zs_code)
return
start_date = datetime.strptime(start_date, '%Y-%m-%d').date()
end_date = datetime.strptime(end_date, '%Y-%m-%d').date()
created_cnt = 0
for offset in range((end_date - start_date).days + 1):
cur_date = start_date + timedelta(days=offset)
record = AssetMarketHistory.get_or_none(date=cur_date, asset=asset)
if record is not None:
LOGGER.warning("price at %s already exists", cur_date)
continue
AssetMarketHistory.create(
date=cur_date,
open_price=price,
close_price=price,
pre_close=price,
change=0.0,
pct_change=0.0,
vol=0.0,
amount=0.0,
high_price=price,
low_price=price,
asset=asset
)
created_cnt += 1
LOGGER.info('created %d history records for %s(%s)', created_cnt, asset.name, asset.zs_code)
@main.command()
@click.option("-i", "--infile", required=True)
@click.option("-o", "--outfile", required=True)
def huobi2bean(infile, outfile):
"""将火币交易记录转为 beancount 格式"""
with open(infile) as fin, open(outfile, 'w') as fout:
data = []
for idx, line in enumerate(fin):
if idx == 0:
continue
cols = line.strip().split(',')
time, pair, action = cols[0], cols[2], cols[3]
target_coin, source_coin = pair.split('/')
price, amount, money, fee = cols[4:8]
if fee.endswith(source_coin) or fee.endswith(target_coin):
fee_coin = target_coin if action == '买入' else source_coin
fee = fee.replace(fee_coin, '')
elif fee.endswith('HBPOINT'):
fee_coin = 'HBPOINT'
fee = fee.replace(fee_coin, '')
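# When the exported amount is all zeros, reconstruct it from money / price,
# keeping as many decimals as either of those fields carries.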
if re.match(r'^0\.0+$', amount):
precision = max(
len(price.split('.')[1]),
len(money.split('.')[1]),
)
amount = f'{float(money) / float(price):0.{precision}f}'
time = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
data.append((time, source_coin, target_coin, action, price, amount, money, fee_coin, fee))
print("option \"title\" \"我的账本\"", file=fout)
print('option "operating_currency" "USDT"', file=fout)
print('2021-01-01 custom "fava-option" "language" "zh"', file=fout)
print('2021-01-01 open Assets:Huobi', file=fout)
print('2021-01-01 open Expenses:Fee', file=fout)
data.sort(key=itemgetter(0))
for idx, item in enumerate(data):
time, source, target, action, price, amount, money, fee_coin, fee = item
print(f'{time.date()} * "{action}{target}"', file=fout)
if action == '买入':
print(f' Assets:Huobi {amount} {target} @@ {money} {source}', file=fout)
print(f' Assets:Huobi -{money} {source}', file=fout)
if not re.match(r'^0\.0+$', fee):
print(f' Expenses:Fee {fee} {fee_coin}', file=fout)
print(f' Assets:Huobi -{fee} {fee_coin}', file=fout)
else:
print(f' Assets:Huobi -{amount} {target} @@ {money} {source}', file=fout)
print(f' Assets:Huobi {money} {source}', file=fout)
if not re.match(r'^0\.0+$', fee):
print(f' Expenses:Fee {fee} {fee_coin}', file=fout)
print(f' Assets:Huobi -{fee} {fee_coin}', file=fout)
if idx < len(data) - 1:
print('', file=fout)
@main.command("add-qieman-asset")
@click.option("--asset-id", required=True)
@click.option("--asset-name", required=True)
def add_qieman_asset(asset_id, asset_name):
"""添加且慢资产"""
_, created = QiemanAsset.get_or_create(asset_id=asset_id, name=asset_name)
LOGGER.info("finished")
@main.command("list-qieman-assets")
def list_qieman_assets():
"""查看且慢资产列表"""
for asset in QiemanAsset.select():
print(asset.asset_id, asset.name)
@main.command("export-qieman-profits")
@click.option("-c", "--config-file", required=True)
@click.option("-o", "--outfile")
@click.option("-n", "--asset-name", required=True)
def export_qieman_profits(config_file, asset_name, outfile):
"""导出且慢资产的日收益历史"""
asset = QiemanAsset.get_or_none(name=asset_name)
if asset is None:
LOGGER.warning("could not find Qieman asset with name `%s`", asset_name)
return
if outfile:
fout = open(outfile, 'w')
else:
fout = sys.stdout
with open(config_file) as f:
config = json.load(f)
exporter = QiemanExporter(**config)
profits = []
for item in exporter.list_profits(asset.asset_id)['dailyProfitList']:
if item['dailyProfit'] is not None:
date_val = datetime.fromtimestamp(item['navDate'] / 1000).date()
profit = item['dailyProfit']
profits.append((date_val, profit))
profits.sort(key=itemgetter(0))
for date_val, profit in profits:
print(f'{date_val} 15:00:00\t{profit:0.2f}', file=fout)
if outfile:
fout.close()
if __name__ == '__main__':
main()
| [] | [] | ["TS_TOKEN"] | [] | ["TS_TOKEN"] | python | 1 | 0 | |
mod/github.com/hashicorp/[email protected]/physical/azure/azure.go
|
package azure
import (
"context"
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"sort"
"strconv"
"strings"
"time"
storage "github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/go-autorest/autorest/azure"
metrics "github.com/armon/go-metrics"
"github.com/hashicorp/errwrap"
cleanhttp "github.com/hashicorp/go-cleanhttp"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/physical"
)
const (
// MaxBlobSize at this time
MaxBlobSize = 1024 * 1024 * 4
// MaxListResults is the current default value, setting explicitly
MaxListResults = 5000
)
// AzureBackend is a physical backend that stores data
// within an Azure blob container.
type AzureBackend struct {
container *storage.Container
logger log.Logger
permitPool *physical.PermitPool
}
// Verify AzureBackend satisfies the correct interfaces
var _ physical.Backend = (*AzureBackend)(nil)
// NewAzureBackend constructs an Azure backend using a pre-existing
// container. Credentials can be provided to the backend, sourced
// from the environment or from the configuration map.
func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
name := os.Getenv("AZURE_BLOB_CONTAINER")
if name == "" {
name = conf["container"]
if name == "" {
return nil, fmt.Errorf("'container' must be set")
}
}
accountName := os.Getenv("AZURE_ACCOUNT_NAME")
if accountName == "" {
accountName = conf["accountName"]
if accountName == "" {
return nil, fmt.Errorf("'accountName' must be set")
}
}
accountKey := os.Getenv("AZURE_ACCOUNT_KEY")
if accountKey == "" {
accountKey = conf["accountKey"]
if accountKey == "" {
return nil, fmt.Errorf("'accountKey' must be set")
}
}
environmentName := os.Getenv("AZURE_ENVIRONMENT")
if environmentName == "" {
environmentName = conf["environment"]
if environmentName == "" {
environmentName = "AzurePublicCloud"
}
}
environment, err := azure.EnvironmentFromName(environmentName)
if err != nil {
errorMsg := fmt.Sprintf("failed to look up Azure environment descriptor for name %q: {{err}}",
environmentName)
return nil, errwrap.Wrapf(errorMsg, err)
}
client, err := storage.NewBasicClientOnSovereignCloud(accountName, accountKey, environment)
if err != nil {
return nil, errwrap.Wrapf("failed to create Azure client: {{err}}", err)
}
client.HTTPClient = cleanhttp.DefaultPooledClient()
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(name)
_, err = container.CreateIfNotExists(&storage.CreateContainerOptions{
Access: storage.ContainerAccessTypePrivate,
})
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("failed to create %q container: {{err}}", name), err)
}
maxParStr, ok := conf["max_parallel"]
var maxParInt int
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
}
}
a := &AzureBackend{
container: container,
logger: logger,
permitPool: physical.NewPermitPool(maxParInt),
}
return a, nil
}
// Put is used to insert or update an entry
func (a *AzureBackend) Put(ctx context.Context, entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"azure", "put"}, time.Now())
if len(entry.Value) >= MaxBlobSize {
return fmt.Errorf("value is bigger than the current supported limit of 4MBytes")
}
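// The value fits in a single block (checked above), so upload one block and commit a one-entry block list.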
blockID := base64.StdEncoding.EncodeToString([]byte("AAAA"))
blocks := make([]storage.Block, 1)
blocks[0] = storage.Block{ID: blockID, Status: storage.BlockStatusLatest}
a.permitPool.Acquire()
defer a.permitPool.Release()
blob := &storage.Blob{
Container: a.container,
Name: entry.Key,
}
if err := blob.PutBlock(blockID, entry.Value, nil); err != nil {
return err
}
return blob.PutBlockList(blocks, nil)
}
// Get is used to fetch an entry
func (a *AzureBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"azure", "get"}, time.Now())
a.permitPool.Acquire()
defer a.permitPool.Release()
blob := &storage.Blob{
Container: a.container,
Name: key,
}
exists, err := blob.Exists()
if err != nil {
return nil, err
}
if !exists {
return nil, nil
}
reader, err := blob.Get(nil)
if err != nil {
return nil, err
}
defer reader.Close()
data, err := ioutil.ReadAll(reader)
ent := &physical.Entry{
Key: key,
Value: data,
}
return ent, err
}
// Delete is used to permanently delete an entry
func (a *AzureBackend) Delete(ctx context.Context, key string) error {
defer metrics.MeasureSince([]string{"azure", "delete"}, time.Now())
blob := &storage.Blob{
Container: a.container,
Name: key,
}
a.permitPool.Acquire()
defer a.permitPool.Release()
_, err := blob.DeleteIfExists(nil)
return err
}
// List is used to list all the keys under a given
// prefix, up to the next prefix.
func (a *AzureBackend) List(ctx context.Context, prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"azure", "list"}, time.Now())
a.permitPool.Acquire()
defer a.permitPool.Release()
var marker string
keys := []string{}
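// Page through the blob listing with the continuation marker until NextMarker comes back empty.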
for {
list, err := a.container.ListBlobs(storage.ListBlobsParameters{
Prefix: prefix,
Marker: marker,
MaxResults: MaxListResults,
})
if err != nil {
return nil, err
}
for _, blob := range list.Blobs {
key := strings.TrimPrefix(blob.Name, prefix)
if i := strings.Index(key, "/"); i == -1 {
// file
keys = append(keys, key)
} else {
// subdirectory
keys = strutil.AppendIfMissing(keys, key[:i+1])
}
}
if list.NextMarker == "" {
break
}
marker = list.NextMarker
}
sort.Strings(keys)
return keys, nil
}
| ["\"AZURE_BLOB_CONTAINER\"", "\"AZURE_ACCOUNT_NAME\"", "\"AZURE_ACCOUNT_KEY\"", "\"AZURE_ENVIRONMENT\""] | [] | ["AZURE_ACCOUNT_KEY", "AZURE_ENVIRONMENT", "AZURE_BLOB_CONTAINER", "AZURE_ACCOUNT_NAME"] | [] | ["AZURE_ACCOUNT_KEY", "AZURE_ENVIRONMENT", "AZURE_BLOB_CONTAINER", "AZURE_ACCOUNT_NAME"] | go | 4 | 0 | |
tasks.py
|
import os, configparser
from celery import Celery
from celery.schedules import crontab
from parse import default
from parse import custom
__version__ = "2.0.0"
config = configparser.RawConfigParser()
config.read(os.environ.get("CFG_PATH"))
default_config = dict(config.items("DEFAULT"))
app = Celery("tasks")
app.conf.update(default_config)
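# Illustrative CFG_PATH layout (assumed, not part of this repo): keys in [DEFAULT]
# are passed straight to app.conf.update(), and every other section becomes a
# periodic task driven either by crontab fields or by "seconds".
#
#   [DEFAULT]
#   broker_url = redis://localhost:6379/0
#
#   [REPORT]
#   crontab = true
#   minute = 0
#   hour = 3
#
#   [HEARTBEAT]
#   seconds = 60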
@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
print("===== Start add tasks =====")
for section in config.sections():
section_config = dict(config.items(section))
if "crontab" in section_config and section_config["crontab"]:
print(f"Add {section} task(crontab).")
crontab_info = {}
if "minute" in section_config:
crontab_info.update(minute=section_config["minute"])
if "hour" in section_config:
crontab_info.update(hour=section_config["hour"])
if "day_of_week" in section_config:
crontab_info.update(day_of_week=section_config["day_of_week"])
if "day_of_month" in section_config:
crontab_info.update(day_of_month=section_config["day_of_month"])
if "month_of_year" in section_config:
crontab_info.update(month_of_year=section_config["month_of_year"])
sender.add_periodic_task(crontab(**crontab_info),
switch.s(section, section_config),
name=f'RUN {section}')
elif "seconds" in section_config:
print(f"Add {section} task.")
sender.add_periodic_task(float(section_config.get("seconds")),
switch.s(section, section_config),
name=f'RUN {section} every {section_config.get("seconds")} seconds')
print("===== End add tasks =====")
@app.task
def switch(name, config):
if "custom" in config and config["custom"]:
return getattr(custom, name.lower())(config)
return getattr(default, name.lower())(config)
| [] | [] | ["CFG_PATH"] | [] | ["CFG_PATH"] | python | 1 | 0 | |
proxy/bootstrap/bootstrap_test.go
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package bootstrap
import (
"github.com/apache/servicecomb-mesher/proxy/cmd"
"github.com/apache/servicecomb-mesher/proxy/common"
_ "github.com/apache/servicecomb-mesher/proxy/pkg/egress/archaius"
"github.com/go-chassis/go-archaius"
"github.com/go-chassis/go-chassis/core/config"
"github.com/go-chassis/go-chassis/core/config/model"
"github.com/go-chassis/go-chassis/core/lager"
"github.com/go-chassis/go-chassis/pkg/util/fileutil"
"github.com/stretchr/testify/assert"
"io"
"os"
"path/filepath"
"sync"
"testing"
// rate limiter handler
_ "github.com/go-chassis/go-chassis/middleware/ratelimiter"
)
var o sync.Once = sync.Once{}
var yamlContent = `---
egress:
infra: cse # pilot or cse
address: http://istio-pilot.istio-system:15010
egressRule:
google-ext:
- hosts:
- "www.google.com"
- "*.yahoo.com"
ports:
- port: 80
protocol: HTTP
facebook-ext:
- hosts:
- "www.facebook.com"
ports:
- port: 80
protocol: HTTP`
func TestBootstrap(t *testing.T) {
lager.Init(&lager.Options{LoggerLevel: "DEBUG"})
// init work dir
os.Setenv(fileutil.ChassisHome, filepath.Join("...", "..."))
os.Setenv(fileutil.ChassisConfDir, filepath.Join("...", "...", "conf"))
t.Log(os.Getenv("CHASSIS_HOME"))
// init archaius
archaius.Init(archaius.WithENVSource())
// init config
config.Init()
protoMap := make(map[string]model.Protocol)
protoMap["http"] = model.Protocol{
Listen: "127.0.0.1:90909",
}
config.GlobalDefinition = &model.GlobalCfg{
Cse: model.CseStruct{
Protocols: protoMap,
},
}
configMesher := "../../conf/mesher.yaml"
os.Args = []string{"test", "--config", configMesher}
if err := cmd.Init(); err != nil {
panic(err)
}
if err := cmd.Configs.GeneratePortsMap(); err != nil {
panic(err)
}
// init egress.yaml file
d, _ := os.Getwd()
os.Mkdir(filepath.Join(d, "conf"), os.ModePerm)
filename := filepath.Join(d, "conf", "egress.yaml")
os.Remove(filename)
f1, err := os.Create(filename)
assert.NoError(t, err)
defer f1.Close()
_, err = io.WriteString(f1, yamlContent)
assert.NoError(t, err)
t.Run("Test RegisterFramework", func(t *testing.T) {
// case cmd.Configs.Role is empty
cmd.Configs.Role = ""
RegisterFramework()
// case cmd.Configs.Role == common.RoleSidecar
cmd.Configs.Role = common.RoleSidecar
RegisterFramework()
})
t.Run("Test Start", func(t *testing.T) {
// case Protocols is empty
config.GlobalDefinition.Cse.Protocols = map[string]model.Protocol{}
err := Start()
assert.Error(t, err)
// cmd.Configs.LocalServicePorts = "http:9090"
cmd.Configs.LocalServicePorts = "http:9090"
err = Start()
cmd.Configs.LocalServicePorts = ""
RegisterFramework()
SetHandlers()
err = InitEgressChain()
assert.NoError(t, err)
err = Start()
assert.NoError(t, err)
})
}
| ["\"CHASSIS_HOME\""] | [] | ["CHASSIS_HOME"] | [] | ["CHASSIS_HOME"] | go | 1 | 0 | |
daemon/daemon_unix.go
|
// +build linux freebsd
package daemon
import (
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/container"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/reference"
"github.com/docker/docker/runconfig"
runconfigopts "github.com/docker/docker/runconfig/opts"
pblkiodev "github.com/docker/engine-api/types/blkiodev"
containertypes "github.com/docker/engine-api/types/container"
"github.com/docker/libnetwork"
nwconfig "github.com/docker/libnetwork/config"
"github.com/docker/libnetwork/drivers/bridge"
"github.com/docker/libnetwork/ipamutils"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/options"
"github.com/docker/libnetwork/types"
blkiodev "github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/label"
"github.com/opencontainers/runc/libcontainer/user"
)
const (
// See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269
linuxMinCPUShares = 2
linuxMaxCPUShares = 262144
platformSupported = true
// It's not kernel limit, we want this 4M limit to supply a reasonable functional container
linuxMinMemory = 4194304
// constants for remapped root settings
defaultIDSpecifier string = "default"
defaultRemappedID string = "dockremap"
)
func getBlkioWeightDevices(config *containertypes.HostConfig) ([]*blkiodev.WeightDevice, error) {
var stat syscall.Stat_t
var blkioWeightDevices []*blkiodev.WeightDevice
for _, weightDevice := range config.BlkioWeightDevice {
if err := syscall.Stat(weightDevice.Path, &stat); err != nil {
return nil, err
}
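// stat.Rdev packs the device number: dividing by 256 yields the major number and the
// remainder yields the minor number passed to the blkio weight device.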
weightDevice := blkiodev.NewWeightDevice(int64(stat.Rdev/256), int64(stat.Rdev%256), weightDevice.Weight, 0)
blkioWeightDevices = append(blkioWeightDevices, weightDevice)
}
return blkioWeightDevices, nil
}
func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
var (
labelOpts []string
err error
)
for _, opt := range config.SecurityOpt {
con := strings.SplitN(opt, ":", 2)
if len(con) == 1 {
switch con[0] {
case "no-new-privileges":
container.NoNewPrivileges = true
default:
return fmt.Errorf("Invalid --security-opt 1: %q", opt)
}
} else {
switch con[0] {
case "label":
labelOpts = append(labelOpts, con[1])
case "apparmor":
container.AppArmorProfile = con[1]
case "seccomp":
container.SeccompProfile = con[1]
default:
return fmt.Errorf("Invalid --security-opt 2: %q", opt)
}
}
}
container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts)
return err
}
func getBlkioReadIOpsDevices(config *containertypes.HostConfig) ([]*blkiodev.ThrottleDevice, error) {
var blkioReadIOpsDevice []*blkiodev.ThrottleDevice
var stat syscall.Stat_t
for _, iopsDevice := range config.BlkioDeviceReadIOps {
if err := syscall.Stat(iopsDevice.Path, &stat); err != nil {
return nil, err
}
readIOpsDevice := blkiodev.NewThrottleDevice(int64(stat.Rdev/256), int64(stat.Rdev%256), iopsDevice.Rate)
blkioReadIOpsDevice = append(blkioReadIOpsDevice, readIOpsDevice)
}
return blkioReadIOpsDevice, nil
}
func getBlkioWriteIOpsDevices(config *containertypes.HostConfig) ([]*blkiodev.ThrottleDevice, error) {
var blkioWriteIOpsDevice []*blkiodev.ThrottleDevice
var stat syscall.Stat_t
for _, iopsDevice := range config.BlkioDeviceWriteIOps {
if err := syscall.Stat(iopsDevice.Path, &stat); err != nil {
return nil, err
}
writeIOpsDevice := blkiodev.NewThrottleDevice(int64(stat.Rdev/256), int64(stat.Rdev%256), iopsDevice.Rate)
blkioWriteIOpsDevice = append(blkioWriteIOpsDevice, writeIOpsDevice)
}
return blkioWriteIOpsDevice, nil
}
func getBlkioReadBpsDevices(config *containertypes.HostConfig) ([]*blkiodev.ThrottleDevice, error) {
var blkioReadBpsDevice []*blkiodev.ThrottleDevice
var stat syscall.Stat_t
for _, bpsDevice := range config.BlkioDeviceReadBps {
if err := syscall.Stat(bpsDevice.Path, &stat); err != nil {
return nil, err
}
readBpsDevice := blkiodev.NewThrottleDevice(int64(stat.Rdev/256), int64(stat.Rdev%256), bpsDevice.Rate)
blkioReadBpsDevice = append(blkioReadBpsDevice, readBpsDevice)
}
return blkioReadBpsDevice, nil
}
func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]*blkiodev.ThrottleDevice, error) {
var blkioWriteBpsDevice []*blkiodev.ThrottleDevice
var stat syscall.Stat_t
for _, bpsDevice := range config.BlkioDeviceWriteBps {
if err := syscall.Stat(bpsDevice.Path, &stat); err != nil {
return nil, err
}
writeBpsDevice := blkiodev.NewThrottleDevice(int64(stat.Rdev/256), int64(stat.Rdev%256), bpsDevice.Rate)
blkioWriteBpsDevice = append(blkioWriteBpsDevice, writeBpsDevice)
}
return blkioWriteBpsDevice, nil
}
func checkKernelVersion(k, major, minor int) bool {
if v, err := kernel.GetKernelVersion(); err != nil {
logrus.Warnf("%s", err)
} else {
if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 {
return false
}
}
return true
}
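// For illustration: checkKernelVersion(3, 10, 0) returns true on a 3.10.0 or newer
// kernel, and also returns true when the kernel version cannot be read (the error
// is only logged), so version-lookup failures are treated permissively.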
func checkKernel() error {
// Check for unsupported kernel versions
// FIXME: it would be cleaner to not test for specific versions, but rather
// test for specific functionalities.
// Unfortunately we can't test for the feature "does not cause a kernel panic"
// without actually causing a kernel panic, so we need this workaround until
// the circumstances of pre-3.10 crashes are clearer.
// For details see https://github.com/docker/docker/issues/407
if !checkKernelVersion(3, 10, 0) {
v, _ := kernel.GetKernelVersion()
if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
logrus.Warnf("Your Linux kernel version %s can be unstable running docker. Please upgrade your kernel to 3.10.0.", v.String())
}
}
return nil
}
// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
if adjustCPUShares && hostConfig.CPUShares > 0 {
// Handle unsupported CPUShares
if hostConfig.CPUShares < linuxMinCPUShares {
logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares)
hostConfig.CPUShares = linuxMinCPUShares
} else if hostConfig.CPUShares > linuxMaxCPUShares {
logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares)
hostConfig.CPUShares = linuxMaxCPUShares
}
}
if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 {
// By default, MemorySwap is set to twice the size of Memory.
hostConfig.MemorySwap = hostConfig.Memory * 2
}
if hostConfig.ShmSize == 0 {
hostConfig.ShmSize = container.DefaultSHMSize
}
var err error
if hostConfig.SecurityOpt == nil {
hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
if err != nil {
return err
}
}
if hostConfig.MemorySwappiness == nil {
defaultSwappiness := int64(-1)
hostConfig.MemorySwappiness = &defaultSwappiness
}
if hostConfig.OomKillDisable == nil {
defaultOomKillDisable := false
hostConfig.OomKillDisable = &defaultOomKillDisable
}
return nil
}
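// For illustration: a create request with Memory set and MemorySwap left at 0 leaves
// adaptContainerSettings with MemorySwap equal to twice Memory, since swap defaults
// to double the memory limit above.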
func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) {
warnings := []string{}
// memory subsystem checks and adjustments
if resources.Memory != 0 && resources.Memory < linuxMinMemory {
return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB")
}
if resources.Memory > 0 && !sysInfo.MemoryLimit {
warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.")
resources.Memory = 0
resources.MemorySwap = -1
}
if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit {
warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.")
logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.")
resources.MemorySwap = -1
}
if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory {
return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.")
}
if resources.Memory == 0 && resources.MemorySwap > 0 && !update {
return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.")
}
if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness {
warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
resources.MemorySwappiness = nil
}
if resources.MemorySwappiness != nil {
swappiness := *resources.MemorySwappiness
if swappiness < -1 || swappiness > 100 {
return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100.", swappiness)
}
}
if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation {
warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.")
logrus.Warnf("Your kernel does not support memory soft limit capabilities. Limitation discarded.")
resources.MemoryReservation = 0
}
if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation {
return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.")
}
if resources.KernelMemory > 0 && !sysInfo.KernelMemory {
warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.")
logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.")
resources.KernelMemory = 0
}
if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory {
return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB")
}
if resources.KernelMemory > 0 && !checkKernelVersion(4, 0, 0) {
warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
logrus.Warnf("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
}
if resources.OomKillDisable != nil && !sysInfo.OomKillDisable {
// only produce warnings if the setting wasn't to *disable* the OOM Kill; no point
// warning the caller if they already wanted the feature to be off
if *resources.OomKillDisable {
warnings = append(warnings, "Your kernel does not support OomKillDisable, OomKillDisable discarded.")
logrus.Warnf("Your kernel does not support OomKillDisable, OomKillDisable discarded.")
}
resources.OomKillDisable = nil
}
if resources.PidsLimit != 0 && !sysInfo.PidsLimit {
warnings = append(warnings, "Your kernel does not support pids limit capabilities, pids limit discarded.")
logrus.Warnf("Your kernel does not support pids limit capabilities, pids limit discarded.")
resources.PidsLimit = 0
}
// cpu subsystem checks and adjustments
if resources.CPUShares > 0 && !sysInfo.CPUShares {
warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.")
logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.")
resources.CPUShares = 0
}
if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod {
warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.")
logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.")
resources.CPUPeriod = 0
}
if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota {
warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.")
logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.")
resources.CPUQuota = 0
}
// cpuset subsystem checks and adjustments
if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset {
warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.")
logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.")
resources.CpusetCpus = ""
resources.CpusetMems = ""
}
cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus)
if err != nil {
return warnings, fmt.Errorf("Invalid value %s for cpuset cpus.", resources.CpusetCpus)
}
if !cpusAvailable {
return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s.", resources.CpusetCpus, sysInfo.Cpus)
}
memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems)
if err != nil {
return warnings, fmt.Errorf("Invalid value %s for cpuset mems.", resources.CpusetMems)
}
if !memsAvailable {
return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s.", resources.CpusetMems, sysInfo.Mems)
}
// blkio subsystem checks and adjustments
if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight {
warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.")
logrus.Warnf("Your kernel does not support Block I/O weight. Weight discarded.")
resources.BlkioWeight = 0
}
if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) {
return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.")
}
if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice {
warnings = append(warnings, "Your kernel does not support Block I/O weight_device.")
logrus.Warnf("Your kernel does not support Block I/O weight_device. Weight-device discarded.")
resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{}
}
if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice {
warnings = append(warnings, "Your kernel does not support Block read limit in bytes per second.")
logrus.Warnf("Your kernel does not support Block I/O read limit in bytes per second. --device-read-bps discarded.")
resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice {
warnings = append(warnings, "Your kernel does not support Block write limit in bytes per second.")
logrus.Warnf("Your kernel does not support Block I/O write limit in bytes per second. --device-write-bps discarded.")
resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice {
warnings = append(warnings, "Your kernel does not support Block read limit in IO per second.")
logrus.Warnf("Your kernel does not support Block I/O read limit in IO per second. -device-read-iops discarded.")
resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice {
warnings = append(warnings, "Your kernel does not support Block write limit in IO per second.")
logrus.Warnf("Your kernel does not support Block I/O write limit in IO per second. --device-write-iops discarded.")
resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{}
}
return warnings, nil
}
func (daemon *Daemon) getCgroupDriver() string {
cgroupDriver := "cgroupfs"
if daemon.usingSystemd() {
cgroupDriver = "systemd"
}
return cgroupDriver
}
func usingSystemd(config *Config) bool {
for _, option := range config.ExecOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
continue
}
if val == "systemd" {
return true
}
}
return false
}
func (daemon *Daemon) usingSystemd() bool {
return usingSystemd(daemon.configStore)
}
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
warnings := []string{}
sysInfo := sysinfo.New(true)
warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config)
if err != nil {
return warnings, err
}
w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update)
if err != nil {
return warnings, err
}
warnings = append(warnings, w...)
if hostConfig.ShmSize < 0 {
return warnings, fmt.Errorf("SHM size must be greater then 0")
}
if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 {
return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000].", hostConfig.OomScoreAdj)
}
if sysInfo.IPv4ForwardingDisabled {
warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
logrus.Warnf("IPv4 forwarding is disabled. Networking will not work")
}
// check for various conflicting options with user namespaces
if daemon.configStore.RemappedRoot != "" {
if hostConfig.Privileged {
return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces")
}
if hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsContainer() {
return warnings, fmt.Errorf("Cannot share the host or a container's network namespace when user namespaces are enabled")
}
if hostConfig.PidMode.IsHost() {
return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled")
}
if hostConfig.IpcMode.IsContainer() {
return warnings, fmt.Errorf("Cannot share a container's IPC namespace when user namespaces are enabled")
}
if hostConfig.ReadonlyRootfs {
return warnings, fmt.Errorf("Cannot use the --read-only option when user namespaces are enabled")
}
}
if hostConfig.CgroupParent != "" && daemon.usingSystemd() {
// CgroupParent for systemd cgroup should be named as "xxx.slice"
if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") {
return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
}
}
return warnings, nil
}
// verifyDaemonSettings performs validation of daemon config struct
func verifyDaemonSettings(config *Config) error {
// Check for mutually incompatible config options
if config.bridgeConfig.Iface != "" && config.bridgeConfig.IP != "" {
return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one")
}
if !config.bridgeConfig.EnableIPTables && !config.bridgeConfig.InterContainerCommunication {
return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. Please set --icc or --iptables to true")
}
if !config.bridgeConfig.EnableIPTables && config.bridgeConfig.EnableIPMasq {
config.bridgeConfig.EnableIPMasq = false
}
if config.CgroupParent != "" && usingSystemd(config) {
if len(config.CgroupParent) <= 6 || !strings.HasSuffix(config.CgroupParent, ".slice") {
return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
}
}
return nil
}
// checkSystem validates platform-specific requirements
func checkSystem() error {
if os.Geteuid() != 0 {
return fmt.Errorf("The Docker daemon needs to be run as root")
}
return checkKernel()
}
// configureMaxThreads sets the Go runtime max threads threshold
// which is 90% of the kernel setting from /proc/sys/kernel/threads-max
func configureMaxThreads(config *Config) error {
mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max")
if err != nil {
return err
}
mtint, err := strconv.Atoi(strings.TrimSpace(string(mt)))
if err != nil {
return err
}
maxThreads := (mtint / 100) * 90
debug.SetMaxThreads(maxThreads)
logrus.Debugf("Golang's threads limit set to %d", maxThreads)
return nil
}
// configureKernelSecuritySupport configures and validates security support for the kernel
func configureKernelSecuritySupport(config *Config, driverName string) error {
if config.EnableSelinuxSupport {
if selinuxEnabled() {
// As Docker on overlayFS and SELinux are incompatible at present, error on overlayfs being enabled
if driverName == "overlay" {
return fmt.Errorf("SELinux is not supported with the %s graph driver", driverName)
}
logrus.Debug("SELinux enabled successfully")
} else {
logrus.Warn("Docker could not enable SELinux on the host system")
}
} else {
selinuxSetDisabled()
}
return nil
}
func isBridgeNetworkDisabled(config *Config) bool {
return config.bridgeConfig.Iface == disableNetworkBridge
}
func (daemon *Daemon) networkOptions(dconfig *Config) ([]nwconfig.Option, error) {
options := []nwconfig.Option{}
if dconfig == nil {
return options, nil
}
options = append(options, nwconfig.OptionDataDir(dconfig.Root))
dd := runconfig.DefaultDaemonNetworkMode()
dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
options = append(options, nwconfig.OptionDefaultNetwork(dn))
if strings.TrimSpace(dconfig.ClusterStore) != "" {
kv := strings.Split(dconfig.ClusterStore, "://")
if len(kv) != 2 {
return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
}
options = append(options, nwconfig.OptionKVProvider(kv[0]))
options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
}
if len(dconfig.ClusterOpts) > 0 {
options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
}
if daemon.discoveryWatcher != nil {
options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
}
if dconfig.ClusterAdvertise != "" {
options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
}
options = append(options, nwconfig.OptionLabels(dconfig.Labels))
options = append(options, driverOptions(dconfig)...)
return options, nil
}
func (daemon *Daemon) initNetworkController(config *Config) (libnetwork.NetworkController, error) {
netOptions, err := daemon.networkOptions(config)
if err != nil {
return nil, err
}
controller, err := libnetwork.New(netOptions...)
if err != nil {
return nil, fmt.Errorf("error obtaining controller instance: %v", err)
}
// Initialize default network on "null"
if _, err := controller.NewNetwork("null", "none", libnetwork.NetworkOptionPersist(false)); err != nil {
return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
}
// Initialize default network on "host"
if _, err := controller.NewNetwork("host", "host", libnetwork.NetworkOptionPersist(false)); err != nil {
return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
}
if !config.DisableBridge {
// Initialize default driver "bridge"
if err := initBridgeDriver(controller, config); err != nil {
return nil, err
}
}
return controller, nil
}
func driverOptions(config *Config) []nwconfig.Option {
bridgeConfig := options.Generic{
"EnableIPForwarding": config.bridgeConfig.EnableIPForward,
"EnableIPTables": config.bridgeConfig.EnableIPTables,
"EnableUserlandProxy": config.bridgeConfig.EnableUserlandProxy}
bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig}
dOptions := []nwconfig.Option{}
dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption))
return dOptions
}
func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error {
if n, err := controller.NetworkByName("bridge"); err == nil {
if err = n.Delete(); err != nil {
return fmt.Errorf("could not delete the default bridge network: %v", err)
}
}
bridgeName := bridge.DefaultBridgeName
if config.bridgeConfig.Iface != "" {
bridgeName = config.bridgeConfig.Iface
}
netOption := map[string]string{
bridge.BridgeName: bridgeName,
bridge.DefaultBridge: strconv.FormatBool(true),
netlabel.DriverMTU: strconv.Itoa(config.Mtu),
bridge.EnableIPMasquerade: strconv.FormatBool(config.bridgeConfig.EnableIPMasq),
bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication),
}
// --ip processing
if config.bridgeConfig.DefaultIP != nil {
netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String()
}
var (
ipamV4Conf *libnetwork.IpamConf
ipamV6Conf *libnetwork.IpamConf
)
ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
nw, nw6List, err := ipamutils.ElectInterfaceAddresses(bridgeName)
if err == nil {
ipamV4Conf.PreferredPool = types.GetIPNetCanonical(nw).String()
hip, _ := types.GetHostPartIP(nw.IP, nw.Mask)
if hip.IsGlobalUnicast() {
ipamV4Conf.Gateway = nw.IP.String()
}
}
if config.bridgeConfig.IP != "" {
ipamV4Conf.PreferredPool = config.bridgeConfig.IP
ip, _, err := net.ParseCIDR(config.bridgeConfig.IP)
if err != nil {
return err
}
ipamV4Conf.Gateway = ip.String()
} else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" {
logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool)
}
if config.bridgeConfig.FixedCIDR != "" {
_, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR)
if err != nil {
return err
}
ipamV4Conf.SubPool = fCIDR.String()
}
if config.bridgeConfig.DefaultGatewayIPv4 != nil {
ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String()
}
var deferIPv6Alloc bool
if config.bridgeConfig.FixedCIDRv6 != "" {
_, fCIDRv6, err := net.ParseCIDR(config.bridgeConfig.FixedCIDRv6)
if err != nil {
return err
}
// In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has
// at least 48 host bits, we need to guarantee the current behavior where the containers'
// IPv6 addresses will be constructed based on the containers' interface MAC address.
// We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints
// on this network until after the driver has created the endpoint and returned the
// constructed address. Libnetwork will then reserve this address with the ipam driver.
ones, _ := fCIDRv6.Mask.Size()
deferIPv6Alloc = ones <= 80
if ipamV6Conf == nil {
ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
}
ipamV6Conf.PreferredPool = fCIDRv6.String()
// In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6
// address belongs to the same network, we need to inform libnetwork about it, so
// that it can be reserved with IPAM and it will not be given away to somebody else
for _, nw6 := range nw6List {
if fCIDRv6.Contains(nw6.IP) {
ipamV6Conf.Gateway = nw6.IP.String()
break
}
}
}
if config.bridgeConfig.DefaultGatewayIPv6 != nil {
if ipamV6Conf == nil {
ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
}
ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.bridgeConfig.DefaultGatewayIPv6.String()
}
v4Conf := []*libnetwork.IpamConf{ipamV4Conf}
v6Conf := []*libnetwork.IpamConf{}
if ipamV6Conf != nil {
v6Conf = append(v6Conf, ipamV6Conf)
}
// Initialize default network on "bridge" with the same name
_, err = controller.NewNetwork("bridge", "bridge",
libnetwork.NetworkOptionEnableIPv6(config.bridgeConfig.EnableIPv6),
libnetwork.NetworkOptionDriverOpts(netOption),
libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil),
libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc))
if err != nil {
return fmt.Errorf("Error creating default \"bridge\" network: %v", err)
}
return nil
}
// setupInitLayer populates a directory with mountpoints suitable
// for bind-mounting things into the container.
//
// This extra layer is used by all containers as the top-most ro layer. It protects
// the container from unwanted side-effects on the rw layer.
func setupInitLayer(initLayer string, rootUID, rootGID int) error {
for pth, typ := range map[string]string{
"/dev/pts": "dir",
"/dev/shm": "dir",
"/proc": "dir",
"/sys": "dir",
"/.dockerenv": "file",
"/etc/resolv.conf": "file",
"/etc/hosts": "file",
"/etc/hostname": "file",
"/dev/console": "file",
"/etc/mtab": "/proc/mounts",
} {
parts := strings.Split(pth, "/")
prev := "/"
for _, p := range parts[1:] {
prev = filepath.Join(prev, p)
syscall.Unlink(filepath.Join(initLayer, prev))
}
if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil {
if os.IsNotExist(err) {
if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil {
return err
}
switch typ {
case "dir":
if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil {
return err
}
case "file":
f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755)
if err != nil {
return err
}
f.Chown(rootUID, rootGID)
f.Close()
default:
if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil {
return err
}
}
} else {
return err
}
}
}
// Layer is ready to use, if it wasn't before.
return nil
}
// Parse the remapped root (user namespace) option, which can be one of:
// username - valid username from /etc/passwd
// username:groupname - valid username; valid groupname from /etc/group
// uid - 32-bit unsigned int valid Linux UID value
// uid:gid - uid value; 32-bit unsigned int Linux GID value
//
// If no groupname is specified, and a username is specified, an attempt
// will be made to lookup a gid for that username as a groupname
//
// If names are used, they are verified to exist in passwd/group
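// For example, "default", "1000", "1000:1000", "someuser" and "someuser:somegroup"
// are all accepted forms (the names shown here are illustrative).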
func parseRemappedRoot(usergrp string) (string, string, error) {
var (
userID, groupID int
username, groupname string
)
idparts := strings.Split(usergrp, ":")
if len(idparts) > 2 {
return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp)
}
if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil {
// must be a uid; take it as valid
userID = int(uid)
luser, err := user.LookupUid(userID)
if err != nil {
return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err)
}
username = luser.Name
if len(idparts) == 1 {
// if the uid was numeric and no gid was specified, take the uid as the gid
groupID = userID
lgrp, err := user.LookupGid(groupID)
if err != nil {
return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
}
groupname = lgrp.Name
}
} else {
lookupName := idparts[0]
// special case: if the user specified "default", they want Docker to create or
// use (after creation) the "dockremap" user/group for root remapping
if lookupName == defaultIDSpecifier {
lookupName = defaultRemappedID
}
luser, err := user.LookupUser(lookupName)
if err != nil && idparts[0] != defaultIDSpecifier {
// error if the name requested isn't the special "dockremap" ID
return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err)
} else if err != nil {
// special case-- if the username == "default", then we have been asked
// to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid}
// ranges will be used for the user and group mappings in user namespaced containers
_, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID)
if err == nil {
return defaultRemappedID, defaultRemappedID, nil
}
return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err)
}
userID = luser.Uid
username = luser.Name
if len(idparts) == 1 {
// we only have a string username, and no group specified; look up gid from username as group
group, err := user.LookupGroup(lookupName)
if err != nil {
return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err)
}
groupID = group.Gid
groupname = group.Name
}
}
if len(idparts) == 2 {
// groupname or gid is separately specified and must be resolved
// to an unsigned 32-bit gid
if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil {
// must be a gid, take it as valid
groupID = int(gid)
lgrp, err := user.LookupGid(groupID)
if err != nil {
return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err)
}
groupname = lgrp.Name
} else {
// not a number; attempt a lookup
group, err := user.LookupGroup(idparts[1])
if err != nil {
return "", "", fmt.Errorf("Error during gid lookup for %q: %v", idparts[1], err)
}
groupID = group.Gid
groupname = idparts[1]
}
}
return username, groupname, nil
}
func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) {
if runtime.GOOS != "linux" && config.RemappedRoot != "" {
return nil, nil, fmt.Errorf("User namespaces are only supported on Linux")
}
// if the daemon was started with remapped root option, parse
// the config option to the int uid,gid values
var (
uidMaps, gidMaps []idtools.IDMap
)
if config.RemappedRoot != "" {
username, groupname, err := parseRemappedRoot(config.RemappedRoot)
if err != nil {
return nil, nil, err
}
if username == "root" {
// Cannot set up user namespaces with a 1-to-1 mapping; "--userns-remap=0:0"
// is effectively a no-op
logrus.Warnf("User namespaces: root cannot be remapped with itself; user namespaces are OFF")
return uidMaps, gidMaps, nil
}
logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname)
// update remapped root setting now that we have resolved them to actual names
config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname)
uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname)
if err != nil {
return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err)
}
}
return uidMaps, gidMaps, nil
}
func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error {
config.Root = rootDir
// the docker root metadata directory needs to have execute permissions for all users (o+x)
// so that syscalls executing as non-root, operating on subdirectories of the graph root
// (e.g. mounted layers of a container) can traverse this path.
// The user namespace support will create subdirectories for the remapped root host uid:gid
// pair owned by that same uid:gid pair for proper write access to those needed metadata and
// layer content subtrees.
if _, err := os.Stat(rootDir); err == nil {
// root currently exists; verify the access bits are correct by setting them
if err = os.Chmod(rootDir, 0701); err != nil {
return err
}
} else if os.IsNotExist(err) {
// no root exists yet, create it 0701 with root:root ownership
if err := os.MkdirAll(rootDir, 0701); err != nil {
return err
}
}
// if user namespaces are enabled we will create a subtree underneath the specified root
// with any/all specified remapped root uid/gid options on the daemon creating
// a new subdirectory with ownership set to the remapped uid/gid (so as to allow
// `chdir()` to work for containers namespaced to that uid/gid)
if config.RemappedRoot != "" {
config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID))
logrus.Debugf("Creating user namespaced daemon root: %s", config.Root)
// Create the root directory if it doesn't exist
if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil {
return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err)
}
}
return nil
}
// registerLinks writes the links to a file.
func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() {
return nil
}
for _, l := range hostConfig.Links {
name, alias, err := runconfigopts.ParseLink(l)
if err != nil {
return err
}
child, err := daemon.GetContainer(name)
if err != nil {
// An error from daemon.GetContainer() means this name could not be found
return fmt.Errorf("Could not get container for %s", name)
}
for child.HostConfig.NetworkMode.IsContainer() {
parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2)
child, err = daemon.GetContainer(parts[1])
if err != nil {
return fmt.Errorf("Could not get container for %s", parts[1])
}
}
if child.HostConfig.NetworkMode.IsHost() {
return runconfig.ErrConflictHostNetworkAndLinks
}
if err := daemon.registerLink(container, child, alias); err != nil {
return err
}
}
// After we load all the links into the daemon
// set them to nil on the hostconfig
return container.WriteHostConfig()
}
// conditionalMountOnStart is a platform specific helper function during the
// container start to call mount.
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
return daemon.Mount(container)
}
// conditionalUnmountOnCleanup is a platform specific helper function called
// during the cleanup of a container to unmount.
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) {
daemon.Unmount(container)
}
func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error {
// Unix has no custom images to register
return nil
}
|
[
"\"DOCKER_NOWARN_KERNEL_VERSION\""
] |
[] |
[
"DOCKER_NOWARN_KERNEL_VERSION"
] |
[]
|
["DOCKER_NOWARN_KERNEL_VERSION"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "port.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
app.py
|
""" write to a SQLite database with forms, templates
add new record, delete a record, edit/update a record
"""
from flask import Flask, render_template, request, flash, send_file, make_response, jsonify, abort, session, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from flask_login import login_required, LoginManager, login_user, UserMixin, logout_user, current_user
from flask_wtf import FlaskForm
from wtforms import SubmitField, SelectField, RadioField, HiddenField, StringField, IntegerField, FloatField, PasswordField
from wtforms.fields.html5 import DateField
from wtforms.validators import InputRequired, Optional, DataRequired
from datetime import date
import csv
import sqlite3
from io import StringIO, BytesIO
import os
import pandas as pd
from sqlalchemy import create_engine
import plotly.express as px
from plotly.offline import plot
DB_VAR=os.environ.get('HEROKU_POSTGRESQL_PINK_URL', None)
OUT_DB_VAR=os.environ.get('DATABASE_URL', None)
GROUP_NAME=os.environ.get('GROUP_NAME', None)
app = Flask(__name__)
# Flask-WTF requires an encryption key - the string can be anything
app.config['SECRET_KEY'] = 'MLXH243GssUWwKdTWS7FDhdwYF56wPj8'
# Flask-Bootstrap requires this line
Bootstrap(app)
# the database connection URLs are taken from the Heroku environment variables read above
app.config['SQLALCHEMY_BINDS'] = {
"db1":DB_VAR,
"db2":OUT_DB_VAR}
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
###Login Setting###
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
# this variable, db, will be used for all SQLAlchemy commands
db = SQLAlchemy(app)
# each table in the database needs a class to be created for it
# db.Model is required - don't change it
# identify all columns by name and data type
class Emissions(db.Model):
__tablename__ = 'records'
__bind_key__= "db1"
id = db.Column(db.Integer, primary_key=True)
kms = db.Column(db.Float)
transport = db.Column(db.String)
fuel = db.Column(db.String)
date = db.Column(db.String)
co2= db.Column(db.Float)
ch4= db.Column(db.Float)
user_name= db.Column(db.String)
updated = db.Column(db.String)
def __init__(self, kms, transport, fuel, date, co2, ch4, user_name, updated):
self.kms = kms
self.transport = transport
self.fuel = fuel
self.date = date
self.co2 = co2
self.ch4 = ch4
self.user_name = user_name
self.updated = updated
engine_local = create_engine(DB_VAR)
engine_super = create_engine(OUT_DB_VAR)
### SuperUser DB
class SuperUser(UserMixin,db.Model):
__tablename__ = 'users'
__bind_key__= "db2"
id = db.Column(db.Integer, primary_key=True)
student = db.Column(db.String)
user_name= db.Column(db.Integer)
password = db.Column(db.String)
group_name= db.Column(db.String)
def __init__(self, user_name):
self.user_name= user_name
#### Everything is recorded; nothing is removed
class SuperBackUp(db.Model):
__tablename__= 'backup'
__bind_key__="db2"
id = db.Column(db.Integer, primary_key=True)
kms = db.Column(db.Float)
transport = db.Column(db.String)
fuel = db.Column(db.String)
date = db.Column(db.String)
co2= db.Column(db.Float)
ch4= db.Column(db.Float)
user_name= db.Column(db.String)
updated = db.Column(db.String)
def __init__(self, kms, transport, fuel, date, co2, ch4, user_name, updated):
self.kms = kms
self.transport = transport
self.fuel = fuel
self.date = date
self.co2 = co2
self.ch4 = ch4
self.user_name = user_name
self.updated = updated
###Global DB dynamically updated from sessions.
class SuperGlobal(db.Model):
__tablename__= 'global'
__bind_key__="db2"
id = db.Column(db.Integer, primary_key=True)
kms = db.Column(db.Float)
transport = db.Column(db.String)
fuel = db.Column(db.String)
date = db.Column(db.String)
co2= db.Column(db.Float)
ch4= db.Column(db.Float)
user_name= db.Column(db.String)
updated = db.Column(db.String)
group_name = db.Column(db.String)
def __init__(self, kms, transport, fuel, date, co2, ch4, user_name, updated, group_name):
self.kms = kms
self.transport = transport
self.fuel = fuel
self.date = date
self.co2 = co2
self.ch4 = ch4
self.user_name = user_name
self.updated = updated
self.group_name = group_name
@app.before_first_request
def before_first_request():
db.create_all()
# +++++++++++++++++++++++
# forms with Flask-WTF
class LoginRecord(FlaskForm):
user= StringField("User",validators=[InputRequired()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField("Submit")
# form for add_record and edit_or_delete
# each field includes validation requirements and messages
class AddRecord(FlaskForm):
id_field = HiddenField()
##Transport
kms = FloatField("Kilometers",[InputRequired()])
transport_type = SelectField("Type of Transport",
[InputRequired()],
choices=[
('Bus', 'Bus'),
('Car', 'Car'),
('Plane', 'Plane'),
('Ferry', 'Ferry'),
('Scooter', 'E-Scooter'),
('Bicycle', 'Bicycle'),
('Motorbike',"Motorbike"),
('Walk', 'Walk')
])
fuel_type = SelectField("Fuel Type",
validators=[InputRequired()],choices=[])
date=DateField("Date",[InputRequired()])
gas =FloatField("kg/passenger km",[Optional()],description='Add CO2 kg/passenger km if known. \
Otherwise, leave blank and a default corresponding to the fuel \
type and vehicle average from "UK Government GHG Conversion Factors for Company Reporting" will be used')
submit = SubmitField("Submit")
##Emissions factor per transport in kg per passenger km
##++++++++++++++++++++++
efco2={"Bus":{"Diesel":0.10231,"CNG":0.08,"Petrol":0.10231,"No Fossil Fuel":0},
"Car":{"Hybrid":0.10567,"Petrol":0.18592,"Diesel":0.16453,"No Fossil Fuel":0},
"Plane":{"Jet Fuel":0.24298,"No Fossil Fuel":0},
"Ferry":{"Diesel":0.11131,"HFO":0.1131,"No Fossil Fuel":0},
"Motorbike":{"Petrol":0.09816,"No Fossil Fuel":0},
"Scooter":{"No Fossil Fuel":0},
"Bicycle":{"No Fossil Fuel":0},
"Walk":{"No Fossil Fuel":0}}
efch4={"Bus":{"Diesel":2e-5,"CNG":2.5e-3,"Petrol":2e-5,"No Fossil Fuel":0},
"Car":{"Hybrid":1.5e-4,"Petrol":3.1e-4,"Diesel":3e-6,"No Fossil Fuel":0},
"Plane":{"Jet Fuel":1.1e-4,"No Fossil Fuel":0},
"Ferry":{"DO":3e-5,"HFO":3e-5,"No Fossil Fuel":0},
"Motorbike":{"Petrol":2.1e-3,"No Fossil Fuel":0},
"Scooter":{"No Fossil Fuel":0},
"Bicycle":{"No Fossil Fuel":0},
"Walk":{"No Fossil Fuel":0}}
#+++++++++++++++++++++++
# small form
class DeleteForm(FlaskForm):
id_field = HiddenField()
purpose = HiddenField()
submit = SubmitField('Delete This Record')
# +++++++++++++++++++++++
# get local date - does not account for time zone
# note: date was imported at top of script
def stringdate():
today = date.today()
date_list = str(today).split('-')
# build string in format YYYY-MM-DD (e.g. 2000-01-01)
date_string = date_list[0] + "-" + date_list[1] + "-" + date_list[2]
return date_string
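# e.g. stringdate() returns "2024-01-31" on 31 January 2024 (year-month-day order)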
###routes
@login_manager.user_loader
def load_user(user_id):
return SuperUser(user_id)
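# Flask-Login calls this callback on every request to rebuild the user object
# from the id stored in the session cookie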
@app.route('/login',methods=['GET', 'POST'])
def login():
formlog=LoginRecord(request.form)
if request.method =="POST" and formlog.validate_on_submit():
##check user
user=SuperUser.query.filter_by(user_name=formlog.user.data).first()
if user and formlog.password.data == user.password and GROUP_NAME==user.group_name:
login_user(user)
session.pop('_flashes', None)
return (redirect(url_for("index")))
else:
# if the user or password is incorrect, re-render the login page with an error message
message = "User or password incorrect "
return render_template('login.html', formlog=formlog, message=message)
return render_template('login.html', formlog = formlog)
@app.route('/')
@login_required
def index():
# get a list of unique values in the transport column
user_rec=SuperUser.query.filter_by(id=current_user.user_name).first().student
transport = Emissions.query.with_entities(Emissions.transport).distinct()
###Outer Plot
global_emissions=pd.read_sql("SELECT * FROM global",engine_super)
global_emissions["date"]= pd.to_datetime(global_emissions["date"],yearfirst=True)
global_emissions=global_emissions.sort_values(by="date")
global_emissions=global_emissions.groupby(["date","group_name"]).agg({"co2":sum})
global_emissions=global_emissions.reset_index()
if global_emissions.shape[0]!=0:
global_emissions["date"]=global_emissions["date"].dt.strftime('%Y-%m-%d %H:%M:%S')
fig_global = px.line(global_emissions, x="date", y="co2", color='group_name',
labels={
"co2": "CO2 kg/passenger km",
"date": "Date",
"group_name": "Group Name"
},
title="Emissions per Group")
fig_global.update_traces(mode='markers+lines')
plot_div_global = plot(fig_global, output_type='div', include_plotlyjs=False)
else:
plot_div_global = ""
if transport.first() is not None: ##To avoid crash when DB is empty
##Inner plot group
group_emissions=pd.read_sql("SELECT * FROM records",engine_local)
group_emissions["date"]= pd.to_datetime(group_emissions["date"],yearfirst=True)
group_emissions=group_emissions.sort_values(by="date")
group_emissions=group_emissions.groupby(["date","user_name"]).agg({"co2":sum})
group_emissions=group_emissions.reset_index()
group_emissions["date"]=group_emissions["date"].dt.strftime('%Y-%m-%d %H:%M:%S')
fig = px.line(group_emissions, x="date", y="co2", color='user_name',
labels={
"co2": "CO2 kg/passenger km",
"date": "Date",
"user_name": "Name"
},
title="Emissions per Group Member")
fig.update_traces(mode='markers+lines')
plot_div = plot(fig, output_type='div', include_plotlyjs=False)
return render_template('index.html',transport=transport,user_rec=user_rec,plot_div=plot_div,
plot_div_global=plot_div_global)
else:
return render_template('index.html',transport=transport,user_rec=user_rec, plot_div_global=plot_div_global)
@app.route('/inventory/<transport>')
def inventory(transport):
emissions = Emissions.query.filter_by(transport=transport).order_by(Emissions.date).all()
return render_template('list.html', emissions=emissions, transport=transport)
@app.route('/inventory')
def inventory_all():
emissions = Emissions.query.order_by(Emissions.date).all()
return render_template('list.html', emissions=emissions)
##New record
@app.route('/add_record', methods=['GET', 'POST'])
def add_record():
form1 = AddRecord()
form1.fuel_type.choices=[(fuel,fuel) for fuel in efco2["Bus"].keys()]
if form1.validate_on_submit():
kms = request.form['kms']
transport = request.form['transport_type']
fuel = request.form['fuel_type']
date = request.form['date']
# get today's date from function, above all the routes
updated = stringdate()
gas=request.form["gas"]
if gas=="":
co2=float(kms)*efco2[transport][fuel]
ch4=float(kms)*efch4[transport][fuel]
else:
co2=float(kms)*float(gas)
ch4=float(kms)*efch4[transport][fuel]
user=SuperUser.query.filter_by(id=current_user.user_name).first()
user_rec=user.student
group_rec=user.group_name
# the data to be inserted into the Emissions model - the table, records
record = Emissions(kms, transport, fuel, date, co2, ch4, user_rec, updated)
backup= SuperBackUp(kms, transport, fuel, date, co2, ch4, user_rec, updated)
global_db= SuperGlobal(kms, transport, fuel, date, co2, ch4, user_rec, updated, group_rec)
# Flask-SQLAlchemy magic adds record to database
db.session.add(record)
db.session.add(backup)
db.session.add(global_db)
db.session.commit()
# create a message to send to the template
message = f"The record for {transport} on {date} has been submitted."
return render_template('add_record.html', message=message)
else:
# show validation errors
for field, errors in form1.errors.items():
for error in errors:
flash("Error in {}: {}".format(
getattr(form1, field).label.text,
error
), 'error')
return render_template('add_record.html', form1=form1)
@app.route('/fuel_type/<transport>')
def fuel_type(transport):
Allfuel=efco2[transport].keys()
fuelArray= []
for fuel in Allfuel:
fuelObj={}
fuelObj["transport"]=transport
fuelObj["fuel"]=fuel
fuelArray.append(fuelObj)
return jsonify({"fuel_json": fuelArray})
#select a record to edit or delete
@app.route('/select_record')
def select_record():
emissions = Emissions.query.order_by(Emissions.date).all()
return render_template('select_record.html', emissions=emissions)
# edit or delete - come here from form in /select_record
@app.route('/edit_or_delete', methods=['POST'])
def edit_or_delete():
id = request.form['id']
choice = request.form['choice']
emissions = Emissions.query.filter(Emissions.id == id).first()
# two forms in this template
form1 = AddRecord()
form1.fuel_type.choices=[(fuel,fuel) for fuel in efco2[emissions.transport].keys()]
form2 = DeleteForm()
return render_template('edit_or_delete.html', emissions=emissions, form1=form1, form2=form2, choice=choice)
# result of delete - this function deletes the record
@app.route('/delete_result', methods=['POST'])
def delete_result():
id = request.form['id_field']
purpose = request.form['purpose']
emissions = Emissions.query.filter(Emissions.id == id).first()
emissions_global= SuperGlobal.query.filter(SuperGlobal.kms==emissions.kms,
SuperGlobal.transport==emissions.transport,
SuperGlobal.fuel==emissions.fuel,
SuperGlobal.date==emissions.date,
SuperGlobal.updated==emissions.updated).first()
if purpose == 'delete':
db.session.delete(emissions)
db.session.delete(emissions_global)
db.session.commit()
message = f"The record {emissions.transport} on {emissions.date} has been deleted from the database."
return render_template('result.html', message=message)
else:
# this calls an error handler
abort(405)
# result of edit - this function updates the record
@app.route('/edit_result', methods=['POST'])
def edit_result():
id_in = request.form['id_field']
# call up the record from the database
emissions = Emissions.query.filter(Emissions.id == id_in).first()
emissions_global= SuperGlobal.query.filter(SuperGlobal.kms==emissions.kms,
SuperGlobal.transport==emissions.transport,
SuperGlobal.fuel==emissions.fuel,
SuperGlobal.date==emissions.date,
SuperGlobal.updated==emissions.updated).first()
# update all values
emissions.kms = request.form['kms']
emissions.transport = request.form['transport_type']
emissions.fuel = request.form['fuel_type']
emissions.date=request.form['date']
# get today's date from function, above all the routes
emissions.updated = stringdate()
emissions.gas=request.form["gas"]
# update all values
emissions_global.kms = request.form['kms']
emissions_global.transport = request.form['transport_type']
emissions_global.fuel = request.form['fuel_type']
emissions_global.date=request.form['date']
# get today's date from function, above all the routes
emissions_global.updated = stringdate()
emissions_global.gas=request.form["gas"]
if emissions.gas=="":
emissions.co2=float(emissions.kms)*efco2[emissions.transport][emissions.fuel]
emissions.ch4=float(emissions.kms)*efch4[emissions.transport][emissions.fuel]
emissions_global.co2=float(emissions_global.kms)*efco2[emissions_global.transport][emissions_global.fuel]
emissions_global.ch4=float(emissions_global.kms)*efch4[emissions_global.transport][emissions_global.fuel]
else:
emissions.co2=float(emissions.kms)*float(emissions.gas)
emissions.ch4=float(emissions.kms)*efch4[emissions.transport][emissions.fuel]
emissions_global.co2=float(emissions_global.kms)*float(emissions_global.gas)
emissions_global.ch4=float(emissions_global.kms)*efch4[emissions_global.transport][emissions_global.fuel]
emissions.user=SuperUser.query.filter_by(id=current_user.user_name).first().user_name
form1 = AddRecord()
form1.fuel_type.choices=[(fuel,fuel) for fuel in efco2[emissions.transport].keys()]
if form1.validate_on_submit():
# update database record
db.session.commit()
# create a message to send to the template
message = f"The data for {emissions.transport} on {emissions.date} has been updated."
return render_template('result.html', message=message)
else:
# show validation errors
emissions.id = id_in
for field, errors in form1.errors.items():
for error in errors:
flash("Error in {}: {}".format(
getattr(form1, field).label.text,
error
), 'error')
return render_template('edit_or_delete.html', form1=form1, emissions=emissions, choice='edit')
##Download option
@app.route("/download")
def download():
si = StringIO()
outcsv=csv.writer(si)
con=engine_local.connect()
result= con.execute('select * from records')
outcsv.writerow(x for x in result._metadata.keys)
# dump rows
outcsv.writerows(row for row in result)
mem = BytesIO()
mem.write(si.getvalue().encode('utf-8'))
mem.seek(0)
si.close()
output = send_file(mem,mimetype="text/csv",
attachment_filename= 'emissions.csv',as_attachment=True, cache_timeout=0)
return output
con.close()
os.remove('emissions.csv')
@app.route('/logout/', methods=["GET"])
@login_required
def logout():
user=current_user
user.authenticated=False
logout_user()
engine_local.dispose()
# redirecting to home page
return redirect(url_for('login'))
# +++++++++++++++++++++++
# error routes
@app.errorhandler(404)
def page_not_found(e):
return render_template('error.html', pagetitle="404 Error - Page Not Found", pageheading="Page not found (Error 404)", error=e), 404
@app.errorhandler(405)
def form_not_posted(e):
return render_template('error.html', pagetitle="405 Error - Form Not Submitted", pageheading="The form was not submitted (Error 405)", error=e), 405
@app.errorhandler(500)
def internal_server_error(e):
return render_template('error.html', pagetitle="500 Error - Internal Server Error", pageheading="Internal server error (500)", error=e), 500
# +++++++++++++++++++++++
if __name__ == '__main__':
app.run(debug=True)
|
[] |
[] |
[
"DATABASE_URL",
"HEROKU_POSTGRESQL_PINK_URL",
"GROUP_NAME"
] |
[]
|
["DATABASE_URL", "HEROKU_POSTGRESQL_PINK_URL", "GROUP_NAME"]
|
python
| 3 | 0 | |
a2ml/tasks_queue/tasks_api.py
|
from .celery_app import celeryApp
import logging
import copy
import os
import json
import jsonpickle
from a2ml.api.utils.context import Context
from a2ml.api.a2ml import A2ML
from a2ml.api.a2ml_dataset import A2MLDataset
from a2ml.api.a2ml_experiment import A2MLExperiment
from a2ml.api.a2ml_model import A2MLModel
from a2ml.api.a2ml_project import A2MLProject
from a2ml.server.notification import SyncSender
notificator = SyncSender()
def create_context(params, new_project=False):
if params.get('context'):
ctx = jsonpickle.decode(params['context'])
ctx.set_runs_on_server(True)
ctx.config.set('config', 'use_server', False)
ctx.notificator = notificator
ctx.request_id = params['_request_id']
ctx.setup_logger(format='')
else:
# For Tasks Test Only!
project_path = os.path.join(
os.environ.get('A2ML_PROJECT_PATH', ''), params.get('project_name')
)
ctx = Context(path=project_path, debug = params.get("debug_log", False))
if not new_project:
if params.get("provider"):
ctx.config.set('config', 'providers', [params.get("provider")])
if params.get("source_path"):
ctx.config.set('config', 'source', params.get("source_path"))
tmp_dir = os.path.join(os.path.dirname(__file__), 'tmp')
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
# For Azure, since it packages the current directory
os.chdir(tmp_dir)
return ctx
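# Illustrative params for the test-only branch above (the keys are the ones read in
# this module; the values are made up):
# {"project_name": "my_project", "provider": "azure", "source_path": "data.csv",
#  "debug_log": False, "_request_id": "req-1", "args": [], "kwargs": {}}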
def __handle_task_result(self, status, retval, task_id, args, kwargs, einfo):
request_id = args[0]['_request_id']
if status == 'SUCCESS':
notificator.publish_result(request_id, status, retval)
else:
notificator.publish_result(
request_id,
status,
__error_to_result(retval, einfo)
)
def execute_tasks(tasks_func, params):
if os.environ.get('TEST_CALL_CELERY_TASKS'):
return tasks_func(params)
else:
ar = tasks_func.delay(params)
return ar.get()
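# Usage sketch: execute_tasks(import_data_task, params) runs the task inline when
# TEST_CALL_CELERY_TASKS is set, otherwise dispatches it to Celery and waits for the result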
# Projects
@celeryApp.task(after_return=__handle_task_result)
def new_project_task(params):
return with_context(
params,
lambda ctx: A2MLProject(ctx, None).create(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def list_projects_task(params):
def func(ctx):
res = A2MLProject(ctx, None).list(*params['args'], **params['kwargs'])
return __map_collection_to_name(res, 'projects')
return with_context(params, func)
@celeryApp.task(after_return=__handle_task_result)
def delete_project_task(params):
return with_context(
params,
lambda ctx: A2MLProject(ctx, None).delete(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def select_project_task(params):
return with_context(
params,
lambda ctx: A2MLProject(ctx, None).select(*params['args'], **params['kwargs'])
)
# Datasets
@celeryApp.task(after_return=__handle_task_result)
def new_dataset_task(params):
return with_context(
params,
lambda ctx: A2MLDataset(ctx, None).create(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def list_datasets_task(params):
def func(ctx):
res = A2MLDataset(ctx, None).list(*params['args'], **params['kwargs'])
return __map_collection_to_name(res, 'datasets')
return with_context(params, func)
@celeryApp.task(after_return=__handle_task_result)
def delete_dataset_task(params):
return with_context(
params,
lambda ctx: A2MLDataset(ctx, None).delete(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def select_dataset_task(params):
return with_context(
params,
lambda ctx: A2MLDataset(ctx, None).select(*params['args'], **params['kwargs'])
)
# Experiment
@celeryApp.task(after_return=__handle_task_result)
def list_experiments_task(params):
def func(ctx):
res = A2MLExperiment(ctx, None).list(*params['args'], **params['kwargs'])
return __map_collection_to_name(res, 'experiments')
return with_context(params, func)
@celeryApp.task(after_return=__handle_task_result)
def leaderboard_experiment_task(params):
return with_context(
params,
lambda ctx: A2MLExperiment(ctx, None).leaderboard(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def history_experiment_task(params):
return with_context(
params,
lambda ctx: A2MLExperiment(ctx, None).history(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def start_experiment_task(params):
return with_context(
params,
lambda ctx: A2MLExperiment(ctx, None).start(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def stop_experiment_task(params):
return with_context(
params,
lambda ctx: A2MLExperiment(ctx, None).stop(*params['args'], **params['kwargs'])
)
# Models
@celeryApp.task(after_return=__handle_task_result)
def actual_model_task(params):
return with_context(
params,
lambda ctx: A2MLModel(ctx, None).actual(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def deploy_model_task(params):
return with_context(
params,
lambda ctx: A2MLModel(ctx, None).deploy(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def predict_model_task(params):
return with_context(
params,
lambda ctx: A2MLModel(ctx, None).predict(*params['args'], **params['kwargs'])
)
# Complex tasks
@celeryApp.task(after_return=__handle_task_result)
def import_data_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).import_data(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def train_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).train(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def evaluate_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).evaluate(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def deploy_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).deploy(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def predict_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).predict(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def review_task(params):
# TODO
raise Exception('not implemented yet')
@celeryApp.task(after_return=__handle_task_result)
def demo_task(params):
import time
request_id = params['_request_id']
for i in range(0, 10):
notificator.publish_log(request_id, 'info', 'log ' + str(i))
time.sleep(2)
notificator.publish_result(request_id, 'SUCCESS', 'done')
def with_context(params, proc):
ctx = create_context(params)
if 'args' not in params:
params['args'] = []
if 'kwargs' not in params:
params['kwargs'] = {}
res = proc(ctx)
ctx.set_runs_on_server(False)
ctx.config.set('config', 'use_server', True)
return {'response': res, 'config': jsonpickle.encode(ctx.config)}
def __exception_message_with_all_causes(e):
if isinstance(e, Exception) and e.__cause__:
return str(e) + ' caused by ' + __exception_message_with_all_causes(e.__cause__)
else:
return str(e)
def __error_to_result(retval, einfo):
res = __exception_message_with_all_causes(retval)
if einfo:
res += '\n' + str(einfo)
return res
def __map_collection_to_name(res, collection_name):
    for provider in res.keys():
        if collection_name in res[provider]['data']:
            res[provider]['data'][collection_name] = list(
                map(lambda x: x.get('name'), res[provider]['data'][collection_name])
            )
    return res
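# Illustrative only: a minimal sketch of the params payload the tasks above expect,
# assuming the keys read in this module ('_request_id', 'args', 'kwargs') are the
# only required ones; anything else create_context() may need is not shown here.
EXAMPLE_TASK_PARAMS = {
    '_request_id': 'req-123',   # used by the notificator (see demo_task)
    'args': [],                 # positional args forwarded to the provider call
    'kwargs': {},               # keyword args forwarded to the provider call
}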
|
[] |
[] |
[
"A2ML_PROJECT_PATH",
"TEST_CALL_CELERY_TASKS"
] |
[]
|
["A2ML_PROJECT_PATH", "TEST_CALL_CELERY_TASKS"]
|
python
| 2 | 0 | |
app.py
|
import os
from flask import Flask, request, abort, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from models import setup_db, Client, Artist, Project
from auth import AuthError, requires_auth
app = Flask(__name__)
setup_db(app)
CORS(app)
'''
GET '/projects'
- require permission 'get:projects'
- return status code 200 and json {'success': True, 'projects': projects}
projects: a list of open projects' names
'''
@app.route('/projects')
@requires_auth(permission='get:projects')
def get_projects(jwt):
projects = Project.query.all()
projects = [p.name for p in projects]
return jsonify({'success': True, 'projects': projects})
'''
GET '/artists'
- require permission 'get:artists'
- return status code 200 and json {'success': true, 'artists': artists}
        artists: a list of artists' names
'''
@app.route('/artists')
@requires_auth(permission='get:artists')
def get_artists(jwt):
artists = Artist.query.all()
artists = [a.name for a in artists]
return jsonify({'success': True, 'artists': artists})
'''
GET '/projects/<int:id>'
- require permission 'get:projects'
- return status code 200 and json {'success': true, 'project': project}
project: the project with id requested
- return status code 404 if <id> is not found
'''
@app.route('/projects/<int:id>')
@requires_auth(permission='get:projects')
def get_project_detail(jwt, id):
project = Project.query.filter_by(id=id).one_or_none()
if (project is None):
abort(404)
return jsonify({'success': True, 'project': project.format()})
'''
POST '/clients'
- require permission 'post:clients'
    - return status code 200 and json {'success': true, 'client': client_name}
        client_name: the name of the client created
- return status code 422 if request is unprocessable
'''
@app.route('/clients', methods=['POST'])
@requires_auth(permission='post:clients')
def post_client(jwt):
body = request.get_json()
if (body is None):
abort(422)
name = body.get('name')
description = body.get('description')
if (name is None or description is None):
abort(422)
client = Client(name=name, description=description)
try:
client.insert()
    except Exception:
abort(422)
client = Client.query.filter_by(name=name).one()
return jsonify({'success': True, 'client': client.name})
'''
POST '/projects/'
- require permission 'post:projects'
- return status code 200 and json {'success': true, 'project': project}
project: the project created
- return status code 422 if request is unprocessable
'''
@app.route('/projects', methods=['POST'])
@requires_auth(permission='post:projects')
def post_project(jwt):
body = request.get_json()
if (body is None):
abort(422)
name = body.get('name')
client_id = body.get('client_id')
description = body.get('description')
if (name is None or client_id is None or description is None):
abort(422)
project = Project(name=name, client_id=client_id, description=description)
try:
project.insert()
except Exception:
abort(422)
return jsonify({'success': True, 'project': project.format()})
'''
POST '/artists/'
- require permission 'post:artists'
- return status code 200 and json {'success': true, 'artist': artist}
        artist: the artist just added
- return status code 422 if request is unprocessable
'''
@app.route('/artists', methods=['POST'])
@requires_auth(permission='post:artists')
def post_artists(jwt):
body = request.get_json()
if (body is None):
abort(422)
name = body.get('name')
portfolio_link = body.get('portfolio_link')
if (name is None or portfolio_link is None):
abort(422)
artist = Artist(name=name, portfolio_link=portfolio_link)
try:
artist.insert()
except Exception:
abort(422)
return jsonify({'success': True, 'artist': artist.format()})
'''
PATCH '/projects/<int:id>'
- require permission 'patch:projects'
- return status code 200 and json {'success': true, 'project': project}
        project: the updated project
- return status code 404 if <id> is not found
- return status code 422 if request is unprocessable
'''
@app.route('/projects/<int:id>', methods=['PATCH'])
@requires_auth(permission='patch:projects')
def patch_project(jwt, id):
project = Project.query.filter_by(id=id).one_or_none()
if (project is None):
abort(404)
body = request.get_json()
if (body is None):
abort(422)
name = body.get('name')
client_id = body.get('client_id')
description = body.get('description')
if (name is not None):
project.name = name
if (client_id is not None):
project.client_id = client_id
if (description is not None):
project.description = description
try:
project.update()
    except Exception:
abort(422)
return jsonify({'success': True, 'project': project.format()})
'''
DELETE '/projects/<int:id>'
- require permission 'delete:projects'
    - return status code 200 and json {'success': true, 'project': name}
    - return status code 404 if <id> is not found
'''
@app.route('/projects/<int:id>', methods=['DELETE'])
@requires_auth(permission='delete:projects')
def delete_project(jwt, id):
project = Project.query.filter_by(id=id).one_or_none()
if (project is None):
abort(404)
name = project.name
project.delete()
return jsonify({'success': True, 'project': name})
# Default port:
if __name__ == '__main__':
app.run()
# Or specify port manually:
'''
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
'''
# Error Handling
'''
Example error handling for unprocessable entity
'''
@app.errorhandler(422)
def unprocessable(error):
return jsonify({
"success": False,
"error": 422,
"message": "unprocessable"
}), 422
'''
Implement error handler for 404
error handler should conform to general task above
'''
@app.errorhandler(404)
def not_found(error):
return jsonify({
"success": False,
"error": 404,
"message": "resource not found"
}), 404
'''
Implement error handler for AuthError
error handler should conform to general task above
'''
@app.errorhandler(AuthError)
def autherror(error):
return jsonify({
"success": False,
"code": error.status_code,
"error": error.error["code"],
"description": error.error["description"]
}), error.status_code
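'''
Illustrative usage sketch: how a client might call the API above. The host/port
and the token value are assumptions; only the routes and required permissions
are defined in this file.
    import requests
    resp = requests.post(
        'http://localhost:5000/clients',
        headers={'Authorization': 'Bearer <jwt-with-post:clients-permission>'},
        json={'name': 'Acme', 'description': 'New client'},
    )
    assert resp.status_code == 200 and resp.json()['success'] is True
'''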
|
[] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
python
| 1 | 0 | |
unsupported/tests/test_package.py
|
import nose.tools
import os
import ndar
def test_package():
package = ndar.Package('test_data/package')
assert len(package.images) == 12
def test_noent_package():
nose.tools.assert_raises(Exception, lambda: ndar.Package('test_data/bogus'))
def test_mysql_package():
package = ndar.MySQLPackage(os.environ['MYSQL_HOST'],
os.environ['MYSQL_USER'],
os.environ['MYSQL_PASSWORD'],
os.environ['MYSQL_DATABASE'],
os.environ['S3ACCESS'],
os.environ['S3SECRET'])
assert len(package.images) == 7626
# eof
|
[] |
[] |
[
"MYSQL_PASSWORD",
"MYSQL_USER",
"S3SECRET",
"S3ACCESS",
"MYSQL_DATABASE",
"MYSQL_HOST"
] |
[]
|
["MYSQL_PASSWORD", "MYSQL_USER", "S3SECRET", "S3ACCESS", "MYSQL_DATABASE", "MYSQL_HOST"]
|
python
| 6 | 0 | |
middleware/securityTokenMiddleware.go
|
// Copyright 2021 Kévin José. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package middleware
import (
"net/http"
"os"
"strings"
"github.com/gin-gonic/gin"
"gitlab.com/kjose/jgmc/api/internal/easyapi"
)
// Middleware to check the token sent in the header
// It needs TOKEN_COOKIE_NAME env var to know the cookie where it is registered
func SecurityTokenMiddleware() gin.HandlerFunc {
return func(c *gin.Context) {
tkn, err := c.Cookie(os.Getenv("TOKEN_COOKIE_NAME"))
if err != nil {
tkn = c.GetHeader("Authorization")
if tkn == "" {
easyapi.HttpError(c, http.StatusUnauthorized, "Authorization token is required", nil)
c.Abort()
return
}
tkn = strings.Replace(tkn, "Bearer ", "", 1)
}
tknData, err := easyapi.ParseToken(tkn)
if err != nil {
easyapi.HttpError(c, http.StatusUnauthorized, "Authorization token is invalid", nil)
c.Abort()
return
}
c.Set(easyapi.CONTEXT_KEY_TOKEN, tknData)
c.Next()
}
}
|
[
"\"TOKEN_COOKIE_NAME\""
] |
[] |
[
"TOKEN_COOKIE_NAME"
] |
[]
|
["TOKEN_COOKIE_NAME"]
|
go
| 1 | 0 | |
InstaScan.py
|
import argparse
import hashlib
import json
import re
import requests
authtokens = tuple()
def checkTokens():
if not authtokens:
getTokens()
def getTokens():
r = requests.get('https://instagram.com/', headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0', }).text
rhx_gis = json.loads(re.compile('window._sharedData = ({.*?});', re.DOTALL).search(r).group(1))['nonce']
ppc = re.search(r'ConsumerLibCommons.js/(.*?).js', r).group(1)
r = requests.get('https://www.instagram.com/static/bundles/metro/ConsumerLibCommons.js/' + ppc + '.js').text
query_hash = re.findall(r'{value:!0}\);(?:var|const|let) .=\"([0-9a-f]{32})\"', r)[1]
global authtokens
authtokens = tuple((rhx_gis, query_hash))
def const_gis(query):
checkTokens()
t = authtokens[0] + ':' + query
x_instagram_gis = hashlib.md5(t.encode("utf-8")).hexdigest()
return x_instagram_gis
def usernameToUserId(user):
r = requests.get('https://www.instagram.com/web/search/topsearch/?query=' + user, headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0'}).text
if json.loads(r).get("message") == 'rate limited':
print(
'[x] Rate limit reached!\n[#] Unchecked Username: {}\n[!] Try again in a few minutes.\n'.format(user))
exit()
try:
for i in range(len(json.loads(r)['users'])):
if json.loads(r)['users'][i]['user']['username'] == user:
return json.loads(r)['users'][i]['user']['pk']
    except Exception:
return False
def useridToUsername(userid):
header = {
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Mobile/14G60 Instagram 12.0.0.16.90 (iPhone9,4; iOS 10_3_3; en_US; en-US; scale=2.61; gamut=wide; 1080x1920)',
'X-Requested-With': 'XMLHttpRequest'}
r = requests.get(
f'https://i.instagram.com/api/v1/users/{userid}/info/',
headers=header).text
if json.loads(r).get("status") != 'ok':
print('[x] Rate limit reached!\n[#] Unchecked ID: {}\n[!] Try again in a few minutes..\n'.format(userid))
exit()
try:
username = json.loads(r)['user']['username']
return username
    except Exception:
return False
def main():
parser = argparse.ArgumentParser(prog='InstaScan.py')
parser.add_argument('-u', '--user', action='store', dest='username',
help='Set Instagram username', type=str)
parser.add_argument('-i', '--id', action='store', dest='id',
help='Set Instagram userID', type=int)
parser.add_argument('-f', '--list', action='store', dest='file',
help='Import username/userID as a .txt file',
type=str)
args = parser.parse_args()
if args.file is not None:
result = list()
try:
with open(args.file, 'r') as file:
elements = file.readlines()
except FileNotFoundError:
print('[-] File Not Found :(')
return 0
print("Processing...\n")
with open('result.txt', 'w') as file:
for e in elements:
e = e.strip()
if e.isdigit():
username = useridToUsername(e)
if username:
result.append('{}:{}'.format(e, username))
file.write('{}:{}\n'.format(e, username))
else:
print('[-] "{}" Not Found!\n'.format(e))
else:
userid = usernameToUserId(e)
if userid:
result.append('{}:{}'.format(userid, e))
file.write('{}:{}\n'.format(userid, e))
else:
print('[-] "{}" Not Found!\n'.format(e))
print('[++] Result saved as result.txt')
return 0
if args.id is not None:
username = useridToUsername(args.id)
if not username:
print('[-] UserID does not exist')
else:
print('[+] Username: {}'.format(username))
if args.username is not None:
userid = usernameToUserId(args.username)
if not userid:
print('[-] Username does not exist')
else:
print('[+] UserID: {}'.format(userid))
if args.id is None and args.username is None:
parser.print_help()
if __name__ == '__main__':
main()
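# Example invocations (illustrative; the username, ID and file name are placeholders):
#   python InstaScan.py -u instagram       # resolve a username to a userID
#   python InstaScan.py -i 25025320        # resolve a userID to a username
#   python InstaScan.py -f targets.txt     # bulk lookup, results saved to result.txt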
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
pydbapi/conf/settings.py
|
# @Author: chunyang.xu
# @Email: [email protected]
# @Date: 2020-06-10 14:14:53
# @Last Modified time: 2022-01-20 09:47:50
# @github: https://github.com/longfengpili
# !/usr/bin/env python3
# -*- coding:utf-8 -*-
import re
import os
import sys
import colorlog
AUTO_RULES = ['test_xu']  # table names that may be executed automatically (substring match)
REDSHIFT_AUTO_RULES = AUTO_RULES + ['_data_aniland']  # table names Amazon Redshift may execute automatically (substring match)
# logging settings
USERPATH = os.environ['USERPROFILE'] if 'USERPROFILE' in os.environ else os.environ['HOME'] if 'HOME' in os.environ else ''
LOG_BASE_PATH = os.path.join(USERPATH, '.pydbapilog')  # logs are written under the user's home directory
PROJECT_NAME = re.sub(':?\\\\', '_', os.getcwd())
LOGGING_CONFIG = {
    'version': 1,  # reserved key
    'disable_existing_loggers': False,  # whether to disable existing logger instances
    # log formats
    'formatters': {
        # detailed log format
'standard': {
'format': '%(asctime)s.%(msecs)03d - %(threadName)s:%(thread)d - %(name)s - %(levelname)s - %(pathname)s - %(lineno)d - %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
        # simple log format
'simple': {
'format': '%(asctime)s.%(msecs)03d - %(threadName)s - %(name)s - %(levelname)s - %(filename)s - %(lineno)d - %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
        # a special, message-only log format
'collect': {
'format': '%(message)s'
},
# color
'color': {
'()': colorlog.ColoredFormatter,
'format': '%(asctime)s.%(msecs)03d - %(threadName)s - %(name)s - %(levelname)s - %(filename)s - %(lineno)d - %(log_color)s%(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
'log_colors': {
'CRITICAL': 'bold_red',
'ERROR': 'red',
'WARNING': 'purple',
'INFO': 'green',
'DEBUG': 'yellow'
}
}
},
    # filters
'filters': {
},
    # handlers
    'handlers': {
        # print to the terminal
'console': {
'level': 'INFO',
'filters': [],
'class': 'logging.StreamHandler', #
'formatter': 'color' if sys.stdout.isatty() else 'simple'
},
        # default handler
'default': {
'level': 'INFO',
            'class': 'pydbapi.conf.MakeFileHandler',  # creates the log file (and directory) if missing
            'filename': os.path.join(LOG_BASE_PATH, f'{PROJECT_NAME}_default.log'),  # log file
            'when': 'd',  # rotate daily
            'interval': 1,
            'backupCount': 30,  # keep at most 30 backups
'formatter': 'standard',
'encoding': 'utf-8',
},
'db': {
'level': 'INFO',
            'class': 'pydbapi.conf.MakeFileHandler',  # write to file with automatic rotation
            'filename': os.path.join(LOG_BASE_PATH, f'{PROJECT_NAME}_db.log'),  # log file
            'when': 'd',  # rotate daily
'interval': 1,
'backupCount': 30,
'formatter': 'simple',
'encoding': "utf-8"
},
'sql': {
'level': 'INFO',
            'class': 'logging.handlers.TimedRotatingFileHandler',  # write to file with automatic rotation
            'filename': os.path.join(LOG_BASE_PATH, f'{PROJECT_NAME}_sql.log'),  # log file
            'when': 'd',  # rotate daily
'interval': 1,
'backupCount': 30,
'formatter': 'simple',
'encoding': "utf-8"
},
},
'loggers': {
        # the default (root) logger uses the following configuration
        '': {
            'handlers': ['console', 'default'],
            'level': 'INFO',
            'propagate': True,  # whether to propagate to higher-level loggers
},
'db': {
'handlers': ['console', 'db'],
'level': 'INFO',
            'propagate': False,  # whether to propagate to higher-level loggers
},
'sql': {
'handlers': ['console', 'sql'],
'level': 'INFO',
            'propagate': False,  # whether to propagate to higher-level loggers
},
'redshift': {
'handlers': ['console', 'db'],
'level': 'INFO',
            'propagate': False,  # whether to propagate to higher-level loggers
},
'sqlite': {
'handlers': ['console', 'db'],
'level': 'INFO',
            'propagate': False,  # whether to propagate to higher-level loggers
},
'mysql': {
'handlers': ['console', 'db'],
'level': 'INFO',
            'propagate': False,  # whether to propagate to higher-level loggers
},
'snowflake': {
'handlers': ['console', 'db'],
'level': 'INFO',
            'propagate': False,  # whether to propagate to higher-level loggers
},
},
}
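# Illustrative only (assumption: the package may already apply this elsewhere):
# the config above is consumed by a single dictConfig call, after which the named
# loggers ('db', 'sql', ...) log to the console and to their rotating files.
#   import logging.config
#   logging.config.dictConfig(LOGGING_CONFIG)
#   logging.getLogger('sql').info('SELECT 1;')  # console + <PROJECT_NAME>_sql.log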
|
[] |
[] |
[
"HOME",
"USERPROFILE"
] |
[]
|
["HOME", "USERPROFILE"]
|
python
| 2 | 0 | |
pkg/operator/ceph/cluster/osd/spec.go
|
/*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package osd for the Ceph OSDs.
package osd
import (
"fmt"
"os"
"path"
"path/filepath"
"strconv"
"strings"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
rookalpha "github.com/rook/rook/pkg/apis/rook.io/v1alpha2"
opmon "github.com/rook/rook/pkg/operator/ceph/cluster/mon"
"github.com/rook/rook/pkg/operator/ceph/cluster/osd/config"
opspec "github.com/rook/rook/pkg/operator/ceph/spec"
"github.com/rook/rook/pkg/operator/ceph/version"
cephver "github.com/rook/rook/pkg/operator/ceph/version"
"github.com/rook/rook/pkg/operator/k8sutil"
apps "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
dataDirsEnvVarName = "ROOK_DATA_DIRECTORIES"
osdStoreEnvVarName = "ROOK_OSD_STORE"
osdDatabaseSizeEnvVarName = "ROOK_OSD_DATABASE_SIZE"
osdWalSizeEnvVarName = "ROOK_OSD_WAL_SIZE"
osdJournalSizeEnvVarName = "ROOK_OSD_JOURNAL_SIZE"
osdsPerDeviceEnvVarName = "ROOK_OSDS_PER_DEVICE"
encryptedDeviceEnvVarName = "ROOK_ENCRYPTED_DEVICE"
osdMetadataDeviceEnvVarName = "ROOK_METADATA_DEVICE"
pvcBackedOSDVarName = "ROOK_PVC_BACKED_OSD"
lvPathVarName = "ROOK_LV_PATH"
topologyAwareEnvVarName = "ROOK_TOPOLOGY_AWARE"
rookBinariesMountPath = "/rook"
rookBinariesVolumeName = "rook-binaries"
blockPVCMapperInitContainer = "blkdevmapper"
osdMemoryTargetSafetyFactor float32 = 0.8
CephDeviceSetLabelKey = "ceph.rook.io/DeviceSet"
CephSetIndexLabelKey = "ceph.rook.io/setIndex"
CephDeviceSetPVCIDLabelKey = "ceph.rook.io/DeviceSetPVCId"
OSDOverPVCLabelKey = "ceph.rook.io/pvc"
)
func (c *Cluster) makeJob(osdProps osdProperties) (*batch.Job, error) {
podSpec, err := c.provisionPodTemplateSpec(osdProps, v1.RestartPolicyOnFailure)
if err != nil {
return nil, err
}
if osdProps.pvc.ClaimName == "" {
podSpec.Spec.NodeSelector = map[string]string{v1.LabelHostname: osdProps.crushHostname}
} else {
podSpec.Spec.InitContainers = append(podSpec.Spec.InitContainers, c.getPVCInitContainer(osdProps.pvc))
}
job := &batch.Job{
ObjectMeta: metav1.ObjectMeta{
Name: k8sutil.TruncateNodeName(prepareAppNameFmt, osdProps.crushHostname),
Namespace: c.Namespace,
Labels: map[string]string{
k8sutil.AppAttr: prepareAppName,
k8sutil.ClusterAttr: c.Namespace,
},
},
Spec: batch.JobSpec{
Template: *podSpec,
},
}
if len(osdProps.pvc.ClaimName) > 0 {
k8sutil.AddLabelToJob(OSDOverPVCLabelKey, osdProps.pvc.ClaimName, job)
}
k8sutil.AddRookVersionLabelToJob(job)
opspec.AddCephVersionLabelToJob(c.clusterInfo.CephVersion, job)
k8sutil.SetOwnerRef(&job.ObjectMeta, &c.ownerRef)
return job, nil
}
func (c *Cluster) makeDeployment(osdProps osdProperties, osd OSDInfo) (*apps.Deployment, error) {
replicaCount := int32(1)
volumeMounts := opspec.CephVolumeMounts(false)
configVolumeMounts := opspec.RookVolumeMounts(false)
volumes := opspec.PodVolumes(c.dataDirHostPath, c.Namespace, false)
failureDomainValue := osdProps.crushHostname
var dataDir string
if osd.IsDirectory {
// Mount the path to the directory-based osd
// osd.DataPath includes the osd subdirectory, so we want to mount the parent directory
parentDir := filepath.Dir(osd.DataPath)
dataDir = parentDir
// Skip the mount if this is the default directory being mounted. Inside the container, the path
// will be mounted at "/var/lib/rook" even if the dataDirHostPath is a different path on the host.
if parentDir != k8sutil.DataDir {
volumeName := k8sutil.PathToVolumeName(parentDir)
dataDirSource := v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: parentDir}}
volumes = append(volumes, v1.Volume{Name: volumeName, VolumeSource: dataDirSource})
configVolumeMounts = append(configVolumeMounts, v1.VolumeMount{Name: volumeName, MountPath: parentDir})
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumeName, MountPath: parentDir})
}
} else {
dataDir = k8sutil.DataDir
// Create volume config for /dev so the pod can access devices on the host
devVolume := v1.Volume{Name: "devices", VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/dev"}}}
volumes = append(volumes, devVolume)
devMount := v1.VolumeMount{Name: "devices", MountPath: "/dev"}
volumeMounts = append(volumeMounts, devMount)
}
if osdProps.pvc.ClaimName != "" {
// Create volume config for PVCs
volumes = append(volumes, getPVCOSDVolumes(&osdProps)...)
}
if len(volumes) == 0 {
return nil, fmt.Errorf("empty volumes")
}
storeType := config.Bluestore
if osd.IsFileStore {
storeType = config.Filestore
}
osdID := strconv.Itoa(osd.ID)
tiniEnvVar := v1.EnvVar{Name: "TINI_SUBREAPER", Value: ""}
envVars := []v1.EnvVar{
nodeNameEnvVar(osdProps.crushHostname),
k8sutil.PodIPEnvVar(k8sutil.PrivateIPEnvVar),
k8sutil.PodIPEnvVar(k8sutil.PublicIPEnvVar),
tiniEnvVar,
}
envVars = append(envVars, k8sutil.ClusterDaemonEnvVars(c.cephVersion.Image)...)
envVars = append(envVars, []v1.EnvVar{
{Name: "ROOK_OSD_UUID", Value: osd.UUID},
{Name: "ROOK_OSD_ID", Value: osdID},
{Name: "ROOK_OSD_STORE_TYPE", Value: storeType},
}...)
configEnvVars := append(c.getConfigEnvVars(osdProps.storeConfig, dataDir, osdProps.crushHostname, osdProps.location), []v1.EnvVar{
tiniEnvVar,
{Name: "ROOK_OSD_ID", Value: osdID},
{Name: "ROOK_CEPH_VERSION", Value: c.clusterInfo.CephVersion.CephVersionFormatted()},
}...)
if !osd.IsDirectory {
configEnvVars = append(configEnvVars, v1.EnvVar{Name: "ROOK_IS_DEVICE", Value: "true"})
}
// Activate verbose mode for ceph-volume on prepare
if osd.CephVolumeInitiated {
configEnvVars = append(configEnvVars, v1.EnvVar{Name: "CEPH_VOLUME_DEBUG", Value: "1"})
}
// default args when the ceph cluster isn't initialized
defaultArgs := []string{
"--foreground",
"--id", osdID,
"--conf", osd.Config,
"--osd-data", osd.DataPath,
"--keyring", osd.KeyringPath,
"--cluster", osd.Cluster,
"--osd-uuid", osd.UUID,
}
var commonArgs []string
// Set osd memory target to the best appropriate value
if !osd.IsFileStore {
// As of Nautilus Ceph auto-tunes its osd_memory_target on the fly so we don't need to force it
if !c.clusterInfo.CephVersion.IsAtLeastNautilus() && !c.resources.Limits.Memory().IsZero() {
osdMemoryTargetValue := float32(c.resources.Limits.Memory().Value()) * osdMemoryTargetSafetyFactor
commonArgs = append(commonArgs, fmt.Sprintf("--osd-memory-target=%f", osdMemoryTargetValue))
}
}
if osd.IsFileStore {
commonArgs = append(commonArgs, fmt.Sprintf("--osd-journal=%s", osd.Journal))
}
if c.clusterInfo.CephVersion.IsAtLeast(version.CephVersion{Major: 14, Minor: 2, Extra: 1}) {
commonArgs = append(commonArgs, "--default-log-to-file", "false")
}
commonArgs = append(commonArgs, osdOnSDNFlag(c.Network, c.clusterInfo.CephVersion)...)
// Add the volume to the spec and the mount to the daemon container
copyBinariesVolume, copyBinariesContainer := c.getCopyBinariesContainer()
volumes = append(volumes, copyBinariesVolume)
volumeMounts = append(volumeMounts, copyBinariesContainer.VolumeMounts[0])
var command []string
var args []string
if !osd.IsDirectory && osd.IsFileStore && !osd.CephVolumeInitiated {
// All scenarios except one can call the ceph-osd daemon directly. The one different scenario is when
// filestore is running on a device. Rook needs to mount the device, run the ceph-osd daemon, and then
// when the daemon exits, rook needs to unmount the device. Since rook needs to be in the container
// for this scenario, we will copy the binaries necessary to a mount, which will then be mounted
// to the daemon container.
sourcePath := path.Join("/dev/disk/by-partuuid", osd.DevicePartUUID)
command = []string{path.Join(k8sutil.BinariesMountPath, "tini")}
args = append([]string{
"--", path.Join(k8sutil.BinariesMountPath, "rook"),
"ceph", "osd", "filestore-device",
"--source-path", sourcePath,
"--mount-path", osd.DataPath,
"--"},
defaultArgs...)
} else if osd.CephVolumeInitiated {
// if the osd was provisioned by ceph-volume, we need to launch it with rook as the parent process
command = []string{path.Join(rookBinariesMountPath, "tini")}
args = []string{
"--", path.Join(rookBinariesMountPath, "rook"),
"ceph", "osd", "start",
"--",
"--foreground",
"--id", osdID,
"--osd-uuid", osd.UUID,
"--conf", osd.Config,
"--cluster", "ceph",
"--setuser", "ceph",
"--setgroup", "ceph",
// Set '--setuser-match-path' so that existing directory owned by root won't affect the daemon startup.
// For existing data store owned by root, the daemon will continue to run as root
"--setuser-match-path", osd.DataPath,
}
// mount /run/udev in the container so ceph-volume (via `lvs`)
// can access the udev database
volumes = append(volumes, v1.Volume{
Name: "run-udev",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "/run/udev"}}})
volumeMounts = append(volumeMounts, v1.VolumeMount{
Name: "run-udev",
MountPath: "/run/udev"})
// Activate verbose mode for ceph-volume on activate
envVars = append(envVars, []v1.EnvVar{
{Name: "CEPH_VOLUME_DEBUG", Value: "1"},
}...)
} else {
// other osds can launch the osd daemon directly
command = []string{"ceph-osd"}
args = defaultArgs
}
args = append(args, commonArgs...)
if osdProps.pvc.ClaimName != "" {
volumeMounts = append(volumeMounts, getPvcOSDBridgeMount(osdProps.pvc.ClaimName))
envVars = append(envVars, pvcBackedOSDEnvVar("true"))
envVars = append(envVars, lvPathEnvVariable(osd.LVPath))
}
privileged := true
runAsUser := int64(0)
readOnlyRootFilesystem := false
securityContext := &v1.SecurityContext{
Privileged: &privileged,
RunAsUser: &runAsUser,
ReadOnlyRootFilesystem: &readOnlyRootFilesystem,
}
// needed for luksOpen synchronization when devices are encrypted
hostIPC := osdProps.storeConfig.EncryptedDevice
DNSPolicy := v1.DNSClusterFirst
if c.Network.IsHost() {
DNSPolicy = v1.DNSClusterFirstWithHostNet
}
deployment := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(osdAppNameFmt, osd.ID),
Namespace: c.Namespace,
Labels: c.getOSDLabels(osd.ID, failureDomainValue, osdProps.portable),
},
Spec: apps.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
k8sutil.AppAttr: AppName,
k8sutil.ClusterAttr: c.Namespace,
OsdIdLabelKey: fmt.Sprintf("%d", osd.ID),
},
},
Strategy: apps.DeploymentStrategy{
Type: apps.RecreateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: AppName,
Labels: c.getOSDLabels(osd.ID, failureDomainValue, osdProps.portable),
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyAlways,
ServiceAccountName: serviceAccountName,
HostNetwork: c.Network.IsHost(),
HostPID: true,
HostIPC: hostIPC,
DNSPolicy: DNSPolicy,
InitContainers: []v1.Container{
{
Args: []string{"ceph", "osd", "init"},
Name: opspec.ConfigInitContainerName,
Image: k8sutil.MakeRookImage(c.rookVersion),
VolumeMounts: configVolumeMounts,
Env: configEnvVars,
SecurityContext: securityContext,
},
*copyBinariesContainer,
},
Containers: []v1.Container{
{
Command: command,
Args: args,
Name: "osd",
Image: c.cephVersion.Image,
VolumeMounts: volumeMounts,
Env: envVars,
Resources: osdProps.resources,
SecurityContext: securityContext,
Lifecycle: opspec.PodLifeCycle(osd.DataPath),
},
},
Volumes: volumes,
},
},
Replicas: &replicaCount,
},
}
if osdProps.pvc.ClaimName != "" {
deployment.Spec.Template.Spec.InitContainers = append(deployment.Spec.Template.Spec.InitContainers, c.getPVCInitContainer(osdProps.pvc))
k8sutil.AddLabelToDeployement(OSDOverPVCLabelKey, osdProps.pvc.ClaimName, deployment)
k8sutil.AddLabelToPod(OSDOverPVCLabelKey, osdProps.pvc.ClaimName, &deployment.Spec.Template)
}
if !osdProps.portable {
deployment.Spec.Template.Spec.NodeSelector = map[string]string{v1.LabelHostname: osdProps.crushHostname}
}
k8sutil.AddRookVersionLabelToDeployment(deployment)
c.annotations.ApplyToObjectMeta(&deployment.ObjectMeta)
c.annotations.ApplyToObjectMeta(&deployment.Spec.Template.ObjectMeta)
	opspec.AddCephVersionLabelToDeployment(c.clusterInfo.CephVersion, deployment)
k8sutil.SetOwnerRef(&deployment.ObjectMeta, &c.ownerRef)
if len(osdProps.pvc.ClaimName) == 0 {
c.placement.ApplyToPodSpec(&deployment.Spec.Template.Spec)
} else {
osdProps.placement.ApplyToPodSpec(&deployment.Spec.Template.Spec)
}
return deployment, nil
}
// To get rook inside the container, the config init container needs to copy "tini" and "rook" binaries into a volume.
// Get the config flag so rook will copy the binaries and create the volume and mount that will be shared between
// the init container and the daemon container
func (c *Cluster) getCopyBinariesContainer() (v1.Volume, *v1.Container) {
volume := v1.Volume{Name: rookBinariesVolumeName, VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}}
mount := v1.VolumeMount{Name: rookBinariesVolumeName, MountPath: rookBinariesMountPath}
return volume, &v1.Container{
Args: []string{
"copy-binaries",
"--copy-to-dir", rookBinariesMountPath},
Name: "copy-bins",
Image: k8sutil.MakeRookImage(c.rookVersion),
VolumeMounts: []v1.VolumeMount{mount},
}
}
func (c *Cluster) provisionPodTemplateSpec(osdProps osdProperties, restart v1.RestartPolicy) (*v1.PodTemplateSpec, error) {
copyBinariesVolume, copyBinariesContainer := c.getCopyBinariesContainer()
// ceph-volume is currently set up to use /etc/ceph/ceph.conf; this means no user config
// overrides will apply to ceph-volume, but this is unnecessary anyway
volumes := append(opspec.PodVolumes(c.dataDirHostPath, c.Namespace, true), copyBinariesVolume)
// by default, don't define any volume config unless it is required
if len(osdProps.devices) > 0 || osdProps.selection.DeviceFilter != "" || osdProps.selection.GetUseAllDevices() || osdProps.metadataDevice != "" || osdProps.pvc.ClaimName != "" {
// create volume config for the data dir and /dev so the pod can access devices on the host
devVolume := v1.Volume{Name: "devices", VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/dev"}}}
volumes = append(volumes, devVolume)
udevVolume := v1.Volume{Name: "udev", VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/run/udev"}}}
volumes = append(volumes, udevVolume)
}
if osdProps.pvc.ClaimName != "" {
// Create volume config for PVCs
volumes = append(volumes, getPVCOSDVolumes(&osdProps)...)
}
// add each OSD directory as another host path volume source
for _, d := range osdProps.selection.Directories {
if c.skipVolumeForDirectory(d.Path) {
// the dataDirHostPath has already been added as a volume
continue
}
dirVolume := v1.Volume{
Name: k8sutil.PathToVolumeName(d.Path),
VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: d.Path}},
}
volumes = append(volumes, dirVolume)
}
if len(volumes) == 0 {
return nil, fmt.Errorf("empty volumes")
}
podSpec := v1.PodSpec{
ServiceAccountName: serviceAccountName,
InitContainers: []v1.Container{
*copyBinariesContainer,
},
Containers: []v1.Container{
c.provisionOSDContainer(osdProps, copyBinariesContainer.VolumeMounts[0]),
},
RestartPolicy: restart,
Volumes: volumes,
HostNetwork: c.Network.IsHost(),
}
if c.Network.IsHost() {
podSpec.DNSPolicy = v1.DNSClusterFirstWithHostNet
}
if len(osdProps.pvc.ClaimName) == 0 {
c.placement.ApplyToPodSpec(&podSpec)
} else {
osdProps.placement.ApplyToPodSpec(&podSpec)
}
podMeta := metav1.ObjectMeta{
Name: AppName,
Labels: map[string]string{
k8sutil.AppAttr: prepareAppName,
k8sutil.ClusterAttr: c.Namespace,
OSDOverPVCLabelKey: osdProps.pvc.ClaimName,
},
Annotations: map[string]string{},
}
c.annotations.ApplyToObjectMeta(&podMeta)
// ceph-volume --dmcrypt uses cryptsetup that synchronizes with udev on
// host through semaphore
podSpec.HostIPC = osdProps.storeConfig.EncryptedDevice
return &v1.PodTemplateSpec{
ObjectMeta: podMeta,
Spec: podSpec,
}, nil
}
// Currently we can't mount a block mode PV directly into a privileged container,
// so we mount it into a non-privileged init container and then copy it to a common directory
// that is mounted in both the init container and the privileged provision container.
func (c *Cluster) getPVCInitContainer(pvc v1.PersistentVolumeClaimVolumeSource) v1.Container {
return v1.Container{
Name: blockPVCMapperInitContainer,
Image: c.cephVersion.Image,
Command: []string{
"cp",
},
Args: []string{"-a", fmt.Sprintf("/%s", pvc.ClaimName), fmt.Sprintf("/mnt/%s", pvc.ClaimName)},
VolumeDevices: []v1.VolumeDevice{
{
Name: pvc.ClaimName,
DevicePath: fmt.Sprintf("/%s", pvc.ClaimName),
},
},
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/mnt",
Name: fmt.Sprintf("%s-bridge", pvc.ClaimName),
},
},
SecurityContext: opmon.PodSecurityContext(),
}
}
func (c *Cluster) getConfigEnvVars(storeConfig config.StoreConfig, dataDir, nodeName, location string) []v1.EnvVar {
envVars := []v1.EnvVar{
nodeNameEnvVar(nodeName),
{Name: "ROOK_CLUSTER_ID", Value: string(c.ownerRef.UID)},
k8sutil.PodIPEnvVar(k8sutil.PrivateIPEnvVar),
k8sutil.PodIPEnvVar(k8sutil.PublicIPEnvVar),
opmon.ClusterNameEnvVar(c.Namespace),
opmon.EndpointEnvVar(),
opmon.SecretEnvVar(),
opmon.AdminSecretEnvVar(),
k8sutil.ConfigDirEnvVar(dataDir),
k8sutil.ConfigOverrideEnvVar(),
{Name: "ROOK_FSID", ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{Name: "rook-ceph-mon"},
Key: "fsid",
},
}},
k8sutil.NodeEnvVar(),
}
	// pass on the topologyAware flag to the provision pod so that portable OSDs can reconcile zone/region
if c.DesiredStorage.TopologyAware {
envVars = append(envVars, topologyAwareEnvVar("true"))
}
if storeConfig.StoreType != "" {
envVars = append(envVars, v1.EnvVar{Name: osdStoreEnvVarName, Value: storeConfig.StoreType})
}
if storeConfig.DatabaseSizeMB != 0 {
envVars = append(envVars, v1.EnvVar{Name: osdDatabaseSizeEnvVarName, Value: strconv.Itoa(storeConfig.DatabaseSizeMB)})
}
if storeConfig.WalSizeMB != 0 {
envVars = append(envVars, v1.EnvVar{Name: osdWalSizeEnvVarName, Value: strconv.Itoa(storeConfig.WalSizeMB)})
}
if storeConfig.JournalSizeMB != 0 {
envVars = append(envVars, v1.EnvVar{Name: osdJournalSizeEnvVarName, Value: strconv.Itoa(storeConfig.JournalSizeMB)})
}
if storeConfig.OSDsPerDevice != 0 {
envVars = append(envVars, v1.EnvVar{Name: osdsPerDeviceEnvVarName, Value: strconv.Itoa(storeConfig.OSDsPerDevice)})
}
if storeConfig.EncryptedDevice {
envVars = append(envVars, v1.EnvVar{Name: encryptedDeviceEnvVarName, Value: "true"})
}
if location != "" {
envVars = append(envVars, rookalpha.LocationEnvVar(location))
}
return envVars
}
func (c *Cluster) provisionOSDContainer(osdProps osdProperties, copyBinariesMount v1.VolumeMount) v1.Container {
envVars := c.getConfigEnvVars(osdProps.storeConfig, k8sutil.DataDir, osdProps.crushHostname, osdProps.location)
devMountNeeded := false
if osdProps.pvc.ClaimName != "" {
devMountNeeded = true
}
privileged := false
// only 1 of device list, device filter and use all devices can be specified. We prioritize in that order.
if len(osdProps.devices) > 0 {
deviceNames := make([]string, len(osdProps.devices))
for i, device := range osdProps.devices {
devSuffix := ""
if count, ok := device.Config[config.OSDsPerDeviceKey]; ok {
logger.Infof("%s osds requested on device %s (node %s)", count, device.Name, osdProps.crushHostname)
devSuffix += ":" + count
} else {
devSuffix += ":1"
}
if databaseSizeMB, ok := device.Config[config.DatabaseSizeMBKey]; ok {
logger.Infof("osd %s requested with DB size %sMB (node %s)", device.Name, databaseSizeMB, osdProps.crushHostname)
devSuffix += ":" + databaseSizeMB
} else {
devSuffix += ":"
}
if deviceClass, ok := device.Config[config.DeviceClassKey]; ok {
logger.Infof("osd %s requested with deviceClass %s (node %s)", device.Name, deviceClass, osdProps.crushHostname)
devSuffix += ":" + deviceClass
} else {
devSuffix += ":"
}
if md, ok := device.Config[config.MetadataDeviceKey]; ok {
logger.Infof("osd %s requested with metadataDevice %s (node %s)", device.Name, md, osdProps.crushHostname)
devSuffix += ":" + md
} else {
devSuffix += ":"
}
deviceNames[i] = device.Name + devSuffix
}
envVars = append(envVars, dataDevicesEnvVar(strings.Join(deviceNames, ",")))
devMountNeeded = true
} else if osdProps.selection.DeviceFilter != "" {
envVars = append(envVars, deviceFilterEnvVar(osdProps.selection.DeviceFilter))
devMountNeeded = true
} else if osdProps.selection.GetUseAllDevices() {
envVars = append(envVars, deviceFilterEnvVar("all"))
devMountNeeded = true
}
envVars = append(envVars, v1.EnvVar{Name: "ROOK_CEPH_VERSION", Value: c.clusterInfo.CephVersion.CephVersionFormatted()})
if osdProps.metadataDevice != "" {
envVars = append(envVars, metadataDeviceEnvVar(osdProps.metadataDevice))
devMountNeeded = true
}
// ceph-volume is currently set up to use /etc/ceph/ceph.conf; this means no user config
// overrides will apply to ceph-volume, but this is unnecessary anyway
volumeMounts := append(opspec.CephVolumeMounts(true), copyBinariesMount)
if devMountNeeded {
devMount := v1.VolumeMount{Name: "devices", MountPath: "/dev"}
volumeMounts = append(volumeMounts, devMount)
udevMount := v1.VolumeMount{Name: "udev", MountPath: "/run/udev"}
volumeMounts = append(volumeMounts, udevMount)
}
if osdProps.pvc.ClaimName != "" {
volumeMounts = append(volumeMounts, getPvcOSDBridgeMount(osdProps.pvc.ClaimName))
envVars = append(envVars, dataDevicesEnvVar(strings.Join([]string{fmt.Sprintf("/mnt/%s", osdProps.pvc.ClaimName)}, ",")))
envVars = append(envVars, pvcBackedOSDEnvVar("true"))
}
if len(osdProps.selection.Directories) > 0 {
// for each directory the user has specified, create a volume mount and pass it to the pod via cmd line arg
dirPaths := make([]string, len(osdProps.selection.Directories))
for i := range osdProps.selection.Directories {
dpath := osdProps.selection.Directories[i].Path
dirPaths[i] = dpath
if c.skipVolumeForDirectory(dpath) {
// the dataDirHostPath has already been added as a volume mount
continue
}
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: k8sutil.PathToVolumeName(dpath), MountPath: dpath})
}
if !IsRemovingNode(osdProps.selection.DeviceFilter) {
envVars = append(envVars, dataDirectoriesEnvVar(strings.Join(dirPaths, ",")))
}
}
// elevate to be privileged if it is going to mount devices or if running in a restricted environment such as openshift
if devMountNeeded || os.Getenv("ROOK_HOSTPATH_REQUIRES_PRIVILEGED") == "true" || osdProps.pvc.ClaimName != "" {
privileged = true
}
runAsUser := int64(0)
runAsNonRoot := false
readOnlyRootFilesystem := false
osdProvisionContainer := v1.Container{
Command: []string{path.Join(rookBinariesMountPath, "tini")},
Args: []string{"--", path.Join(rookBinariesMountPath, "rook"), "ceph", "osd", "provision"},
Name: "provision",
Image: c.cephVersion.Image,
VolumeMounts: volumeMounts,
Env: envVars,
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
RunAsUser: &runAsUser,
RunAsNonRoot: &runAsNonRoot,
ReadOnlyRootFilesystem: &readOnlyRootFilesystem,
},
Resources: osdProps.resources,
}
return osdProvisionContainer
}
func getPvcOSDBridgeMount(claimName string) v1.VolumeMount {
return v1.VolumeMount{Name: fmt.Sprintf("%s-bridge", claimName), MountPath: "/mnt"}
}
func (c *Cluster) skipVolumeForDirectory(path string) bool {
// If attempting to add a directory at /var/lib/rook, we need to skip the volume and volume mount
// since the dataDirHostPath is always mounting at /var/lib/rook
return path == k8sutil.DataDir
}
func getPVCOSDVolumes(osdProps *osdProperties) []v1.Volume {
return []v1.Volume{
{
Name: osdProps.pvc.ClaimName,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &osdProps.pvc,
},
},
{
			// We need a bridge mount, which is basically a common volume mount between the non-privileged init container
// and the privileged provision container or osd daemon container
// The reason for this is mentioned in the comment for getPVCInitContainer() method
Name: fmt.Sprintf("%s-bridge", osdProps.pvc.ClaimName),
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{
Medium: "Memory",
},
},
},
}
}
func nodeNameEnvVar(name string) v1.EnvVar {
return v1.EnvVar{Name: "ROOK_NODE_NAME", Value: name}
}
func dataDevicesEnvVar(dataDevices string) v1.EnvVar {
return v1.EnvVar{Name: "ROOK_DATA_DEVICES", Value: dataDevices}
}
func deviceFilterEnvVar(filter string) v1.EnvVar {
return v1.EnvVar{Name: "ROOK_DATA_DEVICE_FILTER", Value: filter}
}
func metadataDeviceEnvVar(metadataDevice string) v1.EnvVar {
return v1.EnvVar{Name: osdMetadataDeviceEnvVarName, Value: metadataDevice}
}
func dataDirectoriesEnvVar(dataDirectories string) v1.EnvVar {
return v1.EnvVar{Name: dataDirsEnvVarName, Value: dataDirectories}
}
func pvcBackedOSDEnvVar(pvcBacked string) v1.EnvVar {
return v1.EnvVar{Name: pvcBackedOSDVarName, Value: pvcBacked}
}
func lvPathEnvVariable(lvPath string) v1.EnvVar {
return v1.EnvVar{Name: lvPathVarName, Value: lvPath}
}
func topologyAwareEnvVar(topologyAware string) v1.EnvVar {
return v1.EnvVar{Name: topologyAwareEnvVarName, Value: topologyAware}
}
func getDirectoriesFromContainer(osdContainer v1.Container) []rookalpha.Directory {
var dirsArg string
for _, envVar := range osdContainer.Env {
if envVar.Name == dataDirsEnvVarName {
dirsArg = envVar.Value
}
}
var dirsList []string
if dirsArg != "" {
dirsList = strings.Split(dirsArg, ",")
}
dirs := make([]rookalpha.Directory, len(dirsList))
for dirNum, dir := range dirsList {
dirs[dirNum] = rookalpha.Directory{Path: dir}
}
return dirs
}
func getConfigFromContainer(osdContainer v1.Container) map[string]string {
cfg := map[string]string{}
for _, envVar := range osdContainer.Env {
switch envVar.Name {
case osdStoreEnvVarName:
cfg[config.StoreTypeKey] = envVar.Value
case osdDatabaseSizeEnvVarName:
cfg[config.DatabaseSizeMBKey] = envVar.Value
case osdWalSizeEnvVarName:
cfg[config.WalSizeMBKey] = envVar.Value
case osdJournalSizeEnvVarName:
cfg[config.JournalSizeMBKey] = envVar.Value
case osdMetadataDeviceEnvVarName:
cfg[config.MetadataDeviceKey] = envVar.Value
}
}
return cfg
}
func osdOnSDNFlag(network cephv1.NetworkSpec, v cephver.CephVersion) []string {
var args []string
// OSD fails to find the right IP to bind to when running on SDN
// for more details: https://github.com/rook/rook/issues/3140
if !network.IsHost() {
if v.IsAtLeast(cephver.CephVersion{Major: 14, Minor: 2, Extra: 2}) {
args = append(args, "--ms-learn-addr-from-peer=false")
}
}
return args
}
func makeStorageClassDeviceSetPVCID(storageClassDeviceSetName string, setIndex, pvcIndex int) (pvcId, pvcLabelSelector string) {
pvcStorageClassDeviceSetPVCId := fmt.Sprintf("%s-%v", storageClassDeviceSetName, setIndex)
return pvcStorageClassDeviceSetPVCId, fmt.Sprintf("%s=%s", CephDeviceSetPVCIDLabelKey, pvcStorageClassDeviceSetPVCId)
}
func makeStorageClassDeviceSetPVCLabel(storageClassDeviceSetName, pvcStorageClassDeviceSetPVCId string, pvcIndex, setIndex int) map[string]string {
return map[string]string{
CephDeviceSetLabelKey: storageClassDeviceSetName,
CephSetIndexLabelKey: fmt.Sprintf("%v", setIndex),
CephDeviceSetPVCIDLabelKey: pvcStorageClassDeviceSetPVCId,
}
}
func (c *Cluster) getOSDLabels(osdID int, failureDomainValue string, portable bool) map[string]string {
return map[string]string{
k8sutil.AppAttr: AppName,
k8sutil.ClusterAttr: c.Namespace,
OsdIdLabelKey: fmt.Sprintf("%d", osdID),
FailureDomainKey: failureDomainValue,
portableKey: strconv.FormatBool(portable),
}
}
|
[
"\"ROOK_HOSTPATH_REQUIRES_PRIVILEGED\""
] |
[] |
[
"ROOK_HOSTPATH_REQUIRES_PRIVILEGED"
] |
[]
|
["ROOK_HOSTPATH_REQUIRES_PRIVILEGED"]
|
go
| 1 | 0 | |
grpc_server/server_test.go
|
package grpc_server_test
import (
"crypto/tls"
"fmt"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"os"
"path"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/ginkgomon"
"github.com/tedsuo/ifrit/grpc_server"
"golang.org/x/net/context"
"google.golang.org/grpc/examples/helloworld/helloworld"
)
var _ = Describe("GRPCServer", func() {
var (
listenAddress string
runner ifrit.Runner
serverProcess ifrit.Process
tlsConfig *tls.Config
)
BeforeEach(func() {
var err error
basePath := path.Join(os.Getenv("GOPATH"), "src", "github.com", "tedsuo", "ifrit", "http_server", "test_certs")
certFile := path.Join(basePath, "server.crt")
keyFile := path.Join(basePath, "server.key")
tlsCert, err := tls.LoadX509KeyPair(certFile, keyFile)
Expect(err).NotTo(HaveOccurred())
tlsConfig = &tls.Config{
InsecureSkipVerify: true,
Certificates: []tls.Certificate{tlsCert},
}
listenAddress = fmt.Sprintf("localhost:%d", 10000+GinkgoParallelNode())
})
	Context("given an instantiated runner", func() {
BeforeEach(func() {
runner = grpc_server.NewGRPCServer(listenAddress, tlsConfig, &server{}, helloworld.RegisterGreeterServer)
})
JustBeforeEach(func() {
serverProcess = ginkgomon.Invoke(runner)
})
AfterEach(func() {
ginkgomon.Kill(serverProcess)
})
It("serves on the listen address", func() {
conn, err := grpc.Dial(listenAddress, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
Expect(err).NotTo(HaveOccurred())
helloClient := helloworld.NewGreeterClient(conn)
_, err = helloClient.SayHello(context.Background(), &helloworld.HelloRequest{Name: "Fred"})
Expect(err).NotTo(HaveOccurred())
})
		Context("when the server tries to listen on a busy port", func() {
var alternateRunner ifrit.Runner
BeforeEach(func() {
alternateRunner = grpc_server.NewGRPCServer(listenAddress, tlsConfig, &server{}, helloworld.RegisterGreeterServer)
})
It("exits with an error", func() {
var err error
process := ifrit.Background(alternateRunner)
Eventually(process.Wait()).Should(Receive(&err))
Expect(err).To(HaveOccurred())
})
})
})
Context("when there is no tls config", func() {
BeforeEach(func() {
runner = grpc_server.NewGRPCServer(listenAddress, nil, &server{}, helloworld.RegisterGreeterServer)
})
JustBeforeEach(func() {
serverProcess = ginkgomon.Invoke(runner)
})
AfterEach(func() {
ginkgomon.Kill(serverProcess)
})
It("serves on the listen address", func() {
conn, err := grpc.Dial(listenAddress, grpc.WithInsecure())
Expect(err).NotTo(HaveOccurred())
helloClient := helloworld.NewGreeterClient(conn)
_, err = helloClient.SayHello(context.Background(), &helloworld.HelloRequest{Name: "Fred"})
Expect(err).NotTo(HaveOccurred())
})
})
Context("when the inputs to NewGRPCServer are invalid", func() {
var (
err error
)
JustBeforeEach(func() {
process := ifrit.Background(runner)
Eventually(process.Wait()).Should(Receive(&err))
Expect(err).To(HaveOccurred())
})
Context("when the registrar is an integer", func() {
BeforeEach(func() {
runner = grpc_server.NewGRPCServer(listenAddress, tlsConfig, &server{}, 42)
})
It("fails", func() {
Expect(err.Error()).To(ContainSubstring("should be func but is int"))
})
})
Context("when the registrar is nil", func() {
BeforeEach(func() {
runner = grpc_server.NewGRPCServer(listenAddress, tlsConfig, &server{}, nil)
})
It("fails", func() {
Expect(err.Error()).To(ContainSubstring("`serverRegistrar` and `handler` must be non nil"))
})
})
		Context("when the handler is nil", func() {
BeforeEach(func() {
runner = grpc_server.NewGRPCServer(listenAddress, tlsConfig, nil, helloworld.RegisterGreeterServer)
})
It("fails", func() {
Expect(err.Error()).To(ContainSubstring("`serverRegistrar` and `handler` must be non nil"))
})
})
Context("when the registrar is an empty func", func() {
BeforeEach(func() {
runner = grpc_server.NewGRPCServer(listenAddress, tlsConfig, &server{}, func() {})
})
It("fails", func() {
Expect(err.Error()).To(ContainSubstring("should have 2 parameters but it has 0 parameters"))
})
})
Context("when the registrar has bad parameters", func() {
BeforeEach(func() {
runner = grpc_server.NewGRPCServer(listenAddress, tlsConfig, &server{}, func(a, b int) {})
})
It("fails", func() {
Expect(err.Error()).To(ContainSubstring("first parameter must be `*grpc.Server` but is int"))
})
})
Context("when the registrar's first parameter is bad", func() {
BeforeEach(func() {
runner = grpc_server.NewGRPCServer(listenAddress, tlsConfig, &server{}, func(a, b int) {})
})
It("fails", func() {
Expect(err.Error()).To(ContainSubstring("first parameter must be `*grpc.Server` but is int"))
})
})
Context("when the registrar's second parameter is not an interface", func() {
BeforeEach(func() {
runner = grpc_server.NewGRPCServer(listenAddress, tlsConfig, &server{}, func(a *grpc.Server, b int) {})
})
It("fails", func() {
Expect(err.Error()).To(ContainSubstring("is not implemented by `handler`"))
})
})
Context("when the registrar's second parameter is not implemented", func() {
BeforeEach(func() {
runner = grpc_server.NewGRPCServer(listenAddress, tlsConfig, &server{}, func(a *grpc.Server, b testInterface) {})
})
It("fails", func() {
Expect(err.Error()).To(ContainSubstring("is not implemented by `handler`"))
})
})
Context("when the handler is a *struct but doesn't implement the registrar's second parameter", func() {
BeforeEach(func() {
				runner = grpc_server.NewGRPCServer(listenAddress, tlsConfig, &notServer{}, helloworld.RegisterGreeterServer)
})
It("fails", func() {
Expect(err.Error()).To(ContainSubstring("is not implemented by `handler`"))
})
})
		Context("when the handler is an int but doesn't implement the registrar's second parameter", func() {
BeforeEach(func() {
runner = grpc_server.NewGRPCServer(listenAddress, tlsConfig, 42, helloworld.RegisterGreeterServer)
})
It("fails", func() {
Expect(err.Error()).To(ContainSubstring("is not implemented by `handler`"))
})
})
Context("when the registrar returns a value", func() {
BeforeEach(func() {
f := func(a *grpc.Server, b helloworld.GreeterServer) error { return nil }
runner = grpc_server.NewGRPCServer(listenAddress, tlsConfig, &server{}, f)
})
It("fails", func() {
Expect(err.Error()).To(ContainSubstring("should return no value but it returns 1 value"))
})
})
})
})
// server is used to implement helloworld.GreeterServer.
type server struct{}
// SayHello implements helloworld.GreeterServer
func (s *server) SayHello(ctx context.Context, in *helloworld.HelloRequest) (*helloworld.HelloReply, error) {
return &helloworld.HelloReply{Message: "Hello " + in.Name}, nil
}
// notServer doesn't implement anything
type notServer struct{}
type testInterface interface {
something(a int) int
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
pkg/operator/controller/cr-manager.go
|
package controller
import (
"context"
"fmt"
"os"
"strings"
resources "github.com/kubevirt/vm-import-operator/pkg/operator/resources/operator"
v2vv1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1"
"github.com/operator-framework/operator-sdk/pkg/k8sutil"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
sdkapi "kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk/api"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// Create creates empty CR
func (r *ReconcileVMImportConfig) Create() controllerutil.Object {
return &v2vv1.VMImportConfig{}
}
// Status extracts status from the cr
func (r *ReconcileVMImportConfig) Status(object runtime.Object) *sdkapi.Status {
return &object.(*v2vv1.VMImportConfig).Status.Status
}
// GetAllResources provides all resources managed by the cr
func (r *ReconcileVMImportConfig) GetAllResources(cr runtime.Object) ([]runtime.Object, error) {
return r.getAllResources(cr.(*v2vv1.VMImportConfig))
}
// GetDependantResourcesListObjects returns resource list objects of dependant resources
func (r *ReconcileVMImportConfig) GetDependantResourcesListObjects() []runtime.Object {
return []runtime.Object{
&extv1.CustomResourceDefinitionList{},
&rbacv1.ClusterRoleBindingList{},
&rbacv1.ClusterRoleList{},
&appsv1.DeploymentList{},
&corev1.ServiceAccountList{},
}
}
// IsCreating checks whether creation of the managed resources will be executed
func (r *ReconcileVMImportConfig) IsCreating(cr controllerutil.Object) (bool, error) {
vmiconfig := cr.(*v2vv1.VMImportConfig)
return vmiconfig.Status.Conditions == nil || len(vmiconfig.Status.Conditions) == 0, nil
}
func (r *ReconcileVMImportConfig) getAllResources(cr *v2vv1.VMImportConfig) ([]runtime.Object, error) {
var resultingResources []runtime.Object
if deployClusterResources() {
rs := createCRDResources()
resultingResources = append(resultingResources, rs...)
}
nsrs := createControllerResources(r.getOperatorArgs(cr))
resultingResources = append(resultingResources, nsrs...)
return resultingResources, nil
}
func createControllerResources(args *OperatorArgs) []runtime.Object {
objs := []runtime.Object{
resources.CreateServiceAccount(args.Namespace),
resources.CreateControllerRole(),
resources.CreateControllerRoleBinding(args.Namespace),
resources.CreateControllerDeployment(resources.ControllerName, args.Namespace, args.ControllerImage, args.Virtv2vImage, args.PullPolicy, int32(1), args.InfraNodePlacement),
}
// Add metrics objects if servicemonitor is available:
if ok, err := hasServiceMonitor(); ok && err == nil {
objs = append(objs,
resources.CreateMetricsService(args.Namespace),
resources.CreateServiceMonitor(args.MonitoringNamespace, args.Namespace),
)
}
return objs
}
// hasServiceMonitor checks if ServiceMonitor is registered in the cluster.
func hasServiceMonitor() (bool, error) {
// Get a config to talk to the apiserver
cfg, err := config.GetConfig()
if err != nil {
return false, fmt.Errorf("Can't load restconfig")
}
dc := discovery.NewDiscoveryClientForConfigOrDie(cfg)
apiVersion := "monitoring.coreos.com/v1"
kind := "ServiceMonitor"
return k8sutil.ResourceExists(dc, apiVersion, kind)
}
func createCRDResources() []runtime.Object {
return []runtime.Object{
resources.CreateResourceMapping(),
resources.CreateVMImport(),
}
}
func deployClusterResources() bool {
return strings.ToLower(os.Getenv("DEPLOY_CLUSTER_RESOURCES")) != "false"
}
func (r *ReconcileVMImportConfig) getOsMappingConfigMapName(namespace string) *types.NamespacedName {
var configMapName, configMapNamespace string
operatorDeployment := &appsv1.Deployment{}
key := client.ObjectKey{Namespace: namespace, Name: "vm-import-operator"}
if err := r.client.Get(context.TODO(), key, operatorDeployment); err == nil {
operatorEnv := r.findVMImportOperatorContainer(*operatorDeployment).Env
for _, env := range operatorEnv {
if env.Name == osConfigMapName {
configMapName = env.Value
}
if env.Name == osConfigMapNamespace {
configMapNamespace = env.Value
}
}
}
if configMapName == "" && configMapNamespace == "" {
return nil
}
return &types.NamespacedName{Name: configMapName, Namespace: configMapNamespace}
}
func (r *ReconcileVMImportConfig) findVMImportOperatorContainer(operatorDeployment appsv1.Deployment) corev1.Container {
for _, container := range operatorDeployment.Spec.Template.Spec.Containers {
if container.Name == "vm-import-operator" {
return container
}
}
log.Info("vm-import-operator container not found", "deployment", operatorDeployment.Name)
return corev1.Container{}
}
|
[
"\"DEPLOY_CLUSTER_RESOURCES\""
] |
[] |
[
"DEPLOY_CLUSTER_RESOURCES"
] |
[]
|
["DEPLOY_CLUSTER_RESOURCES"]
|
go
| 1 | 0 | |
pyloci/sparql/generate_loci_type_count.py
|
from SPARQLWrapper import SPARQLWrapper, JSON
import json
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
from . import util
GRAPHDB_USER = os.getenv("GRAPHDB_USER")
GRAPHDB_PASSWORD = os.getenv("GRAPHDB_PASSWORD")
SPARQL_ENDPOINT = os.getenv("SPARQL_ENDPOINT")
# uncomment the following GRAPHDB_SPARQL and auth variables for test repo
#GRAPHDB_SPARQL = GRAPHDB_SPARQL_TEST
auth = None
# set auth only if .env has credentials
if GRAPHDB_USER is not None and GRAPHDB_PASSWORD is not None:
auth = {
"user" : GRAPHDB_USER,
"password" : GRAPHDB_PASSWORD
}
loci_types = [
"geo:Feature",
"hyf:HY_Catchment",
"hyf:HY_HydroFeature",
"loci:Linkset",
"geo:Geometry",
"<http://linked.data.gov.au/def/asgs#DestinationZone>",
"<http://linked.data.gov.au/def/asgs#NaturalResourceManagementRegion>",
"<http://linked.data.gov.au/def/asgs#StateSuburb>",
"<http://linked.data.gov.au/def/asgs#GreaterCapitalCityStatisticalArea>",
"<http://linked.data.gov.au/def/geofabric#ContractedCatchment>",
"<http://linked.data.gov.au/def/geofabric#DrainageDivision>",
"<http://linked.data.gov.au/def/geofabric#RiverRegion>",
"<http://linked.data.gov.au/def/gnaf#Address>",
"<http://linked.data.gov.au/def/gnaf#Locality>",
"<http://linked.data.gov.au/def/geofabric#ReportingRegion>",
"<http://linked.data.gov.au/def/gnaf#Street>",
"<http://linked.data.gov.au/def/loci#Feature>",
"<http://linked.data.gov.au/def/gnaf#StreetLocality>",
"<http://linked.data.gov.au/def/asgs#MeshBlock>",
"<http://linked.data.gov.au/def/asgs#StateOrTerritory>",
"<http://linked.data.gov.au/def/asgs#StatisticalAreaLevel1>",
"<http://linked.data.gov.au/def/asgs#StatisticalAreaLevel2>",
"<http://linked.data.gov.au/def/asgs#StatisticalAreaLevel3>",
"<http://linked.data.gov.au/def/asgs#StatisticalAreaLevel4>"
]
#query()
#print(auth)
count_types = []
for curr in loci_types:
count = util.query_type(curr, SPARQL_ENDPOINT, auth=auth)
count_types.append((curr, count))
print(json.dumps(count_types, indent=4, sort_keys=True))
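# Example .env (illustrative; the values are placeholders -- only these variable
# names are read above, and auth stays None when the credentials are absent):
#   GRAPHDB_USER=admin
#   GRAPHDB_PASSWORD=secret
#   SPARQL_ENDPOINT=http://localhost:7200/repositories/loci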
|
[] |
[] |
[
"SPARQL_ENDPOINT",
"GRAPHDB_PASSWORD",
"GRAPHDB_USER"
] |
[]
|
["SPARQL_ENDPOINT", "GRAPHDB_PASSWORD", "GRAPHDB_USER"]
|
python
| 3 | 0 | |
salt/grains/core.py
|
# -*- coding: utf-8 -*-
'''
The static grains, these are the core, or built in grains.
When grains are loaded they are not loaded in the same way that modules are
loaded: grain functions are detected and executed, and each function MUST
return a dict which will be applied to the main grains dict. This module
will always be executed first, so any grains loaded here in the core
module can be overwritten just by returning dict keys with the same names
as those returned here.
'''
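# Illustrative sketch (not part of the original module): a grain function is just a
# public module-level function that returns a dict to be merged into the grains, e.g.
#
#     def example_grain():
#         return {'example_key': 'example_value'}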
from __future__ import absolute_import
# Import python libs
import os
import socket
import sys
import re
import platform
import logging
import locale
# Extend the default list of supported distros. This will be used for the
# /etc/DISTRO-release checking that is part of platform.linux_distribution()
from platform import _supported_dists
import salt.ext.six as six
_supported_dists += ('arch', 'mageia', 'meego', 'vmware', 'bluewhite64',
'slamd64', 'ovs', 'system', 'mint', 'oracle')
# Import salt libs
import salt.log
import salt.utils
import salt.utils.network
from salt.ext.six import string_types
# Solve the chicken-and-egg problem: grains need to run before any of the
# modules are loaded and be generally available for any usage.
import salt.modules.cmdmod
__salt__ = {
'cmd.run': salt.modules.cmdmod._run_quiet,
'cmd.retcode': salt.modules.cmdmod._retcode_quiet,
'cmd.run_all': salt.modules.cmdmod._run_all_quiet
}
log = logging.getLogger(__name__)
HAS_WMI = False
if salt.utils.is_windows():
# attempt to import the python wmi module
# the Windows minion uses WMI for some of its grains
try:
import wmi
import salt.utils.winapi
HAS_WMI = True
except ImportError:
log.exception(
'Unable to import Python wmi module, some core grains '
'will be missing'
)
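# Cache of network interface data, populated lazily by _get_interfaces()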
_INTERFACES = {}
def _windows_cpudata():
'''
Return some CPU information on Windows minions
'''
# Provides:
# num_cpus
# cpu_model
grains = {}
if 'NUMBER_OF_PROCESSORS' in os.environ:
# Cast to int so that the logic isn't broken when used as a
# conditional in templating. Also follows _linux_cpudata()
try:
grains['num_cpus'] = int(os.environ['NUMBER_OF_PROCESSORS'])
except ValueError:
grains['num_cpus'] = 1
grains['cpu_model'] = platform.processor()
return grains
def _linux_cpudata():
'''
Return some CPU information for Linux minions
'''
# Provides:
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cpuinfo = '/proc/cpuinfo'
# Parse over the cpuinfo file
if os.path.isfile(cpuinfo):
with salt.utils.fopen(cpuinfo, 'r') as _fp:
for line in _fp:
comps = line.split(':')
if not len(comps) > 1:
continue
key = comps[0].strip()
val = comps[1].strip()
if key == 'processor':
grains['num_cpus'] = int(val) + 1
elif key == 'model name':
grains['cpu_model'] = val
elif key == 'flags':
grains['cpu_flags'] = val.split()
elif key == 'Features':
grains['cpu_flags'] = val.split()
# ARM support - /proc/cpuinfo
#
# Processor : ARMv6-compatible processor rev 7 (v6l)
# BogoMIPS : 697.95
# Features : swp half thumb fastmult vfp edsp java tls
# CPU implementer : 0x41
# CPU architecture: 7
# CPU variant : 0x0
# CPU part : 0xb76
# CPU revision : 7
#
# Hardware : BCM2708
# Revision : 0002
# Serial : 00000000
elif key == 'Processor':
grains['cpu_model'] = val.split('-')[0]
grains['num_cpus'] = 1
if 'num_cpus' not in grains:
grains['num_cpus'] = 0
if 'cpu_model' not in grains:
grains['cpu_model'] = 'Unknown'
if 'cpu_flags' not in grains:
grains['cpu_flags'] = []
return grains
def _linux_gpu_data():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
if __opts__.get('enable_lspci', True) is False:
return {}
if __opts__.get('enable_gpu_grains', True) is False:
return {}
lspci = salt.utils.which('lspci')
if not lspci:
log.debug(
'The `lspci` binary is not available on the system. GPU grains '
'will not be available.'
)
return {}
# dominant gpu vendors to search for (MUST be lowercase for matching below)
known_vendors = ['nvidia', 'amd', 'ati', 'intel']
gpu_classes = ('vga compatible controller', '3d controller')
devs = []
try:
lspci_out = __salt__['cmd.run']('lspci -vmm')
cur_dev = {}
error = False
# Add a blank element to the lspci_out.splitlines() list,
# otherwise the last device is not evaluated as a cur_dev and ignored.
lspci_list = lspci_out.splitlines()
lspci_list.append('')
for line in lspci_list:
# check for record-separating empty lines
if line == '':
if cur_dev.get('Class', '').lower() in gpu_classes:
devs.append(cur_dev)
cur_dev = {}
continue
if re.match(r'^\w+:\s+.*', line):
key, val = line.split(':', 1)
cur_dev[key.strip()] = val.strip()
else:
error = True
log.debug('Unexpected lspci output: {0!r}'.format(line))
if error:
log.warn(
'Error loading grains, unexpected linux_gpu_data output, '
'check that you have a valid shell configured and '
'permissions to run lspci command'
)
except OSError:
pass
gpus = []
for gpu in devs:
vendor_strings = gpu['Vendor'].lower().split()
# default vendor to 'unknown', overwrite if we match a known one
vendor = 'unknown'
for name in known_vendors:
# search for an 'expected' vendor name in the list of strings
if name in vendor_strings:
vendor = name
break
gpus.append({'vendor': vendor, 'model': gpu['Device']})
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains
def _netbsd_gpu_data():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
known_vendors = ['nvidia', 'amd', 'ati', 'intel', 'cirrus logic', 'vmware']
gpus = []
try:
pcictl_out = __salt__['cmd.run']('pcictl pci0 list')
for line in pcictl_out.splitlines():
for vendor in known_vendors:
vendor_match = re.match(
r'[0-9:]+ ({0}) (.+) \(VGA .+\)'.format(vendor),
line,
re.IGNORECASE
)
if vendor_match:
gpus.append({'vendor': vendor_match.group(1), 'model': vendor_match.group(2)})
except OSError:
pass
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains
def _osx_gpudata():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
gpus = []
try:
pcictl_out = __salt__['cmd.run']('system_profiler SPDisplaysDataType')
for line in pcictl_out.splitlines():
fieldname, _, fieldval = line.partition(': ')
if fieldname.strip() == "Chipset Model":
vendor, _, model = fieldval.partition(' ')
vendor = vendor.lower()
gpus.append({'vendor': vendor, 'model': model})
except OSError:
pass
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains
def _bsd_cpudata(osdata):
'''
Return CPU information for BSD-like systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
sysctl = salt.utils.which('sysctl')
arch = salt.utils.which('arch')
cmds = {}
if sysctl:
cmds.update({
'num_cpus': '{0} -n hw.ncpu'.format(sysctl),
'cpuarch': '{0} -n hw.machine'.format(sysctl),
'cpu_model': '{0} -n hw.model'.format(sysctl),
})
if arch and osdata['kernel'] == 'OpenBSD':
cmds['cpuarch'] = '{0} -s'.format(arch)
if osdata['kernel'] == 'Darwin':
cmds['cpu_model'] = '{0} -n machdep.cpu.brand_string'.format(sysctl)
cmds['cpu_flags'] = '{0} -n machdep.cpu.features'.format(sysctl)
grains = dict([(k, __salt__['cmd.run'](v)) for k, v in cmds.items()])
if 'cpu_flags' in grains and isinstance(grains['cpu_flags'], string_types):
grains['cpu_flags'] = grains['cpu_flags'].split(' ')
if osdata['kernel'] == 'NetBSD':
grains['cpu_flags'] = []
for line in __salt__['cmd.run']('cpuctl identify 0').splitlines():
cpu_match = re.match(r'cpu[0-9]:\ features[0-9]?\ .+<(.+)>', line)
if cpu_match:
flag = cpu_match.group(1).split(',')
grains['cpu_flags'].extend(flag)
if osdata['kernel'] == 'FreeBSD' and os.path.isfile('/var/run/dmesg.boot'):
grains['cpu_flags'] = []
        # TODO: at least it needs to be tested for BSDs other than FreeBSD
with salt.utils.fopen('/var/run/dmesg.boot', 'r') as _fp:
cpu_here = False
for line in _fp:
if line.startswith('CPU: '):
cpu_here = True # starts CPU descr
continue
if cpu_here:
if not line.startswith(' '):
break # game over
if 'Features' in line:
start = line.find('<')
end = line.find('>')
if start > 0 and end > 0:
flag = line[start + 1:end].split(',')
grains['cpu_flags'].extend(flag)
try:
grains['num_cpus'] = int(grains['num_cpus'])
except ValueError:
grains['num_cpus'] = 1
return grains
def _sunos_cpudata():
'''
Return the CPU information for Solaris-like systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
grains['cpu_flags'] = []
grains['cpuarch'] = __salt__['cmd.run']('uname -p')
psrinfo = '/usr/sbin/psrinfo 2>/dev/null'
grains['num_cpus'] = len(__salt__['cmd.run'](psrinfo, python_shell=True).splitlines())
kstat_info = 'kstat -p cpu_info:0:*:brand'
for line in __salt__['cmd.run'](kstat_info).splitlines():
match = re.match(r'(\w+:\d+:\w+\d+:\w+)\s+(.+)', line)
if match:
grains['cpu_model'] = match.group(2)
isainfo = 'isainfo -n -v'
for line in __salt__['cmd.run'](isainfo).splitlines():
match = re.match(r'^\s+(.+)', line)
if match:
cpu_flags = match.group(1).split()
grains['cpu_flags'].extend(cpu_flags)
return grains
def _memdata(osdata):
'''
Gather information about the system memory
'''
# Provides:
# mem_total
grains = {'mem_total': 0}
if osdata['kernel'] == 'Linux':
meminfo = '/proc/meminfo'
if os.path.isfile(meminfo):
with salt.utils.fopen(meminfo, 'r') as ifile:
for line in ifile:
comps = line.rstrip('\n').split(':')
if not len(comps) > 1:
continue
if comps[0].strip() == 'MemTotal':
grains['mem_total'] = int(comps[1].split()[0]) / 1024
elif osdata['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD', 'Darwin'):
sysctl = salt.utils.which('sysctl')
if sysctl:
if osdata['kernel'] == 'Darwin':
mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl))
else:
mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl))
if osdata['kernel'] == 'NetBSD' and mem.startswith('-'):
mem = __salt__['cmd.run']('{0} -n hw.physmem64'.format(sysctl))
grains['mem_total'] = int(mem) / 1024 / 1024
elif osdata['kernel'] == 'SunOS':
prtconf = '/usr/sbin/prtconf 2>/dev/null'
for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines():
comps = line.split(' ')
if comps[0].strip() == 'Memory' and comps[1].strip() == 'size:':
grains['mem_total'] = int(comps[2].strip())
elif osdata['kernel'] == 'Windows' and HAS_WMI:
with salt.utils.winapi.Com():
wmi_c = wmi.WMI()
# this is a list of each stick of ram in a system
# WMI returns it as the string value of the number of bytes
tot_bytes = sum([int(x.Capacity) for x in wmi_c.Win32_PhysicalMemory()], 0)
            # return memory info in megabytes
grains['mem_total'] = int(tot_bytes / (1024 ** 2))
return grains
def _windows_virtual(osdata):
'''
Returns what type of virtual hardware is under the hood, kvm or physical
'''
# Provides:
# virtual
# virtual_subtype
grains = dict()
if osdata['kernel'] != 'Windows':
return grains
if 'QEMU' in osdata.get('manufacturer', ''):
# FIXME: Make this detect between kvm or qemu
grains['virtual'] = 'kvm'
if 'Bochs' in osdata.get('manufacturer', ''):
grains['virtual'] = 'kvm'
# Product Name: (oVirt) www.ovirt.org
# Red Hat Community virtualization Project based on kvm
elif 'oVirt' in osdata.get('productname', ''):
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'oVirt'
# Red Hat Enterprise Virtualization
elif 'RHEV Hypervisor' in osdata.get('productname', ''):
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'rhev'
# Product Name: VirtualBox
elif 'VirtualBox' in osdata.get('productname', ''):
grains['virtual'] = 'VirtualBox'
# Product Name: VMware Virtual Platform
elif 'VMware Virtual Platform' in osdata.get('productname', ''):
grains['virtual'] = 'VMware'
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
elif 'Microsoft' in osdata.get('manufacturer', '') and \
'Virtual Machine' in osdata.get('productname', ''):
grains['virtual'] = 'VirtualPC'
# Manufacturer: Parallels Software International Inc.
    elif 'Parallels Software' in osdata.get('manufacturer', ''):
grains['virtual'] = 'Parallels'
if HAS_WMI:
pass
return grains
def _virtual(osdata):
'''
Returns what type of virtual hardware is under the hood, kvm or physical
'''
    # This is going to be a monster. If you are running a VM, please test this
    # grain and submit patches!
# Provides:
# virtual
# virtual_subtype
grains = {'virtual': 'physical'}
# Skip the below loop on platforms which have none of the desired cmds
# This is a temporary measure until we can write proper virtual hardware
# detection.
skip_cmds = ('AIX',)
# Check if enable_lspci is True or False
if __opts__.get('enable_lspci', True) is False:
_cmds = ('dmidecode', 'dmesg')
elif osdata['kernel'] in skip_cmds:
_cmds = ()
else:
        # /proc/bus/pci does not exist, lspci will fail
if not os.path.exists('/proc/bus/pci'):
_cmds = ('dmidecode', 'dmesg')
else:
_cmds = ('dmidecode', 'lspci', 'dmesg')
failed_commands = set()
for command in _cmds:
args = []
if osdata['kernel'] == 'Darwin':
command = 'system_profiler'
args = ['SPDisplaysDataType']
cmd = salt.utils.which(command)
if not cmd:
continue
cmd = '{0} {1}'.format(command, ' '.join(args))
ret = __salt__['cmd.run_all'](cmd)
if ret['retcode'] > 0:
if salt.log.is_logging_configured():
if salt.utils.is_windows():
continue
failed_commands.add(command)
continue
output = ret['stdout']
if command == "system_profiler":
macoutput = output.lower()
if '0x1ab8' in macoutput:
grains['virtual'] = 'Parallels'
if 'parallels' in macoutput:
grains['virtual'] = 'Parallels'
if 'vmware' in macoutput:
grains['virtual'] = 'VMware'
if '0x15ad' in macoutput:
grains['virtual'] = 'VMware'
if 'virtualbox' in macoutput:
grains['virtual'] = 'VirtualBox'
# Break out of the loop so the next log message is not issued
break
elif command == 'dmidecode' or command == 'dmesg':
# Product Name: VirtualBox
if 'Vendor: QEMU' in output:
# FIXME: Make this detect between kvm or qemu
grains['virtual'] = 'kvm'
if 'Manufacturer: QEMU' in output:
grains['virtual'] = 'kvm'
if 'Vendor: Bochs' in output:
grains['virtual'] = 'kvm'
if 'BHYVE BVXSDT' in output:
grains['virtual'] = 'bhyve'
# Product Name: (oVirt) www.ovirt.org
# Red Hat Community virtualization Project based on kvm
elif 'Manufacturer: oVirt' in output:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'ovirt'
# Red Hat Enterprise Virtualization
elif 'Product Name: RHEV Hypervisor' in output:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'rhev'
elif 'VirtualBox' in output:
grains['virtual'] = 'VirtualBox'
# Product Name: VMware Virtual Platform
elif 'VMware' in output:
grains['virtual'] = 'VMware'
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
elif ': Microsoft' in output and 'Virtual Machine' in output:
grains['virtual'] = 'VirtualPC'
# Manufacturer: Parallels Software International Inc.
elif 'Parallels Software' in output:
grains['virtual'] = 'Parallels'
# Break out of the loop, lspci parsing is not necessary
break
elif command == 'lspci':
# dmidecode not available or the user does not have the necessary
# permissions
model = output.lower()
if 'vmware' in model:
grains['virtual'] = 'VMware'
# 00:04.0 System peripheral: InnoTek Systemberatung GmbH
# VirtualBox Guest Service
elif 'virtualbox' in model:
grains['virtual'] = 'VirtualBox'
elif 'qemu' in model:
grains['virtual'] = 'kvm'
elif 'virtio' in model:
grains['virtual'] = 'kvm'
# Break out of the loop so the next log message is not issued
break
else:
if osdata['kernel'] in skip_cmds:
log.warn(
                'The tools \'dmidecode\', \'lspci\' and \'dmesg\' failed to '
                'execute because they do not exist on the system running this '
                'instance, or because the user does not have the necessary '
                'permissions to execute them. Grains output might not be '
                'accurate.'
)
choices = ('Linux', 'OpenBSD', 'HP-UX')
isdir = os.path.isdir
sysctl = salt.utils.which('sysctl')
if osdata['kernel'] in choices:
if os.path.isdir('/proc'):
try:
self_root = os.stat('/')
init_root = os.stat('/proc/1/root/.')
if self_root != init_root:
grains['virtual_subtype'] = 'chroot'
except (IOError, OSError):
pass
if os.path.isfile('/proc/1/cgroup'):
try:
with salt.utils.fopen('/proc/1/cgroup', 'r') as fhr:
if ':/lxc/' in fhr.read():
grains['virtual_subtype'] = 'LXC'
with salt.utils.fopen('/proc/1/cgroup', 'r') as fhr:
if ':/docker/' in fhr.read():
grains['virtual_subtype'] = 'Docker'
except IOError:
pass
if isdir('/proc/vz'):
if os.path.isfile('/proc/vz/version'):
grains['virtual'] = 'openvzhn'
elif os.path.isfile('/proc/vz/veinfo'):
grains['virtual'] = 'openvzve'
# a posteriori, it's expected for these to have failed:
failed_commands.discard('lspci')
failed_commands.discard('dmidecode')
# Provide additional detection for OpenVZ
if os.path.isfile('/proc/self/status'):
with salt.utils.fopen('/proc/self/status') as status_file:
vz_re = re.compile(r'^envID:\s+(\d+)$')
for line in status_file:
vz_match = vz_re.match(line.rstrip('\n'))
if vz_match and int(vz_match.groups()[0]) != 0:
grains['virtual'] = 'openvzve'
elif vz_match and int(vz_match.groups()[0]) == 0:
grains['virtual'] = 'openvzhn'
if isdir('/proc/sys/xen') or \
isdir('/sys/bus/xen') or isdir('/proc/xen'):
if os.path.isfile('/proc/xen/xsd_kva'):
# Tested on CentOS 5.3 / 2.6.18-194.26.1.el5xen
# Tested on CentOS 5.4 / 2.6.18-164.15.1.el5xen
grains['virtual_subtype'] = 'Xen Dom0'
else:
if grains.get('productname', '') == 'HVM domU':
# Requires dmidecode!
grains['virtual_subtype'] = 'Xen HVM DomU'
elif os.path.isfile('/proc/xen/capabilities') and \
os.access('/proc/xen/capabilities', os.R_OK):
with salt.utils.fopen('/proc/xen/capabilities') as fhr:
if 'control_d' not in fhr.read():
# Tested on CentOS 5.5 / 2.6.18-194.3.1.el5xen
grains['virtual_subtype'] = 'Xen PV DomU'
else:
# Shouldn't get to this, but just in case
grains['virtual_subtype'] = 'Xen Dom0'
# Tested on Fedora 10 / 2.6.27.30-170.2.82 with xen
# Tested on Fedora 15 / 2.6.41.4-1 without running xen
elif isdir('/sys/bus/xen'):
if 'xen:' in __salt__['cmd.run']('dmesg').lower():
grains['virtual_subtype'] = 'Xen PV DomU'
elif os.listdir('/sys/bus/xen/drivers'):
# An actual DomU will have several drivers
# whereas a paravirt ops kernel will not.
grains['virtual_subtype'] = 'Xen PV DomU'
# If a Dom0 or DomU was detected, obviously this is xen
if 'dom' in grains.get('virtual_subtype', '').lower():
grains['virtual'] = 'xen'
if os.path.isfile('/proc/cpuinfo'):
with salt.utils.fopen('/proc/cpuinfo', 'r') as fhr:
if 'QEMU Virtual CPU' in fhr.read():
grains['virtual'] = 'kvm'
elif osdata['kernel'] == 'FreeBSD':
kenv = salt.utils.which('kenv')
if kenv:
product = __salt__['cmd.run'](
'{0} smbios.system.product'.format(kenv)
)
maker = __salt__['cmd.run']('{0} smbios.system.maker'.format(kenv))
if product.startswith('VMware'):
grains['virtual'] = 'VMware'
if maker.startswith('Xen'):
grains['virtual_subtype'] = '{0} {1}'.format(maker, product)
grains['virtual'] = 'xen'
if maker.startswith('Microsoft') and product.startswith('Virtual'):
grains['virtual'] = 'VirtualPC'
if maker.startswith('OpenStack'):
grains['virtual'] = 'OpenStack'
if sysctl:
model = __salt__['cmd.run']('{0} hw.model'.format(sysctl))
jail = __salt__['cmd.run'](
'{0} -n security.jail.jailed'.format(sysctl)
)
if jail == '1':
grains['virtual_subtype'] = 'jail'
if 'QEMU Virtual CPU' in model:
grains['virtual'] = 'kvm'
elif osdata['kernel'] == 'SunOS':
# Check if it's a "regular" zone. (i.e. Solaris 10/11 zone)
zonename = salt.utils.which('zonename')
if zonename:
zone = __salt__['cmd.run']('{0}'.format(zonename))
if zone != 'global':
grains['virtual'] = 'zone'
if osdata['os'] == 'SmartOS':
grains.update(_smartos_zone_data())
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if isdir('/.SUNWnative'):
grains['virtual'] = 'zone'
elif osdata['kernel'] == 'NetBSD':
if sysctl:
if 'QEMU Virtual CPU' in __salt__['cmd.run'](
'{0} -n machdep.cpu_brand'.format(sysctl)):
grains['virtual'] = 'kvm'
elif 'invalid' not in __salt__['cmd.run'](
'{0} -n machdep.xen.suspend'.format(sysctl)):
grains['virtual'] = 'Xen PV DomU'
elif 'VMware' in __salt__['cmd.run'](
'{0} -n machdep.dmi.system-vendor'.format(sysctl)):
grains['virtual'] = 'VMware'
# NetBSD has Xen dom0 support
elif __salt__['cmd.run'](
'{0} -n machdep.idle-mechanism'.format(sysctl)) == 'xen':
if os.path.isfile('/var/run/xenconsoled.pid'):
grains['virtual_subtype'] = 'Xen Dom0'
for command in failed_commands:
log.warn(
'Although {0!r} was found in path, the current user '
'cannot execute it. Grains output might not be '
'accurate.'.format(command)
)
return grains
def _ps(osdata):
'''
Return the ps grain
'''
grains = {}
bsd_choices = ('FreeBSD', 'NetBSD', 'OpenBSD', 'MacOS')
if osdata['os'] in bsd_choices:
grains['ps'] = 'ps auxwww'
elif osdata['os_family'] == 'Solaris':
grains['ps'] = '/usr/ucb/ps auxwww'
elif osdata['os'] == 'Windows':
grains['ps'] = 'tasklist.exe'
elif osdata.get('virtual', '') == 'openvzhn':
grains['ps'] = (
'ps -fH -p $(grep -l \"^envID:[[:space:]]*0\\$\" '
'/proc/[0-9]*/status | sed -e \"s=/proc/\\([0-9]*\\)/.*=\\1=\") '
'| awk \'{ $7=\"\"; print }\''
)
elif osdata['os_family'] == 'Debian':
grains['ps'] = 'ps -efHww'
else:
grains['ps'] = 'ps -efH'
return grains
def _windows_platform_data():
'''
Use the platform module for as much as we can.
'''
# Provides:
# osmanufacturer
# manufacturer
# productname
# biosversion
# serialnumber
# osfullname
# timezone
# windowsdomain
# motherboard.productname
# motherboard.serialnumber
if not HAS_WMI:
return {}
with salt.utils.winapi.Com():
wmi_c = wmi.WMI()
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394102%28v=vs.85%29.aspx
systeminfo = wmi_c.Win32_ComputerSystem()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394239%28v=vs.85%29.aspx
osinfo = wmi_c.Win32_OperatingSystem()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394077(v=vs.85).aspx
biosinfo = wmi_c.Win32_BIOS()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394498(v=vs.85).aspx
timeinfo = wmi_c.Win32_TimeZone()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394072(v=vs.85).aspx
motherboard = {}
motherboard['product'] = None
motherboard['serial'] = None
try:
motherboardinfo = wmi_c.Win32_BaseBoard()[0]
motherboard['product'] = motherboardinfo.Product
motherboard['serial'] = motherboardinfo.SerialNumber
except IndexError:
        log.debug('Motherboard info not available on this system')
# the name of the OS comes with a bunch of other data about the install
# location. For example:
# 'Microsoft Windows Server 2008 R2 Standard |C:\\Windows|\\Device\\Harddisk0\\Partition2'
(osfullname, _) = osinfo.Name.split('|', 1)
osfullname = osfullname.strip()
grains = {
'osmanufacturer': osinfo.Manufacturer,
'manufacturer': systeminfo.Manufacturer,
'productname': systeminfo.Model,
# bios name had a bunch of whitespace appended to it in my testing
# 'PhoenixBIOS 4.0 Release 6.0 '
'biosversion': biosinfo.Name.strip(),
'serialnumber': biosinfo.SerialNumber,
'osfullname': osfullname,
'timezone': timeinfo.Description,
'windowsdomain': systeminfo.Domain,
'motherboard': {
'productname': motherboard['product'],
'serialnumber': motherboard['serial']
}
}
# test for virtualized environments
# I only had VMware available so the rest are unvalidated
if 'VRTUAL' in biosinfo.Version: # (not a typo)
grains['virtual'] = 'HyperV'
elif 'A M I' in biosinfo.Version:
grains['virtual'] = 'VirtualPC'
elif 'VMware' in systeminfo.Model:
grains['virtual'] = 'VMware'
elif 'VirtualBox' in systeminfo.Model:
grains['virtual'] = 'VirtualBox'
elif 'Xen' in biosinfo.Version:
grains['virtual'] = 'Xen'
if 'HVM domU' in systeminfo.Model:
grains['virtual_subtype'] = 'HVM domU'
elif 'OpenStack' in systeminfo.Model:
grains['virtual'] = 'OpenStack'
return grains
def id_():
'''
Return the id
'''
return {'id': __opts__.get('id', '')}
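# Strips the word 'linux' (case-insensitive) from the distro name when building the short OS name below.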
_REPLACE_LINUX_RE = re.compile(r'linux', re.IGNORECASE)
# This maps (at most) the first ten characters (no spaces, lowercased) of
# 'osfullname' to the 'os' grain that Salt traditionally uses.
# Please see os_data() and _supported_dists.
# If your system is not detecting properly it likely needs an entry here.
_OS_NAME_MAP = {
'redhatente': 'RedHat',
'gentoobase': 'Gentoo',
'archarm': 'Arch ARM',
'arch': 'Arch',
'debian': 'Debian',
'debiangnu/': 'Debian',
'raspbiangn': 'Raspbian',
'fedoraremi': 'Fedora',
'amazonami': 'Amazon',
'alt': 'ALT',
'enterprise': 'OEL',
'oracleserv': 'OEL',
'cloudserve': 'CloudLinux',
'pidora': 'Fedora',
'scientific': 'ScientificLinux'
}
# Map the 'os' grain to the 'os_family' grain
# These should always be capitalized entries as the lookup comes
# post-_OS_NAME_MAP. If your system is having trouble with detection, please
# make sure that the 'os' grain is capitalized and working correctly first.
_OS_FAMILY_MAP = {
'Ubuntu': 'Debian',
'Fedora': 'RedHat',
'CentOS': 'RedHat',
'GoOSe': 'RedHat',
'Scientific': 'RedHat',
'Amazon': 'RedHat',
'CloudLinux': 'RedHat',
'OVS': 'RedHat',
'OEL': 'RedHat',
'XCP': 'RedHat',
'XenServer': 'RedHat',
'Mandrake': 'Mandriva',
'ESXi': 'VMWare',
'Mint': 'Debian',
'VMWareESX': 'VMWare',
'Bluewhite64': 'Bluewhite',
'Slamd64': 'Slackware',
'SLES': 'Suse',
    'SUSE Enterprise Server': 'Suse',
'SLED': 'Suse',
'openSUSE': 'Suse',
'SUSE': 'Suse',
'Solaris': 'Solaris',
'SmartOS': 'Solaris',
'OpenIndiana Development': 'Solaris',
'OpenIndiana': 'Solaris',
'OpenSolaris Development': 'Solaris',
'OpenSolaris': 'Solaris',
'Arch ARM': 'Arch',
'ALT': 'RedHat',
'Trisquel': 'Debian',
'GCEL': 'Debian',
'Linaro': 'Debian',
'elementary OS': 'Debian',
'ScientificLinux': 'RedHat',
'Raspbian': 'Debian'
}
def _linux_bin_exists(binary):
'''
    Check whether a binary exists on Linux (depends on the `which` command)
'''
return __salt__['cmd.retcode'](
'which {0}'.format(binary)
) == 0
def _get_interfaces():
'''
Provide a dict of the connected interfaces and their ip addresses
'''
global _INTERFACES
if not _INTERFACES:
_INTERFACES = salt.utils.network.interfaces()
return _INTERFACES
def os_data():
'''
Return grains pertaining to the operating system
'''
grains = {
'num_gpus': 0,
'gpus': [],
}
# Windows Server 2008 64-bit
# ('Windows', 'MINIONNAME', '2008ServerR2', '6.1.7601', 'AMD64',
    # 'Intel64 Family 6 Model 23 Stepping 6, GenuineIntel')
# Ubuntu 10.04
# ('Linux', 'MINIONNAME', '2.6.32-38-server',
# '#83-Ubuntu SMP Wed Jan 4 11:26:59 UTC 2012', 'x86_64', '')
# pylint: disable=unpacking-non-sequence
(grains['kernel'], grains['nodename'],
grains['kernelrelease'], version, grains['cpuarch'], _) = platform.uname()
# pylint: enable=unpacking-non-sequence
if salt.utils.is_windows():
grains['osrelease'] = grains['kernelrelease']
grains['osversion'] = grains['kernelrelease'] = version
grains['os'] = 'Windows'
grains['os_family'] = 'Windows'
grains.update(_memdata(grains))
grains.update(_windows_platform_data())
grains.update(_windows_cpudata())
grains.update(_windows_virtual(grains))
grains.update(_ps(grains))
return grains
elif salt.utils.is_linux():
# Add SELinux grain, if you have it
if _linux_bin_exists('selinuxenabled'):
grains['selinux'] = {}
grains['selinux']['enabled'] = __salt__['cmd.retcode'](
'selinuxenabled'
) == 0
if _linux_bin_exists('getenforce'):
grains['selinux']['enforced'] = __salt__['cmd.run'](
'getenforce'
).strip()
# Add systemd grain, if you have it
if _linux_bin_exists('systemctl') and _linux_bin_exists('localectl'):
grains['systemd'] = {}
systemd_info = __salt__['cmd.run'](
'systemctl --version'
).splitlines()
grains['systemd']['version'] = systemd_info[0].split()[1]
grains['systemd']['features'] = systemd_info[1]
# Add lsb grains on any distro with lsb-release
try:
import lsb_release
release = lsb_release.get_distro_information()
for key, value in six.iteritems(release):
key = key.lower()
lsb_param = 'lsb_{0}{1}'.format(
'' if key.startswith('distrib_') else 'distrib_',
key
)
grains[lsb_param] = value
except ImportError:
# if the python library isn't available, default to regex
if os.path.isfile('/etc/lsb-release'):
# Matches any possible format:
# DISTRIB_ID="Ubuntu"
# DISTRIB_ID='Mageia'
# DISTRIB_ID=Fedora
# DISTRIB_RELEASE='10.10'
# DISTRIB_CODENAME='squeeze'
# DISTRIB_DESCRIPTION='Ubuntu 10.10'
regex = re.compile((
'^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:\'|")?'
'([\\w\\s\\.-_]+)(?:\'|")?'
))
with salt.utils.fopen('/etc/lsb-release') as ifile:
for line in ifile:
match = regex.match(line.rstrip('\n'))
if match:
# Adds:
# lsb_distrib_{id,release,codename,description}
grains[
'lsb_{0}'.format(match.groups()[0].lower())
] = match.groups()[1].rstrip()
elif os.path.isfile('/etc/os-release'):
# Arch ARM Linux
with salt.utils.fopen('/etc/os-release') as ifile:
# Imitate lsb-release
for line in ifile:
# NAME="Arch Linux ARM"
# ID=archarm
# ID_LIKE=arch
# PRETTY_NAME="Arch Linux ARM"
# ANSI_COLOR="0;36"
# HOME_URL="http://archlinuxarm.org/"
# SUPPORT_URL="https://archlinuxarm.org/forum"
# BUG_REPORT_URL=
# "https://github.com/archlinuxarm/PKGBUILDs/issues"
regex = re.compile(
'^([\\w]+)=(?:\'|")?([\\w\\s\\.-_]+)(?:\'|")?'
)
match = regex.match(line.rstrip('\n'))
if match:
name, value = match.groups()
if name.lower() == 'name':
grains['lsb_distrib_id'] = value.strip()
elif os.path.isfile('/etc/SuSE-release'):
grains['lsb_distrib_id'] = 'SUSE'
with salt.utils.fopen('/etc/SuSE-release') as fhr:
rel = re.sub("[^0-9]", "", fhr.read().split('\n')[1])
with salt.utils.fopen('/etc/SuSE-release') as fhr:
patch = re.sub("[^0-9]", "", fhr.read().split('\n')[2])
release = rel + " SP" + patch
grains['lsb_distrib_release'] = release
grains['lsb_distrib_codename'] = "n.a"
elif os.path.isfile('/etc/altlinux-release'):
# ALT Linux
grains['lsb_distrib_id'] = 'altlinux'
with salt.utils.fopen('/etc/altlinux-release') as ifile:
# This file is symlinked to from:
# /etc/fedora-release
# /etc/redhat-release
# /etc/system-release
for line in ifile:
# ALT Linux Sisyphus (unstable)
comps = line.split()
if comps[0] == 'ALT':
grains['lsb_distrib_release'] = comps[2]
grains['lsb_distrib_codename'] = \
comps[3].replace('(', '').replace(')', '')
elif os.path.isfile('/etc/centos-release'):
# CentOS Linux
grains['lsb_distrib_id'] = 'CentOS'
with salt.utils.fopen('/etc/centos-release') as ifile:
for line in ifile:
# Need to pull out the version and codename
# in the case of custom content in /etc/centos-release
find_release = re.compile(r'\d+\.\d+')
find_codename = re.compile(r'(?<=\()(.*?)(?=\))')
release = find_release.search(line)
codename = find_codename.search(line)
if release is not None:
grains['lsb_distrib_release'] = release.group()
if codename is not None:
grains['lsb_distrib_codename'] = codename.group()
# Use the already intelligent platform module to get distro info
# (though apparently it's not intelligent enough to strip quotes)
(osname, osrelease, oscodename) = \
[x.strip('"').strip("'") for x in
platform.linux_distribution(supported_dists=_supported_dists)]
# Try to assign these three names based on the lsb info, they tend to
# be more accurate than what python gets from /etc/DISTRO-release.
# It's worth noting that Ubuntu has patched their Python distribution
# so that platform.linux_distribution() does the /etc/lsb-release
        # parsing, but we do it anyway here for the sake of full portability.
grains['osfullname'] = grains.get('lsb_distrib_id', osname).strip()
grains['osrelease'] = grains.get('lsb_distrib_release',
osrelease).strip()
grains['oscodename'] = grains.get('lsb_distrib_codename',
oscodename).strip()
distroname = _REPLACE_LINUX_RE.sub('', grains['osfullname']).strip()
# return the first ten characters with no spaces, lowercased
shortname = distroname.replace(' ', '').lower()[:10]
# this maps the long names from the /etc/DISTRO-release files to the
# traditional short names that Salt has used.
grains['os'] = _OS_NAME_MAP.get(shortname, distroname)
grains.update(_linux_cpudata())
grains.update(_linux_gpu_data())
elif grains['kernel'] == 'SunOS':
grains['os_family'] = 'Solaris'
uname_v = __salt__['cmd.run']('uname -v')
if 'joyent_' in uname_v:
# See https://github.com/joyent/smartos-live/issues/224
grains['os'] = grains['osfullname'] = 'SmartOS'
grains['osrelease'] = uname_v
elif os.path.isfile('/etc/release'):
with salt.utils.fopen('/etc/release', 'r') as fp_:
rel_data = fp_.read()
try:
release_re = re.compile(
r'((?:Open)?Solaris|OpenIndiana) (Development)?'
r'\s*(\d+ \d+\/\d+|oi_\S+|snv_\S+)?'
)
osname, development, osrelease = \
release_re.search(rel_data).groups()
except AttributeError:
# Set a blank osrelease grain and fallback to 'Solaris'
# as the 'os' grain.
grains['os'] = grains['osfullname'] = 'Solaris'
grains['osrelease'] = ''
else:
if development is not None:
osname = ' '.join((osname, development))
grains['os'] = grains['osfullname'] = osname
grains['osrelease'] = osrelease
grains.update(_sunos_cpudata())
elif grains['kernel'] == 'VMkernel':
grains['os'] = 'ESXi'
elif grains['kernel'] == 'Darwin':
osrelease = __salt__['cmd.run']('sw_vers -productVersion')
grains['os'] = 'MacOS'
grains['osrelease'] = osrelease
grains.update(_bsd_cpudata(grains))
grains.update(_osx_gpudata())
else:
grains['os'] = grains['kernel']
if grains['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD'):
grains.update(_bsd_cpudata(grains))
grains['osrelease'] = grains['kernelrelease'].split('-')[0]
if grains['kernel'] == 'NetBSD':
grains.update(_netbsd_gpu_data())
if not grains['os']:
grains['os'] = 'Unknown {0}'.format(grains['kernel'])
grains['os_family'] = 'Unknown'
else:
# this assigns family names based on the os name
# family defaults to the os name if not found
grains['os_family'] = _OS_FAMILY_MAP.get(grains['os'],
grains['os'])
# Build the osarch grain. This grain will be used for platform-specific
# considerations such as package management. Fall back to the CPU
# architecture.
if grains.get('os_family') == 'Debian':
osarch = __salt__['cmd.run']('dpkg --print-architecture').strip()
elif grains.get('os') == 'Fedora':
osarch = __salt__['cmd.run']('rpm --eval %{_host_cpu}').strip()
else:
osarch = grains['cpuarch']
grains['osarch'] = osarch
grains.update(_memdata(grains))
# Get the hardware and bios data
grains.update(_hw_data(grains))
# Load the virtual machine info
grains.update(_virtual(grains))
grains.update(_ps(grains))
# Load additional OS family grains
if grains['os_family'] == "RedHat":
grains['osmajorrelease'] = grains['osrelease'].split('.', 1)[0]
grains['osfinger'] = '{os}-{ver}'.format(
os=grains['osfullname'],
ver=grains['osrelease'].partition('.')[0])
elif grains.get('osfullname') == 'Ubuntu':
grains['osfinger'] = '{os}-{ver}'.format(
os=grains['osfullname'],
ver=grains['osrelease'])
elif grains.get('osfullname') == "Debian":
grains['osmajorrelease'] = grains['osrelease'].split('.', 1)[0]
grains['osfinger'] = '{os}-{ver}'.format(
os=grains['osfullname'],
ver=grains['osrelease'].partition('.')[0])
elif grains.get('os') in ('FreeBSD', 'OpenBSD', 'NetBSD'):
grains['osmajorrelease'] = grains['osrelease'].split('.', 1)[0]
grains['osfinger'] = '{os}-{ver}'.format(
os=grains['os'],
ver=grains['osrelease'])
if grains.get('osrelease', ''):
osrelease_info = grains['osrelease'].split('.')
for idx, value in enumerate(osrelease_info):
if not value.isdigit():
continue
osrelease_info[idx] = int(value)
grains['osrelease_info'] = tuple(osrelease_info)
return grains
def locale_info():
'''
Provides
defaultlanguage
defaultencoding
'''
grains = {}
grains['locale_info'] = {}
if 'proxyminion' in __opts__:
return grains
try:
(
grains['locale_info']['defaultlanguage'],
grains['locale_info']['defaultencoding']
) = locale.getdefaultlocale()
except Exception:
        # locale.getdefaultlocale can raise ValueError! Catch anything else it
# might do, per #2205
grains['locale_info']['defaultlanguage'] = 'unknown'
grains['locale_info']['defaultencoding'] = 'unknown'
return grains
def hostname():
'''
Return fqdn, hostname, domainname
'''
# This is going to need some work
# Provides:
# fqdn
# host
# localhost
# domain
grains = {}
if 'proxyminion' in __opts__:
return grains
grains['localhost'] = socket.gethostname()
grains['fqdn'] = salt.utils.network.get_fqhostname()
(grains['host'], grains['domain']) = grains['fqdn'].partition('.')[::2]
return grains
def append_domain():
'''
Return append_domain if set
'''
grain = {}
if 'proxyminion' in __opts__:
return grain
if 'append_domain' in __opts__:
grain['append_domain'] = __opts__['append_domain']
return grain
def ip4():
'''
Return a list of ipv4 addrs
'''
if 'proxyminion' in __opts__:
return {}
return {'ipv4': salt.utils.network.ip_addrs(include_loopback=True)}
def fqdn_ip4():
'''
Return a list of ipv4 addrs of fqdn
'''
if 'proxyminion' in __opts__:
return {}
try:
info = socket.getaddrinfo(hostname()['fqdn'], None, socket.AF_INET)
addrs = list(set(item[4][0] for item in info))
except socket.error:
addrs = []
return {'fqdn_ip4': addrs}
def ip6():
'''
Return a list of ipv6 addrs
'''
if 'proxyminion' in __opts__:
return {}
return {'ipv6': salt.utils.network.ip_addrs6(include_loopback=True)}
def fqdn_ip6():
'''
Return a list of ipv6 addrs of fqdn
'''
if 'proxyminion' in __opts__:
return {}
try:
info = socket.getaddrinfo(hostname()['fqdn'], None, socket.AF_INET6)
addrs = list(set(item[4][0] for item in info))
except socket.error:
addrs = []
return {'fqdn_ip6': addrs}
def ip_interfaces():
'''
Provide a dict of the connected interfaces and their ip addresses
'''
# Provides:
# ip_interfaces
if 'proxyminion' in __opts__:
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get('inet', []):
if 'address' in inet:
iface_ips.append(inet['address'])
for inet in ifaces[face].get('inet6', []):
if 'address' in inet:
iface_ips.append(inet['address'])
for secondary in ifaces[face].get('secondary', []):
if 'address' in secondary:
iface_ips.append(secondary['address'])
ret[face] = iface_ips
return {'ip_interfaces': ret}
def ip4_interfaces():
'''
Provide a dict of the connected interfaces and their ip4 addresses
'''
# Provides:
# ip_interfaces
if 'proxyminion' in __opts__:
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get('inet', []):
if 'address' in inet:
iface_ips.append(inet['address'])
for secondary in ifaces[face].get('secondary', []):
if 'address' in secondary:
iface_ips.append(secondary['address'])
ret[face] = iface_ips
return {'ip4_interfaces': ret}
def ip6_interfaces():
'''
Provide a dict of the connected interfaces and their ip6 addresses
'''
# Provides:
# ip_interfaces
if 'proxyminion' in __opts__:
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get('inet6', []):
if 'address' in inet:
iface_ips.append(inet['address'])
for secondary in ifaces[face].get('secondary', []):
if 'address' in secondary:
iface_ips.append(secondary['address'])
ret[face] = iface_ips
return {'ip6_interfaces': ret}
def hwaddr_interfaces():
'''
Provide a dict of the connected interfaces and their
hw addresses (Mac Address)
'''
# Provides:
# hwaddr_interfaces
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
if 'hwaddr' in ifaces[face]:
ret[face] = ifaces[face]['hwaddr']
return {'hwaddr_interfaces': ret}
def get_machine_id():
'''
Provide the machine-id
'''
# Provides:
# machine-id
locations = ['/etc/machine-id', '/var/lib/dbus/machine-id']
existing_locations = [loc for loc in locations if os.path.exists(loc)]
if not existing_locations:
return {}
else:
with salt.utils.fopen(existing_locations[0]) as machineid:
return {'machine_id': machineid.read().strip()}
def path():
'''
Return the path
'''
# Provides:
# path
return {'path': os.environ.get('PATH', '').strip()}
def pythonversion():
'''
Return the Python version
'''
# Provides:
# pythonversion
return {'pythonversion': list(sys.version_info)}
def pythonpath():
'''
Return the Python path
'''
# Provides:
# pythonpath
return {'pythonpath': sys.path}
def pythonexecutable():
'''
Return the python executable in use
'''
# Provides:
# pythonexecutable
return {'pythonexecutable': sys.executable}
def saltpath():
'''
Return the path of the salt module
'''
# Provides:
# saltpath
salt_path = os.path.abspath(os.path.join(__file__, os.path.pardir))
return {'saltpath': os.path.dirname(salt_path)}
def saltversion():
'''
Return the version of salt
'''
# Provides:
# saltversion
from salt.version import __version__
return {'saltversion': __version__}
def zmqversion():
'''
Return the zeromq version
'''
# Provides:
# zmqversion
try:
import zmq
return {'zmqversion': zmq.zmq_version()}
except ImportError:
return {}
def saltversioninfo():
'''
Return the version_info of salt
.. versionadded:: 0.17.0
'''
# Provides:
# saltversioninfo
from salt.version import __version_info__
return {'saltversioninfo': __version_info__}
# Relatively complex mini-algorithm to iterate over the various
# sections of dmidecode output and return matches for specific
# lines containing data we want, but only in the right section.
def _dmidecode_data(regex_dict):
'''
Parse the output of dmidecode in a generic fashion that can
be used for the multiple system types which have dmidecode.
'''
ret = {}
if 'proxyminion' in __opts__:
return {}
# No use running if dmidecode/smbios isn't in the path
if salt.utils.which('dmidecode'):
out = __salt__['cmd.run']('dmidecode')
elif salt.utils.which('smbios'):
out = __salt__['cmd.run']('smbios')
else:
log.debug(
            'Neither the `dmidecode` nor the `smbios` binary is available on '
            'the system. Hardware grains will not be available.'
)
return ret
for section in regex_dict:
section_found = False
# Look at every line for the right section
for line in out.splitlines():
if not line:
continue
# We've found it, woohoo!
if re.match(section, line):
section_found = True
continue
if not section_found:
continue
# Now that a section has been found, find the data
for item in regex_dict[section]:
# Examples:
# Product Name: 64639SU
# Version: 7LETC1WW (2.21 )
regex = re.compile(r'\s+{0}\s+(.*)$'.format(item))
grain = regex_dict[section][item]
# Skip to the next iteration if this grain
# has been found in the dmidecode output.
if grain in ret:
continue
match = regex.match(line)
# Finally, add the matched data to the grains returned
if match:
ret[grain] = match.group(1).strip()
return ret
def _hw_data(osdata):
'''
Get system specific hardware data from dmidecode
Provides
biosversion
productname
manufacturer
serialnumber
biosreleasedate
.. versionadded:: 0.9.5
'''
if 'proxyminion' in __opts__:
return {}
grains = {}
# TODO: *BSD dmidecode output
if osdata['kernel'] == 'Linux':
linux_dmi_regex = {
'BIOS [Ii]nformation': {
'[Vv]ersion:': 'biosversion',
'[Rr]elease [Dd]ate:': 'biosreleasedate',
},
'[Ss]ystem [Ii]nformation': {
'Manufacturer:': 'manufacturer',
'Product(?: Name)?:': 'productname',
'Serial Number:': 'serialnumber',
},
}
grains.update(_dmidecode_data(linux_dmi_regex))
elif osdata['kernel'] == 'SunOS':
sunos_dmi_regex = {
r'(.+)SMB_TYPE_BIOS\s\(BIOS [Ii]nformation\)': {
'[Vv]ersion [Ss]tring:': 'biosversion',
'[Rr]elease [Dd]ate:': 'biosreleasedate',
},
r'(.+)SMB_TYPE_SYSTEM\s\([Ss]ystem [Ii]nformation\)': {
'Manufacturer:': 'manufacturer',
'Product(?: Name)?:': 'productname',
'Serial Number:': 'serialnumber',
},
}
grains.update(_dmidecode_data(sunos_dmi_regex))
# On FreeBSD /bin/kenv (already in base system)
# can be used instead of dmidecode
elif osdata['kernel'] == 'FreeBSD':
kenv = salt.utils.which('kenv')
if kenv:
# In theory, it will be easier to add new fields to this later
fbsd_hwdata = {
'biosversion': 'smbios.bios.version',
'manufacturer': 'smbios.system.maker',
'serialnumber': 'smbios.system.serial',
'productname': 'smbios.system.product',
'biosreleasedate': 'smbios.bios.reldate',
}
for key, val in fbsd_hwdata.items():
grains[key] = __salt__['cmd.run']('{0} {1}'.format(kenv, val))
elif osdata['kernel'] == 'OpenBSD':
sysctl = salt.utils.which('sysctl')
hwdata = {'biosversion': 'hw.version',
'manufacturer': 'hw.vendor',
'productname': 'hw.product',
'serialnumber': 'hw.serialno'}
for key, oid in hwdata.items():
value = __salt__['cmd.run']('{0} -n {1}'.format(sysctl, oid))
if not value.endswith(' value is not available'):
grains[key] = value
elif osdata['kernel'] == 'NetBSD':
sysctl = salt.utils.which('sysctl')
nbsd_hwdata = {
'biosversion': 'machdep.dmi.board-version',
'manufacturer': 'machdep.dmi.system-vendor',
'serialnumber': 'machdep.dmi.system-serial',
'productname': 'machdep.dmi.system-product',
'biosreleasedate': 'machdep.dmi.bios-date',
}
for key, oid in nbsd_hwdata.items():
result = __salt__['cmd.run_all']('{0} -n {1}'.format(sysctl, oid))
if result['retcode'] == 0:
grains[key] = result['stdout']
return grains
def _smartos_zone_data():
'''
Return useful information from a SmartOS zone
'''
# Provides:
# pkgsrcversion
# imageversion
# pkgsrcpath
# zonename
# zoneid
# hypervisor_uuid
# datacenter
if 'proxyminion' in __opts__:
return {}
grains = {}
pkgsrcversion = re.compile('^release:\\s(.+)')
imageversion = re.compile('Image:\\s(.+)')
pkgsrcpath = re.compile('PKG_PATH=(.+)')
if os.path.isfile('/etc/pkgsrc_version'):
with salt.utils.fopen('/etc/pkgsrc_version', 'r') as fp_:
for line in fp_:
match = pkgsrcversion.match(line)
if match:
grains['pkgsrcversion'] = match.group(1)
if os.path.isfile('/etc/product'):
with salt.utils.fopen('/etc/product', 'r') as fp_:
for line in fp_:
match = imageversion.match(line)
if match:
grains['imageversion'] = match.group(1)
if os.path.isfile('/opt/local/etc/pkg_install.conf'):
with salt.utils.fopen('/opt/local/etc/pkg_install.conf', 'r') as fp_:
for line in fp_:
match = pkgsrcpath.match(line)
if match:
grains['pkgsrcpath'] = match.group(1)
if 'pkgsrcversion' not in grains:
grains['pkgsrcversion'] = 'Unknown'
if 'imageversion' not in grains:
grains['imageversion'] = 'Unknown'
if 'pkgsrcpath' not in grains:
grains['pkgsrcpath'] = 'Unknown'
grains['zonename'] = __salt__['cmd.run']('zonename')
grains['zoneid'] = __salt__['cmd.run']('zoneadm list -p | awk -F: \'{ print $1 }\'', python_shell=True)
grains['hypervisor_uuid'] = __salt__['cmd.run']('mdata-get sdc:server_uuid')
grains['datacenter'] = __salt__['cmd.run']('mdata-get sdc:datacenter_name')
if "FAILURE" in grains['datacenter'] or "No metadata" in grains['datacenter']:
grains['datacenter'] = "Unknown"
return grains
def get_server_id():
'''
    Provides an integer derived from the minion id (usually the FQDN of the machine).
    Useful as a server-id in MySQL replication or anywhere else you need an ID
    like this.
'''
# Provides:
# server_id
if 'proxyminion' in __opts__:
return {}
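    # NOTE: this relies on Python's built-in hash() of the minion id; on Python 3,
    # string hashes are randomized per process unless PYTHONHASHSEED is fixed, so
    # the value may not be stable across restarts.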
return {'server_id': abs(hash(__opts__.get('id', '')) % (2 ** 31))}
def get_master():
'''
Provides the minion with the name of its master.
This is useful in states to target other services running on the master.
'''
# Provides:
# master
return {'master': __opts__.get('master', '')}
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
[] |
[] |
[
"PATH",
"NUMBER_OF_PROCESSORS"
] |
[]
|
["PATH", "NUMBER_OF_PROCESSORS"]
|
python
| 2 | 0 | |
tweebot.py
|
import random
import os
import csv
from datetime import datetime
import sendgrid
from sendgrid.helpers.mail import *
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(), verbose=True)
FOLDER_PATH = os.getenv("DIR_WITH_CSV")
TYPE_INDEX = 0
QUOTE_INDEX = 3
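# Column layout assumed for the Kindle highlights CSV (used in choose_excerpt below):
# column 0 holds the entry type (e.g. "Highlight ...") and column 3 holds the quote text.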
def choose_excerpt(file_path, file_type):
'''
    accepts a file path and the "type" of file it is (e.g. Kindle highlights,
    Android text file, etc.)
returns a dictionary with the following keys:
- quote
- book
'''
if os.path.isfile(file_path):
if file_type == "kindle_csv":
with open(file_path, 'r') as csvfile:
highlights = csv.reader(csvfile, delimiter=',', quotechar='"')
# convert csv reader object to a list
highlights = [h for h in highlights]
book_name = "%s %s" % (highlights[1][0], highlights[2][0])
highlights = [h for h in highlights \
if (len(h) > 1 and h[TYPE_INDEX].startswith("Highlight"))]
chosen = random.choice(highlights)
return {
"book": book_name,
"quote": chosen[QUOTE_INDEX]
}
else:
return "TYPE NOT SUPPORTED"
def get_quote():
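    '''
    Pick a random .csv file from FOLDER_PATH and return a random excerpt from it.
    '''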
files = os.listdir(FOLDER_PATH)
print("Files: ", files)
files = [f for f in files if f.endswith('.csv')]
to_open = random.choice(files)
fname = os.path.join(FOLDER_PATH, to_open)
return choose_excerpt(fname, "kindle_csv")
def main():
print("%s" % datetime.now().isoformat())
print("Getting quote")
h = get_quote()
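    # Plain-text version of the message (built here but currently unused; only
    # the HTML body below is sent).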
mail_pt_context = '\n'.join([
"Today's Excerpt",
"",
"%s" % h['quote'],
"",
"from the book %s" % h['book']
])
mail_html_context = ''.join([
"<h1>Today's Excerpt</h1>",
"<blockquote>%s</blockquote>" % h['quote'],
"<p>-- %s</p>" % h['book']
])
sg = sendgrid.SendGridAPIClient(apikey=os.getenv("SENDGRID_API_KEY"))
from_email = Email(os.getenv("SENDER_EMAIL"))
to_email = Email(os.getenv("RECEIVER_EMAIL"))
subject = "Today's Excerpt"
content = Content("text/html", mail_html_context)
mail = Mail(from_email, subject, to_email, content)
print("Sending email")
response = sg.client.mail.send.post(request_body=mail.get())
print("Email sent. Response: ")
print(response.status_code)
print(response.body)
print(response.headers)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"SENDGRID_API_KEY",
"RECEIVER_EMAIL",
"DIR_WITH_CSV",
"SENDER_EMAIL"
] |
[]
|
["SENDGRID_API_KEY", "RECEIVER_EMAIL", "DIR_WITH_CSV", "SENDER_EMAIL"]
|
python
| 4 | 0 | |
rpc/client_example_test.go
|
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rpc_test
import (
"context"
"fmt"
"time"
"github.com/jeffprestes/ethclient/common/hexutil"
"github.com/jeffprestes/ethclient/rpc"
)
// In this example, our client wishes to track the latest 'block number'
// known to the server. The server supports two methods:
//
// eth_getBlockByNumber("latest", {})
// returns the latest block object.
//
// eth_subscribe("newHeads")
// creates a subscription which fires block objects when new blocks arrive.
type Block struct {
Number *hexutil.Big
}
func ExampleClientSubscription() {
// Connect the client.
client, _ := rpc.Dial("ws://127.0.0.1:8545")
subch := make(chan Block)
// Ensure that subch receives the latest block.
go func() {
for i := 0; ; i++ {
if i > 0 {
time.Sleep(2 * time.Second)
}
subscribeBlocks(client, subch)
}
}()
// Print events from the subscription as they arrive.
for block := range subch {
fmt.Println("latest block:", block.Number)
}
}
// subscribeBlocks runs in its own goroutine and maintains
// a subscription for new blocks.
func subscribeBlocks(client *rpc.Client, subch chan Block) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Subscribe to new blocks.
sub, err := client.EthSubscribe(ctx, subch, "newHeads")
if err != nil {
fmt.Println("subscribe error:", err)
return
}
// The connection is established now.
// Update the channel with the current block.
var lastBlock Block
err = client.CallContext(ctx, &lastBlock, "eth_getBlockByNumber", "latest", false)
if err != nil {
fmt.Println("can't get latest block:", err)
return
}
subch <- lastBlock
// The subscription will deliver events to the channel. Wait for the
// subscription to end for any reason, then loop around to re-establish
// the connection.
fmt.Println("connection lost: ", <-sub.Err())
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
vendor/code.cloudfoundry.org/cli/command/v2/feature_flag_command.go
|
package v2
import (
"os"
"code.cloudfoundry.org/cli/cf/cmd"
"code.cloudfoundry.org/cli/command"
"code.cloudfoundry.org/cli/command/flag"
)
type FeatureFlagCommand struct {
RequiredArgs flag.Feature `positional-args:"yes"`
usage interface{} `usage:"CF_NAME feature-flag FEATURE_NAME"`
relatedCommands interface{} `related_commands:"disable-feature-flag, enable-feature-flag, feature-flags"`
}
func (_ FeatureFlagCommand) Setup(config command.Config, ui command.UI) error {
return nil
}
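// Execute delegates to the legacy cf CLI code path, forwarding the CF_TRACE environment setting.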
func (_ FeatureFlagCommand) Execute(args []string) error {
cmd.Main(os.Getenv("CF_TRACE"), os.Args)
return nil
}
|
[
"\"CF_TRACE\""
] |
[] |
[
"CF_TRACE"
] |
[]
|
["CF_TRACE"]
|
go
| 1 | 0 | |
googleapis/api/serviceconfig/log.pb.go
|
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc v3.17.3
// source: google/api/log.proto
package serviceconfig
import (
reflect "reflect"
sync "sync"
label "google.golang.org/genproto/googleapis/api/label"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// A description of a log type. Example in YAML format:
//
// - name: library.googleapis.com/activity_history
// description: The history of borrowing and returning library items.
// display_name: Activity
// labels:
// - key: /customer_id
// description: Identifier of a library customer
type LogDescriptor struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The name of the log. It must be less than 512 characters long and can
// include the following characters: upper- and lower-case alphanumeric
// characters [A-Za-z0-9], and punctuation characters including
// slash, underscore, hyphen, period [/_-.].
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The set of labels that are available to describe a specific log entry.
// Runtime requests that contain labels not specified here are
// considered invalid.
Labels []*label.LabelDescriptor `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"`
// A human-readable description of this log. This information appears in
// the documentation and can contain details.
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
// The human-readable name for this log. This information appears on
// the user interface and should be concise.
DisplayName string `protobuf:"bytes,4,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
}
func (x *LogDescriptor) Reset() {
*x = LogDescriptor{}
if protoimpl.UnsafeEnabled {
mi := &file_google_api_log_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *LogDescriptor) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*LogDescriptor) ProtoMessage() {}
func (x *LogDescriptor) ProtoReflect() protoreflect.Message {
mi := &file_google_api_log_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LogDescriptor.ProtoReflect.Descriptor instead.
func (*LogDescriptor) Descriptor() ([]byte, []int) {
return file_google_api_log_proto_rawDescGZIP(), []int{0}
}
func (x *LogDescriptor) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *LogDescriptor) GetLabels() []*label.LabelDescriptor {
if x != nil {
return x.Labels
}
return nil
}
func (x *LogDescriptor) GetDescription() string {
if x != nil {
return x.Description
}
return ""
}
func (x *LogDescriptor) GetDisplayName() string {
if x != nil {
return x.DisplayName
}
return ""
}
var File_google_api_log_proto protoreflect.FileDescriptor
var file_google_api_log_proto_rawDesc = []byte{
0x0a, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x6f, 0x67,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
0x70, 0x69, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c,
0x61, 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x01, 0x0a, 0x0d, 0x4c,
0x6f, 0x67, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x12, 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61,
0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x06, 0x6c,
0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c,
0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f,
0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x08, 0x4c, 0x6f,
0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x45, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f,
0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x3b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0xa2,
0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_google_api_log_proto_rawDescOnce sync.Once
file_google_api_log_proto_rawDescData = file_google_api_log_proto_rawDesc
)
func file_google_api_log_proto_rawDescGZIP() []byte {
file_google_api_log_proto_rawDescOnce.Do(func() {
file_google_api_log_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_log_proto_rawDescData)
})
return file_google_api_log_proto_rawDescData
}
var file_google_api_log_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_google_api_log_proto_goTypes = []interface{}{
(*LogDescriptor)(nil), // 0: google.api.LogDescriptor
(*label.LabelDescriptor)(nil), // 1: google.api.LabelDescriptor
}
var file_google_api_log_proto_depIdxs = []int32{
1, // 0: google.api.LogDescriptor.labels:type_name -> google.api.LabelDescriptor
1, // [1:1] is the sub-list for method output_type
1, // [1:1] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_google_api_log_proto_init() }
func file_google_api_log_proto_init() {
if File_google_api_log_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_api_log_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*LogDescriptor); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_api_log_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_google_api_log_proto_goTypes,
DependencyIndexes: file_google_api_log_proto_depIdxs,
MessageInfos: file_google_api_log_proto_msgTypes,
}.Build()
File_google_api_log_proto = out.File
file_google_api_log_proto_rawDesc = nil
file_google_api_log_proto_goTypes = nil
file_google_api_log_proto_depIdxs = nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
bridge/npbackend/bohrium/contexts.py
|
"""
Bohrium Contexts
================
"""
import sys
import os
from . import backend_messaging as messaging
class EnableBohrium:
"""Enable Bohrium within the context"""
def __init__(self):
# In order to avoid complications, we import common libraries BEFORE enabling Bohrium
try:
import matplotlib
if os.environ.get("DISPLAY", "") == "":
matplotlib.use('Agg') # When no DISPLAY, we assume a headless matplotlib is used
import matplotlib.pyplot
import matplotlib.pylab
except ImportError:
pass
try:
import scipy
import scipy.sparse
import scipy.io
except ImportError:
pass
try:
import netCDF4
except ImportError:
pass
try:
import sklearn
import sklearn.preprocessing
except ImportError:
pass
# Let's save the real NumPy modules
self.__numpy = sys.modules['numpy']
self.__numpy_random = sys.modules['numpy.random']
self.__numpy_linalg = sys.modules['numpy.linalg']
# Sub-module matlib has to be imported explicitly once in order to be available through bohrium
try:
import numpy.matlib
except ImportError:
pass
def __enter__(self):
import numpy
import bohrium
# Overwrite with Bohrium
sys.modules['numpy_force'] = numpy
sys.modules['numpy'] = bohrium
sys.modules['numpy.random'] = bohrium.random
sys.modules['numpy.linalg'] = bohrium.linalg
def __exit__(self, *args):
# Put NumPy back together
sys.modules.pop('numpy_force', None)
sys.modules['numpy'] = self.__numpy
sys.modules['numpy.random'] = self.__numpy_random
sys.modules['numpy.linalg'] = self.__numpy_linalg
class DisableBohrium:
"""Disable Bohrium within the context"""
def __enter__(self):
# Save current state
import numpy
self._numpy = sys.modules['numpy']
self._numpy_random = sys.modules['numpy.random']
self._numpy_linalg = sys.modules['numpy.linalg']
# Make sure that numpy points to numpy (and not Bohrium)
sys.modules['numpy'] = sys.modules.get("numpy_force", self._numpy)
def __exit__(self, *args):
# Load the state before entering context
sys.modules['numpy'] = self._numpy
sys.modules['numpy.random'] = self._numpy_random
sys.modules['numpy.linalg'] = self._numpy_linalg
class Profiling:
"""Profiling the Bohrium backends within the context."""
def __init__(self):
pass
def __enter__(self):
messaging.statistic_enable_and_reset()
def __exit__(self, *args):
print(messaging.statistic())
class DisableGPU:
"""Disable the GPU backend within the context."""
def __init__(self):
pass
def __enter__(self):
messaging.gpu_disable()
def __exit__(self, *args):
messaging.gpu_enable()
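# Usage sketch (illustrative only, not part of the original module): these context
# managers are intended for `with` blocks, e.g.
#
#     with EnableBohrium():
#         import numpy as np      # resolves to the bohrium module inside the block
#         a = np.ones(10)
#
#     with Profiling():
#         run_workload()          # hypothetical user function; statistics are printed on exit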
|
[] |
[] |
[
"DISPLAY"
] |
[]
|
["DISPLAY"]
|
python
| 1 | 0 | |
myip.py
|
#!/usr/bin/env python
"""Show the IP Address of the browser. """
TEMPLATE = """Content-Type: text/html\n\n
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<title>IP Address</title>
</head>
<body>
%s
</body>
</html>"""
import os
def main():
"""If called directly then show IP Address.
It may well be a router rather than the local
machine."""
includes = os.environ['REMOTE_ADDR']
print(TEMPLATE % includes)
# start the ball rolling
if __name__ == "__main__":
main()
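# Note: this is a CGI script; the web server supplies REMOTE_ADDR. Run outside a
# CGI environment, os.environ['REMOTE_ADDR'] raises KeyError.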
|
[] |
[] |
[
"REMOTE_ADDR"
] |
[]
|
["REMOTE_ADDR"]
|
python
| 1 | 0 | |
src/config/wsgi.py
|
# -*- coding: utf-8 -*-
"""
WSGI config for src project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from local_prod import local_or_prod
local_or_prod, config = local_or_prod()
os.environ.setdefault("DJANGO_SETTINGS_MODULE", config)
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
utils/command.go
|
package utils
import (
"go/build"
"os"
"os/exec"
"strings"
"bytes"
"path/filepath"
)
// Initialize the command based on the GO environment
func CmdInit(c *exec.Cmd, basePath string) {
c.Dir = basePath
// Dep does not like paths that are not real; convert all GOPATH entries to real paths.
realPath := &bytes.Buffer{}
for _, p := range filepath.SplitList(build.Default.GOPATH) {
rp, _ := filepath.EvalSymlinks(p)
if realPath.Len() > 0 {
realPath.WriteString(string(filepath.ListSeparator))
}
realPath.WriteString(rp)
}
// Go 1.8 fails if we do not include the GOROOT
c.Env = []string{"GOPATH=" + realPath.String(), "GOROOT=" + os.Getenv("GOROOT")}
// Fetch the rest of the environment variables, skipping the two we just set
for _, e := range os.Environ() {
pair := strings.Split(e, "=")
if pair[0] == "GOPATH" || pair[0] == "GOROOT" {
continue
}
c.Env = append(c.Env, e)
}
}
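// Usage sketch (illustrative, not part of the original file): CmdInit prepares an
// *exec.Cmd so it runs from basePath with a cleaned-up GOPATH/GOROOT, e.g.
//
//	cmd := exec.Command("go", "build")   // hypothetical command
//	CmdInit(cmd, "/path/to/project")     // hypothetical base path
//	_ = cmd.Run()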
|
[
"\"GOROOT\""
] |
[] |
[
"GOROOT"
] |
[]
|
["GOROOT"]
|
go
| 1 | 0 | |
ML_web_app/wsgi.py
|
"""
WSGI config for ML_web_app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ML_web_app.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/main/java/zenit/settingspanel/SettingsPanelController.java
|
package main.java.zenit.settingspanel;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.controlsfx.control.ToggleSwitch;
import javafx.application.Platform;
import javafx.beans.value.ChangeListener;
import javafx.beans.value.ObservableValue;
import javafx.event.Event;
import javafx.fxml.FXML;
import javafx.fxml.FXMLLoader;
import javafx.geometry.Pos;
import javafx.scene.Scene;
import javafx.scene.control.Button;
import javafx.scene.control.ChoiceBox;
import javafx.scene.control.ColorPicker;
import javafx.scene.control.Hyperlink;
import javafx.scene.control.Label;
import javafx.scene.control.ListView;
import javafx.scene.control.Slider;
import javafx.scene.control.TextField;
import javafx.scene.layout.AnchorPane;
import javafx.scene.text.Font;
import javafx.stage.DirectoryChooser;
import javafx.stage.Stage;
import main.java.zenit.console.ConsoleController;
import main.java.zenit.ui.MainController;
import main.java.zenit.zencodearea.ZenCodeArea;
/**
* Controller class for the NewTextSize window.
* @author Sigge Labor
*
*/
public class SettingsPanelController extends AnchorPane implements ThemeCustomizable{
private int oldSize;
private String oldFont;
private File customThemeCSS;
private LinkedList<String> addedCSSLines;
private List<ThemeCustomizable> stages;
private Stage window;
private MainController mainController;
private CustomCSSThemeHandler themeHandler;
private boolean isCustomTheme = false;
private boolean isDarkMode = true;
private String settingsPanelDarkMode = getClass().getResource(
"/zenit/settingspanel/settingspanelDarkMode.css").toExternalForm();
private String settingsPanelLightMode = getClass().getResource(
"/zenit/settingspanel/settingspanelLightMode.css").toExternalForm();
private enum OS {
MACOS, WINDOWS, LINUX
}
private OS operatingSystem;
@FXML
private TextField fldNewSize;
@FXML
private Slider sldrNewSize;
@FXML
private Label lblOldTextSize;
@FXML
private Label lblOldFont;
@FXML
private Label lblCurrentJavaHome;
@FXML
private Label newJavaHome;
@FXML
private Label lblTxtAppeaSize;
@FXML
private ChoiceBox<String> chcbxNewFont;
@FXML
private Button btnTextAppearance;
@FXML
private Button btnJavaHome;
@FXML
private Button btnSupport;
@FXML
private Button btnTheme;
@FXML
private Button btnCustomCSS;
@FXML
private Button btnCustomTheme;
@FXML
private Hyperlink linkOpenInGitHub;
@FXML
private Hyperlink linkSubmitIssue;
@FXML
private Hyperlink linkDownloadSource;
@FXML
private ToggleSwitch toggleDarkMode;
@FXML
private ToggleSwitch toggleSwitchCustomTheme;
@FXML
private ListView listViewAddedCSS;
@FXML
private ColorPicker colorPickerPrimaryColor;
@FXML
private ColorPicker colorPickerPrimaryTint;
@FXML
private ColorPicker colorPickerSecondaryColor;
@FXML
private ColorPicker colorPickerSecondaryTint;
@FXML
private AnchorPane pnlTextAppearance;
@FXML
private AnchorPane pnlJavaHome;
@FXML
private AnchorPane pnlSupport;
@FXML
private AnchorPane pnlTheme;
@FXML
private AnchorPane pnlCustomCSS;
@FXML
private AnchorPane pnlCustomTheme;
private ConsoleController consoleController;
/**
* constructs a controller for the TextSizeWindow.
* @param codeArea the ZenCodeArea that will be modified.
*/
public SettingsPanelController(MainController mainController, int oldFontSize, String oldFontFamily, ConsoleController consoleController) {
this.mainController = mainController;
this.consoleController = consoleController;
oldSize = oldFontSize;
oldFont = oldFontFamily;
addedCSSLines = new LinkedList<String>();
FXMLLoader loader = new FXMLLoader(
getClass().getResource("/zenit/settingspanel/SettingsPanel.fxml"
));
loader.setRoot(this);
loader.setController(this);
try {
loader.load();
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
window = new Stage();
Scene scene = new Scene(this);
window.setScene(scene);
window.setTitle("Preferences");
initialize();
// scene.getStylesheets().add(getClass().getResource(
// "/zenit/settingspanel/settingspanelDarkMode.css").toString(
// ));
darkModeChanged(mainController.isDarkmode());
window.show();
this.customThemeCSS = new File("/customtheme/settingspanelCustomTheme.css");
stages = new ArrayList<ThemeCustomizable>();
stages.add(mainController);
stages.add(this);
themeHandler = new CustomCSSThemeHandler(stages);
}
/**
* Sets the font of the given ZenCodeArea.
* @param newFont the font to be applied.
*/
public void setNewFont(String newFont) {
chcbxNewFont.setValue(newFont);
mainController.setFontFamily(newFont);
}
/**
* Sets the font size of the given ZenCodeArea.
* @param newFontSize the font size to be applied.
*/
public void setNewFontSize(long newFontSize) {
long size = newFontSize;
fldNewSize.textProperty().setValue(String.valueOf(size));
if(size > 100) {
size = 100;
}
else if(size < 6) {
size = 6;
}
sldrNewSize.setValue(size);
mainController.setFontSize((int)size);//this.codeArea.setFontSize((int)size);
}
/**
* Moves a panel to the front, and thereby makes it visible.
* @param e
*/
public void panelToFront(Event e) {
if(e.getSource() == btnTextAppearance) {
pnlTextAppearance.toFront();
}
else if(e.getSource() == btnJavaHome) {
pnlJavaHome.toFront();
}
else if(e.getSource() == btnSupport) {
pnlSupport.toFront();
}
else if(e.getSource() == btnTheme) {
pnlTheme.toFront();
}
else if(e.getSource() == btnCustomCSS) {
pnlCustomCSS.toFront();
}
else if(e.getSource() == btnCustomTheme) {
pnlCustomTheme.toFront();
}
}
@FXML
private void setNewJavaHome() {
/*
* TODO REMOVE
*/
DirectoryChooser directoryChooser = new DirectoryChooser();
File selectedDirectory = directoryChooser.showDialog(window);
if(selectedDirectory == null){
//No Directory selected
}
else{
ProcessBuilder pb = new ProcessBuilder();
Map<String, String> env = pb.environment();
env.put("JAVA_HOME", selectedDirectory.getAbsolutePath());
try {
Process p = pb.start();
Thread.sleep(100);
newJavaHome.setText(System.getenv("JAVA_HOME"));
newJavaHome.setStyle("-fx-text-fill: #0B6623;");
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}
// TODO: update the comments below.
/**
* Adds the string written in fldCSSLineInput to the setStyle method. This will add the styling
* to the application.
*/
@FXML
private void addCSSLine() {
// String CSSLine = fldCSSLineInput.getText();
// try {
// Scene mockScene = new Scene(new Region());
// mockScene.getRoot().setStyle(CSSLine);
//
// String allCusomLinesOfCSS = "";
// addedCSSLines.addFirst(CSSLine);
//
// for(int i = 0; i < addedCSSLines.size(); i++) {
// allCusomLinesOfCSS += addedCSSLines.get(i);
// }
// this.window.getScene().getRoot().setStyle(allCusomLinesOfCSS);
//
// updateCustomCSSListView();
// }
//
// catch(Exception e) {
// e.printStackTrace();
// }
}
/**
* Updates the listViewAddedCSS to show the correct lines.
*/
@SuppressWarnings("unchecked")
private void updateCustomCSSListView() {
listViewAddedCSS.getItems().clear();
for(int i = 0; i < addedCSSLines.size(); i++) {
listViewAddedCSS.getItems().add(new CustomCSSListItem(addedCSSLines.get(i)));
}
}
/**
* Calls the openInBrowser method. The URL depends on which button is clicked.
* @param e
*/
@FXML
private void openLinkInBrowserEvent(Event e) {
if(e.getSource() == linkOpenInGitHub) {
openInBrowser("https://github.com/strazan/zenit");
}
if(e.getSource() == linkSubmitIssue) {
openInBrowser("https://github.com/strazan/zenit/issues/new");
}
if(e.getSource() == linkDownloadSource) {
openInBrowser("https://github.com/strazan/zenit/archive/develop.zip");
}
}
/**
* Opens a URL in the computer's default browser. The command varies depending on the user's
* operating system.
* @param url to open
*/
private void openInBrowser(String url) {
Runtime rt = Runtime.getRuntime();
switch(operatingSystem) {
case LINUX:
try {
rt.exec("xdg-open " + url);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
break;
case MACOS:
try {
rt.exec("open " + url);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
break;
case WINDOWS:
try {
rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
break;
default:
System.err.println("OS not found. Open link manually in browser:\n" + url );
break;
}
}
/**
* Switches between dark and light mode depending on what is selected in the settings pane's
* toggle switch.
* @param event
* @author Pontus Laos, Sigge Labor
*/
private void darkModeChanged(boolean isDarkMode) {
if(!isCustomTheme) {
var stylesheets = this.mainController.getStage().getScene().getStylesheets();
var settingsPanelStylesheets = window.getScene().getStylesheets();
var lightMode = getClass().getResource("/zenit/ui/mainStyle-lm.css").toExternalForm();
var darkMode = getClass().getResource("/zenit/ui/mainStyle.css").toExternalForm();
var darkModeKeywords = ZenCodeArea.class.getResource("/zenit/ui/keywords.css").toExternalForm();
var lightModeKeywords = ZenCodeArea.class.getResource("/zenit/ui/keywords-lm.css").toExternalForm();
var darkModeConsole = getClass().getResource("/zenit/console/consoleStyle.css").toExternalForm();
var lightModeConsole = getClass().getResource("/zenit/console/consoleStyleLight.css").toExternalForm();
if (isDarkMode) {
settingsPanelStylesheets.clear();
settingsPanelStylesheets.add(settingsPanelDarkMode);
consoleController.getStylesheets().remove(lightModeConsole);
consoleController.getStylesheets().add(darkModeConsole);
consoleController.changeAllConsoleAreaColors("-fx-background-color:#444");
stylesheets.clear();
stylesheets.add(darkModeKeywords);
stylesheets.add(darkMode);
} else {
settingsPanelStylesheets.clear();
settingsPanelStylesheets.add(settingsPanelLightMode);
consoleController.getStylesheets().remove(darkModeConsole);
consoleController.getStylesheets().add(lightModeConsole);
consoleController.changeAllConsoleAreaColors("-fx-background-color:#989898");
stylesheets.clear();
stylesheets.add(lightModeKeywords);
stylesheets.add(lightMode);
}
}
this.isDarkMode = isDarkMode;
mainController.setDarkmode(this.isDarkMode);
}
/**
* Initialization steps: variables get their values and ActionListeners are added.
*/
private void initialize() {
lblOldTextSize.setText(String.valueOf(oldSize));
fldNewSize.setText(String.valueOf(oldSize));
sldrNewSize.setValue(oldSize);
sldrNewSize.valueProperty().addListener(
(ChangeListener<? super Number>) (arg0, arg1, arg2) -> {
setNewFontSize(Math.round(sldrNewSize.getValue()));
});
fldNewSize.textProperty().addListener((arg0, arg1, arg2) -> {
try {
setNewFontSize(Long.parseLong(fldNewSize.getText()));
} catch(NumberFormatException e){
e.printStackTrace();
}
});
List<String> fonts = Font.getFamilies();
for(int i = 0; i < fonts.size(); i++) {
chcbxNewFont.getItems().add(fonts.get(i));
}
chcbxNewFont.setValue(oldFont);
lblOldFont.setText(oldFont);
chcbxNewFont.getSelectionModel().selectedItemProperty().addListener((arg0, arg1, arg2) -> {
setNewFont(arg2);
});
lblCurrentJavaHome.setText(System.getenv("JAVA_HOME"));
fldNewSize.setAlignment(Pos.CENTER_RIGHT);
String os = System.getProperty("os.name").toLowerCase();
if(os.indexOf("win") >= 0) {
operatingSystem = OS.WINDOWS;
}
else if(os.indexOf("mac") >= 0) {
operatingSystem = OS.MACOS;
}
else if(os.indexOf("nix") >=0 || os.indexOf("nux") >=0) {
operatingSystem = OS.LINUX;
}
toggleDarkMode.setSelected(mainController.isDarkmode());
toggleDarkMode.selectedProperty().addListener(new ChangeListener <Boolean> () {
@Override
public void changed(
ObservableValue<? extends Boolean> observable, Boolean oldValue, Boolean newValue)
{
darkModeChanged(newValue);
}
});
toggleSwitchCustomTheme.selectedProperty().addListener(new ChangeListener <Boolean> () {
@Override
public void changed(
ObservableValue<? extends Boolean> observable, Boolean oldValue, Boolean newValue)
{
themeHandler.toggleCustomTheme(toggleSwitchCustomTheme.isSelected());
isCustomTheme = toggleSwitchCustomTheme.isSelected();
darkModeChanged(toggleDarkMode.isSelected());
}
});
listViewAddedCSS.getItems().add(new AnchorPane());
colorPickerPrimaryColor.setOnAction((event) -> {
Platform.runLater(() -> {
themeHandler.changeColor(colorPickerPrimaryColor.getValue(),
CustomColor.primaryColor);
});
});
colorPickerPrimaryTint.setOnAction((event) -> {
Platform.runLater(() -> {
themeHandler.changeColor(colorPickerPrimaryTint.getValue(),
CustomColor.primaryTint);
});
});
colorPickerSecondaryColor.setOnAction((event) -> {
Platform.runLater(() -> {
themeHandler.changeColor(colorPickerSecondaryColor.getValue(),
CustomColor.secondaryColor);
});
});
colorPickerSecondaryTint.setOnAction((event) -> {
Platform.runLater(() -> {
themeHandler.changeColor(colorPickerSecondaryTint.getValue(),
CustomColor.secondaryTint);
});
});
}
/**
* @return this stage
*/
public Stage getStage() {
return this.window;
}
/**
* @return the path to the stage's custom theme stylesheet.
*/
public File getCustomThemeCSS() {
return this.customThemeCSS;
}
/**
* @return the path to the active default stylesheet.
*/
public String getActiveStylesheet() {
if(isDarkMode) {
return settingsPanelDarkMode;
}
else {
return settingsPanelLightMode;
}
}
}
|
[
"\"JAVA_HOME\"",
"\"JAVA_HOME\""
] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
java
| 1 | 0 | |
samples/aci-ap/ap.go
|
package main
import (
"log"
"os"
"github.com/kcbark/acigo/aci"
)
func main() {
debug := os.Getenv("DEBUG") != ""
if len(os.Args) < 3 {
log.Fatalf("usage: %s add|del|list tenant ap [description]", os.Args[0])
}
cmd := os.Args[1]
tenant := os.Args[2]
isList := cmd == "list"
var name, descr string
if !isList {
if len(os.Args) < 4 {
log.Fatalf("usage: %s add|del|list tenant ap [description]", os.Args[0])
}
name = os.Args[3]
if len(os.Args) > 4 {
descr = os.Args[4]
}
}
a := login(debug)
defer logout(a)
// add/del ap
execute(a, cmd, tenant, name, descr)
// display existing
aps, errList := a.ApplicationProfileList(tenant)
if errList != nil {
log.Printf("could not list application profiles: %v", errList)
return
}
for _, t := range aps {
name := t["name"]
dn := t["dn"]
descr := t["descr"]
log.Printf("FOUND application profile: name=%s dn=%s descr=%s\n", name, dn, descr)
}
}
func execute(a *aci.Client, cmd, tenant, name, descr string) {
switch cmd {
case "add":
errAdd := a.ApplicationProfileAdd(tenant, name, descr)
if errAdd != nil {
log.Printf("FAILURE: add error: %v", errAdd)
return
}
log.Printf("SUCCESS: add: %s", name)
case "del":
errDel := a.ApplicationProfileDel(tenant, name)
if errDel != nil {
log.Printf("FAILURE: del error: %v", errDel)
return
}
log.Printf("SUCCESS: del: %s", name)
case "list":
default:
log.Printf("unknown command: %s", cmd)
}
}
func login(debug bool) *aci.Client {
a, errNew := aci.New(aci.ClientOptions{Debug: debug})
if errNew != nil {
log.Printf("login new client error: %v", errNew)
os.Exit(1)
}
errLogin := a.Login()
if errLogin != nil {
log.Printf("login error: %v", errLogin)
os.Exit(1)
}
return a
}
func logout(a *aci.Client) {
errLogout := a.Logout()
if errLogout != nil {
log.Printf("logout error: %v", errLogout)
return
}
log.Printf("logout: done")
}
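// Usage sketch (illustrative): the DEBUG environment variable, when non-empty, enables
// client debug output; the positional arguments follow the usage string above, e.g.
//
//	DEBUG=1 ./ap add myTenant myAP "demo application profile"   // hypothetical tenant/AP names
//	./ap list myTenant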
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
go-sdk-master/demo/pkg/handleputs/customobject.go
|
package handleputs
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"os"
"github.com/julienschmidt/httprouter"
lr "github.com/LoginRadius/go-sdk"
"github.com/LoginRadius/go-sdk/api/customobject"
"github.com/LoginRadius/go-sdk/lrerror"
)
func CustomObject(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
var errors string
respCode := 200
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
token := r.Header.Get("Authorization")[7:]
lrclient, err := lr.NewLoginradius(
&cfg,
map[string]string{"token": token},
)
if err != nil {
errors = errors + err.(lrerror.Error).OrigErr().Error()
respCode = 500
}
var customObj json.RawMessage
b, _ := ioutil.ReadAll(r.Body)
json.Unmarshal(b, &customObj)
res, err := customobject.Loginradius(customobject.Loginradius{lrclient}).PutCustomObjectUpdateByToken(
r.URL.Query().Get("object_id"),
map[string]string{"objectname": r.URL.Query().Get("object_name")},
customObj,
)
if err != nil {
errors = errors + err.(lrerror.Error).OrigErr().Error()
respCode = 500
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(respCode)
if errors != "" {
log.Print(errors)
w.Write([]byte(errors))
return
}
w.Write([]byte(res.Body))
}
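// Note: this handler reads APIKEY and APISECRET from the environment and expects an
// Authorization header whose first seven characters (e.g. "Bearer ") are stripped to
// obtain the access token.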
|
[
"\"APIKEY\"",
"\"APISECRET\""
] |
[] |
[
"APIKEY",
"APISECRET"
] |
[]
|
["APIKEY", "APISECRET"]
|
go
| 2 | 0 | |
vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go
|
// +build windows
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"os"
"strings"
)
// msiPath is the path to the MSI Extension settings file (to discover the endpoint)
var msiPath = strings.Join([]string{os.Getenv("SystemDrive"), "WindowsAzure/Config/ManagedIdentity-Settings"}, "/")
|
[
"\"SystemDrive\""
] |
[] |
[
"SystemDrive"
] |
[]
|
["SystemDrive"]
|
go
| 1 | 0 | |
tour/appengine.go
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bufio"
"bytes"
"html/template"
"io"
"log"
"net/http"
"os"
_ "golang.org/x/tools/playground"
)
func gaeMain() {
prepContent = gaePrepContent
socketAddr = gaeSocketAddr
analyticsHTML = template.HTML(os.Getenv("TOUR_ANALYTICS"))
root := "tour"
if err := initTour(root, "HTTPTransport"); err != nil {
log.Fatal(err)
}
http.Handle("/", hstsHandler(rootHandler))
http.Handle("/lesson/", hstsHandler(lessonHandler))
registerStatic(root)
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
log.Fatal(http.ListenAndServe(":"+port, nil))
}
// gaePrepContent returns a Reader that produces the content from the given
// Reader, but strips the prefix "#appengine:", optionally followed by a space, from each line.
// It also drops any non-blank line that follows a series of 1 or more lines with the prefix.
func gaePrepContent(in io.Reader) io.Reader {
var prefix = []byte("#appengine:")
out, w := io.Pipe()
go func() {
r := bufio.NewReader(in)
drop := false
for {
b, err := r.ReadBytes('\n')
if err != nil && err != io.EOF {
w.CloseWithError(err)
return
}
if bytes.HasPrefix(b, prefix) {
b = b[len(prefix):]
if b[0] == ' ' {
// Consume a single space after the prefix.
b = b[1:]
}
drop = true
} else if drop {
if len(b) > 1 {
b = nil
}
drop = false
}
if len(b) > 0 {
w.Write(b)
}
if err == io.EOF {
w.Close()
return
}
}
}()
return out
}
// gaeSocketAddr returns the WebSocket handler address.
// The App Engine version does not provide a WebSocket handler.
func gaeSocketAddr() string { return "" }
// hstsHandler wraps an http.HandlerFunc such that it sets the HSTS header.
func hstsHandler(fn http.HandlerFunc) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Strict-Transport-Security", "max-age=31536000; preload")
fn(w, r)
})
}
|
[
"\"TOUR_ANALYTICS\"",
"\"PORT\""
] |
[] |
[
"PORT",
"TOUR_ANALYTICS"
] |
[]
|
["PORT", "TOUR_ANALYTICS"]
|
go
| 2 | 0 | |
udp-socket/socket-client/sources/src/main/java/de/hda/fbi/ds/mbredel/Main.java
|
/*
Copyright (c) 2018, Michael Bredel, H-DA
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the H-DA and Michael Bredel
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
*/
package de.hda.fbi.ds.mbredel;
import de.hda.fbi.ds.mbredel.configuration.CliParameters;
import de.hda.fbi.ds.mbredel.configuration.CliProcessor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The main class that contains the
* main method that starts the client.
*
* @author Michael Bredel
*/
public class Main {
/** The logger. */
@SuppressWarnings("unused")
private static final Logger LOGGER = LoggerFactory.getLogger(Main.class);
/**
* Sets the command-line options with values in environment variables.
* This can be used to ease the configuration of the client running
* in Docker compose.
*/
private static void parseOptionsFromEnv() {
try {
CliParameters.getInstance().setDestination(System.getenv("DESTINATION"));
} catch (NullPointerException e) {
LOGGER.debug("Environment variable \"DESTINATION\" does not exist");
}
}
/**
* The main method that starts the
* whole client. Thus, it creates
* a UDP socket client and transmits
* a string.
*
* @param args Command-line arguments.
*/
public static void main(String[] args) {
// Parse environment variables.
parseOptionsFromEnv();
// Parse the command line arguments.
CliProcessor.getInstance().parseCliOptions(args);
// Create the UDP socket client.
UDPSocketClient udpSocketClient = new UDPSocketClient();
// Send the message.
udpSocketClient.sendMsg(CliParameters.getInstance().getMessage());
}
}
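// Usage sketch (illustrative, not part of the original class): the DESTINATION environment
// variable pre-populates the destination before the command-line options are parsed, e.g. in
// a docker-compose service definition:
//
//   environment:
//     - DESTINATION=udp-server    # hypothetical service/host name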
|
[
"\"DESTINATION\""
] |
[] |
[
"DESTINATION"
] |
[]
|
["DESTINATION"]
|
java
| 1 | 0 | |
src/main/java/erjang/ErjangCodeCache.java
|
/** -*- tab-width: 4 -*-
* This file is part of Erjang - A JVM-based Erlang VM
*
* Copyright (c) 2011 by Trifork
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package erjang;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import java.util.jar.JarOutputStream;
import java.util.logging.Logger;
import java.util.zip.ZipEntry;
import erjang.beam.BeamLoader;
import erjang.beam.Compiler;
import erjang.beam.RamClassRepo;
public class ErjangCodeCache {
static final Logger log = Logger.getLogger("erjang.beam.cache");
// Config:
static final String ERJ_CACHE_DIR;
static final boolean useAsyncPersisting;
static final boolean useSyncPersisting;
static final Persister persister;
static {
String cacheDir = System.getenv("ERJ_CACHE_DIR");
if (cacheDir == null) cacheDir = System.getProperty("user.home");
ERJ_CACHE_DIR = cacheDir;
String mode = System.getProperty("erjang.codecache.mode");
if ("async".equals(mode)) {
useAsyncPersisting = true;
useSyncPersisting = false;
} else if ("sync".equals(mode)) {
useAsyncPersisting = false;
useSyncPersisting = true;
} else if ("off".equals(mode)) {
useAsyncPersisting = false;
useSyncPersisting = false;
} else {
// TODO: Warn?
// Default to 'async':
useAsyncPersisting = true;
useSyncPersisting = false;
} // Other values which might make sense: 'read-only', 'existing-only'
if (useAsyncPersisting) {
persister = new Persister();
Thread t = new Thread(persister, "Erjang Code Cache Persister");
t.setDaemon(true);
t.setPriority(Thread.MIN_PRIORITY);
t.start();
} else persister = null;
}
private static Map<String, RamClassRepo> cache = Collections.synchronizedMap(new HashMap<String, RamClassRepo>());
public static EModuleClassLoader getModuleClassLoader(String moduleName, EBinary beam_data, BeamLoader beam_parser) throws IOException {
long crc = beam_data.crc();
// crc ^= BIFUtil.all_bif_hash();
File jarFile = new File(erjdir(), moduleJarFileName(moduleName, crc));
if (jarFile.exists()) {
return new EModuleClassLoader(jarFile.toURI().toURL());
}
RamClassRepo repo = new RamClassRepo();
try {
Compiler.compile(beam_parser.load(beam_data.getByteArray()), repo);
repo.close();
cache.put(moduleName, repo);
if (useAsyncPersisting) persister.enqueue(jarFile, repo);
else if (useSyncPersisting) persister.persist(jarFile, repo);
} finally {
try {repo.close();
// jarFile.delete();
} catch (Exception e) {}
}
return new EModuleClassLoader(jarFile.toURI().toURL(), repo);
}
static File erjdir() throws IOException {
File home = ERT.newFile(ERJ_CACHE_DIR);
File dir = new File(home, ".erjang");
if (!dir.exists()) {
if (!dir.mkdirs())
throw new IOException("cannot create " + dir);
} else if (!dir.canWrite()) {
throw new IOException("cannot write to " + dir);
}
return dir;
}
public static String moduleJarFileName(String moduleName, long crc) {
return moduleFileName(moduleName, crc, "jar");
}
/*
static String moduleJarBackupFileName(String moduleName, long crc) {
return moduleFileName(moduleName, crc, "ja#");
}
*/
static String moduleFileName(String moduleName, long crc, String extension) {
return mangle(moduleName)
+ "-" + Long.toHexString(crc)
+ "." + extension;
}
/** Mangle string so that the result contains only characters in [A-Za-z0-9._$-]. */
static String mangle(String s) {
// TODO: Faster handling of the normal case.
StringBuffer sb = new StringBuffer();
for (int i=0; i<s.length(); i++) {
char c = s.charAt(i);
if (('a' <= c && c <= 'z') ||
('A' <= c && c <= 'Z') ||
('0' <= c && c <= '9') ||
c == '-' ||
c == '.' ||
c == '_')
sb.append(c);
else
sb.append('$').append(Integer.toHexString(c)).append('$');
}
return sb.toString();
}
static class PersistRequest { // Just a Pair<File,RamClassRepo>, really.
final File file;
final RamClassRepo repo;
public PersistRequest(File file, RamClassRepo repo) {
this.file = file;
this.repo = repo;
}
}
static class Persister implements Runnable {
final Queue<PersistRequest> queue = new LinkedList<PersistRequest>();
public void run() {
while (true) {
PersistRequest request;
synchronized (queue) {
while ((request = queue.poll()) == null) {
try { queue.wait(); }
catch (InterruptedException ie) {}
}
}
persist(request.file, request.repo);
}
}
void enqueue(File file, RamClassRepo repo) {
synchronized (queue) {
queue.add(new PersistRequest(file, repo));
queue.notify();
}
}
static void persist(File file, RamClassRepo repo) {
try {
File tmpFile = File.createTempFile(file.getName(), "tmp",
file.getParentFile());
JarOutputStream jo = new JarOutputStream(new FileOutputStream(tmpFile));
for (Map.Entry<String,byte[]> e : repo.entrySet()) {
String classFilename = e.getKey() + ".class";
byte[] classContents = e.getValue();
jo.putNextEntry(new ZipEntry(classFilename));
jo.write(classContents);
jo.closeEntry();
}
jo.close();
tmpFile.renameTo(file);
} catch (IOException ioe) {
log.warning("Warning: Failed to store cached module in "+file);
}
}
}
}
|
[
"\"ERJ_CACHE_DIR\""
] |
[] |
[
"ERJ_CACHE_DIR"
] |
[]
|
["ERJ_CACHE_DIR"]
|
java
| 1 | 0 | |
tasks.py
|
"""Invoke tasks."""
import json
import os
import shutil
from typing import Iterator
from invoke import task
HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, "cookiecutter.json"), "r") as fp:
COOKIECUTTER_SETTINGS = json.load(fp)
# Match default value of app_name from cookiecutter.json
DEFAULT_APP_NAME = "my_flask_app"
COOKIECUTTER_SETTINGS["app_name"] = DEFAULT_APP_NAME
COOKIE = os.path.join(HERE, COOKIECUTTER_SETTINGS["app_name"])
REQUIREMENTS = os.path.join(COOKIE, "requirements", "dev.txt")
def _run_npm_command(ctx, command):
os.chdir(COOKIE)
ctx.run(f"npm {command}", echo=True)
os.chdir(HERE)
def _run_flask_command(ctx, command, *args):
os.chdir(COOKIE)
flask_command = f"flask {command}"
if args:
flask_command += f" {' '.join(args)}"
ctx.run(flask_command, echo=True)
@task
def build(ctx):
"""Build the cookiecutter."""
ctx.run(f"cookiecutter {HERE} --no-input")
@task(pre=[build])
def build_install(ctx):
"""Build the cookiecutter."""
_run_npm_command(ctx, "install")
ctx.run(f"pip install -r {REQUIREMENTS} --ignore-installed", echo=True)
@task
def clean(ctx):
"""Clean out generated cookiecutter."""
if os.path.exists(COOKIE):
shutil.rmtree(COOKIE)
@task(pre=[clean, build_install])
def lint(ctx):
"""Run lint commands."""
_run_npm_command(ctx, "run lint")
os.chdir(COOKIE)
os.environ["FLASK_ENV"] = "production"
os.environ["FLASK_DEBUG"] = "0"
_run_flask_command(ctx, "lint", "--check")
@task(pre=[clean, build_install])
def test(ctx):
"""Run tests."""
os.chdir(COOKIE)
os.environ["FLASK_ENV"] = "production"
os.environ["FLASK_DEBUG"] = "0"
_run_flask_command(ctx, "test")
def _walk_template_files() -> Iterator[str]:
template_dir = os.path.join(HERE, "{{cookiecutter.app_name}}")
for root, _, template_files in os.walk(template_dir):
for template_file in template_files:
yield os.path.join(root, template_file)
@task
def no_placeholders(ctx):
"""Check that default project name hasn't been committed to template dir"""
for template_file in _walk_template_files():
try:
with open(template_file, "r") as f:
if DEFAULT_APP_NAME in f.read():
raise ValueError(
f"Template cannot contain default app name, but {DEFAULT_APP_NAME} found in {f.name}"
)
except UnicodeDecodeError:
pass
@task(pre=[clean, build])
def test_image_build(ctx):
"""Run tests."""
os.chdir(COOKIE)
os.environ["DOCKER_BUILDKIT"] = "1"
ctx.run("docker-compose build flask-dev", echo=True)
|
[] |
[] |
[
"FLASK_ENV",
"FLASK_DEBUG",
"DOCKER_BUILDKIT"
] |
[]
|
["FLASK_ENV", "FLASK_DEBUG", "DOCKER_BUILDKIT"]
|
python
| 3 | 0 | |
tests/api/test_historic.py
|
# Author: Kelvin Lai <[email protected]>
# Copyright: This module is owned by First Street Foundation
# Standard Imports
import os
# External Imports
import pytest
# Internal Imports
import firststreet
from firststreet.errors import InvalidArgument
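# These tests require the FSF_API_KEY environment variable; the lookup below raises KeyError if it is unset.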
api_key = os.environ['FSF_API_KEY']
fs = firststreet.FirstStreet(api_key)
class TestHistoricEvent:
def test_empty(self):
with pytest.raises(InvalidArgument):
fs.historic.get_event([], "")
def test_wrong_fsid_type(self):
with pytest.raises(InvalidArgument):
fs.historic.get_event("9")
def test_invalid(self):
event_id = [0000]
historic = fs.historic.get_event(event_id)
assert len(historic) == 1
assert historic[0].eventId == str(event_id[0])
assert historic[0].properties is None
assert historic[0].valid_id is False
def test_single(self):
event_id = [9]
historic = fs.historic.get_event(event_id)
assert len(historic) == 1
assert historic[0].eventId == str(event_id[0])
assert historic[0].properties is not None
assert historic[0].valid_id is True
def test_multiple(self):
event_id = [13, 14]
historic = fs.historic.get_event(event_id)
assert len(historic) == 2
historic.sort(key=lambda x: x.eventId)
assert historic[0].eventId == str(event_id[0])
assert historic[0].properties is not None
assert historic[1].eventId == str(event_id[1])
assert historic[1].properties is not None
assert historic[0].valid_id is True
assert historic[1].valid_id is True
def test_single_csv(self, tmpdir):
event_id = [9]
historic = fs.historic.get_event(event_id, csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].eventId == str(event_id[0])
assert historic[0].properties is not None
assert historic[0].valid_id is True
def test_multiple_csv(self, tmpdir):
event_id = [13, 14]
historic = fs.historic.get_event(event_id, csv=True, output_dir=tmpdir)
assert len(historic) == 2
historic.sort(key=lambda x: x.eventId)
assert historic[0].eventId == str(event_id[0])
assert historic[0].properties is not None
assert historic[1].eventId == str(event_id[1])
assert historic[1].properties is not None
assert historic[0].valid_id is True
assert historic[1].valid_id is True
def test_mixed_invalid(self):
event_id = [9, 0]
historic = fs.historic.get_event(event_id)
assert len(historic) == 2
historic.sort(key=lambda x: x.eventId, reverse=True)
assert historic[0].eventId == str(event_id[0])
assert historic[0].properties is not None
assert historic[1].eventId == str(event_id[1])
assert not historic[1].properties
assert historic[0].valid_id is True
assert historic[1].valid_id is False
def test_mixed_invalid_csv(self, tmpdir):
event_id = [9, 0]
historic = fs.historic.get_event(event_id, csv=True, output_dir=tmpdir)
assert len(historic) == 2
historic.sort(key=lambda x: x.eventId, reverse=True)
assert historic[0].eventId == str(event_id[0])
assert historic[0].properties is not None
assert historic[1].eventId == str(event_id[1])
assert not historic[1].properties
assert historic[0].valid_id is True
assert historic[1].valid_id is False
def test_one_of_each(self, tmpdir):
historic = fs.historic.get_event([2], csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].valid_id is True
assert historic[0].eventId == "2"
assert historic[0].name is not None
assert historic[0].type is not None
assert historic[0].month is not None
assert historic[0].year is not None
assert historic[0].returnPeriod is not None
assert historic[0].properties is not None
assert historic[0].properties.get("total") is not None
assert historic[0].properties.get("affected") is not None
assert historic[0].geometry is not None
class TestHistoricSummary:
def test_empty(self):
with pytest.raises(InvalidArgument):
fs.historic.get_summary([], "")
def test_empty_fsid(self):
with pytest.raises(InvalidArgument):
fs.historic.get_summary([], "property")
def test_empty_type(self):
with pytest.raises(InvalidArgument):
fs.historic.get_summary([190836953], "")
def test_wrong_fsid_type(self):
with pytest.raises(InvalidArgument):
fs.historic.get_summary(190836953, "property")
def test_wrong_fsid_number(self):
fsid = [1867176]
historic = fs.historic.get_summary(fsid, "property")
assert len(historic) == 1
assert historic[0].fsid == str(fsid[0])
assert not historic[0].historic
assert historic[0].valid_id is False
def test_incorrect_lookup_type(self, tmpdir):
fsid = [190836953]
historic = fs.historic.get_summary(fsid, "city", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].fsid == str(fsid[0])
assert not historic[0].historic
assert historic[0].valid_id is False
def test_wrong_historic_type(self):
with pytest.raises(TypeError):
fs.historic.get_summary([190836953], 190)
def test_single(self):
fsid = [190836953]
historic = fs.historic.get_summary(fsid, "property")
assert len(historic) == 1
assert historic[0].fsid == str(fsid[0])
assert historic[0].historic is not None
assert historic[0].valid_id is True
def test_multiple(self):
fsid = [190836953, 193139123]
historic = fs.historic.get_summary(fsid, "property")
assert len(historic) == 2
historic.sort(key=lambda x: x.fsid)
assert historic[0].fsid == str(fsid[0])
assert historic[0].historic is not None
assert historic[1].fsid == str(fsid[1])
assert historic[1].historic is not None
assert historic[0].valid_id is True
assert historic[1].valid_id is True
def test_single_csv(self, tmpdir):
fsid = [190836953]
historic = fs.historic.get_summary(fsid, "property", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].fsid == str(fsid[0])
assert historic[0].historic is not None
assert historic[0].valid_id is True
def test_multiple_csv(self, tmpdir):
fsid = [190836953, 193139123]
historic = fs.historic.get_summary(fsid, "property", csv=True, output_dir=tmpdir)
assert len(historic) == 2
historic.sort(key=lambda x: x.fsid)
assert historic[0].fsid == str(fsid[0])
assert historic[0].historic is not None
assert historic[1].fsid == str(fsid[1])
assert historic[1].historic is not None
assert historic[0].valid_id is True
assert historic[1].valid_id is True
def test_mixed_invalid(self):
fsid = [190836953, 000000000]
historic = fs.historic.get_summary(fsid, "property")
assert len(historic) == 2
historic.sort(key=lambda x: x.fsid, reverse=True)
assert historic[0].fsid == str(fsid[0])
assert historic[0].historic is not None
assert historic[1].fsid == str(fsid[1])
assert not historic[1].historic
assert historic[0].valid_id is True
assert historic[1].valid_id is False
def test_mixed_invalid_csv(self, tmpdir):
fsid = [190836953, 000000000]
historic = fs.historic.get_summary(fsid, "property", csv=True, output_dir=tmpdir)
assert len(historic) == 2
historic.sort(key=lambda x: x.fsid, reverse=True)
assert historic[0].fsid == str(fsid[0])
assert historic[0].historic is not None
assert historic[1].fsid == str(fsid[1])
assert not historic[1].historic
assert historic[0].valid_id is True
assert historic[1].valid_id is False
def test_coordinate_invalid(self, tmpdir):
historic = fs.historic.get_summary([(82.487671, -62.374322)], "property", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert not historic[0].historic
assert historic[0].valid_id is False
def test_single_coordinate(self, tmpdir):
historic = fs.historic.get_summary([(40.7079652311, -74.0021455387)], "property", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].historic is not None
assert historic[0].valid_id is True
def test_address_invalid_404(self, tmpdir):
historic = fs.historic.get_summary(["Shimik, Nunavut, Canada"], "property", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert not historic[0].historic
assert historic[0].valid_id is False
def test_address_invalid_500(self, tmpdir):
historic = fs.historic.get_summary(["Toronto, Ontario, Canada"], "property", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert not historic[0].historic
assert historic[0].valid_id is False
def test_single_address(self, tmpdir):
historic = fs.historic.get_summary(["247 Water St, New York, New York"], "property",
csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].historic is not None
assert historic[0].valid_id is True
def test_one_of_each(self, tmpdir):
historic = fs.historic.get_summary([511447411], "property", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].valid_id is True
assert historic[0].fsid == "511447411"
assert historic[0].historic is not None
assert historic[0].historic[0].get("eventId") is not None
assert historic[0].historic[0].get("name") is not None
assert historic[0].historic[0].get("type") is not None
assert historic[0].historic[0].get("depth") is not None
historic = fs.historic.get_summary([540225], "neighborhood", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].valid_id is True
assert historic[0].fsid == "540225"
assert historic[0].historic is not None
assert historic[0].historic[0].get("eventId") is not None
assert historic[0].historic[0].get("name") is not None
assert historic[0].historic[0].get("type") is not None
assert historic[0].historic[0].get("data") is not None
assert historic[0].historic[0].get("data")[0].get("bin") is not None
assert historic[0].historic[0].get("data")[0].get("count") is not None
historic = fs.historic.get_summary([1982200], "city", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].valid_id is True
assert historic[0].fsid == "1982200"
assert historic[0].historic is not None
assert historic[0].historic[0].get("eventId") is not None
assert historic[0].historic[0].get("name") is not None
assert historic[0].historic[0].get("type") is not None
assert historic[0].historic[0].get("data") is not None
assert historic[0].historic[0].get("data")[0].get("bin") is not None
assert historic[0].historic[0].get("data")[0].get("count") is not None
historic = fs.historic.get_summary([50156], "zcta", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].valid_id is True
assert historic[0].fsid == "50156"
assert historic[0].historic is not None
assert historic[0].historic[0].get("eventId") is not None
assert historic[0].historic[0].get("name") is not None
assert historic[0].historic[0].get("type") is not None
assert historic[0].historic[0].get("data") is not None
assert historic[0].historic[0].get("data")[0].get("bin") is not None
assert historic[0].historic[0].get("data")[0].get("count") is not None
historic = fs.historic.get_summary([19153004900], "tract", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].valid_id is True
assert historic[0].fsid == "19153004900"
assert historic[0].historic is not None
assert historic[0].historic[0].get("eventId") is not None
assert historic[0].historic[0].get("name") is not None
assert historic[0].historic[0].get("type") is not None
assert historic[0].historic[0].get("data") is not None
assert historic[0].historic[0].get("data")[0].get("bin") is not None
assert historic[0].historic[0].get("data")[0].get("count") is not None
historic = fs.historic.get_summary([19163], "county", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].valid_id is True
assert historic[0].fsid == "19163"
assert historic[0].historic is not None
assert historic[0].historic[0].get("eventId") is not None
assert historic[0].historic[0].get("name") is not None
assert historic[0].historic[0].get("type") is not None
assert historic[0].historic[0].get("data") is not None
assert historic[0].historic[0].get("data")[0].get("bin") is not None
assert historic[0].historic[0].get("data")[0].get("count") is not None
historic = fs.historic.get_summary([1901], "cd", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].valid_id is True
assert historic[0].fsid == "1901"
assert historic[0].historic is not None
assert historic[0].historic[0].get("eventId") is not None
assert historic[0].historic[0].get("name") is not None
assert historic[0].historic[0].get("type") is not None
assert historic[0].historic[0].get("data") is not None
assert historic[0].historic[0].get("data")[0].get("bin") is not None
assert historic[0].historic[0].get("data")[0].get("count") is not None
historic = fs.historic.get_summary([39], "state", csv=True, output_dir=tmpdir)
assert len(historic) == 1
assert historic[0].valid_id is True
assert historic[0].fsid == "39"
assert historic[0].historic is not None
assert historic[0].historic[0].get("eventId") is not None
assert historic[0].historic[0].get("name") is not None
assert historic[0].historic[0].get("type") is not None
assert historic[0].historic[0].get("data") is not None
assert historic[0].historic[0].get("data")[0].get("bin") is not None
assert historic[0].historic[0].get("data")[0].get("count") is not None
class TestHistoricSummaryDetail:
def test_empty(self):
with pytest.raises(InvalidArgument):
fs.historic.get_events_by_location([], "")
def test_empty_fsid(self):
with pytest.raises(InvalidArgument):
fs.historic.get_events_by_location([], "property")
def test_empty_type(self):
with pytest.raises(InvalidArgument):
fs.historic.get_events_by_location([190836953], "")
def test_wrong_fsid_type(self):
with pytest.raises(InvalidArgument):
fs.historic.get_events_by_location(190836953, "city")
def test_wrong_fsid_number(self):
fsid = [11]
historic = fs.historic.get_events_by_location(fsid, "city")
assert len(historic[0]) == 1
assert len(historic[1]) == 1
assert historic[0][0].fsid == str(fsid[0])
assert not historic[0][0].historic
assert historic[0][0].valid_id is False
assert not historic[1][0].properties
assert historic[0][0].valid_id is False
def test_incorrect_lookup_type(self, tmpdir):
fsid = [1982200]
historic = fs.historic.get_events_by_location(fsid, "state", csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 1
assert historic[0][0].fsid == str(fsid[0])
assert not historic[0][0].historic
assert historic[0][0].valid_id is False
assert not historic[1][0].properties
assert historic[0][0].valid_id is False
def test_wrong_historic_type(self):
with pytest.raises(TypeError):
fs.historic.get_events_by_location([1982200], 190)
def test_single(self):
fsid = [1982200]
historic = fs.historic.get_events_by_location(fsid, "city")
assert len(historic[0]) == 1
assert len(historic[1]) == 1
assert historic[0][0].fsid == str(fsid[0])
assert historic[0][0].historic is not None
assert historic[0][0].valid_id is True
assert historic[1][0].properties is not None
assert historic[0][0].valid_id is True
def test_multiple(self):
fsid = [1982200, 3905074]
historic = fs.historic.get_events_by_location(fsid, "city")
assert len(historic[0]) == 2
assert len(historic[1]) == 2
historic[0].sort(key=lambda x: x.fsid)
historic[1].sort(key=lambda x: x.eventId)
assert historic[0][0].fsid == str(fsid[0])
assert historic[0][0].historic is not None
assert historic[0][1].fsid == str(fsid[1])
assert historic[0][1].historic is not None
assert historic[1][0].properties is not None
assert historic[1][1].properties is not None
assert historic[0][0].valid_id is True
assert historic[1][0].valid_id is True
assert historic[0][1].valid_id is True
assert historic[1][1].valid_id is True
def test_single_csv(self, tmpdir):
fsid = [1982200]
historic = fs.historic.get_events_by_location(fsid, "city", csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 1
historic[0].sort(key=lambda x: x.fsid)
historic[1].sort(key=lambda x: x.eventId)
assert historic[0][0].fsid == str(fsid[0])
assert historic[0][0].historic is not None
assert historic[1][0].properties is not None
assert historic[0][0].valid_id is True
assert historic[1][0].valid_id is True
def test_multiple_csv(self, tmpdir):
fsid = [1982200, 3905074]
historic = fs.historic.get_events_by_location(fsid, "city", csv=True, output_dir=tmpdir)
assert len(historic[0]) == 2
assert len(historic[1]) == 2
historic[0].sort(key=lambda x: x.fsid)
historic[1].sort(key=lambda x: x.eventId)
assert historic[0][0].fsid == str(fsid[0])
assert historic[0][0].historic is not None
assert historic[0][1].fsid == str(fsid[1])
assert historic[0][1].historic is not None
assert historic[1][0].properties is not None
assert historic[1][1].properties is not None
assert historic[0][0].valid_id is True
assert historic[1][0].valid_id is True
assert historic[0][1].valid_id is True
assert historic[1][1].valid_id is True
def test_mixed_invalid(self):
fsid = [1982200, 000000000]
historic = fs.historic.get_events_by_location(fsid, "city")
assert len(historic[0]) == 2
assert len(historic[1]) == 1
historic[0].sort(key=lambda x: x.fsid, reverse=True)
historic[1].sort(key=lambda x: x.eventId, reverse=True)
assert historic[0][0].fsid == str(fsid[0])
assert historic[0][0].historic is not None
assert historic[0][1].fsid == str(fsid[1])
assert not historic[0][1].historic
assert historic[1][0].properties is not None
assert historic[0][0].valid_id is True
assert historic[1][0].valid_id is True
assert historic[0][1].valid_id is False
def test_mixed_invalid_csv(self, tmpdir):
fsid = [1982200, 000000000]
historic = fs.historic.get_events_by_location(fsid, "city", csv=True, output_dir=tmpdir)
assert len(historic[0]) == 2
assert len(historic[1]) == 1
historic[0].sort(key=lambda x: x.fsid, reverse=True)
historic[1].sort(key=lambda x: x.eventId, reverse=True)
assert historic[0][0].fsid == str(fsid[0])
assert historic[0][0].historic is not None
assert historic[0][1].fsid == str(fsid[1])
assert not historic[0][1].historic
assert historic[1][0].properties is not None
assert historic[0][0].valid_id is True
assert historic[1][0].valid_id is True
assert historic[0][1].valid_id is False
def test_coordinate_invalid(self, tmpdir):
historic = fs.historic.get_events_by_location([(82.487671, -62.374322)], "property",
csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 1
assert not historic[0][0].historic
assert historic[0][0].valid_id is False
assert not historic[1][0].properties
assert historic[1][0].valid_id is False
def test_single_coordinate(self, tmpdir):
historic = fs.historic.get_events_by_location([(40.7079652311, -74.0021455387)], "property",
csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 1
assert historic[0][0].historic is not None
assert historic[0][0].valid_id is True
assert historic[1][0].properties is not None
assert historic[1][0].valid_id is True
def test_address_invalid_404(self, tmpdir):
historic = fs.historic.get_events_by_location(["Shimik, Nunavut, Canada"], "property",
csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 1
assert not historic[0][0].historic
assert historic[0][0].valid_id is False
assert not historic[1][0].properties
assert historic[1][0].valid_id is False
def test_address_invalid_500(self, tmpdir):
historic = fs.historic.get_events_by_location(["Toronto, Ontario, Canada"], "property",
csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 1
assert not historic[0][0].historic
assert historic[0][0].valid_id is False
assert not historic[1][0].properties
assert historic[1][0].valid_id is False
def test_single_address(self, tmpdir):
historic = fs.historic.get_events_by_location(["247 Water St, New York, New York"], "property",
csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 1
assert historic[0][0].historic is not None
assert historic[0][0].valid_id is True
assert historic[1][0].properties is not None
assert historic[1][0].valid_id is True
def test_one_of_each(self, tmpdir):
historic = fs.historic.get_events_by_location([511447411], "property", csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 2
assert historic[0][0].valid_id is True
assert historic[1][0].valid_id is True
assert historic[0][0].fsid == "511447411"
assert historic[0][0].historic is not None
assert historic[0][0].historic[0].get("eventId") is not None
assert historic[0][0].historic[0].get("name") is not None
assert historic[0][0].historic[0].get("type") is not None
assert historic[0][0].historic[0].get("depth") is not None
assert historic[1][0].name is not None
assert historic[1][0].type is not None
assert historic[1][0].month is not None
assert historic[1][0].year is not None
assert historic[1][0].returnPeriod is not None
assert historic[1][0].properties is not None
assert historic[1][0].properties.get("total") is not None
assert historic[1][0].properties.get("affected") is not None
assert historic[1][0].geometry is not None
historic = fs.historic.get_events_by_location([540225], "neighborhood", csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 1
assert historic[0][0].valid_id is True
assert historic[1][0].valid_id is True
assert historic[0][0].fsid == "540225"
assert historic[0][0].historic is not None
assert historic[0][0].historic[0].get("eventId") is not None
assert historic[0][0].historic[0].get("name") is not None
assert historic[0][0].historic[0].get("type") is not None
assert historic[0][0].historic[0].get("data") is not None
assert historic[0][0].historic[0].get("data")[0].get("bin") is not None
assert historic[0][0].historic[0].get("data")[0].get("count") is not None
assert historic[1][0].name is not None
assert historic[1][0].type is not None
assert historic[1][0].month is not None
assert historic[1][0].year is not None
assert historic[1][0].returnPeriod is not None
assert historic[1][0].properties is not None
assert historic[1][0].properties.get("total") is not None
assert historic[1][0].properties.get("affected") is not None
assert historic[1][0].geometry is not None
historic = fs.historic.get_events_by_location([1982200], "city", csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 1
assert historic[0][0].valid_id is True
assert historic[1][0].valid_id is True
assert historic[0][0].fsid == "1982200"
assert historic[0][0].historic is not None
assert historic[0][0].historic[0].get("eventId") is not None
assert historic[0][0].historic[0].get("name") is not None
assert historic[0][0].historic[0].get("type") is not None
assert historic[0][0].historic[0].get("data") is not None
assert historic[0][0].historic[0].get("data")[0].get("bin") is not None
assert historic[0][0].historic[0].get("data")[0].get("count") is not None
assert historic[1][0].name is not None
assert historic[1][0].type is not None
assert historic[1][0].month is not None
assert historic[1][0].year is not None
assert historic[1][0].returnPeriod is not None
assert historic[1][0].properties is not None
assert historic[1][0].properties.get("total") is not None
assert historic[1][0].properties.get("affected") is not None
assert historic[1][0].geometry is not None
historic = fs.historic.get_events_by_location([50156], "zcta", csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 1
assert historic[0][0].valid_id is True
assert historic[1][0].valid_id is True
assert historic[0][0].fsid == "50156"
assert historic[0][0].historic is not None
assert historic[0][0].historic[0].get("eventId") is not None
assert historic[0][0].historic[0].get("name") is not None
assert historic[0][0].historic[0].get("type") is not None
assert historic[0][0].historic[0].get("data") is not None
assert historic[0][0].historic[0].get("data")[0].get("bin") is not None
assert historic[0][0].historic[0].get("data")[0].get("count") is not None
assert historic[1][0].name is not None
assert historic[1][0].type is not None
assert historic[1][0].month is not None
assert historic[1][0].year is not None
assert historic[1][0].returnPeriod is not None
assert historic[1][0].properties is not None
assert historic[1][0].properties.get("total") is not None
assert historic[1][0].properties.get("affected") is not None
assert historic[1][0].geometry is not None
historic = fs.historic.get_events_by_location([19153004900], "tract", csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 2
assert historic[0][0].valid_id is True
assert historic[1][0].valid_id is True
assert historic[0][0].fsid == "19153004900"
assert historic[0][0].historic is not None
assert historic[0][0].historic[0].get("eventId") is not None
assert historic[0][0].historic[0].get("name") is not None
assert historic[0][0].historic[0].get("type") is not None
assert historic[0][0].historic[0].get("data") is not None
assert historic[0][0].historic[0].get("data")[0].get("bin") is not None
assert historic[0][0].historic[0].get("data")[0].get("count") is not None
assert historic[1][0].name is not None
assert historic[1][0].type is not None
assert historic[1][0].month is not None
assert historic[1][0].year is not None
assert historic[1][0].returnPeriod is not None
assert historic[1][0].properties is not None
assert historic[1][0].properties.get("total") is not None
assert historic[1][0].properties.get("affected") is not None
assert historic[1][0].geometry is not None
historic = fs.historic.get_events_by_location([19163], "county", csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 1
assert historic[0][0].valid_id is True
assert historic[1][0].valid_id is True
assert historic[0][0].fsid == "19163"
assert historic[0][0].historic is not None
assert historic[0][0].historic[0].get("eventId") is not None
assert historic[0][0].historic[0].get("name") is not None
assert historic[0][0].historic[0].get("type") is not None
assert historic[0][0].historic[0].get("data") is not None
assert historic[0][0].historic[0].get("data")[0].get("bin") is not None
assert historic[0][0].historic[0].get("data")[0].get("count") is not None
assert historic[1][0].name is not None
assert historic[1][0].type is not None
assert historic[1][0].month is not None
assert historic[1][0].year is not None
assert historic[1][0].returnPeriod is not None
assert historic[1][0].properties is not None
assert historic[1][0].properties.get("total") is not None
assert historic[1][0].properties.get("affected") is not None
assert historic[1][0].geometry is not None
historic = fs.historic.get_events_by_location([1901], "cd", csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 2
assert historic[0][0].valid_id is True
assert historic[1][0].valid_id is True
assert historic[0][0].fsid == "1901"
assert historic[0][0].historic is not None
assert historic[0][0].historic[0].get("eventId") is not None
assert historic[0][0].historic[0].get("name") is not None
assert historic[0][0].historic[0].get("type") is not None
assert historic[0][0].historic[0].get("data") is not None
assert historic[0][0].historic[0].get("data")[0].get("bin") is not None
assert historic[0][0].historic[0].get("data")[0].get("count") is not None
assert historic[1][0].name is not None
assert historic[1][0].type is not None
assert historic[1][0].month is not None
assert historic[1][0].year is not None
assert historic[1][0].returnPeriod is not None
assert historic[1][0].properties is not None
assert historic[1][0].properties.get("total") is not None
assert historic[1][0].properties.get("affected") is not None
assert historic[1][0].geometry is not None
historic = fs.historic.get_events_by_location([39], "state", csv=True, output_dir=tmpdir)
assert len(historic[0]) == 1
assert len(historic[1]) == 4
assert historic[0][0].valid_id is True
assert historic[1][0].valid_id is True
assert historic[0][0].fsid == "39"
assert historic[0][0].historic is not None
assert historic[0][0].historic[0].get("eventId") is not None
assert historic[0][0].historic[0].get("name") is not None
assert historic[0][0].historic[0].get("type") is not None
assert historic[0][0].historic[0].get("data") is not None
assert historic[0][0].historic[0].get("data")[0].get("bin") is not None
assert historic[0][0].historic[0].get("data")[0].get("count") is not None
assert historic[1][0].name is not None
assert historic[1][0].type is not None
assert historic[1][0].month is not None
assert historic[1][0].year is not None
assert historic[1][0].returnPeriod is not None
assert historic[1][0].properties is not None
assert historic[1][0].properties.get("total") is not None
assert historic[1][0].properties.get("affected") is not None
assert historic[1][0].geometry is not None
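# Illustrative note, not part of the original test file: judging from the
# assertions above, get_events_by_location appears to return a pair of lists -
# index 0 holds per-location summaries (fsid, historic, valid_id) and index 1
# holds the matching event details (eventId, name, type, properties, geometry).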
|
[] |
[] |
[
"FSF_API_KEY"
] |
[]
|
["FSF_API_KEY"]
|
python
| 1 | 0 | |
examples/main.go
|
package main
import (
"errors"
"fmt"
"math/rand"
"os"
"time"
"code.cloudfoundry.org/lager"
"github.com/robdimsdale/honeylager"
)
func main() {
honeycombWriteKey := os.Getenv("HONEYCOMB_WRITE_KEY")
sink := honeylager.NewSink(
honeycombWriteKey,
"honeycomb-golang-example",
lager.DEBUG,
)
defer sink.Close()
l := lager.NewLogger("my-component")
l.RegisterSink(sink)
go honeylager.ReadResponses()
l.Info("example-starting")
for i := 0; i < 10; i++ {
duration := rand.Float64()*100 + 100
payloadLength := rand.Intn(45)*50 + 5
l.Debug("some-action", lager.Data{
"duration_ms": duration,
"method": "get",
"hostname": "appserver15",
"payload_length": payloadLength,
})
time.Sleep(100 * time.Millisecond)
}
l.Error("example-error", errors.New("This is an example error"))
l.Info("example-complete")
time.Sleep(500 * time.Millisecond)
fmt.Println("complete")
}
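// Illustrative usage note, not part of the original example: the sink forwards
// each lager log line to Honeycomb, so the write key has to be present in the
// environment before running, e.g.
//
// HONEYCOMB_WRITE_KEY=<your-key> go run examples/main.go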
|
[
"\"HONEYCOMB_WRITE_KEY\""
] |
[] |
[
"HONEYCOMB_WRITE_KEY"
] |
[]
|
["HONEYCOMB_WRITE_KEY"]
|
go
| 1 | 0 | |
pgpool2_exporter.go
|
/*
Copyright (c) 2021 PgPool Global Development Group
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package main
import (
"fmt"
"net/http"
"os"
"database/sql"
"errors"
"math"
"regexp"
"strconv"
"sync"
"time"
"github.com/go-kit/kit/log/level"
_ "github.com/lib/pq"
"github.com/blang/semver"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/promlog"
"github.com/prometheus/common/promlog/flag"
"github.com/prometheus/common/version"
"gopkg.in/alecthomas/kingpin.v2"
)
var (
listenAddress = kingpin.Flag("web.listen-address", "Address on which to expose metrics and web interface.").Default(":9719").String()
metricsPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").String()
logger = promlog.New(&promlog.Config{})
)
const (
namespace = "pgpool2"
exporter = "exporter"
landingPage = `
<html>
<head>
<title>Pgpool-II Exporter</title>
</head>
<body>
<h1>Pgpool-II Exporter</h1>
<p>
<a href='%s'>Metrics</a>
</p>
</body>
</html>`
)
// columnUsage should be one of several enum values which describe how a
// queried row is to be converted to a Prometheus metric.
type columnUsage int
// Convert a string to the corresponding columnUsage
func stringTocolumnUsage(s string) (u columnUsage, err error) {
switch s {
case "DISCARD":
u = DISCARD
case "LABEL":
u = LABEL
case "COUNTER":
u = COUNTER
case "GAUGE":
u = GAUGE
case "MAPPEDMETRIC":
u = MAPPEDMETRIC
case "DURATION":
u = DURATION
default:
err = fmt.Errorf("wrong columnUsage given : %s", s)
}
return
}
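// Illustrative only, not in the original source: a hypothetical caller parsing
// a usage string from a column definition and falling back to DISCARD.
//
// usage, err := stringTocolumnUsage("GAUGE")
// if err != nil {
// usage = DISCARD
// }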
// nolint: golint
const (
DISCARD columnUsage = iota // Ignore this column
LABEL columnUsage = iota // Use this column as a label
COUNTER columnUsage = iota // Use this column as a counter
GAUGE columnUsage = iota // Use this column as a gauge
MAPPEDMETRIC columnUsage = iota // Use this column with the supplied mapping of text values
DURATION columnUsage = iota // This column should be interpreted as a text duration (and converted to milliseconds)
)
// Implement the yaml.Unmarshaller interface
func (cu *columnUsage) UnmarshalYAML(unmarshal func(interface{}) error) error {
var value string
if err := unmarshal(&value); err != nil {
return err
}
columnUsage, err := stringTocolumnUsage(value)
if err != nil {
return err
}
*cu = columnUsage
return nil
}
// Groups metric maps under a shared set of labels
type MetricMapNamespace struct {
labels []string // Label names for this namespace
columnMappings map[string]MetricMap // Column mappings in this namespace
}
// Stores the prometheus metric description which a given column will be mapped
// to by the collector
type MetricMap struct {
discard bool // Should metric be discarded during mapping?
vtype prometheus.ValueType // Prometheus valuetype
namespace string
desc *prometheus.Desc // Prometheus descriptor
conversion func(interface{}) (float64, bool) // Conversion function to turn PG result into float64
}
// User-friendly representation of a prometheus descriptor map
type ColumnMapping struct {
usage columnUsage `yaml:"usage"`
description string `yaml:"description"`
}
// Exporter collects Pgpool-II stats from the given server and exports
// them using the prometheus metrics package.
type Exporter struct {
dsn string
namespace string
mutex sync.RWMutex
duration prometheus.Gauge
up prometheus.Gauge
error prometheus.Gauge
totalScrapes prometheus.Counter
metricMap map[string]MetricMapNamespace
db *sql.DB
}
var (
metricMaps = map[string]map[string]ColumnMapping{
"pool_nodes": {
"hostname": {LABEL, "Backend hostname"},
"port": {LABEL, "Backend port"},
"role": {LABEL, "Role (primary or standby)"},
"status": {GAUGE, "Backend node Status (1 for up or waiting, 0 for down or unused)"},
"select_cnt": {GAUGE, "SELECT statement counts issued to each backend"},
"replication_delay": {GAUGE, "Replication delay"},
},
"pool_backend_stats": {
"hostname": {LABEL, "Backend hostname"},
"port": {LABEL, "Backend port"},
"role": {LABEL, "Role (primary or standby)"},
"status": {GAUGE, "Backend node Status (1 for up or waiting, 0 for down or unused)"},
"select_cnt": {GAUGE, "SELECT statement counts issued to each backend"},
"insert_cnt": {GAUGE, "INSERT statement counts issued to each backend"},
"update_cnt": {GAUGE, "UPDATE statement counts issued to each backend"},
"delete_cnt": {GAUGE, "DELETE statement counts issued to each backend"},
"ddl_cnt": {GAUGE, "DDL statement counts issued to each backend"},
"other_cnt": {GAUGE, "other statement counts issued to each backend"},
"panic_cnt": {GAUGE, "Panic message counts returned from backend"},
"fatal_cnt": {GAUGE, "Fatal message counts returned from backend)"},
"error_cnt": {GAUGE, "Error message counts returned from backend"},
},
"pool_health_check_stats": {
"hostname": {LABEL, "Backend hostname"},
"port": {LABEL, "Backend port"},
"role": {LABEL, "Role (primary or standby)"},
"status": {GAUGE, "Backend node Status (1 for up or waiting, 0 for down or unused)"},
"total_count": {GAUGE, "Number of health check count in total"},
"success_count": {GAUGE, "Number of successful health check count in total"},
"fail_count": {GAUGE, "Number of failed health check count in total"},
"skip_count": {GAUGE, "Number of skipped health check count in total"},
"retry_count": {GAUGE, "Number of retried health check count in total"},
"average_retry_count": {GAUGE, "Number of average retried health check count in a health check session"},
"max_retry_count": {GAUGE, "Number of maximum retried health check count in a health check session"},
"max_duration": {GAUGE, "Maximum health check duration in Millie seconds"},
"min_duration": {GAUGE, "Minimum health check duration in Millie seconds"},
"average_duration": {GAUGE, "Average health check duration in Millie seconds"},
},
"pool_processes": {
"pool_pid": {DISCARD, "PID of Pgpool-II child processes"},
"database": {DISCARD, "Database name of the currently active backend connection"},
},
"pool_cache": {
"cache_hit_ratio": {GAUGE, "Query cache hit ratio"},
"num_hash_entries": {GAUGE, "Number of total hash entries"},
"used_hash_entries": {GAUGE, "Number of used hash entries"},
"num_cache_entries": {GAUGE, "Number of used cache entries"},
"used_cache_entries_size": {GAUGE, "Total size of used cache size"},
"free_cache_entries_size": {GAUGE, "Total size of free cache size"},
},
}
)
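// Illustrative note derived from makeDescMap below, not part of the original
// source: every non-label column above becomes a metric named
// <namespace>_<show command>_<column>, so the "status" column of "pool_nodes"
// is exported as the gauge pgpool2_pool_nodes_status with the variable labels
// hostname, port and role.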
// Pgpool-II version
var pgpoolVersionRegex = regexp.MustCompile(`^((\d+)(\.\d+)(\.\d+)?)`)
var version42 = semver.MustParse("4.2.0")
var pgpoolSemver semver.Version
func NewExporter(dsn string, namespace string) *Exporter {
db, err := getDBConn(dsn)
if err != nil {
level.Error(logger).Log("err", err)
os.Exit(1)
}
return &Exporter{
dsn: dsn,
namespace: namespace,
up: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "up",
Help: "Whether the Pgpool-II server is up (1 for yes, 0 for no).",
}),
duration: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "last_scrape_duration_seconds",
Help: "Duration of the last scrape of metrics from Pgpool-II.",
}),
totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Name: "scrapes_total",
Help: "Total number of times Pgpool-II has been scraped for metrics.",
}),
error: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "last_scrape_error",
Help: "Whether the last scrape of metrics from Pgpool-II resulted in an error (1 for error, 0 for success).",
}),
metricMap: makeDescMap(metricMaps, namespace),
db: db,
}
}
// Query within a namespace mapping and emit metrics. Returns fatal errors if
// the scrape fails, and a slice of errors if they were non-fatal.
func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace string, mapping MetricMapNamespace) ([]error, error) {
query := fmt.Sprintf("SHOW %s;", namespace)
// Don't fail on a bad scrape of one metric
rows, err := db.Query(query)
if err != nil {
return []error{}, errors.New(fmt.Sprintln("Error running query on database: ", namespace, err))
}
defer rows.Close()
var columnNames []string
columnNames, err = rows.Columns()
if err != nil {
return []error{}, errors.New(fmt.Sprintln("Error retrieving column list for: ", namespace, err))
}
// Make a lookup map for the column indices
var columnIdx = make(map[string]int, len(columnNames))
for i, n := range columnNames {
columnIdx[n] = i
}
var columnData = make([]interface{}, len(columnNames))
var scanArgs = make([]interface{}, len(columnNames))
for i := range columnData {
scanArgs[i] = &columnData[i]
}
nonfatalErrors := []error{}
// Read from the result of "SHOW pool_processes"
if namespace == "pool_processes" {
var frontend_total float64
var frontend_used float64
for rows.Next() {
err = rows.Scan(scanArgs...)
if err != nil {
return []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err))
}
frontend_total++
// Loop over column names to find currently connected backend database
for idx, columnName := range columnNames {
if columnName == "database" {
if valueDatabase, _ := dbToString(columnData[idx]); len(valueDatabase) != 0 {
frontend_used++
}
}
}
}
// Generate the metric for "pool_processes"
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(prometheus.BuildFQName("pgpool2", "", "frontend_total"), "Number of total child processed", nil, nil),
prometheus.GaugeValue,
frontend_total,
)
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(prometheus.BuildFQName("pgpool2", "", "frontend_used"), "Number of used child processes", nil, nil),
prometheus.GaugeValue,
frontend_used,
)
return nonfatalErrors, nil
}
for rows.Next() {
err = rows.Scan(scanArgs...)
if err != nil {
return []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err))
}
// Get the label values for this row.
labels := make([]string, len(mapping.labels))
for idx, label := range mapping.labels {
labels[idx], _ = dbToString(columnData[columnIdx[label]])
}
// Loop over column names, and match to scan data.
for idx, columnName := range columnNames {
if metricMapping, ok := mapping.columnMappings[columnName]; ok {
// Is this a metricy metric?
if metricMapping.discard {
continue
}
// If status column, convert string to int.
if columnName == "status" {
valueString, ok := dbToString(columnData[idx])
if !ok {
nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName, columnData[idx])))
continue
}
value := parseStatusField(valueString)
// Generate the metric
ch <- prometheus.MustNewConstMetric(metricMapping.desc, metricMapping.vtype, value, labels...)
continue
}
value, ok := dbToFloat64(columnData[idx])
if !ok {
nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName, columnData[idx])))
continue
}
// Generate the metric
ch <- prometheus.MustNewConstMetric(metricMapping.desc, metricMapping.vtype, value, labels...)
}
}
}
return nonfatalErrors, nil
}
// Establish a new DB connection using dsn.
func getDBConn(dsn string) (*sql.DB, error) {
db, err := sql.Open("postgres", dsn)
if err != nil {
return nil, err
}
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(1)
err = db.Ping()
if err != nil {
return nil, err
}
return db, nil
}
// Convert database/sql types to float64s for Prometheus consumption. Null types are mapped to NaN. string and []byte
// values are parsed as floats where possible; unparseable values are mapped to NaN and !ok.
func dbToFloat64(t interface{}) (float64, bool) {
switch v := t.(type) {
case int64:
return float64(v), true
case float64:
return v, true
case time.Time:
return float64(v.Unix()), true
case []byte:
// Try and convert to string and then parse to a float64
strV := string(v)
result, err := strconv.ParseFloat(strV, 64)
if err != nil {
return math.NaN(), false
}
return result, true
case string:
result, err := strconv.ParseFloat(v, 64)
if err != nil {
level.Error(logger).Log("msg", "Could not parse string", "err", err)
return math.NaN(), false
}
return result, true
case bool:
if v {
return 1.0, true
}
return 0.0, true
case nil:
return math.NaN(), true
default:
return math.NaN(), false
}
}
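// Illustrative examples of the conversions above, not part of the original
// source:
//
// dbToFloat64(int64(3)) -> 3.0, true
// dbToFloat64([]byte("1.5")) -> 1.5, true
// dbToFloat64("not-a-number") -> NaN, false
// dbToFloat64(nil) -> NaN, true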
// Convert database/sql types to string for Prometheus labels. Null types are mapped to empty strings.
func dbToString(t interface{}) (string, bool) {
switch v := t.(type) {
case int64:
return fmt.Sprintf("%v", v), true
case float64:
return fmt.Sprintf("%v", v), true
case time.Time:
return fmt.Sprintf("%v", v.Unix()), true
case nil:
return "", true
case []byte:
// Try and convert to string
return string(v), true
case string:
return v, true
case bool:
if v {
return "true", true
}
return "false", true
default:
return "", false
}
}
// Convert a Pgpool-II status string ("up", "waiting", "down", ...) to a float gauge value.
func parseStatusField(value string) float64 {
switch value {
case "true", "up", "waiting":
return 1.0
case "false", "unused", "down":
return 0.0
}
return 0.0
}
// Retrieve Pgpool-II version.
func queryVersion(db *sql.DB) (semver.Version, error) {
level.Debug(logger).Log("msg", "Querying Pgpool-II version")
versionRows, err := db.Query("SHOW POOL_VERSION;")
if err != nil {
return semver.Version{}, errors.New(fmt.Sprintln("Error querying SHOW POOL_VERSION:", err))
}
defer versionRows.Close()
var columnNames []string
columnNames, err = versionRows.Columns()
if err != nil {
return semver.Version{}, errors.New(fmt.Sprintln("Error retrieving column name for version:", err))
}
if len(columnNames) != 1 || columnNames[0] != "pool_version" {
return semver.Version{}, errors.New(fmt.Sprintln("Error returning Pgpool-II version:", err))
}
var pgpoolVersion string
for versionRows.Next() {
err := versionRows.Scan(&pgpoolVersion)
if err != nil {
return semver.Version{}, errors.New(fmt.Sprintln("Error retrieving SHOW POOL_VERSION rows:", err))
}
}
v := pgpoolVersionRegex.FindStringSubmatch(pgpoolVersion)
if len(v) > 1 {
level.Debug(logger).Log("pgpool_version", v[1])
return semver.ParseTolerant(v[1])
}
return semver.Version{}, errors.New(fmt.Sprintln("Error retrieving Pgpool-II version:", err))
}
// Iterate through all the namespace mappings in the exporter and run their queries.
func queryNamespaceMappings(ch chan<- prometheus.Metric, db *sql.DB, metricMap map[string]MetricMapNamespace) map[string]error {
// Return a map of namespace -> errors
namespaceErrors := make(map[string]error)
for namespace, mapping := range metricMap {
// pool_backend_stats and pool_health_check_stats cannot be used before Pgpool-II 4.2.
if namespace == "pool_backend_stats" || namespace == "pool_health_check_stats" {
if pgpoolSemver.LT(version42) {
continue
}
}
level.Debug(logger).Log("msg", "Querying namespace", "namespace", namespace)
nonFatalErrors, err := queryNamespaceMapping(ch, db, namespace, mapping)
// Serious error - a namespace disappeared
if err != nil {
namespaceErrors[namespace] = err
level.Info(logger).Log("msg", "namespace disappeard", "err", err)
}
// Non-serious errors - likely version or parsing problems.
if len(nonFatalErrors) > 0 {
for _, err := range nonFatalErrors {
level.Info(logger).Log("msg", "error parsing", "err", err.Error())
}
}
}
return namespaceErrors
}
// Describe implements prometheus.Collector.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
// We cannot know in advance what metrics the exporter will generate
// from Pgpool-II. So we use the poor man's describe method: Run a collect
// and send the descriptors of all the collected metrics. The problem
// here is that we need to connect to the Pgpool-II server. If it is currently
// unavailable, the descriptors will be incomplete. Since this is a
// stand-alone exporter and not used as a library within other code
// implementing additional metrics, the worst that can happen is that we
// don't detect inconsistent metrics created by this exporter
// itself. Also, a change in the monitored Pgpool-II instance may change the
// exported metrics during the runtime of the exporter.
metricCh := make(chan prometheus.Metric)
doneCh := make(chan struct{})
go func() {
for m := range metricCh {
ch <- m.Desc()
}
close(doneCh)
}()
e.Collect(metricCh)
close(metricCh)
<-doneCh
}
// Collect implements prometheus.Collector.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
e.scrape(ch)
ch <- e.duration
ch <- e.up
ch <- e.totalScrapes
ch <- e.error
}
func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
e.totalScrapes.Inc()
var err error
defer func(begun time.Time) {
e.duration.Set(time.Since(begun).Seconds())
if err == nil {
e.error.Set(0)
} else {
e.error.Set(1)
}
}(time.Now())
// Check connection availability and close the connection if it fails.
if err = e.db.Ping(); err != nil {
level.Error(logger).Log("msg", "Error pinging Pgpool-II", "err", err)
if cerr := e.db.Close(); cerr != nil {
level.Error(logger).Log("msg", "Error while closing non-pinging connection", "err", err)
}
level.Info(logger).Log("msg", "Reconnecting to Pgpool-II")
e.db, err = sql.Open("postgres", e.dsn)
e.db.SetMaxOpenConns(1)
e.db.SetMaxIdleConns(1)
if err = e.db.Ping(); err != nil {
level.Error(logger).Log("msg", "Error pinging Pgpool-II", "err", err)
if cerr := e.db.Close(); cerr != nil {
level.Error(logger).Log("msg", "Error while closing non-pinging connection", "err", err)
}
e.up.Set(0)
return
}
}
e.up.Set(1)
e.error.Set(0)
e.mutex.RLock()
defer e.mutex.RUnlock()
errMap := queryNamespaceMappings(ch, e.db, e.metricMap)
if len(errMap) > 0 {
level.Error(logger).Log("err", errMap)
e.error.Set(1)
}
}
// Turn the MetricMap column mapping into a prometheus descriptor mapping.
func makeDescMap(metricMaps map[string]map[string]ColumnMapping, namespace string) map[string]MetricMapNamespace {
var metricMap = make(map[string]MetricMapNamespace)
for metricNamespace, mappings := range metricMaps {
thisMap := make(map[string]MetricMap)
// Collect the variable label names (LABEL columns) for this namespace
var variableLabels []string
for columnName, columnMapping := range mappings {
if columnMapping.usage == LABEL {
variableLabels = append(variableLabels, columnName)
}
}
for columnName, columnMapping := range mappings {
// Determine how to convert the column based on its usage.
switch columnMapping.usage {
case DISCARD, LABEL:
thisMap[columnName] = MetricMap{
discard: true,
conversion: func(_ interface{}) (float64, bool) {
return math.NaN(), true
},
}
case COUNTER:
thisMap[columnName] = MetricMap{
vtype: prometheus.CounterValue,
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s_%s", namespace, metricNamespace, columnName), columnMapping.description, variableLabels, nil),
conversion: func(in interface{}) (float64, bool) {
return dbToFloat64(in)
},
}
case GAUGE:
thisMap[columnName] = MetricMap{
vtype: prometheus.GaugeValue,
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s_%s", namespace, metricNamespace, columnName), columnMapping.description, variableLabels, nil),
conversion: func(in interface{}) (float64, bool) {
return dbToFloat64(in)
},
}
}
}
metricMap[metricNamespace] = MetricMapNamespace{variableLabels, thisMap}
}
return metricMap
}
func main() {
promlogConfig := &promlog.Config{}
flag.AddFlags(kingpin.CommandLine, promlogConfig)
kingpin.Version(version.Print("pgpool2_exporter"))
kingpin.HelpFlag.Short('h')
kingpin.Parse()
dsn := os.Getenv("DATA_SOURCE_NAME")
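// Illustrative only, not part of the original source: with lib/pq the DSN can
// be given either as a URL or in keyword/value form, for example
// DATA_SOURCE_NAME="postgresql://user:password@localhost:9999/postgres?sslmode=disable";
// the actual value depends on how Pgpool-II is reached in your environment.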
exporter := NewExporter(dsn, namespace)
prometheus.MustRegister(exporter)
// Retrieve Pgpool-II version
v, err := queryVersion(exporter.db)
if err != nil {
level.Error(logger).Log("err", err)
}
pgpoolSemver = v
level.Info(logger).Log("msg", "Starting pgpool2_exporter", "version", version.Info())
level.Info(logger).Log("msg", "Listening on address", "address", *listenAddress)
http.Handle(*metricsPath, promhttp.Handler())
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(fmt.Sprintf(landingPage, *metricsPath)))
})
if err := http.ListenAndServe(*listenAddress, nil); err != nil {
level.Error(logger).Log("err", err)
os.Exit(1)
}
}
|
[
"\"DATA_SOURCE_NAME\""
] |
[] |
[
"DATA_SOURCE_NAME"
] |
[]
|
["DATA_SOURCE_NAME"]
|
go
| 1 | 0 | |
rllib/tests/test_rollout_worker.py
|
from collections import Counter
import gym
import numpy as np
import os
import random
import time
import unittest
import ray
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.evaluation.postprocessing import compute_advantages
from ray.rllib.examples.policy.random_policy import RandomPolicy
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.test_utils import check, framework_iterator
from ray.tune.registry import register_env
class MockPolicy(RandomPolicy):
@override(RandomPolicy)
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
explore=None,
timestep=None,
**kwargs):
return np.array([random.choice([0, 1])] * len(obs_batch)), [], {}
@override(Policy)
def postprocess_trajectory(self,
batch,
other_agent_batches=None,
episode=None):
assert episode is not None
super().postprocess_trajectory(batch, other_agent_batches, episode)
return compute_advantages(
batch, 100.0, 0.9, use_gae=False, use_critic=False)
class BadPolicy(RandomPolicy):
@override(RandomPolicy)
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
explore=None,
timestep=None,
**kwargs):
raise Exception("intentional error")
class FailOnStepEnv(gym.Env):
def __init__(self):
self.observation_space = gym.spaces.Discrete(1)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
raise ValueError("kaboom")
def step(self, action):
raise ValueError("kaboom")
class MockEnv(gym.Env):
def __init__(self, episode_length, config=None):
self.episode_length = episode_length
self.config = config
self.i = 0
self.observation_space = gym.spaces.Discrete(1)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
self.i = 0
return self.i
def step(self, action):
self.i += 1
return 0, 1, self.i >= self.episode_length, {}
class MockEnv2(gym.Env):
def __init__(self, episode_length):
self.episode_length = episode_length
self.i = 0
self.observation_space = gym.spaces.Discrete(100)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
self.i = 0
return self.i
def step(self, action):
self.i += 1
return self.i, 100, self.i >= self.episode_length, {}
class MockVectorEnv(VectorEnv):
def __init__(self, episode_length, num_envs):
super().__init__(
observation_space=gym.spaces.Discrete(1),
action_space=gym.spaces.Discrete(2),
num_envs=num_envs)
self.envs = [MockEnv(episode_length) for _ in range(num_envs)]
@override(VectorEnv)
def vector_reset(self):
return [e.reset() for e in self.envs]
@override(VectorEnv)
def reset_at(self, index):
return self.envs[index].reset()
@override(VectorEnv)
def vector_step(self, actions):
obs_batch, rew_batch, done_batch, info_batch = [], [], [], []
for i in range(len(self.envs)):
obs, rew, done, info = self.envs[i].step(actions[i])
obs_batch.append(obs)
rew_batch.append(rew)
done_batch.append(done)
info_batch.append(info)
return obs_batch, rew_batch, done_batch, info_batch
@override(VectorEnv)
def get_unwrapped(self):
return self.envs
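# Illustrative note, not part of the original test file: MockVectorEnv wraps a
# list of MockEnv instances so a RolloutWorker can step all sub-environments in
# lockstep; vector_step fans one action out per sub-env and collects the
# per-env (obs, reward, done, info) tuples.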
class TestRolloutWorker(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init(num_cpus=5)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_basic(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"), policy=MockPolicy)
batch = ev.sample()
for key in [
"obs", "actions", "rewards", "dones", "advantages",
"prev_rewards", "prev_actions"
]:
self.assertIn(key, batch)
self.assertGreater(np.abs(np.mean(batch[key])), 0)
def to_prev(vec):
out = np.zeros_like(vec)
for i, v in enumerate(vec):
if i + 1 < len(out) and not batch["dones"][i]:
out[i + 1] = v
return out.tolist()
self.assertEqual(batch["prev_rewards"].tolist(),
to_prev(batch["rewards"]))
self.assertEqual(batch["prev_actions"].tolist(),
to_prev(batch["actions"]))
self.assertGreater(batch["advantages"][0], 1)
ev.stop()
def test_batch_ids(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=MockPolicy,
rollout_fragment_length=1)
batch1 = ev.sample()
batch2 = ev.sample()
self.assertEqual(len(set(batch1["unroll_id"])), 1)
self.assertEqual(len(set(batch2["unroll_id"])), 1)
self.assertEqual(
len(set(SampleBatch.concat(batch1, batch2)["unroll_id"])), 2)
ev.stop()
def test_global_vars_update(self):
# Allow for Unittest run.
ray.init(num_cpus=5, ignore_reinit_error=True)
for fw in framework_iterator(frameworks=()):
agent = A2CTrainer(
env="CartPole-v0",
config={
"num_workers": 1,
"lr_schedule": [[0, 0.1], [100000, 0.000001]],
"framework": fw,
})
result = agent.train()
for i in range(10):
result = agent.train()
print("num_steps_sampled={}".format(
result["info"]["num_steps_sampled"]))
print("num_steps_trained={}".format(
result["info"]["num_steps_trained"]))
print("num_steps_sampled={}".format(
result["info"]["num_steps_sampled"]))
print("num_steps_trained={}".format(
result["info"]["num_steps_trained"]))
if i == 0:
self.assertGreater(
result["info"]["learner"]["default_policy"]["cur_lr"],
0.01)
if result["info"]["learner"]["default_policy"]["cur_lr"] < \
0.07:
break
self.assertLess(
result["info"]["learner"]["default_policy"]["cur_lr"], 0.07)
agent.stop()
def test_no_step_on_init(self):
register_env("fail", lambda _: FailOnStepEnv())
for fw in framework_iterator(frameworks=()):
pg = PGTrainer(
env="fail", config={
"num_workers": 1,
"framework": fw,
})
self.assertRaises(Exception, lambda: pg.train())
pg.stop()
def test_callbacks(self):
for fw in framework_iterator(frameworks=("torch", "tf")):
counts = Counter()
pg = PGTrainer(
env="CartPole-v0", config={
"num_workers": 0,
"rollout_fragment_length": 50,
"train_batch_size": 50,
"callbacks": {
"on_episode_start":
lambda x: counts.update({"start": 1}),
"on_episode_step":
lambda x: counts.update({"step": 1}),
"on_episode_end": lambda x: counts.update({"end": 1}),
"on_sample_end":
lambda x: counts.update({"sample": 1}),
},
"framework": fw,
})
pg.train()
pg.train()
self.assertGreater(counts["sample"], 0)
self.assertGreater(counts["start"], 0)
self.assertGreater(counts["end"], 0)
self.assertGreater(counts["step"], 0)
pg.stop()
def test_query_evaluators(self):
register_env("test", lambda _: gym.make("CartPole-v0"))
for fw in framework_iterator(frameworks=("torch", "tf")):
pg = PGTrainer(
env="test",
config={
"num_workers": 2,
"rollout_fragment_length": 5,
"num_envs_per_worker": 2,
"framework": fw,
})
results = pg.workers.foreach_worker(
lambda ev: ev.rollout_fragment_length)
results2 = pg.workers.foreach_worker_with_index(
lambda ev, i: (i, ev.rollout_fragment_length))
results3 = pg.workers.foreach_worker(
lambda ev: ev.foreach_env(lambda env: 1))
self.assertEqual(results, [10, 10, 10])
self.assertEqual(results2, [(0, 10), (1, 10), (2, 10)])
self.assertEqual(results3, [[1, 1], [1, 1], [1, 1]])
pg.stop()
def test_action_clipping(self):
from ray.rllib.examples.env.random_env import RandomEnv
action_space = gym.spaces.Box(-2.0, 1.0, (3,))
# Clipping: True (clip between Policy's action_space.low/high),
ev = RolloutWorker(
env_creator=lambda _: RandomEnv(config=dict(
action_space=action_space,
max_episode_len=10,
p_done=0.0,
check_action_bounds=True,
)),
policy=RandomPolicy,
policy_config=dict(
action_space=action_space,
ignore_action_bounds=True,
),
clip_actions=True,
batch_mode="complete_episodes")
sample = ev.sample()
# Check, whether the action bounds have been breached (expected).
# We still arrived here b/c we clipped according to the Env's action
# space.
self.assertGreater(np.max(sample["actions"]), action_space.high[0])
self.assertLess(np.min(sample["actions"]), action_space.low[0])
ev.stop()
# Clipping: False and RandomPolicy produces invalid actions.
# Expect Env to complain.
ev2 = RolloutWorker(
env_creator=lambda _: RandomEnv(config=dict(
action_space=action_space,
max_episode_len=10,
p_done=0.0,
check_action_bounds=True,
)),
policy=RandomPolicy,
policy_config=dict(
action_space=action_space,
ignore_action_bounds=True,
),
clip_actions=False, # <- should lead to Env complaining
batch_mode="complete_episodes")
self.assertRaisesRegex(ValueError, r"Illegal action", ev2.sample)
ev2.stop()
# Clipping: False and RandomPolicy produces valid (bounded) actions.
# Expect "actions" in SampleBatch to be unclipped.
ev3 = RolloutWorker(
env_creator=lambda _: RandomEnv(config=dict(
action_space=action_space,
max_episode_len=10,
p_done=0.0,
check_action_bounds=True,
)),
policy=RandomPolicy,
policy_config=dict(action_space=action_space),
# Should not be a problem as RandomPolicy abides to bounds.
clip_actions=False,
batch_mode="complete_episodes")
sample = ev3.sample()
self.assertGreater(np.min(sample["actions"]), action_space.low[0])
self.assertLess(np.max(sample["actions"]), action_space.high[0])
ev3.stop()
def test_reward_clipping(self):
# Clipping: True (clip between -1.0 and 1.0).
ev = RolloutWorker(
env_creator=lambda _: MockEnv2(episode_length=10),
policy=MockPolicy,
clip_rewards=True,
batch_mode="complete_episodes")
self.assertEqual(max(ev.sample()["rewards"]), 1)
result = collect_metrics(ev, [])
self.assertEqual(result["episode_reward_mean"], 1000)
ev.stop()
from ray.rllib.examples.env.random_env import RandomEnv
# Clipping in certain range (-2.0, 2.0).
ev2 = RolloutWorker(
env_creator=lambda _: RandomEnv(
dict(
reward_space=gym.spaces.Box(low=-10, high=10, shape=()),
p_done=0.0,
max_episode_len=10,
)),
policy=MockPolicy,
clip_rewards=2.0,
batch_mode="complete_episodes")
sample = ev2.sample()
self.assertEqual(max(sample["rewards"]), 2.0)
self.assertEqual(min(sample["rewards"]), -2.0)
self.assertLess(np.mean(sample["rewards"]), 0.5)
self.assertGreater(np.mean(sample["rewards"]), -0.5)
ev2.stop()
# Clipping: Off.
ev2 = RolloutWorker(
env_creator=lambda _: MockEnv2(episode_length=10),
policy=MockPolicy,
clip_rewards=False,
batch_mode="complete_episodes")
self.assertEqual(max(ev2.sample()["rewards"]), 100)
result2 = collect_metrics(ev2, [])
self.assertEqual(result2["episode_reward_mean"], 1000)
ev2.stop()
def test_hard_horizon(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv2(episode_length=10),
policy=MockPolicy,
batch_mode="complete_episodes",
rollout_fragment_length=10,
episode_horizon=4,
soft_horizon=False)
samples = ev.sample()
# Three logical episodes and correct episode resets (always after 4
# steps).
self.assertEqual(len(set(samples["eps_id"])), 3)
for i in range(4):
self.assertEqual(np.argmax(samples["obs"][i]), i)
self.assertEqual(np.argmax(samples["obs"][4]), 0)
# 3 done values.
self.assertEqual(sum(samples["dones"]), 3)
ev.stop()
# The Trainer's horizon (6) is smaller than the gym env's max_episode_steps (200 for CartPole-v0).
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=MockPolicy,
batch_mode="complete_episodes",
rollout_fragment_length=10,
episode_horizon=6,
soft_horizon=False)
samples = ev.sample()
# 12 steps due to `complete_episodes` batch_mode.
self.assertEqual(len(samples["eps_id"]), 12)
# Two logical episodes and correct episode resets (always after 6(!)
# steps).
self.assertEqual(len(set(samples["eps_id"])), 2)
# 2 done values after 6 and 12 steps.
check(samples["dones"], [
False, False, False, False, False, True, False, False, False,
False, False, True
])
ev.stop()
def test_soft_horizon(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(episode_length=10),
policy=MockPolicy,
batch_mode="complete_episodes",
rollout_fragment_length=10,
episode_horizon=4,
soft_horizon=True)
samples = ev.sample()
# three logical episodes
self.assertEqual(len(set(samples["eps_id"])), 3)
# only 1 hard done value
self.assertEqual(sum(samples["dones"]), 1)
ev.stop()
def test_metrics(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(episode_length=10),
policy=MockPolicy,
batch_mode="complete_episodes")
remote_ev = RolloutWorker.as_remote().remote(
env_creator=lambda _: MockEnv(episode_length=10),
policy=MockPolicy,
batch_mode="complete_episodes")
ev.sample()
ray.get(remote_ev.sample.remote())
result = collect_metrics(ev, [remote_ev])
self.assertEqual(result["episodes_this_iter"], 20)
self.assertEqual(result["episode_reward_mean"], 10)
ev.stop()
def test_async(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
sample_async=True,
policy=MockPolicy)
batch = ev.sample()
for key in ["obs", "actions", "rewards", "dones", "advantages"]:
self.assertIn(key, batch)
self.assertGreater(batch["advantages"][0], 1)
ev.stop()
def test_auto_vectorization(self):
ev = RolloutWorker(
env_creator=lambda cfg: MockEnv(episode_length=20, config=cfg),
policy=MockPolicy,
batch_mode="truncate_episodes",
rollout_fragment_length=2,
num_envs=8)
for _ in range(8):
batch = ev.sample()
self.assertEqual(batch.count, 16)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 0)
for _ in range(8):
batch = ev.sample()
self.assertEqual(batch.count, 16)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 8)
indices = []
for env in ev.async_env.vector_env.envs:
self.assertEqual(env.unwrapped.config.worker_index, 0)
indices.append(env.unwrapped.config.vector_index)
self.assertEqual(indices, [0, 1, 2, 3, 4, 5, 6, 7])
ev.stop()
def test_batches_larger_when_vectorized(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(episode_length=8),
policy=MockPolicy,
batch_mode="truncate_episodes",
rollout_fragment_length=4,
num_envs=4)
batch = ev.sample()
self.assertEqual(batch.count, 16)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 0)
batch = ev.sample()
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 4)
ev.stop()
def test_vector_env_support(self):
ev = RolloutWorker(
env_creator=lambda _: MockVectorEnv(episode_length=20, num_envs=8),
policy=MockPolicy,
batch_mode="truncate_episodes",
rollout_fragment_length=10)
for _ in range(8):
batch = ev.sample()
self.assertEqual(batch.count, 10)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 0)
for _ in range(8):
batch = ev.sample()
self.assertEqual(batch.count, 10)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 8)
ev.stop()
def test_truncate_episodes(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(10),
policy=MockPolicy,
rollout_fragment_length=15,
batch_mode="truncate_episodes")
batch = ev.sample()
self.assertEqual(batch.count, 15)
ev.stop()
def test_complete_episodes(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(10),
policy=MockPolicy,
rollout_fragment_length=5,
batch_mode="complete_episodes")
batch = ev.sample()
self.assertEqual(batch.count, 10)
ev.stop()
def test_complete_episodes_packing(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(10),
policy=MockPolicy,
rollout_fragment_length=15,
batch_mode="complete_episodes")
batch = ev.sample()
self.assertEqual(batch.count, 20)
self.assertEqual(
batch["t"].tolist(),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
ev.stop()
def test_filter_sync(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=MockPolicy,
sample_async=True,
observation_filter="ConcurrentMeanStdFilter")
time.sleep(2)
ev.sample()
filters = ev.get_filters(flush_after=True)
obs_f = filters[DEFAULT_POLICY_ID]
self.assertNotEqual(obs_f.rs.n, 0)
self.assertNotEqual(obs_f.buffer.n, 0)
ev.stop()
def test_get_filters(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=MockPolicy,
sample_async=True,
observation_filter="ConcurrentMeanStdFilter")
self.sample_and_flush(ev)
filters = ev.get_filters(flush_after=False)
time.sleep(2)
filters2 = ev.get_filters(flush_after=False)
obs_f = filters[DEFAULT_POLICY_ID]
obs_f2 = filters2[DEFAULT_POLICY_ID]
self.assertGreaterEqual(obs_f2.rs.n, obs_f.rs.n)
self.assertGreaterEqual(obs_f2.buffer.n, obs_f.buffer.n)
ev.stop()
def test_sync_filter(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=MockPolicy,
sample_async=True,
observation_filter="ConcurrentMeanStdFilter")
obs_f = self.sample_and_flush(ev)
# Current State
filters = ev.get_filters(flush_after=False)
obs_f = filters[DEFAULT_POLICY_ID]
self.assertLessEqual(obs_f.buffer.n, 20)
new_obsf = obs_f.copy()
new_obsf.rs._n = 100
ev.sync_filters({DEFAULT_POLICY_ID: new_obsf})
filters = ev.get_filters(flush_after=False)
obs_f = filters[DEFAULT_POLICY_ID]
self.assertGreaterEqual(obs_f.rs.n, 100)
self.assertLessEqual(obs_f.buffer.n, 20)
ev.stop()
def test_extra_python_envs(self):
extra_envs = {"env_key_1": "env_value_1", "env_key_2": "env_value_2"}
self.assertFalse("env_key_1" in os.environ)
self.assertFalse("env_key_2" in os.environ)
ev = RolloutWorker(
env_creator=lambda _: MockEnv(10),
policy=MockPolicy,
extra_python_environs=extra_envs)
self.assertTrue("env_key_1" in os.environ)
self.assertTrue("env_key_2" in os.environ)
ev.stop()
# reset to original
del os.environ["env_key_1"]
del os.environ["env_key_2"]
def sample_and_flush(self, ev):
time.sleep(2)
ev.sample()
filters = ev.get_filters(flush_after=True)
obs_f = filters[DEFAULT_POLICY_ID]
self.assertNotEqual(obs_f.rs.n, 0)
self.assertNotEqual(obs_f.buffer.n, 0)
return obs_f
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
[] |
[] |
[
"env_key_2",
"env_key_1"
] |
[]
|
["env_key_2", "env_key_1"]
|
python
| 2 | 0 | |
trainer/task.py
|
"""A simple main file to showcase the template."""
import logging
import os
import sys
from typing import List, Dict, Union
from urllib.parse import urlparse
import tensorflow as tf
import tensorflow_transform as tft
from google.cloud import storage
from google.cloud.storage import Blob
from hypertune import hypertune
from keras.layers import TextVectorization
from keras import layers
from keras import activations
from keras import models
from keras import losses
from keras import metrics
from keras.optimizer_v2.rmsprop import RMSProp
def read_dataset(filenames: List[str], feature_spec, batch_size) -> tf.data.TFRecordDataset:
dataset: tf.data.TFRecordDataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(lambda r: tf.io.parse_single_example(r, feature_spec))
dataset = dataset.map(lambda d: (d['text'], d['target']))
dataset = dataset.batch(batch_size=batch_size)
return dataset
def get_filename_list(file_pattern: str) -> List[str]:
sc = storage.Client()
url_parts = urlparse(file_pattern)
bucket_name = url_parts.hostname
location = url_parts.path[1:]
output: List[Blob] = sc.list_blobs(bucket_name, prefix=location)
paths: List[str] = [f"gs://{b.bucket.name}/{b.name}" for b in output]
return paths
def build_model():
inputs = layers.Input(shape=(20000,))
x = layers.Dense(256, activation=activations.relu)(inputs)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation=activations.sigmoid)(x)
model = models.Model(inputs, outputs, name="my-first-model")
model.compile(optimizer=RMSProp(), loss=losses.binary_crossentropy, metrics=[metrics.binary_accuracy])
return model
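# Illustrative note, not part of the original file: the 20000-unit input layer
# above matches the TextVectorization layer used below (max_tokens=20000,
# output_mode="multi_hot", ngrams=2), so each example is fed in as a
# 20000-dimensional multi-hot bag-of-bigrams vector.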
def train_and_evaluate(data_location: str,
tft_location: str,
batch_size: int,
epochs: int):
train_location = os.path.join(data_location, "train/")
test_location = os.path.join(data_location, "test/")
tft_output = tft.TFTransformOutput(tft_location)
feature_spec = tft_output.transformed_feature_spec()
filenames_train = get_filename_list(train_location)
filenames_test = get_filename_list(test_location)
train_ds: tf.data.TFRecordDataset = read_dataset(filenames_train, feature_spec, batch_size)
test_ds: tf.data.TFRecordDataset = read_dataset(filenames_test, feature_spec, batch_size)
x_train_text = train_ds.map(lambda text, target: text)
vectorizer = TextVectorization(ngrams=2, max_tokens=20000, output_mode="multi_hot")
vectorizer.adapt(x_train_text)
train_ds = train_ds.map(lambda text, target: (vectorizer(text), target))
test_ds = test_ds.map(lambda text, target: (vectorizer(text), target))
model = build_model()
model.summary(print_fn=logging.info)
model.fit(train_ds, epochs=epochs, validation_data=test_ds)
loss, acc = model.evaluate(test_ds)
logging.info(f"LOSS: {loss:.4f}")
logging.info(f"ACC: {acc:.4f}")
metric_tag = "kschool_accuracy"
ht = hypertune.HyperTune()
ht.report_hyperparameter_tuning_metric(hyperparameter_metric_tag=metric_tag,
metric_value=acc,
global_step=epochs)
model_dir = os.environ.get('AIP_MODEL_DIR')
model.save(model_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data-location', default=None, required=True)
parser.add_argument('--tft-location', required=True)
parser.add_argument('--batch-size', required=True, type=int)
parser.add_argument('--epochs', required=True, type=int)
args = parser.parse_args()
loglevel = 'INFO'
logging.basicConfig(stream=sys.stdout, level=loglevel)
train_and_evaluate(args.data_location,
args.tft_location,
args.batch_size,
args.epochs)
|
[] |
[] |
[
"AIP_MODEL_DIR"
] |
[]
|
["AIP_MODEL_DIR"]
|
python
| 1 | 0 | |
src/test/java/com/gebiz/AbstractViewTest.java
|
package com.gebiz;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.openqa.selenium.By;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.chrome.ChromeDriver;
import com.vaadin.flow.theme.AbstractTheme;
import com.vaadin.testbench.ScreenshotOnFailureRule;
import com.vaadin.testbench.TestBench;
import com.vaadin.testbench.parallel.ParallelTest;
/**
* Base class for TestBench IntegrationTests on chrome.
* <p>
* The tests use Chrome driver (see pom.xml for integration-tests profile) to
* run integration tests on a headless Chrome. If a property
* {@code test.use.hub} is set to true, {@code AbstractViewTest} will assume
* that the TestBench test is running in a CI environment. In order to keep
* this class light, it makes certain assumptions about the CI environment
* (such as available environment variables). It is not advisable to use this
* class as a base class for your own TestBench tests.
* <p>
* To learn more about TestBench, visit
* <a href="https://vaadin.com/docs/v10/testbench/testbench-overview.html">Vaadin TestBench</a>.
*/
public abstract class AbstractViewTest extends ParallelTest {
private static final int SERVER_PORT = 8080;
private final String route;
private final By rootSelector;
@Rule
public ScreenshotOnFailureRule rule = new ScreenshotOnFailureRule(this,
false);
public AbstractViewTest() {
this("", By.tagName("body"));
}
protected AbstractViewTest(String route, By rootSelector) {
this.route = route;
this.rootSelector = rootSelector;
}
@Before
public void setup() throws Exception {
if (isUsingHub()) {
super.setup();
} else {
setDriver(TestBench.createDriver(new ChromeDriver()));
}
getDriver().get(getURL(route));
}
/**
* Convenience method for getting the root element of the view based on
* the selector passed to the constructor.
*
* @return the root element
*/
protected WebElement getRootElement() {
return findElement(rootSelector);
}
/**
* Asserts that the given {@code element} is rendered using a theme
* identified by {@code themeClass}. If the theme is not found, JUnit
* assert will fail the test case.
*
* @param element web element to check for the theme
* @param themeClass theme class (such as {@code Lumo.class})
*/
protected void assertThemePresentOnElement(
WebElement element, Class<? extends AbstractTheme> themeClass) {
String themeName = themeClass.getSimpleName().toLowerCase();
Boolean hasStyle = (Boolean) executeScript("" +
"var styles = Array.from(arguments[0]._template.content" +
".querySelectorAll('style'))" +
".filter(style => style.textContent.indexOf('" +
themeName + "') > -1);" +
"return styles.length > 0;", element);
Assert.assertTrue("Element '" + element.getTagName() + "' should have" +
" had theme '" + themeClass.getSimpleName() + "'.",
hasStyle);
}
/**
* Property set to true when running on a test hub.
*/
private static final String USE_HUB_PROPERTY = "test.use.hub";
/**
* Returns deployment host name concatenated with route.
*
* @return URL to route
*/
private static String getURL(String route) {
return String.format("http://%s:%d/%s", getDeploymentHostname(),
SERVER_PORT, route);
}
/**
* Returns whether we are using a test hub. This means that the starter
* is running tests in Vaadin's CI environment, and uses TestBench to
* connect to the testing hub.
*
* @return whether we are using a test hub
*/
private static boolean isUsingHub() {
return Boolean.TRUE.toString().equals(
System.getProperty(USE_HUB_PROPERTY));
}
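/*
* Illustrative only, not part of the original class: hub mode is typically
* switched on from the command line, e.g. "mvn verify -Dtest.use.hub=true",
* while local runs fall back to a plain ChromeDriver.
*/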
/**
* If running on CI, get the host name from environment variable HOSTNAME
*
* @return the host name
*/
private static String getDeploymentHostname() {
return isUsingHub() ? System.getenv("HOSTNAME") : "localhost";
}
}
|
[
"\"HOSTNAME\""
] |
[] |
[
"HOSTNAME"
] |
[]
|
["HOSTNAME"]
|
java
| 1 | 0 | |
test/integration/stan/stan_test.go
|
package stan
import (
"context"
"github.com/nats-io/nats.go"
"github.com/nats-io/stan.go"
"os"
"testing"
ce_stan "github.com/cloudevents/sdk-go/protocol/stan/v2"
"github.com/cloudevents/sdk-go/v2/binding"
"github.com/cloudevents/sdk-go/v2/event"
bindings "github.com/cloudevents/sdk-go/v2/protocol"
"github.com/cloudevents/sdk-go/v2/protocol/test"
. "github.com/cloudevents/sdk-go/v2/test"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
. "github.com/cloudevents/sdk-go/v2/binding/test"
)
const (
TEST_CLUSTER_ID = "test-cluster"
TEST_CLIENT_ID = "my-client"
)
func TestSendStructuredMessageToStructured(t *testing.T) {
conn := testConn(t)
defer conn.Close()
type args struct {
opts []ce_stan.ProtocolOption
}
tests := []struct {
name string
args args
}{
{
name: "regular subscriber",
args: args{
opts: []ce_stan.ProtocolOption{
ce_stan.WithConsumerOptions(
ce_stan.WithSubscriptionOptions(
stan.StartAtSequence(0),
),
),
},
},
}, {
name: "queue subscriber",
args: args{
opts: []ce_stan.ProtocolOption{
ce_stan.WithConsumerOptions(
ce_stan.WithQueueSubscriber(uuid.New().String()),
ce_stan.WithSubscriptionOptions(
stan.StartAtSequence(0),
),
),
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cleanup, s, r := testProtocol(t, conn, tt.args.opts...)
defer cleanup()
EachEvent(t, Events(), func(t *testing.T, eventIn event.Event) {
eventIn = ConvertEventExtensionsToString(t, eventIn)
in := MustCreateMockStructuredMessage(t, eventIn)
test.SendReceive(t, binding.WithPreferredEventEncoding(context.TODO(), binding.EncodingStructured), in, s, r, func(out binding.Message) {
eventOut := MustToEvent(t, context.Background(), out)
assert.Equal(t, binding.EncodingStructured, out.ReadEncoding())
AssertEventEquals(t, eventIn, ConvertEventExtensionsToString(t, eventOut))
})
})
})
}
}
func testConn(t testing.TB) *nats.Conn {
t.Helper()
// STAN connections actually connect to NATS, so the env var is named appropriately
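// Override the default with e.g. TEST_NATS_SERVER=nats://my-nats:4222 (host name is illustrative)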
s := os.Getenv("TEST_NATS_SERVER")
if s == "" {
s = "nats://localhost:4222"
}
conn, err := nats.Connect(s)
if err != nil {
t.Skipf("Cannot create STAN client to NATS server [%s]: %v", s, err)
}
return conn
}
func testProtocol(t testing.TB, natsConn *nats.Conn, opts ...ce_stan.ProtocolOption) (func(), bindings.Sender,
bindings.Receiver) {
subject := "test-ce-client-" + uuid.New().String()
// use NewProtocol rather than individual Consumer and Sender since this gives us more coverage
p, err := ce_stan.NewProtocol(TEST_CLUSTER_ID, TEST_CLIENT_ID, subject, subject, ce_stan.StanOptions(stan.NatsConn(natsConn)), opts...)
require.NoError(t, err)
go func() {
require.NoError(t, p.OpenInbound(context.TODO()))
}()
return func() {
err = p.Close(context.TODO())
require.NoError(t, err)
}, p.Sender, p.Consumer
}
func BenchmarkSendReceive(b *testing.B) {
conn := testConn(b)
defer conn.Close()
c, s, r := testProtocol(b, conn)
defer c() // Cleanup
test.BenchmarkSendReceive(b, s, r)
}
|
[
"\"TEST_NATS_SERVER\""
] |
[] |
[
"TEST_NATS_SERVER"
] |
[]
|
["TEST_NATS_SERVER"]
|
go
| 1 | 0 | |
cmd/mattermost/commands/server.go
|
// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package commands
import (
"net"
"os"
"os/signal"
"syscall"
"github.com/mattermost/mattermost-server/api4"
"github.com/mattermost/mattermost-server/app"
"github.com/mattermost/mattermost-server/config"
"github.com/mattermost/mattermost-server/manualtesting"
"github.com/mattermost/mattermost-server/mlog"
"github.com/mattermost/mattermost-server/utils"
"github.com/mattermost/mattermost-server/web"
"github.com/mattermost/mattermost-server/wsapi"
"github.com/mattermost/viper"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var serverCmd = &cobra.Command{
Use: "server",
Short: "Run the Mattermost server",
RunE: serverCmdF,
SilenceUsage: true,
}
func init() {
RootCmd.AddCommand(serverCmd)
RootCmd.RunE = serverCmdF
}
func serverCmdF(command *cobra.Command, args []string) error {
configDSN := viper.GetString("config")
disableConfigWatch, _ := command.Flags().GetBool("disableconfigwatch")
usedPlatform, _ := command.Flags().GetBool("platform")
interruptChan := make(chan os.Signal, 1)
if err := utils.TranslationsPreInit(); err != nil {
return errors.Wrapf(err, "unable to load Mattermost translation files")
}
configStore, err := config.NewStore(configDSN, !disableConfigWatch)
if err != nil {
return err
}
return runServer(configStore, disableConfigWatch, usedPlatform, interruptChan)
}
func runServer(configStore config.Store, disableConfigWatch bool, usedPlatform bool, interruptChan chan os.Signal) error {
options := []app.Option{
app.ConfigStore(configStore),
app.RunJobs,
app.JoinCluster,
app.StartElasticsearch,
app.StartMetrics,
}
server, err := app.NewServer(options...)
if err != nil {
mlog.Critical(err.Error())
return err
}
defer server.Shutdown()
if usedPlatform {
mlog.Error("The platform binary has been deprecated, please switch to using the mattermost binary.")
}
serverErr := server.Start()
if serverErr != nil {
mlog.Critical(serverErr.Error())
return serverErr
}
api := api4.Init(server, server.AppOptions, server.Router)
wsapi.Init(server.FakeApp(), server.WebSocketRouter)
web.New(server, server.AppOptions, server.Router)
// If we allow testing then listen for manual testing URL hits
if *server.Config().ServiceSettings.EnableTesting {
manualtesting.Init(api)
}
notifyReady()
// wait for kill signal before attempting to gracefully shutdown
// the running service
signal.Notify(interruptChan, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
<-interruptChan
return nil
}
func notifyReady() {
// If the environment vars provide a systemd notification socket,
// notify systemd that the server is ready.
systemdSocket := os.Getenv("NOTIFY_SOCKET")
if systemdSocket != "" {
mlog.Info("Sending systemd READY notification.")
err := sendSystemdReadyNotification(systemdSocket)
if err != nil {
mlog.Error(err.Error())
}
}
}
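// sendSystemdReadyNotification performs the minimal sd_notify(3) handshake:
// it writes "READY=1" to the unix datagram socket systemd exposes via NOTIFY_SOCKET.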
func sendSystemdReadyNotification(socketPath string) error {
msg := "READY=1"
addr := &net.UnixAddr{
Name: socketPath,
Net: "unixgram",
}
conn, err := net.DialUnix(addr.Net, nil, addr)
if err != nil {
return err
}
defer conn.Close()
_, err = conn.Write([]byte(msg))
return err
}
|
[
"\"NOTIFY_SOCKET\""
] |
[] |
[
"NOTIFY_SOCKET"
] |
[]
|
["NOTIFY_SOCKET"]
|
go
| 1 | 0 | |
appstate/state.go
|
package appstate
import (
"fmt"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
yaml "gopkg.in/yaml.v2"
)
var (
// BrokenMode signals if the application is broken
BrokenMode bool
// AvailableCameras lists the indexes of detected cameras
AvailableCameras []int
// Rootdir is the root path of the executable, used to reach assets
Rootdir string
// Datadir is the writable storage path
Datadir string
)
const brokenversion = "2.0alpha1"
type versionYaml struct {
Version string `yaml:"version"`
}
func init() {
// Set main set of directories
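// Under snap confinement SNAP points at the read-only install directory and
// SNAP_DATA at the writable data directory; when they are unset (e.g. running
// from a plain checkout) we fall back to paths relative to the executable.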
var err error
Rootdir = os.Getenv("SNAP")
if Rootdir == "" {
if Rootdir, err = filepath.Abs(path.Join(filepath.Dir(os.Args[0]), "..")); err != nil {
log.Fatal(err)
}
}
Datadir = os.Getenv("SNAP_DATA")
if Datadir == "" {
Datadir = Rootdir
}
}
// CheckIfBroken checks and set if app is in broken state (when matching brokenversion)
func CheckIfBroken(rootdir string) {
yamlc := versionYaml{}
yamlfile := path.Join(rootdir, "meta", "snap.yaml")
// load settings
dat, err := ioutil.ReadFile(yamlfile)
if err != nil {
// no file available: can be run from trunk
fmt.Println("Couldn't open", yamlfile, ". Probably running from master, set the app as functionning.")
return
}
if err = yaml.Unmarshal(dat, &yamlc); err != nil {
fmt.Println("Couldn't unserialized snap yaml from", yamlc, ". Setting the app as functionning.")
return
}
if yamlc.Version == brokenversion {
fmt.Println("Broken version running (", brokenversion, "). Set the app property as being broken.")
BrokenMode = true
}
}
|
[
"\"SNAP\"",
"\"SNAP_DATA\""
] |
[] |
[
"SNAP",
"SNAP_DATA"
] |
[]
|
["SNAP", "SNAP_DATA"]
|
go
| 2 | 0 | |
all-core/gl/procaddr.go
|
// Code generated by glow (https://github.com/go-gl/glow). DO NOT EDIT.
// This file implements GlowGetProcAddress for every supported platform. The
// correct version is chosen automatically based on build tags:
//
// windows: WGL
// darwin: CGL
// linux freebsd: GLX
//
// Use of EGL instead of the platform's default (listed above) is made possible
// via the "egl" build tag.
//
// It is also possible to install your own function outside this package for
// retrieving OpenGL function pointers, to do this see InitWithProcAddrFunc.
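// For example (illustrative), to force the EGL path on Linux:
//   go build -tags egl ./...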
package gl
/*
#cgo windows CFLAGS: -DTAG_WINDOWS
#cgo windows LDFLAGS: -lopengl32
#cgo darwin CFLAGS: -DTAG_DARWIN
#cgo darwin LDFLAGS: -framework OpenGL
#cgo linux freebsd CFLAGS: -DTAG_POSIX
#cgo !egl,linux !egl,freebsd pkg-config: gl
#cgo egl,linux egl,freebsd CFLAGS: -DTAG_EGL
#cgo egl,linux egl,freebsd pkg-config: egl
// Check the EGL tag first as it takes priority over the platform's default
// configuration of WGL/GLX/CGL.
#if defined(TAG_EGL)
#include <stdlib.h>
#include <EGL/egl.h>
void* GlowGetProcAddress_glcoreall(const char* name) {
return eglGetProcAddress(name);
}
#elif defined(TAG_WINDOWS)
#define WIN32_LEAN_AND_MEAN 1
#include <windows.h>
#include <stdlib.h>
static HMODULE ogl32dll = NULL;
void* GlowGetProcAddress_glcoreall(const char* name) {
void* pf = wglGetProcAddress((LPCSTR) name);
if (pf) {
return pf;
}
if (ogl32dll == NULL) {
ogl32dll = LoadLibraryA("opengl32.dll");
}
return GetProcAddress(ogl32dll, (LPCSTR) name);
}
#elif defined(TAG_DARWIN)
#include <stdlib.h>
#include <dlfcn.h>
void* GlowGetProcAddress_glcoreall(const char* name) {
return dlsym(RTLD_DEFAULT, name);
}
#elif defined(TAG_POSIX)
#include <stdlib.h>
#include <GL/glx.h>
void* GlowGetProcAddress_glcoreall(const char* name) {
return glXGetProcAddress((const GLubyte *) name);
}
#endif
*/
import "C"
import "unsafe"
func getProcAddress(namea string) unsafe.Pointer {
cname := C.CString(namea)
defer C.free(unsafe.Pointer(cname))
return C.GlowGetProcAddress_glcoreall(cname)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
hasher-matcher-actioner/scripts/submitter.py
|
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import time
import threading
import uuid
import datetime
import typing as t
from hma_client_lib import DeployedInstanceClient
class Submitter(threading.Thread):
def __init__(
self,
client: DeployedInstanceClient,
batch_size: int,
seconds_between_batches: int,
filepaths: t.Optional[t.List[str]] = None,
**kwargs,
):
super(Submitter, self).__init__(**kwargs)
self.daemon = True
self._stop_signal = threading.Event()
self._lock = threading.Lock()
self.client = client
self.batch_size = batch_size
self.seconds_between_batches = seconds_between_batches
self.filepaths = filepaths or []
self.total_submitted = 0
def stop(self):
self._stop_signal.set()
def stopped(self):
return self._stop_signal.is_set()
def run(self):
while not self.stopped() and self._lock.acquire():
if self.stopped():
self._lock.release()
return
try:
batch_prefix = f"soak-test-{str(uuid.uuid4())}"
for i in range(self.batch_size):
content_id = f"{batch_prefix}{i}-time-{datetime.datetime.now().isoformat()}-time-"
if self.filepaths:
self.client.submit_test_content(
content_id, filepath=self.filepaths[i % len(self.filepaths)]
)
else:
self.client.submit_test_content(content_id)
self.total_submitted += 1
finally:
self._lock.release()
time.sleep(self.seconds_between_batches)
def get_total_submit_count(self) -> int:
with self._lock:
return self.total_submitted
def get_current_values(self) -> t.Tuple[int, int, int]:
with self._lock:
return (self.batch_size, self.seconds_between_batches, self.total_submitted)
def set_batch_size(self, batch_size: int):
with self._lock:
self.batch_size = batch_size
def set_seconds_between_batches(self, seconds_between_batches: int):
with self._lock:
self.seconds_between_batches = seconds_between_batches
if __name__ == "__main__":
API_URL = ""
TOKEN = ""
api_url = os.environ.get(
"HMA_API_URL",
API_URL,
)
token = os.environ.get(
"HMA_TOKEN",
TOKEN,
)
client = DeployedInstanceClient(api_url, token)
submitter = Submitter(client, batch_size=5, seconds_between_batches=5)
submitter.start()
cmd = ""
while cmd != "q":
cmd = input("Enter 'q' to shutdown: ")
submitter.stop()
|
[] |
[] |
[
"HMA_TOKEN",
"HMA_API_URL"
] |
[]
|
["HMA_TOKEN", "HMA_API_URL"]
|
python
| 2 | 0 | |
usr/lib/python3.5/ctypes/util.py
|
import sys, os
import contextlib
import subprocess
# find_library(name) returns the pathname of a library, or None.
if os.name == "nt":
def _get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
# This function was copied from Lib/distutils/msvccompiler.py
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
if majorVersion >= 13:
majorVersion += 1
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
def find_msvcrt():
"""Return the name of the VC runtime dll"""
version = _get_build_version()
if version is None:
# better be safe than sorry
return None
if version <= 6:
clibname = 'msvcrt'
elif version <= 13:
clibname = 'msvcr%d' % (version * 10)
else:
# CRT is no longer directly loadable. See issue23606 for the
# discussion about alternative approaches.
return None
# If python was built with in debug mode
import importlib.machinery
if '_d.pyd' in importlib.machinery.EXTENSION_SUFFIXES:
clibname += 'd'
return clibname+'.dll'
def find_library(name):
if name in ('c', 'm'):
return find_msvcrt()
# See MSDN for the REAL search order.
for directory in os.environ['PATH'].split(os.pathsep):
fname = os.path.join(directory, name)
if os.path.isfile(fname):
return fname
if fname.lower().endswith(".dll"):
continue
fname = fname + ".dll"
if os.path.isfile(fname):
return fname
return None
if os.name == "ce":
# search path according to MSDN:
# - absolute path specified by filename
# - The .exe launch directory
# - the Windows directory
# - ROM dll files (where are they?)
# - OEM specified search path: HKLM\Loader\SystemPath
def find_library(name):
return name
if os.name == "posix" and sys.platform == "darwin":
from ctypes.macholib.dyld import dyld_find as _dyld_find
def find_library(name):
possible = ['lib%s.dylib' % name,
'%s.dylib' % name,
'%s.framework/%s' % (name, name)]
for name in possible:
try:
return _dyld_find(name)
except ValueError:
continue
return None
elif os.name == "posix":
# Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump
import re, tempfile
def _findLib_gcc(name):
expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
fdout, ccout = tempfile.mkstemp()
os.close(fdout)
cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \
'LANG=C LC_ALL=C $CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name
try:
f = os.popen(cmd)
try:
trace = f.read()
finally:
rv = f.close()
finally:
try:
os.unlink(ccout)
except FileNotFoundError:
pass
if rv == 10:
raise OSError('gcc or cc command not found')
res = re.search(expr, trace)
if not res:
return None
return res.group(0)
if sys.platform == "sunos5":
# use /usr/ccs/bin/dump on solaris
def _get_soname(f):
if not f:
return None
cmd = "/usr/ccs/bin/dump -Lpv 2>/dev/null " + f
with contextlib.closing(os.popen(cmd)) as f:
data = f.read()
res = re.search(r'\[.*\]\sSONAME\s+([^\s]+)', data)
if not res:
return None
return res.group(1)
else:
def _get_soname(f):
# assuming GNU binutils / ELF
if not f:
return None
cmd = 'if ! type objdump >/dev/null 2>&1; then exit 10; fi;' \
"objdump -p -j .dynamic 2>/dev/null " + f
f = os.popen(cmd)
try:
dump = f.read()
finally:
rv = f.close()
if rv == 10:
raise OSError('objdump command not found')
res = re.search(r'\sSONAME\s+([^\s]+)', dump)
if not res:
return None
return res.group(1)
if sys.platform.startswith(("freebsd", "openbsd", "dragonfly")):
def _num_version(libname):
# "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ]
parts = libname.split(".")
nums = []
try:
while parts:
nums.insert(0, int(parts.pop()))
except ValueError:
pass
return nums or [ sys.maxsize ]
def find_library(name):
ename = re.escape(name)
expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename)
with contextlib.closing(os.popen('/sbin/ldconfig -r 2>/dev/null')) as f:
data = f.read()
res = re.findall(expr, data)
if not res:
return _get_soname(_findLib_gcc(name))
res.sort(key=_num_version)
return res[-1]
elif sys.platform == "sunos5":
def _findLib_crle(name, is64):
if not os.path.exists('/usr/bin/crle'):
return None
if is64:
cmd = 'env LC_ALL=C /usr/bin/crle -64 2>/dev/null'
else:
cmd = 'env LC_ALL=C /usr/bin/crle 2>/dev/null'
paths = None
with contextlib.closing(os.popen(cmd)) as f:
for line in f.readlines():
line = line.strip()
if line.startswith('Default Library Path (ELF):'):
paths = line.split()[4]
if not paths:
return None
for dir in paths.split(":"):
libfile = os.path.join(dir, "lib%s.so" % name)
if os.path.exists(libfile):
return libfile
return None
def find_library(name, is64 = False):
return _get_soname(_findLib_crle(name, is64) or _findLib_gcc(name))
else:
def _findSoname_ldconfig(name):
import struct
# XXX this code assumes that we know all unames and that a single
# ABI is supported per uname; instead we should find what the
# ABI is (e.g. check ABI of current process) or simply ask libc
# to load the library for us
uname = os.uname()
# ARM has a variety of unames, e.g. armv7l
if uname.machine.startswith("arm"):
machine = "arm"
if struct.calcsize('l') == 4:
machine = uname.machine + '-32'
else:
machine = uname.machine + '-64'
mach_map = {
'x86_64-64': 'libc6,x86-64',
'ppc64-64': 'libc6,64bit',
'sparc64-64': 'libc6,64bit',
's390x-64': 'libc6,64bit',
'ia64-64': 'libc6,IA-64',
# this actually breaks on biarch or multiarch as the first
# library wins; uname doesn't tell us which ABI we're using
'arm-32': 'libc6(,hard-float)?',
}
abi_type = mach_map.get(machine, 'libc6')
# XXX assuming GLIBC's ldconfig (with option -p)
regex = os.fsencode(
'\s+(lib%s\.[^\s]+)\s+\(%s' % (re.escape(name), abi_type))
try:
with subprocess.Popen(['/sbin/ldconfig', '-p'],
stdin=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
stdout=subprocess.PIPE,
env={'LC_ALL': 'C', 'LANG': 'C'}) as p:
res = re.search(regex, p.stdout.read())
if res:
return os.fsdecode(res.group(1))
except OSError:
pass
def find_library(name):
return _findSoname_ldconfig(name) or _get_soname(_findLib_gcc(name))
################################################################
# test code
def test():
from ctypes import cdll
if os.name == "nt":
print(cdll.msvcrt)
print(cdll.load("msvcrt"))
print(find_library("msvcrt"))
if os.name == "posix":
# find and load_version
print(find_library("m"))
print(find_library("c"))
print(find_library("bz2"))
# getattr
## print cdll.m
## print cdll.bz2
# load
if sys.platform == "darwin":
print(cdll.LoadLibrary("libm.dylib"))
print(cdll.LoadLibrary("libcrypto.dylib"))
print(cdll.LoadLibrary("libSystem.dylib"))
print(cdll.LoadLibrary("System.framework/System"))
else:
print(cdll.LoadLibrary("libm.so"))
print(cdll.LoadLibrary("libcrypt.so"))
print(find_library("crypt"))
if __name__ == "__main__":
test()
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
private_set_intersection/javascript/toolchain/cc_toolchain_config.bzl
|
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"feature",
"flag_group",
"flag_set",
"tool_path",
"with_feature_set",
)
def _impl(ctx):
tool_paths = [
tool_path(
name = "gcc",
path = "emcc.sh",
),
tool_path(
name = "ld",
path = "emcc.sh",
),
tool_path(
name = "ar",
path = "emar.sh",
),
tool_path(
name = "cpp",
path = "false.sh",
),
tool_path(
name = "gcov",
path = "false.sh",
),
tool_path(
name = "nm",
path = "NOT_USED",
),
tool_path(
name = "objdump",
path = "false.sh",
),
tool_path(
name = "strip",
path = "NOT_USED",
),
]
preprocessor_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.clif_match,
]
all_link_actions = [
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
all_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.clif_match,
ACTION_NAMES.lto_backend,
]
toolchain_include_directories_feature = feature(
name = "toolchain_include_directories",
enabled = True,
flag_sets = [
flag_set(
actions = all_compile_actions,
flag_groups = [
flag_group(
flags = [
# The clang compiler comes with a definition of
# max_align_t struct in $emsdk/upstream/lib/clang/13.0.0/include/__stddef_max_align_t.h.
# It conflicts with the one defined in
# $emsdk/upstream/emscripten/cache/sysroot/include/bits/alltypes.h.
# We need both include paths to make things work.
#
# To workaround this, we are defining the following
# symbol through compiler flag so that the max_align_t
# defined in clang's header file will be skipped.
"-D",
"__CLANG_MAX_ALIGN_T_DEFINED",
# We are using emscripten 2.0.15 for this build. It
# comes with clang 13.0.0. Future emscripten release
# might change the clang version number below.
#
# Also need to change the version number in
# cxx_cxx_builtin_include_directories below.
"-isystem",
"external/emsdk/emsdk/upstream/lib/clang/13.0.0/include",
],
),
],
),
],
)
crosstool_default_flag_sets = [
# Optimized (opt)
flag_set(
actions = preprocessor_compile_actions,
flag_groups = [flag_group(flags = ["-DNDEBUG"])],
with_features = [with_feature_set(features = ["opt"])],
),
# Overriding to use -O2 instead of -O3 because asmjs breaks.
flag_set(
actions = all_compile_actions + all_link_actions,
flag_groups = [flag_group(flags = ["-g0", "-O2"])],
with_features = [with_feature_set(features = ["opt"])],
),
# Fastbuild (fastbuild)
flag_set(
actions = all_compile_actions + all_link_actions,
flag_groups = [flag_group(flags = ["-O2"])],
with_features = [with_feature_set(features = ["fastbuild"])],
),
# Debug (dbg)
flag_set(
actions = all_compile_actions + all_link_actions,
flag_groups = [flag_group(flags = ["-g2", "-O0"])],
with_features = [with_feature_set(features = ["dbg"])],
),
]
features = [
toolchain_include_directories_feature,
# These 3 features will be automatically enabled by blaze in the
# corresponding build mode.
feature(
name = "opt",
provides = ["variant:crosstool_build_mode"],
),
feature(
name = "dbg",
provides = ["variant:crosstool_build_mode"],
),
feature(
name = "fastbuild",
provides = ["variant:crosstool_build_mode"],
),
feature(
name = "crosstool_default_flags",
enabled = True,
flag_sets = crosstool_default_flag_sets,
),
]
cxx_builtin_include_directories = [
"external/emsdk/emsdk/upstream/emscripten/cache/sysroot/include/c++/v1",
"external/emsdk/emsdk/upstream/emscripten/cache/sysroot/include/compat",
"external/emsdk/emsdk/upstream/emscripten/cache/sysroot/include",
# We are using emscripten 2.0.15 for this build. It comes with clang
# 13.0.0. Future emscripten release might change the clang version
# number below.
#
# Also need to change the version number in
# toolchain_include_directories_feature above.
"external/emsdk/emsdk/upstream/lib/clang/13.0.0/include",
]
builtin_sysroot = "external/emsdk/emsdk/upstream/emscripten/cache/sysroot"
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
toolchain_identifier = "wasm-toolchain",
host_system_name = "i686-unknown-linux-gnu",
target_system_name = "wasm-unknown-emscripten",
target_cpu = "wasm",
target_libc = "musl/js",
compiler = "emscripten",
abi_version = "emscripten_syscalls",
abi_libc_version = "default",
tool_paths = tool_paths,
features = features,
builtin_sysroot = builtin_sysroot,
cxx_builtin_include_directories = cxx_builtin_include_directories,
)
cc_toolchain_config = rule(
implementation = _impl,
attrs = {},
provides = [CcToolchainConfigInfo],
)
def _emsdk_impl(ctx):
if "EMSDK" not in ctx.os.environ or ctx.os.environ["EMSDK"].strip() == "":
fail("The environment variable EMSDK is not found. " +
"Did you run source ./emsdk_env.sh ?")
path = ctx.os.environ["EMSDK"]
ctx.symlink(path, "emsdk")
ctx.file("BUILD", """
filegroup(
name = "all",
srcs = glob(["emsdk/**"]),
visibility = ["//visibility:public"],
)
""")
emsdk_configure = repository_rule(
implementation = _emsdk_impl,
local = True,
)
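# Illustrative WORKSPACE usage (the load label assumes this file's path in the repo):
#   load("//private_set_intersection/javascript/toolchain:cc_toolchain_config.bzl", "emsdk_configure")
#   emsdk_configure(name = "emsdk")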
|
[] |
[] |
[
"EMSDK"
] |
[]
|
["EMSDK"]
|
python
| 1 | 0 | |
apps/beeswax/src/beeswax/design.py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The HQLdesign class can (de)serialize a design to/from a QueryDict.
"""
import json
import logging
import os
import re
import urlparse
import django.http
from django import forms
from desktop.lib.django_forms import BaseSimpleFormSet, MultiForm
from desktop.lib.django_mako import render_to_string
from hadoop.cluster import get_hdfs
LOG = logging.getLogger(__name__)
SERIALIZATION_VERSION = '0.4.1'
def hql_query(hql, database='default', query_type=None):
data_dict = json.loads('{"query": {"email_notify": false, "query": null, "type": 0, "is_parameterized": true, "database": "default"}, '
'"functions": [], "VERSION": "0.4.1", "file_resources": [], "settings": []}')
if not (isinstance(hql, str) or isinstance(hql, unicode)):
raise Exception('Requires a SQL text query of type <str>, <unicode> and not %s' % type(hql))
data_dict['query']['query'] = strip_trailing_semicolon(hql)
data_dict['query']['database'] = database
if query_type:
data_dict['query']['type'] = query_type
hql_design = HQLdesign()
hql_design._data_dict = data_dict
return hql_design
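# e.g. hql_query("SELECT 1;").hql_query == "SELECT 1"  (trailing semicolon stripped)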
class HQLdesign(object):
"""
Represents an HQL design, with methods to perform (de)serialization.
We support queries that aren't parameterized, in case users
want to use "$" natively, but we leave that as an advanced
option to turn off.
"""
_QUERY_ATTRS = [ 'query', 'type', 'is_parameterized', 'email_notify', 'database' ]
_SETTINGS_ATTRS = [ 'key', 'value' ]
_FILE_RES_ATTRS = [ 'type', 'path' ]
_FUNCTIONS_ATTRS = [ 'name', 'class_name' ]
def __init__(self, form=None, query_type=None):
"""Initialize the design from a valid form data."""
if form is not None:
assert isinstance(form, MultiForm)
self._data_dict = {
'query': normalize_form_dict(form.query, HQLdesign._QUERY_ATTRS),
'settings': normalize_formset_dict(form.settings, HQLdesign._SETTINGS_ATTRS),
'file_resources': normalize_formset_dict(form.file_resources, HQLdesign._FILE_RES_ATTRS),
'functions': normalize_formset_dict(form.functions, HQLdesign._FUNCTIONS_ATTRS)
}
if query_type is not None:
self._data_dict['query']['type'] = query_type
def dumps(self):
"""Returns the serialized form of the design in a string"""
dic = self._data_dict.copy()
dic['VERSION'] = SERIALIZATION_VERSION
return json.dumps(dic)
@property
def hql_query(self):
return self._data_dict['query']['query']
@hql_query.setter
def hql_query(self, query):
self._data_dict['query']['query'] = query
@property
def query(self):
return self._data_dict['query'].copy()
@property
def settings(self):
return list(self._data_dict['settings'])
@property
def file_resources(self):
return list(self._data_dict['file_resources'])
@property
def functions(self):
return list(self._data_dict['functions'])
def get_configuration_statements(self):
configuration = []
for f in self.file_resources:
if not urlparse.urlsplit(f['path']).scheme:
scheme = get_hdfs().fs_defaultfs
else:
scheme = ''
configuration.append(render_to_string("hql_resource.mako", dict(type=f['type'], path=f['path'], scheme=scheme)))
for f in self.functions:
configuration.append(render_to_string("hql_function.mako", f))
return configuration
def get_query_dict(self):
# We construct the mform to use its structure and prefix. We don't actually bind data to the forms.
from beeswax.forms import QueryForm
mform = QueryForm()
mform.bind()
res = django.http.QueryDict('', mutable=True)
res.update(denormalize_form_dict(
self._data_dict['query'], mform.query, HQLdesign._QUERY_ATTRS))
res.update(denormalize_formset_dict(
self._data_dict['settings'], mform.settings, HQLdesign._SETTINGS_ATTRS))
res.update(denormalize_formset_dict(
self._data_dict['file_resources'], mform.file_resources, HQLdesign._FILE_RES_ATTRS))
res.update(denormalize_formset_dict(
self._data_dict['functions'], mform.functions, HQLdesign._FUNCTIONS_ATTRS))
return res
@staticmethod
def loads(data):
"""Returns an HQLdesign from the serialized form"""
dic = json.loads(data)
dic = dict(map(lambda k: (str(k), dic.get(k)), dic.keys()))
if dic['VERSION'] != SERIALIZATION_VERSION:
LOG.error('Design version mismatch. Found %s; expect %s' % (dic['VERSION'], SERIALIZATION_VERSION))
# Convert to latest version
del dic['VERSION']
if 'type' not in dic['query'] or dic['query']['type'] is None:
dic['query']['type'] = 0
if 'database' not in dic['query']:
dic['query']['database'] = 'default'
design = HQLdesign()
design._data_dict = dic
return design
def get_query(self):
return self._data_dict["query"]
@property
def statement_count(self):
return len(self.statements)
def get_query_statement(self, n=0):
return self.statements[n]
@property
def statements(self):
hql_query = strip_trailing_semicolon(self.hql_query)
return [strip_trailing_semicolon(statement.strip()) for statement in split_statements(hql_query)]
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def split_statements(hql):
"""
Split statements at semicolons ignoring the ones inside
quotes and comments. The comment symbols that come
inside quotes should be ignored.
"""
statements = []
current = ''
prev = ''
between_quotes = None
is_comment = None
lines = hql.splitlines()
for line in lines:
for c in line:
current += c
if c in ('"', "'") and prev != '\\' and is_comment is None:
if between_quotes == c:
between_quotes = None
elif between_quotes is None:
between_quotes = c
elif c == '-' and prev == '-' and between_quotes is None and is_comment is None:
is_comment = True
elif c == ';':
if between_quotes is None and is_comment is None:
current = current.strip()
# Strip off the trailing semicolon
current = current[:-1]
if len(current) > 1:
statements.append(current)
current = ''
# This character holds no significance if it was escaped within a string
if prev == '\\' and between_quotes is not None:
c = ''
prev = c
is_comment = None
prev = os.linesep
if current != '':
current += os.linesep
if current and current != ';':
current = current.strip()
statements.append(current)
return statements
def normalize_form_dict(form, attr_list):
"""
normalize_form_dict(form, attr_list) -> A dictionary of (attr, value)
Each attr is a field name. And the value is obtained by looking up the form's data dict.
"""
assert isinstance(form, forms.Form)
res = { }
for attr in attr_list:
res[attr] = form.cleaned_data.get(attr)
return res
def normalize_formset_dict(formset, attr_list):
"""
normalize_formset_dict(formset, attr_list) -> A list of dictionary of (attr, value)
"""
assert isinstance(formset, BaseSimpleFormSet)
res = [ ]
for form in formset.forms:
res.append(normalize_form_dict(form, attr_list))
return res
def denormalize_form_dict(data_dict, form, attr_list):
"""
denormalize_form_dict(data_dict, form, attr_list) -> A QueryDict with the attributes set
"""
assert isinstance(form, forms.Form)
res = django.http.QueryDict('', mutable=True)
for attr in attr_list:
try:
res[str(form.add_prefix(attr))] = data_dict[attr]
except KeyError:
pass
return res
def denormalize_formset_dict(data_dict_list, formset, attr_list):
"""
denormalize_formset_dict(data_dict, form, attr_list) -> A QueryDict with the attributes set
"""
assert isinstance(formset, BaseSimpleFormSet)
res = django.http.QueryDict('', mutable=True)
for i, data_dict in enumerate(data_dict_list):
prefix = formset.make_prefix(i)
form = formset.form(prefix=prefix)
res.update(denormalize_form_dict(data_dict, form, attr_list))
res[prefix + '-_exists'] = 'True'
res[str(formset.management_form.add_prefix('next_form_id'))] = str(len(data_dict_list))
return res
def __str__(self):
return '%s: %s' % (self.__class__, self.query)
_SEMICOLON_WHITESPACE = re.compile(";\s*$")
def strip_trailing_semicolon(query):
"""As a convenience, we remove trailing semicolons from queries."""
s = _SEMICOLON_WHITESPACE.split(query, 2)
if len(s) > 1:
assert len(s) == 2
assert s[1] == ''
return s[0]
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
examples/analyse-in-memory/monitor/spark_monitors.py
|
import os
import time
import logging
from driftage.monitor import Monitor
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType
logger = logging.getLogger("spark_monitor")
handler = logging.StreamHandler()
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
ROWS = [
"right_bicep",
"left_bicep",
"left_tricep",
"right_thigh",
"right_hamstring",
"left_thigh",
"left_hamstring"
]
healthSchema = StructType()
for row in ROWS:
healthSchema.add(row, "integer")
class MonitorManager():
"""Examples using files from dataset
https://archive.ics.uci.edu/ml/datasets/EMG+Physical+Action+Data+Set
"""
def open(self, partition_id, epoch_id):
self.monitors = []
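# Each monitor logs in as monitor_<n>@localhost with the password taken from
# the MONITOR_PASSWORD environment variable, which must be set before running.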
for key, identifier in enumerate(ROWS[0:2]):
monitor = Monitor(f"monitor_{key}@localhost", # nosec
os.environ["MONITOR_PASSWORD"], identifier)
monitor.start()
self.monitors.append(monitor)
while not all([m.is_alive() for m in self.monitors]):
time.sleep(1)
print("Waiting all monitors alive")
while not all([bool(m.available_contacts) for m in self.monitors]):
time.sleep(1)
print(
"Waiting analysers connected "
f"{[len(m.available_contacts) for m in self.monitors]}")
print("All monitors alive, starting...")
return True
def process(self, row):
for monitor in self.monitors:
monitor(
dict(sensor=row[monitor._identifier])
)
time.sleep(0.001) # simulating milliseconds
def close(self, error):
print("Closing all monitors...")
if error:
print(f"Got error {error}")
for monitor in self.monitors:
while not all([b.is_done() for b in monitor.behaviours]):
left = sum([not b.is_done() for b in monitor.behaviours])
print(f"Waiting monitors stop to send... {left} left")
time.sleep(1)
monitor.stop()
print("All monitors stopped!")
time.sleep(5)
spark = SparkSession \
.builder \
.appName("ProcessHealthData") \
.getOrCreate()
lines = spark \
.readStream \
.option("sep", "\t") \
.schema(healthSchema) \
.csv("data/")
query = lines.writeStream.foreach(MonitorManager()).start()
query.awaitTermination()
|
[] |
[] |
[
"MONITOR_PASSWORD"
] |
[]
|
["MONITOR_PASSWORD"]
|
python
| 1 | 0 | |
examples/pwr_run/checkpointing/non_slurm/max_pwr/job27.py
|
"""
#Trains a MobileNetV2 on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.mobilenet_v2 import MobileNetV2
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 128
args_lr = 0.005
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_pwr/' + job_name + '*'
total_epochs = 87
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_max_pwr/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
ckpt_qual_dict = {}
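# File-rename "lock": wait until ckpt_qual.json exists, rename it to
# ckpt_qual_lock.json while it is updated, then rename it back so concurrent
# jobs never read a half-written file.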
while True:
if os.path.exists('ckpt_qual.json'):
os.rename('ckpt_qual.json', 'ckpt_qual_lock.json')
break
else:
time.sleep(1)
with open('ckpt_qual_lock.json', 'r') as fp:
ckpt_qual_dict = json.load(fp)
ckpt_qual_dict[job_name] = 1
json_file2 = json.dumps(ckpt_qual_dict)
with open('ckpt_qual_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('ckpt_qual_lock.json', 'ckpt_qual.json')
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=total_epochs,
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
finish_dict = {}
while True:
if os.path.exists('finish.json'):
os.rename('finish.json', 'finish_lock.json')
break
else:
time.sleep(1)
with open('finish_lock.json', 'r') as fp:
finish_dict = json.load(fp)
finish_dict[job_name] = 1
json_file2 = json.dumps(finish_dict)
with open('finish_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('finish_lock.json', 'finish.json')
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
test/e2e/upgrade/upgrade_test.go
|
// Copyright Project Contour Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build e2e
package upgrade
import (
"context"
"os"
"testing"
. "github.com/onsi/ginkgo"
"github.com/projectcontour/contour/test/e2e"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
networking_v1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
f *e2e.Framework
// Contour container image to upgrade deployment to.
// If running against a kind cluster, this image should be loaded into
// the cluster prior to running this test suite.
contourUpgradeToImage string
// Contour version we are upgrading from.
contourUpgradeFromVersion string
)
func TestUpgrade(t *testing.T) {
RunSpecs(t, "Upgrade Suite")
}
var _ = BeforeSuite(func() {
f = e2e.NewFramework(GinkgoT())
contourUpgradeFromVersion = os.Getenv("CONTOUR_UPGRADE_FROM_VERSION")
require.NotEmpty(f.T(), contourUpgradeFromVersion, "CONTOUR_UPGRADE_FROM_VERSION environment variable not supplied")
By("Testing Contour upgrade from " + contourUpgradeFromVersion)
contourUpgradeToImage = os.Getenv("CONTOUR_UPGRADE_TO_IMAGE")
require.NotEmpty(f.T(), contourUpgradeToImage, "CONTOUR_UPGRADE_TO_IMAGE environment variable not supplied")
By("upgrading Contour image to " + contourUpgradeToImage)
})
var _ = Describe("upgrading Contour", func() {
var namespace string
const appHost = "upgrade-echo.test.com"
BeforeEach(func() {
namespace = "contour-upgrade-test"
f.CreateNamespace(namespace)
By("deploying an app")
f.Fixtures.Echo.Deploy(namespace, "echo")
i := &networking_v1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: "echo",
},
Spec: networking_v1.IngressSpec{
Rules: []networking_v1.IngressRule{
{
Host: appHost,
IngressRuleValue: networking_v1.IngressRuleValue{
HTTP: &networking_v1.HTTPIngressRuleValue{
Paths: []networking_v1.HTTPIngressPath{
{
Path: "/",
PathType: ingressPathTypePtr(networking_v1.PathTypePrefix),
Backend: networking_v1.IngressBackend{
Service: &networking_v1.IngressServiceBackend{
Name: "echo",
Port: networking_v1.ServiceBackendPort{Number: 80},
},
},
},
},
},
},
},
},
},
}
require.NoError(f.T(), f.Client.Create(context.TODO(), i))
By("ensuring it is routable")
checkRoutability(appHost)
})
AfterEach(func() {
By("cleaning up test artifacts")
f.DeleteNamespace(namespace)
})
Specify("applications remain routable after the upgrade", func() {
updateContourDeploymentResources()
By("waiting for contour deployment to be updated")
require.NoError(f.T(), f.Deployment.WaitForContourDeploymentUpdated())
By("waiting for envoy daemonset to be updated")
require.NoError(f.T(), f.Deployment.WaitForEnvoyDaemonSetUpdated())
By("ensuring app is still routable")
checkRoutability(appHost)
})
})
func ingressPathTypePtr(t networking_v1.PathType) *networking_v1.PathType {
return &t
}
func checkRoutability(host string) {
res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{
Host: host,
Path: "/echo",
Condition: e2e.HasStatusCode(200),
})
require.Truef(f.T(), ok, "expected 200 response code, got %d", res.StatusCode)
}
func updateContourDeploymentResources() {
By("updating contour namespace")
require.NoError(f.T(), f.Deployment.EnsureNamespace())
By("updating contour service account")
require.NoError(f.T(), f.Deployment.EnsureContourServiceAccount())
By("updating envoy service account")
require.NoError(f.T(), f.Deployment.EnsureEnvoyServiceAccount())
By("updating contour config map")
require.NoError(f.T(), f.Deployment.EnsureContourConfigMap())
By("updating contour CRDs")
require.NoError(f.T(), f.Deployment.EnsureExtensionServiceCRD())
require.NoError(f.T(), f.Deployment.EnsureHTTPProxyCRD())
require.NoError(f.T(), f.Deployment.EnsureTLSCertDelegationCRD())
By("updating certgen service account")
require.NoError(f.T(), f.Deployment.EnsureCertgenServiceAccount())
By("updating contour role binding")
require.NoError(f.T(), f.Deployment.EnsureContourRoleBinding())
By("updating certgen role")
require.NoError(f.T(), f.Deployment.EnsureCertgenRole())
By("updating certgen job")
// Update container image.
require.Len(f.T(), f.Deployment.CertgenJob.Spec.Template.Spec.Containers, 1)
f.Deployment.CertgenJob.Spec.Template.Spec.Containers[0].Image = contourUpgradeToImage
f.Deployment.CertgenJob.Spec.Template.Spec.Containers[0].ImagePullPolicy = v1.PullIfNotPresent
require.NoError(f.T(), f.Deployment.EnsureCertgenJob())
By("updating contour cluster role binding")
require.NoError(f.T(), f.Deployment.EnsureContourClusterRoleBinding())
By("updating contour cluster role")
require.NoError(f.T(), f.Deployment.EnsureContourClusterRole())
By("updating contour service")
// Set cluster ip.
tempS := new(v1.Service)
require.NoError(f.T(), f.Client.Get(context.TODO(), client.ObjectKeyFromObject(f.Deployment.ContourService), tempS))
f.Deployment.ContourService.Spec.ClusterIP = tempS.Spec.ClusterIP
f.Deployment.ContourService.Spec.ClusterIPs = tempS.Spec.ClusterIPs
require.NoError(f.T(), f.Deployment.EnsureContourService())
By("updating envoy service")
// Set cluster ip and health check node port.
require.NoError(f.T(), f.Client.Get(context.TODO(), client.ObjectKeyFromObject(f.Deployment.EnvoyService), tempS))
f.Deployment.EnvoyService.Spec.ClusterIP = tempS.Spec.ClusterIP
f.Deployment.EnvoyService.Spec.ClusterIPs = tempS.Spec.ClusterIPs
f.Deployment.EnvoyService.Spec.HealthCheckNodePort = tempS.Spec.HealthCheckNodePort
require.NoError(f.T(), f.Deployment.EnsureEnvoyService())
By("updating contour deployment")
// Update container image.
require.Len(f.T(), f.Deployment.ContourDeployment.Spec.Template.Spec.Containers, 1)
f.Deployment.ContourDeployment.Spec.Template.Spec.Containers[0].Image = contourUpgradeToImage
f.Deployment.ContourDeployment.Spec.Template.Spec.Containers[0].ImagePullPolicy = v1.PullIfNotPresent
require.NoError(f.T(), f.Deployment.EnsureContourDeployment())
By("updating envoy daemonset")
// Update container image.
require.Len(f.T(), f.Deployment.EnvoyDaemonSet.Spec.Template.Spec.InitContainers, 1)
f.Deployment.EnvoyDaemonSet.Spec.Template.Spec.InitContainers[0].Image = contourUpgradeToImage
f.Deployment.EnvoyDaemonSet.Spec.Template.Spec.InitContainers[0].ImagePullPolicy = v1.PullIfNotPresent
require.Len(f.T(), f.Deployment.EnvoyDaemonSet.Spec.Template.Spec.Containers, 2)
f.Deployment.EnvoyDaemonSet.Spec.Template.Spec.Containers[0].Image = contourUpgradeToImage
f.Deployment.EnvoyDaemonSet.Spec.Template.Spec.Containers[0].ImagePullPolicy = v1.PullIfNotPresent
require.NoError(f.T(), f.Deployment.EnsureEnvoyDaemonSet())
}
|
[
"\"CONTOUR_UPGRADE_FROM_VERSION\"",
"\"CONTOUR_UPGRADE_TO_IMAGE\""
] |
[] |
[
"CONTOUR_UPGRADE_TO_IMAGE",
"CONTOUR_UPGRADE_FROM_VERSION"
] |
[]
|
["CONTOUR_UPGRADE_TO_IMAGE", "CONTOUR_UPGRADE_FROM_VERSION"]
|
go
| 2 | 0 | |
Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/common.go
|
// +build acceptance rackspace objectstorage v1
package v1
import (
"os"
"testing"
"github.com/apcera/libretto/Godeps/_workspace/src/github.com/rackspace/gophercloud"
"github.com/apcera/libretto/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/tools"
"github.com/apcera/libretto/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace"
th "github.com/apcera/libretto/Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper"
)
func rackspaceAuthOptions(t *testing.T) gophercloud.AuthOptions {
// Obtain credentials from the environment.
options, err := rackspace.AuthOptionsFromEnv()
th.AssertNoErr(t, err)
options = tools.OnlyRS(options)
if options.Username == "" {
t.Fatal("Please provide a Rackspace username as RS_USERNAME.")
}
if options.APIKey == "" {
t.Fatal("Please provide a Rackspace API key as RS_API_KEY.")
}
return options
}
func createClient(t *testing.T, cdn bool) (*gophercloud.ServiceClient, error) {
region := os.Getenv("RS_REGION")
if region == "" {
t.Fatal("Please provide a Rackspace region as RS_REGION")
}
ao := rackspaceAuthOptions(t)
provider, err := rackspace.NewClient(ao.IdentityEndpoint)
th.AssertNoErr(t, err)
err = rackspace.Authenticate(provider, ao)
th.AssertNoErr(t, err)
if cdn {
return rackspace.NewObjectCDNV1(provider, gophercloud.EndpointOpts{
Region: region,
})
}
return rackspace.NewObjectStorageV1(provider, gophercloud.EndpointOpts{
Region: region,
})
}
|
[
"\"RS_REGION\""
] |
[] |
[
"RS_REGION"
] |
[]
|
["RS_REGION"]
|
go
| 1 | 0 | |
src/doc/conf.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
MOCK_MODULES = ['pytz']
if MOCK_MODULES and on_rtd:
if sys.version_info > (3, 3):
from unittest.mock import MagicMock
else:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ----------------------------------------------------
project = 'Astral'
author = 'Simon Kennedy'
copyright = '2009-2018, %s' % author
version = '1.6'
release = '1.6'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
intersphinx_mapping = {'python': ('http://docs.python.org/3', None)}
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(os.path.abspath('..')))
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# -- Options for HTML output ---------------------------------------------------
if not on_rtd:
project_home = os.environ.get('PROJECT_HOME', None)
if not project_home:
dev_home = os.environ.get('DEV_HOME', None)
if dev_home:
project_home = os.path.join(os.path.expanduser(dev_home), 'projects')
else:
project_home = os.path.expanduser(project_home)
if project_home:
theme_root = os.path.join(project_home, 'sphinx-theme', 'sffjunkie', 'trunk')
html_theme_path = [theme_root]
else:
raise OSError('Unable to find theme root: Please set the PROJECT_HOME environment variable')
html_theme = 'sffjunkie'
html_theme_options = {'logo_shadow': True, 'fixed_header': False}
else:
html_theme = 'default'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = os.path.join('static', 'earth_sun.png')
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#if not on_rtd:
# html_favicon = os.path.join('_static', 'favicon.png')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# -- Options for HTML Help output --------------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'AstralDoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'a4'
latex_elements = {'papersize': 'a4'}
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Astral.tex', 'Astral v%s' % release, author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
[] |
[] |
[
"PROJECT_HOME",
"DEV_HOME",
"READTHEDOCS"
] |
[]
|
["PROJECT_HOME", "DEV_HOME", "READTHEDOCS"]
|
python
| 3 | 0 | |
scalingo/utils.go
|
package scalingo
import (
"context"
"errors"
"os"
"github.com/Scalingo/go-scalingo/v4"
"github.com/turbot/steampipe-plugin-sdk/plugin"
)
func connect(ctx context.Context, d *plugin.QueryData) (*scalingo.Client, error) {
endpoint := os.Getenv("SCALINGO_ENDPOINT")
token := os.Getenv("SCALINGO_TOKEN")
scalingoConfig := GetConfig(d.Connection)
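	// Values from the connection configuration, when present, take precedence over the SCALINGO_* environment variables.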
	if scalingoConfig.Endpoint != nil {
		endpoint = *scalingoConfig.Endpoint
	}
	if scalingoConfig.Token != nil {
		token = *scalingoConfig.Token
	}
if endpoint == "" {
return nil, errors.New("'endpoint' must be set in the connection configuration. Edit your connection configuration file and then restart Steampipe")
}
if token == "" {
return nil, errors.New("'token' must be set in the connection configuration. Edit your connection configuration file and then restart Steampipe")
}
config := scalingo.ClientConfig{
APIEndpoint: endpoint,
APIToken: token,
}
return scalingo.New(config)
}
func appNameQual(_ context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) {
return d.KeyColumnQuals["app_name"].GetStringValue(), nil
}
|
[
"\"SCALINGO_ENDPOINT\"",
"\"SCALINGO_TOKEN\""
] |
[] |
[
"SCALINGO_ENDPOINT",
"SCALINGO_TOKEN"
] |
[]
|
["SCALINGO_ENDPOINT", "SCALINGO_TOKEN"]
|
go
| 2 | 0 | |
hgcn/test.py
|
from __future__ import division
from __future__ import print_function
import datetime
import json
import logging
import os
import pickle
import time
import numpy as np
import optimizers
import torch
from config import parser
from models.base_models import NCModel, LPModel
from utils.data_utils import load_data
from utils.train_utils import get_dir_name, format_metrics
import torch.cuda.profiler as profiler
def test(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if int(args.double_precision):
torch.set_default_dtype(torch.float64)
if int(args.cuda) >= 0:
torch.cuda.manual_seed(args.seed)
args.device = 'cuda:' + str(args.cuda) if int(args.cuda) >= 0 else 'cpu'
args.patience = args.epochs if not args.patience else int(args.patience)
logging.getLogger().setLevel(logging.INFO)
if args.save:
if not args.save_dir:
dt = datetime.datetime.now()
date = f"{dt.year}_{dt.month}_{dt.day}"
models_dir = os.path.join(os.environ['LOG_DIR'], args.task, date)
save_dir = get_dir_name(models_dir)
else:
save_dir = args.save_dir
logging.basicConfig(level=logging.INFO,
handlers=[
logging.FileHandler(os.path.join(save_dir, 'log.txt')),
logging.StreamHandler()
])
logging.info(f'Using: {args.device}')
logging.info("Using seed {}.".format(args.seed))
# Load data
data = load_data(args, os.path.join(os.environ['DATAPATH'], args.dataset))
args.n_nodes, args.feat_dim = data['features'].shape
if args.task == 'nc':
Model = NCModel
args.n_classes = int(data['labels'].max() + 1)
logging.info(f'Num classes: {args.n_classes}')
else:
args.nb_false_edges = len(data['train_edges_false'])
args.nb_edges = len(data['train_edges'])
if args.task == 'lp':
Model = LPModel
else:
Model = RECModel
# No validation for reconstruction task
args.eval_freq = args.epochs + 1
if not args.lr_reduce_freq:
args.lr_reduce_freq = args.epochs
# Model and optimizer
model = Model(args)
checkpoint_path="hgcn_chkpt/model.pth"
model.load_state_dict(torch.load(checkpoint_path))
logging.info(str(model))
optimizer = getattr(optimizers, args.optimizer)(params=model.parameters(), lr=args.lr,
weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer,
step_size=int(args.lr_reduce_freq),
gamma=float(args.gamma)
)
tot_params = sum([np.prod(p.size()) for p in model.parameters()])
logging.info(f"Total number of parameters: {tot_params}")
if args.cuda is not None and int(args.cuda) >= 0 :
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda)
model = model.to(args.device)
for x, val in data.items():
if torch.is_tensor(data[x]):
data[x] = data[x].to(args.device)
if len(args.time_file) == 0:
model.eval() # set evaluation mode
embeddings = model.encode(data['features'], data['adj_train_norm'])
val_metrics = model.compute_metrics(embeddings, data, 'val')
else:
n_warmup = 50
n_sample = 50
model.eval() # set evaluation mode
print("=== Running Warmup Passes")
for i in range(0,n_warmup):
embeddings = model.encode(data['features'], data['adj_train_norm'])
val_metrics = model.compute_metrics(embeddings, data, 'val')
print("=== Collecting Runtime over ", str(n_sample), " Passes")
tic = time.perf_counter()
for i in range(0,n_sample):
embeddings = model.encode(data['features'], data['adj_train_norm'])
val_metrics = model.compute_metrics(embeddings, data, 'val')
toc = time.perf_counter()
avg_runtime = float(toc - tic)/n_sample
print("average runtime = ", avg_runtime)
# write runtime to file
f = open(args.time_file, "w")
f.write(str(avg_runtime)+"\n")
f.close()
if __name__ == '__main__':
parser.add_argument('--time_file', type=str, default='', help='timing output file')
args = parser.parse_args()
profiler.start()
test(args)
profiler.stop()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"LOG_DIR",
"DATAPATH"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "LOG_DIR", "DATAPATH"]
|
python
| 3 | 0 | |
imagetest/cmd/manager/main.go
|
package main
import (
"context"
"encoding/xml"
"flag"
"fmt"
"log"
"os"
"regexp"
"strings"
"cloud.google.com/go/storage"
"github.com/GoogleCloudPlatform/guest-test-infra/imagetest"
"github.com/GoogleCloudPlatform/guest-test-infra/imagetest/test_suites/disk"
imageboot "github.com/GoogleCloudPlatform/guest-test-infra/imagetest/test_suites/image_boot"
imagevalidation "github.com/GoogleCloudPlatform/guest-test-infra/imagetest/test_suites/image_validation"
"github.com/GoogleCloudPlatform/guest-test-infra/imagetest/test_suites/metadata"
"github.com/GoogleCloudPlatform/guest-test-infra/imagetest/test_suites/network"
"github.com/GoogleCloudPlatform/guest-test-infra/imagetest/test_suites/security"
"github.com/GoogleCloudPlatform/guest-test-infra/imagetest/test_suites/ssh"
)
var (
project = flag.String("project", "", "project to use for test runner")
testProjects = flag.String("test_projects", "", "comma separated list of projects to be used for tests. defaults to the test runner project")
zone = flag.String("zone", "", "zone to be used for tests")
printwf = flag.Bool("print", false, "print out the parsed test workflows and exit")
validate = flag.Bool("validate", false, "validate all the test workflows and exit")
outPath = flag.String("out_path", "junit.xml", "junit xml path")
gcsPath = flag.String("gcs_path", "", "GCS Path for Daisy working directory")
images = flag.String("images", "", "comma separated list of images to test")
timeout = flag.String("timeout", "30m", "timeout for the test suite")
parallelCount = flag.Int("parallel_count", 5, "TestParallelCount")
filter = flag.String("filter", "", "only run tests matching filter")
)
var (
imageMap = map[string]string{
"centos-7": "projects/centos-cloud/global/images/family/centos-7",
"centos-8": "projects/centos-cloud/global/images/family/centos-8",
"centos-stream-8": "projects/centos-cloud/global/images/family/centos-stream-8",
"cos-77-lts": "projects/cos-cloud/global/images/family/cos-77-lts",
"cos-81-lts": "projects/cos-cloud/global/images/family/cos-81-lts",
"cos-85-lts": "projects/cos-cloud/global/images/family/cos-85-lts",
"cos-89-lts": "projects/cos-cloud/global/images/family/cos-89-lts",
"cos-beta": "projects/cos-cloud/global/images/family/cos-beta",
"cos-dev": "projects/cos-cloud/global/images/family/cos-dev",
"cos-stable": "projects/cos-cloud/global/images/family/cos-stable",
"debian-10": "projects/debian-cloud/global/images/family/debian-10",
"debian-11": "projects/debian-cloud/global/images/family/debian-11",
"debian-9": "projects/debian-cloud/global/images/family/debian-9",
"fedora-coreos-next": "projects/fedora-coreos-cloud/global/images/family/fedora-coreos-next",
"fedora-coreos-stable": "projects/fedora-coreos-cloud/global/images/family/fedora-coreos-stable",
"fedora-coreos-testing": "projects/fedora-coreos-cloud/global/images/family/fedora-coreos-testing",
"rhel-7": "projects/rhel-cloud/global/images/family/rhel-7",
"rhel-7-4-sap": "projects/rhel-sap-cloud/global/images/family/rhel-7-4-sap",
"rhel-7-6-sap-ha": "projects/rhel-sap-cloud/global/images/family/rhel-7-6-sap-ha",
"rhel-7-7-sap-ha": "projects/rhel-sap-cloud/global/images/family/rhel-7-7-sap-ha",
"rhel-8": "projects/rhel-cloud/global/images/family/rhel-8",
"rhel-8-1-sap-ha": "projects/rhel-sap-cloud/global/images/family/rhel-8-1-sap-ha",
"rhel-8-2-sap-ha": "projects/rhel-sap-cloud/global/images/family/rhel-8-2-sap-ha",
"rocky-linux-8": "projects/rocky-linux-cloud/global/images/family/rocky-linux-8",
"sles-12": "projects/suse-cloud/global/images/family/sles-12",
"sles-12-sp3-sap": "projects/suse-sap-cloud/global/images/family/sles-12-sp3-sap",
"sles-12-sp4-sap": "projects/suse-sap-cloud/global/images/family/sles-12-sp4-sap",
"sles-12-sp5-sap": "projects/suse-sap-cloud/global/images/family/sles-12-sp5-sap",
"sles-15": "projects/suse-cloud/global/images/family/sles-15",
"sles-15-sap": "projects/suse-sap-cloud/global/images/family/sles-15-sap",
"sles-15-sp1-sap": "projects/suse-sap-cloud/global/images/family/sles-15-sp1-sap",
"sles-15-sp2-sap": "projects/suse-sap-cloud/global/images/family/sles-15-sp2-sap",
"ubuntu-1604-lts": "projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts",
"ubuntu-1804-lts": "projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts",
"ubuntu-2004-lts": "projects/ubuntu-os-cloud/global/images/family/ubuntu-2004-lts",
"ubuntu-2010": "projects/ubuntu-os-cloud/global/images/family/ubuntu-2010",
"ubuntu-2104": "projects/ubuntu-os-cloud/global/images/family/ubuntu-2104",
"ubuntu-minimal-1604-lts": "projects/ubuntu-os-cloud/global/images/family/ubuntu-minimal-1604-lts",
"ubuntu-minimal-1804-lts": "projects/ubuntu-os-cloud/global/images/family/ubuntu-minimal-1804-lts",
"ubuntu-minimal-2004-lts": "projects/ubuntu-os-cloud/global/images/family/ubuntu-minimal-2004-lts",
"ubuntu-minimal-2010": "projects/ubuntu-os-cloud/global/images/family/ubuntu-minimal-2010",
"ubuntu-minimal-2104": "projects/ubuntu-os-cloud/global/images/family/ubuntu-minimal-2104",
"ubuntu-pro-1604-lts": "projects/ubuntu-os-pro-cloud/global/images/family/ubuntu-pro-1604-lts",
"ubuntu-pro-1804-lts": "projects/ubuntu-os-pro-cloud/global/images/family/ubuntu-pro-1804-lts",
"ubuntu-pro-2004-lts": "projects/ubuntu-os-pro-cloud/global/images/family/ubuntu-pro-2004-lts",
}
)
type logWriter struct {
log *log.Logger
}
func (l *logWriter) Write(b []byte) (int, error) {
l.log.Print(string(b))
return len(b), nil
}
func main() {
flag.Parse()
if *project == "" || *zone == "" || *images == "" {
log.Fatal("Must provide project, zone and images arguments")
return
}
var testProjectsReal []string
if *testProjects == "" {
testProjectsReal = append(testProjectsReal, *project)
} else {
testProjectsReal = strings.Split(*testProjects, ",")
}
log.Printf("Running in project %s zone %s. Tests will run in projects: %s", *project, *zone, testProjectsReal)
if *gcsPath != "" {
log.Printf("gcs_path set to %s", *gcsPath)
}
var regex *regexp.Regexp
if *filter != "" {
var err error
regex, err = regexp.Compile(*filter)
if err != nil {
log.Fatal("-filter flag not valid:", err)
}
log.Printf("using -filter %s", *filter)
}
// Setup tests.
testPackages := []struct {
name string
setupFunc func(*imagetest.TestWorkflow) error
}{
{
imagevalidation.Name,
imagevalidation.TestSetup,
},
{
imageboot.Name,
imageboot.TestSetup,
},
{
network.Name,
network.TestSetup,
},
{
security.Name,
security.TestSetup,
},
{
disk.Name,
disk.TestSetup,
},
{
ssh.Name,
ssh.TestSetup,
},
{
metadata.Name,
metadata.TestSetup,
},
}
var testWorkflows []*imagetest.TestWorkflow
for _, testPackage := range testPackages {
if regex != nil && !regex.MatchString(testPackage.name) {
continue
}
for _, image := range strings.Split(*images, ",") {
if !strings.Contains(image, "/") {
fullimage, ok := imageMap[image]
if !ok {
log.Fatalf("unknown image %s", image)
}
image = fullimage
}
log.Printf("Add test workflow for test %s on image %s", testPackage.name, image)
test, err := imagetest.NewTestWorkflow(testPackage.name, image, *timeout)
if err != nil {
log.Fatalf("Failed to create test workflow: %v", err)
}
testWorkflows = append(testWorkflows, test)
if err := testPackage.setupFunc(test); err != nil {
log.Fatalf("%s.TestSetup for %s failed: %v", testPackage.name, image, err)
}
}
}
log.Println("imagetest: Done with setup")
ctx := context.Background()
client, err := storage.NewClient(ctx)
if err != nil {
log.Printf("failed to set up storage client: %v", err)
return
}
if *printwf {
imagetest.PrintTests(ctx, client, testWorkflows, *project, *zone, *gcsPath)
return
}
if *validate {
if err := imagetest.ValidateTests(ctx, client, testWorkflows, *project, *zone, *gcsPath); err != nil {
log.Printf("Validate failed: %v\n", err)
}
return
}
suites, err := imagetest.RunTests(ctx, client, testWorkflows, *project, *zone, *gcsPath, *parallelCount, testProjectsReal)
if err != nil {
log.Fatalf("Failed to run tests: %v", err)
}
bytes, err := xml.MarshalIndent(suites, "", "\t")
if err != nil {
log.Fatalf("failed to marshall result: %v", err)
}
var outFile *os.File
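	// If the ARTIFACTS environment variable is set (e.g. by CI), write the JUnit report there; otherwise use -out_path.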
if artifacts := os.Getenv("ARTIFACTS"); artifacts != "" {
outFile, err = os.Create(artifacts + "/junit.xml")
} else {
outFile, err = os.Create(*outPath)
}
if err != nil {
log.Fatalf("failed to create output file: %v", err)
}
defer outFile.Close()
outFile.Write(bytes)
outFile.Write([]byte{'\n'})
fmt.Printf("%s\n", bytes)
if suites.Errors != 0 || suites.Failures != 0 {
log.Fatalf("test suite has error or failure")
}
}
|
[
"\"ARTIFACTS\""
] |
[] |
[
"ARTIFACTS"
] |
[]
|
["ARTIFACTS"]
|
go
| 1 | 0 | |
code/04_keypoints_from_images.py
|
# From Python
# It requires OpenCV installed for Python
import sys
import cv2
import os
from sys import platform
import argparse
import time
import numpy as np
import func
try:
# Import Openpose (Windows/Ubuntu/OSX)
dir_path = os.path.dirname(os.path.realpath(__file__))
try:
# Windows Import
if platform == "win32":
# Change these variables to point to the correct folder (Release/x64 etc.)
sys.path.append(dir_path + '/../../python/openpose/Release');
os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
import pyopenpose as op
else:
# Change these variables to point to the correct folder (Release/x64 etc.)
sys.path.append('../../../build/python');
# If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
# sys.path.append('/usr/local/python')
from openpose import pyopenpose as op
except ImportError as e:
print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
raise e
# # Flags
parser = argparse.ArgumentParser()
parser.add_argument("--image_dir", default="../../../examples/media/", help="Process a directory of images. Read all standard formats (jpg, png, bmp, etc.).")
# parser.add_argument("--image_dir", help="Process a directory of images. Read all standard formats (jpg, png, bmp, etc.).")
parser.add_argument("--no_display", default=False, help="Enable to disable the visual display.")
args = parser.parse_known_args()
# Custom Params (refer to include/openpose/flags.hpp for more parameters)
params = dict()
params["model_folder"] = "../../../models/"
params['net_resolution'] = '-1x224'
# Add others in path?
for i in range(0, len(args[1])):
curr_item = args[1][i]
if i != len(args[1])-1: next_item = args[1][i+1]
else: next_item = "1"
if "--" in curr_item and "--" in next_item:
key = curr_item.replace('-','')
if key not in params: params[key] = "1"
elif "--" in curr_item and "--" not in next_item:
key = curr_item.replace('-','')
if key not in params: params[key] = next_item
# Construct it from system arguments
# op.init_argv(args[1])
# oppython = op.OpenposePython()
# Starting OpenPose
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
# Read frames on directory
# imagePaths = op.get_images_on_directory(args[0].image_dir);
start = time.time()
# Process and display images
# for imagePath in imagePaths:
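    # Capture frames from the default webcam rather than iterating over still images.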
cap = cv2.VideoCapture(0)
while 1:
ret, frame = cap.read()
screen_origin = np.zeros((640, 640, 3), dtype="uint8")
screen = np.zeros((640, 640, 3), dtype="uint8")
datum = op.Datum()
# imageToProcess = cv2.imread(frame)
datum.cvInputData = frame
opWrapper.emplaceAndPop(op.VectorDatum([datum]))
# print("Body keypoints: \n" + str(datum.poseKeypoints))
# print(type(datum.poseKeypoints))
# print(datum.poseKeypoints.shape)
if not args[0].no_display:
cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", datum.cvOutputData)
# print(datum.poseKeypoints.dtype)
func.pose2plot(datum.poseKeypoints, screen_origin)
func.pose2plot_seikika(datum.poseKeypoints, screen)
key = cv2.waitKey(15)
if key == 27: break
end = time.time()
# print("OpenPose demo successfully finished. Total time: " + str(end - start) + " seconds")
except Exception as e:
print(e)
sys.exit(-1)
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
Empire/creds/__init__.py
|
'''
Copyright (c) 2015, Salesforce.com, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
"""
Credential Manager Base Classes
CredentialManager - Manage the location, collection and encryption/decryption of credentials
"""
import sys
import os.path
import json
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from creds.credentials import Credentials
from creds.credentials import CredentialsInvalidError, OAuthCredentials
import helpers
CRYPTO_FERNET=True
try:
from cryptography.fernet import Fernet
except:
CRYPTO_FERNET=False
import fernet
fernet.Configuration.enforce_ttl = False
__copyright__ = "2015 Salesforce.com, Inc"
__status__ = "Prototype"
class CredentialManager(object):
def __init__(self, filename, key=None):
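        # encrypt/decrypt are closures over the optional key; with no key they pass values through unchanged.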
def decrypt(ciphertext):
if key is None:
return ciphertext
try:
value = ciphertext
if CRYPTO_FERNET == True:
f = Fernet(key)
value = f.decrypt(ciphertext)
else:
verifier = fernet.verifier(key, ciphertext)
if verifier.valid() == True:
value = verifier.message
return value
except Exception as e:
pass
return ciphertext
def encrypt(value):
if key is None:
return value
if CRYPTO_FERNET == True:
f = Fernet(key)
token = f.encrypt(bytes(value))
else:
token = fernet.generate(key, value)
return token
self.filename = filename
self.encrypt = encrypt
self.decrypt = decrypt
def credential_object_for_type(self, identifier, type):
if (type == "password"):
return Credentials(identifier)
raise NotImplementedError("Credential type '%s' is not supported" % (type))
def get_credentials_for(self, identifier, check_valid=True):
file_creds = {}
try:
with open(self.filename) as fp:
file_creds = json.load(fp,object_hook=helpers.convert)
except IOError as ioe:
raise NameError("Credential file not found. " + str(ioe))
user_creds = file_creds.get(identifier)
        if user_creds is None:
            raise NameError(identifier)
#XXX: Why bother type checking here if we're specifying that in the function
#XXX: call in providence.py?
type = user_creds.get("type")
credentials = self.credential_object_for_type(identifier, type)
credentials.credential_manager = self
credentials.encrypt = self.encrypt
credentials.decrypt = self.decrypt
credentials.read(user_creds)
        if check_valid == True and credentials.valid() == False:
            # If it's an OAuth token maybe it needs refreshed?
            if isinstance(credentials, OAuthCredentials):
                try:
                    credentials.refreshToken()
                    if credentials.valid():
                        credentials.write()
                        return credentials
                except NotImplementedError, nie:
                    pass
            raise CredentialsInvalidError("Invalid Credentials for %s" % identifier)
return credentials
def get_or_create_credentials_for(self, identifier, type, write=True, check_valid=True):
try:
creds = self.get_credentials_for(identifier, check_valid=False)
except NameError, ne:
creds = self.new_credentials_for(identifier, type)
if write == True:
creds.write()
if check_valid == True and creds.valid() == False:
# If it's an Oauth token maybe it needs refreshed?
if isinstance(creds, OAuthCredentials):
try:
creds.refreshToken()
if creds.valid():
if write == True:
creds.write()
return creds
except NotImplementedError, nie:
pass
# Lets give them 3 attempts to login
for attempt_numer in range(3):
creds.new_credentials()
if creds.valid():
if write == True:
creds.write()
return creds
# All attempts failed, return creds are bad
raise CredentialsInvalidError("Invalid Credentials for %s" % identifier)
return creds
def new_credentials_for(self, identifier, type, server_data=None):
credentials = self.credential_object_for_type(identifier, type)
credentials.credential_manager = self
credentials.encrypt = self.encrypt
credentials.decrypt = self.decrypt
if (server_data is not None):
credentials.server_data = server_data
credentials.new_credentials()
return credentials
def write_back_credentials(self, credentials):
file_creds = {}
try:
file_data = open(self.filename)
file_creds = json.load(file_data)
except IOError as ioe:
print "Credential file not found, will create..."
user_creds = file_creds.get(credentials.identifier)
if user_creds is None:
user_creds = {}
user_creds = credentials._populate_user_creds(user_creds)
file_creds[credentials.identifier] = user_creds
with open(self.filename, 'w') as outfile:
json.dump(file_creds, outfile, indent=2, separators=(',', ': '))
if __name__ == '__main__':
import os
credential_key = os.environ.get('CREDENTIAL_KEY')
credentials_file = "credentials2.json"
credential_manager = CredentialManager(credentials_file, credential_key)
|
[] |
[] |
[
"CREDENTIAL_KEY"
] |
[]
|
["CREDENTIAL_KEY"]
|
python
| 1 | 0 | |
pkg/fed/fed.go
|
// Copyright 2020 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package fed
import (
"os"
"strconv"
"github.com/moov-io/customers/internal/util"
"github.com/moov-io/base/log"
)
func Cache(logger log.Logger, endpoint string, debug bool) Client {
client := NewClient(logger, endpoint, debug)
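	// FED_CACHE_SIZE overrides the default cache size of 1024 entries.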
data := util.Or(os.Getenv("FED_CACHE_SIZE"), "1024")
maxSize, _ := strconv.ParseInt(data, 10, 32)
return NewCacheClient(client, int(maxSize))
}
|
[
"\"FED_CACHE_SIZE\""
] |
[] |
[
"FED_CACHE_SIZE"
] |
[]
|
["FED_CACHE_SIZE"]
|
go
| 1 | 0 | |
atv/__init__.py
|
#Imports
from pyramid.config import Configurator
from sqlalchemy import engine_from_config
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from atv.security import groupfinder
import os
from .models import (
DBSession,
Base,
)
#All views must be stated here in order for view decorators to function
def main(global_config, **settings):
"""This function returns a Pyramid WSGI application."""
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
authn_policy = AuthTktAuthenticationPolicy('sosecret', callback=groupfinder,
hashalg='sha512')
authz_policy = ACLAuthorizationPolicy()
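    # Point Beaker's cache backend at the memcached servers provided by the environment.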
memcache_server = os.environ.get('MEMCACHE_SERVERS')
settings['beaker.cache.url'] = memcache_server
config = Configurator(settings=settings,
root_factory='atv.models.RootFactory')
config.include('pyramid_chameleon')
config.set_authentication_policy(authn_policy)
config.set_authorization_policy(authz_policy)
config.add_static_view('URL',
'static', cache_max_age=3600)
config.add_route('home', '/')
config.add_route('panda', '/panda/authorize_upload')
config.add_route('search', '/search')
config.add_route('searchb', '/search/')
config.add_route('answer', '/answer')
config.add_route('delete', '/delete')
config.add_route('denied', '/denied')
config.add_route('explore', '/explore')
config.add_route('exploreb', '/explore/')
config.add_route('exploretrending', '/explore/trending')
config.add_route('exploretrendingb', '/explore/trending/')
config.add_route('explorelatest', '/explore/latest')
config.add_route('explorelatestb', '/explore/latest/')
config.add_route('exploreourpicks', '/explore/ourpicks')
config.add_route('exploreourpicksb', '/explore/ourpicks/')
config.add_route('vote', '/vote')
config.add_route('deleteanswer', '/deleteanswer')
config.add_route('stream', '/i/stream')
config.add_route('streamb', '/i/stream/')
config.add_route('streamlatest', '/i/stream/latest')
config.add_route('streamlatestb', '/i/stream/latest/')
config.add_route('streamtop', '/i/stream/top')
config.add_route('streamtopb', '/i/stream/top/')
config.add_route('edit', '/i/edit')
config.add_route('editb', '/i/edit/')
config.add_route('followunfollow', '/2x4b32cp')
config.add_route('deletenotification', '/2x4b32qp')
config.add_route('chanlatest', '/{channel}/latest')
config.add_route('chanlatestb', '/{channel}/latest/')
config.add_route('chanrising', '/{channel}/top')
config.add_route('chanrisingb', '/{channel}/top/')
config.add_route('ask', '/ask')
config.add_route('signup', '/signup')
config.add_route('signupb', '/signup/')
config.add_route('login', '/login')
config.add_route('loginb', '/login/')
config.add_route('logout', '/logout')
config.add_route('logoutb', '/logout/')
config.add_route('privacy', '/privacy')
config.add_route('privacyb', '/privacy/')
config.add_route('terms', '/terms')
config.add_route('termsb', '/terms/')
config.add_route('blog', '/blog')
config.add_route('blogb', '/blog/')
config.add_route('admin', '/admin')
config.add_route('adminb', '/admin/')
config.add_route('copyright', '/copyright')
config.add_route('copyrightb', '/copyright/')
config.add_route('contact', '/contact')
config.add_route('contactb', '/contact/')
config.add_route('verify', '/verify')
config.add_route('verifyb', '/verify/')
config.add_route('reset', '/reset')
config.add_route('resetb', '/reset/')
config.add_route('ereset', '/ereset')
config.add_route('eresetb', '/ereset/')
config.add_route('verifyereset', '/ereset/{code}')
config.add_route('verifyreset', '/reset/{code}')
config.add_route('verifyemail', '/verify/{code}')
config.add_route('following', '/{channel}/following')
config.add_route('followingb', '/{channel}/following/')
config.add_route('a_history', '/{channel}/history/a')
config.add_route('a_historyb', '/{channel}/history/a/')
config.add_route('history', '/{channel}/history/q')
config.add_route('historyb', '/{channel}/history/q/')
config.add_route('question', '/{channel}/{question}')
config.add_route('questionb', '/{channel}/{question}/')
config.add_route('channel', '/{channel}')
config.add_route('channelb', '/{channel}/')
#Create WSGI app
config.scan()
return config.make_wsgi_app()
|
[] |
[] |
[
"MEMCACHE_SERVERS"
] |
[]
|
["MEMCACHE_SERVERS"]
|
python
| 1 | 0 | |
pipenv/vendor/click/_unicodefun.py
|
import os
import sys
import codecs
from ._compat import PY2
# If someone wants to vendor click, we want to ensure the
# correct package is discovered. Ideally we could use a
# relative import here but unfortunately Python does not
# support that.
click = sys.modules[__name__.rsplit('.', 1)[0]]
def _find_unicode_literals_frame():
import __future__
frm = sys._getframe(1)
idx = 1
while frm is not None:
if frm.f_globals.get('__name__', '').startswith('click.'):
frm = frm.f_back
idx += 1
elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag:
return idx
else:
break
return 0
def _check_for_unicode_literals():
if not __debug__:
return
if not PY2 or click.disable_unicode_literals_warning:
return
bad_frame = _find_unicode_literals_frame()
if bad_frame <= 0:
return
from warnings import warn
warn(Warning('Click detected the use of the unicode_literals '
'__future__ import. This is heavily discouraged '
'because it can introduce subtle bugs in your '
'code. You should instead use explicit u"" literals '
'for your unicode strings. For more information see '
'http://click.pocoo.org/python3/'),
stacklevel=bad_frame)
def _verify_python3_env():
"""Ensures that the environment is good for unicode on Python 3."""
if PY2:
return
try:
import locale
fs_enc = codecs.lookup(locale.getpreferredencoding()).name
except Exception:
fs_enc = 'ascii'
if fs_enc != 'ascii':
return
extra = ''
if os.name == 'posix':
import subprocess
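        # Ask the system for its available locales so the error can suggest a usable UTF-8 one.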
rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()[0]
good_locales = set()
has_c_utf8 = False
# Make sure we're operating on text here.
if isinstance(rv, bytes):
rv = rv.decode('ascii', 'replace')
for line in rv.splitlines():
locale = line.strip()
if locale.lower().endswith(('.utf-8', '.utf8')):
good_locales.add(locale)
if locale.lower() in ('c.utf8', 'c.utf-8'):
has_c_utf8 = True
extra += '\n\n'
if not good_locales:
extra += (
'Additional information: on this system no suitable UTF-8\n'
'locales were discovered. This most likely requires resolving\n'
'by reconfiguring the locale system.'
)
elif has_c_utf8:
extra += (
'This system supports the C.UTF-8 locale which is recommended.\n'
'You might be able to resolve your issue by exporting the\n'
'following environment variables:\n\n'
' export LC_ALL=C.UTF-8\n'
' export LANG=C.UTF-8'
)
else:
extra += (
'This system lists a couple of UTF-8 supporting locales that\n'
            'you can pick from. The following suitable locales were\n'
'discovered: %s'
) % ', '.join(sorted(good_locales))
bad_locale = None
for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'):
if locale and locale.lower().endswith(('.utf-8', '.utf8')):
bad_locale = locale
if locale is not None:
break
if bad_locale is not None:
extra += (
'\n\nClick discovered that you exported a UTF-8 locale\n'
'but the locale system could not pick up from it because\n'
'it does not exist. The exported locale is "%s" but it\n'
'is not supported'
) % bad_locale
raise RuntimeError('Click will abort further execution because Python 3 '
'was configured to use ASCII as encoding for the '
'environment. Consult http://click.pocoo.org/python3/ '
'for mitigation steps.' + extra)
|
[] |
[] |
[
"LC_ALL",
"LANG"
] |
[]
|
["LC_ALL", "LANG"]
|
python
| 2 | 0 | |
plugin/src/main/groovy/com/akaita/android/easylauncher/filter/ColorRibbonFilter.java
|
package com.akaita.android.easylauncher.filter;
import java.awt.Color;
import java.awt.Font;
import java.awt.FontMetrics;
import java.awt.Graphics2D;
import java.awt.RenderingHints;
import java.awt.font.FontRenderContext;
import java.awt.geom.AffineTransform;
import java.awt.geom.Rectangle2D;
import java.awt.image.BufferedImage;
public class ColorRibbonFilter implements EasyLauncherFilter {
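    // Set the EASYLAUNCHER_DEBUG environment variable to "true" to draw the label's bounding box.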
static final boolean debug = Boolean.parseBoolean(System.getenv("EASYLAUNCHER_DEBUG"));
final Color ribbonColor;
final Color labelColor;
String label;
String fontName = "Default";
int fontStyle = Font.PLAIN;
int fontSize = 0;
int fontSizeLarge = 0;
boolean largeRibbon = false;
public ColorRibbonFilter(String label, Color ribbonColor, Color labelColor, int fontSize, int fontSizeLarge) {
this.label = label;
this.ribbonColor = ribbonColor;
this.labelColor = labelColor;
this.fontSize = fontSize;
this.fontSizeLarge = fontSizeLarge;
}
public ColorRibbonFilter(String label, Color ribbonColor, Color labelColor, int fontSize) {
        this(label, ribbonColor, labelColor, fontSize, 0);
}
public ColorRibbonFilter(String label, Color ribbonColor) {
this(label, ribbonColor, Color.WHITE, 0, 0);
}
private static int calculateMaxLabelWidth(int y) {
return (int) Math.sqrt(Math.pow(y, 2) * 2);
}
private static void drawString(Graphics2D g, String str, int x, int y) {
g.drawString(str, x, y);
if (debug) {
FontMetrics fm = g.getFontMetrics();
Rectangle2D bounds = g.getFont().getStringBounds(str,
new FontRenderContext(g.getTransform(), true, true));
g.drawRect(x, y - fm.getAscent(), (int) bounds.getWidth(), fm.getAscent());
}
}
@Override
public void setAdaptiveLauncherMode(boolean enable) {
largeRibbon = enable;
}
@Override
public void apply(BufferedImage image) {
int width = image.getWidth();
int height = image.getHeight();
Graphics2D g = (Graphics2D) image.getGraphics();
g.setTransform(AffineTransform.getRotateInstance(Math.toRadians(-45)));
int y = height / (largeRibbon ? 2 : 4);
// calculate the rectangle where the label is rendered
FontRenderContext frc = new FontRenderContext(g.getTransform(), true, true);
int maxLabelWidth = calculateMaxLabelWidth(y);
g.setFont(getFont(maxLabelWidth, frc));
Rectangle2D labelBounds = g.getFont().getStringBounds(label == null ? "" : label, frc);
// draw the ribbon
g.setColor(ribbonColor);
g.fillRect(-width, y, width * 2, (int) (labelBounds.getHeight()));
if (label != null) {
// draw the label
g.setRenderingHint(RenderingHints.KEY_ANTIALIASING,
RenderingHints.VALUE_ANTIALIAS_ON);
g.setColor(labelColor);
FontMetrics fm = g.getFontMetrics();
drawString(g, label,
(int) -labelBounds.getWidth() / 2,
y + fm.getAscent());
}
g.dispose();
}
private Font getFont(int maxLabelWidth, FontRenderContext frc) {
final int size = (largeRibbon && fontSizeLarge > 0) ? fontSizeLarge : (!largeRibbon && fontSize > 0) ? fontSize : 0;
if (size > 0) {
return new Font(fontName, fontStyle, size);
}
int max = largeRibbon ? 64 : 32;
if (label == null) {
return new Font(fontName, fontStyle, max / 2);
}
int min = 0;
int x = max;
for (int i = 0; i < 10; i++) {
int m = ((max + min) / 2);
if (m == x) {
break;
}
Font font = new Font(fontName, fontStyle, m);
Rectangle2D labelBounds = font.getStringBounds(label, frc);
int px = (int) labelBounds.getWidth();
if (px > maxLabelWidth) {
max = m;
} else {
min = m;
}
x = m;
}
return new Font(fontName, fontStyle, x);
}
}
|
[
"\"EASYLAUNCHER_DEBUG\""
] |
[] |
[
"EASYLAUNCHER_DEBUG"
] |
[]
|
["EASYLAUNCHER_DEBUG"]
|
java
| 1 | 0 | |
glusterfs-plugin/main.go
|
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/docker/go-plugins-helpers/volume"
"github.com/sirupsen/logrus"
"github.com/origin-nexus/docker-volume-glusterfs/glusterfs-volume"
)
const socketAddress = "/run/docker/plugins/glusterfs.sock"
func NewDriver(root string) (*Driver, error) {
logrus.WithField("method", "new glusterfs driver").Debug(root)
options := map[string]string{}
options_str := os.Getenv("OPTIONS")
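	// OPTIONS holds space-separated key[=value] mount options, validated against the known GlusterFS options.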
for _, option := range strings.Split(options_str, " ") {
if option == "" {
continue
}
kv := strings.SplitN(option, "=", 2)
if len(kv) == 1 {
kv = append(kv, "")
}
switch kv[0] {
default:
if err := glusterfsvolume.CheckOption(kv[0], kv[1]); err != nil {
return nil, err
}
options[kv[0]] = kv[1]
}
}
loglevel := os.Getenv("LOGLEVEL")
switch loglevel {
case "TRACE":
logrus.SetLevel(logrus.TraceLevel)
case "DEBUG":
logrus.SetLevel(logrus.DebugLevel)
case "INFO":
logrus.SetLevel(logrus.InfoLevel)
case "":
loglevel = "WARNING"
fallthrough
case "WARNING":
logrus.SetLevel(logrus.WarnLevel)
case "ERROR":
logrus.SetLevel(logrus.ErrorLevel)
case "CRITICAL":
logrus.SetLevel(logrus.ErrorLevel)
case "NONE":
logrus.SetLevel(logrus.ErrorLevel)
default:
return nil, fmt.Errorf("unknown log level '%v'", loglevel)
}
servers := os.Getenv("SERVERS")
volumeName := os.Getenv("VOLUME_NAME")
_, dedicatedMounts := options["dedicated-mount"]
delete(options, "dedicated-mount")
return &Driver{
root: root,
statePath: filepath.Join(root, "glusterfs-state.json"),
glusterConfig: glusterfsvolume.Config{
Servers: servers,
VolumeName: volumeName,
DedicatedMount: dedicatedMounts,
Options: options,
},
state: State{
DockerVolumes: map[string]*DockerVolume{},
GlusterVolumes: glusterfsvolume.State{},
},
}, nil
}
func main() {
d, err := NewDriver("/mnt")
if err != nil {
logrus.Fatal(err)
}
h := volume.NewHandler(d)
logrus.Infof("listening on %s", socketAddress)
logrus.Error(h.ServeUnix(socketAddress, 0))
}
func executeCommand(cmd string, args ...string) ([]byte, error) {
return exec.Command(cmd, args...).CombinedOutput()
}
|
[
"\"OPTIONS\"",
"\"LOGLEVEL\"",
"\"SERVERS\"",
"\"VOLUME_NAME\""
] |
[] |
[
"OPTIONS",
"SERVERS",
"VOLUME_NAME",
"LOGLEVEL"
] |
[]
|
["OPTIONS", "SERVERS", "VOLUME_NAME", "LOGLEVEL"]
|
go
| 4 | 0 | |
client/router_create.go
|
package client
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strconv"
dockertypes "github.com/docker/docker/api/types"
"github.com/docker/go-connections/nat"
"github.com/skupperproject/skupper/pkg/certs"
"github.com/skupperproject/skupper-docker/api/types"
"github.com/skupperproject/skupper-docker/pkg/docker"
"github.com/skupperproject/skupper-docker/pkg/utils"
"github.com/skupperproject/skupper-docker/pkg/utils/configs"
)
// TODO: move all the certs stuff to a package?
func getCertData(name string) (certs.CertificateData, error) {
certData := certs.CertificateData{}
certPath := types.CertPath + name
files, err := ioutil.ReadDir(certPath)
if err == nil {
for _, f := range files {
dataString, err := ioutil.ReadFile(certPath + "/" + f.Name())
if err == nil {
certData[f.Name()] = []byte(dataString)
} else {
return certData, fmt.Errorf("Failed to read certificat data: %w", err)
}
}
}
return certData, err
}
func generateCredentials(ca string, name string, subject string, hosts string, includeConnectJson bool) error {
caData, _ := getCertData(ca)
certData := certs.GenerateCertificateData(name, subject, hosts, caData)
for k, v := range certData {
if err := ioutil.WriteFile(types.CertPath+name+"/"+k, v, 0755); err != nil {
return fmt.Errorf("Failed to write certificate file: %w", err)
}
}
if includeConnectJson {
certData["connect.json"] = []byte(configs.ConnectJson())
if err := ioutil.WriteFile(types.CertPath+name+"/connect.json", []byte(configs.ConnectJson()), 0755); err != nil {
return fmt.Errorf("Failed to write connect file: %w", err)
}
}
return nil
}
func ensureCA(name string) (certs.CertificateData, error) {
// check if existing by looking at path/dir, if not create dir to persist
caData := certs.GenerateCACertificateData(name, name)
if err := os.Mkdir(types.CertPath+name, 0755); err != nil {
return nil, fmt.Errorf("Failed to create certificate directory: %w", err)
}
for k, v := range caData {
if err := ioutil.WriteFile(types.CertPath+name+"/"+k, v, 0755); err != nil {
return nil, fmt.Errorf("Failed to write CA certificate file: %w", err.Error())
}
}
return caData, nil
}
func (cli *VanClient) GetRouterSpecFromOpts(options types.SiteConfigSpec, siteId string) (*types.RouterSpec, error) {
van := &types.RouterSpec{}
	//TODO: think through van name, router name, secret names, etc.
if options.SkupperName == "" {
info, _ := cli.DockerInterface.Info()
van.Name = info.Name
} else {
van.Name = options.SkupperName
}
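	// Allow overriding the transport (qdrouterd) image via environment, otherwise use the default.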
if os.Getenv("QDROUTERD_MAGE") != "" {
van.Transport.Image = os.Getenv("QDROUTERD_IMAGE")
} else {
van.Transport.Image = types.DefaultTransportImage
}
van.AuthMode = types.ConsoleAuthMode(options.AuthMode)
van.Transport.LivenessPort = types.TransportLivenessPort
van.Transport.Labels = map[string]string{
"application": types.TransportDeploymentName,
"skupper.io/component": types.TransportComponentName,
"prometheus.io/port": "9090",
"prometheus.io/scrape": "true",
}
listeners := []types.Listener{}
interRouterListeners := []types.Listener{}
edgeListeners := []types.Listener{}
sslProfiles := []types.SslProfile{}
listeners = append(listeners, types.Listener{
Name: "amqp",
Host: "localhost",
Port: 5672,
})
sslProfiles = append(sslProfiles, types.SslProfile{
Name: "skupper-amqps",
})
listeners = append(listeners, types.Listener{
Name: "amqps",
Host: "0.0.0.0",
Port: 5671,
SslProfile: "skupper-amqps",
SaslMechanisms: "EXTERNAL",
AuthenticatePeer: false,
})
if options.EnableRouterConsole {
if van.AuthMode == types.ConsoleAuthModeInternal {
listeners = append(listeners, types.Listener{
Name: types.ConsolePortName,
Host: "0.0.0.0",
Port: types.ConsoleDefaultServicePort,
Http: true,
AuthenticatePeer: true,
})
} else if van.AuthMode == types.ConsoleAuthModeUnsecured {
listeners = append(listeners, types.Listener{
Name: types.ConsolePortName,
Host: "0.0.0.0",
Port: types.ConsoleDefaultServicePort,
Http: true,
})
}
}
if !options.IsEdge {
sslProfiles = append(sslProfiles, types.SslProfile{
Name: "skupper-internal",
})
interRouterListeners = append(interRouterListeners, types.Listener{
Name: "interior-listener",
Host: "0.0.0.0",
Port: types.InterRouterListenerPort,
SslProfile: types.InterRouterProfile,
SaslMechanisms: "EXTERNAL",
AuthenticatePeer: true,
})
edgeListeners = append(edgeListeners, types.Listener{
Name: "edge-listener",
Host: "0.0.0.0",
Port: types.EdgeListenerPort,
SslProfile: types.InterRouterProfile,
SaslMechanisms: "EXTERNAL",
AuthenticatePeer: true,
})
}
// TODO: remove redundancy, needed for now for config template
van.Assembly.Name = van.Name
if options.IsEdge {
van.Assembly.Mode = string(types.TransportModeEdge)
} else {
van.Assembly.Mode = string(types.TransportModeInterior)
}
van.Assembly.Listeners = listeners
van.Assembly.InterRouterListeners = interRouterListeners
van.Assembly.EdgeListeners = edgeListeners
van.Assembly.SslProfiles = sslProfiles
envVars := []string{}
if !options.IsEdge {
envVars = append(envVars, "APPLICATION_NAME="+types.TransportDeploymentName)
// TODO: auto_mesh for non k8s deploy
// envVars = append(envVars, "QDROUTERD_AUTO_MESH_DISCOVERY=QUERY")
}
if options.AuthMode == string(types.ConsoleAuthModeInternal) {
envVars = append(envVars, "QDROUTERD_AUTO_CREATE_SASLDB_SOURCE=/etc/qpid-dispatch/sasl-users/")
envVars = append(envVars, "QDROUTERD_AUTO_CREATE_SASLDB_PATH=/tmp/qdrouterd.sasldb")
}
// envVars = append(envVars, "PN_TRACE_FRM=1")
envVars = append(envVars, "QDROUTERD_CONF="+configs.QdrouterdConfig(&van.Assembly))
van.Transport.EnvVar = envVars
ports := nat.PortSet{}
ports["5671/tcp"] = struct{}{}
if options.AuthMode != "" {
ports[nat.Port(strconv.Itoa(int(types.ConsoleDefaultServicePort))+"/tcp")] = struct{}{}
}
ports[nat.Port(strconv.Itoa(int(types.TransportLivenessPort)))+"/tcp"] = struct{}{}
if !options.IsEdge {
ports[nat.Port(strconv.Itoa(int(types.InterRouterListenerPort)))+"/tcp"] = struct{}{}
ports[nat.Port(strconv.Itoa(int(types.EdgeListenerPort)))+"/tcp"] = struct{}{}
}
van.Transport.Ports = ports
volumes := []string{
"skupper",
"skupper-amqps",
}
if !options.IsEdge {
volumes = append(volumes, "skupper-internal")
}
if options.AuthMode == string(types.ConsoleAuthModeInternal) {
volumes = append(volumes, "skupper-console-users")
volumes = append(volumes, "skupper-sasl-config")
}
van.Transport.Volumes = volumes
// Note: use index to make directory, use index/value to make mount
mounts := make(map[string]string)
mounts[types.CertPath] = "/etc/qpid-dispatch-certs"
mounts[types.ConnPath] = "/etc/qpid-dispatch/connections"
mounts[types.ConsoleUsersPath] = "/etc/qpid-dispatch/sasl-users/"
mounts[types.SaslConfigPath] = "/etc/sasl2"
van.Transport.Mounts = mounts
cas := []types.CertAuthority{}
cas = append(cas, types.CertAuthority{Name: "skupper-ca"})
if !options.IsEdge {
cas = append(cas, types.CertAuthority{Name: "skupper-internal-ca"})
}
van.CertAuthoritys = cas
credentials := []types.Credential{}
credentials = append(credentials, types.Credential{
CA: "skupper-ca",
Name: "skupper-amqps",
Subject: "skupper-router",
Hosts: "skupper-router",
ConnectJson: false,
Post: false,
})
credentials = append(credentials, types.Credential{
CA: "skupper-ca",
Name: "skupper",
Subject: "skupper-router",
Hosts: "",
ConnectJson: true,
Post: false,
})
if !options.IsEdge {
credentials = append(credentials, types.Credential{
CA: "skupper-internal-ca",
Name: "skupper-internal",
Subject: "skupper-internal",
Hosts: "skupper-internal",
ConnectJson: false,
Post: false,
})
}
van.Credentials = credentials
// Controller spec portion
if os.Getenv("SKUPPER_CONTROLLER_IMAGE") != "" {
van.Controller.Image = os.Getenv("SKUPPER_CONTROLLER_IMAGE")
} else {
van.Controller.Image = types.DefaultControllerImage
}
van.Controller.Labels = map[string]string{
"application": types.ControllerDeploymentName,
"skupper.io/component": types.ControllerComponentName,
}
van.Controller.EnvVar = []string{
"SKUPPER_SITE_ID=" + siteId,
"SKUPPER_PROXY_IMAGE=" + van.Controller.Image,
}
van.Controller.Mounts = map[string]string{
types.CertPath + "skupper": "/etc/messaging",
types.ServicePath: "/etc/messaging/services",
"/var/run": "/var/run",
}
return van, nil
}
// RouterCreate instantiates a VAN Router (transport and controller)
func (cli *VanClient) RouterCreate(options types.SiteConfigSpec) error {
//TODO return error
if options.EnableConsole {
if options.AuthMode == string(types.ConsoleAuthModeInternal) || options.AuthMode == "" {
options.AuthMode = string(types.ConsoleAuthModeInternal)
if options.User == "" {
options.User = "admin"
}
if options.Password == "" {
options.Password = utils.RandomId(10)
}
} else {
if options.User != "" {
return fmt.Errorf("--router-console-user only valid when --router-console-auth=internal")
}
if options.Password != "" {
return fmt.Errorf("--router-console-password only valid when --router-console-auth=internal")
}
}
}
// TODO check if resources already exist: either delete them all or error out
// setup host dirs
_ = os.RemoveAll(types.HostPath)
// create host dirs TODO this should not be here
if err := os.MkdirAll(types.HostPath, 0755); err != nil {
return err
}
if err := os.Mkdir(types.SitePath, 0755); err != nil {
return err
}
sc, err := cli.SiteConfigCreate(options)
if err != nil {
return err
}
van, err := cli.GetRouterSpecFromOpts(options, sc.UID)
if err != nil {
return err
}
err = cli.DockerInterface.PullImage(van.Transport.Image, dockertypes.AuthConfig{}, dockertypes.ImagePullOptions{})
if err != nil {
return err
}
err = cli.DockerInterface.PullImage(van.Controller.Image, dockertypes.AuthConfig{}, dockertypes.ImagePullOptions{})
if err != nil {
return err
}
for mnt, _ := range van.Transport.Mounts {
if err := os.Mkdir(mnt, 0755); err != nil {
return err
}
}
for _, v := range van.Transport.Volumes {
if err := os.Mkdir(types.CertPath+v, 0755); err != nil {
return err
}
}
// this one is needed by the controller
if err := os.Mkdir(types.ServicePath, 0755); err != nil {
return err
}
if err := os.Mkdir(types.ServicePath+"local/", 0755); err != nil {
return err
}
if err := os.Mkdir(types.ServicePath+"all/", 0755); err != nil {
return err
}
// create skupper-services file
svcDefs := make(map[string]types.ServiceInterface)
encoded, err := json.Marshal(svcDefs)
if err != nil {
return err
}
err = ioutil.WriteFile(types.LocalServiceDefsFile, encoded, 0755)
if err != nil {
return err
}
err = ioutil.WriteFile(types.AllServiceDefsFile, encoded, 0755)
if err != nil {
return err
}
if options.EnableConsole && options.AuthMode == string(types.ConsoleAuthModeInternal) {
config := `
pwcheck_method: auxprop
auxprop_plugin: sasldb
sasldb_path: /tmp/qdrouterd.sasldb
`
err := ioutil.WriteFile(types.SaslConfigPath+"/qdrouterd.conf", []byte(config), 0755)
if err != nil {
return err
}
err = ioutil.WriteFile(types.ConsoleUsersPath+"/"+options.User, []byte(options.Password), 0755)
if err != nil {
return err
}
}
// create user network
_, err = docker.NewTransportNetwork(types.TransportNetworkName, cli.DockerInterface)
if err != nil {
return err
}
// fire up the containers
transport, err := docker.NewTransportContainer(van, cli.DockerInterface)
if err != nil {
return err
}
for _, ca := range van.CertAuthoritys {
ensureCA(ca.Name)
}
for _, cred := range van.Credentials {
generateCredentials(cred.CA, cred.Name, cred.Subject, cred.Hosts, cred.ConnectJson)
}
//TODO : generate certs first?
err = docker.StartContainer(transport.Name, cli.DockerInterface)
if err != nil {
return fmt.Errorf("Could not start transport container: %w", err)
}
controller, err := docker.NewControllerContainer(van, cli.DockerInterface)
if err != nil {
return err
}
err = docker.StartContainer(controller.Name, cli.DockerInterface)
if err != nil {
return fmt.Errorf("Could not start controller container: %w", err)
}
return nil
}
|
[
"\"QDROUTERD_MAGE\"",
"\"QDROUTERD_IMAGE\"",
"\"SKUPPER_CONTROLLER_IMAGE\"",
"\"SKUPPER_CONTROLLER_IMAGE\""
] |
[] |
[
"QDROUTERD_MAGE",
"QDROUTERD_IMAGE",
"SKUPPER_CONTROLLER_IMAGE"
] |
[]
|
["QDROUTERD_MAGE", "QDROUTERD_IMAGE", "SKUPPER_CONTROLLER_IMAGE"]
|
go
| 3 | 0 | |
conect_login.py
|
#importando a biblioteca :
from mysql.connector import connect
#abrindo uma conexão com o banco de dados
conexao = connect(host='localhost', port=3306, user='root')
print(conexao)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
parallel_accel/shared/test/shared/redis/test_workers.py
|
# Copyright 2021 The ParallelAccel Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=protected-access
"""Unit test for workers module"""
import importlib
import os
import secrets
import time
import unittest
import unittest.mock
import uuid
import redis
from parallel_accel.shared import schemas
from parallel_accel.shared.redis import base, workers
class TestWorkersRedisStore(unittest.TestCase):
"""Tests WorkersRedisStore class behavior."""
API_KEY = secrets.token_hex(16)
JOB_ID = uuid.uuid4()
WORKER = schemas.WorkerInternal(schemas.WorkerState.OFFLINE)
@classmethod
def setUpClass(cls) -> None:
"""See base class documentation."""
cls.patchers = []
cls.mocked_time = unittest.mock.Mock(spec=time.time)
patcher = unittest.mock.patch("time.time", cls.mocked_time)
cls.patchers.append(patcher)
cls.mocked_redis = unittest.mock.Mock(spec=redis.Redis)
cls.mocked_redis.return_value = cls.mocked_redis
patcher = unittest.mock.patch("redis.Redis", cls.mocked_redis)
cls.patchers.append(patcher)
for patcher in cls.patchers:
patcher.start()
importlib.reload(base)
importlib.reload(workers)
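        # redis.Redis is mocked above, so any REDISHOST value will do here.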
os.environ["REDISHOST"] = "localhost"
cls.store = workers.WorkersRedisStore()
cls.connection = cls.store._connections[workers.RedisInstances.WORKERS]
@classmethod
def tearDownClass(cls) -> None:
"""See base class documentation."""
del os.environ["REDISHOST"]
for patcher in cls.patchers:
patcher.stop()
def setUp(self) -> None:
"""See base class documentation."""
self.connection.exists.return_value = 1
serialized = schemas.encode(schemas.WorkerSchema, self.WORKER)
self.connection.get.return_value = serialized
def tearDown(self) -> None:
"""See base class documentation."""
for mock in [x for x in dir(self) if x.startswith("mocked_")]:
getattr(self, mock).reset_mock()
self.connection.exists.side_effect = None
def test_get_worker(self) -> None:
"""Tests get_worker method: worker exists."""
# Run test
worker = self.store.get_worker(self.API_KEY)
        # Verification
self.assertEqual(worker, self.WORKER)
self._verify_redis_exists_call()
result = self.connection.get.called_once_with(self.API_KEY)
self.assertTrue(result)
def test_get_worker_not_exists(self) -> None:
"""Tests get_worker method: worker does not exist."""
# Set up
self.connection.exists.return_value = 0
# Run test
with self.assertRaises(workers.WorkerNotFoundError):
self.store.get_worker(self.API_KEY)
# Verification
self._verify_redis_exists_call()
def test_has_worker(self) -> None:
"""Tests has_worker method."""
# Run test
result = self.store.has_worker(self.API_KEY)
self.assertTrue(result)
        # Verification
self._verify_redis_exists_call()
def test_set_offline(self) -> None:
"""Tests set_offline method."""
# Run test
self.store.set_offline(self.API_KEY)
# Verification
data = schemas.WorkerInternal(state=schemas.WorkerState.OFFLINE)
self._verify_redis_exists_call()
self._verify_redis_set_call(data)
def test_set_offline_same_state(self) -> None:
"""Tests set_offline method: worker already in OFFLINE state"""
# Set up
worker = schemas.WorkerInternal(state=schemas.WorkerState.OFFLINE)
serialized = schemas.encode(schemas.WorkerSchema, worker)
self.connection.get.return_value = serialized
# Run test
self.store.set_offline(self.API_KEY)
# Verification
self._verify_redis_exists_call()
self.mocked_redis.set.assert_not_called()
def test_set_offline_not_exist(self) -> None:
"""Tests set_offline method: worker does not exist"""
# Set up
self.connection.exists.side_effect = [0, 1]
# Run test
self.store.set_offline(self.API_KEY)
# Verification
data = schemas.WorkerInternal(state=schemas.WorkerState.OFFLINE)
self._verify_redis_exists_call()
self._verify_redis_set_call(data)
def test_set_booting(self) -> None:
"""Tests set_booting method."""
# Run test
self.store.set_booting(self.API_KEY)
# Verification
data = schemas.WorkerInternal(state=schemas.WorkerState.BOOTING)
self._verify_redis_exists_call()
self._verify_redis_set_call(data)
def test_set_idle(self) -> None:
"""Tests set_idle method."""
# Run test
self.store.set_idle(self.API_KEY)
# Verification
data = schemas.WorkerInternal(state=schemas.WorkerState.IDLE)
self._verify_redis_exists_call()
self._verify_redis_set_call(data)
def test_set_processing_job(self) -> None:
"""Tests set_processing_job method."""
job_id = uuid.uuid4()
now = 1234567890
self.mocked_time.return_value = now
# Run test
self.store.set_processing_job(self.API_KEY, job_id)
# Verification
data = schemas.WorkerInternal(
state=schemas.WorkerState.PROCESSING_JOB,
job_id=job_id,
job_timestamp=now,
)
self._verify_redis_exists_call()
self._verify_redis_set_call(data)
def test_set_error(self) -> None:
"""Tests set_error method."""
error = "Some error"
# Run test
self.store.set_error(self.API_KEY, error)
# Verification
data = schemas.WorkerInternal(
state=schemas.WorkerState.ERROR, error=error
)
self._verify_redis_exists_call()
self._verify_redis_set_call(data)
def test_set_shutting_down(self) -> None:
"""Tests set_shutting_down method."""
# Run test
self.store.set_shutting_down(self.API_KEY)
# Verification
data = schemas.WorkerInternal(state=schemas.WorkerState.SHUTTING_DOWN)
self._verify_redis_exists_call()
self._verify_redis_set_call(data)
def _verify_redis_set_call(self, data: schemas.Worker) -> None:
"""Verifies calls to the mocked redis.Redis.set() function.
Args:
data: Worker object that was passed to the set() function.
"""
serialized = schemas.encode(schemas.WorkerInternalSchema, data)
result = self.connection.set.called_once_with(self.API_KEY, serialized)
self.assertTrue(result)
def _verify_redis_exists_call(self) -> None:
"""Verifies calls to the mocked redis.Redis.exists() function."""
self.connection.exists.assert_called_once_with(self.API_KEY)
|
[] |
[] |
[
"REDISHOST"
] |
[]
|
["REDISHOST"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'OA.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/models/test_renderedtifields.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for RenderedTaskInstanceFields."""
import os
import unittest
from datetime import date, timedelta
from unittest import mock
from parameterized import parameterized
from airflow import settings
from airflow.models import Variable
from airflow.models.dag import DAG
from airflow.models.renderedtifields import RenderedTaskInstanceFields as RTIF
from airflow.models.taskinstance import TaskInstance as TI
from airflow.operators.bash import BashOperator
from airflow.utils.session import create_session
from airflow.utils.timezone import datetime
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.db import clear_rendered_ti_fields
TEST_DAG = DAG("example_rendered_ti_field", schedule_interval=None)
START_DATE = datetime(2018, 1, 1)
EXECUTION_DATE = datetime(2019, 1, 1)
class ClassWithCustomAttributes:
"""Class for testing purposes: allows creating objects with custom attributes in a single statement."""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return f"{ClassWithCustomAttributes.__name__}({str(self.__dict__)})"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
class TestRenderedTaskInstanceFields(unittest.TestCase):
"""Unit tests for RenderedTaskInstanceFields."""
def setUp(self):
clear_rendered_ti_fields()
def tearDown(self):
clear_rendered_ti_fields()
@parameterized.expand(
[
(None, None),
([], []),
({}, {}),
("test-string", "test-string"),
({"foo": "bar"}, {"foo": "bar"}),
("{{ task.task_id }}", "test"),
(date(2018, 12, 6), "2018-12-06"),
(datetime(2018, 12, 6, 10, 55), "2018-12-06 10:55:00+00:00"),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
"ClassWithCustomAttributes({'att1': 'test', 'att2': '{{ task.task_id }}', "
"'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
),
template_fields=["nested1"],
),
"ClassWithCustomAttributes({'nested1': ClassWithCustomAttributes("
"{'att1': 'test', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes("
"{'att3': '{{ task.task_id }}', 'att4': '{{ task.task_id }}', 'template_fields': ['att3']}), "
"'template_fields': ['nested1']})",
),
]
)
def test_get_templated_fields(self, templated_field, expected_rendered_field):
"""
Test that template_fields are rendered correctly, stored in the Database,
and are correctly fetched using RTIF.get_templated_fields
"""
dag = DAG("test_serialized_rendered_fields", start_date=START_DATE)
with dag:
task = BashOperator(task_id="test", bash_command=templated_field)
ti = TI(task=task, execution_date=EXECUTION_DATE)
rtif = RTIF(ti=ti)
assert ti.dag_id == rtif.dag_id
assert ti.task_id == rtif.task_id
assert ti.execution_date == rtif.execution_date
assert expected_rendered_field == rtif.rendered_fields.get("bash_command")
with create_session() as session:
session.add(rtif)
assert {"bash_command": expected_rendered_field, "env": None} == RTIF.get_templated_fields(ti=ti)
# Test the else part of get_templated_fields
# i.e. for the TIs that are not stored in RTIF table
# Fetching them will return None
with dag:
task_2 = BashOperator(task_id="test2", bash_command=templated_field)
ti2 = TI(task_2, EXECUTION_DATE)
assert RTIF.get_templated_fields(ti=ti2) is None
@parameterized.expand(
[
(0, 1, 0, 1),
(1, 1, 1, 1),
(1, 0, 1, 0),
(3, 1, 1, 1),
(4, 2, 2, 1),
(5, 2, 2, 1),
]
)
def test_delete_old_records(self, rtif_num, num_to_keep, remaining_rtifs, expected_query_count):
"""
Test that old records are deleted from rendered_task_instance_fields table
for a given task_id and dag_id.
"""
session = settings.Session()
dag = DAG("test_delete_old_records", start_date=START_DATE)
with dag:
task = BashOperator(task_id="test", bash_command="echo {{ ds }}")
rtif_list = [
RTIF(TI(task=task, execution_date=EXECUTION_DATE + timedelta(days=num)))
for num in range(rtif_num)
]
session.add_all(rtif_list)
session.commit()
result = session.query(RTIF).filter(RTIF.dag_id == dag.dag_id, RTIF.task_id == task.task_id).all()
for rtif in rtif_list:
assert rtif in result
assert rtif_num == len(result)
# Verify old records are deleted and only 'num_to_keep' records are kept
# For MSSQL, an extra query is fired in RenderedTaskInstanceFields.delete_old_records
expected_query_count_based_on_db = (
expected_query_count + 1
if session.bind.dialect.name == "mssql" and expected_query_count != 0
else expected_query_count
)
with assert_queries_count(expected_query_count_based_on_db):
RTIF.delete_old_records(task_id=task.task_id, dag_id=task.dag_id, num_to_keep=num_to_keep)
result = session.query(RTIF).filter(RTIF.dag_id == dag.dag_id, RTIF.task_id == task.task_id).all()
assert remaining_rtifs == len(result)
def test_write(self):
"""
Test records can be written and overwritten
"""
Variable.set(key="test_key", value="test_val")
session = settings.Session()
result = session.query(RTIF).all()
assert [] == result
with DAG("test_write", start_date=START_DATE):
task = BashOperator(task_id="test", bash_command="echo {{ var.value.test_key }}")
rtif = RTIF(TI(task=task, execution_date=EXECUTION_DATE))
rtif.write()
result = (
session.query(RTIF.dag_id, RTIF.task_id, RTIF.rendered_fields)
.filter(
RTIF.dag_id == rtif.dag_id,
RTIF.task_id == rtif.task_id,
RTIF.execution_date == rtif.execution_date,
)
.first()
)
assert ('test_write', 'test', {'bash_command': 'echo test_val', 'env': None}) == result
# Test that overwrite saves new values to the DB
Variable.delete("test_key")
Variable.set(key="test_key", value="test_val_updated")
with DAG("test_write", start_date=START_DATE):
updated_task = BashOperator(task_id="test", bash_command="echo {{ var.value.test_key }}")
rtif_updated = RTIF(TI(task=updated_task, execution_date=EXECUTION_DATE))
rtif_updated.write()
result_updated = (
session.query(RTIF.dag_id, RTIF.task_id, RTIF.rendered_fields)
.filter(
RTIF.dag_id == rtif_updated.dag_id,
RTIF.task_id == rtif_updated.task_id,
RTIF.execution_date == rtif_updated.execution_date,
)
.first()
)
assert (
'test_write',
'test',
{'bash_command': 'echo test_val_updated', 'env': None},
) == result_updated
@mock.patch.dict(os.environ, {"AIRFLOW_IS_K8S_EXECUTOR_POD": "True"})
@mock.patch('airflow.utils.log.secrets_masker.redact', autospec=True, side_effect=lambda d, _=None: d)
def test_get_k8s_pod_yaml(self, redact):
"""
Test that k8s_pod_yaml is rendered correctly, stored in the Database,
and are correctly fetched using RTIF.get_k8s_pod_yaml
"""
dag = DAG("test_get_k8s_pod_yaml", start_date=START_DATE)
with dag:
task = BashOperator(task_id="test", bash_command="echo hi")
ti = TI(task=task, execution_date=EXECUTION_DATE)
render_k8s_pod_yaml = mock.patch.object(
ti, 'render_k8s_pod_yaml', return_value={"I'm a": "pod"}
).start()
rtif = RTIF(ti=ti)
assert ti.dag_id == rtif.dag_id
assert ti.task_id == rtif.task_id
assert ti.execution_date == rtif.execution_date
expected_pod_yaml = {"I'm a": "pod"}
assert rtif.k8s_pod_yaml == render_k8s_pod_yaml.return_value
# K8s pod spec dict was passed to redact
redact.assert_any_call(rtif.k8s_pod_yaml)
with create_session() as session:
session.add(rtif)
session.flush()
assert expected_pod_yaml == RTIF.get_k8s_pod_yaml(ti=ti, session=session)
session.rollback()
# Test the else part of get_k8s_pod_yaml
# i.e. for the TIs that are not stored in RTIF table
# Fetching them will return None
assert RTIF.get_k8s_pod_yaml(ti=ti, session=session) is None
@mock.patch.dict(os.environ, {"AIRFLOW_VAR_API_KEY": "secret"})
@mock.patch('airflow.utils.log.secrets_masker.redact', autospec=True)
def test_redact(self, redact):
dag = DAG("test_ritf_redact", start_date=START_DATE)
with dag:
task = BashOperator(
task_id="test",
bash_command="echo {{ var.value.api_key }}",
env={'foo': 'secret', 'other_api_key': 'masked based on key name'},
)
redact.side_effect = [
'val 1',
'val 2',
]
ti = TI(task=task, execution_date=EXECUTION_DATE)
rtif = RTIF(ti=ti)
assert rtif.rendered_fields == {
'bash_command': 'val 1',
'env': 'val 2',
}
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/e2e/govmomi_test.go
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"flag"
"fmt"
"net/url"
"os"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/soap"
)
func init() {
flag.StringVar(&vsphereServer, "e2e.vsphereServer", os.Getenv("VSPHERE_SERVER"), "the vSphere server used for e2e tests")
flag.StringVar(&vsphereDatacenter, "e2e.vsphereDatacenter", os.Getenv("VSPHERE_DATACENTER"), "the inventory path of the vSphere datacenter in which VMs are created")
flag.StringVar(&vsphereFolder, "e2e.vsphereFolder", os.Getenv("VSPHERE_FOLDER"), "the inventory path of the vSphere folder in which VMs are created")
flag.StringVar(&vspherePool, "e2e.vspherePool", os.Getenv("VSPHERE_RESOURCE_POOL"), "the inventory path of the vSphere resource pool in which VMs are created")
flag.StringVar(&vsphereDatastore, "e2e.vsphereDatastore", os.Getenv("VSPHERE_DATASTORE"), "the name of the vSphere datastore in which VMs are created")
flag.StringVar(&vsphereNetwork, "e2e.vsphereNetwork", os.Getenv("VSPHERE_NETWORK"), "the name of the vSphere network to which VMs are connected")
flag.StringVar(&vsphereMachineTemplate, "e2e.vsphereMachineTemplate", os.Getenv("VSPHERE_MACHINE_TEMPLATE"), "the template from which the Kubernetes VMs are cloned")
flag.StringVar(&vsphereHAProxyTemplate, "e2e.vsphereHAProxyTemplate", os.Getenv("VSPHERE_HAPROXY_TEMPLATE"), "the template from which the HAProxy load balancer VM is cloned")
}
func initVSphereSession() {
By("parsing vSphere server URL")
serverURL, err := soap.ParseURL(vsphereServer)
Expect(err).ShouldNot(HaveOccurred())
By("creating vSphere client", func() {
var err error
serverURL.User = url.UserPassword(vsphereUsername, vspherePassword)
vsphereClient, err = govmomi.NewClient(ctx, serverURL, true)
Expect(err).ShouldNot(HaveOccurred())
})
By("creating vSphere finder")
vsphereFinder = find.NewFinder(vsphereClient.Client)
By("configuring vSphere datacenter")
datacenter, err := vsphereFinder.DatacenterOrDefault(ctx, vsphereDatacenter)
Expect(err).ShouldNot(HaveOccurred())
vsphereFinder.SetDatacenter(datacenter)
}
func destroyVMsWithPrefix(prefix string) {
vmList, _ := vsphereFinder.VirtualMachineList(ctx, vspherePool)
for _, vm := range vmList {
if strings.HasPrefix(vm.Name(), prefix) {
destroyVM(vm)
}
}
}
func destroyVM(vm *object.VirtualMachine) {
if task, _ := vm.PowerOff(ctx); task != nil {
if err := task.Wait(ctx); err != nil {
fmt.Printf("error powering off %s machine: %s\n", vm.Name(), err)
}
}
if task, _ := vm.Destroy(ctx); task != nil {
if err := task.Wait(ctx); err != nil {
fmt.Printf("error destroying %s machine: %s\n", vm.Name(), err)
}
}
}
|
[
"\"VSPHERE_SERVER\"",
"\"VSPHERE_DATACENTER\"",
"\"VSPHERE_FOLDER\"",
"\"VSPHERE_RESOURCE_POOL\"",
"\"VSPHERE_DATASTORE\"",
"\"VSPHERE_NETWORK\"",
"\"VSPHERE_MACHINE_TEMPLATE\"",
"\"VSPHERE_HAPROXY_TEMPLATE\""
] |
[] |
[
"VSPHERE_FOLDER",
"VSPHERE_DATACENTER",
"VSPHERE_NETWORK",
"VSPHERE_SERVER",
"VSPHERE_RESOURCE_POOL",
"VSPHERE_MACHINE_TEMPLATE",
"VSPHERE_HAPROXY_TEMPLATE",
"VSPHERE_DATASTORE"
] |
[]
|
["VSPHERE_FOLDER", "VSPHERE_DATACENTER", "VSPHERE_NETWORK", "VSPHERE_SERVER", "VSPHERE_RESOURCE_POOL", "VSPHERE_MACHINE_TEMPLATE", "VSPHERE_HAPROXY_TEMPLATE", "VSPHERE_DATASTORE"]
|
go
| 8 | 0 | |
testing/s3/suite.go
|
package s3
import (
"fmt"
"os"
"github.com/stretchr/testify/suite"
)
// ----------------------------------------------------------------------
// Docker test suite
// ----------------------------------------------------------------------
// Suite provides a base type to use for constructing test suites that
// depend on a docker container. Concrete test suite types must set the
// Start function.
type Suite struct {
suite.Suite
// Container is a pointer to the Container structure, which will
// be assigned after SetupSuite() successfully starts the
// container.
Container *Container
// Start is a function which launches a container, returning the
// container and an optional error. Each concrete docker test
// suite must set the Start member before propagating the
// SetupSuite() call, and should call one of the container
// starting functions provided by dockerutil.
//
// See S3Suite for a concrete example.
Start func() (*Container, error)
}
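// A minimal sketch (not part of this file) of a concrete suite that embeds
// Suite and sets Start before delegating to SetupSuite. MyS3Suite and the
// startS3Container helper are hypothetical names; see S3Suite in this
// package for the real example.
//
//	type MyS3Suite struct {
//		Suite
//	}
//
//	func (s *MyS3Suite) SetupSuite() {
//		s.Start = startS3Container // hypothetical wrapper around a dockerutil start function
//		s.Suite.SetupSuite()
//	}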
func (s *Suite) SetupSuite() {
container, err := s.Start()
if err != nil {
s.T().Fatalf("Failed to start docker container: %s", err)
}
err = container.WaitForContainer()
if err != nil {
s.T().Fatalf("Failed to start docker container: %s", err)
}
s.Container = container
}
var (
DefaultTerminateAction = "PURGE"
DefaultContainerTimeout uint = 10
)
func (s *Suite) TearDownSuite() {
terminateAction := DefaultTerminateAction
if v := os.Getenv("DOCKER_TERMINATE_ACTION"); v != "" {
terminateAction = v
}
if s.Container != nil {
switch terminateAction {
case "PURGE":
fmt.Println("Stopping & Deleting Container " + s.Container.DockerContainer.ID)
s.Container.Stop(true, DefaultContainerTimeout, true)
case "STOP":
fmt.Println("Stopping Container " + s.Container.DockerContainer.ID)
s.Container.Stop(false, DefaultContainerTimeout, true)
default:
fmt.Printf("Leaving container %s intact at IP address %s\n", s.Container.DockerContainer.ID, s.Container.IpAddress())
}
}
}
|
[
"\"DOCKER_TERMINATE_ACTION\""
] |
[] |
[
"DOCKER_TERMINATE_ACTION"
] |
[]
|
["DOCKER_TERMINATE_ACTION"]
|
go
| 1 | 0 | |
native_client_sdk/src/build_tools/build_sdk.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Entry point for both build and try bots.
This script is invoked from XXX, usually without arguments
to package an SDK. It automatically determines whether
this SDK is for mac, win, linux.
The script inspects the following environment variables:
BUILDBOT_BUILDERNAME to determine whether the script is run locally
and whether it should upload an SDK to file storage (GSTORE)
"""
# pylint: disable=W0621
# std python includes
import argparse
import datetime
import glob
import os
import re
import sys
if sys.version_info < (2, 7, 0):
sys.stderr.write("python 2.7 or later is required to run this script\n")
sys.exit(1)
# local includes
import buildbot_common
import build_projects
import build_updater
import build_version
import generate_notice
import manifest_util
import parse_dsc
import verify_filelist
from build_paths import SCRIPT_DIR, SDK_SRC_DIR, SRC_DIR, NACL_DIR, OUT_DIR
from build_paths import GSTORE, GONACL_APPENGINE_SRC_DIR
# Add SDK make tools scripts to the python path.
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
sys.path.append(os.path.join(NACL_DIR, 'build'))
import getos
import oshelpers
BUILD_DIR = os.path.join(NACL_DIR, 'build')
NACL_TOOLCHAIN_DIR = os.path.join(NACL_DIR, 'toolchain')
NACL_TOOLCHAINTARS_DIR = os.path.join(NACL_TOOLCHAIN_DIR, '.tars')
CYGTAR = os.path.join(BUILD_DIR, 'cygtar.py')
PKGVER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py')
GNBUILD_DIR = 'gnbuild'
options = None
# Map of: ToolchainName: (PackageName, SDKDir, arch).
TOOLCHAIN_PACKAGE_MAP = {
'arm_glibc': ('nacl_arm_glibc', '%(platform)s_arm_glibc', 'arm'),
'x86_glibc': ('nacl_x86_glibc', '%(platform)s_x86_glibc', 'x86'),
'pnacl': ('pnacl_newlib', '%(platform)s_pnacl', 'pnacl')
}
def GetToolchainDirName(tcname):
"""Return the directory name for a given toolchain"""
return TOOLCHAIN_PACKAGE_MAP[tcname][1] % {'platform': getos.GetPlatform()}
def GetToolchainDir(pepperdir, tcname):
"""Return the full path to a given toolchain within a given sdk root"""
return os.path.join(pepperdir, 'toolchain', GetToolchainDirName(tcname))
def GetToolchainLibc(tcname):
if tcname == 'pnacl':
return 'newlib'
for libc in ('glibc', 'newlib', 'host'):
if libc in tcname:
return libc
def GetToolchainNaClInclude(pepperdir, tcname, arch=None):
tcpath = GetToolchainDir(pepperdir, tcname)
if arch is None:
arch = TOOLCHAIN_PACKAGE_MAP[tcname][2]
if arch == 'x86':
return os.path.join(tcpath, 'x86_64-nacl', 'include')
elif arch == 'pnacl':
return os.path.join(tcpath, 'le32-nacl', 'include')
elif arch == 'arm':
return os.path.join(tcpath, 'arm-nacl', 'include')
else:
buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
def GetNinjaOutDir(arch):
return os.path.join(OUT_DIR, GNBUILD_DIR + '-' + arch)
def GetGnBuiltLib(tc, arch):
if 'glibc' in tc:
out_dir = 'glibc_%s' % arch
elif arch == 'pnacl':
out_dir = 'newlib_pnacl'
else:
out_dir = 'clang_newlib_%s' % arch
return os.path.join(GetNinjaOutDir('x64'), out_dir)
def GetToolchainNaClLib(tcname, tcpath, arch):
if arch == 'x86':
return os.path.join(tcpath, 'x86_64-nacl', 'lib32')
elif arch == 'x64':
return os.path.join(tcpath, 'x86_64-nacl', 'lib')
elif arch == 'arm':
return os.path.join(tcpath, 'arm-nacl', 'lib')
elif tcname == 'pnacl':
return os.path.join(tcpath, 'le32-nacl', 'lib')
def GetOutputToolchainLib(pepperdir, tcname, arch):
tcpath = os.path.join(pepperdir, 'toolchain', GetToolchainDirName(tcname))
return GetToolchainNaClLib(tcname, tcpath, arch)
def GetPNaClTranslatorLib(tcpath, arch):
if arch not in ['arm', 'x86', 'x64']:
buildbot_common.ErrorExit('Unknown architecture %s.' % arch)
if arch == 'x86':
arch = 'x86-32'
elif arch == 'x64':
arch = 'x86-64'
return os.path.join(tcpath, 'translator', arch, 'lib')
def BuildStepDownloadToolchains(toolchains):
buildbot_common.BuildStep('Running package_version.py')
args = [sys.executable, PKGVER, '--mode', 'nacl_core_sdk']
args.extend(['sync', '--extract'])
buildbot_common.Run(args, cwd=NACL_DIR)
def BuildStepCleanPepperDirs(pepperdir, pepperdir_old):
buildbot_common.BuildStep('Clean Pepper Dirs')
dirs_to_remove = (
pepperdir,
pepperdir_old,
os.path.join(OUT_DIR, 'arm_trusted')
)
for dirname in dirs_to_remove:
if os.path.exists(dirname):
buildbot_common.RemoveDir(dirname)
buildbot_common.MakeDir(pepperdir)
def BuildStepMakePepperDirs(pepperdir, subdirs):
for subdir in subdirs:
buildbot_common.MakeDir(os.path.join(pepperdir, subdir))
TEXT_FILES = [
'AUTHORS',
'COPYING',
'LICENSE',
'README.Makefiles',
'getting_started/README',
]
def BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision,
nacl_revision):
buildbot_common.BuildStep('Add Text Files')
InstallFiles(SDK_SRC_DIR, pepperdir, TEXT_FILES)
# Replace a few placeholders in README
readme_text = open(os.path.join(SDK_SRC_DIR, 'README')).read()
readme_text = readme_text.replace('${VERSION}', pepper_ver)
readme_text = readme_text.replace('${CHROME_REVISION}', chrome_revision)
readme_text = readme_text.replace('${CHROME_COMMIT_POSITION}',
build_version.ChromeCommitPosition())
readme_text = readme_text.replace('${NACL_REVISION}', nacl_revision)
# Year/Month/Day Hour:Minute:Second
time_format = '%Y/%m/%d %H:%M:%S'
readme_text = readme_text.replace('${DATE}',
datetime.datetime.now().strftime(time_format))
open(os.path.join(pepperdir, 'README'), 'w').write(readme_text)
def BuildStepUntarToolchains(pepperdir, toolchains):
buildbot_common.BuildStep('Untar Toolchains')
platform = getos.GetPlatform()
build_platform = '%s_x86' % platform
tmpdir = os.path.join(OUT_DIR, 'tc_temp')
buildbot_common.RemoveDir(tmpdir)
buildbot_common.MakeDir(tmpdir)
# Create a list of extract packages tuples, the first part should be
# "$PACKAGE_TARGET/$PACKAGE". The second part should be the destination
# directory relative to pepperdir/toolchain.
extract_packages = []
for toolchain in toolchains:
toolchain_map = TOOLCHAIN_PACKAGE_MAP.get(toolchain, None)
if toolchain_map:
package_name, tcdir, _ = toolchain_map
package_tuple = (os.path.join(build_platform, package_name),
tcdir % {'platform': platform})
extract_packages.append(package_tuple)
# On linux we also want to extract the arm_trusted package which contains
# the ARM libraries we ship in support of sel_ldr_arm.
if platform == 'linux':
extract_packages.append((os.path.join(build_platform, 'arm_trusted'),
'arm_trusted'))
if extract_packages:
# Extract all of the packages into the temp directory.
package_names = [package_tuple[0] for package_tuple in extract_packages]
buildbot_common.Run([sys.executable, PKGVER,
'--packages', ','.join(package_names),
'--tar-dir', NACL_TOOLCHAINTARS_DIR,
'--dest-dir', tmpdir,
'extract'])
# Move all the packages we extracted to the correct destination.
for package_name, dest_dir in extract_packages:
full_src_dir = os.path.join(tmpdir, package_name)
full_dst_dir = os.path.join(pepperdir, 'toolchain', dest_dir)
buildbot_common.Move(full_src_dir, full_dst_dir)
# Cleanup the temporary directory we are no longer using.
buildbot_common.RemoveDir(tmpdir)
# List of toolchain headers to install.
# Source is relative to top of Chromium tree, destination is relative
# to the toolchain header directory.
NACL_HEADER_MAP = {
'newlib': [
('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
('native_client/src/untrusted/irt/irt.h', ''),
('native_client/src/untrusted/irt/irt_dev.h', ''),
('native_client/src/untrusted/irt/irt_extension.h', ''),
('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
('native_client/src/untrusted/pthread/pthread.h', ''),
('native_client/src/untrusted/pthread/semaphore.h', ''),
('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
('ppapi/nacl_irt/public/irt_ppapi.h', ''),
],
'glibc': [
('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
('native_client/src/untrusted/irt/irt.h', ''),
('native_client/src/untrusted/irt/irt_dev.h', ''),
('native_client/src/untrusted/irt/irt_extension.h', ''),
('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
('ppapi/nacl_irt/public/irt_ppapi.h', ''),
],
}
def InstallFiles(src_root, dest_root, file_list):
"""Copy a set of files from src_root to dest_root according
to the given mapping. This allows files to be copied to a
location in the destination tree that differs from their location
in the source tree.
If the destination mapping ends with a '/' then the destination
basename is inherited from the source file.
Wildcards can be used in the source list but it is not recommended
as this can end up adding things to the SDK unintentionally.
"""
for file_spec in file_list:
# The list of files to install can be a simple list of
# strings or a list of pairs, where each pair corresponds
# to a mapping from source to destination names.
if isinstance(file_spec, str):
src_file = dest_file = file_spec
else:
src_file, dest_file = file_spec
src_file = os.path.join(src_root, src_file)
# Expand sources files using glob.
sources = glob.glob(src_file)
if not sources:
sources = [src_file]
if len(sources) > 1 and not dest_file.endswith('/'):
buildbot_common.ErrorExit("Target file must end in '/' when "
"using globbing to install multiple files")
for source in sources:
if dest_file.endswith('/'):
dest = os.path.join(dest_file, os.path.basename(source))
else:
dest = dest_file
dest = os.path.join(dest_root, dest)
if not os.path.isdir(os.path.dirname(dest)):
buildbot_common.MakeDir(os.path.dirname(dest))
buildbot_common.CopyFile(source, dest)
def InstallNaClHeaders(tc_dst_inc, tcname):
"""Copies NaCl headers to expected locations in the toolchain."""
InstallFiles(SRC_DIR, tc_dst_inc, NACL_HEADER_MAP[GetToolchainLibc(tcname)])
def GnNinjaInstall(pepperdir, toolchains):
tools_files_x86 = [
['sel_ldr', 'sel_ldr_x86_32'],
]
tools_files_x64 = [
['sel_ldr', 'sel_ldr_x86_64'],
['ncval_new', 'ncval'],
['clang_newlib_arm/elf_loader.nexe', 'elf_loader_arm.nexe'],
['irt_x86/irt_core.nexe', 'irt_core_x86_32.nexe'],
['irt_x64/irt_core.nexe', 'irt_core_x86_64.nexe'],
]
tools_files_arm = []
platform = getos.GetPlatform()
# TODO(binji): dump_syms doesn't currently build on Windows. See
# http://crbug.com/245456
if platform != 'win':
tools_files_x64 += [
['dump_syms', 'dump_syms'],
['minidump_dump', 'minidump_dump'],
['minidump_stackwalk', 'minidump_stackwalk']
]
if platform == 'linux':
tools_files_x86 += [['nacl_helper_bootstrap',
'nacl_helper_bootstrap_x86_32']]
tools_files_x64 += [['nacl_helper_bootstrap',
'nacl_helper_bootstrap_x86_64']]
# Add ARM trusted binaries (linux only)
if not options.no_arm_trusted:
tools_files_x64 += [
['irt_arm/irt_core.nexe', 'irt_core_arm.nexe'],
]
tools_files_arm += [
['nacl_helper_bootstrap', 'nacl_helper_bootstrap_arm'],
['sel_ldr', 'sel_ldr_arm']
]
tools_dir = os.path.join(pepperdir, 'tools')
buildbot_common.MakeDir(tools_dir)
# Add .exe extensions to all windows tools
for pair in tools_files_x86 + tools_files_x64:
if platform == 'win' and not os.path.splitext(pair[0])[1]:
pair[0] += '.exe'
pair[1] += '.exe'
InstallFiles(GetNinjaOutDir('x64'), tools_dir, tools_files_x64)
InstallFiles(GetNinjaOutDir('x86'), tools_dir, tools_files_x86)
if platform == 'linux':
InstallFiles(GetNinjaOutDir('arm'), tools_dir, tools_files_arm)
stub_dir = os.path.join(SRC_DIR, 'ppapi/native_client/src/untrusted/irt_stub')
for tc in toolchains:
if tc in ('host', 'clang-newlib'):
continue
elif tc == 'pnacl':
xarches = ('pnacl', 'x86', 'x64', 'arm')
elif tc in ('x86_glibc'):
xarches = ('x86', 'x64')
elif tc == 'arm_glibc':
xarches = ('arm',)
else:
raise AssertionError('unexpected toolchain value: %s' % tc)
for xarch in xarches:
src_dir = GetGnBuiltLib(tc, xarch)
src_dir = os.path.join(src_dir, 'obj', 'ppapi', 'native_client', 'src',
'untrusted', 'irt_stub')
dst_dir = GetOutputToolchainLib(pepperdir, tc, xarch)
InstallFiles(src_dir, dst_dir, ['libppapi_stub.a'])
InstallFiles(stub_dir, dst_dir, ['libppapi.a'])
if 'glibc' in tc:
InstallFiles(stub_dir, dst_dir, ['libppapi.so'])
def GnNinjaBuildAll(rel_out_dir):
def MakeNinjaRelPath(suffix):
return os.path.join(os.path.relpath(OUT_DIR, SRC_DIR), rel_out_dir + suffix)
GnNinjaBuild('x64', MakeNinjaRelPath('-x64'), ['nacl_sdk_untrusted=true'])
GnNinjaBuild('x86', MakeNinjaRelPath('-x86'))
if getos.GetPlatform() == 'linux':
GnNinjaBuild('arm', MakeNinjaRelPath('-arm'))
def GetGNExecutable(platform):
# TODO(sbc): Remove this code, which is duplicated from mb.py and simply
# rely on the depot_tools gn wrapper which should be in the PATH.
# http://crbug.com/588794
if platform == 'linux':
subdir, exe = 'linux64', 'gn'
elif platform == 'mac':
subdir, exe = 'mac', 'gn'
else:
subdir, exe = 'win', 'gn.exe'
return os.path.join(SRC_DIR, 'buildtools', subdir, exe)
def GnNinjaBuild(arch, out_dir, extra_gn_args=None):
gn_args = ['is_debug=false']
if extra_gn_args is not None:
gn_args += extra_gn_args
platform = getos.GetPlatform()
if platform == 'mac':
if options.mac_sdk:
gn_args.append('mac_sdk_min="%s"' % options.mac_sdk)
if arch == 'arm':
# Without this the target_cpu='arm' build complains about missing code
# signing identity
gn_args.append('ios_enable_code_signing=false')
gn_exe = GetGNExecutable(platform)
if arch is not None:
gn_args.append('target_cpu="%s"' % arch)
if arch == 'arm':
if options.no_arm_trusted:
gn_args.append('enable_cross_trusted=false')
gn_args = ' '.join(gn_args)
buildbot_common.Run([gn_exe, 'gen', '--args=%s' % gn_args, out_dir],
cwd=SRC_DIR)
buildbot_common.Run(['ninja', '-C', out_dir, 'nacl_core_sdk'], cwd=SRC_DIR)
def BuildStepBuildToolchains(pepperdir, toolchains, build, clean):
buildbot_common.BuildStep('SDK Items')
if clean:
for dirname in glob.glob(os.path.join(OUT_DIR, GNBUILD_DIR + '*')):
buildbot_common.RemoveDir(dirname)
build = True
if build:
GnNinjaBuildAll(GNBUILD_DIR)
GnNinjaInstall(pepperdir, toolchains)
for toolchain in toolchains:
if toolchain not in ('host', 'clang-newlib'):
InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, toolchain),
toolchain)
if 'pnacl' in toolchains:
# NOTE: gn builds all untrusted code in the x64 build
build_dir = GetNinjaOutDir('x64')
nacl_arches = ['x86', 'x64', 'arm']
for nacl_arch in nacl_arches:
shim_file = os.path.join(build_dir, 'clang_newlib_' + nacl_arch, 'obj',
'ppapi', 'native_client', 'src', 'untrusted',
'pnacl_irt_shim', 'libpnacl_irt_shim.a')
pnacldir = GetToolchainDir(pepperdir, 'pnacl')
pnacl_translator_lib_dir = GetPNaClTranslatorLib(pnacldir, nacl_arch)
if not os.path.isdir(pnacl_translator_lib_dir):
buildbot_common.ErrorExit('Expected %s directory to exist.' %
pnacl_translator_lib_dir)
buildbot_common.CopyFile(shim_file, pnacl_translator_lib_dir)
InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, 'pnacl', 'x86'),
'pnacl')
InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, 'pnacl', 'arm'),
'pnacl')
def MakeDirectoryOrClobber(pepperdir, dirname, clobber):
dirpath = os.path.join(pepperdir, dirname)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
return dirpath
def BuildStepUpdateHelpers(pepperdir, clobber):
buildbot_common.BuildStep('Update project helpers')
build_projects.UpdateHelpers(pepperdir, clobber=clobber)
def BuildStepUpdateUserProjects(pepperdir, toolchains,
build_experimental, clobber):
buildbot_common.BuildStep('Update examples and libraries')
filters = {}
if not build_experimental:
filters['EXPERIMENTAL'] = False
dsc_toolchains = []
for t in toolchains:
if t.startswith('x86_') or t.startswith('arm_'):
if t[4:] not in dsc_toolchains:
dsc_toolchains.append(t[4:])
elif t == 'host':
dsc_toolchains.append(getos.GetPlatform())
else:
dsc_toolchains.append(t)
filters['TOOLS'] = dsc_toolchains
# Update examples and libraries
filters['DEST'] = [
'getting_started',
'examples/api',
'examples/demo',
'examples/tutorial',
'src'
]
tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
build_projects.UpdateProjects(pepperdir, tree, clobber=clobber,
toolchains=dsc_toolchains)
def BuildStepMakeAll(pepperdir, directory, step_name,
deps=True, clean=False, config='Debug', args=None):
buildbot_common.BuildStep(step_name)
build_projects.BuildProjectsBranch(pepperdir, directory, clean,
deps, config, args)
def BuildStepBuildLibraries(pepperdir, args=None):
BuildStepMakeAll(pepperdir, 'src', 'Build Libraries Debug',
clean=True, config='Debug', args=args)
BuildStepMakeAll(pepperdir, 'src', 'Build Libraries Release',
clean=True, config='Release', args=args)
# Cleanup .pyc file generated while building libraries. Without
# this we would end up shipping the pyc in the SDK tarball.
buildbot_common.RemoveFile(os.path.join(pepperdir, 'tools', '*.pyc'))
def GenerateNotice(fileroot, output_filename='NOTICE', extra_files=None):
# Look for LICENSE files
license_filenames_re = re.compile('LICENSE|COPYING|COPYRIGHT')
license_files = []
for root, _, files in os.walk(fileroot):
for filename in files:
if license_filenames_re.match(filename):
path = os.path.join(root, filename)
license_files.append(path)
if extra_files:
license_files += [os.path.join(fileroot, f) for f in extra_files]
print '\n'.join(license_files)
if not os.path.isabs(output_filename):
output_filename = os.path.join(fileroot, output_filename)
generate_notice.Generate(output_filename, fileroot, license_files)
def BuildStepVerifyFilelist(pepperdir):
buildbot_common.BuildStep('Verify SDK Files')
file_list_path = os.path.join(SCRIPT_DIR, 'sdk_files.list')
try:
print 'SDK directory: %s' % pepperdir
verify_filelist.Verify(file_list_path, pepperdir)
print 'OK'
except verify_filelist.ParseException, e:
buildbot_common.ErrorExit('Parsing sdk_files.list failed:\n\n%s' % e)
except verify_filelist.VerifyException, e:
file_list_rel = os.path.relpath(file_list_path)
verify_filelist_py = os.path.splitext(verify_filelist.__file__)[0] + '.py'
verify_filelist_py = os.path.relpath(verify_filelist_py)
pepperdir_rel = os.path.relpath(pepperdir)
msg = """\
SDK verification failed:
%s
Add/remove files from %s to fix.
Run:
./%s %s %s
to test.""" % (e, file_list_rel, verify_filelist_py, file_list_rel,
pepperdir_rel)
buildbot_common.ErrorExit(msg)
def BuildStepTarBundle(pepper_ver, tarfile):
buildbot_common.BuildStep('Tar Pepper Bundle')
buildbot_common.MakeDir(os.path.dirname(tarfile))
buildbot_common.Run([sys.executable, CYGTAR, '-C', OUT_DIR, '-cjf', tarfile,
'pepper_' + pepper_ver], cwd=NACL_DIR)
def GetManifestBundle(pepper_ver, chrome_revision, nacl_revision, tarfile,
archive_url):
with open(tarfile, 'rb') as tarfile_stream:
archive_sha1, archive_size = manifest_util.DownloadAndComputeHash(
tarfile_stream)
archive = manifest_util.Archive(manifest_util.GetHostOS())
archive.url = archive_url
archive.size = archive_size
archive.checksum = archive_sha1
bundle = manifest_util.Bundle('pepper_' + pepper_ver)
bundle.revision = int(chrome_revision)
bundle.repath = 'pepper_' + pepper_ver
bundle.version = int(pepper_ver)
bundle.description = (
'Chrome %s bundle. Chrome revision: %s. NaCl revision: %s' % (
pepper_ver, chrome_revision, nacl_revision))
bundle.stability = 'dev'
bundle.recommended = 'no'
bundle.archives = [archive]
return bundle
def Archive(filename, from_directory, step_link=True):
if buildbot_common.IsSDKBuilder():
bucket_path = 'nativeclient-mirror/nacl/nacl_sdk/'
else:
bucket_path = 'nativeclient-mirror/nacl/nacl_sdk_test/'
bucket_path += build_version.ChromeVersion()
buildbot_common.Archive(filename, bucket_path, from_directory, step_link)
def BuildStepArchiveBundle(name, pepper_ver, chrome_revision, nacl_revision,
tarfile):
buildbot_common.BuildStep('Archive %s' % name)
tarname = os.path.basename(tarfile)
tarfile_dir = os.path.dirname(tarfile)
Archive(tarname, tarfile_dir)
# generate "manifest snippet" for this archive.
archive_url = GSTORE + 'nacl_sdk/%s/%s' % (
build_version.ChromeVersion(), tarname)
bundle = GetManifestBundle(pepper_ver, chrome_revision, nacl_revision,
tarfile, archive_url)
manifest_snippet_file = os.path.join(OUT_DIR, tarname + '.json')
with open(manifest_snippet_file, 'wb') as manifest_snippet_stream:
manifest_snippet_stream.write(bundle.GetDataAsString())
Archive(tarname + '.json', OUT_DIR, step_link=False)
def BuildStepBuildPNaClComponent(version, revision):
# Sadly revision can go backwards for a given version since when a version
# is built from master, revision will be a huge number (in the hundreds of
# thousands). Once the branch happens the revision will reset to zero.
# TODO(sbc): figure out how to compensate for this in some way such that
# revisions always go forward for a given version.
buildbot_common.BuildStep('PNaCl Component')
# Version numbers must follow the format specified in:
# https://developer.chrome.com/extensions/manifest/version
# So ensure that rev_major/rev_minor don't overflow and ensure there
# are no leading zeros.
if len(revision) > 4:
rev_minor = int(revision[-4:])
rev_major = int(revision[:-4])
version = "0.%s.%s.%s" % (version, rev_major, rev_minor)
else:
version = "0.%s.0.%s" % (version, revision)
buildbot_common.Run(['./make_pnacl_component.sh',
'pnacl_multicrx_%s.zip' % revision,
version], cwd=SCRIPT_DIR)
def BuildStepArchivePNaClComponent(revision):
buildbot_common.BuildStep('Archive PNaCl Component')
Archive('pnacl_multicrx_%s.zip' % revision, OUT_DIR)
def BuildStepArchiveSDKTools():
buildbot_common.BuildStep('Build SDK Tools')
build_updater.BuildUpdater(OUT_DIR)
buildbot_common.BuildStep('Archive SDK Tools')
Archive('sdk_tools.tgz', OUT_DIR, step_link=False)
Archive('nacl_sdk.zip', OUT_DIR, step_link=False)
def BuildStepBuildAppEngine(pepperdir, chrome_revision):
"""Build the projects found in src/gonacl_appengine/src"""
buildbot_common.BuildStep('Build GoNaCl AppEngine Projects')
cmd = ['make', 'upload', 'REVISION=%s' % chrome_revision]
env = dict(os.environ)
env['NACL_SDK_ROOT'] = pepperdir
env['NACLPORTS_NO_ANNOTATE'] = "1"
buildbot_common.Run(cmd, env=env, cwd=GONACL_APPENGINE_SRC_DIR)
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--qemu', help='Add qemu for ARM.',
action='store_true')
parser.add_argument('--tar', help='Force the tar step.',
action='store_true')
parser.add_argument('--archive', help='Force the archive step.',
action='store_true')
parser.add_argument('--release', help='PPAPI release version.',
dest='release', default=None)
parser.add_argument('--build-app-engine',
help='Build AppEngine demos.', action='store_true')
parser.add_argument('--experimental',
help='build experimental examples and libraries', action='store_true',
dest='build_experimental')
parser.add_argument('--skip-toolchain', help='Skip toolchain untar',
action='store_true')
parser.add_argument('--no-clean', dest='clean', action='store_false',
help="Don't clean gn build directories")
parser.add_argument('--mac-sdk',
help='Set the mac-sdk (e.g. 10.6) to use when building with ninja.')
parser.add_argument('--no-arm-trusted', action='store_true',
help='Disable building of ARM trusted components (sel_ldr, etc).')
parser.add_argument('--no-use-sysroot', action='store_true',
help='Disable building against sysroot.')
# To setup bash completion for this command first install optcomplete
# and then add this line to your .bashrc:
# complete -F _optcomplete build_sdk.py
try:
import optcomplete
optcomplete.autocomplete(parser)
except ImportError:
pass
global options
options = parser.parse_args(args)
buildbot_common.BuildStep('build_sdk')
if buildbot_common.IsSDKBuilder():
options.archive = True
# TODO(binji): re-enable app_engine build when the linux builder stops
# breaking when trying to git clone from github.
# See http://crbug.com/412969.
options.build_app_engine = False
options.tar = True
# NOTE: order matters here. This will be the order that is specified in the
# Makefiles; the first toolchain will be the default.
toolchains = ['pnacl', 'x86_glibc', 'arm_glibc', 'clang-newlib', 'host']
print 'Building: ' + ' '.join(toolchains)
platform = getos.GetPlatform()
if options.archive and not options.tar:
parser.error('Incompatible arguments with archive.')
chrome_version = int(build_version.ChromeMajorVersion())
chrome_revision = build_version.ChromeRevision()
nacl_revision = build_version.NaClRevision()
pepper_ver = str(chrome_version)
pepper_old = str(chrome_version - 1)
pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
pepperdir_old = os.path.join(OUT_DIR, 'pepper_' + pepper_old)
tarname = 'naclsdk_%s.tar.bz2' % platform
tarfile = os.path.join(OUT_DIR, tarname)
if options.release:
pepper_ver = options.release
print 'Building PEPPER %s at %s' % (pepper_ver, chrome_revision)
if 'NACL_SDK_ROOT' in os.environ:
# We don't want the currently configured NACL_SDK_ROOT to have any effect
# on the build.
del os.environ['NACL_SDK_ROOT']
if platform == 'linux':
# Linux-only: make sure the debian/stable sysroot image is installed
install_script = os.path.join(SRC_DIR, 'build', 'linux', 'sysroot_scripts',
'install-sysroot.py')
buildbot_common.Run([sys.executable, install_script, '--arch=arm'])
buildbot_common.Run([sys.executable, install_script, '--arch=i386'])
buildbot_common.Run([sys.executable, install_script, '--arch=amd64'])
if not options.skip_toolchain:
BuildStepCleanPepperDirs(pepperdir, pepperdir_old)
BuildStepMakePepperDirs(pepperdir, ['include', 'toolchain', 'tools'])
BuildStepDownloadToolchains(toolchains)
BuildStepUntarToolchains(pepperdir, toolchains)
if platform == 'linux':
buildbot_common.Move(os.path.join(pepperdir, 'toolchain', 'arm_trusted'),
os.path.join(OUT_DIR, 'arm_trusted'))
if platform == 'linux':
# Linux-only: Copy arm libraries from the arm_trusted package. These are
# needed to be able to run sel_ldr_arm under qemu.
arm_libs = [
'lib/arm-linux-gnueabihf/librt.so.1',
'lib/arm-linux-gnueabihf/libdl.so.2',
'lib/arm-linux-gnueabihf/libpthread.so.0',
'lib/arm-linux-gnueabihf/libgcc_s.so.1',
'lib/arm-linux-gnueabihf/libc.so.6',
'lib/arm-linux-gnueabihf/ld-linux-armhf.so.3',
'lib/arm-linux-gnueabihf/libm.so.6',
]
arm_lib_dir = os.path.join(pepperdir, 'tools', 'lib', 'arm_trusted', 'lib')
buildbot_common.MakeDir(arm_lib_dir)
for arm_lib in arm_libs:
arm_lib = os.path.join(OUT_DIR, 'arm_trusted', arm_lib)
buildbot_common.CopyFile(arm_lib, arm_lib_dir)
buildbot_common.CopyFile(os.path.join(OUT_DIR, 'arm_trusted', 'qemu-arm'),
os.path.join(pepperdir, 'tools'))
BuildStepBuildToolchains(pepperdir, toolchains,
not options.skip_toolchain,
options.clean)
BuildStepUpdateHelpers(pepperdir, True)
BuildStepUpdateUserProjects(pepperdir, toolchains,
options.build_experimental, True)
BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision, nacl_revision)
# Ship with libraries prebuilt, so run that first.
BuildStepBuildLibraries(pepperdir)
GenerateNotice(pepperdir)
# Verify the SDK contains what we expect.
BuildStepVerifyFilelist(pepperdir)
if options.tar:
BuildStepTarBundle(pepper_ver, tarfile)
if platform == 'linux':
BuildStepBuildPNaClComponent(pepper_ver, chrome_revision)
if options.build_app_engine and platform == 'linux':
BuildStepBuildAppEngine(pepperdir, chrome_revision)
if options.qemu:
qemudir = os.path.join(NACL_DIR, 'toolchain', 'linux_arm-trusted')
oshelpers.Copy(['-r', qemudir, pepperdir])
# Archive the results on Google Cloud Storage.
if options.archive:
BuildStepArchiveBundle('build', pepper_ver, chrome_revision, nacl_revision,
tarfile)
# Only archive sdk_tools/naclport/pnacl_component on linux.
if platform == 'linux':
BuildStepArchiveSDKTools()
BuildStepArchivePNaClComponent(chrome_revision)
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
buildbot_common.ErrorExit('build_sdk: interrupted')
|
[] |
[] |
[
"NACL_SDK_ROOT"
] |
[]
|
["NACL_SDK_ROOT"]
|
python
| 1 | 0 | |
pkg/cmd/main.go
|
package main
import (
"errors"
"os"
variant "github.com/mumoshu/variant2"
)
func main() {
err := variant.RunMain(variant.Env{
Args: os.Args,
Getenv: os.Getenv,
Getwd: os.Getwd,
})
var verr variant.Error
var code int
if err != nil {
if ok := errors.As(err, &verr); ok {
code = verr.ExitCode
} else {
code = 1
}
} else {
code = 0
}
os.Exit(code)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| 0 | 0 | |
pkg/batchssh/batchssh.go
|
/*
Copyright © 2021 windvalley
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package batchssh
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
"github.com/windvalley/gossh/pkg/log"
)
const (
exportLangPattern = "export LANG=%s;export LC_ALL=%s;export LANGUAGE=%s;"
// SuccessIdentifier for result output.
SuccessIdentifier = "SUCCESS"
// FailedIdentifier for result output.
FailedIdentifier = "FAILED"
)
// Task execute command or copy file or execute script.
type Task interface {
RunSSH(addr string) (string, error)
}
// Result of ssh command.
type Result struct {
Addr string `json:"addr"`
Status string `json:"status"`
Message string `json:"message"`
}
// Client for ssh.
type Client struct {
User string
Password string
Auths []ssh.AuthMethod
Port int
ConnTimeout time.Duration
CommandTimeout time.Duration
Concurrency int
Proxy *Proxy
}
// Proxy server.
type Proxy struct {
SSHClient *ssh.Client
Err error
}
// NewClient session.
func NewClient(
user, password string,
auths []ssh.AuthMethod,
options ...func(*Client),
) (*Client, error) {
log.Debugf("Login user: %s", user)
client := Client{
User: user,
Password: password,
Auths: auths,
Port: 22,
ConnTimeout: 10 * time.Second,
CommandTimeout: 0,
Concurrency: 100,
Proxy: &Proxy{},
}
for _, option := range options {
option(&client)
}
return &client, nil
}
// BatchRun command on remote servers.
func (c *Client) BatchRun(
addrs []string,
sshTask Task,
) <-chan *Result {
addrCh := make(chan string)
go func() {
defer close(addrCh)
for _, addr := range addrs {
addrCh <- addr
}
}()
resCh := make(chan *Result)
var wg sync.WaitGroup
wg.Add(c.Concurrency)
for i := 0; i < c.Concurrency; i++ {
go func(wg *sync.WaitGroup) {
for addr := range addrCh {
var result *Result
done := make(chan struct{})
go func() {
defer close(done)
output, err := sshTask.RunSSH(addr)
if err != nil {
result = &Result{addr, FailedIdentifier, err.Error()}
} else {
result = &Result{addr, SuccessIdentifier, output}
}
}()
if c.CommandTimeout > 0 {
select {
case <-done:
case <-time.After(c.CommandTimeout):
result = &Result{
addr,
FailedIdentifier,
fmt.Sprintf(
"command timeout, timeout value: %d seconds",
c.CommandTimeout/time.Second,
),
}
}
} else {
<-done
}
resCh <- result
}
wg.Done()
}(&wg)
}
go func(wg *sync.WaitGroup) {
wg.Wait()
close(resCh)
}(&wg)
return resCh
}
// ExecuteCmd on remote host.
func (c *Client) ExecuteCmd(addr, command, lang, runAs string, sudo bool) (string, error) {
client, err := c.getClient(addr)
if err != nil {
return "", err
}
defer client.Close()
session, err := client.NewSession()
if err != nil {
return "", err
}
exportLang := ""
if lang != "" {
exportLang = fmt.Sprintf(exportLangPattern, lang, lang, lang)
}
if sudo {
command = fmt.Sprintf("%ssudo -u %s -H bash -c '%s'", exportLang, runAs, command)
} else {
command = exportLang + command
}
return c.executeCmd(session, command)
}
// CopyFiles to remote host.
func (c *Client) CopyFiles(
addr string,
srcFiles, srcZipFiles []string,
dstDir string,
allowOverwrite bool,
) (string, error) {
client, err := c.getClient(addr)
if err != nil {
return "", err
}
defer client.Close()
ftpC, err := sftp.NewClient(client)
if err != nil {
return "", err
}
defer ftpC.Close()
for i, f := range srcZipFiles {
srcFile := srcFiles[i]
dstZipFile := filepath.Base(f)
done := make(chan struct{})
var (
err error
file *sftp.File
)
go func() {
defer close(done)
file, err = c.copyZipFile(ftpC, f, filepath.Base(srcFile), dstDir, allowOverwrite)
if err == nil {
file.Close()
}
}()
<-done
if err != nil {
return "", err
}
session, err := client.NewSession()
if err != nil {
return "", err
}
_, err = c.executeCmd(
session,
fmt.Sprintf(
`which unzip &>/dev/null && { cd %s;unzip -o %s;rm %s;} ||
{ echo "need to install 'unzip' command";cd %s;rm %s;exit 1;}`,
dstDir,
dstZipFile,
dstZipFile,
dstDir,
dstZipFile,
),
)
if err != nil {
return "", err
}
session.Close()
}
hasOrHave := "has"
if len(srcFiles) > 1 {
hasOrHave = "have"
}
return fmt.Sprintf("'%s' %s been copied to '%s'", strings.Join(srcFiles, ","), hasOrHave, dstDir), nil
}
// ExecuteScript on remote host.
func (c *Client) ExecuteScript(
addr, srcFile, dstDir, lang, runAs string,
sudo, remove, allowOverwrite bool,
) (string, error) {
client, err := c.getClient(addr)
if err != nil {
return "", err
}
defer client.Close()
ftpC, err := sftp.NewClient(client)
if err != nil {
return "", err
}
defer ftpC.Close()
file, err := c.copyFile(ftpC, srcFile, dstDir, allowOverwrite)
if err != nil {
return "", err
}
//nolint:gomnd,govet
if err := file.Chmod(0755); err != nil {
return "", err
}
script := file.Name()
file.Close()
session, err := client.NewSession()
if err != nil {
return "", err
}
defer session.Close()
exportLang := ""
if lang != "" {
exportLang = fmt.Sprintf(exportLangPattern, lang, lang, lang)
}
command := ""
switch {
case sudo && remove:
command = fmt.Sprintf("%ssudo -u %s -H bash -c '%s;rm -f %s'", exportLang, runAs, script, script)
case sudo && !remove:
command = fmt.Sprintf("%ssudo -u %s -H bash -c '%s'", exportLang, runAs, script)
case !sudo && remove:
command = fmt.Sprintf("%s%s;rm -f %s", exportLang, script, script)
case !sudo && !remove:
command = exportLang + script
}
return c.executeCmd(session, command)
}
func (c *Client) executeCmd(session *ssh.Session, command string) (string, error) {
modes := ssh.TerminalModes{
ssh.ECHO: 0,
ssh.TTY_OP_ISPEED: 28800,
ssh.TTY_OP_OSPEED: 28800,
}
//nolint:gomnd
if err := session.RequestPty("xterm", 100, 100, modes); err != nil {
return "", err
}
w, err := session.StdinPipe()
if err != nil {
return "", err
}
r, err := session.StdoutPipe()
if err != nil {
return "", err
}
out, isWrongPass := c.handleOutput(w, r)
go func() {
err = session.Run(command)
}()
var output []byte
done := make(chan struct{})
go func() {
defer close(done)
for v := range out {
output = append(output, v...)
}
}()
<-done
outputStr := string(output)
if <-isWrongPass {
return "", errors.New("wrong sudo password")
}
if err != nil {
return "", errors.New(outputStr)
}
return outputStr, nil
}
func (c *Client) copyFile(
ftpC *sftp.Client,
srcFile, dstDir string,
allowOverwrite bool,
) (*sftp.File, error) {
homeDir := os.Getenv("HOME")
if strings.HasPrefix(srcFile, "~/") {
srcFile = strings.Replace(srcFile, "~", homeDir, 1)
}
content, err := ioutil.ReadFile(srcFile)
if err != nil {
return nil, err
}
fileStat, err := os.Stat(srcFile)
if err != nil {
return nil, err
}
srcFileBaseName := filepath.Base(srcFile)
dstFile := path.Join(dstDir, srcFileBaseName)
if !allowOverwrite {
dstFileInfo, _ := ftpC.Stat(dstFile)
if dstFileInfo != nil {
return nil, fmt.Errorf(
"%s already exists, you can add '-F' flag to overwrite it",
dstFile,
)
}
}
file, err := ftpC.Create(dstFile)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, fmt.Errorf("dest dir '%s' does not exist", dstDir)
}
if err, ok := err.(*sftp.StatusError); ok && err.Code == uint32(sftp.ErrSshFxPermissionDenied) {
return nil, fmt.Errorf("no permission to write to dest dir '%s'", dstDir)
}
return nil, err
}
_, err = file.Write(content)
if err != nil {
return nil, err
}
if err := file.Chmod(fileStat.Mode()); err != nil {
return nil, err
}
if err := ftpC.Chtimes(dstFile, time.Now(), fileStat.ModTime()); err != nil {
return nil, err
}
return file, nil
}
func (c *Client) copyZipFile(
ftpC *sftp.Client,
srcZipFile, srcFileName, dstDir string,
allowOverwrite bool,
) (*sftp.File, error) {
homeDir := os.Getenv("HOME")
if strings.HasPrefix(srcZipFile, "~/") {
srcZipFile = strings.Replace(srcZipFile, "~", homeDir, 1)
}
content, err := ioutil.ReadFile(srcZipFile)
if err != nil {
return nil, err
}
srcZipFileName := filepath.Base(srcZipFile)
dstZipFile := path.Join(dstDir, srcZipFileName)
dstFile := path.Join(dstDir, srcFileName)
if !allowOverwrite {
dstFileInfo, _ := ftpC.Stat(dstFile)
if dstFileInfo != nil {
return nil, fmt.Errorf(
"%s already exists, you can add '-F' flag to overwrite it",
dstFile,
)
}
}
file, err := ftpC.Create(dstZipFile)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, fmt.Errorf("dest dir '%s' does not exist", dstDir)
}
if err, ok := err.(*sftp.StatusError); ok && err.Code == uint32(sftp.ErrSshFxPermissionDenied) {
return nil, fmt.Errorf("no permission to write to dest dir '%s'", dstDir)
}
return nil, err
}
_, err = file.Write(content)
if err != nil {
return nil, err
}
return file, nil
}
func (c *Client) getClient(addr string) (*ssh.Client, error) {
var (
client *ssh.Client
err error
)
sshConfig := &ssh.ClientConfig{
User: c.User,
Auth: c.Auths,
Timeout: c.ConnTimeout,
}
//nolint:gosec
sshConfig.HostKeyCallback = ssh.InsecureIgnoreHostKey()
remoteHost := net.JoinHostPort(addr, strconv.Itoa(c.Port))
if c.Proxy.SSHClient != nil || c.Proxy.Err != nil {
if c.Proxy.Err != nil {
return nil, c.Proxy.Err
}
conn, err2 := c.Proxy.SSHClient.Dial("tcp", remoteHost)
if err2 != nil {
return nil, err2
}
ncc, chans, reqs, err3 := ssh.NewClientConn(conn, remoteHost, sshConfig)
if err3 != nil {
return nil, err3
}
client = ssh.NewClient(ncc, chans, reqs)
} else {
client, err = ssh.Dial("tcp", remoteHost, sshConfig)
if err != nil {
return nil, err
}
}
return client, nil
}
// handle output stream, and give sudo password if necessary.
func (c *Client) handleOutput(w io.Writer, r io.Reader) (<-chan []byte, <-chan bool) {
out := make(chan []byte, 1)
isWrongPass := make(chan bool, 1)
go func() {
sudoTimes := 0
for {
//nolint:gomnd
buf := make([]byte, 2048)
n, err := r.Read(buf)
if err != nil {
isWrongPass <- false
close(out)
return
}
if s := string(buf); strings.Contains(s, "[sudo]") {
sudoTimes++
if sudoTimes == 1 {
if _, err := w.Write([]byte(c.Password + "\n")); err != nil {
isWrongPass <- false
close(out)
return
}
} else {
isWrongPass <- true
close(out)
return
}
}
out <- buf[:n]
}
}()
return out, isWrongPass
}
// WithConnTimeout ssh connection timeout option.
func WithConnTimeout(timeout time.Duration) func(*Client) {
return func(c *Client) {
c.ConnTimeout = timeout
}
}
// WithCommandTimeout task connection timeout option.
func WithCommandTimeout(timeout time.Duration) func(*Client) {
return func(c *Client) {
c.CommandTimeout = timeout
}
}
// WithPort port option.
func WithPort(port int) func(*Client) {
return func(c *Client) {
c.Port = port
}
}
// WithConcurrency concurrency tasks number option.
func WithConcurrency(count int) func(*Client) {
return func(c *Client) {
c.Concurrency = count
}
}
// WithProxyServer connect remote hosts by proxy server.
func WithProxyServer(proxyServer, user string, port int, auths []ssh.AuthMethod) func(*Client) {
return func(c *Client) {
log.Debugf("Proxy login user: %s", user)
proxySSHConfig := &ssh.ClientConfig{
User: user,
Auth: auths,
Timeout: c.ConnTimeout,
}
//nolint:gosec
proxySSHConfig.HostKeyCallback = ssh.InsecureIgnoreHostKey()
proxyClient, err1 := ssh.Dial(
"tcp",
net.JoinHostPort(proxyServer, strconv.Itoa(port)),
proxySSHConfig,
)
if err1 != nil {
c.Proxy.Err = fmt.Errorf("connect to proxy %s:%d failed: %s", proxyServer, port, err1)
return
}
c.Proxy.SSHClient = proxyClient
}
}
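// A minimal usage sketch (the addresses, credentials, auths and the cmdTask
// value are hypothetical; cmdTask is any implementation of the Task
// interface) showing how the functional options above combine with BatchRun,
// whose result channel is drained until it is closed:
//
//	client, err := NewClient("root", "password", auths,
//		WithPort(22),
//		WithConcurrency(10),
//		WithCommandTimeout(30*time.Second),
//	)
//	if err != nil {
//		return err
//	}
//	for res := range client.BatchRun([]string{"10.16.0.1", "10.16.0.2"}, cmdTask) {
//		fmt.Printf("%s %s: %s\n", res.Addr, res.Status, res.Message)
//	}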
|
[
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
dev/archery/archery/utils/source.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from pathlib import Path
import subprocess
from .git import git
class InvalidArrowSource(Exception):
pass
class ArrowSources:
""" ArrowSources is a companion class representing a directory containing
Apache Arrow's sources.
"""
# Note that WORKSPACE is a reserved git revision name by this module to
# reference the current git workspace. In other words, this indicates to
# ArrowSources.at_revision that no cloning/checkout is required.
WORKSPACE = "WORKSPACE"
def __init__(self, path):
""" Initialize an ArrowSources
The caller must ensure that path is valid arrow source directory (can
be checked with ArrowSources.valid)
Parameters
----------
path : src
"""
path = Path(path)
# validate by checking a specific path in the arrow source tree
if not (path / 'cpp' / 'CMakeLists.txt').exists():
raise InvalidArrowSource(
"No Arrow C++ sources found in {}.".format(path)
)
self.path = path
@property
def archery(self):
""" Returns the archery directory of an Arrow sources. """
return self.dev / "archery"
@property
def cpp(self):
""" Returns the cpp directory of an Arrow sources. """
return self.path / "cpp"
@property
def dev(self):
""" Returns the dev directory of an Arrow sources. """
return self.path / "dev"
@property
def java(self):
""" Returns the java directory of an Arrow sources. """
return self.path / "java"
@property
def python(self):
""" Returns the python directory of an Arrow sources. """
return self.path / "python"
@property
def pyarrow(self):
""" Returns the python/pyarrow directory of an Arrow sources. """
return self.python / "pyarrow"
@property
def r(self):
""" Returns the r directory of an Arrow sources. """
return self.path / "r"
@property
def rust(self):
""" Returns the rust directory of an Arrow sources. """
return self.path / "rust"
@property
def git_backed(self):
""" Indicate if the sources are backed by git. """
return (self.path / ".git").exists()
@property
def git_dirty(self):
""" Indicate if the sources is a dirty git directory. """
return self.git_backed and git.dirty(git_dir=self.path)
def archive(self, path, dereference=False, compressor=None, revision=None):
""" Saves a git archive at path. """
if not self.git_backed:
raise ValueError("{} is not backed by git".format(self))
rev = revision if revision else "HEAD"
archive = git.archive("--prefix=apache-arrow/", rev,
git_dir=self.path)
# TODO(fsaintjacques): fix dereference for
if compressor:
archive = compressor(archive)
with open(path, "wb") as archive_fd:
archive_fd.write(archive)
def at_revision(self, revision, clone_dir):
""" Return a copy of the current sources for a specified git revision.
This method may return the current object if no checkout is required.
The caller is responsible for removing the cloned repository directory.
The user can use the special WORKSPACE token to mean the current git
workspace (no checkout performed).
The second value of the returned tuple indicates if a clone was
performed.
Parameters
----------
revision : str
Revision to checkout sources at.
clone_dir : str
Path to checkout the local clone.
"""
if not self.git_backed:
raise ValueError("{} is not backed by git".format(self))
if revision == ArrowSources.WORKSPACE:
return self, False
# A local clone is required to leave the current sources intact such
# that builds depending on said sources are not invalidated (or worse
# slightly affected when re-invoking the generator).
# "--local" only works when dest dir is on same volume of source dir.
# "--shared" works even if dest dir is on different volume.
git.clone("--shared", self.path, clone_dir)
# Revision can reference "origin/" (or any remotes) that are not found
# in the local clone. Thus, revisions are dereferenced in the source
# repository.
original_revision = git.rev_parse(revision)
git.checkout(original_revision, git_dir=clone_dir)
return ArrowSources(clone_dir), True
@staticmethod
def find(path=None):
""" Infer Arrow sources directory from various method.
The following guesses are done in order until a valid match is found:
1. Checks the given optional parameter.
2. Checks if the environment variable `ARROW_SRC` is defined and use
this.
3. Checks if the current working directory (cwd) is an Arrow source
directory.
4. Checks if this file (cli.py) is still in the original source
repository. If so, returns the relative path to the source
directory.
"""
# Explicit via environment
env = os.environ.get("ARROW_SRC")
# Implicit via cwd
cwd = Path.cwd()
# Implicit via current file
try:
this = Path(__file__).parents[4]
except IndexError:
this = None
# Implicit via git repository (if archery is installed system wide)
try:
repo = git.repository_root(git_dir=cwd)
except subprocess.CalledProcessError:
# We're not inside a git repository.
repo = None
paths = list(filter(None, [path, env, cwd, this, repo]))
for p in paths:
try:
return ArrowSources(p)
except InvalidArrowSource:
pass
searched_paths = "\n".join([" - {}".format(p) for p in paths])
raise InvalidArrowSource(
"Unable to locate Arrow's source directory. "
"Searched paths are:\n{}".format(searched_paths)
)
def __repr__(self):
return str(self.path)  # __repr__ must return a str, not a Path
|
[] |
[] |
[
"ARROW_SRC"
] |
[]
|
["ARROW_SRC"]
|
python
| 1 | 0 | |
pkg/image/api/v1/conversion.go
|
package v1
import (
"fmt"
"sort"
kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
newer "github.com/projectatomic/atomic-enterprise/pkg/image/api"
)
// The docker image metadata must be encoded at a specific schema version.
func convert_api_Image_To_v1_Image(in *newer.Image, out *Image, s conversion.Scope) error {
if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
return err
}
out.DockerImageReference = in.DockerImageReference
out.DockerImageManifest = in.DockerImageManifest
version := in.DockerImageMetadataVersion
if len(version) == 0 {
version = "1.0"
}
data, err := kapi.Scheme.EncodeToVersion(&in.DockerImageMetadata, version)
if err != nil {
return err
}
out.DockerImageMetadata.RawJSON = data
out.DockerImageMetadataVersion = version
return nil
}
func convert_v1_Image_To_api_Image(in *Image, out *newer.Image, s conversion.Scope) error {
if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
return err
}
out.DockerImageReference = in.DockerImageReference
out.DockerImageManifest = in.DockerImageManifest
version := in.DockerImageMetadataVersion
if len(version) == 0 {
version = "1.0"
}
if len(in.DockerImageMetadata.RawJSON) > 0 {
// TODO: add a way to default the expected kind and version of an object if not set
obj, err := kapi.Scheme.New(version, "DockerImage")
if err != nil {
return err
}
if err := kapi.Scheme.DecodeInto(in.DockerImageMetadata.RawJSON, obj); err != nil {
return err
}
if err := s.Convert(obj, &out.DockerImageMetadata, 0); err != nil {
return err
}
}
out.DockerImageMetadataVersion = version
return nil
}
func convert_v1_ImageStreamSpec_To_api_ImageStreamSpec(in *ImageStreamSpec, out *newer.ImageStreamSpec, s conversion.Scope) error {
out.DockerImageRepository = in.DockerImageRepository
out.Tags = make(map[string]newer.TagReference)
return s.Convert(&in.Tags, &out.Tags, 0)
}
func convert_api_ImageStreamSpec_To_v1_ImageStreamSpec(in *newer.ImageStreamSpec, out *ImageStreamSpec, s conversion.Scope) error {
out.DockerImageRepository = in.DockerImageRepository
out.Tags = make([]NamedTagReference, 0)
return s.Convert(&in.Tags, &out.Tags, 0)
}
func convert_v1_ImageStreamStatus_To_api_ImageStreamStatus(in *ImageStreamStatus, out *newer.ImageStreamStatus, s conversion.Scope) error {
out.DockerImageRepository = in.DockerImageRepository
out.Tags = make(map[string]newer.TagEventList)
return s.Convert(&in.Tags, &out.Tags, 0)
}
func convert_api_ImageStreamStatus_To_v1_ImageStreamStatus(in *newer.ImageStreamStatus, out *ImageStreamStatus, s conversion.Scope) error {
out.DockerImageRepository = in.DockerImageRepository
out.Tags = make([]NamedTagEventList, 0)
return s.Convert(&in.Tags, &out.Tags, 0)
}
func convert_api_ImageStreamMapping_To_v1_ImageStreamMapping(in *newer.ImageStreamMapping, out *ImageStreamMapping, s conversion.Scope) error {
return s.DefaultConvert(in, out, conversion.DestFromSource)
}
func convert_v1_ImageStreamMapping_To_api_ImageStreamMapping(in *ImageStreamMapping, out *newer.ImageStreamMapping, s conversion.Scope) error {
return s.DefaultConvert(in, out, conversion.SourceToDest)
}
func init() {
err := kapi.Scheme.AddConversionFuncs(
func(in *[]NamedTagEventList, out *map[string]newer.TagEventList, s conversion.Scope) error {
for _, curr := range *in {
newTagEventList := newer.TagEventList{}
if err := s.Convert(&curr.Items, &newTagEventList.Items, 0); err != nil {
return err
}
(*out)[curr.Tag] = newTagEventList
}
return nil
},
func(in *map[string]newer.TagEventList, out *[]NamedTagEventList, s conversion.Scope) error {
allKeys := make([]string, 0, len(*in))
for key := range *in {
allKeys = append(allKeys, key)
}
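// Sort the keys so the resulting slice has a deterministic order (map iteration order is randomized).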
sort.Strings(allKeys)
for _, key := range allKeys {
newTagEventList := (*in)[key]
oldTagEventList := &NamedTagEventList{Tag: key}
if err := s.Convert(&newTagEventList.Items, &oldTagEventList.Items, 0); err != nil {
return err
}
*out = append(*out, *oldTagEventList)
}
return nil
},
func(in *[]NamedTagReference, out *map[string]newer.TagReference, s conversion.Scope) error {
for _, curr := range *in {
r := newer.TagReference{
Annotations: curr.Annotations,
}
if err := s.Convert(&curr.From, &r.From, 0); err != nil {
return err
}
(*out)[curr.Name] = r
}
return nil
},
func(in *map[string]newer.TagReference, out *[]NamedTagReference, s conversion.Scope) error {
allTags := make([]string, 0, len(*in))
for tag := range *in {
allTags = append(allTags, tag)
}
sort.Strings(allTags)
for _, tag := range allTags {
newTagReference := (*in)[tag]
oldTagReference := NamedTagReference{
Name: tag,
Annotations: newTagReference.Annotations,
}
if err := s.Convert(&newTagReference.From, &oldTagReference.From, 0); err != nil {
return err
}
*out = append(*out, oldTagReference)
}
return nil
},
convert_api_Image_To_v1_Image,
convert_v1_Image_To_api_Image,
convert_v1_ImageStreamSpec_To_api_ImageStreamSpec,
convert_api_ImageStreamSpec_To_v1_ImageStreamSpec,
convert_v1_ImageStreamStatus_To_api_ImageStreamStatus,
convert_api_ImageStreamStatus_To_v1_ImageStreamStatus,
convert_api_ImageStreamMapping_To_v1_ImageStreamMapping,
convert_v1_ImageStreamMapping_To_api_ImageStreamMapping,
)
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = kapi.Scheme.AddFieldLabelConversionFunc("v1", "ImageStream",
func(label, value string) (string, string, error) {
switch label {
case "name":
return "metadata.name", value, nil
case "metadata.name", "spec.dockerImageRepository", "status.dockerImageRepository":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
python/paddle/fluid/tests/unittests/interpreter/test_standalone_executor.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import unittest
import paddle
from paddle.fluid import core
from paddle.fluid.core import StandaloneExecutor
import numpy as np
paddle.enable_static()
class LinearTestCase(unittest.TestCase):
def setUp(self):
place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace()
self.place = core.Place()
self.place.set_place(place)
def build_program(self):
a = paddle.static.data(name="a", shape=[2, 2], dtype='float32')
b = paddle.ones([2, 2]) * 2
t = paddle.static.nn.fc(a, 2)
c = t + b
main_program = paddle.fluid.default_main_program()
startup_program = paddle.fluid.default_startup_program()
return startup_program, main_program, c
def test_interp_base(self):
startup_program, main_program, c = self.build_program()
standaloneexecutor = StandaloneExecutor(
self.place, startup_program.desc, main_program.desc, core.Scope())
out = standaloneexecutor.run({
"a": np.ones(
[2, 2], dtype="float32") * 2
}, [c.name])
for i in range(10):
out = standaloneexecutor.run({
"a": np.ones(
[2, 2], dtype="float32") * i
}, [c.name])
for i in range(10):
out = standaloneexecutor.run({
"a": np.ones(
[2, 2], dtype="float32") * i
}, ['a', c.name])
def test_dry_run(self):
startup_program, main_program, c = self.build_program()
standaloneexecutor = StandaloneExecutor(
self.place, startup_program.desc, main_program.desc, core.Scope())
# test for cost_info
cost_info = standaloneexecutor.dry_run({
"a": np.ones(
[2, 2], dtype="float32")
})
self.check_cost_info(cost_info)
def check_cost_info(self, cost_info):
IS_WINDOWS = sys.platform.startswith('win')
if core.is_compiled_with_cuda():
# w, bias, b, out: each memory block is at least 256 bytes on Linux
gt = 16 * 4 if IS_WINDOWS else 256 * 4
self.assertGreater(cost_info.device_memory_bytes(), gt)
else:
self.assertEqual(cost_info.device_memory_bytes(), 0)
def build_program():
main_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(main_program, startup_program):
with paddle.static.device_guard('cpu'):
data = paddle.ones([4, 64], dtype='float32', name='data')
# data -> [memcpy_h2d] -> data' -> [matmul] -> out ->[add] -> add_out
with paddle.static.device_guard('gpu'):
weight = paddle.randn([64, 64], name='weight') # gpu
matmul_out = paddle.matmul(data, weight, name='matmul_out') # gpu
bias = paddle.ones([4, 64], dtype='float32', name='bias')
add_out = paddle.add(matmul_out, bias, name='add_out')
# add_out -> [memcpy_d2h] -> add_out' -> [sub] -> sub_out -> [tanh] -> tanh_out
with paddle.static.device_guard('cpu'):
sub_out = paddle.subtract(add_out, data, name='sub_out')
tanh_out = paddle.tanh(sub_out, name='tanh_out')
with paddle.static.device_guard('gpu'):
bias_1 = paddle.add(bias, sub_out, name='bias_1')
out_before = paddle.tanh(bias_1, name='out_before')
out_last = paddle.subtract(tanh_out, data, name='out_last')
out = paddle.add(out_before, out_last, name='out')
mean = paddle.mean(out, name='mean_out')
return main_program, startup_program, [mean]
class MultiStreamModelTestCase(unittest.TestCase):
def setUp(self):
self.iter_n = 2
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace()
def test_result(self):
ground_truths = self.run_raw_executor()
res = self.run_new_executor()
for gt, out in zip(ground_truths, res):
self.assertEqual(gt[0], out[0])
def run_raw_executor(self):
paddle.seed(2020)
main_program, startup_program, fetch_list = build_program()
exe = paddle.static.Executor(self.place)
exe.run(startup_program)
outs = []
for i in range(self.iter_n):
outs.append(exe.run(main_program, fetch_list=fetch_list))
return outs
def run_new_executor(self):
paddle.seed(2020)
main_program, startup_program, fetch_list = build_program()
fetch_list = [x.name for x in fetch_list]
p = core.Place()
p.set_place(self.place)
inter_core = StandaloneExecutor(p, startup_program.desc,
main_program.desc, core.Scope())
outs = []
for i in range(self.iter_n):
outs.append(
np.array(inter_core.run({}, fetch_list)._move_to_list()[0]))
return outs
class SwitchExecutorInterfaceTestCase(MultiStreamModelTestCase):
def run_new_executor(self):
paddle.seed(2020)
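# FLAGS_USE_STANDALONE_EXECUTOR switches paddle.static.Executor over to the standalone (new)
# executor; it is set only for this run and deleted afterwards so later tests use the default executor.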
os.environ['FLAGS_USE_STANDALONE_EXECUTOR'] = '1'
main_program, startup_program, fetch_list = build_program()
exe = paddle.static.Executor(self.place)
exe.run(startup_program)
outs = []
for i in range(self.iter_n):
outs.append(exe.run(main_program, fetch_list=fetch_list))
del os.environ['FLAGS_USE_STANDALONE_EXECUTOR']
return outs
class SwitchExecutorInterfaceWithFeed(unittest.TestCase):
def setUp(self):
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace()
self.iter_run = 2
def build_program(self, is_double=False):
main_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(main_program, startup_program):
a = paddle.static.data(name="a", shape=[2, 2], dtype='float32')
b = paddle.ones([2, 2]) * 2
t = paddle.static.nn.fc(a, 2)
c = t + b
if is_double:
c = c + c
return main_program, startup_program, [c]
def _run(self, feed, use_str=False, is_double=False, add_wrong_fetch=False):
paddle.seed(2020)
main_program, startup_program, fetch_vars = self.build_program(
is_double)
exe = paddle.static.Executor(self.place)
exe.run(startup_program)
if use_str: # test for fetch name
fetch_vars = [x.name for x in fetch_vars]
if add_wrong_fetch: # test for wrong fetch type
fetch_vars.append(1123)
outs = []
for i in range(self.iter_run):
out = exe.run(main_program, feed=feed, fetch_list=fetch_vars)[0]
outs.append(out)
return outs
def run_raw_executor(self, feed):
# run construct program 1
out1 = self._run(feed, use_str=False, is_double=False)
# run construct program 2 with same executor
out2 = self._run(feed, use_str=True, is_double=True)
return [out1, out2]
def run_new_executor(self, feed):
os.environ['FLAGS_USE_STANDALONE_EXECUTOR'] = '1'
out = self.run_raw_executor(feed)
del os.environ['FLAGS_USE_STANDALONE_EXECUTOR']
return out
def test_with_feed(self):
data = np.ones([2, 2], dtype="float32")
feed = {"a": data, 'fake_input': data}
res = self.run_new_executor(feed)
gt = self.run_raw_executor(feed)
for x, y in zip(gt, res):
self.assertTrue(np.array_equal(x, y))
def test_with_error(self):
feed = [{'a': np.ones([2, 2], dtype="float32")}]
with self.assertRaises(TypeError):
os.environ['FLAGS_USE_STANDALONE_EXECUTOR'] = '1'
self._run(feed[0], add_wrong_fetch=True)
del os.environ['FLAGS_USE_STANDALONE_EXECUTOR']
class TestException(unittest.TestCase):
def setUp(self):
self.place = paddle.CPUPlace()
def build_program(self):
main_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(main_program, startup_program):
w = paddle.rand([10, 3])
ids = paddle.static.data(name="id", shape=[5], dtype='int64')
data = paddle.static.data(name="data", shape=[3], dtype='float32')
emb = paddle.nn.functional.embedding(
x=ids, weight=w, sparse=False, name="embedding")
emb = emb + data
return main_program, startup_program, emb
def _run(self, feeds):
paddle.seed(2020)
main_program, startup_program, fetch_vars = self.build_program()
exe = paddle.static.Executor(self.place)
exe.run(startup_program)
for feed in feeds:
out = exe.run(main_program, feed=feed, fetch_list=fetch_vars)
print(out)
return out
def run_new_executor(self, feed):
os.environ['FLAGS_USE_STANDALONE_EXECUTOR'] = '1'
out = self._run(feed)
del os.environ['FLAGS_USE_STANDALONE_EXECUTOR']
return out
def test_exception(self):
feed = [{
'id': np.array([1, 2, 3, 4, 5]).astype(np.int64),
'data': np.array([1, 2, 3, 4]).astype(np.float32),
}, {
'id': np.array([1, 2, 3, 4, 11]).astype(np.int64),
'data': np.array([1, 2, 3, 4]).astype(np.float32),
}]
self.assertRaises(ValueError, self.run_new_executor, feed)
def test_nan(self):
flags = {'FLAGS_check_nan_inf': True, 'FLAGS_benchmark': True}
paddle.fluid.set_flags(flags)
feed = [{
'id': np.array([1, 2, 3, 4, 5]).astype(np.int64),
'data': np.array([1, 2, 3]).astype(np.float32),
}, {
'id': np.array([1, 2, 3, 4, 5]).astype(np.int64),
'data': np.array([1, 2, 3]).astype(np.float32),
}]
feed[1]['data'][0] = np.nan
self.assertRaises(RuntimeError, self.run_new_executor, feed)
if __name__ == "__main__":
unittest.main()
|
[] |
[] |
[
"FLAGS_USE_STANDALONE_EXECUTOR"
] |
[]
|
["FLAGS_USE_STANDALONE_EXECUTOR"]
|
python
| 1 | 0 | |
src/cmd/compile/internal/ssagen/ssa.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssagen
import (
"bufio"
"bytes"
"cmd/compile/internal/abi"
"fmt"
"go/constant"
"html"
"internal/buildcfg"
"os"
"path/filepath"
"sort"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
)
var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache
var ssaDump string // early copy of $GOSSAFUNC; the func name to dump output for
var ssaDir string // optional destination for ssa dump file
var ssaDumpStdout bool // whether to dump to stdout
var ssaDumpCFG string // generate CFGs for these phases
const ssaDumpFile = "ssa.html"
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
var ssaDumpInlined []*ir.Func
func DumpInline(fn *ir.Func) {
if ssaDump != "" && ssaDump == ir.FuncName(fn) {
ssaDumpInlined = append(ssaDumpInlined, fn)
}
}
func InitEnv() {
ssaDump = os.Getenv("GOSSAFUNC")
ssaDir = os.Getenv("GOSSADIR")
if ssaDump != "" {
if strings.HasSuffix(ssaDump, "+") {
ssaDump = ssaDump[:len(ssaDump)-1]
ssaDumpStdout = true
}
spl := strings.Split(ssaDump, ":")
if len(spl) > 1 {
ssaDump = spl[0]
ssaDumpCFG = spl[1]
}
}
}
func InitConfig() {
types_ := ssa.NewTypes()
if Arch.SoftFloat {
softfloatInit()
}
// Generate a few pointer types that are uncommon in the frontend but common in the backend.
// Caching is disabled in the backend, so generating these here avoids allocations.
_ = types.NewPtr(types.Types[types.TINTER]) // *interface{}
_ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING])) // **string
_ = types.NewPtr(types.NewSlice(types.Types[types.TINTER])) // *[]interface{}
_ = types.NewPtr(types.NewPtr(types.ByteType)) // **byte
_ = types.NewPtr(types.NewSlice(types.ByteType)) // *[]byte
_ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING])) // *[]string
_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
_ = types.NewPtr(types.Types[types.TINT16]) // *int16
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
_ = types.NewPtr(types.ErrorType) // *error
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
ssaConfig.Race = base.Flag.Race
ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
// Set up some runtime functions we'll need to call.
ir.Syms.AssertE2I = typecheck.LookupRuntimeFunc("assertE2I")
ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2")
ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I")
ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2")
ir.Syms.CheckPtrAlignment = typecheck.LookupRuntimeFunc("checkptrAlignment")
ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy")
ir.Syms.Duffzero = typecheck.LookupRuntimeFunc("duffzero")
ir.Syms.GCWriteBarrier = typecheck.LookupRuntimeFunc("gcWriteBarrier")
ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
ir.Syms.Asanread = typecheck.LookupRuntimeFunc("asanread")
ir.Syms.Asanwrite = typecheck.LookupRuntimeFunc("asanwrite")
ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject")
ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc")
ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide")
ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE")
ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI")
ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype")
ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow")
ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift")
ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread")
ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange")
ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite")
ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange")
ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT") // bool
ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41") // bool
ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool
ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4") // bool
ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool
ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s")
ir.Syms.Typedmemclr = typecheck.LookupRuntimeFunc("typedmemclr")
ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove")
ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv") // asm func with special ABI
ir.Syms.WriteBarrier = typecheck.LookupRuntimeVar("writeBarrier") // struct { bool; ... }
ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase")
// asm funcs with special ABI
if base.Ctxt.Arch.Name == "amd64" {
GCWriteBarrierReg = map[int16]*obj.LSym{
x86.REG_AX: typecheck.LookupRuntimeFunc("gcWriteBarrier"),
x86.REG_CX: typecheck.LookupRuntimeFunc("gcWriteBarrierCX"),
x86.REG_DX: typecheck.LookupRuntimeFunc("gcWriteBarrierDX"),
x86.REG_BX: typecheck.LookupRuntimeFunc("gcWriteBarrierBX"),
x86.REG_BP: typecheck.LookupRuntimeFunc("gcWriteBarrierBP"),
x86.REG_SI: typecheck.LookupRuntimeFunc("gcWriteBarrierSI"),
x86.REG_R8: typecheck.LookupRuntimeFunc("gcWriteBarrierR8"),
x86.REG_R9: typecheck.LookupRuntimeFunc("gcWriteBarrierR9"),
}
}
if Arch.LinkArch.Family == sys.Wasm {
BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("goPanicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("goPanicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("goPanicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("goPanicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("goPanicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("goPanicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("goPanicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("goPanicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("goPanicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("goPanicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU")
BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("goPanicSliceConvert")
} else {
BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("panicSliceConvert")
}
if Arch.LinkArch.PtrSize == 4 {
ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU")
ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap")
ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU")
ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB")
ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU")
ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen")
ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
}
// Wasm (all asm funcs with special ABIs)
ir.Syms.WasmMove = typecheck.LookupRuntimeVar("wasmMove")
ir.Syms.WasmZero = typecheck.LookupRuntimeVar("wasmZero")
ir.Syms.WasmDiv = typecheck.LookupRuntimeVar("wasmDiv")
ir.Syms.WasmTruncS = typecheck.LookupRuntimeVar("wasmTruncS")
ir.Syms.WasmTruncU = typecheck.LookupRuntimeVar("wasmTruncU")
ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic")
}
// AbiForBodylessFuncStackMap returns the ABI for a bodyless function's stack map.
// This is not necessarily the ABI used to call it.
// Currently (1.17 dev) such a stack map is always ABI0;
// any ABI wrapper that is present is nosplit, hence a precise
// stack map is not needed there (the parameters survive only long
// enough to call the wrapped assembly function).
// This always returns a freshly copied ABI.
func AbiForBodylessFuncStackMap(fn *ir.Func) *abi.ABIConfig {
return ssaConfig.ABI0.Copy() // No idea what races will result, be safe
}
// These are disabled but remain ready for use in case they are needed for the next regabi port.
// TODO if they are not needed for 1.18 / next register abi port, delete them.
const magicNameDotSuffix = ".*disabled*MagicMethodNameForTestingRegisterABI"
const magicLastTypeName = "*disabled*MagicLastTypeNameForTestingRegisterABI"
// abiForFunc implements ABI policy for a function, but does not return a copy of the ABI.
// Passing a nil function returns the default ABI based on experiment configuration.
func abiForFunc(fn *ir.Func, abi0, abi1 *abi.ABIConfig) *abi.ABIConfig {
if buildcfg.Experiment.RegabiArgs {
// Select the ABI based on the function's defining ABI.
if fn == nil {
return abi1
}
switch fn.ABI {
case obj.ABI0:
return abi0
case obj.ABIInternal:
// TODO(austin): Clean up the nomenclature here.
// It's not clear that "abi1" is ABIInternal.
return abi1
}
base.Fatalf("function %v has unknown ABI %v", fn, fn.ABI)
panic("not reachable")
}
a := abi0
if fn != nil {
name := ir.FuncName(fn)
magicName := strings.HasSuffix(name, magicNameDotSuffix)
if fn.Pragma&ir.RegisterParams != 0 { // TODO(register args) remove after register abi is working
if strings.Contains(name, ".") {
if !magicName {
base.ErrorfAt(fn.Pos(), "Calls to //go:registerparams method %s won't work, remove the pragma from the declaration.", name)
}
}
a = abi1
} else if magicName {
if base.FmtPos(fn.Pos()) == "<autogenerated>:1" {
// no way to put a pragma here, and it will error out in the real source code if they did not do it there.
a = abi1
} else {
base.ErrorfAt(fn.Pos(), "Methods with magic name %s (method %s) must also specify //go:registerparams", magicNameDotSuffix[1:], name)
}
}
if regAbiForFuncType(fn.Type().FuncType()) {
// fmt.Printf("Saw magic last type name for function %s\n", name)
a = abi1
}
}
return a
}
func regAbiForFuncType(ft *types.Func) bool {
np := ft.Params.NumFields()
return np > 0 && strings.Contains(ft.Params.FieldType(np-1).String(), magicLastTypeName)
}
// dvarint writes a varint v to the funcdata in symbol x and returns the new offset
func dvarint(x *obj.LSym, off int, v int64) int {
if v < 0 || v > 1e9 {
panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
}
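// Emit 7 bits per byte, least-significant group first, with the high bit set on every byte except
// the last (LEB128-style); since v <= 1e9, at most five bytes are written.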
if v < 1<<7 {
return objw.Uint8(x, off, uint8(v))
}
off = objw.Uint8(x, off, uint8((v&127)|128))
if v < 1<<14 {
return objw.Uint8(x, off, uint8(v>>7))
}
off = objw.Uint8(x, off, uint8(((v>>7)&127)|128))
if v < 1<<21 {
return objw.Uint8(x, off, uint8(v>>14))
}
off = objw.Uint8(x, off, uint8(((v>>14)&127)|128))
if v < 1<<28 {
return objw.Uint8(x, off, uint8(v>>21))
}
off = objw.Uint8(x, off, uint8(((v>>21)&127)|128))
return objw.Uint8(x, off, uint8(v>>28))
}
// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
// that is using open-coded defers. This funcdata is used to determine the active
// defers in a function and execute those defers during panic processing.
//
// The funcdata is all encoded in varints (since values will almost always be less than
// 128, but stack offsets could potentially be up to 2Gbyte). All "locations" (offsets)
// for stack variables are specified as the number of bytes below varp (pointer to the
// top of the local variables) for their starting address. The format is:
//
// - Offset of the deferBits variable
// - Number of defers in the function
// - Information about each defer call, in reverse order of appearance in the function:
// - Offset of the closure value to call
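//
// As an illustration (made-up offsets): with deferBits at varp-8 and two open-coded defers whose
// closure slots sit at varp-16 and varp-24 (in source order), the emitted varints are 8, 2, 24, 16;
// the per-defer entries are written in reverse order of appearance.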
func (s *state) emitOpenDeferInfo() {
x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
x.Set(obj.AttrContentAddressable, true)
s.curfn.LSym.Func().OpenCodedDeferInfo = x
off := 0
off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
off = dvarint(x, off, int64(len(s.openDefers)))
// Write in reverse-order, for ease of running in that order at runtime
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
off = dvarint(x, off, -r.closureNode.FrameOffset())
}
}
func okOffset(offset int64) int64 {
if offset == types.BOGUS_FUNARG_OFFSET {
panic(fmt.Errorf("Bogus offset %d", offset))
}
return offset
}
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *ir.Func, worker int) *ssa.Func {
name := ir.FuncName(fn)
printssa := false
if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", package.name e.g. "compress/gzip.(*Reader).Reset", or subpackage name "gzip.(*Reader).Reset"
pkgDotName := base.Ctxt.Pkgpath + "." + name
printssa = name == ssaDump ||
strings.HasSuffix(pkgDotName, ssaDump) && (pkgDotName == ssaDump || strings.HasSuffix(pkgDotName, "/"+ssaDump))
}
var astBuf *bytes.Buffer
if printssa {
astBuf = &bytes.Buffer{}
ir.FDumpList(astBuf, "buildssa-enter", fn.Enter)
ir.FDumpList(astBuf, "buildssa-body", fn.Body)
ir.FDumpList(astBuf, "buildssa-exit", fn.Exit)
if ssaDumpStdout {
fmt.Println("generating SSA for", name)
fmt.Print(astBuf.String())
}
}
var s state
s.pushLine(fn.Pos())
defer s.popLine()
s.hasdefer = fn.HasDefer()
if fn.Pragma&ir.CgoUnsafeArgs != 0 {
s.cgoUnsafeArgs = true
}
s.checkPtrEnabled = ir.ShouldCheckPtr(fn, 1)
fe := ssafn{
curfn: fn,
log: printssa && ssaDumpStdout,
}
s.curfn = fn
s.f = ssa.NewFunc(&fe)
s.config = ssaConfig
s.f.Type = fn.Type()
s.f.Config = ssaConfig
s.f.Cache = &ssaCaches[worker]
s.f.Cache.Reset()
s.f.Name = name
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
s.f.PrintOrHtmlSSA = printssa
if fn.Pragma&ir.Nosplit != 0 {
s.f.NoSplit = true
}
s.f.ABI0 = ssaConfig.ABI0.Copy() // Make a copy to avoid racy map operations in type-register-width cache.
s.f.ABI1 = ssaConfig.ABI1.Copy()
s.f.ABIDefault = abiForFunc(nil, s.f.ABI0, s.f.ABI1)
s.f.ABISelf = abiForFunc(fn, s.f.ABI0, s.f.ABI1)
s.panics = map[funcLine]*ssa.Block{}
s.softFloat = s.config.SoftFloat
// Allocate starting block
s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
s.f.Entry.Pos = fn.Pos()
if printssa {
ssaDF := ssaDumpFile
if ssaDir != "" {
ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html")
ssaD := filepath.Dir(ssaDF)
os.MkdirAll(ssaD, 0755)
}
s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
// TODO: generate and print a mapping from nodes to values and blocks
dumpSourcesColumn(s.f.HTMLWriter, fn)
s.f.HTMLWriter.WriteAST("AST", astBuf)
}
// Allocate starting values
s.labels = map[string]*ssaLabel{}
s.fwdVars = map[ir.Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
switch {
case base.Debug.NoOpenDefer != 0:
s.hasOpenDefers = false
case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
// libraries, because there is extra code (added by rewriteToUseGot())
// preceding the deferreturn/ret code that we don't track correctly.
s.hasOpenDefers = false
}
if s.hasOpenDefers && len(s.curfn.Exit) > 0 {
// Skip doing open defers if there is any extra exit code (likely
// race detection), since we will not generate that code in the
// case of the extra deferreturn/ret segment.
s.hasOpenDefers = false
}
if s.hasOpenDefers {
// Similarly, skip if there are any heap-allocated result
// parameters that need to be copied back to their stack slots.
for _, f := range s.curfn.Type().Results().FieldSlice() {
if !f.Nname.(*ir.Name).OnStack() {
s.hasOpenDefers = false
break
}
}
}
if s.hasOpenDefers &&
s.curfn.NumReturns*s.curfn.NumDefers > 15 {
// Since we are generating defer calls at every exit for
// open-coded defers, skip doing open-coded defers if there are
// too many returns (especially if there are multiple defers).
// Open-coded defers are most important for improving performance
// for smaller functions (which don't have many returns).
s.hasOpenDefers = false
}
s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])
s.startBlock(s.f.Entry)
s.vars[memVar] = s.startmem
if s.hasOpenDefers {
// Create the deferBits variable and stack slot. deferBits is a
// bitmask showing which of the open-coded defers in this function
// have been activated.
deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
deferBitsTemp.SetAddrtaken(true)
s.deferBitsTemp = deferBitsTemp
// For this value, AuxInt is initialized to zero by default
startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
s.vars[deferBitsVar] = startDeferBits
s.deferBitsAddr = s.addr(deferBitsTemp)
s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
// Make sure that the deferBits stack slot is kept alive (for use
// by panics) and stores to deferBits are not eliminated, even if
// all checking code on deferBits in the function exit can be
// eliminated, because the defer statements were all
// unconditional.
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
}
var params *abi.ABIParamResultInfo
params = s.f.ABISelf.ABIAnalyze(fn.Type(), true)
// Generate addresses of local declarations
s.decladdrs = map[*ir.Name]*ssa.Value{}
for _, n := range fn.Dcl {
switch n.Class {
case ir.PPARAM:
// Be aware that blank and unnamed input parameters will not appear here, but do appear in the type
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
case ir.PPARAMOUT:
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
case ir.PAUTO:
// processed at each use, to prevent Addr coming
// before the decl.
default:
s.Fatalf("local variable with class %v unimplemented", n.Class)
}
}
s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, params)
// Populate SSAable arguments.
for _, n := range fn.Dcl {
if n.Class == ir.PPARAM {
if s.canSSA(n) {
v := s.newValue0A(ssa.OpArg, n.Type(), n)
s.vars[n] = v
s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
} else { // address was taken AND/OR too large for SSA
paramAssignment := ssa.ParamAssignmentForArgName(s.f, n)
if len(paramAssignment.Registers) > 0 {
if TypeOK(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
v := s.newValue0A(ssa.OpArg, n.Type(), n)
s.store(n.Type(), s.decladdrs[n], v)
} else { // Too big for SSA.
// Brute force, and early, do a bunch of stores from registers
// TODO fix the nasty storeArgOrLoad recursion in ssa/expand_calls.go so this Just Works with store of a big Arg.
s.storeParameterRegsToStack(s.f.ABISelf, paramAssignment, n, s.decladdrs[n], false)
}
}
}
}
}
// Populate closure variables.
if fn.Needctxt() {
clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
for _, n := range fn.ClosureVars {
typ := n.Type()
if !n.Byval() {
typ = types.NewPtr(typ)
}
offset = types.Rnd(offset, typ.Alignment())
ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
offset += typ.Size()
// If n is a small variable captured by value, promote
// it to PAUTO so it can be converted to SSA.
//
// Note: While we never capture a variable by value if
// the user took its address, we may have generated
// runtime calls that did (#43701). Since we don't
// convert Addrtaken variables to SSA anyway, no point
// in promoting them either.
if n.Byval() && !n.Addrtaken() && TypeOK(n.Type()) {
n.Class = ir.PAUTO
fn.Dcl = append(fn.Dcl, n)
s.assign(n, s.load(n.Type(), ptr), false, 0)
continue
}
if !n.Byval() {
ptr = s.load(typ, ptr)
}
s.setHeapaddr(fn.Pos(), n, ptr)
}
}
// Convert the AST-based IR to the SSA-based IR
s.stmtList(fn.Enter)
s.zeroResults()
s.paramsToHeap()
s.stmtList(fn.Body)
// fallthrough to exit
if s.curBlock != nil {
s.pushLine(fn.Endlineno)
s.exit()
s.popLine()
}
for _, b := range s.f.Blocks {
if b.Pos != src.NoXPos {
s.updateUnsetPredPos(b)
}
}
s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis")
s.insertPhis()
// Main call to ssa package to compile function
ssa.Compile(s.f)
if s.hasOpenDefers {
s.emitOpenDeferInfo()
}
// Record incoming parameter spill information for morestack calls emitted in the assembler.
// This is done here, using all the parameters (used, partially used, and unused) because
// it mimics the behavior of the former ABI (everything stored) and because it's not 100%
// clear if naming conventions are respected in autogenerated code.
// TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also.
for _, p := range params.InParams() {
typs, offs := p.RegisterTypesAndOffsets()
for i, t := range typs {
o := offs[i] // offset within parameter
fo := p.FrameOffset(params) // offset of parameter in frame
reg := ssa.ObjRegForAbiReg(p.Registers[i], s.f.Config)
s.f.RegArgs = append(s.f.RegArgs, ssa.Spill{Reg: reg, Offset: fo + o, Type: t})
}
}
return s.f
}
func (s *state) storeParameterRegsToStack(abi *abi.ABIConfig, paramAssignment *abi.ABIParamAssignment, n *ir.Name, addr *ssa.Value, pointersOnly bool) {
typs, offs := paramAssignment.RegisterTypesAndOffsets()
for i, t := range typs {
if pointersOnly && !t.IsPtrShaped() {
continue
}
r := paramAssignment.Registers[i]
o := offs[i]
op, reg := ssa.ArgOpAndRegisterFor(r, abi)
aux := &ssa.AuxNameOffset{Name: n, Offset: o}
v := s.newValue0I(op, t, reg)
v.Aux = aux
p := s.newValue1I(ssa.OpOffPtr, types.NewPtr(t), o, addr)
s.store(t, p, v)
}
}
// zeroResults zeros the return values at the start of the function.
// We need to do this very early in the function. Defer might stop a
// panic and show the return values as they exist at the time of
// panic. For precise stacks, the garbage collector assumes results
// are always live, so we need to zero them before any allocations,
// even allocations to move params/results to the heap.
func (s *state) zeroResults() {
for _, f := range s.curfn.Type().Results().FieldSlice() {
n := f.Nname.(*ir.Name)
if !n.OnStack() {
// The local which points to the return value is the
// thing that needs zeroing. This is already handled
// by a Needzero annotation in plive.go:(*liveness).epilogue.
continue
}
// Zero the stack location containing f.
if typ := n.Type(); TypeOK(typ) {
s.assign(n, s.zeroVal(typ), false, 0)
} else {
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
s.zero(n.Type(), s.decladdrs[n])
}
}
}
// paramsToHeap produces code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
func (s *state) paramsToHeap() {
do := func(params *types.Type) {
for _, f := range params.FieldSlice() {
if f.Nname == nil {
continue // anonymous or blank parameter
}
n := f.Nname.(*ir.Name)
if ir.IsBlank(n) || n.OnStack() {
continue
}
s.newHeapaddr(n)
if n.Class == ir.PPARAM {
s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n])
}
}
}
typ := s.curfn.Type()
do(typ.Recvs())
do(typ.Params())
do(typ.Results())
}
// newHeapaddr allocates heap memory for n and sets its heap address.
func (s *state) newHeapaddr(n *ir.Name) {
s.setHeapaddr(n.Pos(), n, s.newObject(n.Type()))
}
// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
// and then sets it as n's heap address.
func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) {
base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type)
}
// Declare variable to hold address.
addr := ir.NewNameAt(pos, &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
addr.SetType(types.NewPtr(n.Type()))
addr.Class = ir.PAUTO
addr.SetUsed(true)
addr.Curfn = s.curfn
s.curfn.Dcl = append(s.curfn.Dcl, addr)
types.CalcSize(addr.Type())
if n.Class == ir.PPARAMOUT {
addr.SetIsOutputParamHeapAddr(true)
}
n.Heapaddr = addr
s.assign(addr, ptr, false, 0)
}
// newObject returns an SSA value denoting new(typ).
func (s *state) newObject(typ *types.Type) *ssa.Value {
if typ.Size() == 0 {
return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
}
return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, s.reflectType(typ))[0]
}
func (s *state) checkPtrAlignment(n *ir.ConvExpr, v *ssa.Value, count *ssa.Value) {
if !n.Type().IsPtr() {
s.Fatalf("expected pointer type: %v", n.Type())
}
elem := n.Type().Elem()
if count != nil {
if !elem.IsArray() {
s.Fatalf("expected array type: %v", elem)
}
elem = elem.Elem()
}
size := elem.Size()
// Casting from larger type to smaller one is ok, so for smallest type, do nothing.
if elem.Alignment() == 1 && (size == 0 || size == 1 || count == nil) {
return
}
if count == nil {
count = s.constInt(types.Types[types.TUINTPTR], 1)
}
if count.Type.Size() != s.config.PtrSize {
s.Fatalf("expected count fit to an uintptr size, have: %d, want: %d", count.Type.Size(), s.config.PtrSize)
}
s.rtcall(ir.Syms.CheckPtrAlignment, true, nil, v, s.reflectType(elem), count)
}
// reflectType returns an SSA value representing a pointer to typ's
// reflection type descriptor.
func (s *state) reflectType(typ *types.Type) *ssa.Value {
lsym := reflectdata.TypeLinksym(typ)
return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
}
func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
// Read sources of target function fn.
fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line())
if err != nil {
writer.Logf("cannot read sources for function %v: %v", fn, err)
}
// Read sources of inlined functions.
var inlFns []*ssa.FuncLines
for _, fi := range ssaDumpInlined {
elno := fi.Endlineno
fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
if err != nil {
writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
continue
}
inlFns = append(inlFns, fnLines)
}
sort.Sort(ssa.ByTopo(inlFns))
if targetFn != nil {
inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
}
writer.WriteSources("sources", inlFns)
}
func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
f, err := os.Open(os.ExpandEnv(file))
if err != nil {
return nil, err
}
defer f.Close()
var lines []string
ln := uint(1)
scanner := bufio.NewScanner(f)
for scanner.Scan() && ln <= end {
if ln >= start {
lines = append(lines, scanner.Text())
}
ln++
}
return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
}
// updateUnsetPredPos propagates the earliest-value position information for b
// towards all of b's predecessors that need a position, and recurses on that
// predecessor if its position is updated. B should have a non-empty position.
func (s *state) updateUnsetPredPos(b *ssa.Block) {
if b.Pos == src.NoXPos {
s.Fatalf("Block %s should have a position", b)
}
bestPos := src.NoXPos
for _, e := range b.Preds {
p := e.Block()
if !p.LackingPos() {
continue
}
if bestPos == src.NoXPos {
bestPos = b.Pos
for _, v := range b.Values {
if v.LackingPos() {
continue
}
if v.Pos != src.NoXPos {
// Assume values are still in roughly textual order;
// TODO: could also seek minimum position?
bestPos = v.Pos
break
}
}
}
p.Pos = bestPos
s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
}
}
// Information about each open-coded defer.
type openDeferInfo struct {
// The node representing the call of the defer
n *ir.CallExpr
// If defer call is closure call, the address of the argtmp where the
// closure is stored.
closure *ssa.Value
// The node representing the argtmp where the closure is stored - used for
// function, method, or interface call, to store a closure that panic
// processing can use for this defer.
closureNode *ir.Name
}
type state struct {
// configuration (arch) information
config *ssa.Config
// function we're building
f *ssa.Func
// Node for function
curfn *ir.Func
// labels in f
labels map[string]*ssaLabel
// unlabeled break and continue statement tracking
breakTo *ssa.Block // current target for plain break statement
continueTo *ssa.Block // current target for plain continue statement
// current location where we're interpreting the AST
curBlock *ssa.Block
// variable assignments in the current block (map from variable symbol to ssa value)
// *Node is the unique identifier (an ONAME Node) for the variable.
// TODO: keep a single varnum map, then make all of these maps slices instead?
vars map[ir.Node]*ssa.Value
// fwdVars are variables that are used before they are defined in the current block.
// This map exists just to coalesce multiple references into a single FwdRef op.
// *Node is the unique identifier (an ONAME Node) for the variable.
fwdVars map[ir.Node]*ssa.Value
// all defined variables at the end of each block. Indexed by block ID.
defvars []map[ir.Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables on the stack.
decladdrs map[*ir.Name]*ssa.Value
// starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value
sp *ssa.Value
sb *ssa.Value
// value representing address of where deferBits autotmp is stored
deferBitsAddr *ssa.Value
deferBitsTemp *ir.Name
// line number stack. The current line number is top of stack
line []src.XPos
// the last line number processed; it may have been popped
lastPos src.XPos
// list of panic calls by function name and line number.
// Used to deduplicate panic calls.
panics map[funcLine]*ssa.Block
cgoUnsafeArgs bool
hasdefer bool // whether the function contains a defer statement
softFloat bool
hasOpenDefers bool // whether we are doing open-coded defers
checkPtrEnabled bool // whether to insert checkptr instrumentation
// If doing open-coded defers, list of info about the defer calls in
// scanning order. Hence, at exit we should run these defers in reverse
// order of this list
openDefers []*openDeferInfo
// For open-coded defers, this is the beginning and end blocks of the last
// defer exit code that we have generated so far. We use these to share
// code between exits if the shareDeferExits option (disabled by default)
// is on.
lastDeferExit *ssa.Block // Entry block of last defer exit code we generated
lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
lastDeferCount int // Number of defers encountered at that point
prevCall *ssa.Value // the previous call; use this to tie results to the call op.
}
type funcLine struct {
f *obj.LSym
base *src.PosBase
line uint
}
type ssaLabel struct {
target *ssa.Block // block identified by this label
breakTarget *ssa.Block // block to break to in control flow node identified by this label
continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}
// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
lab := s.labels[sym.Name]
if lab == nil {
lab = new(ssaLabel)
s.labels[sym.Name] = lab
}
return lab
}
func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
func ssaMarker(name string) *ir.Name {
return typecheck.NewName(&types.Sym{Name: name})
}
var (
// marker node for the memory variable
memVar = ssaMarker("mem")
// marker nodes for temporary variables
ptrVar = ssaMarker("ptr")
lenVar = ssaMarker("len")
newlenVar = ssaMarker("newlen")
capVar = ssaMarker("cap")
typVar = ssaMarker("typ")
okVar = ssaMarker("ok")
deferBitsVar = ssaMarker("deferBits")
)
// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
if s.curBlock != nil {
s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
}
s.curBlock = b
s.vars = map[ir.Node]*ssa.Value{}
for n := range s.fwdVars {
delete(s.fwdVars, n)
}
}
// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
b := s.curBlock
if b == nil {
return nil
}
for len(s.defvars) <= int(b.ID) {
s.defvars = append(s.defvars, nil)
}
s.defvars[b.ID] = s.vars
s.curBlock = nil
s.vars = nil
if b.LackingPos() {
// Empty plain blocks get the line of their successor (handled after all blocks created),
// except for increment blocks in For statements (handled in ssa conversion of OFOR),
// and for blocks ending in GOTO/BREAK/CONTINUE.
b.Pos = src.NoXPos
} else {
b.Pos = s.lastPos
}
return b
}
// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
if !line.IsKnown() {
// The frontend may emit a node with a missing line number;
// use the parent's line number in this case.
line = s.peekPos()
if base.Flag.K != 0 {
base.Warn("buildssa: unknown position (line 0)")
}
} else {
s.lastPos = line
}
s.line = append(s.line, line)
}
// popLine pops the top of the line number stack.
func (s *state) popLine() {
s.line = s.line[:len(s.line)-1]
}
// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
return s.line[len(s.line)-1]
}
// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
return s.curBlock.NewValue0(s.peekPos(), op, t)
}
// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}
// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}
// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}
// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}
// newValue1Apos adds a new value with one argument and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value, isStmt bool) *ssa.Value {
if isStmt {
return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}
return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
}
// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}
// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}
// newValue2A adds a new value with two arguments and an aux value to the current block.
func (s *state) newValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
}
// newValue2Apos adds a new value with two arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
if isStmt {
return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
}
return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
}
// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}
// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}
// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue3Apos adds a new value with three arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
if isStmt {
return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
}
// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}
// newValue4I adds a new value with four arguments and an auxint value to the current block.
func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
}
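// entryBlock returns the block in which to place values that would normally
// live in the function's entry block. With -N it returns the current block
// instead, to keep the entry block from growing too large (see below).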
func (s *state) entryBlock() *ssa.Block {
b := s.f.Entry
if base.Flag.N > 0 && s.curBlock != nil {
// If optimizations are off, allocate in current block instead. Since with -N
// we're not doing the CSE or tighten passes, putting lots of stuff in the
// entry block leads to O(n^2) entries in the live value map during regalloc.
// See issue 45897.
b = s.curBlock
}
return b
}
// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
return s.entryBlock().NewValue0(src.NoXPos, op, t)
}
// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
return s.entryBlock().NewValue0A(src.NoXPos, op, t, aux)
}
// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue1(src.NoXPos, op, t, arg)
}
// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue1I(src.NoXPos, op, t, auxint, arg)
}
// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue1A(src.NoXPos, op, t, aux, arg)
}
// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue2(src.NoXPos, op, t, arg0, arg1)
}
// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
}
// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
return s.f.ConstSlice(t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
return s.f.ConstInterface(t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
return s.f.ConstEmptyString(t)
}
func (s *state) constBool(c bool) *ssa.Value {
return s.f.ConstBool(types.Types[types.TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
return s.f.ConstInt8(t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
return s.f.ConstInt16(t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
return s.f.ConstInt32(t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
return s.f.ConstInt64(t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
return s.f.ConstFloat32(t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
return s.f.ConstFloat64(t, c)
}
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
if s.config.PtrSize == 8 {
return s.constInt64(t, c)
}
if int64(int32(c)) != c {
s.Fatalf("integer constant too big %d", c)
}
return s.constInt32(t, int32(c))
}
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
return s.f.ConstOffPtrSP(t, c, s.sp)
}
// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
// soft-float runtime function instead (when emitting soft-float code).
func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
if s.softFloat {
if c, ok := s.sfcall(op, arg); ok {
return c
}
}
return s.newValue1(op, t, arg)
}
func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
if s.softFloat {
if c, ok := s.sfcall(op, arg0, arg1); ok {
return c
}
}
return s.newValue2(op, t, arg0, arg1)
}
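// instrumentKind distinguishes the kinds of memory operation that can be
// instrumented: a read, a write, or a move.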
type instrumentKind uint8
const (
instrumentRead instrumentKind = iota
instrumentWrite
instrumentMove
)
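// instrument instruments a read or write of a value of type t at addr.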
func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) {
s.instrument2(t, addr, nil, kind)
}
// instrumentFields instruments a read/write operation on addr.
// If it is instrumenting for MSAN or ASAN and t is a struct type, it instruments
// operation for each field, instead of for the whole struct.
func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
if !(base.Flag.MSan || base.Flag.ASan) || !t.IsStruct() {
s.instrument(t, addr, kind)
return
}
for _, f := range t.Fields().Slice() {
if f.Sym.IsBlank() {
continue
}
offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), f.Offset, addr)
s.instrumentFields(f.Type, offptr, kind)
}
}
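// instrumentMove instruments a copy of a value of type t from src to dst.
// MSAN has a dedicated move callback; the other sanitizers see the copy as a
// read of src followed by a write of dst.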
func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
if base.Flag.MSan {
s.instrument2(t, dst, src, instrumentMove)
} else {
s.instrument(t, src, instrumentRead)
s.instrument(t, dst, instrumentWrite)
}
}
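// instrument2 emits the sanitizer runtime call (race, msan, or asan) for an
// operation of the given kind on addr (and addr2, for moves) of type t.
// It is a no-op if the function isn't instrumented, t has zero size, or addr
// is known to be sanitizer-safe.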
func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
if !s.curfn.InstrumentBody() {
return
}
w := t.Size()
if w == 0 {
return // can't race on zero-sized things
}
if ssa.IsSanitizerSafeAddr(addr) {
return
}
var fn *obj.LSym
needWidth := false
if addr2 != nil && kind != instrumentMove {
panic("instrument2: non-nil addr2 for non-move instrumentation")
}
if base.Flag.MSan {
switch kind {
case instrumentRead:
fn = ir.Syms.Msanread
case instrumentWrite:
fn = ir.Syms.Msanwrite
case instrumentMove:
fn = ir.Syms.Msanmove
default:
panic("unreachable")
}
needWidth = true
} else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 {
// for composite objects we have to write every address
// because a write might happen to any subobject.
// composites with only one element don't have subobjects, though.
switch kind {
case instrumentRead:
fn = ir.Syms.Racereadrange
case instrumentWrite:
fn = ir.Syms.Racewriterange
default:
panic("unreachable")
}
needWidth = true
} else if base.Flag.Race {
// for non-composite objects we can write just the start
// address, as any write must write the first byte.
switch kind {
case instrumentRead:
fn = ir.Syms.Raceread
case instrumentWrite:
fn = ir.Syms.Racewrite
default:
panic("unreachable")
}
} else if base.Flag.ASan {
switch kind {
case instrumentRead:
fn = ir.Syms.Asanread
case instrumentWrite:
fn = ir.Syms.Asanwrite
default:
panic("unreachable")
}
needWidth = true
} else {
panic("unreachable")
}
args := []*ssa.Value{addr}
if addr2 != nil {
args = append(args, addr2)
}
if needWidth {
args = append(args, s.constInt(types.Types[types.TUINTPTR], w))
}
s.rtcall(fn, true, nil, args...)
}
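// load loads a value of type t from src, instrumenting the read first.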
func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
s.instrumentFields(t, src, instrumentRead)
return s.rawLoad(t, src)
}
func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpLoad, t, src, s.mem())
}
func (s *state) store(t *types.Type, dst, val *ssa.Value) {
s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
}
func (s *state) zero(t *types.Type, dst *ssa.Value) {
s.instrument(t, dst, instrumentWrite)
store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
store.Aux = t
s.vars[memVar] = store
}
func (s *state) move(t *types.Type, dst, src *ssa.Value) {
s.instrumentMove(t, dst, src)
store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
store.Aux = t
s.vars[memVar] = store
}
// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l ir.Nodes) {
for _, n := range l {
s.stmt(n)
}
}
// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n ir.Node) {
if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) {
// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
s.pushLine(n.Pos())
defer s.popLine()
}
// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
// then this code is dead. Stop here.
if s.curBlock == nil && n.Op() != ir.OLABEL {
return
}
s.stmtList(n.Init())
switch n.Op() {
case ir.OBLOCK:
n := n.(*ir.BlockStmt)
s.stmtList(n.List)
// No-ops
case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
// Expression statements
case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
if ir.IsIntrinsicCall(n) {
s.intrinsicCall(n)
return
}
fallthrough
case ir.OCALLINTER:
n := n.(*ir.CallExpr)
s.callResult(n, callNormal)
if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PFUNC {
if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
n.X.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(m)
// TODO: never rewrite OPANIC to OCALLFUNC in the
// first place. Need to wait until all backends
// go through SSA.
}
}
case ir.ODEFER:
n := n.(*ir.GoDeferStmt)
if base.Debug.Defer > 0 {
var defertype string
if s.hasOpenDefers {
defertype = "open-coded"
} else if n.Esc() == ir.EscNever {
defertype = "stack-allocated"
} else {
defertype = "heap-allocated"
}
base.WarnfAt(n.Pos(), "%s defer", defertype)
}
if s.hasOpenDefers {
s.openDeferRecord(n.Call.(*ir.CallExpr))
} else {
d := callDefer
if n.Esc() == ir.EscNever {
d = callDeferStack
}
s.callResult(n.Call.(*ir.CallExpr), d)
}
case ir.OGO:
n := n.(*ir.GoDeferStmt)
s.callResult(n.Call.(*ir.CallExpr), callGo)
case ir.OAS2DOTTYPE:
n := n.(*ir.AssignListStmt)
var res, resok *ssa.Value
if n.Rhs[0].Op() == ir.ODOTTYPE2 {
res, resok = s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
} else {
res, resok = s.dynamicDottype(n.Rhs[0].(*ir.DynamicTypeAssertExpr), true)
}
deref := false
if !TypeOK(n.Rhs[0].Type()) {
if res.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
mem := s.mem()
if mem.Op == ssa.OpVarKill {
mem = mem.Args[0]
}
if res.Args[1] != mem {
s.Fatalf("memory no longer live from 2-result dottype load")
}
deref = true
res = res.Args[0]
}
s.assign(n.Lhs[0], res, deref, 0)
s.assign(n.Lhs[1], resok, false, 0)
return
case ir.OAS2FUNC:
// We come here only when it is an intrinsic call returning two values.
n := n.(*ir.AssignListStmt)
call := n.Rhs[0].(*ir.CallExpr)
if !ir.IsIntrinsicCall(call) {
s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call)
}
v := s.intrinsicCall(call)
v1 := s.newValue1(ssa.OpSelect0, n.Lhs[0].Type(), v)
v2 := s.newValue1(ssa.OpSelect1, n.Lhs[1].Type(), v)
s.assign(n.Lhs[0], v1, false, 0)
s.assign(n.Lhs[1], v2, false, 0)
return
case ir.ODCL:
n := n.(*ir.Decl)
if v := n.X; v.Esc() == ir.EscHeap {
s.newHeapaddr(v)
}
case ir.OLABEL:
n := n.(*ir.LabelStmt)
sym := n.Label
lab := s.label(sym)
// The label might already have a target block via a goto.
if lab.target == nil {
lab.target = s.f.NewBlock(ssa.BlockPlain)
}
// Go to that label.
// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
if s.curBlock != nil {
b := s.endBlock()
b.AddEdgeTo(lab.target)
}
s.startBlock(lab.target)
case ir.OGOTO:
n := n.(*ir.BranchStmt)
sym := n.Label
lab := s.label(sym)
if lab.target == nil {
lab.target = s.f.NewBlock(ssa.BlockPlain)
}
b := s.endBlock()
b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
b.AddEdgeTo(lab.target)
case ir.OAS:
n := n.(*ir.AssignStmt)
if n.X == n.Y && n.X.Op() == ir.ONAME {
// An x=x assignment. No point in doing anything
// here. In addition, skipping this assignment
// prevents generating:
// VARDEF x
// COPY x -> x
// which is bad because x is incorrectly considered
// dead before the vardef. See issue #14904.
return
}
// Evaluate RHS.
rhs := n.Y
if rhs != nil {
switch rhs.Op() {
case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !ir.IsZero(rhs) {
s.Fatalf("literal with nonzero value in SSA: %v", rhs)
}
rhs = nil
case ir.OAPPEND:
rhs := rhs.(*ir.CallExpr)
// Check whether we're writing the result of an append back to the same slice.
// If so, we handle it specially to avoid write barriers on the fast
// (non-growth) path.
if !ir.SameSafeExpr(n.X, rhs.Args[0]) || base.Flag.N != 0 {
break
}
// If the slice can be SSA'd, it'll be on the stack,
// so there will be no write barriers,
// so there's no need to attempt to prevent them.
if s.canSSA(n.X) {
if base.Debug.Append > 0 { // replicating old diagnostic message
base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
}
break
}
if base.Debug.Append > 0 {
base.WarnfAt(n.Pos(), "append: len-only update")
}
s.append(rhs, true)
return
}
}
if ir.IsBlank(n.X) {
// _ = rhs
// Just evaluate rhs for side-effects.
if rhs != nil {
s.expr(rhs)
}
return
}
var t *types.Type
if n.Y != nil {
t = n.Y.Type()
} else {
t = n.X.Type()
}
var r *ssa.Value
deref := !TypeOK(t)
if deref {
if rhs == nil {
r = nil // Signal assign to use OpZero.
} else {
r = s.addr(rhs)
}
} else {
if rhs == nil {
r = s.zeroVal(t)
} else {
r = s.expr(rhs)
}
}
var skip skipMask
if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && ir.SameSafeExpr(rhs.(*ir.SliceExpr).X, n.X) {
// We're assigning a slicing operation back to its source.
// Don't write back fields we aren't changing. See issue #14855.
rhs := rhs.(*ir.SliceExpr)
i, j, k := rhs.Low, rhs.High, rhs.Max
if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) {
// [0:...] is the same as [:...]
i = nil
}
// TODO: detect defaults for len/cap also.
// Currently doesn't really work because (*p)[:len(*p)] appears here as:
// tmp = len(*p)
// (*p)[:tmp]
//if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) {
// j = nil
//}
//if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) {
// k = nil
//}
if i == nil {
skip |= skipPtr
if j == nil {
skip |= skipLen
}
if k == nil {
skip |= skipCap
}
}
}
s.assign(n.X, r, deref, skip)
case ir.OIF:
n := n.(*ir.IfStmt)
if ir.IsConst(n.Cond, constant.Bool) {
s.stmtList(n.Cond.Init())
if ir.BoolVal(n.Cond) {
s.stmtList(n.Body)
} else {
s.stmtList(n.Else)
}
break
}
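// General case: branch on the condition; each arm, when present,
// rejoins at bEnd (an absent arm maps directly to bEnd):
//
//   condBranch(Cond) -> bThen, bElse
//   bThen: Body; goto bEnd
//   bElse: Else; goto bEnd
//   bEnd: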
bEnd := s.f.NewBlock(ssa.BlockPlain)
var likely int8
if n.Likely {
likely = 1
}
var bThen *ssa.Block
if len(n.Body) != 0 {
bThen = s.f.NewBlock(ssa.BlockPlain)
} else {
bThen = bEnd
}
var bElse *ssa.Block
if len(n.Else) != 0 {
bElse = s.f.NewBlock(ssa.BlockPlain)
} else {
bElse = bEnd
}
s.condBranch(n.Cond, bThen, bElse, likely)
if len(n.Body) != 0 {
s.startBlock(bThen)
s.stmtList(n.Body)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
if len(n.Else) != 0 {
s.startBlock(bElse)
s.stmtList(n.Else)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
s.startBlock(bEnd)
case ir.ORETURN:
n := n.(*ir.ReturnStmt)
s.stmtList(n.Results)
b := s.exit()
b.Pos = s.lastPos.WithIsStmt()
case ir.OTAILCALL:
n := n.(*ir.TailCallStmt)
s.callResult(n.Call, callTail)
call := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockRetJmp // could use BlockExit. BlockRetJmp is mostly for clarity.
b.SetControl(call)
case ir.OCONTINUE, ir.OBREAK:
n := n.(*ir.BranchStmt)
var to *ssa.Block
if n.Label == nil {
// plain break/continue
switch n.Op() {
case ir.OCONTINUE:
to = s.continueTo
case ir.OBREAK:
to = s.breakTo
}
} else {
// labeled break/continue; look up the target
sym := n.Label
lab := s.label(sym)
switch n.Op() {
case ir.OCONTINUE:
to = lab.continueTarget
case ir.OBREAK:
to = lab.breakTarget
}
}
b := s.endBlock()
b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
b.AddEdgeTo(to)
case ir.OFOR, ir.OFORUNTIL:
// OFOR: for Ninit; Cond; Post { Body }
// cond (Cond); body (Body); incr (Post)
//
// OFORUNTIL: for Ninit; Cond; Post; Late { Body }
// => body: { Body }; incr: Post; if Cond { lateincr: Late; goto body }; end:
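// For OFOR, the generated CFG is:
//
//   ... -> bCond
//   bCond: if Cond -> bBody else bEnd (an absent Cond branches straight to bBody)
//   bBody: Body -> bIncr
//   bIncr: Post -> bCond
//   bEnd:
//
// For OFORUNTIL, entry jumps straight to bBody, and bCond is repurposed as the
// late-increment block.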
n := n.(*ir.ForStmt)
bCond := s.f.NewBlock(ssa.BlockPlain)
bBody := s.f.NewBlock(ssa.BlockPlain)
bIncr := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
// ensure empty for loops have correct position; issue #30167
bBody.Pos = n.Pos()
// first, jump to condition test (OFOR) or body (OFORUNTIL)
b := s.endBlock()
if n.Op() == ir.OFOR {
b.AddEdgeTo(bCond)
// generate code to test condition
s.startBlock(bCond)
if n.Cond != nil {
s.condBranch(n.Cond, bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
b.AddEdgeTo(bBody)
}
} else {
b.AddEdgeTo(bBody)
}
// set up for continue/break in body
prevContinue := s.continueTo
prevBreak := s.breakTo
s.continueTo = bIncr
s.breakTo = bEnd
var lab *ssaLabel
if sym := n.Label; sym != nil {
// labeled for loop
lab = s.label(sym)
lab.continueTarget = bIncr
lab.breakTarget = bEnd
}
// generate body
s.startBlock(bBody)
s.stmtList(n.Body)
// tear down continue/break
s.continueTo = prevContinue
s.breakTo = prevBreak
if lab != nil {
lab.continueTarget = nil
lab.breakTarget = nil
}
// done with body, goto incr
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bIncr)
}
// generate incr (and, for OFORUNTIL, condition)
s.startBlock(bIncr)
if n.Post != nil {
s.stmt(n.Post)
}
if n.Op() == ir.OFOR {
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bCond)
// It can happen that bIncr ends in a block containing only VARKILL,
// and that muddles the debugging experience.
if b.Pos == src.NoXPos {
b.Pos = bCond.Pos
}
}
} else {
// bCond is unused in OFORUNTIL, so repurpose it.
bLateIncr := bCond
// test condition
s.condBranch(n.Cond, bLateIncr, bEnd, 1)
// generate late increment
s.startBlock(bLateIncr)
s.stmtList(n.Late)
s.endBlock().AddEdgeTo(bBody)
}
s.startBlock(bEnd)
case ir.OSWITCH, ir.OSELECT:
// These have been mostly rewritten by the front end into their Compiled fields.
// Our main task is to correctly hook up any break statements.
bEnd := s.f.NewBlock(ssa.BlockPlain)
prevBreak := s.breakTo
s.breakTo = bEnd
var sym *types.Sym
var body ir.Nodes
if n.Op() == ir.OSWITCH {
n := n.(*ir.SwitchStmt)
sym = n.Label
body = n.Compiled
} else {
n := n.(*ir.SelectStmt)
sym = n.Label
body = n.Compiled
}
var lab *ssaLabel
if sym != nil {
// labeled
lab = s.label(sym)
lab.breakTarget = bEnd
}
// generate body code
s.stmtList(body)
s.breakTo = prevBreak
if lab != nil {
lab.breakTarget = nil
}
// walk adds explicit OBREAK nodes to the end of all reachable code paths.
// If we still have a current block here, then mark it unreachable.
if s.curBlock != nil {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(m)
}
s.startBlock(bEnd)
case ir.OVARDEF:
n := n.(*ir.UnaryExpr)
if !s.canSSA(n.X) {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
}
case ir.OVARKILL:
// Insert a varkill op to record that a variable is no longer live.
// We only care about liveness info at call sites, so putting the
// varkill in the store chain is enough to keep it correctly ordered
// with respect to call ops.
n := n.(*ir.UnaryExpr)
if !s.canSSA(n.X) {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
}
case ir.OVARLIVE:
// Insert a varlive op to record that a variable is still live.
n := n.(*ir.UnaryExpr)
v := n.X.(*ir.Name)
if !v.Addrtaken() {
s.Fatalf("VARLIVE variable %v must have Addrtaken set", v)
}
switch v.Class {
case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
default:
s.Fatalf("VARLIVE variable %v must be Auto or Arg", v)
}
s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem())
case ir.OCHECKNIL:
n := n.(*ir.UnaryExpr)
p := s.expr(n.X)
s.nilCheck(p)
case ir.OINLMARK:
n := n.(*ir.InlineMarkStmt)
s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Index, s.mem())
default:
s.Fatalf("unhandled stmt %v", n.Op())
}
}
// If true, share as many open-coded defer exits as possible (with the downside of
// worse line-number information).
const shareDeferExits = false
// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
if s.hasdefer {
if s.hasOpenDefers {
if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
if s.curBlock.Kind != ssa.BlockPlain {
panic("Block for an exit should be BlockPlain")
}
s.curBlock.AddEdgeTo(s.lastDeferExit)
s.endBlock()
return s.lastDeferFinalBlock
}
s.openDeferExit()
} else {
s.rtcall(ir.Syms.Deferreturn, true, nil)
}
}
var b *ssa.Block
var m *ssa.Value
// Do actual return.
// These currently turn into self-copies (in many cases).
resultFields := s.curfn.Type().Results().FieldSlice()
results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1)
m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
// Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
for i, f := range resultFields {
n := f.Nname.(*ir.Name)
if s.canSSA(n) { // result is in some SSA variable
if !n.IsOutputParamInRegisters() {
// We are about to store to the result slot.
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
}
results[i] = s.variable(n, n.Type())
} else if !n.OnStack() { // result is actually heap allocated
// We are about to copy the in-heap result to the result slot.
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
ha := s.expr(n.Heapaddr)
s.instrumentFields(n.Type(), ha, instrumentRead)
results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem())
} else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA.
// Before the register ABI this ought to be a self-move, home=dest.
// With the register ABI, it's still a self-move if the parameter is on the stack (i.e., too big or overflowed).
// No VarDef, as the result slot is already holding a live value.
results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem())
}
}
// Run exit code. Today, this is just racefuncexit, in -race mode.
// TODO(register args) this seems risky here with a register-ABI, but not clear it is right to do it earlier either.
// Spills in register allocation might just fix it.
s.stmtList(s.curfn.Exit)
results[len(results)-1] = s.mem()
m.AddArgs(results...)
b = s.endBlock()
b.Kind = ssa.BlockRet
b.SetControl(m)
if s.hasdefer && s.hasOpenDefers {
s.lastDeferFinalBlock = b
}
return b
}
type opAndType struct {
op ir.Op
etype types.Kind
}
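// opToSSA maps an IR operation applied to a concrete operand kind to the
// generic SSA op that implements it.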
var opToSSA = map[opAndType]ssa.Op{
opAndType{ir.OADD, types.TINT8}: ssa.OpAdd8,
opAndType{ir.OADD, types.TUINT8}: ssa.OpAdd8,
opAndType{ir.OADD, types.TINT16}: ssa.OpAdd16,
opAndType{ir.OADD, types.TUINT16}: ssa.OpAdd16,
opAndType{ir.OADD, types.TINT32}: ssa.OpAdd32,
opAndType{ir.OADD, types.TUINT32}: ssa.OpAdd32,
opAndType{ir.OADD, types.TINT64}: ssa.OpAdd64,
opAndType{ir.OADD, types.TUINT64}: ssa.OpAdd64,
opAndType{ir.OADD, types.TFLOAT32}: ssa.OpAdd32F,
opAndType{ir.OADD, types.TFLOAT64}: ssa.OpAdd64F,
opAndType{ir.OSUB, types.TINT8}: ssa.OpSub8,
opAndType{ir.OSUB, types.TUINT8}: ssa.OpSub8,
opAndType{ir.OSUB, types.TINT16}: ssa.OpSub16,
opAndType{ir.OSUB, types.TUINT16}: ssa.OpSub16,
opAndType{ir.OSUB, types.TINT32}: ssa.OpSub32,
opAndType{ir.OSUB, types.TUINT32}: ssa.OpSub32,
opAndType{ir.OSUB, types.TINT64}: ssa.OpSub64,
opAndType{ir.OSUB, types.TUINT64}: ssa.OpSub64,
opAndType{ir.OSUB, types.TFLOAT32}: ssa.OpSub32F,
opAndType{ir.OSUB, types.TFLOAT64}: ssa.OpSub64F,
opAndType{ir.ONOT, types.TBOOL}: ssa.OpNot,
opAndType{ir.ONEG, types.TINT8}: ssa.OpNeg8,
opAndType{ir.ONEG, types.TUINT8}: ssa.OpNeg8,
opAndType{ir.ONEG, types.TINT16}: ssa.OpNeg16,
opAndType{ir.ONEG, types.TUINT16}: ssa.OpNeg16,
opAndType{ir.ONEG, types.TINT32}: ssa.OpNeg32,
opAndType{ir.ONEG, types.TUINT32}: ssa.OpNeg32,
opAndType{ir.ONEG, types.TINT64}: ssa.OpNeg64,
opAndType{ir.ONEG, types.TUINT64}: ssa.OpNeg64,
opAndType{ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F,
opAndType{ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F,
opAndType{ir.OBITNOT, types.TINT8}: ssa.OpCom8,
opAndType{ir.OBITNOT, types.TUINT8}: ssa.OpCom8,
opAndType{ir.OBITNOT, types.TINT16}: ssa.OpCom16,
opAndType{ir.OBITNOT, types.TUINT16}: ssa.OpCom16,
opAndType{ir.OBITNOT, types.TINT32}: ssa.OpCom32,
opAndType{ir.OBITNOT, types.TUINT32}: ssa.OpCom32,
opAndType{ir.OBITNOT, types.TINT64}: ssa.OpCom64,
opAndType{ir.OBITNOT, types.TUINT64}: ssa.OpCom64,
opAndType{ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag,
opAndType{ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag,
opAndType{ir.OREAL, types.TCOMPLEX64}: ssa.OpComplexReal,
opAndType{ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal,
opAndType{ir.OMUL, types.TINT8}: ssa.OpMul8,
opAndType{ir.OMUL, types.TUINT8}: ssa.OpMul8,
opAndType{ir.OMUL, types.TINT16}: ssa.OpMul16,
opAndType{ir.OMUL, types.TUINT16}: ssa.OpMul16,
opAndType{ir.OMUL, types.TINT32}: ssa.OpMul32,
opAndType{ir.OMUL, types.TUINT32}: ssa.OpMul32,
opAndType{ir.OMUL, types.TINT64}: ssa.OpMul64,
opAndType{ir.OMUL, types.TUINT64}: ssa.OpMul64,
opAndType{ir.OMUL, types.TFLOAT32}: ssa.OpMul32F,
opAndType{ir.OMUL, types.TFLOAT64}: ssa.OpMul64F,
opAndType{ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F,
opAndType{ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F,
opAndType{ir.ODIV, types.TINT8}: ssa.OpDiv8,
opAndType{ir.ODIV, types.TUINT8}: ssa.OpDiv8u,
opAndType{ir.ODIV, types.TINT16}: ssa.OpDiv16,
opAndType{ir.ODIV, types.TUINT16}: ssa.OpDiv16u,
opAndType{ir.ODIV, types.TINT32}: ssa.OpDiv32,
opAndType{ir.ODIV, types.TUINT32}: ssa.OpDiv32u,
opAndType{ir.ODIV, types.TINT64}: ssa.OpDiv64,
opAndType{ir.ODIV, types.TUINT64}: ssa.OpDiv64u,
opAndType{ir.OMOD, types.TINT8}: ssa.OpMod8,
opAndType{ir.OMOD, types.TUINT8}: ssa.OpMod8u,
opAndType{ir.OMOD, types.TINT16}: ssa.OpMod16,
opAndType{ir.OMOD, types.TUINT16}: ssa.OpMod16u,
opAndType{ir.OMOD, types.TINT32}: ssa.OpMod32,
opAndType{ir.OMOD, types.TUINT32}: ssa.OpMod32u,
opAndType{ir.OMOD, types.TINT64}: ssa.OpMod64,
opAndType{ir.OMOD, types.TUINT64}: ssa.OpMod64u,
opAndType{ir.OAND, types.TINT8}: ssa.OpAnd8,
opAndType{ir.OAND, types.TUINT8}: ssa.OpAnd8,
opAndType{ir.OAND, types.TINT16}: ssa.OpAnd16,
opAndType{ir.OAND, types.TUINT16}: ssa.OpAnd16,
opAndType{ir.OAND, types.TINT32}: ssa.OpAnd32,
opAndType{ir.OAND, types.TUINT32}: ssa.OpAnd32,
opAndType{ir.OAND, types.TINT64}: ssa.OpAnd64,
opAndType{ir.OAND, types.TUINT64}: ssa.OpAnd64,
opAndType{ir.OOR, types.TINT8}: ssa.OpOr8,
opAndType{ir.OOR, types.TUINT8}: ssa.OpOr8,
opAndType{ir.OOR, types.TINT16}: ssa.OpOr16,
opAndType{ir.OOR, types.TUINT16}: ssa.OpOr16,
opAndType{ir.OOR, types.TINT32}: ssa.OpOr32,
opAndType{ir.OOR, types.TUINT32}: ssa.OpOr32,
opAndType{ir.OOR, types.TINT64}: ssa.OpOr64,
opAndType{ir.OOR, types.TUINT64}: ssa.OpOr64,
opAndType{ir.OXOR, types.TINT8}: ssa.OpXor8,
opAndType{ir.OXOR, types.TUINT8}: ssa.OpXor8,
opAndType{ir.OXOR, types.TINT16}: ssa.OpXor16,
opAndType{ir.OXOR, types.TUINT16}: ssa.OpXor16,
opAndType{ir.OXOR, types.TINT32}: ssa.OpXor32,
opAndType{ir.OXOR, types.TUINT32}: ssa.OpXor32,
opAndType{ir.OXOR, types.TINT64}: ssa.OpXor64,
opAndType{ir.OXOR, types.TUINT64}: ssa.OpXor64,
opAndType{ir.OEQ, types.TBOOL}: ssa.OpEqB,
opAndType{ir.OEQ, types.TINT8}: ssa.OpEq8,
opAndType{ir.OEQ, types.TUINT8}: ssa.OpEq8,
opAndType{ir.OEQ, types.TINT16}: ssa.OpEq16,
opAndType{ir.OEQ, types.TUINT16}: ssa.OpEq16,
opAndType{ir.OEQ, types.TINT32}: ssa.OpEq32,
opAndType{ir.OEQ, types.TUINT32}: ssa.OpEq32,
opAndType{ir.OEQ, types.TINT64}: ssa.OpEq64,
opAndType{ir.OEQ, types.TUINT64}: ssa.OpEq64,
opAndType{ir.OEQ, types.TINTER}: ssa.OpEqInter,
opAndType{ir.OEQ, types.TSLICE}: ssa.OpEqSlice,
opAndType{ir.OEQ, types.TFUNC}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TMAP}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TCHAN}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TPTR}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TFLOAT64}: ssa.OpEq64F,
opAndType{ir.OEQ, types.TFLOAT32}: ssa.OpEq32F,
opAndType{ir.ONE, types.TBOOL}: ssa.OpNeqB,
opAndType{ir.ONE, types.TINT8}: ssa.OpNeq8,
opAndType{ir.ONE, types.TUINT8}: ssa.OpNeq8,
opAndType{ir.ONE, types.TINT16}: ssa.OpNeq16,
opAndType{ir.ONE, types.TUINT16}: ssa.OpNeq16,
opAndType{ir.ONE, types.TINT32}: ssa.OpNeq32,
opAndType{ir.ONE, types.TUINT32}: ssa.OpNeq32,
opAndType{ir.ONE, types.TINT64}: ssa.OpNeq64,
opAndType{ir.ONE, types.TUINT64}: ssa.OpNeq64,
opAndType{ir.ONE, types.TINTER}: ssa.OpNeqInter,
opAndType{ir.ONE, types.TSLICE}: ssa.OpNeqSlice,
opAndType{ir.ONE, types.TFUNC}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TMAP}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TCHAN}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TPTR}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TFLOAT64}: ssa.OpNeq64F,
opAndType{ir.ONE, types.TFLOAT32}: ssa.OpNeq32F,
opAndType{ir.OLT, types.TINT8}: ssa.OpLess8,
opAndType{ir.OLT, types.TUINT8}: ssa.OpLess8U,
opAndType{ir.OLT, types.TINT16}: ssa.OpLess16,
opAndType{ir.OLT, types.TUINT16}: ssa.OpLess16U,
opAndType{ir.OLT, types.TINT32}: ssa.OpLess32,
opAndType{ir.OLT, types.TUINT32}: ssa.OpLess32U,
opAndType{ir.OLT, types.TINT64}: ssa.OpLess64,
opAndType{ir.OLT, types.TUINT64}: ssa.OpLess64U,
opAndType{ir.OLT, types.TFLOAT64}: ssa.OpLess64F,
opAndType{ir.OLT, types.TFLOAT32}: ssa.OpLess32F,
opAndType{ir.OLE, types.TINT8}: ssa.OpLeq8,
opAndType{ir.OLE, types.TUINT8}: ssa.OpLeq8U,
opAndType{ir.OLE, types.TINT16}: ssa.OpLeq16,
opAndType{ir.OLE, types.TUINT16}: ssa.OpLeq16U,
opAndType{ir.OLE, types.TINT32}: ssa.OpLeq32,
opAndType{ir.OLE, types.TUINT32}: ssa.OpLeq32U,
opAndType{ir.OLE, types.TINT64}: ssa.OpLeq64,
opAndType{ir.OLE, types.TUINT64}: ssa.OpLeq64U,
opAndType{ir.OLE, types.TFLOAT64}: ssa.OpLeq64F,
opAndType{ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
}
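// concreteEtype returns t's kind, with the platform-sized kinds int, uint,
// and uintptr resolved to their concrete 32- or 64-bit equivalents
// (e.g. TINT becomes TINT64 when the pointer size is 8).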
func (s *state) concreteEtype(t *types.Type) types.Kind {
e := t.Kind()
switch e {
default:
return e
case types.TINT:
if s.config.PtrSize == 8 {
return types.TINT64
}
return types.TINT32
case types.TUINT:
if s.config.PtrSize == 8 {
return types.TUINT64
}
return types.TUINT32
case types.TUINTPTR:
if s.config.PtrSize == 8 {
return types.TUINT64
}
return types.TUINT32
}
}
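// ssaOp returns the generic SSA op implementing op on values of type t;
// for example, ir.OADD on int32 maps to ssa.OpAdd32.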
func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op {
etype := s.concreteEtype(t)
x, ok := opToSSA[opAndType{op, etype}]
if !ok {
s.Fatalf("unhandled binary op %v %s", op, etype)
}
return x
}
type opAndTwoTypes struct {
op ir.Op
etype1 types.Kind
etype2 types.Kind
}
type twoTypes struct {
etype1 types.Kind
etype2 types.Kind
}
type twoOpsAndType struct {
op1 ssa.Op
op2 ssa.Op
intermediateType types.Kind
}
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
twoTypes{types.TINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64},
twoTypes{types.TINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64},
twoTypes{types.TFLOAT32, types.TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
twoTypes{types.TFLOAT32, types.TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
twoTypes{types.TFLOAT32, types.TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32},
twoTypes{types.TFLOAT32, types.TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64},
twoTypes{types.TFLOAT64, types.TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
twoTypes{types.TFLOAT64, types.TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
twoTypes{types.TFLOAT64, types.TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32},
twoTypes{types.TFLOAT64, types.TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64},
// unsigned
twoTypes{types.TUINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TUINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned
twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto32F, branchy code expansion instead
twoTypes{types.TUINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TUINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned
twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto64F, branchy code expansion instead
twoTypes{types.TFLOAT32, types.TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
twoTypes{types.TFLOAT32, types.TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead
twoTypes{types.TFLOAT64, types.TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
twoTypes{types.TFLOAT64, types.TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead
// float
twoTypes{types.TFLOAT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
twoTypes{types.TFLOAT64, types.TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64},
twoTypes{types.TFLOAT32, types.TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32},
twoTypes{types.TFLOAT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64},
}
// fpConvOpToSSA32 is used only on 32-bit architectures and records only the
// cases that differ there: on 32-bit, don't use int64<->float conversions for uint32.
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32},
twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32},
twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32},
twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32},
}
// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64},
twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64},
twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64},
twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64},
}
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8,
opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8,
opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT16}: ssa.OpLsh8x16,
opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16,
opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32,
opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32,
opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64,
opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64,
opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8,
opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8,
opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16,
opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16,
opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32,
opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32,
opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64,
opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64,
opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8,
opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8,
opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16,
opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16,
opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32,
opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32,
opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64,
opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64,
opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8,
opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8,
opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16,
opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16,
opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32,
opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32,
opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64,
opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64,
opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8,
opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT8}: ssa.OpRsh8Ux8,
opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16,
opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16,
opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32,
opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32,
opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64,
opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64,
opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8,
opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8,
opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16,
opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16,
opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32,
opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32,
opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64,
opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64,
opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8,
opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8,
opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16,
opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16,
opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32,
opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32,
opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64,
opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64,
opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8,
opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8,
opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16,
opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16,
opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32,
opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32,
opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64,
opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64,
}
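// ssaShiftOp returns the SSA op for shifting a value of type t by a count of
// type u.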
func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
etype1 := s.concreteEtype(t)
etype2 := s.concreteEtype(u)
x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
if !ok {
s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
}
return x
}
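// conv converts v, which has type ft, to type tt, emitting whatever
// conversion ops (or soft-float runtime calls) the target requires.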
func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value {
if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
return s.newValue1(ssa.OpCopy, tt, v)
}
if ft.IsInteger() && tt.IsInteger() {
var op ssa.Op
if tt.Size() == ft.Size() {
op = ssa.OpCopy
} else if tt.Size() < ft.Size() {
// truncation
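// The (from, to) sizes in bytes are encoded as a two-digit number:
// e.g. case 84 means truncating from 8 bytes to 4 bytes.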
switch 10*ft.Size() + tt.Size() {
case 21:
op = ssa.OpTrunc16to8
case 41:
op = ssa.OpTrunc32to8
case 42:
op = ssa.OpTrunc32to16
case 81:
op = ssa.OpTrunc64to8
case 82:
op = ssa.OpTrunc64to16
case 84:
op = ssa.OpTrunc64to32
default:
s.Fatalf("weird integer truncation %v -> %v", ft, tt)
}
} else if ft.IsSigned() {
// sign extension
switch 10*ft.Size() + tt.Size() {
case 12:
op = ssa.OpSignExt8to16
case 14:
op = ssa.OpSignExt8to32
case 18:
op = ssa.OpSignExt8to64
case 24:
op = ssa.OpSignExt16to32
case 28:
op = ssa.OpSignExt16to64
case 48:
op = ssa.OpSignExt32to64
default:
s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
}
} else {
// zero extension
switch 10*ft.Size() + tt.Size() {
case 12:
op = ssa.OpZeroExt8to16
case 14:
op = ssa.OpZeroExt8to32
case 18:
op = ssa.OpZeroExt8to64
case 24:
op = ssa.OpZeroExt16to32
case 28:
op = ssa.OpZeroExt16to64
case 48:
op = ssa.OpZeroExt32to64
default:
s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
}
}
return s.newValue1(op, tt, v)
}
if ft.IsFloat() || tt.IsFloat() {
conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
// tt is float32 or float64, and ft is also unsigned
if tt.Size() == 4 {
return s.uint32Tofloat32(n, v, ft, tt)
}
if tt.Size() == 8 {
return s.uint32Tofloat64(n, v, ft, tt)
}
} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
// ft is float32 or float64, and tt is unsigned integer
if ft.Size() == 4 {
return s.float32ToUint32(n, v, ft, tt)
}
if ft.Size() == 8 {
return s.float64ToUint32(n, v, ft, tt)
}
}
}
if !ok {
s.Fatalf("weird float conversion %v -> %v", ft, tt)
}
op1, op2, it := conv.op1, conv.op2, conv.intermediateType
if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
// normal case, not tripping over unsigned 64
if op1 == ssa.OpCopy {
if op2 == ssa.OpCopy {
return v
}
return s.newValueOrSfCall1(op2, tt, v)
}
if op2 == ssa.OpCopy {
return s.newValueOrSfCall1(op1, tt, v)
}
return s.newValueOrSfCall1(op2, tt, s.newValueOrSfCall1(op1, types.Types[it], v))
}
// Tricky 64-bit unsigned cases.
if ft.IsInteger() {
// tt is float32 or float64, and ft is also unsigned
if tt.Size() == 4 {
return s.uint64Tofloat32(n, v, ft, tt)
}
if tt.Size() == 8 {
return s.uint64Tofloat64(n, v, ft, tt)
}
s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
}
// ft is float32 or float64, and tt is unsigned integer
if ft.Size() == 4 {
return s.float32ToUint64(n, v, ft, tt)
}
if ft.Size() == 8 {
return s.float64ToUint64(n, v, ft, tt)
}
s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
return nil
}
if ft.IsComplex() && tt.IsComplex() {
var op ssa.Op
if ft.Size() == tt.Size() {
switch ft.Size() {
case 8:
op = ssa.OpRound32F
case 16:
op = ssa.OpRound64F
default:
s.Fatalf("weird complex conversion %v -> %v", ft, tt)
}
} else if ft.Size() == 8 && tt.Size() == 16 {
op = ssa.OpCvt32Fto64F
} else if ft.Size() == 16 && tt.Size() == 8 {
op = ssa.OpCvt64Fto32F
} else {
s.Fatalf("weird complex conversion %v -> %v", ft, tt)
}
ftp := types.FloatForComplex(ft)
ttp := types.FloatForComplex(tt)
return s.newValue2(ssa.OpComplexMake, tt,
s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, v)),
s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, v)))
}
s.Fatalf("unhandled OCONV %s -> %s", ft.Kind(), tt.Kind())
return nil
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n ir.Node) *ssa.Value {
return s.exprCheckPtr(n, true)
}
func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
if ir.HasUniquePos(n) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
s.pushLine(n.Pos())
defer s.popLine()
}
s.stmtList(n.Init())
switch n.Op() {
case ir.OBYTES2STRTMP:
n := n.(*ir.ConvExpr)
slice := s.expr(n.X)
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
case ir.OSTR2BYTESTMP:
n := n.(*ir.ConvExpr)
str := s.expr(n.X)
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
case ir.OCFUNC:
n := n.(*ir.UnaryExpr)
aux := n.X.(*ir.Name).Linksym()
// OCFUNC is used to build function values, which must
// always reference ABIInternal entry points.
if aux.ABI() != obj.ABIInternal {
s.Fatalf("expected ABIInternal: %v", aux.ABI())
}
return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
case ir.ONAME:
n := n.(*ir.Name)
if n.Class == ir.PFUNC {
// "value" of a function is the address of the function's closure
sym := staticdata.FuncLinksym(n)
return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
}
if s.canSSA(n) {
return s.variable(n, n.Type())
}
return s.load(n.Type(), s.addr(n))
case ir.OLINKSYMOFFSET:
n := n.(*ir.LinksymOffsetExpr)
return s.load(n.Type(), s.addr(n))
case ir.ONIL:
n := n.(*ir.NilExpr)
t := n.Type()
switch {
case t.IsSlice():
return s.constSlice(t)
case t.IsInterface():
return s.constInterface(t)
default:
return s.constNil(t)
}
case ir.OLITERAL:
switch u := n.Val(); u.Kind() {
case constant.Int:
i := ir.IntVal(n.Type(), u)
switch n.Type().Size() {
case 1:
return s.constInt8(n.Type(), int8(i))
case 2:
return s.constInt16(n.Type(), int16(i))
case 4:
return s.constInt32(n.Type(), int32(i))
case 8:
return s.constInt64(n.Type(), i)
default:
s.Fatalf("bad integer size %d", n.Type().Size())
return nil
}
case constant.String:
i := constant.StringVal(u)
if i == "" {
return s.constEmptyString(n.Type())
}
return s.entryNewValue0A(ssa.OpConstString, n.Type(), ssa.StringToAux(i))
case constant.Bool:
return s.constBool(constant.BoolVal(u))
case constant.Float:
f, _ := constant.Float64Val(u)
switch n.Type().Size() {
case 4:
return s.constFloat32(n.Type(), f)
case 8:
return s.constFloat64(n.Type(), f)
default:
s.Fatalf("bad float size %d", n.Type().Size())
return nil
}
case constant.Complex:
re, _ := constant.Float64Val(constant.Real(u))
im, _ := constant.Float64Val(constant.Imag(u))
switch n.Type().Size() {
case 8:
pt := types.Types[types.TFLOAT32]
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.constFloat32(pt, re),
s.constFloat32(pt, im))
case 16:
pt := types.Types[types.TFLOAT64]
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.constFloat64(pt, re),
s.constFloat64(pt, im))
default:
s.Fatalf("bad complex size %d", n.Type().Size())
return nil
}
default:
s.Fatalf("unhandled OLITERAL %v", u.Kind())
return nil
}
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
to := n.Type()
from := n.X.Type()
// Assume everything will work out, so set up our return value.
// Anything interesting that happens from here is a fatal.
x := s.expr(n.X)
if to == from {
return x
}
// Special case for not confusing GC and liveness.
// We don't want pointers accidentally classified
// as not-pointers or vice-versa because of copy
// elision.
if to.IsPtrShaped() != from.IsPtrShaped() {
return s.newValue2(ssa.OpConvert, to, x, s.mem())
}
v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
// CONVNOP closure
if to.Kind() == types.TFUNC && from.IsPtrShaped() {
return v
}
// named <--> unnamed type or typed <--> untyped const
if from.Kind() == to.Kind() {
return v
}
// unsafe.Pointer <--> *T
if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
if s.checkPtrEnabled && checkPtrOK && to.IsPtr() && from.IsUnsafePtr() {
s.checkPtrAlignment(n, v, nil)
}
return v
}
// map <--> *hmap
if to.Kind() == types.TMAP && from.IsPtr() &&
to.MapType().Hmap == from.Elem() {
return v
}
types.CalcSize(from)
types.CalcSize(to)
if from.Size() != to.Size() {
s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Size(), to, to.Size())
return nil
}
if etypesign(from.Kind()) != etypesign(to.Kind()) {
s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind())
return nil
}
if base.Flag.Cfg.Instrumenting {
// These appear to be fine, but they fail the
// integer constraint below, so okay them here.
// Sample non-integer conversion: map[string]string -> *uint8
return v
}
if etypesign(from.Kind()) == 0 {
s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
return nil
}
// integer, same width, same sign
return v
case ir.OCONV:
n := n.(*ir.ConvExpr)
x := s.expr(n.X)
return s.conv(n, x, n.X.Type(), n.Type())
case ir.ODOTTYPE:
n := n.(*ir.TypeAssertExpr)
res, _ := s.dottype(n, false)
return res
case ir.ODYNAMICDOTTYPE:
n := n.(*ir.DynamicTypeAssertExpr)
res, _ := s.dynamicDottype(n, false)
return res
// binary ops
case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
if n.X.Type().IsComplex() {
pt := types.FloatForComplex(n.X.Type())
op := s.ssaOp(ir.OEQ, pt)
r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
switch n.Op() {
case ir.OEQ:
return c
case ir.ONE:
return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
default:
s.Fatalf("ordered complex compare %v", n.Op())
}
}
// Convert OGE and OGT into OLE and OLT.
op := n.Op()
switch op {
case ir.OGE:
op, a, b = ir.OLE, b, a
case ir.OGT:
op, a, b = ir.OLT, b, a
}
if n.X.Type().IsFloat() {
// float comparison
return s.newValueOrSfCall2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
}
// integer comparison
return s.newValue2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
case ir.OMUL:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
if n.Type().IsComplex() {
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
aimag := s.newValue1(ssa.OpComplexImag, pt, a)
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
}
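// Complex multiply: (ar + ai*i) * (br + bi*i) = (ar*br - ai*bi) + (ar*bi + ai*br)*i.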
xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
if pt != wt { // Narrow to store back
xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
if n.Type().IsFloat() {
return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.ODIV:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
if n.Type().IsComplex() {
// TODO this is not executed because the front-end substitutes a runtime call.
// That probably ought to change; with modest optimization the widen/narrow
// conversions could all be elided in larger expression trees.
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
divop := ssa.OpDiv64F
pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
aimag := s.newValue1(ssa.OpComplexImag, pt, a)
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
}
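// Complex divide by multiplying through by the conjugate of the denominator:
// (ar + ai*i) / (br + bi*i) = ((ar*br + ai*bi) + (ai*br - ar*bi)*i) / (br*br + bi*bi).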
denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
// TODO not sure if this is best done in wide precision or narrow
// Double-rounding might be an issue.
// Note that the pre-SSA implementation does the entire calculation
// in wide format, so wide is compatible.
xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
if pt != wt { // Narrow to store back
xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
if n.Type().IsFloat() {
return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
return s.intDivide(n, a, b)
case ir.OMOD:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
return s.intDivide(n, a, b)
case ir.OADD, ir.OSUB:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
if n.Type().IsComplex() {
pt := types.FloatForComplex(n.Type())
op := s.ssaOp(n.Op(), pt)
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
}
if n.Type().IsFloat() {
return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.OAND, ir.OOR, ir.OXOR:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.OANDNOT:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
case ir.OLSH, ir.ORSH:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
bt := b.Type
if bt.IsSigned() {
cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
s.check(cmp, ir.Syms.Panicshift)
bt = bt.ToUnsigned()
}
return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
case ir.OANDAND, ir.OOROR:
// To implement OANDAND (and OOROR), we introduce a
// new temporary variable to hold the result. The
// variable is associated with the OANDAND node in the
// s.vars table (normally variables are only
// associated with ONAME nodes). We convert
// A && B
// to
// var = A
// if var {
// var = B
// }
// Using var in the subsequent block introduces the
// necessary phi variable.
n := n.(*ir.LogicalExpr)
el := s.expr(n.X)
s.vars[n] = el
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(el)
// In theory, we should set b.Likely here based on context.
// However, gc only gives us likeliness hints
// in a single place, for plain OIF statements,
// and passing around context is finicky, so don't bother for now.
bRight := s.f.NewBlock(ssa.BlockPlain)
bResult := s.f.NewBlock(ssa.BlockPlain)
if n.Op() == ir.OANDAND {
b.AddEdgeTo(bRight)
b.AddEdgeTo(bResult)
} else if n.Op() == ir.OOROR {
b.AddEdgeTo(bResult)
b.AddEdgeTo(bRight)
}
s.startBlock(bRight)
er := s.expr(n.Y)
s.vars[n] = er
b = s.endBlock()
b.AddEdgeTo(bResult)
s.startBlock(bResult)
return s.variable(n, types.Types[types.TBOOL])
case ir.OCOMPLEX:
n := n.(*ir.BinaryExpr)
r := s.expr(n.X)
i := s.expr(n.Y)
return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
// unary ops
case ir.ONEG:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
if n.Type().IsComplex() {
tp := types.FloatForComplex(n.Type())
negop := s.ssaOp(n.Op(), tp)
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
}
return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
case ir.ONOT, ir.OBITNOT:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
case ir.OIMAG, ir.OREAL:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
return s.newValue1(s.ssaOp(n.Op(), n.X.Type()), n.Type(), a)
case ir.OPLUS:
n := n.(*ir.UnaryExpr)
return s.expr(n.X)
case ir.OADDR:
n := n.(*ir.AddrExpr)
return s.addr(n.X)
case ir.ORESULT:
n := n.(*ir.ResultExpr)
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
panic("Expected to see a previous call")
}
which := n.Index
if which == -1 {
panic(fmt.Errorf("ORESULT %v does not match call %s", n, s.prevCall))
}
return s.resultOfCall(s.prevCall, which, n.Type())
case ir.ODEREF:
n := n.(*ir.StarExpr)
p := s.exprPtr(n.X, n.Bounded(), n.Pos())
return s.load(n.Type(), p)
case ir.ODOT:
n := n.(*ir.SelectorExpr)
if n.X.Op() == ir.OSTRUCTLIT {
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !ir.IsZero(n.X) {
s.Fatalf("literal with nonzero value in SSA: %v", n.X)
}
return s.zeroVal(n.Type())
}
// If n is addressable and can't be represented in
// SSA, then load just the selected field. This
// prevents false memory dependencies in race/msan/asan
// instrumentation.
if ir.IsAddressable(n) && !s.canSSA(n) {
p := s.addr(n)
return s.load(n.Type(), p)
}
v := s.expr(n.X)
return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
p := s.exprPtr(n.X, n.Bounded(), n.Pos())
p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
return s.load(n.Type(), p)
case ir.OINDEX:
n := n.(*ir.IndexExpr)
switch {
case n.X.Type().IsString():
if n.Bounded() && ir.IsConst(n.X, constant.String) && ir.IsConst(n.Index, constant.Int) {
// Replace "abc"[1] with 'b'.
// Delayed until now because "abc"[1] is not an ideal constant.
// See test/fixedbugs/issue11370.go.
return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.X)[ir.Int64Val(n.Index)])))
}
a := s.expr(n.X)
i := s.expr(n.Index)
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
ptrtyp := s.f.Config.Types.BytePtr
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
if ir.IsConst(n.Index, constant.Int) {
ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Index), ptr)
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
return s.load(types.Types[types.TUINT8], ptr)
case n.X.Type().IsSlice():
p := s.addr(n)
return s.load(n.X.Type().Elem(), p)
case n.X.Type().IsArray():
if TypeOK(n.X.Type()) {
// SSA can handle arrays of length at most 1.
bound := n.X.Type().NumElem()
a := s.expr(n.X)
i := s.expr(n.Index)
if bound == 0 {
// Bounds check will never succeed. Might as well
// use constants for the bounds check.
z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
// The return value won't be live, return junk.
// But not quite junk, in case bounds checks are turned off. See issue 48092.
return s.zeroVal(n.Type())
}
len := s.constInt(types.Types[types.TINT], bound)
s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
}
p := s.addr(n)
return s.load(n.X.Type().Elem(), p)
default:
s.Fatalf("bad type for index %v", n.X.Type())
return nil
}
case ir.OLEN, ir.OCAP:
n := n.(*ir.UnaryExpr)
switch {
case n.X.Type().IsSlice():
op := ssa.OpSliceLen
if n.Op() == ir.OCAP {
op = ssa.OpSliceCap
}
return s.newValue1(op, types.Types[types.TINT], s.expr(n.X))
case n.X.Type().IsString(): // string; not reachable for OCAP
return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.X))
case n.X.Type().IsMap(), n.X.Type().IsChan():
return s.referenceTypeBuiltin(n, s.expr(n.X))
default: // array
return s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
}
case ir.OSPTR:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
if n.X.Type().IsSlice() {
return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
} else {
return s.newValue1(ssa.OpStringPtr, n.Type(), a)
}
case ir.OITAB:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
return s.newValue1(ssa.OpITab, n.Type(), a)
case ir.OIDATA:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
return s.newValue1(ssa.OpIData, n.Type(), a)
case ir.OEFACE:
n := n.(*ir.BinaryExpr)
tab := s.expr(n.X)
data := s.expr(n.Y)
return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
case ir.OSLICEHEADER:
n := n.(*ir.SliceHeaderExpr)
p := s.expr(n.Ptr)
l := s.expr(n.Len)
c := s.expr(n.Cap)
return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
n := n.(*ir.SliceExpr)
check := s.checkPtrEnabled && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr()
v := s.exprCheckPtr(n.X, !check)
var i, j, k *ssa.Value
if n.Low != nil {
i = s.expr(n.Low)
}
if n.High != nil {
j = s.expr(n.High)
}
if n.Max != nil {
k = s.expr(n.Max)
}
p, l, c := s.slice(v, i, j, k, n.Bounded())
if check {
// Emit checkptr instrumentation after the bounds check to prevent false positives, see #46938.
s.checkPtrAlignment(n.X.(*ir.ConvExpr), v, s.conv(n.Max, k, k.Type, types.Types[types.TUINTPTR]))
}
return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
case ir.OSLICESTR:
n := n.(*ir.SliceExpr)
v := s.expr(n.X)
var i, j *ssa.Value
if n.Low != nil {
i = s.expr(n.Low)
}
if n.High != nil {
j = s.expr(n.High)
}
p, l, _ := s.slice(v, i, j, nil, n.Bounded())
return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
case ir.OSLICE2ARRPTR:
// if arrlen > slice.len {
// panic(...)
// }
// slice.ptr
n := n.(*ir.ConvExpr)
v := s.expr(n.X)
arrlen := s.constInt(types.Types[types.TINT], n.Type().Elem().NumElem())
cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false)
return s.newValue1(ssa.OpSlicePtrUnchecked, n.Type(), v)
case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
if ir.IsIntrinsicCall(n) {
return s.intrinsicCall(n)
}
fallthrough
case ir.OCALLINTER:
n := n.(*ir.CallExpr)
return s.callResult(n, callNormal)
case ir.OGETG:
n := n.(*ir.CallExpr)
return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
case ir.OGETCALLERPC:
n := n.(*ir.CallExpr)
return s.newValue0(ssa.OpGetCallerPC, n.Type())
case ir.OGETCALLERSP:
n := n.(*ir.CallExpr)
return s.newValue0(ssa.OpGetCallerSP, n.Type())
case ir.OAPPEND:
return s.append(n.(*ir.CallExpr), false)
case ir.OSTRUCTLIT, ir.OARRAYLIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
n := n.(*ir.CompLitExpr)
if !ir.IsZero(n) {
s.Fatalf("literal with nonzero value in SSA: %v", n)
}
return s.zeroVal(n.Type())
case ir.ONEW:
n := n.(*ir.UnaryExpr)
return s.newObject(n.Type().Elem())
case ir.OUNSAFEADD:
n := n.(*ir.BinaryExpr)
ptr := s.expr(n.X)
len := s.expr(n.Y)
// Force len to uintptr to prevent misuse of garbage bits in the
// upper part of the register (#48536).
len = s.conv(n, len, len.Type, types.Types[types.TUINTPTR])
return s.newValue2(ssa.OpAddPtr, n.Type(), ptr, len)
default:
s.Fatalf("unhandled expr %v", n.Op())
return nil
}
}
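// resultOfCall returns the value of result number which of call c, which has result type t.
// Results that are returned in memory and are not SSA-able are loaded from their result slot;
// all other results are selected directly from the call.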
func (s *state) resultOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
aux := c.Aux.(*ssa.AuxCall)
pa := aux.ParamAssignmentForResult(which)
// TODO(register args) determine if in-memory TypeOK is better loaded early from SelectNAddr or later when SelectN is expanded.
// SelectN is better for pattern-matching and possible call-aware analysis we might want to do in the future.
if len(pa.Registers) == 0 && !TypeOK(t) {
addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
return s.rawLoad(t, addr)
}
return s.newValue1I(ssa.OpSelectN, t, which, c)
}
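// resultAddrOfCall returns the address of result number which of call c.
// For results assigned to memory this is the result slot itself; results returned in
// registers are first stored to a temporary, whose address is returned.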
func (s *state) resultAddrOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
aux := c.Aux.(*ssa.AuxCall)
pa := aux.ParamAssignmentForResult(which)
if len(pa.Registers) == 0 {
return s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
}
_, addr := s.temp(c.Pos, t)
rval := s.newValue1I(ssa.OpSelectN, t, which, c)
s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, addr, rval, s.mem(), false)
return addr
}
// append converts an OAPPEND node to SSA.
// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
// adds it to s, and returns the Value.
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
// If inplace is false, process as expression "append(s, e1, e2, e3)":
//
// ptr, len, cap := s
// newlen := len + 3
// if newlen > cap {
// ptr, len, cap = growslice(s, newlen)
// newlen = len + 3 // recalculate to avoid a spill
// }
// // with write barriers, if needed:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
// return makeslice(ptr, newlen, cap)
//
//
// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
//
// a := &s
// ptr, len, cap := s
// newlen := len + 3
// if uint(newlen) > uint(cap) {
// newptr, len, newcap = growslice(ptr, len, cap, newlen)
// vardef(a) // if necessary, advise liveness we are writing a new a
// *a.cap = newcap // write before ptr to avoid a spill
// *a.ptr = newptr // with write barrier
// }
// newlen = len + 3 // recalculate to avoid a spill
// *a.len = newlen
// // with write barriers, if needed:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
et := n.Type().Elem()
pt := types.NewPtr(et)
// Evaluate slice
sn := n.Args[0] // the slice node is the first in the list
var slice, addr *ssa.Value
if inplace {
addr = s.addr(sn)
slice = s.load(n.Type(), addr)
} else {
slice = s.expr(sn)
}
// Allocate new blocks
grow := s.f.NewBlock(ssa.BlockPlain)
assign := s.f.NewBlock(ssa.BlockPlain)
// Decide if we need to grow
nargs := int64(len(n.Args) - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
nl := s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
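// cmp is true when the slice must grow: cap < newlen, compared unsigned.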
cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, nl)
s.vars[ptrVar] = p
if !inplace {
s.vars[newlenVar] = nl
s.vars[capVar] = c
} else {
s.vars[lenVar] = l
}
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
b.SetControl(cmp)
b.AddEdgeTo(grow)
b.AddEdgeTo(assign)
// Call growslice
s.startBlock(grow)
taddr := s.expr(n.X)
r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl)
if inplace {
if sn.Op() == ir.ONAME {
sn := sn.(*ir.Name)
if sn.Class != ir.PEXTERN {
// Tell liveness we're about to build a new slice
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
}
}
capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceCapOffset, addr)
s.store(types.Types[types.TINT], capaddr, r[2])
s.store(pt, addr, r[0])
// load the value we just stored to avoid having to spill it
s.vars[ptrVar] = s.load(pt, addr)
s.vars[lenVar] = r[1] // avoid a spill in the fast path
} else {
s.vars[ptrVar] = r[0]
s.vars[newlenVar] = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], r[1], s.constInt(types.Types[types.TINT], nargs))
s.vars[capVar] = r[2]
}
b = s.endBlock()
b.AddEdgeTo(assign)
// assign new elements to slots
s.startBlock(assign)
if inplace {
l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
nl = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceLenOffset, addr)
s.store(types.Types[types.TINT], lenaddr, nl)
}
// Evaluate args
type argRec struct {
// if store is true, we're appending the value v. If false, we're appending the
// value at *v.
v *ssa.Value
store bool
}
args := make([]argRec, 0, nargs)
for _, n := range n.Args[1:] {
if TypeOK(n.Type()) {
args = append(args, argRec{v: s.expr(n), store: true})
} else {
v := s.addr(n)
args = append(args, argRec{v: v})
}
}
p = s.variable(ptrVar, pt) // generates phi for ptr
if !inplace {
nl = s.variable(newlenVar, types.Types[types.TINT]) // generates phi for nl
c = s.variable(capVar, types.Types[types.TINT]) // generates phi for cap
}
p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
for i, arg := range args {
addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i)))
if arg.store {
s.storeType(et, addr, arg.v, 0, true)
} else {
s.move(et, addr, arg.v)
}
}
delete(s.vars, ptrVar)
if inplace {
delete(s.vars, lenVar)
return nil
}
delete(s.vars, newlenVar)
delete(s.vars, capVar)
// make result
return s.newValue3(ssa.OpSliceMake, n.Type(), p, nl, c)
}
// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
switch cond.Op() {
case ir.OANDAND:
cond := cond.(*ir.LogicalExpr)
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Init())
s.condBranch(cond.X, mid, no, max8(likely, 0))
s.startBlock(mid)
s.condBranch(cond.Y, yes, no, likely)
return
// Note: if likely==1, then both recursive calls pass 1.
// If likely==-1, then we don't have enough information to decide
// whether the first branch is likely or not. So we pass 0 for
// the likeliness of the first branch.
// TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info).
case ir.OOROR:
cond := cond.(*ir.LogicalExpr)
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Init())
s.condBranch(cond.X, yes, mid, min8(likely, 0))
s.startBlock(mid)
s.condBranch(cond.Y, yes, no, likely)
return
// Note: if likely==-1, then both recursive calls pass -1.
// If likely==1, then we don't have enough info to decide
// the likelihood of the first branch.
case ir.ONOT:
cond := cond.(*ir.UnaryExpr)
s.stmtList(cond.Init())
s.condBranch(cond.X, no, yes, -likely)
return
case ir.OCONVNOP:
cond := cond.(*ir.ConvExpr)
s.stmtList(cond.Init())
s.condBranch(cond.X, yes, no, likely)
return
}
c := s.expr(cond)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(c)
b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
b.AddEdgeTo(yes)
b.AddEdgeTo(no)
}
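// skipMask is a bitmask of slice/string components (pointer, length, capacity)
// whose top-level assignment can be skipped.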
type skipMask uint8
const (
skipPtr skipMask = 1 << iota
skipLen
skipCap
)
// assign does left = right.
// Right has already been evaluated to ssa, left has not.
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
if left.Op() == ir.ONAME && ir.IsBlank(left) {
return
}
t := left.Type()
types.CalcSize(t)
if s.canSSA(left) {
if deref {
s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
}
if left.Op() == ir.ODOT {
// We're assigning to a field of an ssa-able value.
// We need to build a new structure with the new value for the
// field we're assigning and the old values for the other fields.
// For instance:
// type T struct {a, b, c int}
// var x T
// x.b = 5
// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
// Grab information about the structure type.
left := left.(*ir.SelectorExpr)
t := left.X.Type()
nf := t.NumFields()
idx := fieldIdx(left)
// Grab old value of structure.
old := s.expr(left.X)
// Make new structure.
new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
// Add fields as args.
for i := 0; i < nf; i++ {
if i == idx {
new.AddArg(right)
} else {
new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
}
}
// Recursively assign the new value we've made to the base of the dot op.
s.assign(left.X, new, false, 0)
// TODO: do we need to update named values here?
return
}
if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).X.Type().IsArray() {
left := left.(*ir.IndexExpr)
s.pushLine(left.Pos())
defer s.popLine()
// We're assigning to an element of an ssa-able array.
// a[i] = v
t := left.X.Type()
n := t.NumElem()
i := s.expr(left.Index) // index
if n == 0 {
// The bounds check must fail. Might as well
// ignore the actual index and just use zeros.
z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
return
}
if n != 1 {
s.Fatalf("assigning to non-1-length array")
}
// Rewrite to a = [1]{v}
len := s.constInt(types.Types[types.TINT], 1)
s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
v := s.newValue1(ssa.OpArrayMake1, t, right)
s.assign(left.X, v, false, 0)
return
}
left := left.(*ir.Name)
// Update variable assignment.
s.vars[left] = right
s.addNamedValue(left, right)
return
}
// If this assignment clobbers an entire local variable, then emit
// OpVarDef so liveness analysis knows the variable is redefined.
if base, ok := clobberBase(left).(*ir.Name); ok && base.OnStack() && skip == 0 {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
}
// Left is not ssa-able. Compute its address.
addr := s.addr(left)
if ir.IsReflectHeaderDataField(left) {
// Package unsafe's documentation says storing pointers into
// reflect.SliceHeader and reflect.StringHeader's Data fields
// is valid, even though they have type uintptr (#19168).
// Mark it pointer type to signal the writebarrier pass to
// insert a write barrier.
t = types.Types[types.TUNSAFEPTR]
}
if deref {
// Treat as a mem->mem move.
if right == nil {
s.zero(t, addr)
} else {
s.move(t, addr, right)
}
return
}
// Treat as a store.
s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
}
// zeroVal returns the zero value for type t.
func (s *state) zeroVal(t *types.Type) *ssa.Value {
switch {
case t.IsInteger():
switch t.Size() {
case 1:
return s.constInt8(t, 0)
case 2:
return s.constInt16(t, 0)
case 4:
return s.constInt32(t, 0)
case 8:
return s.constInt64(t, 0)
default:
s.Fatalf("bad sized integer type %v", t)
}
case t.IsFloat():
switch t.Size() {
case 4:
return s.constFloat32(t, 0)
case 8:
return s.constFloat64(t, 0)
default:
s.Fatalf("bad sized float type %v", t)
}
case t.IsComplex():
switch t.Size() {
case 8:
z := s.constFloat32(types.Types[types.TFLOAT32], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
case 16:
z := s.constFloat64(types.Types[types.TFLOAT64], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
default:
s.Fatalf("bad sized complex type %v", t)
}
case t.IsString():
return s.constEmptyString(t)
case t.IsPtrShaped():
return s.constNil(t)
case t.IsBoolean():
return s.constBool(false)
case t.IsInterface():
return s.constInterface(t)
case t.IsSlice():
return s.constSlice(t)
case t.IsStruct():
n := t.NumFields()
v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
for i := 0; i < n; i++ {
v.AddArg(s.zeroVal(t.FieldType(i)))
}
return v
case t.IsArray():
switch t.NumElem() {
case 0:
return s.entryNewValue0(ssa.OpArrayMake0, t)
case 1:
return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
}
}
s.Fatalf("zero for type %v not implemented", t)
return nil
}
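// callKind describes how a call expression is lowered: as a normal call, a deferred
// call (optionally with a stack-allocated defer record), a go statement, or a tail call.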
type callKind int8
const (
callNormal callKind = iota
callDefer
callDeferStack
callGo
callTail
)
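// sfRtCallDef describes the softfloat runtime replacement for an SSA op:
// the runtime function to call and the kind of its result.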
type sfRtCallDef struct {
rtfn *obj.LSym
rtype types.Kind
}
var softFloatOps map[ssa.Op]sfRtCallDef
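// softfloatInit populates softFloatOps, mapping floating-point SSA ops to the
// runtime helpers used when compiling in softfloat mode.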
func softfloatInit() {
// Some of these operations get transformed by sfcall.
softFloatOps = map[ssa.Op]sfRtCallDef{
ssa.OpAdd32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
ssa.OpAdd64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
ssa.OpSub32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
ssa.OpSub64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
ssa.OpMul32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul32"), types.TFLOAT32},
ssa.OpMul64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul64"), types.TFLOAT64},
ssa.OpDiv32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv32"), types.TFLOAT32},
ssa.OpDiv64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv64"), types.TFLOAT64},
ssa.OpEq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
ssa.OpEq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
ssa.OpNeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
ssa.OpNeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
ssa.OpLess64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt64"), types.TBOOL},
ssa.OpLess32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt32"), types.TBOOL},
ssa.OpLeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge64"), types.TBOOL},
ssa.OpLeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge32"), types.TBOOL},
ssa.OpCvt32to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to32"), types.TFLOAT32},
ssa.OpCvt32Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint32"), types.TINT32},
ssa.OpCvt64to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to32"), types.TFLOAT32},
ssa.OpCvt32Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint64"), types.TINT64},
ssa.OpCvt64Uto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to32"), types.TFLOAT32},
ssa.OpCvt32Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f32touint64"), types.TUINT64},
ssa.OpCvt32to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to64"), types.TFLOAT64},
ssa.OpCvt64Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint32"), types.TINT32},
ssa.OpCvt64to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to64"), types.TFLOAT64},
ssa.OpCvt64Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint64"), types.TINT64},
ssa.OpCvt64Uto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to64"), types.TFLOAT64},
ssa.OpCvt64Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f64touint64"), types.TUINT64},
ssa.OpCvt32Fto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("f32to64"), types.TFLOAT64},
ssa.OpCvt64Fto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("f64to32"), types.TFLOAT32},
}
}
// TODO: do not emit sfcall if operation can be optimized to constant in later
// opt phase
func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
f2i := func(t *types.Type) *types.Type {
switch t.Kind() {
case types.TFLOAT32:
return types.Types[types.TUINT32]
case types.TFLOAT64:
return types.Types[types.TUINT64]
}
return t
}
if callDef, ok := softFloatOps[op]; ok {
switch op {
case ssa.OpLess32F,
ssa.OpLess64F,
ssa.OpLeq32F,
ssa.OpLeq64F:
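// The runtime provides only fgt/fge (see softFloatOps), so compute a < b as b > a
// by swapping the operands.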
args[0], args[1] = args[1], args[0]
case ssa.OpSub32F,
ssa.OpSub64F:
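// softFloatOps maps Sub to fadd, so negate the second operand: a - b == a + (-b).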
args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
}
// runtime functions take uints for floats and return uints.
// Convert to uints so we use the right calling convention.
for i, a := range args {
if a.Type.IsFloat() {
args[i] = s.newValue1(ssa.OpCopy, f2i(a.Type), a)
}
}
rt := types.Types[callDef.rtype]
result := s.rtcall(callDef.rtfn, true, []*types.Type{f2i(rt)}, args...)[0]
if rt.IsFloat() {
result = s.newValue1(ssa.OpCopy, rt, result)
}
if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
result = s.newValue1(ssa.OpNot, result.Type, result)
}
return result, true
}
return nil, false
}
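// intrinsics maps (architecture, package path, function name) to the builder that
// expands calls to that function inline.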
var intrinsics map[intrinsicKey]intrinsicBuilder
// An intrinsicBuilder converts a call node n into an ssa value that
// implements that call as an intrinsic. args is a list of arguments to the func.
type intrinsicBuilder func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value
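// intrinsicKey identifies an intrinsic for a single architecture by the package path
// and function name of the call it replaces.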
type intrinsicKey struct {
arch *sys.Arch
pkg string
fn string
}
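// InitTables populates the intrinsics table with the per-architecture builders for
// recognized runtime, runtime/internal/sys, runtime/internal/atomic, math, and
// math/bits functions, plus aliases between them.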
func InitTables() {
intrinsics = map[intrinsicKey]intrinsicBuilder{}
var all []*sys.Arch
var p4 []*sys.Arch
var p8 []*sys.Arch
var lwatomics []*sys.Arch
for _, a := range &sys.Archs {
all = append(all, a)
if a.PtrSize == 4 {
p4 = append(p4, a)
} else {
p8 = append(p8, a)
}
if a.Family != sys.PPC64 {
lwatomics = append(lwatomics, a)
}
}
// add adds the intrinsic b for pkg.fn for the given list of architectures.
add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
for _, a := range archs {
intrinsics[intrinsicKey{a, pkg, fn}] = b
}
}
// addF does the same as add but operates on architecture families.
addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
m := 0
for _, f := range archFamilies {
if f >= 32 {
panic("too many architecture families")
}
m |= 1 << uint(f)
}
for _, a := range all {
if m>>uint(a.Family)&1 != 0 {
intrinsics[intrinsicKey{a, pkg, fn}] = b
}
}
}
// alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
aliased := false
for _, a := range archs {
if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
intrinsics[intrinsicKey{a, pkg, fn}] = b
aliased = true
}
}
if !aliased {
panic(fmt.Sprintf("attempted to alias undefined intrinsic: %s.%s", pkg, fn))
}
}
/******** runtime ********/
if !base.Flag.Cfg.Instrumenting {
add("runtime", "slicebytetostringtmp",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// Compiler frontend optimizations emit OBYTES2STRTMP nodes
// for the backend instead of slicebytetostringtmp calls
// when not instrumenting.
return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
},
all...)
}
addF("runtime/internal/math", "MulUintptr",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
}
return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
},
sys.AMD64, sys.I386, sys.MIPS64, sys.RISCV64)
add("runtime", "KeepAlive",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
return nil
},
all...)
add("runtime", "getclosureptr",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallerpc",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallersp",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
},
all...)
addF("runtime", "publicationBarrier",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue1(ssa.OpPubBarrier, types.TypeMem, s.mem())
return nil
},
sys.ARM64)
/******** runtime/internal/sys ********/
addF("runtime/internal/sys", "Ctz32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Ctz64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Bswap32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
addF("runtime/internal/sys", "Bswap64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
/****** Prefetch ******/
makePrefetchFunc := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue2(op, types.TypeMem, args[0], s.mem())
return nil
}
}
// Make Prefetch intrinsics for supported platforms.
// On unsupported platforms the stub function will be eliminated.
addF("runtime/internal/sys", "Prefetch", makePrefetchFunc(ssa.OpPrefetchCache),
sys.AMD64, sys.ARM64, sys.PPC64)
addF("runtime/internal/sys", "PrefetchStreamed", makePrefetchFunc(ssa.OpPrefetchCacheStreamed),
sys.AMD64, sys.ARM64, sys.PPC64)
/******** runtime/internal/atomic ********/
addF("runtime/internal/atomic", "Load",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StorepNoWB",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64)
addF("runtime/internal/atomic", "Xchg",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xchg64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
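// atomicOpEmitter emits the SSA for a single atomic operation; it lets
// makeAtomicGuardedIntrinsicARM64 share the emission code between the two op variants.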
type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind)
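// makeAtomicGuardedIntrinsicARM64 builds an intrinsic that checks ARM64HasATOMICS at
// run time and emits op1 when native atomics are available, falling back to op0
// otherwise. rtyp is the result kind, or TNIL when the operation has no result.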
makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// The target's support for atomic instructions is detected dynamically.
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARM64HasATOMICS, s.sb)
v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely
// We have native atomic instructions - use them directly.
s.startBlock(bTrue)
emit(s, n, args, op1, typ)
s.endBlock().AddEdgeTo(bEnd)
// Use the original instruction sequence.
s.startBlock(bFalse)
emit(s, n, args, op0, typ)
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
if rtyp == types.TNIL {
return nil
} else {
return s.variable(n, types.Types[rtyp])
}
}
}
atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Xchg",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xchg64",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd64",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Cas64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "CasRel",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.PPC64)
atomicCasEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Cas",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas64",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "And",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Or8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Or",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
}
addF("runtime/internal/atomic", "And8",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or8",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
// Aliases for atomic load operations
alias("runtime/internal/atomic", "Loadint32", "runtime/internal/atomic", "Load", all...)
alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
// Aliases for atomic store operations
alias("runtime/internal/atomic", "Storeint32", "runtime/internal/atomic", "Store", all...)
alias("runtime/internal/atomic", "Storeint64", "runtime/internal/atomic", "Store64", all...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
// Aliases for atomic swap operations
alias("runtime/internal/atomic", "Xchgint32", "runtime/internal/atomic", "Xchg", all...)
alias("runtime/internal/atomic", "Xchgint64", "runtime/internal/atomic", "Xchg64", all...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
// Aliases for atomic add operations
alias("runtime/internal/atomic", "Xaddint32", "runtime/internal/atomic", "Xadd", all...)
alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
// Aliases for atomic CAS operations
alias("runtime/internal/atomic", "Casint32", "runtime/internal/atomic", "Cas", all...)
alias("runtime/internal/atomic", "Casint64", "runtime/internal/atomic", "Cas64", all...)
alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
/******** math ********/
addF("math", "Sqrt",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
},
sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
addF("math", "Trunc",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Ceil",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Floor",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Round",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "RoundToEven",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.S390X, sys.Wasm)
addF("math", "Abs",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.ARM, sys.PPC64, sys.RISCV64, sys.Wasm)
addF("math", "Copysign",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
},
sys.PPC64, sys.RISCV64, sys.Wasm)
addF("math", "FMA",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
},
sys.ARM64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("math", "FMA",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[types.TFLOAT64])
}
if buildcfg.GOAMD64 >= 3 {
return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
}
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasFMA)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely // >= Haswell CPUs are common
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[types.TFLOAT64])
},
sys.AMD64)
addF("math", "FMA",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[types.TFLOAT64])
}
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARMHasVFPv4, s.sb)
v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[types.TFLOAT64])
},
sys.ARM)
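// makeRoundAMD64 returns a builder for the math rounding intrinsics on AMD64: it uses
// op directly when SSE4.1 is guaranteed by GOAMD64 >= 2, and otherwise branches on a
// run-time CPU-feature check, calling the pure Go version when SSE4.1 is absent.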
makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if buildcfg.GOAMD64 >= 2 {
return s.newValue1(op, types.Types[types.TFLOAT64], args[0])
}
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasSSE41)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[types.TFLOAT64])
}
}
addF("math", "RoundToEven",
makeRoundAMD64(ssa.OpRoundToEven),
sys.AMD64)
addF("math", "Floor",
makeRoundAMD64(ssa.OpFloor),
sys.AMD64)
addF("math", "Ceil",
makeRoundAMD64(ssa.OpCeil),
sys.AMD64)
addF("math", "Trunc",
makeRoundAMD64(ssa.OpTrunc),
sys.AMD64)
/******** math/bits ********/
addF("math/bits", "TrailingZeros64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
c := s.constInt32(types.Types[types.TUINT32], 1<<16)
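// OR-ing in bit 16 bounds the count at 16, so a zero input yields 16 rather than 32.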
y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
},
sys.MIPS)
addF("math/bits", "TrailingZeros16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
c := s.constInt64(types.Types[types.TUINT64], 1<<16)
y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
},
sys.S390X, sys.PPC64)
addF("math/bits", "TrailingZeros8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
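// Same trick as TrailingZeros16 above: OR in a bit at position 8 so that a
// zero input yields 8.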
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
c := s.constInt32(types.Types[types.TUINT32], 1<<8)
y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
},
sys.MIPS)
addF("math/bits", "TrailingZeros8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
c := s.constInt64(types.Types[types.TUINT64], 1<<8)
y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
},
sys.S390X)
alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
// ReverseBytes inlines correctly, no need to intrinsify it.
// ReverseBytes16 lowers to a rotate, no need for anything special here.
addF("math/bits", "Len64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.PPC64)
addF("math/bits", "Len32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM, sys.S390X, sys.MIPS, sys.Wasm)
addF("math/bits", "Len16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
}
x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
}
x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
// LeadingZeros is handled because it trivially calls Len.
addF("math/bits", "Reverse64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
}
return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "RotateLeft8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "RotateLeft64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
makeOnesCountAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if buildcfg.GOAMD64 >= 2 {
return s.newValue1(op, types.Types[types.TINT], args[0])
}
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasPOPCNT)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely // most machines have popcnt nowadays
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[types.TINT])
}
}
addF("math/bits", "OnesCount64",
makeOnesCountAMD64(ssa.OpPopCount64),
sys.AMD64)
addF("math/bits", "OnesCount64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
addF("math/bits", "OnesCount32",
makeOnesCountAMD64(ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "OnesCount32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
addF("math/bits", "OnesCount16",
makeOnesCountAMD64(ssa.OpPopCount16),
sys.AMD64)
addF("math/bits", "OnesCount16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
},
sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
},
sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount",
makeOnesCountAMD64(ssa.OpPopCount64),
sys.AMD64)
addF("math/bits", "Mul64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64, sys.RISCV64)
alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE, sys.ArchRISCV64)
alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE, sys.ArchRISCV64)
addF("math/bits", "Add64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X)
addF("math/bits", "Sub64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.S390X)
alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
addF("math/bits", "Div64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// check for divide-by-zero/overflow and panic with appropriate message
cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
s.check(cmpZero, ir.Syms.Panicdivide)
cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
s.check(cmpOverflow, ir.Syms.Panicoverflow)
return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64)
alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
alias("runtime/internal/sys", "Ctz8", "math/bits", "TrailingZeros8", all...)
alias("runtime/internal/sys", "TrailingZeros8", "math/bits", "TrailingZeros8", all...)
alias("runtime/internal/sys", "TrailingZeros64", "math/bits", "TrailingZeros64", all...)
alias("runtime/internal/sys", "Len8", "math/bits", "Len8", all...)
alias("runtime/internal/sys", "Len64", "math/bits", "Len64", all...)
alias("runtime/internal/sys", "OnesCount64", "math/bits", "OnesCount64", all...)
/******** sync/atomic ********/
// Note: these are disabled by the -race flag in findIntrinsic below.
alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
// Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
/******** math/big ********/
add("math/big", "mulWW",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
}
// findIntrinsic returns a function which builds the SSA equivalent of the
// function identified by the symbol sym. If sym does not name an intrinsic,
// it returns nil.
func findIntrinsic(sym *types.Sym) intrinsicBuilder {
if sym == nil || sym.Pkg == nil {
return nil
}
pkg := sym.Pkg.Path
if sym.Pkg == types.LocalPkg {
pkg = base.Ctxt.Pkgpath
}
if sym.Pkg == ir.Pkgs.Runtime {
pkg = "runtime"
}
if base.Flag.Race && pkg == "sync/atomic" {
// The race detector needs to be able to intercept these calls.
// We can't intrinsify them.
return nil
}
// Skip intrinsifying math functions (which may contain hard-float
// instructions) when building in soft-float mode.
if Arch.SoftFloat && pkg == "math" {
return nil
}
fn := sym.Name
if ssa.IntrinsicsDisable {
if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
// These runtime functions don't have definitions; they must be intrinsics.
} else {
return nil
}
}
return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}]
}
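// IsIntrinsicCall reports whether n is a call to an intrinsic function.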
func IsIntrinsicCall(n *ir.CallExpr) bool {
if n == nil {
return false
}
name, ok := n.X.(*ir.Name)
if !ok {
return false
}
return findIntrinsic(name.Sym()) != nil
}
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
v := findIntrinsic(n.X.Sym())(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
if x == nil {
x = s.mem()
}
if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
x = x.Args[0]
}
base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.X.Sym().Name, x.LongString())
}
return v
}
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
args := make([]*ssa.Value, len(n.Args))
for i, n := range n.Args {
args[i] = s.expr(n)
}
return args
}
// openDeferRecord adds code to evaluate and store the function for an open-coded defer
// call, and records info about the defer, so we can generate proper code on the
// exit paths. n is the sub-node of the defer node that is the actual function
// call. We will also record funcdata information on where the function is stored
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
func (s *state) openDeferRecord(n *ir.CallExpr) {
if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.X.Type().NumResults() != 0 {
s.Fatalf("defer call with arguments or results: %v", n)
}
opendefer := &openDeferInfo{
n: n,
}
fn := n.X
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the function directly if it is a static function.
closureVal := s.expr(fn)
closure := s.openDeferSave(fn.Type(), closureVal)
opendefer.closureNode = closure.Aux.(*ir.Name)
if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
opendefer.closure = closure
}
index := len(s.openDefers)
s.openDefers = append(s.openDefers, opendefer)
// Update deferBits only after the function has been evaluated and stored
// to the stack successfully.
bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
s.vars[deferBitsVar] = newDeferBits
s.store(types.Types[types.TUINT8], s.deferBitsAddr, newDeferBits)
}
// openDeferSave generates SSA nodes to store a value (with type t) for an
// open-coded defer at an explicit autotmp location on the stack, so it can be
// reloaded and used for the appropriate call on exit. Type t must be a function type
// (therefore SSAable). val is the value to be stored. The function returns an SSA
// value representing a pointer to the autotmp location.
func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
if !TypeOK(t) {
s.Fatalf("openDeferSave of non-SSA-able type %v val=%v", t, val)
}
if !t.HasPointers() {
s.Fatalf("openDeferSave of pointerless type %v val=%v", t, val)
}
pos := val.Pos
temp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
temp.SetOpenDeferSlot(true)
var addrTemp *ssa.Value
// Use OpVarLive to make sure the stack slot for the closure is not removed
// by dead-store elimination.
if s.curBlock.ID != s.f.Entry.ID {
// Force the tmp storing this defer function to be declared in the entry
// block, so that it will be live for the defer exit code (which will
// actually access it only if the associated defer call has been activated).
s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
addrTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.defvars[s.f.Entry.ID][memVar])
} else {
// Special case if we're still in the entry block. We can't use
// the above code, since s.defvars[s.f.Entry.ID] isn't defined
// until we end the entry block with s.endBlock().
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, temp, s.mem(), false)
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, temp, s.mem(), false)
addrTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.mem(), false)
}
// Since we may use this temp during exit depending on the
// deferBits, we must define it unconditionally on entry.
// Therefore, we must make sure it is zeroed out in the entry
// block if it contains pointers, else GC may wrongly follow an
// uninitialized pointer value.
temp.SetNeedzero(true)
// We are storing to the stack, hence we can avoid the full checks in
// storeType() (no write barrier) and do a simple store().
s.store(t, addrTemp, val)
return addrTemp
}
// openDeferExit generates SSA for processing all the open-coded defers at exit.
// The code involves loading deferBits, and checking each of the bits to see if
// the corresponding defer statement was executed. For each bit that is turned
// on, the associated defer call is made.
func (s *state) openDeferExit() {
deferExit := s.f.NewBlock(ssa.BlockPlain)
s.endBlock().AddEdgeTo(deferExit)
s.startBlock(deferExit)
s.lastDeferExit = deferExit
s.lastDeferCount = len(s.openDefers)
zeroval := s.constInt8(types.Types[types.TUINT8], 0)
// Test for and run defers in reverse order
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
bCond := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8])
// Generate code to check if the bit associated with the current
// defer is set.
bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i))
andval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, bitval)
eqVal := s.newValue2(ssa.OpEq8, types.Types[types.TBOOL], andval, zeroval)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(eqVal)
b.AddEdgeTo(bEnd)
b.AddEdgeTo(bCond)
bCond.AddEdgeTo(bEnd)
s.startBlock(bCond)
// Clear this bit in deferBits and force a store back to the stack, so
// we will not try to re-run this defer call if it panics.
nbitval := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], bitval)
maskedval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, nbitval)
s.store(types.Types[types.TUINT8], s.deferBitsAddr, maskedval)
// Use this value for following tests, so we keep previous
// bits cleared.
s.vars[deferBitsVar] = maskedval
// Generate code to call the deferred function, using the closure that was
// stored in an argtmp at the point of the defer statement.
fn := r.n.X
stksize := fn.Type().ArgWidth()
var callArgs []*ssa.Value
var call *ssa.Value
if r.closure != nil {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
} else {
aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
}
callArgs = append(callArgs, s.mem())
call.AddArgs(callArgs...)
call.AuxInt = stksize
s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, 0, call)
// Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness
// for the deferreturn, so we want all stack slots to be live.
if r.closureNode != nil {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
}
s.endBlock()
s.startBlock(bEnd)
}
}
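// callResult generates SSA for the call n and returns the value of the call's result (nil if there is none).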
func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value {
return s.call(n, k, false)
}
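// callAddr generates SSA for the call n and returns the address of the call's result.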
func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
return s.call(n, k, true)
}
// call generates SSA for calling the function n using the specified call type.
// It returns the value of the call's first result, or its address if
// returnResultAddr is true (nil if the call has no results).
func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value {
s.prevCall = nil
var callee *ir.Name // target function (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
fn := n.X
var ACArgs []*types.Type // AuxCall args
var ACResults []*types.Type // AuxCall results
var callArgs []*ssa.Value // For late-expansion, the args themselves (not stored, args to the call instead).
callABI := s.f.ABIDefault
if !buildcfg.Experiment.RegabiArgs {
var magicFnNameSym *types.Sym
if fn.Name() != nil {
magicFnNameSym = fn.Name().Sym()
ss := magicFnNameSym.Name
if strings.HasSuffix(ss, magicNameDotSuffix) {
callABI = s.f.ABI1
}
}
if magicFnNameSym == nil && n.Op() == ir.OCALLINTER {
magicFnNameSym = fn.(*ir.SelectorExpr).Sym()
ss := magicFnNameSym.Name
if strings.HasSuffix(ss, magicNameDotSuffix[1:]) {
callABI = s.f.ABI1
}
}
}
if k != callNormal && k != callTail && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
s.Fatalf("go/defer call with arguments: %v", n)
}
switch n.Op() {
case ir.OCALLFUNC:
if (k == callNormal || k == callTail) && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
fn := fn.(*ir.Name)
callee = fn
if buildcfg.Experiment.RegabiArgs {
// This is a static call, so it may be
// a direct call to a non-ABIInternal
// function. fn.Func may be nil for
// some compiler-generated functions,
// but those are all ABIInternal.
if fn.Func != nil {
callABI = abiForFunc(fn.Func, s.f.ABI0, s.f.ABI1)
}
} else {
// TODO(register args) remove after register abi is working
inRegistersImported := fn.Pragma()&ir.RegisterParams != 0
inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0
if inRegistersImported || inRegistersSamePackage {
callABI = s.f.ABI1
}
}
break
}
closure = s.expr(fn)
if k != callDefer && k != callDeferStack {
// A deferred nil function needs to panic when the function is invoked,
// not at the point of the defer statement.
s.maybeNilCheckClosure(closure, k)
}
case ir.OCALLINTER:
if fn.Op() != ir.ODOTINTER {
s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
fn := fn.(*ir.SelectorExpr)
var iclosure *ssa.Value
iclosure, rcvr = s.getClosureAndRcvr(fn)
if k == callNormal {
codeptr = s.load(types.Types[types.TUINTPTR], iclosure)
} else {
closure = iclosure
}
}
if !buildcfg.Experiment.RegabiArgs {
if regAbiForFuncType(n.X.Type().FuncType()) {
// Magic last type in input args to call
callABI = s.f.ABI1
}
}
params := callABI.ABIAnalyze(n.X.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */)
types.CalcSize(fn.Type())
stksize := params.ArgWidth() // includes receiver, args, and results
res := n.X.Type().Results()
if k == callNormal || k == callTail {
for _, p := range params.OutParams() {
ACResults = append(ACResults, p.Type)
}
}
var call *ssa.Value
if k == callDeferStack {
// Make a defer struct d on the stack.
if stksize != 0 {
s.Fatalf("deferprocStack with non-zero stack size %d: %v", stksize, n)
}
t := deferstruct()
d := typecheck.TempAt(n.Pos(), s.curfn, t)
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
addr := s.addr(d)
// Must match deferstruct() below and src/runtime/runtime2.go:_defer.
// 0: started, set in deferprocStack
// 1: heap, set in deferprocStack
// 2: openDefer
// 3: sp, set in deferprocStack
// 4: pc, set in deferprocStack
// 5: fn
s.store(closure.Type,
s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(5), addr),
closure)
// 6: panic, set in deferprocStack
// 7: link, set in deferprocStack
// 8: fd
// 9: varp
// 10: framepc
// Call runtime.deferprocStack with pointer to _defer record.
ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
callArgs = append(callArgs, addr, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
call.AuxInt = int64(types.PtrSize) // deferprocStack takes a *_defer arg
} else {
// Store arguments to stack, including defer/go arguments and receiver for method calls.
// These are written in SP-offset order.
argStart := base.Ctxt.FixedFrameSize()
// Defer/go args.
if k != callNormal && k != callTail {
// Write closure (arg to newproc/deferproc).
ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) // not argExtra
callArgs = append(callArgs, closure)
stksize += int64(types.PtrSize)
argStart += int64(types.PtrSize)
}
// Set receiver (for interface calls).
if rcvr != nil {
callArgs = append(callArgs, rcvr)
}
// Write args.
t := n.X.Type()
args := n.Args
for _, p := range params.InParams() { // includes receiver for interface calls
ACArgs = append(ACArgs, p.Type)
}
// Split the entry block if there are open defers, because later calls to
// openDeferSave may cause a mismatch between the mem for an OpDereference
// and the call site which uses it. See #49282.
if s.curBlock.ID == s.f.Entry.ID && s.hasOpenDefers {
b := s.endBlock()
b.Kind = ssa.BlockPlain
curb := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(curb)
s.startBlock(curb)
}
for i, n := range args {
callArgs = append(callArgs, s.putArg(n, t.Params().Field(i).Type))
}
callArgs = append(callArgs, s.mem())
// call target
switch {
case k == callDefer:
aux := ssa.StaticAuxCall(ir.Syms.Deferproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) // TODO paramResultInfo for DeferProc
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
case k == callGo:
aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for NewProc
case closure != nil:
// rawLoad because loading the code pointer from a
// closure is always safe, but IsSanitizerSafeAddr
// can't always figure that out currently, and it's
// critical that we not clobber any arguments already
// stored onto the stack.
codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(nil, ACArgs, ACResults))
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
case codeptr != nil:
// Note that the "receiver" parameter is nil because the actual receiver is the first input parameter.
aux := ssa.InterfaceAuxCall(params)
call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
case callee != nil:
aux := ssa.StaticAuxCall(callTargetLSym(callee), params)
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
if k == callTail {
call.Op = ssa.OpTailLECall
stksize = 0 // Tail call does not use stack. We reuse caller's frame.
}
default:
s.Fatalf("bad call type %v %v", n.Op(), n)
}
call.AddArgs(callArgs...)
call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
}
s.prevCall = call
s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
// Insert OVARLIVE nodes
for _, name := range n.KeepAlive {
s.stmt(ir.NewUnaryExpr(n.Pos(), ir.OVARLIVE, name))
}
// Finish block for defers
if k == callDefer || k == callDeferStack {
b := s.endBlock()
b.Kind = ssa.BlockDefer
b.SetControl(call)
bNext := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bNext)
// Add recover edge to exit code.
r := s.f.NewBlock(ssa.BlockPlain)
s.startBlock(r)
s.exit()
b.AddEdgeTo(r)
b.Likely = ssa.BranchLikely
s.startBlock(bNext)
}
if res.NumFields() == 0 || k != callNormal {
// call has no return value. Continue with the next statement.
return nil
}
fp := res.Field(0)
if returnResultAddr {
return s.resultAddrOfCall(call, 0, fp.Type)
}
return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
}
// maybeNilCheckClosure checks if a nil check of a closure is needed in some
// architecture-dependent situations and, if so, emits the nil check.
func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
if Arch.LinkArch.Family == sys.Wasm || buildcfg.GOOS == "aix" && k != callGo {
// On AIX, the closure needs to be verified as fn can be nil, except if it's a go call. That case must be handled by the runtime so it can produce the "go of nil func value" error.
// TODO(neelance): On other architectures this check should be eliminated by the optimization passes.
s.nilCheck(closure)
}
}
// getClosureAndRcvr returns values for the appropriate closure and receiver of an
// interface call
func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) {
i := s.expr(fn.X)
itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
s.nilCheck(itab)
itabidx := fn.Offset() + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab
closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
return closure, rcvr
}
// etypesign returns the signed-ness of e, for integer/pointer etypes.
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
func etypesign(e types.Kind) int8 {
switch e {
case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
return -1
case types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINT, types.TUINTPTR, types.TUNSAFEPTR:
return +1
}
return 0
}
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
func (s *state) addr(n ir.Node) *ssa.Value {
if n.Op() != ir.ONAME {
s.pushLine(n.Pos())
defer s.popLine()
}
if s.canSSA(n) {
s.Fatalf("addr of canSSA expression: %+v", n)
}
t := types.NewPtr(n.Type())
linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value {
v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb)
// TODO: Make OpAddr use AuxInt as well as Aux.
if offset != 0 {
v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
}
return v
}
switch n.Op() {
case ir.OLINKSYMOFFSET:
no := n.(*ir.LinksymOffsetExpr)
return linksymOffset(no.Linksym, no.Offset_)
case ir.ONAME:
n := n.(*ir.Name)
if n.Heapaddr != nil {
return s.expr(n.Heapaddr)
}
switch n.Class {
case ir.PEXTERN:
// global variable
return linksymOffset(n.Linksym(), 0)
case ir.PPARAM:
// parameter slot
v := s.decladdrs[n]
if v != nil {
return v
}
s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
return nil
case ir.PAUTO:
return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n))
case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
// ensure that we reuse symbols for out parameters so
// that cse works on their addresses
return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
default:
s.Fatalf("variable address class %v not implemented", n.Class)
return nil
}
case ir.ORESULT:
// load return from callee
n := n.(*ir.ResultExpr)
return s.resultAddrOfCall(s.prevCall, n.Index, n.Type())
case ir.OINDEX:
n := n.(*ir.IndexExpr)
if n.X.Type().IsSlice() {
a := s.expr(n.X)
i := s.expr(n.Index)
len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
p := s.newValue1(ssa.OpSlicePtr, t, a)
return s.newValue2(ssa.OpPtrIndex, t, p, i)
} else { // array
a := s.addr(n.X)
i := s.expr(n.Index)
len := s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.X.Type().Elem()), a, i)
}
case ir.ODEREF:
n := n.(*ir.StarExpr)
return s.exprPtr(n.X, n.Bounded(), n.Pos())
case ir.ODOT:
n := n.(*ir.SelectorExpr)
p := s.addr(n.X)
return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
p := s.exprPtr(n.X, n.Bounded(), n.Pos())
return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
if n.Type() == n.X.Type() {
return s.addr(n.X)
}
addr := s.addr(n.X)
return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
case ir.OCALLFUNC, ir.OCALLINTER:
n := n.(*ir.CallExpr)
return s.callAddr(n, callNormal)
case ir.ODOTTYPE, ir.ODYNAMICDOTTYPE:
var v *ssa.Value
if n.Op() == ir.ODOTTYPE {
v, _ = s.dottype(n.(*ir.TypeAssertExpr), false)
} else {
v, _ = s.dynamicDottype(n.(*ir.DynamicTypeAssertExpr), false)
}
if v.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
if v.Args[1] != s.mem() {
s.Fatalf("memory no longer live from dottype load")
}
return v.Args[0]
default:
s.Fatalf("unhandled addr %v", n.Op())
return nil
}
}
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
func (s *state) canSSA(n ir.Node) bool {
if base.Flag.N != 0 {
return false
}
for {
nn := n
if nn.Op() == ir.ODOT {
nn := nn.(*ir.SelectorExpr)
n = nn.X
continue
}
if nn.Op() == ir.OINDEX {
nn := nn.(*ir.IndexExpr)
if nn.X.Type().IsArray() {
n = nn.X
continue
}
}
break
}
if n.Op() != ir.ONAME {
return false
}
return s.canSSAName(n.(*ir.Name)) && TypeOK(n.Type())
}
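// canSSAName reports whether the variable name can be represented as an SSA
// value: it must live on the stack, must not have its address taken, and, if
// it is a result parameter, must not need to be kept in memory for defers or cgo.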
func (s *state) canSSAName(name *ir.Name) bool {
if name.Addrtaken() || !name.OnStack() {
return false
}
switch name.Class {
case ir.PPARAMOUT:
if s.hasdefer {
// TODO: handle this case? Named return values must be
// in memory so that the deferred function can see them.
// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
// Or maybe not, see issue 18860. Even unnamed return values
// must be written back so if a defer recovers, the caller can see them.
return false
}
if s.cgoUnsafeArgs {
// Cgo effectively takes the address of all result args,
// but the compiler can't see that.
return false
}
}
return true
// TODO: try to make more variables SSAable?
}
// TypeOK reports whether variables of type t are SSA-able.
func TypeOK(t *types.Type) bool {
types.CalcSize(t)
if t.Size() > int64(4*types.PtrSize) {
// 4*PtrSize is an arbitrary constant. We want it
// to be at least 3*PtrSize so slices can be registerized.
// Too big and we'll introduce too much register pressure.
return false
}
switch t.Kind() {
case types.TARRAY:
// We can't do larger arrays because dynamic indexing is
// not supported on SSA variables.
// TODO: allow if all indexes are constant.
if t.NumElem() <= 1 {
return TypeOK(t.Elem())
}
return false
case types.TSTRUCT:
if t.NumFields() > ssa.MaxStruct {
return false
}
for _, t1 := range t.Fields().Slice() {
if !TypeOK(t1.Type) {
return false
}
}
return true
default:
return true
}
}
// exprPtr evaluates n to a pointer and nil-checks it.
func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
p := s.expr(n)
if bounded || n.NonNil() {
if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
s.f.Warnl(lineno, "removed nil check")
}
return p
}
s.nilCheck(p)
return p
}
// nilCheck generates nil pointer checking code.
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
return
}
s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
}
// boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
// Starts a new block on return.
// On input, len must be converted to full int width and be nonnegative.
// Returns idx converted to full int width.
// If bounded is true then caller guarantees the index is not out of bounds
// (but boundsCheck will still extend the index to full int width).
func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
idx = s.extendIndex(idx, len, kind, bounded)
if bounded || base.Flag.B != 0 {
// If bounded or bounds checking is flag-disabled, then no check necessary,
// just return the extended index.
//
// Here, bounded == true if the compiler generated the index itself,
// such as in the expansion of a slice initializer. These indexes are
// compiler-generated, not Go program variables, so they cannot be
// attacker-controlled, so we can omit Spectre masking as well.
//
// Note that we do not want to omit Spectre masking in code like:
//
// if 0 <= i && i < len(x) {
// use(x[i])
// }
//
// Lucky for us, bounded==false for that code.
// In that case (handled below), we emit a bound check (and Spectre mask)
// and then the prove pass will remove the bounds check.
// In theory the prove pass could potentially remove certain
// Spectre masks, but it's very delicate and probably better
// to be conservative and leave them all in.
return idx
}
bNext := s.f.NewBlock(ssa.BlockPlain)
bPanic := s.f.NewBlock(ssa.BlockExit)
if !idx.Type.IsSigned() {
switch kind {
case ssa.BoundsIndex:
kind = ssa.BoundsIndexU
case ssa.BoundsSliceAlen:
kind = ssa.BoundsSliceAlenU
case ssa.BoundsSliceAcap:
kind = ssa.BoundsSliceAcapU
case ssa.BoundsSliceB:
kind = ssa.BoundsSliceBU
case ssa.BoundsSlice3Alen:
kind = ssa.BoundsSlice3AlenU
case ssa.BoundsSlice3Acap:
kind = ssa.BoundsSlice3AcapU
case ssa.BoundsSlice3B:
kind = ssa.BoundsSlice3BU
case ssa.BoundsSlice3C:
kind = ssa.BoundsSlice3CU
}
}
var cmp *ssa.Value
if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
} else {
cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
}
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
b.AddEdgeTo(bNext)
b.AddEdgeTo(bPanic)
s.startBlock(bPanic)
if Arch.LinkArch.Family == sys.Wasm {
// TODO(khr): figure out how to do "register" based calling convention for bounds checks.
// Should be similar to gcWriteBarrier, but I can't make it work.
s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
} else {
mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
s.endBlock().SetControl(mem)
}
s.startBlock(bNext)
// In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
if base.Flag.Cfg.SpectreIndex {
op := ssa.OpSpectreIndex
if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
op = ssa.OpSpectreSliceIndex
}
idx = s.newValue2(op, types.Types[types.TINT], idx, len)
}
return idx
}
// If cmp (a bool) is false, panic using the given function.
func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bNext := s.f.NewBlock(ssa.BlockPlain)
line := s.peekPos()
pos := base.Ctxt.PosTable.Pos(line)
fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
bPanic := s.panics[fl]
if bPanic == nil {
bPanic = s.f.NewBlock(ssa.BlockPlain)
s.panics[fl] = bPanic
s.startBlock(bPanic)
// The panic call takes/returns memory to ensure that the right
// memory state is observed if the panic happens.
s.rtcall(fn, false, nil)
}
b.AddEdgeTo(bNext)
b.AddEdgeTo(bPanic)
s.startBlock(bNext)
}
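// intDivide generates SSA for the integer division or modulus n with operands
// a and b, emitting a divide-by-zero check unless b is a nonzero constant.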
func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
needcheck := true
switch b.Op {
case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
if b.AuxInt != 0 {
needcheck = false
}
}
if needcheck {
// do a size-appropriate check for zero
cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
s.check(cmp, ir.Syms.Panicdivide)
}
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
// rtcall issues a call to the given runtime function fn with the listed args.
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
s.prevCall = nil
// Write args to the stack
off := base.Ctxt.FixedFrameSize()
var callArgs []*ssa.Value
var callArgTypes []*types.Type
for _, arg := range args {
t := arg.Type
off = types.Rnd(off, t.Alignment())
size := t.Size()
callArgs = append(callArgs, arg)
callArgTypes = append(callArgTypes, t)
off += size
}
off = types.Rnd(off, int64(types.RegSize))
// Accumulate result types and offsets
offR := off
for _, t := range results {
offR = types.Rnd(offR, t.Alignment())
offR += t.Size()
}
// Issue call
var call *ssa.Value
aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(nil, callArgTypes, results))
callArgs = append(callArgs, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(results)), call)
if !returns {
// Finish block
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(call)
call.AuxInt = off - base.Ctxt.FixedFrameSize()
if len(results) > 0 {
s.Fatalf("panic call can't have results")
}
return nil
}
// Load results
res := make([]*ssa.Value, len(results))
for i, t := range results {
off = types.Rnd(off, t.Alignment())
res[i] = s.resultOfCall(call, int64(i), t)
off += t.Size()
}
off = types.Rnd(off, int64(types.PtrSize))
// Remember how much callee stack space we needed.
call.AuxInt = off
return res
}
// do *left = right for type t.
func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
s.instrument(t, left, instrumentWrite)
if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type.
s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
return
}
// store scalar fields first, so write barrier stores for
// pointer fields can be grouped together, and scalar values
// don't need to be live across the write barrier call.
// TODO: if the writebarrier pass knows how to reorder stores,
// we can do a single store here as long as skip==0.
s.storeTypeScalars(t, left, right, skip)
if skip&skipPtr == 0 && t.HasPointers() {
s.storeTypePtrs(t, left, right)
}
}
// do *left = right for all scalar (non-pointer) parts of t.
func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
switch {
case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
s.store(t, left, right)
case t.IsPtrShaped():
if t.IsPtr() && t.Elem().NotInHeap() {
s.store(t, left, right) // see issue 42032
}
// otherwise, no scalar fields.
case t.IsString():
if skip&skipLen != 0 {
return
}
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
s.store(types.Types[types.TINT], lenAddr, len)
case t.IsSlice():
if skip&skipLen == 0 {
len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
s.store(types.Types[types.TINT], lenAddr, len)
}
if skip&skipCap == 0 {
cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
s.store(types.Types[types.TINT], capAddr, cap)
}
case t.IsInterface():
// itab field doesn't need a write barrier (even though it is a pointer).
itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
s.store(types.Types[types.TUINTPTR], left, itab)
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
s.storeTypeScalars(ft, addr, val, 0)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
case t.IsArray() && t.NumElem() == 1:
s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
default:
s.Fatalf("bad write barrier type %v", t)
}
}
// do *left = right for all pointer parts of t.
func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
switch {
case t.IsPtrShaped():
if t.IsPtr() && t.Elem().NotInHeap() {
break // see issue 42032
}
s.store(t, left, right)
case t.IsString():
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
s.store(s.f.Config.Types.BytePtr, left, ptr)
case t.IsSlice():
elType := types.NewPtr(t.Elem())
ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
s.store(elType, left, ptr)
case t.IsInterface():
// itab field is treated as a scalar.
idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
if !ft.HasPointers() {
continue
}
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
s.storeTypePtrs(ft, addr, val)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
case t.IsArray() && t.NumElem() == 1:
s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
default:
s.Fatalf("bad write barrier type %v", t)
}
}
// putArg evaluates n for the purpose of passing it as an argument to a function and returns the value for the call.
func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
var a *ssa.Value
if !TypeOK(t) {
a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
} else {
a = s.expr(n)
}
return a
}
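// storeArgWithBase evaluates n and stores it as an argument of type t at
// offset off from base, copying through memory when t is not SSA-able.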
func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
pt := types.NewPtr(t)
var addr *ssa.Value
if base == s.sp {
// Use special routine that avoids allocation on duplicate offsets.
addr = s.constOffPtrSP(pt, off)
} else {
addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
}
if !TypeOK(t) {
a := s.addr(n)
s.move(t, addr, a)
return
}
a := s.expr(n)
s.storeType(t, addr, a, 0, false)
}
// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i, j, k may be nil, in which case they are set to their default values.
// v may be a slice, string or pointer to an array.
func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
t := v.Type
var ptr, len, cap *ssa.Value
switch {
case t.IsSlice():
ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v)
case t.IsString():
ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v)
len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v)
cap = len
case t.IsPtr():
if !t.Elem().IsArray() {
s.Fatalf("bad ptr to array in slice %v\n", t)
}
s.nilCheck(v)
ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
cap = len
default:
s.Fatalf("bad type in slice %v\n", t)
}
// Set default values
if i == nil {
i = s.constInt(types.Types[types.TINT], 0)
}
if j == nil {
j = len
}
three := true
if k == nil {
three = false
k = cap
}
// Panic if slice indices are not in bounds.
// Make sure we check these in reverse order so that we're always
// comparing against a value known to be nonnegative. See issue 28797.
if three {
if k != cap {
kind := ssa.BoundsSlice3Alen
if t.IsSlice() {
kind = ssa.BoundsSlice3Acap
}
k = s.boundsCheck(k, cap, kind, bounded)
}
if j != k {
j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded)
}
i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded)
} else {
if j != k {
kind := ssa.BoundsSliceAlen
if t.IsSlice() {
kind = ssa.BoundsSliceAcap
}
j = s.boundsCheck(j, k, kind, bounded)
}
i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded)
}
// Word-sized integer operations.
subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT])
mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT])
andOp := s.ssaOp(ir.OAND, types.Types[types.TINT])
// Calculate the length (rlen) and capacity (rcap) of the new slice.
// For strings the capacity of the result is unimportant. However,
// we use rcap to test whether we've generated a zero-length slice,
// so for strings we use the length for that purpose.
rlen := s.newValue2(subOp, types.Types[types.TINT], j, i)
rcap := rlen
if j != k && !t.IsString() {
rcap = s.newValue2(subOp, types.Types[types.TINT], k, i)
}
if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
// No pointer arithmetic necessary.
return ptr, rlen, rcap
}
// Calculate the base pointer (rptr) for the new slice.
//
// Generate the following code assuming that indexes are in bounds.
// The masking is to make sure that we don't generate a slice
// that points to the next object in memory. We cannot just set
// the pointer to nil because then we would create a nil slice or
// string.
//
// rcap = k - i
// rlen = j - i
// rptr = ptr + (mask(rcap) & (i * stride))
//
// Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
// of the element type.
stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Size())
// The delta is the number of bytes to offset ptr by.
delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride)
// If we're slicing to the point where the capacity is zero,
// zero out the delta.
mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap)
delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask)
// Compute rptr = ptr + delta.
rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
return rptr, rlen, rcap
}
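// u642fcvtTab holds the ops needed to convert a uint64 to a float of a given width.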
type u642fcvtTab struct {
leq, cvt2F, and, rsh, or, add ssa.Op
one func(*state, *types.Type, int64) *ssa.Value
}
var u64_f64 = u642fcvtTab{
leq: ssa.OpLeq64,
cvt2F: ssa.OpCvt64to64F,
and: ssa.OpAnd64,
rsh: ssa.OpRsh64Ux64,
or: ssa.OpOr64,
add: ssa.OpAdd64F,
one: (*state).constInt64,
}
var u64_f32 = u642fcvtTab{
leq: ssa.OpLeq64,
cvt2F: ssa.OpCvt64to32F,
and: ssa.OpAnd64,
rsh: ssa.OpRsh64Ux64,
or: ssa.OpOr64,
add: ssa.OpAdd32F,
one: (*state).constInt64,
}
func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
}
func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
}
func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = (floatY) x
// } else {
// y = uintX(x) ; y = x & 1
// z = uintX(x) ; z = z >> 1
// z = z | y
// result = floatY(z)
// result = result + result
// }
//
// Code borrowed from old code generator.
// What's going on: a large 64-bit "unsigned" value looks like a
// negative number to the hardware's integer-to-float
// conversion. However, because the mantissa is only
// 63 bits, we don't need the LSB, so instead we do an
// unsigned right shift (divide by two), convert, and
// double. However, before we do that, we need to be
// sure that we do not lose a "1" if that made the
// difference in the resulting rounding. Therefore, we
// preserve it, and OR (not ADD) it back in. The case
// that matters is when the eleven discarded bits are
// equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the
// candidate mantissa is 0.
cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvt2F, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
one := cvttab.one(s, ft, 1)
y := s.newValue2(cvttab.and, ft, x, one)
z := s.newValue2(cvttab.rsh, ft, x, one)
z = s.newValue2(cvttab.or, ft, z, y)
a := s.newValue1(cvttab.cvt2F, tt, z)
a1 := s.newValue2(cvttab.add, tt, a, a)
s.vars[n] = a1
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type())
}
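// u322fcvtTab holds the ops needed to convert a uint32 to a float of a given width.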
type u322fcvtTab struct {
cvtI2F, cvtF2F ssa.Op
}
var u32_f64 = u322fcvtTab{
cvtI2F: ssa.OpCvt32to64F,
cvtF2F: ssa.OpCopy,
}
var u32_f32 = u322fcvtTab{
cvtI2F: ssa.OpCvt32to32F,
cvtF2F: ssa.OpCvt64Fto32F,
}
func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
}
func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
}
func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = floatY(x)
// } else {
// result = floatY(float64(x) + (1<<32))
// }
cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvtI2F, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x)
twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32))
a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32)
a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
s.vars[n] = a3
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type())
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
if !n.X.Type().IsMap() && !n.X.Type().IsChan() {
s.Fatalf("node must be a map or a channel")
}
// if n == nil {
// return 0
// } else {
// // len
// return *((*int)n)
// // cap
// return *(((*int)n)+1)
// }
lenType := n.Type()
nilValue := s.constNil(types.Types[types.TUINTPTR])
cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchUnlikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
// length/capacity of a nil map/chan is zero
b.AddEdgeTo(bThen)
s.startBlock(bThen)
s.vars[n] = s.zeroVal(lenType)
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
switch n.Op() {
case ir.OLEN:
// length is stored in the first word for map/chan
s.vars[n] = s.load(lenType, x)
case ir.OCAP:
// capacity is stored in the second word for chan
sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Size(), x)
s.vars[n] = s.load(lenType, sw)
default:
s.Fatalf("op must be OLEN or OCAP")
}
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, lenType)
}
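// Illustrative sketch (not part of the compiler): the loads generated above
// amount to the following hand-written reads of the first two words of the
// runtime object, assuming (as the comments above do) that the length lives in
// word 0 and, for channels, the capacity in word 1. The struct and names are
// hypothetical stand-ins for the real runtime headers.
//
//	type chanHeaderSketch struct{ qcount, dataqsiz uintptr }
//
//	func chanLenCap(c *chanHeaderSketch) (length, capacity int) {
//		if c == nil {
//			return 0, 0 // len/cap of a nil channel is zero
//		}
//		return int(c.qcount), int(c.dataqsiz)
//	}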
type f2uCvtTab struct {
ltf, cvt2U, subf, or ssa.Op
floatValue func(*state, *types.Type, float64) *ssa.Value
intValue func(*state, *types.Type, int64) *ssa.Value
cutoff uint64
}
var f32_u64 = f2uCvtTab{
ltf: ssa.OpLess32F,
cvt2U: ssa.OpCvt32Fto64,
subf: ssa.OpSub32F,
or: ssa.OpOr64,
floatValue: (*state).constFloat32,
intValue: (*state).constInt64,
cutoff: 1 << 63,
}
var f64_u64 = f2uCvtTab{
ltf: ssa.OpLess64F,
cvt2U: ssa.OpCvt64Fto64,
subf: ssa.OpSub64F,
or: ssa.OpOr64,
floatValue: (*state).constFloat64,
intValue: (*state).constInt64,
cutoff: 1 << 63,
}
var f32_u32 = f2uCvtTab{
ltf: ssa.OpLess32F,
cvt2U: ssa.OpCvt32Fto32,
subf: ssa.OpSub32F,
or: ssa.OpOr32,
floatValue: (*state).constFloat32,
intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
cutoff: 1 << 31,
}
var f64_u32 = f2uCvtTab{
ltf: ssa.OpLess64F,
cvt2U: ssa.OpCvt64Fto32,
subf: ssa.OpSub64F,
or: ssa.OpOr32,
floatValue: (*state).constFloat64,
intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
cutoff: 1 << 31,
}
func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u64, n, x, ft, tt)
}
func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u64, n, x, ft, tt)
}
func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u32, n, x, ft, tt)
}
func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u32, n, x, ft, tt)
}
func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// cutoff:=1<<(intY_Size-1)
// if x < floatX(cutoff) {
// result = uintY(x)
// } else {
// y = x - floatX(cutoff)
// z = uintY(y)
// result = z | -(cutoff)
// }
cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvt2U, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
y := s.newValue2(cvttab.subf, ft, x, cutoff)
y = s.newValue1(cvttab.cvt2U, tt, y)
z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
a1 := s.newValue2(cvttab.or, tt, y, z)
s.vars[n] = a1
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type())
}
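// Illustrative sketch (not part of the compiler): a source-level equivalent of
// the float-to-unsigned lowering above, specialized to float64 -> uint64. The
// name exampleFloat64ToUint64 is hypothetical; as with the generated code, the
// input is assumed to be non-negative and in range.
func exampleFloat64ToUint64(x float64) uint64 {
	const cutoff = 1 << 63
	if x < cutoff {
		return uint64(int64(x)) // fits in the signed range, a single conversion suffices
	}
	y := x - cutoff                  // shift the value down into the signed range
	return uint64(int64(y)) | cutoff // OR the top bit back in, matching the generated code
}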
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic on failure or to return a boolean success value.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.X) // input interface
target := s.reflectType(n.Type()) // target type
var targetItab *ssa.Value
if n.Itab != nil {
targetItab = s.expr(n.Itab)
}
return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, targetItab, commaok)
}
func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.X)
target := s.expr(n.T)
var itab *ssa.Value
if !n.X.Type().IsEmptyInterface() && !n.Type().IsInterface() {
byteptr := s.f.Config.Types.BytePtr
itab = target
target = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)) // itab.typ
}
return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, itab, commaok)
}
// dottype1 implements an x.(T) operation. iface is the argument (x), dst is the type we're asserting to (T)
// and src is the type we're asserting from.
// target is the *runtime._type of dst.
// If src is a nonempty interface and dst is not an interface, targetItab is an itab representing (dst, src). Otherwise it is nil.
// commaok is true if the caller wants a boolean success value. Otherwise, the generated code panics if the conversion fails.
func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, target, targetItab *ssa.Value, commaok bool) (res, resok *ssa.Value) {
byteptr := s.f.Config.Types.BytePtr
if dst.IsInterface() {
if dst.IsEmptyInterface() {
// Converting to an empty interface.
// Input could be an empty or nonempty interface.
if base.Debug.TypeAssert > 0 {
base.WarnfAt(pos, "type assertion inlined")
}
// Get itab/type field from input.
itab := s.newValue1(ssa.OpITab, byteptr, iface)
// Conversion succeeds iff that field is not nil.
cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
if src.IsEmptyInterface() && commaok {
// Converting empty interface to empty interface with ,ok is just a nil check.
return iface, cond
}
// Branch on nilness.
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
b.Likely = ssa.BranchLikely
bOk := s.f.NewBlock(ssa.BlockPlain)
bFail := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bOk)
b.AddEdgeTo(bFail)
if !commaok {
// On failure, panic by calling panicnildottype.
s.startBlock(bFail)
s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
// On success, return (perhaps modified) input interface.
s.startBlock(bOk)
if src.IsEmptyInterface() {
res = iface // Use input interface unchanged.
return
}
// Load type out of itab, build interface with existing idata.
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
typ := s.load(byteptr, off)
idata := s.newValue1(ssa.OpIData, byteptr, iface)
res = s.newValue2(ssa.OpIMake, dst, typ, idata)
return
}
s.startBlock(bOk)
// nonempty -> empty
// Need to load type from itab
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
s.vars[typVar] = s.load(byteptr, off)
s.endBlock()
// itab is nil, might as well use that as the nil result.
s.startBlock(bFail)
s.vars[typVar] = itab
s.endBlock()
// Merge point.
bEnd := s.f.NewBlock(ssa.BlockPlain)
bOk.AddEdgeTo(bEnd)
bFail.AddEdgeTo(bEnd)
s.startBlock(bEnd)
idata := s.newValue1(ssa.OpIData, byteptr, iface)
res = s.newValue2(ssa.OpIMake, dst, s.variable(typVar, byteptr), idata)
resok = cond
delete(s.vars, typVar)
return
}
// converting to a nonempty interface needs a runtime call.
if base.Debug.TypeAssert > 0 {
base.WarnfAt(pos, "type assertion not inlined")
}
if !commaok {
fn := ir.Syms.AssertI2I
if src.IsEmptyInterface() {
fn = ir.Syms.AssertE2I
}
data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
tab := s.newValue1(ssa.OpITab, byteptr, iface)
tab = s.rtcall(fn, true, []*types.Type{byteptr}, target, tab)[0]
return s.newValue2(ssa.OpIMake, dst, tab, data), nil
}
fn := ir.Syms.AssertI2I2
if src.IsEmptyInterface() {
fn = ir.Syms.AssertE2I2
}
res = s.rtcall(fn, true, []*types.Type{dst}, target, iface)[0]
resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(dst))
return
}
if base.Debug.TypeAssert > 0 {
base.WarnfAt(pos, "type assertion inlined")
}
// Converting to a concrete type.
direct := types.IsDirectIface(dst)
itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
var wantedFirstWord *ssa.Value
if src.IsEmptyInterface() {
// Looking for pointer to target type.
wantedFirstWord = target
} else {
// Looking for pointer to itab for target type and source interface.
wantedFirstWord = targetItab
}
var tmp ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
if commaok && !TypeOK(dst) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
tmp, addr = s.temp(pos, dst)
}
cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, wantedFirstWord)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
b.Likely = ssa.BranchLikely
bOk := s.f.NewBlock(ssa.BlockPlain)
bFail := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bOk)
b.AddEdgeTo(bFail)
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
taddr := s.reflectType(src)
if src.IsEmptyInterface() {
s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
} else {
s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr)
}
// on success, return data from interface
s.startBlock(bOk)
if direct {
return s.newValue1(ssa.OpIData, dst, iface), nil
}
p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
return s.load(dst, p), nil
}
// commaok is the more complicated case because we have
// a control flow merge point.
bEnd := s.f.NewBlock(ssa.BlockPlain)
// Note that we need a new valVar each time (unlike okVar where we can
// reuse the variable) because it might have a different type every time.
valVar := ssaMarker("val")
// type assertion succeeded
s.startBlock(bOk)
if tmp == nil {
if direct {
s.vars[valVar] = s.newValue1(ssa.OpIData, dst, iface)
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
s.vars[valVar] = s.load(dst, p)
}
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
s.move(dst, addr, p)
}
s.vars[okVar] = s.constBool(true)
s.endBlock()
bOk.AddEdgeTo(bEnd)
// type assertion failed
s.startBlock(bFail)
if tmp == nil {
s.vars[valVar] = s.zeroVal(dst)
} else {
s.zero(dst, addr)
}
s.vars[okVar] = s.constBool(false)
s.endBlock()
bFail.AddEdgeTo(bEnd)
// merge point
s.startBlock(bEnd)
if tmp == nil {
res = s.variable(valVar, dst)
delete(s.vars, valVar)
} else {
res = s.load(dst, addr)
s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp.(*ir.Name), s.mem())
}
resok = s.variable(okVar, types.Types[types.TBOOL])
delete(s.vars, okVar)
return res, resok
}
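// Illustrative sketch (not part of the compiler): for a comma-ok assertion
// e.(T) where T is a concrete, direct-interface type and e is an empty
// interface, the code produced above behaves like this hand-written check.
// typeWord/dataWord, asT, zeroOfT and wantedTypePtr are hypothetical stand-ins
// for the interface words and the *runtime._type pointer compared against.
//
//	typ, data := typeWord(e), dataWord(e)
//	if typ == wantedTypePtr {
//		val, ok = asT(data), true // direct types keep the value in the data word
//	} else {
//		val, ok = zeroOfT, false
//	}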
// temp allocates a temp of type t at position pos
func (s *state) temp(pos src.XPos, t *types.Type) (*ir.Name, *ssa.Value) {
tmp := typecheck.TempAt(pos, s.curfn, t)
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
addr := s.addr(tmp)
return tmp, addr
}
// variable returns the value of a variable at the current location.
func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value {
v := s.vars[n]
if v != nil {
return v
}
v = s.fwdVars[n]
if v != nil {
return v
}
if s.curBlock == s.f.Entry {
// No variable should be live at entry.
s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, n, v)
}
// Make a FwdRef, which records a value that's live on block input.
// We'll find the matching definition as part of insertPhis.
v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n})
s.fwdVars[n] = v
if n.Op() == ir.ONAME {
s.addNamedValue(n.(*ir.Name), v)
}
return v
}
func (s *state) mem() *ssa.Value {
return s.variable(memVar, types.TypeMem)
}
func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) {
if n.Class == ir.Pxxx {
// Don't track our marker nodes (memVar etc.).
return
}
if ir.IsAutoTmp(n) {
// Don't track temporary variables.
return
}
if n.Class == ir.PPARAMOUT {
// Don't track named output values. This prevents return values
// from being assigned too early. See #14591 and #14762. TODO: allow this.
return
}
loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0}
values, ok := s.f.NamedValues[loc]
if !ok {
s.f.Names = append(s.f.Names, &loc)
s.f.CanonicalLocalSlots[loc] = &loc
}
s.f.NamedValues[loc] = append(values, v)
}
// Branch is an unresolved branch.
type Branch struct {
P *obj.Prog // branch instruction
B *ssa.Block // target
}
// State contains state needed during Prog generation.
type State struct {
ABI obj.ABI
pp *objw.Progs
// Branches remembers all the branch instructions we've seen
// and where they would like to go.
Branches []Branch
// bstart remembers where each block starts (indexed by block ID)
bstart []*obj.Prog
maxarg int64 // largest frame size for arguments to calls made by the function
// Map from GC safe points to liveness index, generated by
// liveness analysis.
livenessMap liveness.Map
// partLiveArgs includes arguments that may be partially live, for which we
// need to generate instructions that spill the argument registers.
partLiveArgs map[*ir.Name]bool
// lineRunStart records the beginning of the current run of instructions
// within a single block sharing the same line number
// Used to move statement marks to the beginning of such runs.
lineRunStart *obj.Prog
// wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
OnWasmStackSkipped int
}
func (s *State) FuncInfo() *obj.FuncInfo {
return s.pp.CurFunc.LSym.Func()
}
// Prog appends a new Prog.
func (s *State) Prog(as obj.As) *obj.Prog {
p := s.pp.Prog(as)
if objw.LosesStmtMark(as) {
return p
}
// Float a statement start to the beginning of any same-line run.
// lineRunStart is reset at block boundaries, which appears to work well.
if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
s.lineRunStart = p
} else if p.Pos.IsStmt() == src.PosIsStmt {
s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
p.Pos = p.Pos.WithNotStmt()
}
return p
}
// Pc returns the current Prog.
func (s *State) Pc() *obj.Prog {
return s.pp.Next
}
// SetPos sets the current source position.
func (s *State) SetPos(pos src.XPos) {
s.pp.Pos = pos
}
// Br emits a single branch instruction and returns the instruction.
// Not all architectures need the returned instruction, but otherwise
// the boilerplate is common to all.
func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog {
p := s.Prog(op)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, Branch{P: p, B: target})
return p
}
// DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
// that reduce "jumpy" line number churn when debugging.
// Spill/fill/copy instructions from the register allocator,
// phi functions, and instructions with a no-pos position
// are examples of instructions that can cause churn.
func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
switch v.Op {
case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
// These are not statements
s.SetPos(v.Pos.WithNotStmt())
default:
p := v.Pos
if p != src.NoXPos {
// If the position is defined, update the position.
// Also convert default IsStmt to NotStmt; only
// explicit statement boundaries should appear
// in the generated code.
if p.IsStmt() != src.PosIsStmt {
p = p.WithNotStmt()
// Calls use the pos attached to v, but copy the statement mark from State
}
s.SetPos(p)
} else {
s.SetPos(s.pp.Pos.WithNotStmt())
}
}
}
// emitArgInfo emits argument info (locations on stack) for traceback.
func emitArgInfo(e *ssafn, f *ssa.Func, pp *objw.Progs) {
ft := e.curfn.Type()
if ft.NumRecvs() == 0 && ft.NumParams() == 0 {
return
}
x := EmitArgInfo(e.curfn, f.OwnAux.ABIInfo())
x.Set(obj.AttrContentAddressable, true)
e.curfn.LSym.Func().ArgInfo = x
// Emit a funcdata pointing at the arg info data.
p := pp.Prog(obj.AFUNCDATA)
p.From.SetConst(objabi.FUNCDATA_ArgInfo)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = x
}
// EmitArgInfo generates the argument info (locations on stack) of f for traceback and returns it as a symbol.
func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym {
x := base.Ctxt.Lookup(fmt.Sprintf("%s.arginfo%d", f.LSym.Name, f.ABI))
// NOTE: do not set ContentAddressable here. This may be referenced from
// assembly code by name (in this case f is a declaration).
// Instead, set it in emitArgInfo above.
PtrSize := int64(types.PtrSize)
uintptrTyp := types.Types[types.TUINTPTR]
isAggregate := func(t *types.Type) bool {
return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice()
}
// Populate the data.
// The data is a stream of bytes, which contains the offsets and sizes of the
// non-aggregate arguments or non-aggregate fields/elements of aggregate-typed
// arguments, along with special "operators". Specifically,
// - for each non-aggregate arg/field/element, its offset from FP (1 byte) and
// size (1 byte)
// - special operators:
// - 0xff - end of sequence
// - 0xfe - print { (at the start of an aggregate-typed argument)
// - 0xfd - print } (at the end of an aggregate-typed argument)
// - 0xfc - print ... (more args/fields/elements)
// - 0xfb - print _ (offset too large)
// These constants need to be in sync with runtime.traceback.go:printArgs.
const (
_endSeq = 0xff
_startAgg = 0xfe
_endAgg = 0xfd
_dotdotdot = 0xfc
_offsetTooLarge = 0xfb
_special = 0xf0 // above this are operators, below this are ordinary offsets
)
const (
limit = 10 // print no more than 10 args/components
maxDepth = 5 // no more than 5 layers of nesting
// maxLen is a (conservative) upper bound of the byte stream length. For
// each arg/component, it has no more than 2 bytes of data (size, offset),
// and no more than one {, }, ... at each level (it cannot have both the
// data and ... unless it is the last one, just be conservative). Plus 1
// for _endSeq.
maxLen = (maxDepth*3+2)*limit + 1
)
wOff := 0
n := 0
writebyte := func(o uint8) { wOff = objw.Uint8(x, wOff, o) }
// Write one non-aggregate arg/field/element.
write1 := func(sz, offset int64) {
if offset >= _special {
writebyte(_offsetTooLarge)
} else {
writebyte(uint8(offset))
writebyte(uint8(sz))
}
n++
}
// Visit t recursively and write it out.
// Returns whether to continue visiting.
var visitType func(baseOffset int64, t *types.Type, depth int) bool
visitType = func(baseOffset int64, t *types.Type, depth int) bool {
if n >= limit {
writebyte(_dotdotdot)
return false
}
if !isAggregate(t) {
write1(t.Size(), baseOffset)
return true
}
writebyte(_startAgg)
depth++
if depth >= maxDepth {
writebyte(_dotdotdot)
writebyte(_endAgg)
n++
return true
}
switch {
case t.IsInterface(), t.IsString():
_ = visitType(baseOffset, uintptrTyp, depth) &&
visitType(baseOffset+PtrSize, uintptrTyp, depth)
case t.IsSlice():
_ = visitType(baseOffset, uintptrTyp, depth) &&
visitType(baseOffset+PtrSize, uintptrTyp, depth) &&
visitType(baseOffset+PtrSize*2, uintptrTyp, depth)
case t.IsComplex():
_ = visitType(baseOffset, types.FloatForComplex(t), depth) &&
visitType(baseOffset+t.Size()/2, types.FloatForComplex(t), depth)
case t.IsArray():
if t.NumElem() == 0 {
n++ // {} counts as a component
break
}
for i := int64(0); i < t.NumElem(); i++ {
if !visitType(baseOffset, t.Elem(), depth) {
break
}
baseOffset += t.Elem().Size()
}
case t.IsStruct():
if t.NumFields() == 0 {
n++ // {} counts as a component
break
}
for _, field := range t.Fields().Slice() {
if !visitType(baseOffset+field.Offset, field.Type, depth) {
break
}
}
}
writebyte(_endAgg)
return true
}
start := 0
if strings.Contains(f.LSym.Name, "[") {
// Skip the dictionary argument - it is implicit and the user doesn't need to see it.
start = 1
}
for _, a := range abiInfo.InParams()[start:] {
if !visitType(a.FrameOffset(abiInfo), a.Type, 0) {
break
}
}
writebyte(_endSeq)
if wOff > maxLen {
base.Fatalf("ArgInfo too large")
}
return x
}
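// Worked example (an editorial illustration; the frame offsets are assumptions,
// not taken from a real ABI assignment): for func f(x int64, s string) with both
// arguments on the stack at FP+0 and FP+8 on a 64-bit target, the stream written
// above would be
//
//	0x00 0x08            x: offset 0, size 8
//	0xfe                 start of the aggregate s
//	0x08 0x08            s.ptr: offset 8, size 8
//	0x10 0x08            s.len: offset 16, size 8
//	0xfd                 end of the aggregate
//	0xff                 end of sequence
//
// which the runtime's printArgs renders roughly as f(0x..., {0x..., 0x...}).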
// genssa appends entries to pp for each instruction in f.
func genssa(f *ssa.Func, pp *objw.Progs) {
var s State
s.ABI = f.OwnAux.Fn.ABI()
e := f.Frontend().(*ssafn)
s.livenessMap, s.partLiveArgs = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
emitArgInfo(e, f, pp)
argLiveBlockMap, argLiveValueMap := liveness.ArgLiveness(e.curfn, f, pp)
openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
if openDeferInfo != nil {
// This function uses open-coded defers -- write out the funcdata
// info that we computed at the end of genssa.
p := pp.Prog(obj.AFUNCDATA)
p.From.SetConst(objabi.FUNCDATA_OpenCodedDeferInfo)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = openDeferInfo
}
// Remember where each block starts.
s.bstart = make([]*obj.Prog, f.NumBlocks())
s.pp = pp
var progToValue map[*obj.Prog]*ssa.Value
var progToBlock map[*obj.Prog]*ssa.Block
var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
gatherPrintInfo := f.PrintOrHtmlSSA || ssa.GenssaDump[f.Name]
if gatherPrintInfo {
progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
f.Logf("genssa %s\n", f.Name)
progToBlock[s.pp.Next] = f.Blocks[0]
}
if base.Ctxt.Flag_locationlists {
if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
}
valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
for i := range valueToProgAfter {
valueToProgAfter[i] = nil
}
}
// If the very first instruction is not tagged as a statement,
// debuggers may attribute it to the previous function in the program.
firstPos := src.NoXPos
for _, v := range f.Entry.Values {
if v.Pos.IsStmt() == src.PosIsStmt {
firstPos = v.Pos
v.Pos = firstPos.WithDefaultStmt()
break
}
}
// inlMarks has an entry for each Prog that implements an inline mark.
// It maps from that Prog to the global inlining id of the inlined body
// which should unwind to this Prog's location.
var inlMarks map[*obj.Prog]int32
var inlMarkList []*obj.Prog
// inlMarksByPos maps from a (column 1) source position to the set of
// Progs that are in the set above and have that source position.
var inlMarksByPos map[src.XPos][]*obj.Prog
var argLiveIdx int = -1 // argument liveness info index
// Emit basic blocks
for i, b := range f.Blocks {
s.bstart[b.ID] = s.pp.Next
s.lineRunStart = nil
// Attach a "default" liveness info. Normally this will be
// overwritten in the Values loop below for each Value. But
// for an empty block this will be used for its control
// instruction. We won't use the actual liveness map on a
// control instruction. Just mark it something that is
// preemptible, unless this function is "all unsafe".
s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx {
argLiveIdx = idx
p := s.pp.Prog(obj.APCDATA)
p.From.SetConst(objabi.PCDATA_ArgLiveIndex)
p.To.SetConst(int64(idx))
}
// Emit values in block
Arch.SSAMarkMoves(&s, b)
for _, v := range b.Values {
x := s.pp.Next
s.DebugFriendlySetPosFrom(v)
if v.Op.ResultInArg0() && v.ResultReg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
switch v.Op {
case ssa.OpInitMem:
// memory arg needs no code
case ssa.OpArg:
// input args need no code
case ssa.OpSP, ssa.OpSB:
// nothing to do
case ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult:
// nothing to do
case ssa.OpGetG:
// nothing to do when there's a g register,
// and checkLower complains if there's not
case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpVarKill:
// nothing to do; already used by liveness
case ssa.OpPhi:
CheckLoweredPhi(v)
case ssa.OpConvert:
// nothing to do; no-op conversion for liveness
if v.Args[0].Reg() != v.Reg() {
v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
}
case ssa.OpInlMark:
p := Arch.Ginsnop(s.pp)
if inlMarks == nil {
inlMarks = map[*obj.Prog]int32{}
inlMarksByPos = map[src.XPos][]*obj.Prog{}
}
inlMarks[p] = v.AuxInt32()
inlMarkList = append(inlMarkList, p)
pos := v.Pos.AtColumn1()
inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
default:
// Special case for first line in function; move it to the start (which cannot be a register-valued instruction)
if firstPos != src.NoXPos && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg {
s.SetPos(firstPos)
firstPos = src.NoXPos
}
// Attach this safe point to the next
// instruction.
s.pp.NextLive = s.livenessMap.Get(v)
// let the backend handle it
Arch.SSAGenValue(&s, v)
}
if idx, ok := argLiveValueMap[v.ID]; ok && idx != argLiveIdx {
argLiveIdx = idx
p := s.pp.Prog(obj.APCDATA)
p.From.SetConst(objabi.PCDATA_ArgLiveIndex)
p.To.SetConst(int64(idx))
}
if base.Ctxt.Flag_locationlists {
valueToProgAfter[v.ID] = s.pp.Next
}
if gatherPrintInfo {
for ; x != s.pp.Next; x = x.Link {
progToValue[x] = v
}
}
}
// If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
p := Arch.Ginsnop(s.pp)
p.Pos = p.Pos.WithIsStmt()
if b.Pos == src.NoXPos {
b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652.
if b.Pos == src.NoXPos {
b.Pos = pp.Text.Pos // Sometimes p.Pos is empty. See #35695.
}
}
b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number
}
// Emit control flow instructions for block
var next *ssa.Block
if i < len(f.Blocks)-1 && base.Flag.N == 0 {
// If -N, leave next==nil so every block with successors
// ends in a JMP (except call blocks - plive doesn't like
// select{send,recv} followed by a JMP call). Helps keep
// line numbers for otherwise empty blocks.
next = f.Blocks[i+1]
}
x := s.pp.Next
s.SetPos(b.Pos)
Arch.SSAGenBlock(&s, b, next)
if gatherPrintInfo {
for ; x != s.pp.Next; x = x.Link {
progToBlock[x] = b
}
}
}
if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit {
// We need the return address of a panic call to
// still be inside the function in question. So if
// it ends in a call which doesn't return, add a
// nop (which will never execute) after the call.
Arch.Ginsnop(pp)
}
if openDeferInfo != nil {
// When doing open-coded defers, generate a disconnected call to
// deferreturn and a return. This will be used during panic
// recovery to unwind the stack and return to the runtime.
s.pp.NextLive = s.livenessMap.DeferReturn
p := pp.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Deferreturn
// Load results into registers. So when a deferred function
// recovers a panic, it will return to caller with right results.
// The results are already in memory, because they are not SSA'd
// when the function has defers (see canSSAName).
for _, o := range f.OwnAux.ABIInfo().OutParams() {
n := o.Name.(*ir.Name)
rts, offs := o.RegisterTypesAndOffsets()
for i := range o.Registers {
Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i])
}
}
pp.Prog(obj.ARET)
}
if inlMarks != nil {
// We have some inline marks. Try to find other instructions we're
// going to emit anyway, and use those instructions instead of the
// inline marks.
for p := pp.Text; p != nil; p = p.Link {
if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm {
// Don't use 0-sized instructions as inline marks, because we need
// to identify inline mark instructions by pc offset.
// (Some of these instructions are sometimes zero-sized, sometimes not.
// We must not use anything that even might be zero-sized.)
// TODO: are there others?
continue
}
if _, ok := inlMarks[p]; ok {
// Don't use inline marks themselves. We don't know
// whether they will be zero-sized or not yet.
continue
}
pos := p.Pos.AtColumn1()
s := inlMarksByPos[pos]
if len(s) == 0 {
continue
}
for _, m := range s {
// We found an instruction with the same source position as
// some of the inline marks.
// Use this instruction instead.
p.Pos = p.Pos.WithIsStmt() // promote position to a statement
pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m])
// Make the inline mark a real nop, so it doesn't generate any code.
m.As = obj.ANOP
m.Pos = src.NoXPos
m.From = obj.Addr{}
m.To = obj.Addr{}
}
delete(inlMarksByPos, pos)
}
// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
for _, p := range inlMarkList {
if p.As != obj.ANOP {
pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p])
}
}
}
if base.Ctxt.Flag_locationlists {
var debugInfo *ssa.FuncDebug
if e.curfn.ABI == obj.ABIInternal && base.Flag.N != 0 {
debugInfo = ssa.BuildFuncDebugNoOptimized(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
} else {
debugInfo = ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
}
e.curfn.DebugInfo = debugInfo
bstart := s.bstart
idToIdx := make([]int, f.NumBlocks())
for i, b := range f.Blocks {
idToIdx[b.ID] = i
}
// Note that at this moment, Prog.Pc is a sequence number; it's
// not a real PC until after assembly, so this mapping has to
// be done later.
debugInfo.GetPC = func(b, v ssa.ID) int64 {
switch v {
case ssa.BlockStart.ID:
if b == f.Entry.ID {
return 0 // Start at the very beginning, at the assembler-generated prologue.
// this should only happen for function args (ssa.OpArg)
}
return bstart[b].Pc
case ssa.BlockEnd.ID:
blk := f.Blocks[idToIdx[b]]
nv := len(blk.Values)
return valueToProgAfter[blk.Values[nv-1].ID].Pc
case ssa.FuncEnd.ID:
return e.curfn.LSym.Size
default:
return valueToProgAfter[v].Pc
}
}
}
// Resolve branches, and relax DefaultStmt into NotStmt
for _, br := range s.Branches {
br.P.To.SetTarget(s.bstart[br.B.ID])
if br.P.Pos.IsStmt() != src.PosIsStmt {
br.P.Pos = br.P.Pos.WithNotStmt()
} else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
br.P.Pos = br.P.Pos.WithNotStmt()
}
}
if e.log { // spew to stdout
filename := ""
for p := pp.Text; p != nil; p = p.Link {
if p.Pos.IsKnown() && p.InnermostFilename() != filename {
filename = p.InnermostFilename()
f.Logf("# %s\n", filename)
}
var s string
if v, ok := progToValue[p]; ok {
s = v.String()
} else if b, ok := progToBlock[p]; ok {
s = b.String()
} else {
s = " " // most value and branch strings are 2-3 characters long
}
f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
}
}
if f.HTMLWriter != nil { // spew to ssa.html
var buf bytes.Buffer
buf.WriteString("<code>")
buf.WriteString("<dl class=\"ssa-gen\">")
filename := ""
for p := pp.Text; p != nil; p = p.Link {
// Don't spam every line with the file name, which is often huge.
// Only print changes, and "unknown" is not a change.
if p.Pos.IsKnown() && p.InnermostFilename() != filename {
filename = p.InnermostFilename()
buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
buf.WriteString(html.EscapeString("# " + filename))
buf.WriteString("</dd>")
}
buf.WriteString("<dt class=\"ssa-prog-src\">")
if v, ok := progToValue[p]; ok {
buf.WriteString(v.HTML())
} else if b, ok := progToBlock[p]; ok {
buf.WriteString("<b>" + b.HTML() + "</b>")
}
buf.WriteString("</dt>")
buf.WriteString("<dd class=\"ssa-prog\">")
buf.WriteString(fmt.Sprintf("%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString())))
buf.WriteString("</dd>")
}
buf.WriteString("</dl>")
buf.WriteString("</code>")
f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
}
if ssa.GenssaDump[f.Name] {
fi := f.DumpFileForPhase("genssa")
if fi != nil {
// inliningDiffers if any filename changes or if any line number except the innermost (index 0) changes.
inliningDiffers := func(a, b []src.Pos) bool {
if len(a) != len(b) {
return true
}
for i := range a {
if a[i].Filename() != b[i].Filename() {
return true
}
if i > 0 && a[i].Line() != b[i].Line() {
return true
}
}
return false
}
var allPosOld []src.Pos
var allPos []src.Pos
for p := pp.Text; p != nil; p = p.Link {
if p.Pos.IsKnown() {
allPos = p.AllPos(allPos)
if inliningDiffers(allPos, allPosOld) {
for i := len(allPos) - 1; i >= 0; i-- {
pos := allPos[i]
fmt.Fprintf(fi, "# %s:%d\n", pos.Filename(), pos.Line())
}
allPos, allPosOld = allPosOld, allPos // swap, not copy, so that they do not share slice storage.
}
}
var s string
if v, ok := progToValue[p]; ok {
s = v.String()
} else if b, ok := progToBlock[p]; ok {
s = b.String()
} else {
s = " " // most value and branch strings are 2-3 characters long
}
fmt.Fprintf(fi, " %-6s\t%.5d %s\t%s\n", s, p.Pc, ssa.StmtString(p.Pos), p.InstructionString())
}
fi.Close()
}
}
defframe(&s, e, f)
f.HTMLWriter.Close()
f.HTMLWriter = nil
}
func defframe(s *State, e *ssafn, f *ssa.Func) {
pp := s.pp
frame := types.Rnd(s.maxarg+e.stksize, int64(types.RegSize))
if Arch.PadFrame != nil {
frame = Arch.PadFrame(frame)
}
// Fill in argument and frame size.
pp.Text.To.Type = obj.TYPE_TEXTSIZE
pp.Text.To.Val = int32(types.Rnd(f.OwnAux.ArgWidth(), int64(types.RegSize)))
pp.Text.To.Offset = frame
p := pp.Text
// Insert code to spill argument registers if the named slot may be partially
// live. That is, the named slot is considered live by liveness analysis,
// (because a part of it is live), but we may not spill all parts into the
// slot. This can only happen with aggregate-typed arguments that are SSA-able
// and not address-taken (for non-SSA-able or address-taken arguments we always
// spill upfront).
// Note: spilling is unnecessary in the -N/no-optimize case, since all values
// will be considered non-SSAable and spilled up front.
// TODO(register args) Make liveness more fine-grained so that partial spilling is okay.
if f.OwnAux.ABIInfo().InRegistersUsed() != 0 && base.Flag.N == 0 {
// First, see if it is already spilled before it may be live. Look for a spill
// in the entry block up to the first safepoint.
type nameOff struct {
n *ir.Name
off int64
}
partLiveArgsSpilled := make(map[nameOff]bool)
for _, v := range f.Entry.Values {
if v.Op.IsCall() {
break
}
if v.Op != ssa.OpStoreReg || v.Args[0].Op != ssa.OpArgIntReg {
continue
}
n, off := ssa.AutoVar(v)
if n.Class != ir.PPARAM || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] {
continue
}
partLiveArgsSpilled[nameOff{n, off}] = true
}
// Then, insert code to spill registers if not already.
for _, a := range f.OwnAux.ABIInfo().InParams() {
n, ok := a.Name.(*ir.Name)
if !ok || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 {
continue
}
rts, offs := a.RegisterTypesAndOffsets()
for i := range a.Registers {
if !rts[i].HasPointers() {
continue
}
if partLiveArgsSpilled[nameOff{n, offs[i]}] {
continue // already spilled
}
reg := ssa.ObjRegForAbiReg(a.Registers[i], f.Config)
p = Arch.SpillArgReg(pp, p, f, rts[i], reg, n, offs[i])
}
}
}
// Insert code to zero ambiguously live variables so that the
// garbage collector only sees initialized values when it
// looks for pointers.
var lo, hi int64
// Opaque state for backend to use. Current backends use it to
// keep track of which helper registers have been zeroed.
var state uint32
// Iterate through declarations. Autos are sorted in decreasing
// frame offset order.
for _, n := range e.curfn.Dcl {
if !n.Needzero() {
continue
}
if n.Class != ir.PAUTO {
e.Fatalf(n.Pos(), "needzero class %d", n.Class)
}
if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 {
e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_)
}
if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*types.RegSize) {
// Merge with range we already have.
lo = n.FrameOffset()
continue
}
// Zero old range
p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
// Set new range.
lo = n.FrameOffset()
hi = lo + n.Type().Size()
}
// Zero final range.
Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
}
// IndexJump is used for generating consecutive jump instructions to model a specific branching pattern.
type IndexJump struct {
Jump obj.As
Index int
}
func (s *State) oneJump(b *ssa.Block, jump *IndexJump) {
p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
p.Pos = b.Pos
}
// CombJump generates combinational instructions (2 at present) for a block jump,
// so that the behaviour of non-standard condition codes can be simulated
func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
switch next {
case b.Succs[0].Block():
s.oneJump(b, &jumps[0][0])
s.oneJump(b, &jumps[0][1])
case b.Succs[1].Block():
s.oneJump(b, &jumps[1][0])
s.oneJump(b, &jumps[1][1])
default:
var q *obj.Prog
if b.Likely != ssa.BranchUnlikely {
s.oneJump(b, &jumps[1][0])
s.oneJump(b, &jumps[1][1])
q = s.Br(obj.AJMP, b.Succs[1].Block())
} else {
s.oneJump(b, &jumps[0][0])
s.oneJump(b, &jumps[0][1])
q = s.Br(obj.AJMP, b.Succs[0].Block())
}
q.Pos = b.Pos
}
}
// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
AddAux2(a, v, v.AuxInt)
}
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
v.Fatalf("bad AddAux addr %v", a)
}
// add integer offset
a.Offset += offset
// If no additional symbol offset, we're done.
if v.Aux == nil {
return
}
// Add symbol's offset from its base register.
switch n := v.Aux.(type) {
case *ssa.AuxCall:
a.Name = obj.NAME_EXTERN
a.Sym = n.Fn
case *obj.LSym:
a.Name = obj.NAME_EXTERN
a.Sym = n
case *ir.Name:
if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
a.Name = obj.NAME_PARAM
a.Sym = ir.Orig(n).(*ir.Name).Linksym()
a.Offset += n.FrameOffset()
break
}
a.Name = obj.NAME_AUTO
if n.Class == ir.PPARAMOUT {
a.Sym = ir.Orig(n).(*ir.Name).Linksym()
} else {
a.Sym = n.Linksym()
}
a.Offset += n.FrameOffset()
default:
v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
}
}
// extendIndex extends v to a full int width.
// It panics with the given kind if v does not fit in an int (only on 32-bit archs).
func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
size := idx.Type.Size()
if size == s.config.PtrSize {
return idx
}
if size > s.config.PtrSize {
// truncate 64-bit indexes on 32-bit pointer archs. Test the
// high word and branch to out-of-bounds failure if it is not 0.
var lo *ssa.Value
if idx.Type.IsSigned() {
lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx)
} else {
lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx)
}
if bounded || base.Flag.B != 0 {
return lo
}
bNext := s.f.NewBlock(ssa.BlockPlain)
bPanic := s.f.NewBlock(ssa.BlockExit)
hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx)
cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0))
if !idx.Type.IsSigned() {
switch kind {
case ssa.BoundsIndex:
kind = ssa.BoundsIndexU
case ssa.BoundsSliceAlen:
kind = ssa.BoundsSliceAlenU
case ssa.BoundsSliceAcap:
kind = ssa.BoundsSliceAcapU
case ssa.BoundsSliceB:
kind = ssa.BoundsSliceBU
case ssa.BoundsSlice3Alen:
kind = ssa.BoundsSlice3AlenU
case ssa.BoundsSlice3Acap:
kind = ssa.BoundsSlice3AcapU
case ssa.BoundsSlice3B:
kind = ssa.BoundsSlice3BU
case ssa.BoundsSlice3C:
kind = ssa.BoundsSlice3CU
}
}
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
b.AddEdgeTo(bNext)
b.AddEdgeTo(bPanic)
s.startBlock(bPanic)
mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem())
s.endBlock().SetControl(mem)
s.startBlock(bNext)
return lo
}
// Extend value to the required size
var op ssa.Op
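// The switch below keys on 10*size+PtrSize: for example, a 2-byte index on a
// target with 8-byte pointers gives 28 and selects the 16-to-64 extension.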
if idx.Type.IsSigned() {
switch 10*size + s.config.PtrSize {
case 14:
op = ssa.OpSignExt8to32
case 18:
op = ssa.OpSignExt8to64
case 24:
op = ssa.OpSignExt16to32
case 28:
op = ssa.OpSignExt16to64
case 48:
op = ssa.OpSignExt32to64
default:
s.Fatalf("bad signed index extension %s", idx.Type)
}
} else {
switch 10*size + s.config.PtrSize {
case 14:
op = ssa.OpZeroExt8to32
case 18:
op = ssa.OpZeroExt8to64
case 24:
op = ssa.OpZeroExt16to32
case 28:
op = ssa.OpZeroExt16to64
case 48:
op = ssa.OpZeroExt32to64
default:
s.Fatalf("bad unsigned index extension %s", idx.Type)
}
}
return s.newValue1(op, types.Types[types.TINT], idx)
}
// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
// Called during ssaGenValue.
func CheckLoweredPhi(v *ssa.Value) {
if v.Op != ssa.OpPhi {
v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
}
if v.Type.IsMemory() {
return
}
f := v.Block.Func
loc := f.RegAlloc[v.ID]
for _, a := range v.Args {
if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
}
}
}
// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block,
// except for incoming in-register arguments.
// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
// That register contains the closure pointer on closure entry.
func CheckLoweredGetClosurePtr(v *ssa.Value) {
entry := v.Block.Func.Entry
if entry != v.Block {
base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
}
for _, w := range entry.Values {
if w == v {
break
}
switch w.Op {
case ssa.OpArgIntReg, ssa.OpArgFloatReg:
// okay
default:
base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
}
}
}
// CheckArgReg ensures that v is in the function's entry block.
func CheckArgReg(v *ssa.Value) {
entry := v.Block.Func.Entry
if entry != v.Block {
base.Fatalf("in %s, badly placed ArgIReg or ArgFReg: %v %v", v.Block.Func.Name, v.Block, v)
}
}
func AddrAuto(a *obj.Addr, v *ssa.Value) {
n, off := ssa.AutoVar(v)
a.Type = obj.TYPE_MEM
a.Sym = n.Linksym()
a.Reg = int16(Arch.REGSP)
a.Offset = n.FrameOffset() + off
if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
a.Name = obj.NAME_PARAM
} else {
a.Name = obj.NAME_AUTO
}
}
// Call returns a new CALL instruction for the SSA value v.
// It uses PrepareCall to prepare the call.
func (s *State) Call(v *ssa.Value) *obj.Prog {
pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness of the call comes from ssaGenState
s.PrepareCall(v)
p := s.Prog(obj.ACALL)
if pPosIsStmt == src.PosIsStmt {
p.Pos = v.Pos.WithIsStmt()
} else {
p.Pos = v.Pos.WithNotStmt()
}
if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = sym.Fn
} else {
// TODO(mdempsky): Can these differences be eliminated?
switch Arch.LinkArch.Family {
case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
p.To.Type = obj.TYPE_REG
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
p.To.Type = obj.TYPE_MEM
default:
base.Fatalf("unknown indirect call family")
}
p.To.Reg = v.Args[0].Reg()
}
return p
}
// TailCall returns a new tail call instruction for the SSA value v.
// It is like Call, but for a tail call.
func (s *State) TailCall(v *ssa.Value) *obj.Prog {
p := s.Call(v)
p.As = obj.ARET
return p
}
// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
// It must be called immediately before emitting the actual CALL instruction,
// since it emits PCDATA for the stack map at the call (calls are safe points).
func (s *State) PrepareCall(v *ssa.Value) {
idx := s.livenessMap.Get(v)
if !idx.StackMapValid() {
// See Liveness.hasStackMap.
if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) {
base.Fatalf("missing stack map index for %v", v.LongString())
}
}
call, ok := v.Aux.(*ssa.AuxCall)
if ok {
// Record call graph information for nowritebarrierrec
// analysis.
if nowritebarrierrecCheck != nil {
nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos)
}
}
if s.maxarg < v.AuxInt {
s.maxarg = v.AuxInt
}
}
// UseArgs records the fact that an instruction needs a certain amount of
// callee args space for its use.
func (s *State) UseArgs(n int64) {
if s.maxarg < n {
s.maxarg = n
}
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
func fieldIdx(n *ir.SelectorExpr) int {
t := n.X.Type()
if !t.IsStruct() {
panic("ODOT's LHS is not a struct")
}
for i, f := range t.Fields().Slice() {
if f.Sym == n.Sel {
if f.Offset != n.Offset() {
panic("field offset doesn't match")
}
return i
}
}
panic(fmt.Sprintf("can't find field in expr %v\n", n))
// TODO: keep the result of this function somewhere in the ODOT Node
// so we don't have to recompute it each time we need it.
}
// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
curfn *ir.Func
strings map[string]*obj.LSym // map from constant string to data symbols
stksize int64 // stack size for current frame
stkptrsize int64 // prefix of stack containing pointers
log bool // print ssa debug to stdout
}
// StringData returns a symbol which
// is the data component of a global string constant containing s.
func (e *ssafn) StringData(s string) *obj.LSym {
if aux, ok := e.strings[s]; ok {
return aux
}
if e.strings == nil {
e.strings = make(map[string]*obj.LSym)
}
data := staticdata.StringSym(e.curfn.Pos(), s)
e.strings[s] = data
return data
}
func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
}
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
node := parent.N
if node.Class != ir.PAUTO || node.Addrtaken() {
// addressed things and non-autos retain their parents (i.e., cannot truly be split)
return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
}
s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
n := ir.NewNameAt(parent.N.Pos(), s)
s.Def = n
ir.AsNode(s.Def).Name().SetUsed(true)
n.SetType(t)
n.Class = ir.PAUTO
n.SetEsc(ir.EscNever)
n.Curfn = e.curfn
e.curfn.Dcl = append(e.curfn.Dcl, n)
types.CalcSize(t)
return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}
func (e *ssafn) CanSSA(t *types.Type) bool {
return TypeOK(t)
}
func (e *ssafn) Line(pos src.XPos) string {
return base.FmtPos(pos)
}
// Logf logs a message from the compiler.
func (e *ssafn) Logf(msg string, args ...interface{}) {
if e.log {
fmt.Printf(msg, args...)
}
}
func (e *ssafn) Log() bool {
return e.log
}
// Fatalf reports a compiler error and exits.
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
base.Pos = pos
nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...)
base.Fatalf("'%s': "+msg, nargs...)
}
// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
base.WarnfAt(pos, fmt_, args...)
}
func (e *ssafn) Debug_checknil() bool {
return base.Debug.Nil != 0
}
func (e *ssafn) UseWriteBarrier() bool {
return base.Flag.WB
}
func (e *ssafn) Syslook(name string) *obj.LSym {
switch name {
case "goschedguarded":
return ir.Syms.Goschedguarded
case "writeBarrier":
return ir.Syms.WriteBarrier
case "gcWriteBarrier":
return ir.Syms.GCWriteBarrier
case "typedmemmove":
return ir.Syms.Typedmemmove
case "typedmemclr":
return ir.Syms.Typedmemclr
}
e.Fatalf(src.NoXPos, "unknown Syslook func %v", name)
return nil
}
func (e *ssafn) SetWBPos(pos src.XPos) {
e.curfn.SetWBPos(pos)
}
func (e *ssafn) MyImportPath() string {
return base.Ctxt.Pkgpath
}
func clobberBase(n ir.Node) ir.Node {
if n.Op() == ir.ODOT {
n := n.(*ir.SelectorExpr)
if n.X.Type().NumFields() == 1 {
return clobberBase(n.X)
}
}
if n.Op() == ir.OINDEX {
n := n.(*ir.IndexExpr)
if n.X.Type().IsArray() && n.X.Type().NumElem() == 1 {
return clobberBase(n.X)
}
}
return n
}
// callTargetLSym returns the correct LSym to call 'callee' using its ABI.
func callTargetLSym(callee *ir.Name) *obj.LSym {
if callee.Func == nil {
// TODO(austin): This happens in a few cases of
// compiler-generated functions. These are all
// ABIInternal. It would be better if callee.Func was
// never nil and we didn't need this case.
return callee.Linksym()
}
return callee.LinksymABI(callee.Func.ABI)
}
func min8(a, b int8) int8 {
if a < b {
return a
}
return b
}
func max8(a, b int8) int8 {
if a > b {
return a
}
return b
}
// deferstruct makes a runtime._defer structure.
func deferstruct() *types.Type {
makefield := func(name string, typ *types.Type) *types.Field {
// Unlike the global makefield function, this one needs to set Pkg
// because these types might be compared (in SSA CSE sorting).
// TODO: unify this makefield and the global one above.
sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
return types.NewField(src.NoXPos, sym, typ)
}
// These fields must match the ones in runtime/runtime2.go:_defer and
// (*state).call above.
fields := []*types.Field{
makefield("started", types.Types[types.TBOOL]),
makefield("heap", types.Types[types.TBOOL]),
makefield("openDefer", types.Types[types.TBOOL]),
makefield("sp", types.Types[types.TUINTPTR]),
makefield("pc", types.Types[types.TUINTPTR]),
// Note: the types here don't really matter. Defer structures
// are always scanned explicitly during stack copying and GC,
// so we make them uintptr type even though they are real pointers.
makefield("fn", types.Types[types.TUINTPTR]),
makefield("_panic", types.Types[types.TUINTPTR]),
makefield("link", types.Types[types.TUINTPTR]),
makefield("fd", types.Types[types.TUINTPTR]),
makefield("varp", types.Types[types.TUINTPTR]),
makefield("framepc", types.Types[types.TUINTPTR]),
}
// build struct holding the above fields
s := types.NewStruct(types.NoPkg, fields)
s.SetNoalg(true)
types.CalcStructSize(s)
return s
}
// SpillSlotAddr uses spill slot information to initialize an obj.Addr
// The resulting addr is used in a non-standard context -- in the prologue
// of a function, before the frame has been constructed, so the standard
// addressing for the parameters will be wrong.
func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr {
return obj.Addr{
Name: obj.NAME_NONE,
Type: obj.TYPE_MEM,
Reg: baseReg,
Offset: spill.Offset + extraOffset,
}
}
var (
BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
)
// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
var GCWriteBarrierReg map[int16]*obj.LSym
| ["\"GOSSAFUNC\"", "\"GOSSADIR\""] | [] | ["GOSSADIR", "GOSSAFUNC"] | [] | ["GOSSADIR", "GOSSAFUNC"] | go | 2 | 0 |
KnowDistil/preliminary_small_net.py
|
#!/usr/bin/python2
# Preliminary experiments on MNIST
# Reference: Distilling the Knowledge in a Neural Network
from __future__ import print_function
import numpy as np
import matplotlib
matplotlib.use('Agg')
import tensorflow as tf
import tensorlayer as tl
import time
import os
def small_net(X_placeholder):
net = tl.layers.InputLayer(X_placeholder, name='input_layer')
net = tl.layers.DenseLayer(net, n_units=800, act=tf.nn.relu, name='small_fc1')
net = tl.layers.DenseLayer(net, n_units=800, act=tf.nn.relu, name='small_fc2')
net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, name='small_fc3')
return net
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(
shape=(-1, 784))
X_train = np.asarray(X_train, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.int64)
X_val = np.asarray(X_val, dtype=np.float32)
y_val = np.asarray(y_val, dtype=np.int64)
X_test = np.asarray(X_test, dtype=np.float32)
y_test = np.asarray(y_test, dtype=np.int64)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
batch_size = 128
# Read data
X = tf.placeholder(tf.float32, shape=[batch_size, 784], name='X')
y_ = tf.placeholder(tf.int64, shape=[batch_size, ], name='y_')
# Build the network
net = small_net(X)
y = net.outputs
# Compute loss
ce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=y_))
cost = ce
# Compute accuracy
correct_prediction = tf.equal(tf.argmax(y, 1), y_)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Dropout switcher
dp_dict = tl.utils.dict_to_one(net.all_drop)
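# Note (editorial): dict_to_one maps every dropout keep-probability placeholder
# in net.all_drop to 1.0 so evaluation runs with dropout disabled. This network
# defines no DropoutLayer, so the dict is empty here; it is kept so the
# train/eval feed_dict code below works unchanged if dropout is added later.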
# Visualizing network
writer = tf.train.SummaryWriter('log/test_logs', sess.graph)
writer.flush()
# Define model saving name (in the current path)
save_name = r'model_small.npz'
# If a saved model exists, load it
if os.path.exists(save_name):
print('load existing model')
load_params = tl.files.load_npz(path='', name=save_name)
tl.files.assign_params(sess, load_params, net)
# else train a model
else:
print('train a new model')
# Training settings
n_epoch = 50
lr = 1e-4
print_freq = 5
print('learning_rate: %f' % lr)
print('batch_size: %d' % batch_size)
train_params = net.all_params
train_op = tf.train.AdamOptimizer(
lr, beta1=0.9, beta2=0.999, epsilon=1e-8, use_locking=False).minimize(cost, var_list=train_params)
sess.run(tf.initialize_all_variables())
# Print network params
net.print_params()
net.print_layers()
print('start training')
for epoch in range(1, n_epoch + 1):
start_time = time.time()
for X_train_a, y_train_a in tl.iterate.minibatches(
X_train, y_train, batch_size, shuffle=True):
feed_dict = {X: X_train_a, y_: y_train_a}
feed_dict.update(net.all_drop)
sess.run(train_op, feed_dict=feed_dict)
if epoch == 1 or epoch % print_freq == 0:
print('Epoch %d of %d took %fs' % (epoch, n_epoch, time.time() - start_time))
train_loss, train_acc, n_batch = 0, 0, 0
for X_train_a, y_train_a in tl.iterate.minibatches(
X_train, y_train, batch_size, shuffle=False):
feed_dict = {X: X_train_a, y_: y_train_a}
feed_dict.update(dp_dict)
err, ac = sess.run([cost, acc], feed_dict=feed_dict)
train_loss += err
train_acc += ac
n_batch += 1
print('train loss: %f' % (train_loss / n_batch))
print('train acc: %f' % (train_acc / n_batch))
val_loss, val_acc, n_batch = 0, 0, 0
for X_val_a, y_val_a in tl.iterate.minibatches(
X_val, y_val, batch_size, shuffle=False):
feed_dict = {X: X_val_a, y_: y_val_a}
feed_dict.update(dp_dict)
err, ac = sess.run([cost, acc], feed_dict=feed_dict)
val_loss += err
val_acc += ac
n_batch += 1
print('val loss: %f' % (val_loss / n_batch))
print('val acc: %f' % (val_acc / n_batch))
# Save model
tl.files.save_npz(net.all_params, name=save_name)
print('Evaluation')
test_loss, test_acc, n_batch = 0, 0, 0
for X_test_a, y_test_a in tl.iterate.minibatches(
X_test, y_test, batch_size, shuffle=False):
feed_dict = {X: X_test_a, y_: y_test_a}
feed_dict.update(dp_dict)
err, ac = sess.run([cost, acc], feed_dict=feed_dict)
test_loss += err
test_acc += ac
n_batch += 1
print('test loss: %f' % (test_loss / n_batch))
print('test acc: %f' % (test_acc / n_batch))
| [] | [] | [] | [] | [] | python | null | null | null
main.go
|
package main
import (
"context"
"flag"
"fmt"
"os"
"simpleforecast/apis/weatherapi"
"simpleforecast/config"
"simpleforecast/presentation"
)
type CommandLineOptions struct {
NumberOfDays int
Location string
Forecast bool
Language string
}
func parseCommandLineArguments(config *config.Config) *CommandLineOptions {
numberOfDays := flag.Int("days", 0, "Number of days for the forecast (1 to 3)")
location := flag.String("location", config.Location, "Location for weather data (default is location via IP)")
lang := flag.String("lang", config.Language, "Language of the weather status (sunny, cloudy, etc.)")
flag.Parse()
return &CommandLineOptions{
NumberOfDays: *numberOfDays,
Location: *location,
Forecast: (*numberOfDays > 0),
Language: *lang,
}
}
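// Example invocation (an assumption for illustration: the built binary is named
// simpleforecast and MY_WEATHER_API_KEY is exported in the environment):
//
//	simpleforecast -days 3 -location "Berlin" -lang en
//
// With -days omitted (or 0) only the current weather is printed.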
func main() {
configuration, err := config.LoadConfiguration()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
options := parseCommandLineArguments(configuration)
client := weatherapi.NewWeatherClient(os.Getenv("MY_WEATHER_API_KEY"), weatherapi.BaseURLV1)
if options.Forecast {
data, err := client.GetForecast(context.TODO(),
&weatherapi.ForecastOptions{NumberOfDays: options.NumberOfDays, Location: options.Location, Language: options.Language})
if err != nil {
fmt.Println(err)
os.Exit(1)
}
presentation.PrintForecast(data)
} else {
data, err := client.GetCurrent(context.TODO(),
&weatherapi.ForecastOptions{Location: options.Location, Language: options.Language})
if err != nil {
fmt.Println(err)
os.Exit(1)
}
presentation.PrintCurrentWeather(data)
}
}
| ["\"MY_WEATHER_API_KEY\""] | [] | ["MY_WEATHER_API_KEY"] | [] | ["MY_WEATHER_API_KEY"] | go | 1 | 0 |
capi/type_checking.py
|
"""Functions for runtime type checking. More strict but slower than availabe
static type checking. Off by default.
"""
import os
from typing import Any, Optional, Tuple
import torch
def assert_joint_probability(
x: torch.Tensor, shape: Tuple[int, ...], allow_improper: bool = False
) -> None:
"""Assert `x` is joint probability distribution over two variables
Args:
x: Possible joint probability distribution
shape: Required shape
allow_improper: Whether improper distribution (all zeros) is permitted
"""
if os.getenv("strict_type_check") == "1":
norm = x.sum(dim=(-1, -2))
if allow_improper:
norm[norm == 0] = 1
assert torch.isclose(norm, torch.Tensor([1.0]).to(norm.device)).all()
assert x.shape[-1] == x.shape[-2]
assert x.shape == shape
def assert_prescription(
x: torch.Tensor,
shape: Tuple[int, ...],
pure: bool = True,
allow_improper: bool = False,
) -> None:
"""Assert `x` is valid prescription
Args:
x: Possible prescription
shape: Required shape
pure: Whether prescription is required to be deterministic
allow_improper: Whether improper distribution (all zeros) is permitted
"""
if os.getenv("strict_type_check") == "1":
norm = x.sum(dim=-1)
if allow_improper:
norm[norm == 0] = 1
assert torch.isclose(norm, torch.Tensor([1.0]).to(x.device)).all()
assert (x >= 0).all()
assert x.shape == shape
if pure:
max_vals = x.max(dim=-1).values
if allow_improper:
max_vals[max_vals == 0] = 1
assert (max_vals == 1).all()
def assert_label_prescription(
x: torch.Tensor, num_actions: int, shape: Tuple[int, ...]
) -> None:
"""Assert `x` is valid label prescription
Args:
x: Possible prescription
num_actions: Number of action labels
shape: Required shape
"""
if os.getenv("strict_type_check") == "1":
assert x.dtype == torch.int64
assert (x >= 0).all()
assert (x < num_actions).all()
assert x.shape == shape
def assert_shape(
x: torch.Tensor, shape: Tuple[int, ...], dim: Optional[int] = None
) -> None:
"""Assert `x` has shape `shape`
Args:
x: Tensor
shape: Required shape
dim: If specified, enforce shape requirement only for axis `dim`
"""
if os.getenv("strict_type_check") == "1":
if dim is not None:
assert (x.shape[dim],) == shape
else:
assert x.shape == shape
def assert_num_dims(x: torch.Tensor, num_dims: int) -> None:
"""Assert `x` has `num_dims` dimensions
Args:
x: Tensor
num_dims: Required number of dimensions
"""
if os.getenv("strict_type_check") == "1":
assert len(x.shape) == num_dims
def assert_element(x: Any, collection: Tuple[Any, ...]) -> None:
"""Assert `x` in `collection`
Args:
x: Anything
collection: Tuple
"""
if os.getenv("strict_type_check") == "1":
assert x in collection
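# Minimal usage sketch (illustrative only; the tensors and shapes below are made up).
# All of the asserts above are no-ops unless the strict_type_check environment
# variable is set to "1" before they run:
#
#   import os
#   import torch
#   os.environ["strict_type_check"] = "1"
#   x = torch.rand(4, 3, 3)
#   x = x / x.sum(dim=(-1, -2), keepdim=True)
#   assert_joint_probability(x, shape=(4, 3, 3))
#   assert_shape(x, (4, 3, 3))
#   assert_num_dims(x, 3)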
|
[] |
[] |
[
"strict_type_check"
] |
[]
|
["strict_type_check"]
|
python
| 1 | 0 | |
artifacts/old_dataset_versions/minimal_commits_v02/Cirq/Cirq#3948/after/_compat.py
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Workarounds for compatibility issues between versions and libraries."""
import functools
import importlib
import os
import re
import sys
import traceback
import warnings
from types import ModuleType
from typing import Any, Callable, Optional, Dict, Tuple, Type, Set
import numpy as np
import pandas as pd
import sympy
def proper_repr(value: Any) -> str:
"""Overrides sympy and numpy returning repr strings that don't parse."""
if isinstance(value, sympy.Basic):
result = sympy.srepr(value)
# HACK: work around https://github.com/sympy/sympy/issues/16074
# (only handles a few cases)
fixed_tokens = ['Symbol', 'pi', 'Mul', 'Pow', 'Add', 'Mod', 'Integer', 'Float', 'Rational']
for token in fixed_tokens:
result = result.replace(token, 'sympy.' + token)
return result
if isinstance(value, np.ndarray):
if np.issubdtype(value.dtype, np.datetime64):
return f'np.array({value.tolist()!r}, dtype=np.{value.dtype!r})'
return f'np.array({value.tolist()!r}, dtype=np.{value.dtype})'
if isinstance(value, pd.MultiIndex):
return f'pd.MultiIndex.from_tuples({repr(list(value))}, names={repr(list(value.names))})'
if isinstance(value, pd.Index):
return (
f'pd.Index({repr(list(value))}, '
f'name={repr(value.name)}, '
f'dtype={repr(str(value.dtype))})'
)
if isinstance(value, pd.DataFrame):
cols = [value[col].tolist() for col in value.columns]
rows = list(zip(*cols))
return (
f'pd.DataFrame('
f'\n columns={proper_repr(value.columns)}, '
f'\n index={proper_repr(value.index)}, '
f'\n data={repr(rows)}'
f'\n)'
)
return repr(value)
def proper_eq(a: Any, b: Any) -> bool:
"""Compares objects for equality, working around __eq__ not always working.
For example, in numpy a == b broadcasts and returns an array instead of
doing what np.array_equal(a, b) does. This method uses np.array_equal(a, b)
when dealing with numpy arrays.
"""
if type(a) == type(b):
if isinstance(a, np.ndarray):
return np.array_equal(a, b)
if isinstance(a, (pd.DataFrame, pd.Index, pd.MultiIndex)):
return a.equals(b)
if isinstance(a, (tuple, list)):
return len(a) == len(b) and all(proper_eq(x, y) for x, y in zip(a, b))
return a == b
def _warn_or_error(msg):
from cirq.testing.deprecation import ALLOW_DEPRECATION_IN_TEST
called_from_test = 'PYTEST_CURRENT_TEST' in os.environ
deprecation_allowed = ALLOW_DEPRECATION_IN_TEST in os.environ
if called_from_test and not deprecation_allowed:
raise ValueError(f"Cirq should not use deprecated functionality: {msg}")
# we have to dynamically count the non-internal frames
# due to the potentially multiple nested module wrappers
stack_level = 1
for filename, _, _, _ in reversed(traceback.extract_stack()):
if not _is_internal(filename) and "_compat.py" not in filename:
break
if "_compat.py" in filename:
stack_level += 1
warnings.warn(
msg,
DeprecationWarning,
stacklevel=stack_level,
)
def _validate_deadline(deadline: str):
DEADLINE_REGEX = r"^v(\d)+\.(\d)+$"
assert re.match(DEADLINE_REGEX, deadline), "deadline should match vX.Y"
def deprecated(
*, deadline: str, fix: str, name: Optional[str] = None
) -> Callable[[Callable], Callable]:
"""Marks a function as deprecated.
Args:
deadline: The version where the function will be deleted. It should be a minor version
(e.g. "v0.7").
fix: A complete sentence describing what the user should be using
instead of this particular function (e.g. "Use cos instead.")
name: How to refer to the function.
Defaults to `func.__qualname__`.
Returns:
A decorator that decorates functions with a deprecation warning.
"""
_validate_deadline(deadline)
def decorator(func: Callable) -> Callable:
@functools.wraps(func)
def decorated_func(*args, **kwargs) -> Any:
qualname = func.__qualname__ if name is None else name
_warn_or_error(
f'{qualname} was used but is deprecated.\n'
f'It will be removed in cirq {deadline}.\n'
f'{fix}\n'
)
return func(*args, **kwargs)
decorated_func.__doc__ = (
f'THIS FUNCTION IS DEPRECATED.\n\n'
f'IT WILL BE REMOVED IN `cirq {deadline}`.\n\n'
f'{fix}\n\n'
f'{decorated_func.__doc__ or ""}'
)
return decorated_func
return decorator
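# Illustrative usage of the decorator above (the function and messages are
# hypothetical, not part of cirq):
#
#   @deprecated(deadline='v0.10', fix='Use new_area instead.')
#   def old_area(r):
#       return 3.14 * r * r
#
# Calling old_area(2) emits a DeprecationWarning via _warn_or_error and then
# returns the wrapped function's result.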
def deprecated_class(
*, deadline: str, fix: str, name: Optional[str] = None
) -> Callable[[Type], Type]:
"""Marks a class as deprecated.
Args:
deadline: The version where the function will be deleted. It should be a minor version
(e.g. "v0.7").
fix: A complete sentence describing what the user should be using
instead of this particular function (e.g. "Use cos instead.")
name: How to refer to the class.
Defaults to `class.__qualname__`.
Returns:
A decorator that decorates classes with a deprecation warning.
"""
_validate_deadline(deadline)
def decorator(clazz: Type) -> Type:
clazz_new = clazz.__new__
def patched_new(cls, *args, **kwargs):
qualname = clazz.__qualname__ if name is None else name
_warn_or_error(
f'{qualname} was used but is deprecated.\n'
f'It will be removed in cirq {deadline}.\n'
f'{fix}\n'
)
return clazz_new(cls)
setattr(clazz, '__new__', patched_new)
clazz.__doc__ = (
f'THIS CLASS IS DEPRECATED.\n\n'
f'IT WILL BE REMOVED IN `cirq {deadline}`.\n\n'
f'{fix}\n\n'
f'{clazz.__doc__ or ""}'
)
return clazz
return decorator
def deprecated_parameter(
*,
deadline: str,
fix: str,
func_name: Optional[str] = None,
parameter_desc: str,
match: Callable[[Tuple[Any, ...], Dict[str, Any]], bool],
rewrite: Optional[
Callable[[Tuple[Any, ...], Dict[str, Any]], Tuple[Tuple[Any, ...], Dict[str, Any]]]
] = None,
) -> Callable[[Callable], Callable]:
"""Marks a function parameter as deprecated.
Also handles rewriting the deprecated parameter into the new signature.
Args:
deadline: The version where the function will be deleted. It should be a minor version
(e.g. "v0.7").
fix: A complete sentence describing what the user should be using
instead of this particular function (e.g. "Use cos instead.")
func_name: How to refer to the function.
Defaults to `func.__qualname__`.
parameter_desc: The name and type of the parameter being deprecated,
e.g. "janky_count" or "janky_count keyword" or
"positional janky_count".
match: A lambda that takes args, kwargs and determines if the
deprecated parameter is present or not. This determines whether or
not the deprecation warning is printed, and also whether or not
rewrite is called.
rewrite: Returns new args/kwargs that don't use the deprecated
parameter. Defaults to making no changes.
Returns:
A decorator that decorates functions with a parameter deprecation
warning.
"""
_validate_deadline(deadline)
def decorator(func: Callable) -> Callable:
@functools.wraps(func)
def decorated_func(*args, **kwargs) -> Any:
if match(args, kwargs):
if rewrite is not None:
args, kwargs = rewrite(args, kwargs)
qualname = func.__qualname__ if func_name is None else func_name
_warn_or_error(
f'The {parameter_desc} parameter of {qualname} was '
f'used but is deprecated.\n'
f'It will be removed in cirq {deadline}.\n'
f'{fix}\n',
)
return func(*args, **kwargs)
return decorated_func
return decorator
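# Illustrative usage of deprecated_parameter (names and messages are hypothetical):
#
#   @deprecated_parameter(
#       deadline='v0.10',
#       fix='Use radius instead.',
#       parameter_desc='r keyword',
#       match=lambda args, kwargs: 'r' in kwargs,
#       rewrite=lambda args, kwargs: (
#           args, {('radius' if k == 'r' else k): v for k, v in kwargs.items()}))
#   def area(radius=1.0):
#       return 3.14 * radius * radius
#
# Calling area(r=2.0) triggers the warning and is rewritten to area(radius=2.0).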
def deprecate_attributes(module: ModuleType, deprecated_attributes: Dict[str, Tuple[str, str]]):
"""Wrap a module with deprecated attributes that give warnings.
Args:
module: The module to wrap.
deprecated_attributes: A dictionary from attribute name to a tuple of
strings, where the first string gives the version that the attribute
will be removed in, and the second string describes what the user
should do instead of accessing this deprecated attribute.
Returns:
Wrapped module with deprecated attributes. Use of these attributes
will cause a warning for these deprecated attributes.
"""
for (deadline, _) in deprecated_attributes.values():
_validate_deadline(deadline)
class Wrapped(ModuleType):
__dict__ = module.__dict__
def __getattr__(self, name):
if name in deprecated_attributes:
deadline, fix = deprecated_attributes[name]
_warn_or_error(
f'{name} was used but is deprecated.\n'
f'It will be removed in cirq {deadline}.\n'
f'{fix}\n'
)
return getattr(module, name)
return Wrapped(module.__name__, module.__doc__)
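# Illustrative usage of deprecate_attributes (module and attribute names are
# hypothetical):
#
#   import my_module
#   my_module = deprecate_attributes(
#       my_module, {'OLD_CONSTANT': ('v0.10', 'Use NEW_CONSTANT instead.')})
#
# Accessing my_module.OLD_CONSTANT afterwards calls _warn_or_error with the given
# deadline and fix message, then falls through to the original attribute.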
class DeprecatedModuleLoader(importlib.abc.Loader):
"""A Loader for deprecated modules.
It wraps an existing Loader instance, to which it delegates the loading. On top of that
it ensures that the sys.modules cache has both the deprecated module's name and the
new module's name pointing to the same exact ModuleType instance.
Args:
loader: the loader to be wrapped
old_module_name: the deprecated module's fully qualified name
new_module_name: the new module's fully qualified name
"""
def __init__(self, loader: Any, old_module_name: str, new_module_name: str):
"""A module loader that uses an existing module loader and intercepts
the execution of a module.
"""
self.loader = loader
if hasattr(loader, 'exec_module'):
# mypy#2427
self.exec_module = self._wrap_exec_module(loader.exec_module) # type: ignore
# while this is rare and load_module was deprecated in 3.4
# in older environments this line makes them work as well
if hasattr(loader, 'load_module'):
# mypy#2427
self.load_module = self._wrap_load_module(loader.load_module) # type: ignore
if hasattr(loader, 'create_module'):
self.create_module = loader.create_module # type: ignore
self.old_module_name = old_module_name
self.new_module_name = new_module_name
def module_repr(self, module: ModuleType) -> str:
return self.loader.module_repr(module)
def _wrap_load_module(self, method: Any) -> Any:
def load_module(fullname: str) -> ModuleType:
assert fullname == self.old_module_name, (
f"DeprecatedModuleLoader for {self.old_module_name} was asked to "
f"load {fullname}"
)
if self.new_module_name in sys.modules:
sys.modules[self.old_module_name] = sys.modules[self.new_module_name]
return sys.modules[self.old_module_name]
method(self.new_module_name)
assert self.new_module_name in sys.modules, (
f"Wrapped loader {self.loader} was "
f"expected to insert "
f"{self.new_module_name} in sys.modules "
f"but it did not."
)
sys.modules[self.old_module_name] = sys.modules[self.new_module_name]
return sys.modules[self.old_module_name]
return load_module
def _wrap_exec_module(self, method: Any) -> Any:
def exec_module(module: ModuleType) -> None:
assert module.__name__ == self.old_module_name, (
f"DeprecatedModuleLoader for {self.old_module_name} was asked to "
f"load {module.__name__}"
)
# check for new_module whether it was loaded
if self.new_module_name in sys.modules:
# found it - no need to load the module again
sys.modules[self.old_module_name] = sys.modules[self.new_module_name]
return
# now we know we have to initialize the module
sys.modules[self.old_module_name] = module
sys.modules[self.new_module_name] = module
try:
return method(module)
except BaseException:
# if there's an error, we atomically remove both
del sys.modules[self.new_module_name]
del sys.modules[self.old_module_name]
raise
return exec_module
def _is_internal(filename: str) -> bool:
"""Returns whether filename is internal to python.
This is similar to how the built-in warnings module differentiates frames from internal modules.
It is specific to CPython - see
https://github.com/python/cpython/blob/41ec17e45d54473d32f543396293256f1581e44d/Lib/warnings.py#L275.
"""
return 'importlib' in filename and '_bootstrap' in filename
_warned: Set[str] = set()
def _deduped_module_warn_or_error(old_module_name, new_module_name, deadline):
if old_module_name in _warned:
return
_warned.add(old_module_name)
_warn_or_error(
f"{old_module_name} was used but is deprecated.\n "
f"it will be removed in cirq {deadline}.\n "
f"Use {new_module_name} instead.\n",
)
class DeprecatedModuleFinder(importlib.abc.MetaPathFinder):
"""A module finder to handle deprecated module references.
It sends a deprecation warning when a deprecated module is asked to be found.
It is meant to be used as a wrapper around existing MetaPathFinder instances.
Args:
finder: the finder to wrap.
new_module_name: the new module's fully qualified name
old_module_name: the deprecated module's fully qualified name
deadline: the deprecation deadline
"""
def __init__(
self,
finder: Any,
new_module_name: str,
old_module_name: str,
deadline: str,
):
"""An aliasing module finder that uses an existing module finder to find a python
module spec and intercept the execution of matching modules.
"""
self.finder = finder
self.new_module_name = new_module_name
self.old_module_name = old_module_name
self.deadline = deadline
# to cater for metadata path finders
# https://docs.python.org/3/library/importlib.metadata.html#extending-the-search-algorithm
if hasattr(finder, "find_distributions"):
def find_distributions(context):
return self.finder.find_distributions(context)
self.find_distributions = find_distributions
if hasattr(finder, "invalidate_caches"):
def invalidate_caches() -> None:
return self.finder.invalidate_caches()
# mypy#2427
self.invalidate_caches = invalidate_caches # type: ignore
def find_spec(self, fullname: str, path: Any = None, target: Any = None) -> Any:
"""Finds the specification of a module.
This is an implementation of the importlib.abc.MetaPathFinder.find_spec method.
See https://docs.python.org/3/library/importlib.html#importlib.abc.MetaPathFinder.
Args:
fullname: name of the module.
path: if presented, this is the parent module's submodule search path.
target: When passed in, target is a module object that the finder may use to make a more
educated guess about what spec to return. We don't use it here, just pass it along
to the wrapped finder.
"""
if fullname != self.old_module_name and not fullname.startswith(self.old_module_name + "."):
# if we are not interested in it, then just pass through to the wrapped finder
return self.finder.find_spec(fullname, path, target)
# warn for deprecation
_deduped_module_warn_or_error(self.old_module_name, self.new_module_name, self.deadline)
new_fullname = self.new_module_name + fullname[len(self.old_module_name) :]
# find the corresponding spec in the new structure
if fullname == self.old_module_name:
# this is the first time the deprecated module is being found
# which means that the new parent needs to be found first and under
# the new parent's path, we should be able to find the new name of
# the deprecated module
# this code is heavily inspired by importlib.util.find_spec
parent_name = new_fullname.rpartition('.')[0]
if parent_name:
parent = __import__(parent_name, fromlist=['__path__'])
# note that compared to importlib.util.find_spec we don't handle
# AttributeError here because it is not expected to happen in case
# of a DeprecatedModuleLoader - the new parent should exist and be
# a proper package
parent_path = parent.__path__
else:
parent_path = None
spec = self.finder.find_spec(new_fullname, parent_path, None)
else:
# we are finding a submodule of the parent of the deprecated module,
# which means that the parent was already found, and thus, `path` is
# correctly pointing to the module's parent in the new hierarchy
spec = self.finder.find_spec(
new_fullname,
path=path,
target=target,
)
# if the spec exists, return the DeprecatedModuleLoader that will do the loading as well
# as set the alias(es) in sys.modules as necessary
if spec is not None:
# change back the name to the deprecated module name
spec.name = fullname
# some loaders do a check to ensure the module's name is the same
# as the loader was created for
if getattr(spec.loader, "name", None) == new_fullname:
setattr(spec.loader, "name", fullname)
spec.loader = DeprecatedModuleLoader(spec.loader, fullname, new_fullname)
return spec
def deprecated_submodule(
*, new_module_name: str, old_parent: str, old_child: str, deadline: str, create_attribute: bool
):
"""Creates a deprecated module reference recursively for a module.
For `new_module_name` (e.g. cirq_google) creates an alias (e.g cirq.google) in Python's module
cache. It also recursively checks for the already imported submodules (e.g. cirq_google.api) and
creates the alias for them too (e.g. cirq.google.api). With this method it is possible to create
an alias that really looks like a module, e.g you can do things like
`from cirq.google import api` - which would be otherwise impossible.
Note that this method will execute `new_module_name` in order to ensure that it is in the module
cache.
Args:
new_module_name: absolute module name for the new module
old_parent: the current module that had the original submodule
old_child: the submodule that is being relocated
deadline: the deprecation deadline (e.g. "v0.9"), matching the "vX.Y" format
create_attribute: if True, the submodule will be added as a deprecated attribute to the
old_parent module
Returns:
None
"""
_validate_deadline(deadline)
old_module_name = f"{old_parent}.{old_child}"
if create_attribute:
new_module = importlib.import_module(new_module_name)
_setup_deprecated_submodule_attribute(
new_module_name, old_parent, old_child, deadline, new_module
)
def wrap(finder: Any) -> Any:
if not hasattr(finder, 'find_spec'):
return finder
# this is just to make mypy not complain about the type of new_module_spec being Optional
return DeprecatedModuleFinder(finder, new_module_name, old_module_name, deadline)
sys.meta_path = [wrap(finder) for finder in sys.meta_path]
def _setup_deprecated_submodule_attribute(
new_module_name: str, old_parent: str, old_child: str, deadline: str, new_module: ModuleType
):
parent_module = sys.modules[old_parent]
setattr(parent_module, old_child, new_module)
class Wrapped(ModuleType):
__dict__ = parent_module.__dict__
def __getattr__(self, name):
if name == old_child:
_deduped_module_warn_or_error(
f"{old_parent}.{old_child}", new_module_name, deadline
)
return getattr(parent_module, name)
sys.modules[old_parent] = Wrapped(parent_module.__name__, parent_module.__doc__)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
dash/views/apps.py
|
import os
import json
import requests
from flask import flash  # flash() is used below; assumes this view module runs under Flask
def getAppDetails(envs):
appDetails = getCuratedAppDetails(envs)
retDict = {}
names = []
counts = []
appCpus = []
appMem = []
totalTasks = 0
for item in ['tasks', 'apps', 'cpus', 'mem']:
dictItem = {}
names = []
values = []
totalValue = 0
for team, value in appDetails[item].items():
names.append(team)
values.append(value)
totalValue += value
dictItem['name'] = names
dictItem['data'] = values
dictItem['total'] = totalValue
retDict[item] = dictItem
retDict['tasks']['title'] = '{} tasks are running on RogerOS ({})...'.format(retDict['tasks']['total'], '+'.join(envs))
retDict['apps']['title'] = '...which are instances of {} applications.'.format(retDict['apps']['total'])
retDict['cpus']['title'] = '{} cores (cpus) are currently allocated to them...'.format(retDict['cpus']['total'])
retDict['mem']['title'] = '...along with a total of {} MB of memory.'.format(retDict['mem']['total'])
retDict['tasks']['headers'] = ['name', 'count']
retDict['apps']['headers'] = ['name', 'count']
retDict['cpus']['headers'] = ['name', 'allocation']
retDict['mem']['headers'] = ['name', 'allocation']
return retDict
def getTeamNamesDict():
return json.loads(os.environ['GROUP_DATA'])
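# GROUP_DATA is expected to be a JSON object mapping a team name to a list of
# substring patterns that identify its apps (the example value is hypothetical):
#   export GROUP_DATA='{"search": ["search-", "indexer"], "infra": ["roger-"]}'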
def getCuratedAppDetails(envs):
teamNames = getTeamNamesDict()
tasks = {}
apps = {}
cpus = {}
mem = {}
for env in envs:
try:
rawData = getRawAppDetails(env)
for id, (running, cpusAlloc, memAlloc) in rawData.items():
team = 'others'
for team_name, patterns in teamNames.items():
if isMatchingName(id, patterns):
team = team_name
break
if team in tasks:
apps[team] += 1
tasks[team] += running
cpus[team] += cpusAlloc
mem[team] += memAlloc
else:
apps[team] = 1
tasks[team] = running
cpus[team] = cpusAlloc
mem[team] = memAlloc
except Exception as e:
print(e)
flash('Had trouble accessing {} environment. Please try again soon.'.format(env))
# possible connection error; continue to the next env
return { 'tasks':tasks, 'apps': apps, 'cpus':cpus, 'mem':mem }
def isMatchingName(name, patterns):
for item in patterns:
if item in name:
return True
return False
def getRawAppDetails(env):
endpoints = json.loads(os.environ['MARATHON_ENDPOINTS'])
url = endpoints[env] + '/v2/apps'
resp = requests.get(url, auth=(os.environ['MARATHON_USER'], os.environ['MARATHON_PASSWD']))
rdata = resp.json()
apps = {}
for app in rdata['apps']:
apps[app['id']] = [app['tasksRunning'], float(app['instances']) * app['cpus'], float(app['instances']) * app['mem']]
return apps
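# The Marathon connection details also come from the environment (values below are
# placeholders): MARATHON_ENDPOINTS is a JSON object mapping an environment name to
# its base URL, and MARATHON_USER / MARATHON_PASSWD hold the basic-auth credentials.
#   export MARATHON_ENDPOINTS='{"prod": "http://marathon.prod.example.com:8080"}'
#   export MARATHON_USER=dashboard
#   export MARATHON_PASSWD=secret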
|
[] |
[] |
[
"GROUP_DATA",
"MARATHON_ENDPOINTS",
"MARATHON_PASSWD",
"MARATHON_USER"
] |
[]
|
["GROUP_DATA", "MARATHON_ENDPOINTS", "MARATHON_PASSWD", "MARATHON_USER"]
|
python
| 4 | 0 | |
server/src/main/java/net/feedbacky/app/controller/login/ServiceLoginController.java
|
package net.feedbacky.app.controller.login;
import net.feedbacky.app.data.user.ConnectedAccount;
import net.feedbacky.app.data.user.MailPreferences;
import net.feedbacky.app.data.user.User;
import net.feedbacky.app.data.user.dto.FetchUserDto;
import net.feedbacky.app.exception.types.LoginFailedException;
import net.feedbacky.app.login.LoginProvider;
import net.feedbacky.app.login.LoginProviderRegistry;
import net.feedbacky.app.repository.UserRepository;
import net.feedbacky.app.util.JwtTokenUtil;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.CrossOrigin;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import javax.net.ssl.HttpsURLConnection;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
/**
* @author Plajer
* <p>
* Created at 21.01.2021
*/
@CrossOrigin
@RestController
public class ServiceLoginController {
private final LoginProviderRegistry loginProviderRegistry;
private final UserRepository userRepository;
@Autowired
public ServiceLoginController(LoginProviderRegistry loginProviderRegistry, UserRepository userRepository) {
this.loginProviderRegistry = loginProviderRegistry;
this.userRepository = userRepository;
}
@GetMapping("v1/service/{id}")
public ResponseEntity handle(HttpServletResponse response, HttpServletRequest request, @PathVariable String id, @RequestParam(name = "code") String code) throws IOException {
LoginProvider provider = getLoginProviderById(id);
if(!provider.isEnabled()) {
throw new LoginFailedException("Sign in with '" + provider.getProviderData().getName() + "' is disabled.");
}
URL url = new URL(provider.getOauthDetails().getTokenUrl());
HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
conn.setRequestMethod("POST");
conn.setRequestProperty("User-Agent", LoginProvider.USER_AGENT);
conn.setRequestProperty("Accept", "application/json");
conn.setDoOutput(true);
OutputStream os = conn.getOutputStream();
String content = "client_id={CLIENT_ID}&client_secret={CLIENT_SECRET}&redirect_uri={REDIRECT_URI}&code={CODE}&grant_type=authorization_code";
content = StringUtils.replace(content, "{CLIENT_ID}", LoginProvider.EnvironmentVariables.readEnvVariable(provider.getEnvironmentVariables().getClientId()));
content = StringUtils.replace(content, "{CLIENT_SECRET}", LoginProvider.EnvironmentVariables.readEnvVariable(provider.getEnvironmentVariables().getClientSecret()));
content = StringUtils.replace(content, "{REDIRECT_URI}", URLEncoder.encode(
LoginProvider.EnvironmentVariables.readEnvVariable(provider.getEnvironmentVariables().getRedirectUri()), "UTF-8"));
content = StringUtils.replace(content, "{CODE}", code);
os.write(content.getBytes(StandardCharsets.UTF_8));
os.flush();
os.close();
int responseCode = conn.getResponseCode();
if(responseCode != HttpURLConnection.HTTP_OK) {
throw new LoginFailedException("Failed to sign in with '" + provider.getProviderData().getName() + "' ! Code: " + responseCode + ". Message: " + conn.getResponseMessage());
}
Map<String, String> tokenData = new ObjectMapper().readValue(getResponse(conn.getInputStream()), Map.class);
conn.disconnect();
String token = tokenData.get("access_token");
User user = connectAsUser(id, provider, token);
Map<String, Object> json = new HashMap<>();
String jwtToken = JwtTokenUtil.generateToken(user.getEmail());
json.put("token", jwtToken);
json.put("user", new FetchUserDto().from(user).withConfidentialData(user));
return ResponseEntity.ok().body(json);
}
private LoginProvider getLoginProviderById(String id) {
for(LoginProvider provider : loginProviderRegistry.getProviders()) {
if(provider.getId().equals(id)) {
return provider;
}
}
throw new LoginFailedException("Sign in with '" + id + "' is not supported.");
}
private String getResponse(InputStream inputStream) throws IOException {
BufferedReader in = new BufferedReader(new InputStreamReader(inputStream));
String inputLine;
StringBuilder response = new StringBuilder();
while((inputLine = in.readLine()) != null) {
response.append(inputLine);
}
in.close();
return response.toString();
}
private User connectAsUser(String id, LoginProvider provider, String token) throws IOException {
URL url = new URL(provider.getOauthDetails().getUserUrl());
HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
conn.setRequestProperty("User-Agent", LoginProvider.USER_AGENT);
conn.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
String authorization = provider.getOauthDetails().getAuthorizationProperty();
authorization = StringUtils.replace(authorization, "{TOKEN}", token);
conn.setRequestProperty("Authorization", authorization);
conn.setDoOutput(true);
Map<String, Object> responseData = new ObjectMapper().readValue(getResponse(conn.getInputStream()), Map.class);
conn.disconnect();
if(responseData.get(provider.getOauthDetails().getDataFields().getEmail()) == null) {
throw new LoginFailedException("Email address not found, please contact administrator if you think it's an error.");
}
if(provider.getOauthDetails().getDataFields().getEmailVerified() != null) {
Boolean mailVerified = (Boolean) responseData.get(provider.getOauthDetails().getDataFields().getEmailVerified());
if(mailVerified != null && !mailVerified) {
throw new LoginFailedException("Email address you sign in with is not verified at '" + provider.getProviderData().getName() + "', please verify email to continue.");
}
}
return createOrUpdateUser(id, responseData, provider.getOauthDetails().getDataFields());
}
private User createOrUpdateUser(String id, Map<String, Object> data, LoginProvider.OauthDetails.DataFields fields) {
Optional<User> optional = userRepository.findByEmail((String) data.get(fields.getEmail()));
if(!optional.isPresent()) {
optional = Optional.of(new User());
User user = optional.get();
user.setEmail((String) data.get(fields.getEmail()));
if(fields.getAvatar() == null || data.get(fields.getAvatar()) == null) {
user.setAvatar(System.getenv("REACT_APP_DEFAULT_USER_AVATAR").replace("%nick%", (String) data.get(fields.getUsername())));
} else {
user.setAvatar((String) data.get(fields.getAvatar()));
}
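// REACT_APP_DEFAULT_USER_AVATAR is expected to contain a "%nick%" placeholder that is
// substituted with the username, e.g. (placeholder value) https://avatars.example.com/%nick%.png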
user.setUsername((String) data.get(fields.getUsername()));
MailPreferences preferences = new MailPreferences();
preferences.setUnsubscribeToken(RandomStringUtils.randomAlphanumeric(12));
preferences.setNotificationsEnabled(true);
preferences.setUser(user);
user.setMailPreferences(preferences);
Set<ConnectedAccount> accounts = new HashSet<>();
accounts.add(generateConnectedAccount(id, data, fields, user));
user.setConnectedAccounts(accounts);
//first user, set as service staff
if(userRepository.count() == 0) {
user.setServiceStaff(true);
}
return userRepository.save(user);
}
User user = optional.get();
if(user.getConnectedAccounts().stream().noneMatch(acc -> acc.getProvider().equals(id))) {
Set<ConnectedAccount> accounts = new HashSet<>(user.getConnectedAccounts());
accounts.add(generateConnectedAccount(id, data, fields, user));
user.setConnectedAccounts(accounts);
return userRepository.save(user);
}
return user;
}
private ConnectedAccount generateConnectedAccount(String id, Map<String, Object> data, LoginProvider.OauthDetails.DataFields fields, User user) {
ConnectedAccount account = new ConnectedAccount();
account.setUser(user);
account.setProvider(id);
account.setAccountId(String.valueOf(data.get(fields.getId())));
return account;
}
}
|
[
"\"REACT_APP_DEFAULT_USER_AVATAR\""
] |
[] |
[
"REACT_APP_DEFAULT_USER_AVATAR"
] |
[]
|
["REACT_APP_DEFAULT_USER_AVATAR"]
|
java
| 1 | 0 | |
jwt/hs256/hs256.go
|
package hs256
import (
"errors"
"os"
"github.com/golang-jwt/jwt"
)
// Encode encodes a jwt token using data gotten from payload.
func Encode(payload map[string]interface{}) (tokenString string, err error) {
secretKey := getSecret()
if len(secretKey) < 1 {
return "", errors.New("No 'JWT_SECRET_KEY' value in environment variables")
}
claims := jwt.MapClaims{}
for k, v := range payload {
claims[k] = v
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
tokenString, err = token.SignedString(secretKey)
return
}
// Decode decodes a jwt token string.
//
// If the jwt token is invalid it returns an error.
func Decode(tokenString string) (claims map[string]interface{}, err error) {
secretKey := getSecret()
if len(secretKey) < 1 {
return nil, errors.New("No 'JWT_SECRET_KEY' value in environment variables")
}
token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return "", errors.New("Invalid jwt token string")
}
return secretKey, nil
})
if err != nil {
return
}
if token.Valid {
claims = token.Claims.(jwt.MapClaims)
return
}
err = errors.New("An unknowm error occured while decoding jwt")
return
}
func getSecret() (secret []byte) {
secret = []byte(os.Getenv("JWT_SECRET_KEY"))
return secret
}
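// Illustrative round trip (claim values are arbitrary; JWT_SECRET_KEY must be set
// in the environment for Encode and Decode to succeed):
//
//	os.Setenv("JWT_SECRET_KEY", "change-me")
//	token, err := Encode(map[string]interface{}{"sub": "user-42"})
//	if err != nil { /* handle error */ }
//	claims, err := Decode(token)
//	// claims["sub"] == "user-42"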
|
[
"\"JWT_SECRET_KEY\""
] |
[] |
[
"JWT_SECRET_KEY"
] |
[]
|
["JWT_SECRET_KEY"]
|
go
| 1 | 0 | |
examples/custom_model.py
|
"""
This example uses a very simple model and the MNIST dataset to show how the model,
the training and validation datasets, as well as the training and testing loops can
be customized in Plato.
"""
import os
import torch
from torch import nn
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
# os.environ['config_file'] = 'configs/fedavg_lenet5.yml'
from plato.clients import simple
from plato.datasources import base
from plato.servers import fedavg
from plato.trainers import basic
class DataSource(base.DataSource):
"""A custom datasource with custom training and validation
datasets.
"""
def __init__(self):
super().__init__()
self.trainset = MNIST("./data",
train=True,
download=True,
transform=ToTensor())
self.testset = MNIST("./data",
train=False,
download=True,
transform=ToTensor())
class Trainer(basic.Trainer):
"""A custom trainer with custom training and testing loops. """
def train_model(self, config, trainset, sampler, cut_layer=None): # pylint: disable=unused-argument
"""A custom training loop. """
optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()
train_loader = torch.utils.data.DataLoader(
dataset=trainset,
shuffle=False,
batch_size=config['batch_size'],
sampler=sampler)
num_epochs = 1
for __ in range(num_epochs):
for examples, labels in train_loader:
examples = examples.view(len(examples), -1)
logits = self.model(examples)
loss = criterion(logits, labels)
print("train loss: ", loss.item())
loss.backward()
optimizer.step()
optimizer.zero_grad()
def test_model(self, config, testset): # pylint: disable=unused-argument
"""A custom testing loop. """
test_loader = torch.utils.data.DataLoader(
testset, batch_size=config['batch_size'], shuffle=False)
correct = 0
total = 0
with torch.no_grad():
for examples, labels in test_loader:
examples, labels = examples.to(self.device), labels.to(
self.device)
examples = examples.view(len(examples), -1)
outputs = self.model(examples)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = correct / total
return accuracy
def main():
"""A Plato federated learning training session using a custom model. """
model = nn.Sequential(
nn.Linear(28 * 28, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, 10),
)
datasource = DataSource()
trainer = Trainer(model=model)
client = simple.Client(model=model, datasource=datasource, trainer=trainer)
server = fedavg.Server(model=model, trainer=trainer)
server.run(client)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"config_file"
] |
[]
|
["config_file"]
|
python
| 1 | 0 | |
core/tests/test_utils.py
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
import collections
import contextlib
import copy
import inspect
import itertools
import json
import logging
import os
import re
import unittest
from constants import constants
from core.controllers import base
from core.domain import auth_domain
from core.domain import caching_domain
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import fs_services
from core.domain import interaction_registry
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_manager
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import taskqueue_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.platform.search import elastic_search_services
from core.platform.taskqueue import cloud_tasks_emulator
import feconf
import main
import main_taskqueue
from proto import text_classifier_pb2
import python_utils
import schema_utils
import utils
import contextlib2
import elasticsearch
from google.appengine.ext import deferred
from google.appengine.ext import testbed
import requests_mock
import webtest
(
auth_models, exp_models, feedback_models, question_models, skill_models,
story_models, suggestion_models, topic_models,) = (
models.Registry.import_models([
models.NAMES.auth, models.NAMES.exploration, models.NAMES.feedback,
models.NAMES.question, models.NAMES.skill, models.NAMES.story,
models.NAMES.suggestion, models.NAMES.topic]))
current_user_services = models.Registry.import_current_user_services()
datastore_services = models.Registry.import_datastore_services()
email_services = models.Registry.import_email_services()
memory_cache_services = models.Registry.import_cache_services()
platform_auth_services = models.Registry.import_auth_services()
platform_taskqueue_services = models.Registry.import_taskqueue_services()
# Prefix to append to all lines printed by tests to the console.
# We are using the b' prefix as all the stdouts are in bytes.
LOG_LINE_PREFIX = b'LOG_INFO_TEST: '
# List of model classes that don't have Wipeout or Takeout, related class
# methods defined because they're not used directly but only as
# base classes for the other models.
BASE_MODEL_CLASSES_WITHOUT_DATA_POLICIES = (
'BaseCommitLogEntryModel',
'BaseHumanMaintainedModel',
'BaseMapReduceBatchResultsModel',
'BaseModel',
'BaseSnapshotContentModel',
'BaseSnapshotMetadataModel',
'VersionedModel',
)
def get_filepath_from_filename(filename, rootdir):
"""Returns filepath using the filename. Different files are present in
different subdirectories in the rootdir. So, we walk through the rootdir and
match the all the filenames with the given filename. When a match is found
the function returns the complete path of the filename by using
os.path.join(root, filename).
For example signup-page.mainpage.html is present in
core/templates/pages/signup-page and error-page.mainpage.html is present in
core/templates/pages/error-pages. So we walk through core/templates/pages
and a match for signup-page.mainpage.html is found in the signup-page
subdirectory and a match for error-page.mainpage.html is found in the
error-pages subdirectory.
Args:
filename: str. The name of the file.
rootdir: str. The directory to search the file in.
Returns:
str | None. The path of the file if file is found otherwise
None.
"""
# This is required since error files are served according to error status
# code. The file served is error-page.mainpage.html but it is compiled and
# stored as error-page-{status_code}.mainpage.html. So, we need to swap the
# name here to obtain the correct filepath.
if filename.startswith('error-page'):
filename = 'error-page.mainpage.html'
matches = list(itertools.chain.from_iterable(
(os.path.join(subdir, f) for f in filenames if f == filename)
for subdir, _, filenames in os.walk(rootdir)))
if len(matches) > 1:
raise Exception('Multiple files found with name: %s' % filename)
return matches[0] if matches else None
def mock_load_template(filename):
"""Mock for load_template function. This mock is required for backend tests
since we do not have webpack compilation before backend tests. The folder to
search templates is webpack_bundles which is generated after webpack
compilation. Since this folder will be missing, load_template function will
return an error. So, we use a mock for load_template which returns the html
file from the source directory instead.
Args:
filename: str. The name of the file for which template is to be
returned.
Returns:
str. The contents of the given file.
"""
filepath = get_filepath_from_filename(
filename, os.path.join('core', 'templates', 'pages'))
with python_utils.open_file(filepath, 'r') as f:
return f.read()
def check_image_png_or_webp(image_string):
"""Checks if the image is in png or webp format only.
Args:
image_string: str. Image url in base64 format.
Returns:
bool. Returns true if image is in WebP format.
"""
return image_string.startswith(('data:image/png', 'data:image/webp'))
def get_storage_model_module_names():
"""Get all module names in storage."""
# Iterate over the storage model module names registered in models.NAMES and
# yield each one in turn.
for name in models.NAMES:
yield name
def get_storage_model_classes():
"""Get all model classes in storage."""
for module_name in get_storage_model_module_names():
(module,) = models.Registry.import_models([module_name])
for member_name, member_obj in inspect.getmembers(module):
if inspect.isclass(member_obj):
clazz = getattr(module, member_name)
all_base_classes = [
base_class.__name__ for base_class in inspect.getmro(
clazz)]
if 'Model' in all_base_classes:
yield clazz
class ElasticSearchStub(python_utils.OBJECT):
"""This stub class mocks the functionality of ES in
elastic_search_services.py.
IMPORTANT NOTE TO DEVELOPERS: These mock functions are NOT guaranteed to
be exact implementations of elasticsearch functionality. If the results of
this mock and the local dev elasticsearch instance differ, the mock
functions should be updated so that their behaviour matches what a local
dev instance would return. (For example, this mock always has a 'version'
of 1 in the return dict and an arbitrary '_seq_no', although the version
number increments with every PUT in the elasticsearch Python client
library and the '_seq_no' increments with every operation.)
"""
_DB = {}
def reset(self):
"""Helper method that clears the mock database."""
self._DB.clear()
def _generate_index_not_found_error(self, index_name):
"""Helper method that generates an elasticsearch 'index not found' 404
error.
Args:
index_name: str. The index that was not found.
Returns:
elasticsearch.NotFoundError. A manually-constructed error
indicating that the index was not found.
"""
raise elasticsearch.NotFoundError(
404, 'index_not_found_exception', {
'status': 404,
'error': {
'reason': 'no such index [%s]' % index_name,
'root_cause': [{
'reason': 'no such index [%s]' % index_name,
'index': index_name,
'index_uuid': '_na_',
'type': 'index_not_found_exception',
'resource.type': 'index_or_alias',
'resource.id': index_name
}],
'index': index_name,
'index_uuid': '_na_',
'type': 'index_not_found_exception',
'resource.type': 'index_or_alias',
'resource.id': index_name
}
}
)
def mock_create_index(self, index_name):
"""Creates an index with the given name.
Args:
index_name: str. The name of the index to create.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
elasticsearch.RequestError. An index with the given name already
exists.
"""
if index_name in self._DB:
raise elasticsearch.RequestError(
400, 'resource_already_exists_exception',
'index [%s/RaNdOmStRiNgOfAlPhAs] already exists' % index_name)
self._DB[index_name] = []
return {
'index': index_name,
'acknowledged': True,
'shards_acknowledged': True
}
def mock_index(self, index_name, document, id=None): # pylint: disable=redefined-builtin
"""Adds a document with the given ID to the index.
Note that, unfortunately, we have to keep the name of "id" for the
last kwarg, although it conflicts with a Python builtin. This is
because the name is an existing part of the API defined at
https://elasticsearch-py.readthedocs.io/en/v7.10.1/api.html
Args:
index_name: str. The name of the index to create.
document: dict. The document to store.
id: str. The unique identifier of the document.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
elasticsearch.RequestError. An index with the given name already
exists.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
self._DB[index_name] = [
d for d in self._DB[index_name] if d['id'] != id]
self._DB[index_name].append(document)
return {
'_index': index_name,
'_shards': {
'total': 2,
'successful': 1,
'failed': 0,
},
'_seq_no': 96,
'_primary_term': 1,
'result': 'created',
'_id': id,
'_version': 1,
'_type': '_doc',
}
def mock_exists(self, index_name, doc_id):
"""Checks whether a document with the given ID exists in the mock
database.
Args:
index_name: str. The name of the index to check.
doc_id: str. The document id to check.
Returns:
bool. Whether the document exists in the index.
Raises:
elasticsearch.NotFoundError: The given index name was not found.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
return any([d['id'] == doc_id for d in self._DB[index_name]])
def mock_delete(self, index_name, doc_id):
"""Deletes a document from an index in the mock database. Does nothing
if the document is not in the index.
Args:
index_name: str. The name of the index to delete the document from.
doc_id: str. The document id to be deleted from the index.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
Exception. The document does not exist in the index.
elasticsearch.NotFoundError. The given index name was not found, or
the given doc_id was not found in the given index.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
docs = [d for d in self._DB[index_name] if d['id'] != doc_id]
if len(self._DB[index_name]) != len(docs):
self._DB[index_name] = docs
return {
'_type': '_doc',
'_seq_no': 99,
'_shards': {
'total': 2,
'successful': 1,
'failed': 0
},
'result': 'deleted',
'_primary_term': 1,
'_index': index_name,
'_version': 4,
'_id': '0'
}
raise elasticsearch.NotFoundError(
404, {
'_index': index_name,
'_type': '_doc',
'_id': doc_id,
'_version': 1,
'result': 'not_found',
'_shards': {
'total': 2,
'successful': 1,
'failed': 0
},
'_seq_no': 103,
'_primary_term': 1
})
def mock_delete_by_query(self, index_name, query):
"""Deletes documents from an index based on the given query.
Note that this mock only supports a specific format for the query, i.e. the
one which clears the entire index. It asserts that all calls to this
function use that query format.
Args:
index_name: str. The name of the index to delete the documents from.
query: dict. The query that defines which documents to delete.
Returns:
dict. A dict representing the ElasticSearch response.
Raises:
AssertionError. The query is not in the correct form.
elasticsearch.NotFoundError. The given index name was not found.
"""
assert query.keys() == ['query']
assert query['query'] == {
'match_all': {}
}
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
index_size = len(self._DB[index_name])
del self._DB[index_name][:]
return {
'took': 72,
'version_conflicts': 0,
'noops': 0,
'throttled_until_millis': 0,
'failures': [],
'throttled_millis': 0,
'total': index_size,
'batches': 1,
'requests_per_second': -1.0,
'retries': {u'search': 0, u'bulk': 0},
'timed_out': False,
'deleted': index_size
}
def mock_search(self, body=None, index=None, params=None):
"""Searches and returns documents that match the given query.
Args:
body: dict. A dictionary search definition that uses Query DSL.
index: str. The name of the index to search.
params: dict. A dict with two keys: `size` and `from`. The
corresponding values are ints which represent the number of
results to fetch, and the offset from which to fetch them,
respectively.
Returns:
dict. A dict representing the ElasticSearch response.
Raises:
AssertionError. The given arguments are not supported by this mock.
elasticsearch.NotFoundError. The given index name was not found.
"""
assert body is not None
# "_all" and "" are special index names that are used to search across
# all indexes. We do not allow their use.
assert index not in ['_all', '', None]
assert sorted(params.keys()) == ['from', 'size']
if index not in self._DB:
raise self._generate_index_not_found_error(index)
result_docs = []
result_doc_ids = set([])
for doc in self._DB[index]:
if not doc['id'] in result_doc_ids:
result_docs.append(doc)
result_doc_ids.add(doc['id'])
filters = body['query']['bool']['filter']
terms = body['query']['bool']['must']
for f in filters:
for k, v in f['match'].items():
result_docs = [doc for doc in result_docs if doc[k] in v]
if terms:
filtered_docs = []
for term in terms:
for _, v in term.items():
values = v['query'].split(' ')
for doc in result_docs:
strs = [val for val in doc.values() if isinstance(
val, python_utils.BASESTRING)]
words = []
for s in strs:
words += s.split(' ')
if all([value in words for value in values]):
filtered_docs.append(doc)
result_docs = filtered_docs
formatted_result_docs = [{
'_id': doc['id'],
'_score': 0.0,
'_type': '_doc',
'_index': index,
'_source': doc
} for doc in result_docs[
params['from']: params['from'] + params['size']
]]
return {
'timed_out': False,
'_shards': {
'failed': 0,
'total': 1,
'successful': 1,
'skipped': 0
},
'took': 4,
'hits': {
'hits': formatted_result_docs
},
'total': {
'value': len(formatted_result_docs),
'relation': 'eq'
},
'max_score': max(
[0.0] + [d['_score'] for d in formatted_result_docs]),
}
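# Illustrative call shape accepted by mock_search (the index name and document are
# hypothetical):
#
#   stub = ElasticSearchStub()
#   stub.mock_create_index('explorations')
#   stub.mock_index('explorations', {'id': 'exp1', 'title': 'fractions intro'})
#   stub.mock_search(
#       body={'query': {'bool': {'filter': [], 'must': []}}},
#       index='explorations',
#       params={'from': 0, 'size': 20})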
class AuthServicesStub(python_utils.OBJECT):
"""Test-only implementation of the public API in core.platform.auth."""
def __init__(self):
"""Initializes a new instance that emulates an empty auth server."""
self._user_id_by_auth_id = {}
self._external_user_id_associations = set()
@classmethod
def install_stub(cls, test):
"""Installs a new instance of the stub onto the given test instance.
Args:
test: GenericTestBase. The test instance to install the stub on.
Returns:
callable. A function that will uninstall the stub when called.
"""
with contextlib2.ExitStack() as stack:
stub = cls()
stack.enter_context(test.swap(
platform_auth_services, 'establish_auth_session',
stub.establish_auth_session))
stack.enter_context(test.swap(
platform_auth_services, 'destroy_auth_session',
stub.destroy_auth_session))
stack.enter_context(test.swap(
platform_auth_services, 'get_auth_claims_from_request',
stub.get_auth_claims_from_request))
stack.enter_context(test.swap(
platform_auth_services, 'mark_user_for_deletion',
stub.mark_user_for_deletion))
stack.enter_context(test.swap(
platform_auth_services, 'delete_external_auth_associations',
stub.delete_external_auth_associations))
stack.enter_context(test.swap(
platform_auth_services,
'verify_external_auth_associations_are_deleted',
stub.verify_external_auth_associations_are_deleted))
stack.enter_context(test.swap(
platform_auth_services, 'get_auth_id_from_user_id',
stub.get_auth_id_from_user_id))
stack.enter_context(test.swap(
platform_auth_services, 'get_user_id_from_auth_id',
stub.get_user_id_from_auth_id))
stack.enter_context(test.swap(
platform_auth_services, 'get_multi_user_ids_from_auth_ids',
stub.get_multi_user_ids_from_auth_ids))
stack.enter_context(test.swap(
platform_auth_services, 'get_multi_auth_ids_from_user_ids',
stub.get_multi_auth_ids_from_user_ids))
stack.enter_context(test.swap(
platform_auth_services, 'associate_auth_id_with_user_id',
stub.associate_auth_id_with_user_id))
stack.enter_context(test.swap(
platform_auth_services,
'associate_multi_auth_ids_with_user_ids',
stub.associate_multi_auth_ids_with_user_ids))
# Standard usage of ExitStack: enter a bunch of context managers
# from the safety of an ExitStack's context. Once they've all been
# opened, pop_all() of them off of the original context so they can
# *stay* open. Calling the function returned will exit all of them
# in reverse order.
# https://docs.python.org/3/library/contextlib.html#cleaning-up-in-an-enter-implementation
return stack.pop_all().close
@classmethod
def establish_auth_session(cls, unused_request, unused_response):
"""Sets login cookies to maintain a user's sign-in session.
Args:
unused_request: webapp2.Request. Unused because os.environ handles
sessions.
unused_response: webapp2.Response. Unused because os.environ handles
sessions.
"""
pass
@classmethod
def destroy_auth_session(cls, unused_response):
"""Clears login cookies from the given response headers.
Args:
unused_response: webapp2.Response. Unused because os.environ handles
sessions.
"""
pass
@classmethod
def get_auth_claims_from_request(cls, unused_request):
"""Authenticates the request and returns claims about its authorizer.
This stub obtains authorization information from os.environ. To make the
operation more authentic, this method also creates a new "external"
association for the user to simulate a genuine "provided" value.
Args:
unused_request: webapp2.Request. The HTTP request to authenticate.
Unused because auth-details are extracted from environment
variables.
Returns:
AuthClaims|None. Claims about the currently signed in user. If no
user is signed in, then returns None.
"""
auth_id = os.environ.get('USER_ID', '')
email = os.environ.get('USER_EMAIL', '')
role_is_super_admin = os.environ.get('USER_IS_ADMIN', '0') == '1'
if auth_id:
return auth_domain.AuthClaims(auth_id, email, role_is_super_admin)
return None
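# Illustrative environment setup for this stub (the values are arbitrary):
#
#   os.environ['USER_ID'] = 'auth_id_1'
#   os.environ['USER_EMAIL'] = 'user@example.com'
#   os.environ['USER_IS_ADMIN'] = '1'
#
# get_auth_claims_from_request(None) would then return an AuthClaims object for
# that signed-in super-admin user.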
def mark_user_for_deletion(self, user_id):
"""Marks the user, and all of their auth associations, as deleted.
Since the stub does not use models, this operation actually deletes the
user's association. The "external" associations, however, are not
deleted yet.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
self._user_id_by_auth_id = {
a: u for a, u in self._user_id_by_auth_id.items() if u != user_id
}
def delete_external_auth_associations(self, user_id):
"""Deletes all associations that refer to the user outside of Oppia.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
self._external_user_id_associations.discard(user_id)
def verify_external_auth_associations_are_deleted(self, user_id):
"""Returns true if and only if we have successfully verified that all
external associations have been deleted.
Args:
user_id: str. The unique ID of the user whose associations should be
checked.
Returns:
bool. True if and only if we have successfully verified that all
external associations have been deleted.
"""
return user_id not in self._external_user_id_associations
def get_auth_id_from_user_id(self, user_id):
"""Returns the auth ID associated with the given user ID.
Args:
user_id: str. The user ID.
Returns:
str|None. The auth ID associated with the given user ID, or None if
no association exists.
"""
return python_utils.NEXT(
(a for a, u in self._user_id_by_auth_id.items() if u == user_id),
None)
def get_user_id_from_auth_id(self, auth_id):
"""Returns the user ID associated with the given auth ID.
Args:
auth_id: str. The auth ID.
Returns:
str|None. The user ID associated with the given auth ID, or None if
no association exists.
"""
return self._user_id_by_auth_id.get(auth_id, None)
def get_multi_user_ids_from_auth_ids(self, auth_ids):
"""Returns the user IDs associated with the given auth IDs.
Args:
auth_ids: list(str). The auth IDs.
Returns:
list(str|None). The user IDs associated with each of the given auth
IDs, or None for associations which don't exist.
"""
return [self._user_id_by_auth_id.get(a, None) for a in auth_ids]
def get_multi_auth_ids_from_user_ids(self, user_ids):
"""Returns the auth IDs associated with the given user IDs.
Args:
user_ids: list(str). The user IDs.
Returns:
list(str|None). The auth IDs associated with each of the given user
IDs, or None for associations which don't exist.
"""
auth_id_by_user_id = {u: a for a, u in self._user_id_by_auth_id.items()}
return [auth_id_by_user_id.get(u, None) for u in user_ids]
def associate_auth_id_with_user_id(self, auth_id_user_id_pair):
"""Commits the association between auth ID and user ID.
This method also adds the user to the "external" set of associations.
Args:
auth_id_user_id_pair: auth_domain.AuthIdUserIdPair. The association
to commit.
Raises:
Exception. The IDs are already associated with a value.
"""
auth_id, user_id = auth_id_user_id_pair
if auth_id in self._user_id_by_auth_id:
raise Exception(
'auth_id=%r is already associated with user_id=%r' % (
auth_id, self._user_id_by_auth_id[auth_id]))
auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id).put()
self._external_user_id_associations.add(user_id)
self._user_id_by_auth_id[auth_id] = user_id
def associate_multi_auth_ids_with_user_ids(self, auth_id_user_id_pairs):
"""Commits the associations between auth IDs and user IDs.
This method also adds the users to the "external" set of associations.
Args:
auth_id_user_id_pairs: list(auth_domain.AuthIdUserIdPair). The
associations to commit.
Raises:
Exception. One or more auth associations already exist.
"""
collisions = ', '.join(
'{auth_id=%r: user_id=%r}' % (a, self._user_id_by_auth_id[a])
for a, _ in auth_id_user_id_pairs if a in self._user_id_by_auth_id)
if collisions:
raise Exception('already associated: %s' % collisions)
datastore_services.put_multi(
[auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id)
for auth_id, user_id in auth_id_user_id_pairs])
        self._external_user_id_associations.update(
            u for _, u in auth_id_user_id_pairs)
self._user_id_by_auth_id.update(auth_id_user_id_pairs)
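    # Illustrative sketch (not part of the original file) of how a test might
    # exercise the association lifecycle of this stub. The auth/user IDs are
    # made-up placeholders, and the association step also writes a
    # UserAuthDetailsModel, so the datastore test stubs must be active:
    #
    #     stub = AuthServicesStub()
    #     stub.associate_auth_id_with_user_id(
    #         auth_domain.AuthIdUserIdPair('aid_1', 'uid_1'))
    #     stub.get_user_id_from_auth_id('aid_1')            # -> 'uid_1'
    #     stub.mark_user_for_deletion('uid_1')
    #     stub.get_auth_id_from_user_id('uid_1')            # -> None
    #     stub.delete_external_auth_associations('uid_1')
    #     stub.verify_external_auth_associations_are_deleted('uid_1')  # -> True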
class TaskqueueServicesStub(python_utils.OBJECT):
"""The stub class that mocks the API functionality offered by the platform
layer, namely the platform.taskqueue taskqueue services API.
"""
def __init__(self, test_base):
"""Initializes a taskqueue services stub that replaces the API
functionality of core.platform.taskqueue.
Args:
test_base: GenericTestBase. The current test base.
"""
self._test_base = test_base
self._client = cloud_tasks_emulator.Emulator(
task_handler=self._task_handler, automatic_task_handling=False)
def _task_handler(self, url, payload, queue_name, task_name=None):
"""Makes a POST request to the task URL in the test app.
Args:
url: str. URL of the handler function.
payload: dict(str : *). Payload to pass to the request. Defaults
to None if no payload is required.
queue_name: str. The name of the queue to add the task to.
task_name: str|None. Optional. The name of the task.
"""
headers = {
'X-Appengine-QueueName': python_utils.convert_to_bytes(queue_name),
'X-Appengine-TaskName': (
# Maps empty strings to None so the output can become 'None'.
python_utils.convert_to_bytes(task_name or None)),
'X-AppEngine-Fake-Is-Admin': python_utils.convert_to_bytes(1),
}
csrf_token = self._test_base.get_new_csrf_token()
self._test_base.post_task(url, payload, headers, csrf_token=csrf_token)
def create_http_task(
self, queue_name, url, payload=None, scheduled_for=None,
task_name=None):
"""Creates a Task in the corresponding queue that will be executed when
the 'scheduled_for' countdown expires using the cloud tasks emulator.
Args:
queue_name: str. The name of the queue to add the task to.
url: str. URL of the handler function.
payload: dict(str : *). Payload to pass to the request. Defaults to
None if no payload is required.
scheduled_for: datetime|None. The naive datetime object for the time
to execute the task. Ignored by this stub.
task_name: str|None. Optional. The name of the task.
"""
# Causes the task to execute immediately by setting the scheduled_for
# time to 0. If we allow scheduled_for to be non-zero, then tests that
# rely on the actions made by the task will become unreliable.
scheduled_for = 0
self._client.create_task(
queue_name, url, payload, scheduled_for=scheduled_for,
task_name=task_name)
def count_jobs_in_taskqueue(self, queue_name=None):
"""Returns the total number of tasks in a single queue if a queue name
is specified or the entire taskqueue if no queue name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
int. The total number of tasks in a single queue or in the entire
taskqueue.
"""
return self._client.get_number_of_tasks(queue_name=queue_name)
def process_and_flush_tasks(self, queue_name=None):
"""Executes all of the tasks in a single queue if a queue name is
specified or all of the tasks in the taskqueue if no queue name is
specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
"""
self._client.process_and_flush_tasks(queue_name=queue_name)
def get_pending_tasks(self, queue_name=None):
"""Returns a list of the tasks in a single queue if a queue name is
specified or a list of all of the tasks in the taskqueue if no queue
name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
list(Task). List of tasks in a single queue or in the entire
taskqueue.
"""
return self._client.get_tasks(queue_name=queue_name)
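    # Illustrative sketch (not part of the original file) of the typical flow a
    # test follows with this stub once it is installed by GenericTestBase. The
    # queue name and handler URL are made-up placeholders:
    #
    #     platform_taskqueue_services.create_http_task(
    #         'some-queue', '/task/some_handler', payload={'key': 'value'})
    #     self.count_jobs_in_taskqueue(queue_name='some-queue')  # -> 1
    #     self.process_and_flush_pending_tasks()  # POSTs the task to the app.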
class MemoryCacheServicesStub(python_utils.OBJECT):
"""The stub class that mocks the API functionality offered by the platform
layer, namely the platform.cache cache services API.
"""
_CACHE_DICT = {}
def get_memory_cache_stats(self):
"""Returns a mock profile of the cache dictionary. This mock does not
have the functionality to test for peak memory usage and total memory
usage so the values for those attributes will be 0.
Returns:
MemoryCacheStats. MemoryCacheStats object containing the total
number of keys in the cache dictionary.
"""
return caching_domain.MemoryCacheStats(0, 0, len(self._CACHE_DICT))
def flush_cache(self):
"""Wipes the cache dictionary clean."""
self._CACHE_DICT.clear()
def get_multi(self, keys):
"""Looks up a list of keys in cache dictionary.
Args:
keys: list(str). A list of keys (strings) to look up.
Returns:
list(str). A list of values in the cache dictionary corresponding to
the keys that are passed in.
"""
assert isinstance(keys, list)
return [self._CACHE_DICT.get(key, None) for key in keys]
def set_multi(self, key_value_mapping):
"""Sets multiple keys' values at once in the cache dictionary.
Args:
key_value_mapping: dict(str, str). Both the key and value are
strings. The value can either be a primitive binary-safe string
or the JSON-encoded string version of the object.
Returns:
bool. Whether the set action succeeded.
"""
assert isinstance(key_value_mapping, dict)
self._CACHE_DICT.update(key_value_mapping)
return True
def delete_multi(self, keys):
"""Deletes multiple keys in the cache dictionary.
Args:
keys: list(str). The keys to delete.
Returns:
int. Number of successfully deleted keys.
"""
assert all(isinstance(key, python_utils.BASESTRING) for key in keys)
keys_to_delete = [key for key in keys if key in self._CACHE_DICT]
for key in keys_to_delete:
del self._CACHE_DICT[key]
return len(keys_to_delete)
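# A minimal, self-contained sketch (not part of the original file) showing how
# the cache stub above behaves: values round-trip through set_multi/get_multi,
# and delete_multi reports how many keys were actually removed. The keys and
# values used here are arbitrary placeholders.
def _example_memory_cache_stub_usage():
    """Illustrates the dict-backed behaviour of MemoryCacheServicesStub."""
    stub = MemoryCacheServicesStub()
    stub.flush_cache()
    stub.set_multi({'exp:1': '{"title": "Demo"}'})
    # Unknown keys come back as None, mirroring a real cache miss.
    assert stub.get_multi(['exp:1', 'missing']) == ['{"title": "Demo"}', None]
    # Only keys that were actually present count towards the returned total.
    assert stub.delete_multi(['exp:1', 'missing']) == 1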
class TestBase(unittest.TestCase):
"""Base class for all tests."""
maxDiff = 2500
# A test unicode string.
UNICODE_TEST_STRING = 'unicode ¡马!'
def _get_unicode_test_string(self, suffix):
"""Returns a string that contains unicode characters and ends with the
given suffix. This is used to test that functions behave correctly when
handling strings with unicode characters.
Args:
suffix: str. The suffix to append to the UNICODE_TEST_STRING.
Returns:
str. A string that contains unicode characters and ends with the
given suffix.
"""
return '%s%s' % (self.UNICODE_TEST_STRING, suffix)
    def _assert_validation_error(self, item, error_substring):
        """Checks that validating the given item fails with the given error."""
with self.assertRaisesRegexp(utils.ValidationError, error_substring):
item.validate()
def log_line(self, line):
"""Print the line with a prefix that can be identified by the script
that calls the test.
"""
        # We use the b'' prefix because all stdout output is in bytes.
python_utils.PRINT(
b'%s%s' % (LOG_LINE_PREFIX, python_utils.convert_to_bytes(line)))
def shortDescription(self):
"""Additional information logged during unit test invocation."""
# Suppress default logging of docstrings.
return None
def get_updated_param_dict(
self, param_dict, param_changes, exp_param_specs):
"""Updates a param dict using the given list of param_changes.
Note that the list of parameter changes is ordered. Parameter changes
later in the list may depend on parameter changes that have been set
earlier in the same list.
"""
new_param_dict = copy.deepcopy(param_dict)
for param_change in param_changes:
try:
obj_type = exp_param_specs[param_change.name].obj_type
            except KeyError:
raise Exception('Parameter %s not found' % param_change.name)
new_param_dict[param_change.name] = (
param_change.get_normalized_value(obj_type, new_param_dict))
return new_param_dict
def get_static_asset_filepath(self):
"""Returns filepath to the static files on disk ('' or 'build/')."""
return '' if constants.DEV_MODE else os.path.join('build')
def get_static_asset_url(self, asset_suffix):
"""Returns the relative path for the asset, appending it to the
corresponding cache slug. asset_suffix should have a leading slash.
"""
return '/assets%s%s' % (utils.get_asset_dir_prefix(), asset_suffix)
@contextlib.contextmanager
def capture_logging(self, min_level=logging.NOTSET):
"""Context manager that captures logs into a list.
Strips whitespace from messages for convenience.
https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging
Args:
min_level: int. The minimum logging level captured by the context
manager. By default, all logging levels are captured. Values
should be one of the following values from the logging module:
NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL.
Yields:
list(str). A live-feed of the logging messages captured so-far.
"""
captured_logs = []
class ListStream(python_utils.OBJECT):
"""Stream-like object that appends writes to the captured logs."""
def write(self, msg):
"""Appends stripped messages to captured logs."""
captured_logs.append(msg.strip())
def flush(self):
"""Does nothing."""
pass
list_stream_handler = logging.StreamHandler(stream=ListStream())
logger = logging.getLogger()
old_level = logger.level
logger.addHandler(list_stream_handler)
logger.setLevel(min_level)
try:
yield captured_logs
finally:
logger.setLevel(old_level)
logger.removeHandler(list_stream_handler)
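    # Illustrative usage of capture_logging (a sketch, not from the original
    # file). Captured messages have surrounding whitespace stripped:
    #
    #     with self.capture_logging(min_level=logging.ERROR) as logs:
    #         logging.error('something failed')
    #     self.assertEqual(logs, ['something failed'])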
@contextlib.contextmanager
def swap(self, obj, attr, newvalue):
"""Swap an object's attribute value within the context of a 'with'
statement. The object can be anything that supports getattr and setattr,
such as class instances, modules, etc.
Example usage:
import math
with self.swap(math, 'sqrt', lambda x: 42):
print math.sqrt(16.0) # prints 42
print math.sqrt(16.0) # prints 4 as expected.
To mock class methods, pass the function to the classmethod decorator
first, for example:
import types
with self.swap(
SomePythonClass, 'some_classmethod',
classmethod(new_classmethod)):
NOTE: self.swap and other context managers that are created using
contextlib.contextmanager use generators that yield exactly once. This
        means that you can only use them once after construction; otherwise,
the generator will immediately raise StopIteration, and contextlib will
raise a RuntimeError.
"""
original = getattr(obj, attr)
setattr(obj, attr, newvalue)
try:
yield
finally:
setattr(obj, attr, original)
@contextlib.contextmanager
def swap_to_always_return(self, obj, attr, value=None):
"""Swap obj.attr with a function that always returns the given value."""
def function_that_always_returns(*unused_args, **unused_kwargs):
"""Returns the input value."""
return value
with self.swap(obj, attr, function_that_always_returns):
yield
@contextlib.contextmanager
def swap_to_always_raise(self, obj, attr, error=Exception):
"""Swap obj.attr with a function that always raises the given error."""
def function_that_always_raises(*unused_args, **unused_kwargs):
"""Raises the input exception."""
raise error
with self.swap(obj, attr, function_that_always_raises):
yield
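    # Illustrative usage of the two helpers above (a sketch, not from the
    # original file). `some_module.some_function` is a hypothetical target:
    #
    #     with self.swap_to_always_return(some_module, 'some_function', value=3):
    #         some_module.some_function('ignored')   # -> 3
    #     with self.swap_to_always_raise(
    #             some_module, 'some_function', error=ValueError('boom')):
    #         some_module.some_function()            # raises ValueError('boom')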
@contextlib.contextmanager
def swap_with_call_counter(
self, obj, attr, raises=None, returns=None, call_through=False):
"""Swap obj.attr with a CallCounter instance.
Args:
obj: *. The Python object whose attribute you want to swap.
attr: str. The name of the function to be swapped.
raises: Exception|None. The exception raised by the swapped
function. If None, then no exception is raised.
returns: *. The return value of the swapped function.
call_through: bool. Whether to call through to the real function,
rather than use a stub implementation. If True, the `raises` and
`returns` arguments will be ignored.
Yields:
CallCounter. A CallCounter instance that's installed as obj.attr's
implementation while within the context manager returned.
"""
if call_through:
            impl = getattr(obj, attr)
else:
def impl(*_, **__):
"""Behaves according to the given values."""
if raises is not None:
# Pylint thinks we're trying to raise `None` even though
# we've explicitly checked for it above.
raise raises # pylint: disable=raising-bad-type
return returns
call_counter = CallCounter(impl)
with self.swap(obj, attr, call_counter):
yield call_counter
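    # Illustrative usage of swap_with_call_counter (a sketch, not from the
    # original file). `some_module.some_function` is hypothetical, and the
    # `times_called` attribute is assumed from the CallCounter helper defined
    # elsewhere in this module:
    #
    #     with self.swap_with_call_counter(
    #             some_module, 'some_function', returns=42) as counter:
    #         code_under_test()
    #     self.assertEqual(counter.times_called, 1)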
@contextlib.contextmanager
def swap_with_checks(
self, obj, attr, new_value, expected_args=None,
expected_kwargs=None, called=True):
"""Swap an object's function value within the context of a 'with'
statement. The object can be anything that supports getattr and setattr,
such as class instances, modules, etc.
        Examples:
            If you want to check that subprocess.Popen is invoked twice, e.g.
            `subprocess.Popen(['python'], shell=True)` and
            `subprocess.Popen(['python2'], shell=False)`, you can first define
            the mock function, then the swap, and just run the target function
            in context, as follows:
def mock_popen(command, shell):
return
popen_swap = self.swap_with_checks(
subprocess, 'Popen', mock_popen,
expected_args=[(['python'],), (['python2'],)],
expected_kwargs=[{'shell': True}, {'shell': False}])
with popen_swap:
function_that_invokes_popen()
Args:
obj: *. The Python object whose attribute you want to swap.
attr: str. The name of the function to be swapped.
new_value: function. The new function you want to use.
expected_args: None|list(tuple). The expected args that you want
this function to be invoked with. When its value is None, args
will not be checked. If the value type is list, the function
will check whether the called args is the first element in the
list. If matched, this tuple will be removed from the list.
expected_kwargs: None|list(dict). The expected keyword args you want
this function to be invoked with. Similar to expected_args.
called: bool. Whether the function is expected to be invoked. This
will always be checked.
Yields:
context. The context with function replaced.
"""
original = getattr(obj, attr)
        # The actual error message will also include the detailed assertion
        # message, since `self.longMessage` is enabled below.
msg = 'Expected checks failed when swapping out in %s.%s tests.' % (
obj.__name__, attr)
        def wrapper(*args, **kwargs):
            """Wrapper function for the new value. It checks the expected args
            and kwargs before the wrapped function is invoked, and records
            that the function has been called.
Args:
*args: list(*). The args passed into `attr` function.
**kwargs: dict. The key word args passed into `attr` function.
Returns:
*. Result of `new_value`.
"""
wrapper.called = True
if expected_args is not None:
self.assertEqual(args, expected_args[0], msg=msg)
expected_args.pop(0)
if expected_kwargs is not None:
self.assertEqual(kwargs, expected_kwargs[0], msg=msg)
expected_kwargs.pop(0)
result = new_value(*args, **kwargs)
return result
wrapper.called = False
setattr(obj, attr, wrapper)
error_occurred = False
try:
# This will show the detailed assert message.
self.longMessage = True
yield
except Exception:
error_occurred = True
# Raise issues thrown by the called function or assert error.
raise
finally:
setattr(obj, attr, original)
if not error_occurred:
self.assertEqual(wrapper.called, called, msg=msg)
self.assertFalse(expected_args, msg=msg)
self.assertFalse(expected_kwargs, msg=msg)
self.longMessage = False
def assertRaises(self, *args, **kwargs):
raise NotImplementedError(
'self.assertRaises should not be used in these tests. Please use '
'self.assertRaisesRegexp instead.')
def assertRaisesRegexp( # pylint: disable=keyword-arg-before-vararg
self, expected_exception, expected_regexp, callable_obj=None,
*args, **kwargs):
if not expected_regexp:
raise Exception(
'Please provide a sufficiently strong regexp string to '
'validate that the correct error is being raised.')
return super(TestBase, self).assertRaisesRegexp(
expected_exception, expected_regexp,
callable_obj=callable_obj, *args, **kwargs)
def assert_matches_regexps(self, items, regexps, full_match=False):
"""Asserts that each item matches the corresponding regexp.
If there are any missing or extra items that do not correspond to a
regexp element, then the assertion fails.
Args:
items: list(str). The string elements being matched.
regexps: list(str|RegexObject). The patterns that each item is
expected to match.
full_match: bool. Whether to require items to match exactly with the
corresponding pattern.
Raises:
AssertionError. At least one item does not match its corresponding
pattern, or the number of items does not match the number of
regexp patterns.
"""
get_match = re.match if full_match else re.search
differences = [
'~ [i=%d]:\t%r does not match: %r' % (i, item, regexp)
for i, (regexp, item) in enumerate(python_utils.ZIP(regexps, items))
if get_match(regexp, item, re.DOTALL) is None
]
if len(items) < len(regexps):
extra_regexps = regexps[len(items):]
differences.extend(
'- [i=%d]:\tmissing item expected to match: %r' % (i, regexp)
for i, regexp in enumerate(extra_regexps, start=len(items)))
if len(regexps) < len(items):
extra_items = items[len(regexps):]
differences.extend(
'+ [i=%d]:\textra item %r' % (i, item)
for i, item in enumerate(extra_items, start=len(regexps)))
if differences:
error_message = 'Lists differ:\n\t%s' % '\n\t'.join(differences)
raise AssertionError(error_message)
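    # Illustrative usage of assert_matches_regexps (a sketch, not from the
    # original file). With full_match=False (the default), re.search is used,
    # so each pattern only needs to match somewhere inside its item:
    #
    #     self.assert_matches_regexps(
    #         ['error: bad id', 'saved 3 items'],
    #         ['bad id', r'saved \d+ items'])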
class AppEngineTestBase(TestBase):
"""Minimal base class for tests that need Google App Engine functionality.
This class is primarily designed for unit tests in core.platform, where we
write adapters around Oppia's third-party dependencies. Generally, our unit
tests depend on stub implementations of these adapters to protect them from
platform-specific behavior. Such stubs are installed in the
GenericTestBase.run() method.
Most of the unit tests in our code base do, and should, inherit from
`GenericTestBase` to stay platform-agnostic. The platform layer itself,
however, can _not_ mock out platform-specific behavior. Those unit tests
need to interact with a real implementation. This base class provides the
bare-minimum functionality and stubs necessary to do so.
"""
# Environment values that our tests depend on.
AUTH_DOMAIN = 'example.com'
HTTP_HOST = 'localhost'
SERVER_NAME = 'localhost'
SERVER_PORT = '8080'
DEFAULT_VERSION_HOSTNAME = '%s:%s' % (HTTP_HOST, SERVER_PORT)
def __init__(self, *args, **kwargs):
super(AppEngineTestBase, self).__init__(*args, **kwargs)
# Defined outside of setUp() because we access it from methods, but can
# only install it during the run() method. Defining it in __init__
# satisfies pylint's attribute-defined-outside-init warning.
self._platform_taskqueue_services_stub = TaskqueueServicesStub(self)
def setUp(self):
super(AppEngineTestBase, self).setUp()
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.setup_env(
overwrite=True,
auth_domain=self.AUTH_DOMAIN, http_host=self.HTTP_HOST,
server_name=self.SERVER_NAME, server_port=self.SERVER_PORT,
default_version_hostname=self.DEFAULT_VERSION_HOSTNAME)
# Google App Engine service stubs.
self.testbed.init_app_identity_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_files_stub()
self.testbed.init_memcache_stub()
self.testbed.init_search_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_user_stub()
policy = (
datastore_services.make_instantaneous_global_consistency_policy())
self.testbed.init_datastore_v3_stub(consistency_policy=policy)
# The root path tells the testbed where to find the queue.yaml file.
self.testbed.init_taskqueue_stub(root_path=os.getcwd())
self._testbed_taskqueue_stub = (
self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME))
# Set up apps for testing.
self.testapp = webtest.TestApp(main.app)
self.taskqueue_testapp = webtest.TestApp(main_taskqueue.app)
def tearDown(self):
datastore_services.delete_multi(
datastore_services.query_everything().iter(keys_only=True))
self.testbed.deactivate()
super(AppEngineTestBase, self).tearDown()
def run(self, result=None):
"""Run the test, collecting the result into the specified TestResult.
Reference URL:
https://docs.python.org/3/library/unittest.html#unittest.TestCase.run
AppEngineTestBase's override of run() wraps super().run() in "swap"
contexts which stub out the platform taskqueue services.
Args:
result: TestResult | None. Holds onto the results of each test. If
None, a temporary result object is created (by calling the
defaultTestResult() method) and used instead.
"""
platform_taskqueue_services_swap = self.swap(
platform_taskqueue_services, 'create_http_task',
self._platform_taskqueue_services_stub.create_http_task)
with platform_taskqueue_services_swap:
super(AppEngineTestBase, self).run(result=result)
def _get_all_queue_names(self):
"""Returns a list of all queue names."""
return [q['name'] for q in self._testbed_taskqueue_stub.GetQueues()]
def count_jobs_in_taskqueue(self, queue_name):
"""Returns the total number of tasks in a single queue if a queue name
is specified or the entire taskqueue if no queue name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
int. The total number of tasks in a single queue or in the entire
taskqueue.
"""
return self._platform_taskqueue_services_stub.count_jobs_in_taskqueue(
queue_name=queue_name)
def process_and_flush_pending_tasks(self, queue_name=None):
"""Executes all of the tasks in a single queue if a queue name is
specified or all of the tasks in the taskqueue if no queue name is
specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
"""
self._platform_taskqueue_services_stub.process_and_flush_tasks(
queue_name=queue_name)
def get_pending_tasks(self, queue_name=None):
"""Returns a list of the tasks in a single queue if a queue name is
specified or a list of all of the tasks in the taskqueue if no queue
name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
list(Task). List of tasks in a single queue or in the entire
taskqueue.
"""
return self._platform_taskqueue_services_stub.get_pending_tasks(
queue_name=queue_name)
def count_jobs_in_mapreduce_taskqueue(self, queue_name):
"""Counts the jobs in the given MapReduce taskqueue."""
return len(self.get_pending_mapreduce_tasks(queue_name=queue_name))
def get_pending_mapreduce_tasks(self, queue_name=None):
"""Returns the jobs in the given MapReduce taskqueue. If queue_name is
None, defaults to returning the jobs in all available queues.
"""
queue_names = None if queue_name is None else [queue_name]
return self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names)
def _execute_mapreduce_tasks(self, tasks):
"""Execute MapReduce queued tasks.
Args:
tasks: list(google.appengine.api.taskqueue.taskqueue.Task). The
queued tasks.
"""
for task in tasks:
if task.url == '/_ah/queue/deferred':
deferred.run(task.payload)
else:
# All other tasks will be for MapReduce or taskqueue.
params = task.payload or ''
headers = {
'Content-Length': python_utils.convert_to_bytes(len(params))
}
headers.update(
(key, python_utils.convert_to_bytes(val))
for key, val in task.headers.items())
app = (
self.taskqueue_testapp if task.url.startswith('/task') else
self.testapp)
response = app.post(
task.url, params=params, headers=headers,
expect_errors=True)
if response.status_code != 200:
raise RuntimeError('MapReduce task failed: %r' % task)
def process_and_flush_pending_mapreduce_tasks(self, queue_name=None):
"""Runs and flushes pending MapReduce tasks. If queue_name is None, does
so for all queues; otherwise, this only runs and flushes tasks for the
specified queue.
For more information on taskqueue_stub, see:
https://code.google.com/p/googleappengine/source/browse/trunk/python/google/appengine/api/taskqueue/taskqueue_stub.py
"""
queue_names = (
self._get_all_queue_names() if queue_name is None else [queue_name])
get_enqueued_tasks = lambda: list(
self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names))
# Loops until get_enqueued_tasks() returns an empty list.
for tasks in iter(get_enqueued_tasks, []):
for queue in queue_names:
self._testbed_taskqueue_stub.FlushQueue(queue)
self._execute_mapreduce_tasks(tasks)
    def run_but_do_not_flush_pending_mapreduce_tasks(self):
        """Runs, but does not flush, the pending MapReduce tasks."""
queue_names = self._get_all_queue_names()
tasks = self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names)
for queue in queue_names:
self._testbed_taskqueue_stub.FlushQueue(queue)
self._execute_mapreduce_tasks(tasks)
class GenericTestBase(AppEngineTestBase):
"""Base test class with common/generic helper methods.
Unless a class is testing for "platform"-specific behavior (e.g., testing
third-party library code or database model implementations), always inherit
from this base class. Otherwise, inherit from unittest.TestCase (preferred)
or AppEngineTestBase if Google App Engine services/behavior is needed.
TODO(#12135): Split this enormous test base into smaller, focused pieces.
"""
# NOTE: For tests that do not/can not use the default super admin, authors
# can override the following class-level constant.
AUTO_CREATE_DEFAULT_SUPERADMIN_USER = True
# This is the value that gets returned by default when
# app_identity.get_application_id() is called during tests.
EXPECTED_TEST_APP_ID = 'dummy-cloudsdk-project-id'
SUPER_ADMIN_EMAIL = '[email protected]'
SUPER_ADMIN_USERNAME = 'tmpsuperadm1n'
# Dummy strings representing user attributes. Note that it is up to the
# individual test to actually register these users as editors, admins, etc.
ADMIN_EMAIL = '[email protected]'
# Usernames containing the string 'admin' are reserved, so we use 'adm'
# instead.
ADMIN_USERNAME = 'adm'
MODERATOR_EMAIL = '[email protected]'
MODERATOR_USERNAME = 'moderator'
OWNER_EMAIL = '[email protected]'
OWNER_USERNAME = 'owner'
EDITOR_EMAIL = '[email protected]'
EDITOR_USERNAME = 'editor'
TOPIC_MANAGER_EMAIL = '[email protected]'
TOPIC_MANAGER_USERNAME = 'topicmanager'
VOICE_ARTIST_EMAIL = '[email protected]'
VOICE_ARTIST_USERNAME = 'voiceartist'
VIEWER_EMAIL = '[email protected]'
VIEWER_USERNAME = 'viewer'
NEW_USER_EMAIL = '[email protected]'
NEW_USER_USERNAME = 'newuser'
DEFAULT_END_STATE_NAME = 'End'
PSEUDONYMOUS_ID = 'pid_%s' % ('a' * 32)
VERSION_0_STATES_DICT = {
feconf.DEFAULT_INIT_STATE_NAME: {
'content': [{'type': 'text', 'value': ''}],
'param_changes': [],
'interaction': {
'customization_args': {},
'id': 'Continue',
'handlers': [{
'name': 'submit',
'rule_specs': [{
'dest': 'END',
'feedback': [],
'param_changes': [],
'definition': {'rule_type': 'default'},
}],
}],
},
},
}
VERSION_27_STATE_DICT = {
'content': {'content_id': 'content', 'html': ''},
'param_changes': [],
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
'written_translations': {
'translations_mapping': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
},
'interaction': {
'solution': {
'correct_answer': 'Solution',
'explanation': {
'content_id': 'solution',
'html': '<p>Solution explanation</p>',
},
'answer_is_exclusive': False,
},
'answer_groups': [],
'default_outcome': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': '',
},
'dest': None,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': True,
},
'customization_args': {
'rows': {'value': 1},
'placeholder': {'value': 'Enter text here'},
},
'confirmed_unclassified_answers': [],
'id': 'TextInput',
'hints': [{
'hint_content': {
'content_id': 'hint_1',
'html': '<p>Hint 1</p>',
},
}],
},
'classifier_model_id': None,
}
VERSION_1_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_2_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_3_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_4_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math math_content-with-value="{'
'&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, '
'&quot;svg_filename&quot;: &quot;&quot;'
'}">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_1_SUBTOPIC_DICT = {
'skill_ids': ['skill_1'],
'id': 1,
'title': 'A subtitle',
}
    # Dictionary-like data structures within the sample YAML must be formatted
    # alphabetically so that they match, as strings, the YAML produced by the
    # generation tests. The indentation is also important, since it is used to
    # define nesting (just like in Python).
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
def run(self, result=None):
"""Run the test, collecting the result into the specified TestResult.
Reference URL:
https://docs.python.org/3/library/unittest.html#unittest.TestCase.run
GenericTestBase's override of run() wraps super().run() in swap
contexts to mock out the cache and taskqueue services.
Args:
result: TestResult | None. Holds onto the results of each test. If
None, a temporary result object is created (by calling the
defaultTestResult() method) and used instead.
"""
memory_cache_services_stub = MemoryCacheServicesStub()
memory_cache_services_stub.flush_cache()
es_stub = ElasticSearchStub()
es_stub.reset()
with contextlib2.ExitStack() as stack:
stack.callback(AuthServicesStub.install_stub(self))
stack.enter_context(self.swap(
elastic_search_services.ES.indices, 'create',
es_stub.mock_create_index))
stack.enter_context(self.swap(
elastic_search_services.ES, 'index',
es_stub.mock_index))
stack.enter_context(self.swap(
elastic_search_services.ES, 'exists',
es_stub.mock_exists))
stack.enter_context(self.swap(
elastic_search_services.ES, 'delete',
es_stub.mock_delete))
stack.enter_context(self.swap(
elastic_search_services.ES, 'delete_by_query',
es_stub.mock_delete_by_query))
stack.enter_context(self.swap(
elastic_search_services.ES, 'search',
es_stub.mock_search))
stack.enter_context(self.swap(
memory_cache_services, 'flush_cache',
memory_cache_services_stub.flush_cache))
stack.enter_context(self.swap(
memory_cache_services, 'get_multi',
memory_cache_services_stub.get_multi))
stack.enter_context(self.swap(
memory_cache_services, 'set_multi',
memory_cache_services_stub.set_multi))
stack.enter_context(self.swap(
memory_cache_services, 'get_memory_cache_stats',
memory_cache_services_stub.get_memory_cache_stats))
stack.enter_context(self.swap(
memory_cache_services, 'delete_multi',
memory_cache_services_stub.delete_multi))
super(GenericTestBase, self).run(result=result)
def setUp(self):
super(GenericTestBase, self).setUp()
if self.AUTO_CREATE_DEFAULT_SUPERADMIN_USER:
self.signup_superadmin_user()
def login(self, email, is_super_admin=False):
"""Sets the environment variables to simulate a login.
Args:
email: str. The email of the user who is to be logged in.
is_super_admin: bool. Whether the user is a super admin.
"""
self.testbed.setup_env(
overwrite=True,
user_email=email, user_id=self.get_auth_id_from_email(email),
user_is_admin=('1' if is_super_admin else '0'))
def logout(self):
"""Simulates a logout by resetting the environment variables."""
self.testbed.setup_env(
overwrite=True, user_email='', user_id='', user_is_admin='0')
@contextlib.contextmanager
def mock_datetime_utcnow(self, mocked_datetime):
"""Mocks response from datetime.datetime.utcnow method.
Example usage:
import datetime
mocked_datetime_utcnow = (
datetime.datetime.utcnow() - datetime.timedelta(days=1))
with self.mock_datetime_utcnow(mocked_datetime_utcnow):
print datetime.datetime.utcnow() # prints time reduced by 1 day
print datetime.datetime.utcnow() # prints current time.
Args:
mocked_datetime: datetime.datetime. The datetime which will be used
instead of the current UTC datetime.
Yields:
None. Empty yield statement.
"""
with datastore_services.mock_datetime_for_datastore(mocked_datetime):
yield
@contextlib.contextmanager
def login_context(self, email, is_super_admin=False):
"""Log in with the given email under the context of a 'with' statement.
Args:
email: str. An email associated with a user account.
is_super_admin: bool. Whether the user is a super admin.
Yields:
str. The id of the user associated with the given email, who is now
'logged in'.
"""
self.login(email, is_super_admin=is_super_admin)
try:
yield self.get_user_id_from_email(email)
finally:
self.logout()
@contextlib.contextmanager
def super_admin_context(self):
"""Log in as a global admin under the context of a 'with' statement.
Yields:
str. The id of the user associated with the given email, who is now
'logged in'.
"""
email = self.SUPER_ADMIN_EMAIL
with self.login_context(email, is_super_admin=True) as user_id:
yield user_id
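    # Illustrative usage of the login contexts above (a sketch, not from the
    # original file; the handler URL is a made-up placeholder):
    #
    #     self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
    #     with self.login_context(self.OWNER_EMAIL) as owner_id:
    #         response = self.get_json('/some/handler', params={'id': owner_id})
    #     # Outside the 'with' block, the environment is logged out again.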
def signup(self, email, username):
"""Complete the signup process for the user with the given username.
Args:
email: str. Email of the given user.
username: str. Username of the given user.
"""
user_services.create_new_user(self.get_auth_id_from_email(email), email)
with self.login_context(email), requests_mock.Mocker() as m:
# We mock out all HTTP requests while trying to signup to avoid
# calling out to real backend services.
m.request(requests_mock.ANY, requests_mock.ANY)
response = self.get_html_response(feconf.SIGNUP_URL)
self.assertEqual(response.status_int, 200)
response = self.testapp.post(feconf.SIGNUP_DATA_URL, params={
'csrf_token': self.get_new_csrf_token(),
'payload': json.dumps(
{'username': username, 'agreed_to_terms': True}),
})
self.assertEqual(response.status_int, 200)
def signup_superadmin_user(self):
"""Signs up a superadmin user. Must be called at the end of setUp()."""
self.signup(self.SUPER_ADMIN_EMAIL, self.SUPER_ADMIN_USERNAME)
def set_config_property(self, config_obj, new_config_value):
"""Sets a given configuration object's value to the new value specified
using a POST request.
"""
with self.super_admin_context():
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_obj.name: new_config_value,
},
}, csrf_token=self.get_new_csrf_token())
def set_user_role(self, username, user_role):
"""Sets the given role for this user.
Args:
username: str. Username of the given user.
user_role: str. Role of the given user.
"""
with self.super_admin_context():
self.post_json('/adminrolehandler', {
'username': username,
'role': user_role,
}, csrf_token=self.get_new_csrf_token())
def set_admins(self, admin_usernames):
"""Sets role of given users as ADMIN.
Args:
admin_usernames: list(str). List of usernames.
"""
for name in admin_usernames:
self.set_user_role(name, feconf.ROLE_ID_ADMIN)
def set_topic_managers(self, topic_manager_usernames):
"""Sets role of given users as TOPIC_MANAGER.
Args:
topic_manager_usernames: list(str). List of usernames.
"""
for name in topic_manager_usernames:
self.set_user_role(name, feconf.ROLE_ID_TOPIC_MANAGER)
def set_moderators(self, moderator_usernames):
"""Sets role of given users as MODERATOR.
Args:
moderator_usernames: list(str). List of usernames.
"""
for name in moderator_usernames:
self.set_user_role(name, feconf.ROLE_ID_MODERATOR)
def set_banned_users(self, banned_usernames):
"""Sets role of given users as BANNED_USER.
Args:
banned_usernames: list(str). List of usernames.
"""
for name in banned_usernames:
self.set_user_role(name, feconf.ROLE_ID_BANNED_USER)
def set_collection_editors(self, collection_editor_usernames):
"""Sets role of given users as COLLECTION_EDITOR.
Args:
collection_editor_usernames: list(str). List of usernames.
"""
for name in collection_editor_usernames:
self.set_user_role(name, feconf.ROLE_ID_COLLECTION_EDITOR)
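    # Illustrative sketch (not from the original file) of granting roles in a
    # test; users are typically signed up first so that the usernames exist:
    #
    #     self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
    #     self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
    #     self.set_admins([self.ADMIN_USERNAME])
    #     self.set_moderators([self.MODERATOR_USERNAME])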
def get_user_id_from_email(self, email):
"""Gets the user ID corresponding to the given email.
Args:
email: str. A valid email stored in the App Engine database.
Returns:
str|None. ID of the user possessing the given email, or None if
the user does not exist.
"""
user_settings = user_services.get_user_settings_by_auth_id(
self.get_auth_id_from_email(email))
return user_settings and user_settings.user_id
@classmethod
def get_auth_id_from_email(cls, email):
"""Returns a mock auth ID corresponding to the given email.
This method can use any algorithm to produce results as long as, during
the runtime of each test case/method, it is:
1. Pure (same input always returns the same output).
2. One-to-one (no two distinct inputs return the same output).
3. An integer byte-string (integers are always valid in auth IDs).
Args:
email: str. The email address of the user.
Returns:
bytes. The mock auth ID of a user possessing the given email.
"""
# Although the hash function doesn't guarantee a one-to-one mapping, in
# practice it is sufficient for our tests. We make it a positive integer
# because those are always valid auth IDs.
return python_utils.convert_to_bytes(abs(hash(email)))
def _get_response(
self, url, expected_content_type, params=None,
expected_status_int=200):
"""Get a response, transformed to a Python object.
Args:
url: str. The URL to fetch the response.
expected_content_type: str. The content type to expect.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will be
200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
if params is not None:
self.assertIsInstance(params, dict)
expect_errors = expected_status_int >= 400
# This swap is required to ensure that the templates are fetched from
# source directory instead of webpack_bundles since webpack_bundles is
# only produced after webpack compilation which is not performed during
# backend tests.
with self.swap(base, 'load_template', mock_load_template):
response = self.testapp.get(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
if expect_errors:
self.assertTrue(response.status_int >= 400)
else:
self.assertTrue(200 <= response.status_int < 400)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(response.status_int, expected_status_int)
self.assertEqual(response.content_type, expected_content_type)
return response
def get_html_response(self, url, params=None, expected_status_int=200):
"""Get a HTML response, transformed to a Python object.
Args:
url: str. The URL to fetch the response.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will
be 200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
return self._get_response(
url, 'text/html', params=params,
expected_status_int=expected_status_int)
def get_custom_response(
self, url, expected_content_type, params=None,
expected_status_int=200):
"""Get a response other than HTML or JSON as a Python object.
Args:
url: str. The URL to fetch the response.
expected_content_type: str. The content type to expect.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will be
200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
self.assertNotIn(
expected_content_type, ['text/html', 'application/json'])
return self._get_response(
url, expected_content_type, params=params,
expected_status_int=expected_status_int)
def get_response_without_checking_for_errors(
self, url, expected_status_int_list, params=None):
"""Get a response, transformed to a Python object and checks for a list
of status codes.
Args:
url: str. The URL to fetch the response.
expected_status_int_list: list(int). A list of integer status code
to expect.
params: dict. A dictionary that will be encoded into a query string.
Returns:
webtest.TestResponse. The test response.
"""
if params is not None:
self.assertIsInstance(
params, dict,
msg='Expected params to be a dict, received %s' % params)
# This swap is required to ensure that the templates are fetched from
# source directory instead of webpack_bundles since webpack_bundles is
# only produced after webpack compilation which is not performed during
# backend tests.
with self.swap(base, 'load_template', mock_load_template):
response = self.testapp.get(url, params=params, expect_errors=True)
self.assertIn(response.status_int, expected_status_int_list)
return response
def _parse_json_response(self, json_response, expect_errors):
"""Convert a JSON server response to an object (such as a dict)."""
if expect_errors:
self.assertTrue(json_response.status_int >= 400)
else:
self.assertTrue(200 <= json_response.status_int < 400)
self.assertEqual(json_response.content_type, 'application/json')
self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))
return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])
def get_json(self, url, params=None, expected_status_int=200):
"""Get a JSON response, transformed to a Python object."""
if params is not None:
self.assertIsInstance(params, dict)
expect_errors = expected_status_int >= 400
json_response = self.testapp.get(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def post_json(
self, url, payload, csrf_token=None, expected_status_int=200,
upload_files=None):
"""Post an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
expect_errors = expected_status_int >= 400
json_response = self._send_post_request(
self.testapp, url, data, expect_errors,
expected_status_int=expected_status_int, upload_files=upload_files)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def delete_json(self, url, params='', expected_status_int=200):
"""Delete object on the server using a JSON call."""
if params:
self.assertIsInstance(
params, dict,
msg='Expected params to be a dict, received %s' % params)
expect_errors = expected_status_int >= 400
json_response = self.testapp.delete(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def _send_post_request(
self, app, url, data, expect_errors, expected_status_int=200,
upload_files=None, headers=None):
"""Sends a post request with the data provided to the url specified.
Args:
app: TestApp. The WSGI application which receives the request and
produces response.
url: str. The URL to send the POST request to.
            data: *. To be put in the body of the request. If data is an
                iterator, it will be urlencoded. If it is a string, it will not
                be encoded, but placed in the body directly. Can be a
                collections.OrderedDict with webtest.forms.Upload fields
                included.
expect_errors: bool. Whether errors are expected.
expected_status_int: int. The expected status code.
            upload_files: list(tuple). List of
                (fieldname, filename, file_content) tuples. Can also provide
                just (fieldname, filename), in which case the file contents
                will be read from disk.
headers: dict(str, *). Extra headers to send.
Returns:
webtest.TestResponse. The response of the POST request.
"""
# Convert the files to bytes.
if upload_files is not None:
upload_files = tuple(
tuple(python_utils.convert_to_bytes(f) for f in upload_file)
for upload_file in upload_files)
return app.post(
url, params=data, headers=headers, status=expected_status_int,
upload_files=upload_files, expect_errors=expect_errors)
def post_task(
self, url, payload, headers, csrf_token=None, expect_errors=False,
            expected_status_int=200):
        """Posts an object to the server as JSON, with the given headers;
        returns the received object.
"""
if csrf_token:
payload['csrf_token'] = csrf_token
return self.taskqueue_testapp.post(
url, params=json.dumps(payload), headers=headers,
status=expected_status_int, expect_errors=expect_errors,
content_type='application/json')
def put_json(self, url, payload, csrf_token=None, expected_status_int=200):
"""PUT an object to the server with JSON and return the response."""
params = {'payload': json.dumps(payload)}
if csrf_token:
params['csrf_token'] = csrf_token
expect_errors = expected_status_int >= 400
json_response = self.testapp.put(
url, params=params, expect_errors=expect_errors)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def get_new_csrf_token(self):
"""Generates CSRF token for test."""
response = self.get_json('/csrfhandler')
return response['token']
def save_new_default_exploration(
self, exploration_id, owner_id, title='A title'):
"""Saves a new default exploration written by owner_id.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
title: str. The title of the exploration.
Returns:
Exploration. The exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, category='Algebra')
exp_services.save_new_exploration(owner_id, exploration)
return exploration
def set_interaction_for_state(self, state, interaction_id):
"""Sets the interaction_id, sets the fully populated default interaction
customization arguments, and increments next_content_id_index as needed.
Args:
state: State. The state domain object to set the interaction for.
interaction_id: str. The interaction id to set. Also sets the
default customization args for the given interaction id.
"""
# We wrap next_content_id_index in a dict so that modifying it in the
# inner function modifies the value.
next_content_id_index_dict = {'value': state.next_content_id_index}
def traverse_schema_and_assign_content_ids(value, schema, contentId):
"""Generates content_id from recursively traversing the schema, and
assigning to the current value.
Args:
value: *. The current traversed value in customization
arguments.
schema: dict. The current traversed schema.
contentId: str. The content_id generated so far.
"""
is_subtitled_html_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
schema['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML)
is_subtitled_unicode_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
schema['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE)
if is_subtitled_html_spec or is_subtitled_unicode_spec:
value['content_id'] = '%s_%i' % (
contentId, next_content_id_index_dict['value'])
next_content_id_index_dict['value'] += 1
elif schema['type'] == schema_utils.SCHEMA_TYPE_LIST:
for x in value:
traverse_schema_and_assign_content_ids(
x, schema['items'], contentId)
elif schema['type'] == schema_utils.SCHEMA_TYPE_DICT:
for schema_property in schema['properties']:
traverse_schema_and_assign_content_ids(
                        value[schema_property['name']],
                        schema_property['schema'],
                        '%s_%s' % (contentId, schema_property['name']))
interaction = (
interaction_registry.Registry.get_interaction_by_id(interaction_id))
ca_specs = interaction.customization_arg_specs
customization_args = {}
for ca_spec in ca_specs:
ca_name = ca_spec.name
ca_value = ca_spec.default_value
traverse_schema_and_assign_content_ids(
ca_value, ca_spec.schema, 'ca_%s' % ca_name)
customization_args[ca_name] = {'value': ca_value}
state.update_interaction_id(interaction_id)
state.update_interaction_customization_args(customization_args)
state.update_next_content_id_index(next_content_id_index_dict['value'])
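    # Illustrative usage of set_interaction_for_state (a sketch, not from the
    # original file). 'NumericInput' is one example of a valid interaction id:
    #
    #     exploration = exp_domain.Exploration.create_default_exploration('eid')
    #     init_state = exploration.states[exploration.init_state_name]
    #     self.set_interaction_for_state(init_state, 'NumericInput')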
def save_new_valid_exploration(
self, exploration_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE, end_state_name=None,
interaction_id='TextInput', correctness_feedback_enabled=False):
"""Saves a new strictly-validated exploration.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
title: str. The title of the exploration.
category: str. The category this exploration belongs to.
objective: str. The objective of this exploration.
language_code: str. The language_code of this exploration.
end_state_name: str. The name of the end state for the exploration.
interaction_id: str. The id of the interaction.
correctness_feedback_enabled: bool. Whether correctness feedback is
enabled for the exploration.
Returns:
Exploration. The exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, category=category,
language_code=language_code)
self.set_interaction_for_state(
exploration.states[exploration.init_state_name], interaction_id)
exploration.objective = objective
exploration.correctness_feedback_enabled = correctness_feedback_enabled
# If an end state name is provided, add terminal node with that name.
if end_state_name is not None:
exploration.add_states([end_state_name])
end_state = exploration.states[end_state_name]
self.set_interaction_for_state(end_state, 'EndExploration')
end_state.update_interaction_default_outcome(None)
# Link first state to ending state (to maintain validity).
init_state = exploration.states[exploration.init_state_name]
init_interaction = init_state.interaction
init_interaction.default_outcome.dest = end_state_name
if correctness_feedback_enabled:
init_interaction.default_outcome.labelled_as_correct = True
exp_services.save_new_exploration(owner_id, exploration)
return exploration
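    # Illustrative sketch (not from the original file) of creating and
    # publishing a small exploration in a test; 'exp_1' is an arbitrary id:
    #
    #     self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
    #     owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
    #     exploration = self.save_new_valid_exploration(
    #         'exp_1', owner_id, title='Demo', end_state_name='End')
    #     self.publish_exploration(owner_id, 'exp_1')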
def save_new_linear_exp_with_state_names_and_interactions(
self, exploration_id, owner_id, state_names, interaction_ids,
title='A title', category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE,
correctness_feedback_enabled=False):
"""Saves a new strictly-validated exploration with a sequence of states.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
state_names: list(str). The names of states to be linked
sequentially in the exploration. Must be a non-empty list and
contain no duplicates.
interaction_ids: list(str). The names of the interaction ids to be
assigned to each state. Values will be cycled, so it doesn't
need to be the same size as state_names, but it must be
non-empty.
title: str. The title of the exploration.
category: str. The category this exploration belongs to.
objective: str. The objective of this exploration.
language_code: str. The language_code of this exploration.
correctness_feedback_enabled: bool. Whether the correctness feedback
is enabled or not for the exploration.
Returns:
Exploration. The exploration domain object.
"""
if not state_names:
raise ValueError('must provide at least one state name')
if not interaction_ids:
raise ValueError('must provide at least one interaction type')
interaction_ids = itertools.cycle(interaction_ids)
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, init_state_name=state_names[0],
category=category, objective=objective, language_code=language_code)
exploration.correctness_feedback_enabled = correctness_feedback_enabled
exploration.add_states(state_names[1:])
for from_state_name, dest_state_name in (
python_utils.ZIP(state_names[:-1], state_names[1:])):
from_state = exploration.states[from_state_name]
self.set_interaction_for_state(
from_state, python_utils.NEXT(interaction_ids))
from_state.interaction.default_outcome.dest = dest_state_name
if correctness_feedback_enabled:
from_state.interaction.default_outcome.labelled_as_correct = (
True)
end_state = exploration.states[state_names[-1]]
self.set_interaction_for_state(end_state, 'EndExploration')
end_state.update_interaction_default_outcome(None)
exp_services.save_new_exploration(owner_id, exploration)
return exploration
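    # Illustrative usage (a sketch, not from the original file). The
    # interaction ids are cycled across the given states, and the final state
    # automatically becomes an EndExploration state:
    #
    #     exploration = self.save_new_linear_exp_with_state_names_and_interactions(
    #         'exp_2', owner_id, ['First', 'Second', 'Last'], ['TextInput'],
    #         title='Linear demo')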
def save_new_exp_with_custom_states_schema_version(
self, exp_id, user_id, states_dict, version):
"""Saves a new default exploration with the given version of state dict.
This function should only be used for creating explorations in tests
involving migration of datastore explorations that use an old states
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating explorations. This is
because the latter approach would result in an exploration with the
*current* states schema version.
Args:
exp_id: str. The exploration ID.
user_id: str. The user_id of the creator.
states_dict: dict. The dict representation of all the states.
version: int. Custom states schema version.
"""
exp_model = exp_models.ExplorationModel(
id=exp_id, category='category', title='title',
objective='Old objective', language_code='en', tags=[], blurb='',
author_notes='', states_schema_version=version,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME, states=states_dict,
param_specs={}, param_changes=[])
rights_manager.create_new_exploration_rights(exp_id, user_id)
commit_message = 'New exploration created with title \'title\'.'
exp_model.commit(user_id, commit_message, [{
'cmd': 'create_new',
'title': 'title',
'category': 'category',
}])
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_id, title='title', category='category',
objective='Old objective', language_code='en', tags=[],
ratings=feconf.get_empty_ratings(),
scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
status=exp_rights.status,
community_owned=exp_rights.community_owned,
owner_ids=exp_rights.owner_ids, contributor_ids=[],
contributors_summary={})
exp_summary_model.update_timestamps()
exp_summary_model.put()
def publish_exploration(self, owner_id, exploration_id):
"""Publish the exploration with the given exploration_id.
Args:
owner_id: str. The user_id of the owner of the exploration.
exploration_id: str. The ID of the new exploration.
"""
committer = user_services.get_user_actions_info(owner_id)
rights_manager.publish_exploration(committer, exploration_id)
def save_new_default_collection(
self, collection_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default collection written by owner_id.
Args:
collection_id: str. The id of the new default collection.
owner_id: str. The user_id of the creator of the collection.
title: str. The title of the collection.
category: str. The category this collection belongs to.
objective: str. The objective of this collection.
language_code: str. The language_code of this collection.
Returns:
Collection. The collection domain object.
"""
collection = collection_domain.Collection.create_default_collection(
collection_id, title=title, category=category, objective=objective,
language_code=language_code)
collection_services.save_new_collection(owner_id, collection)
return collection
def save_new_valid_collection(
self, collection_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE,
exploration_id='an_exploration_id',
end_state_name=DEFAULT_END_STATE_NAME):
"""Creates an Oppia collection and adds a node saving the exploration
details.
Args:
collection_id: str. ID for the collection to be created.
owner_id: str. The user_id of the creator of the collection.
title: str. Title for the collection.
category: str. The category of the exploration.
objective: str. Objective for the exploration.
language_code: str. The language code for the exploration.
exploration_id: str. The exploration_id for the Oppia exploration.
end_state_name: str. The name of the end state for the exploration.
Returns:
Collection. A newly-created collection containing the corresponding
exploration details.
"""
collection = collection_domain.Collection.create_default_collection(
collection_id, title=title, category=category, objective=objective,
language_code=language_code)
# Check whether exploration with given exploration_id exists or not.
exploration = (
exp_fetchers.get_exploration_by_id(exploration_id, strict=False))
if exploration is None:
exploration = self.save_new_valid_exploration(
exploration_id, owner_id, title=title, category=category,
objective=objective, end_state_name=end_state_name)
collection.add_node(exploration.id)
collection_services.save_new_collection(owner_id, collection)
return collection
def publish_collection(self, owner_id, collection_id):
"""Publish the collection with the given collection_id.
Args:
owner_id: str. The user_id of the owner of the collection.
collection_id: str. ID of the collection to be published.
"""
committer = user_services.get_user_actions_info(owner_id)
rights_manager.publish_collection(committer, collection_id)
def save_new_story(
self, story_id, owner_id, corresponding_topic_id,
title='Title', description='Description', notes='Notes',
language_code=constants.DEFAULT_LANGUAGE_CODE,
url_fragment='title', meta_tag_content='story meta tag content'):
"""Creates an Oppia Story and saves it.
NOTE: Callers are responsible for ensuring that the
'corresponding_topic_id' provided is valid, unless a test explicitly
requires it to be invalid.
Args:
story_id: str. ID for the story to be created.
owner_id: str. The user_id of the creator of the story.
title: str. The title of the story.
description: str. The high level description of the story.
notes: str. A set of notes that describe the characters,
main storyline, and setting.
corresponding_topic_id: str. The id of the topic to which the story
belongs.
language_code: str. The ISO 639-1 code for the language this story
is written in.
url_fragment: str. The url fragment of the story.
meta_tag_content: str. The meta tag content of the story.
Returns:
Story. A newly-created story.
"""
story = story_domain.Story.create_default_story(
story_id, title, description, corresponding_topic_id, url_fragment)
story.title = title
story.description = description
story.notes = notes
story.language_code = language_code
story.url_fragment = url_fragment
story.meta_tag_content = meta_tag_content
story_services.save_new_story(owner_id, story)
return story
def save_new_story_with_story_contents_schema_v1(
self, story_id, thumbnail_filename, thumbnail_bg_color,
owner_id, title, description, notes, corresponding_topic_id,
language_code=constants.DEFAULT_LANGUAGE_CODE,
url_fragment='story-frag',
meta_tag_content='story meta tag content'):
"""Saves a new story with a default version 1 story contents data dict.
This function should only be used for creating stories in tests
involving migration of datastore stories that use an old story contents
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating stories. This is because
the latter approach would result in a story with the *current* story
contents schema version.
Args:
story_id: str. ID for the story to be created.
thumbnail_filename: str|None. Thumbnail filename for the story.
thumbnail_bg_color: str|None. Thumbnail background color for the
story.
owner_id: str. The user_id of the creator of the story.
title: str. The title of the story.
description: str. The high level description of the story.
notes: str. A set of notes that describe the characters, main
storyline, and setting.
corresponding_topic_id: str. The id of the topic to which the story
belongs.
language_code: str. The ISO 639-1 code for the language this story
is written in.
url_fragment: str. The URL fragment for the story.
meta_tag_content: str. The meta tag content of the story.
"""
story_model = story_models.StoryModel(
id=story_id, thumbnail_filename=thumbnail_filename,
thumbnail_bg_color=thumbnail_bg_color, description=description,
title=title, language_code=language_code,
story_contents_schema_version=1, notes=notes,
corresponding_topic_id=corresponding_topic_id,
story_contents=self.VERSION_1_STORY_CONTENTS_DICT,
url_fragment=url_fragment, meta_tag_content=meta_tag_content)
commit_message = 'New story created with title \'%s\'.' % title
story_model.commit(
owner_id, commit_message,
[{'cmd': story_domain.CMD_CREATE_NEW, 'title': title}])
def save_new_subtopic(self, subtopic_id, owner_id, topic_id):
"""Creates an Oppia subtopic and saves it.
Args:
subtopic_id: str. ID for the subtopic to be created.
owner_id: str. The user_id of the creator of the topic.
topic_id: str. ID for the topic that the subtopic belongs to.
Returns:
SubtopicPage. A newly-created subtopic.
"""
subtopic_page = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
subtopic_id, topic_id))
subtopic_changes = [
subtopic_page_domain.SubtopicPageChange({
'cmd': subtopic_page_domain.CMD_CREATE_NEW,
'topic_id': topic_id,
'subtopic_id': subtopic_id,
})
]
subtopic_page_services.save_subtopic_page(
owner_id, subtopic_page, 'Create new subtopic', subtopic_changes)
return subtopic_page
def save_new_topic(
self, topic_id, owner_id, name='topic', abbreviated_name='topic',
url_fragment='topic',
thumbnail_filename='topic.svg',
thumbnail_bg_color=(
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0]),
description='description', canonical_story_ids=None,
additional_story_ids=None, uncategorized_skill_ids=None,
subtopics=None, next_subtopic_id=0,
language_code=constants.DEFAULT_LANGUAGE_CODE,
meta_tag_content='topic meta tag content',
practice_tab_is_displayed=False,
page_title_fragment_for_web='topic page title'):
"""Creates an Oppia Topic and saves it.
Args:
topic_id: str. ID for the topic to be created.
owner_id: str. The user_id of the creator of the topic.
name: str. The name of the topic.
abbreviated_name: str. The abbreviated name of the topic.
url_fragment: str. The url fragment of the topic.
thumbnail_filename: str|None. The thumbnail filename of the topic.
thumbnail_bg_color: str|None. The thumbnail background color of the
topic.
description: str. The description of the topic.
canonical_story_ids: list(str). The list of ids of canonical stories
that are part of the topic.
additional_story_ids: list(str). The list of ids of additional
stories that are part of the topic.
uncategorized_skill_ids: list(str). The list of ids of skills that
are not part of any subtopic.
subtopics: list(Subtopic). The different subtopics that are part of
this topic.
next_subtopic_id: int. The id for the next subtopic.
language_code: str. The ISO 639-1 code for the language this topic
is written in.
meta_tag_content: str. The meta tag content for the topic.
practice_tab_is_displayed: bool. Whether the practice tab should be
displayed.
page_title_fragment_for_web: str. The page title fragment for the
topic.
Returns:
Topic. A newly-created topic.
"""
canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(story_id)
for story_id in (canonical_story_ids or [])
]
additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(story_id)
for story_id in (additional_story_ids or [])
]
uncategorized_skill_ids = uncategorized_skill_ids or []
subtopics = subtopics or []
topic = topic_domain.Topic(
topic_id, name, abbreviated_name, url_fragment, thumbnail_filename,
thumbnail_bg_color, description, canonical_story_references,
additional_story_references, uncategorized_skill_ids, subtopics,
feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION, next_subtopic_id,
language_code, 0, feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION,
meta_tag_content, practice_tab_is_displayed,
page_title_fragment_for_web)
topic_services.save_new_topic(owner_id, topic)
return topic
def save_new_topic_with_subtopic_schema_v1(
self, topic_id, owner_id, name, abbreviated_name, url_fragment,
canonical_name, description, thumbnail_filename, thumbnail_bg_color,
canonical_story_references, additional_story_references,
uncategorized_skill_ids, next_subtopic_id,
language_code=constants.DEFAULT_LANGUAGE_CODE,
meta_tag_content='topic meta tag content',
practice_tab_is_displayed=False,
page_title_fragment_for_web='topic page title'):
"""Saves a new topic with a default version 1 subtopic data dict.
This function should only be used for creating topics in tests involving
migration of datastore topics that use an old subtopic schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating topics. This is because
the latter approach would result in a topic with the *current* subtopic
schema version.
Args:
topic_id: str. ID for the topic to be created.
owner_id: str. The user_id of the creator of the topic.
name: str. The name of the topic.
abbreviated_name: str. The abbreviated name of the topic.
url_fragment: str. The url fragment of the topic.
canonical_name: str. The canonical name (lowercase) of the topic.
description: str. The description of the topic.
thumbnail_filename: str. The thumbnail file name of the topic.
thumbnail_bg_color: str. The thumbnail background color of the
topic.
canonical_story_references: list(StoryReference). A set of story
reference objects representing the canonical stories that are
part of this topic.
additional_story_references: list(StoryReference). A set of story
reference objects representing the additional stories that are
part of this topic.
uncategorized_skill_ids: list(str). The list of ids of skills that
are not part of any subtopic.
next_subtopic_id: int. The id for the next subtopic.
language_code: str. The ISO 639-1 code for the language this topic
is written in.
meta_tag_content: str. The meta tag content for the topic.
practice_tab_is_displayed: bool. Whether the practice tab should be
displayed.
page_title_fragment_for_web: str. The page title fragment for the
topic.
"""
topic_rights_model = topic_models.TopicRightsModel(
id=topic_id, manager_ids=[], topic_is_published=True)
topic_model = topic_models.TopicModel(
id=topic_id, name=name, abbreviated_name=abbreviated_name,
url_fragment=url_fragment, thumbnail_filename=thumbnail_filename,
thumbnail_bg_color=thumbnail_bg_color,
canonical_name=canonical_name, description=description,
language_code=language_code,
canonical_story_references=canonical_story_references,
additional_story_references=additional_story_references,
uncategorized_skill_ids=uncategorized_skill_ids,
subtopic_schema_version=1,
story_reference_schema_version=(
feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION),
next_subtopic_id=next_subtopic_id,
subtopics=[self.VERSION_1_SUBTOPIC_DICT],
meta_tag_content=meta_tag_content,
practice_tab_is_displayed=practice_tab_is_displayed,
page_title_fragment_for_web=page_title_fragment_for_web)
commit_message = 'New topic created with name \'%s\'.' % name
topic_rights_model.commit(
committer_id=owner_id,
commit_message='Created new topic rights',
commit_cmds=[{'cmd': topic_domain.CMD_CREATE_NEW}])
topic_model.commit(
owner_id, commit_message,
[{'cmd': topic_domain.CMD_CREATE_NEW, 'name': name}])
def save_new_question(
self, question_id, owner_id, question_state_data,
linked_skill_ids, inapplicable_skill_misconception_ids=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Creates an Oppia Question and saves it.
Args:
question_id: str. ID for the question to be created.
owner_id: str. The id of the user creating the question.
question_state_data: State. The state data for the question.
linked_skill_ids: list(str). List of skill IDs linked to the
question.
inapplicable_skill_misconception_ids: list(str). List of skill
misconceptions ids that are not applicable to the question.
language_code: str. The ISO 639-1 code for the language this
question is written in.
Returns:
Question. A newly-created question.
"""
# This needs to be done because default arguments can not be of list
# type.
question = question_domain.Question(
question_id, question_state_data,
feconf.CURRENT_STATE_SCHEMA_VERSION, language_code, 0,
linked_skill_ids, inapplicable_skill_misconception_ids or [])
question_services.add_question(owner_id, question)
return question
def save_new_question_with_state_data_schema_v27(
self, question_id, owner_id, linked_skill_ids,
inapplicable_skill_misconception_ids=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default question with a default version 27 state data
dict.
This function should only be used for creating questions in tests
involving migration of datastore questions that use an old state data
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating questions. This is because
the latter approach would result in a question with the *current* state
data schema version.
Args:
question_id: str. ID for the question to be created.
owner_id: str. The id of the user creating the question.
linked_skill_ids: list(str). The skill IDs linked to the question.
inapplicable_skill_misconception_ids: list(str). List of skill
misconceptions ids that are not applicable to the question.
language_code: str. The ISO 639-1 code for the language this
question is written in.
"""
# This needs to be done because default arguments can not be of list
# type.
question_model = question_models.QuestionModel(
id=question_id, question_state_data=self.VERSION_27_STATE_DICT,
language_code=language_code, version=1,
question_state_data_schema_version=27,
linked_skill_ids=linked_skill_ids,
inapplicable_skill_misconception_ids=(
inapplicable_skill_misconception_ids or []))
question_model.commit(
owner_id, 'New question created',
[{'cmd': question_domain.CMD_CREATE_NEW}])
def save_new_question_suggestion_with_state_data_schema_v27(
self, author_id, skill_id, suggestion_id=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new question suggestion with a default version 27 state data
dict.
This function should only be used for creating question suggestion in
tests involving migration of datastore question suggestions that use an
old state data schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating questions. This is because
the latter approach would result in a question with the *current* state
data schema version.
"""
score_category = (
suggestion_models.SCORE_TYPE_QUESTION +
suggestion_models.SCORE_CATEGORY_DELIMITER + skill_id)
change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self.VERSION_27_STATE_DICT,
'question_state_data_schema_version': 27,
'language_code': language_code,
'linked_skill_ids': [skill_id],
'inapplicable_skill_misconception_ids': []
},
'skill_id': skill_id,
'skill_difficulty': 0.3
}
if suggestion_id is None:
suggestion_id = (
feedback_models.GeneralFeedbackThreadModel.
generate_new_thread_id(
feconf.ENTITY_TYPE_SKILL, skill_id))
suggestion_models.GeneralSuggestionModel.create(
feconf.SUGGESTION_TYPE_ADD_QUESTION,
feconf.ENTITY_TYPE_SKILL, skill_id, 1,
suggestion_models.STATUS_IN_REVIEW, author_id, None, change,
score_category, suggestion_id, language_code)
return suggestion_id
def save_new_skill(
self, skill_id, owner_id, description='description',
misconceptions=None, rubrics=None, skill_contents=None,
language_code=constants.DEFAULT_LANGUAGE_CODE,
prerequisite_skill_ids=None):
"""Creates an Oppia Skill and saves it.
Args:
skill_id: str. ID for the skill to be created.
owner_id: str. The user_id of the creator of the skill.
description: str. The description of the skill.
misconceptions: list(Misconception)|None. A list of Misconception
objects that contains the various misconceptions of the skill.
rubrics: list(Rubric)|None. A list of Rubric objects that contain
the rubric for each difficulty of the skill.
skill_contents: SkillContents|None. A SkillContents object
containing the explanation and examples of the skill.
language_code: str. The ISO 639-1 code for the language this skill
is written in.
prerequisite_skill_ids: list(str)|None. The prerequisite skill IDs
for the skill.
Returns:
Skill. A newly-created skill.
"""
skill = (
skill_domain.Skill.create_default_skill(skill_id, description, []))
if misconceptions is not None:
skill.misconceptions = misconceptions
skill.next_misconception_id = len(misconceptions) + 1
if skill_contents is not None:
skill.skill_contents = skill_contents
if prerequisite_skill_ids is not None:
skill.prerequisite_skill_ids = prerequisite_skill_ids
if rubrics is not None:
skill.rubrics = rubrics
else:
skill.rubrics = [
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[0], ['Explanation 1']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[1], ['Explanation 2']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[2], ['Explanation 3']),
]
skill.language_code = language_code
skill.version = 0
skill_services.save_new_skill(owner_id, skill)
return skill
def save_new_skill_with_defined_schema_versions(
self, skill_id, owner_id, description, next_misconception_id,
misconceptions=None, rubrics=None, skill_contents=None,
misconceptions_schema_version=1, rubric_schema_version=1,
skill_contents_schema_version=1,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default skill with the given versions for misconceptions
and skill contents.
This function should only be used for creating skills in tests involving
migration of datastore skills that use an old schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating skills. This is because
the latter approach would result in a skill with the *current* schema
version.
Args:
skill_id: str. ID for the skill to be created.
owner_id: str. The user_id of the creator of the skill.
description: str. The description of the skill.
next_misconception_id: int. The misconception id to be used by the
next misconception added.
misconceptions: list(Misconception.to_dict()). The list of
misconception dicts associated with the skill.
rubrics: list(Rubric.to_dict()). The list of rubric dicts associated
with the skill.
skill_contents: SkillContents.to_dict(). A SkillContents dict
containing the explanation and examples of the skill.
misconceptions_schema_version: int. The schema version for the
misconceptions object.
rubric_schema_version: int. The schema version for the rubric
object.
skill_contents_schema_version: int. The schema version for the
skill_contents object.
language_code: str. The ISO 639-1 code for the language this skill
is written in.
"""
skill_model = skill_models.SkillModel(
id=skill_id, description=description, language_code=language_code,
misconceptions=misconceptions, rubrics=rubrics,
skill_contents=skill_contents,
next_misconception_id=next_misconception_id,
misconceptions_schema_version=misconceptions_schema_version,
rubric_schema_version=rubric_schema_version,
skill_contents_schema_version=skill_contents_schema_version,
superseding_skill_id=None, all_questions_merged=False)
skill_model.commit(
owner_id, 'New skill created.',
[{'cmd': skill_domain.CMD_CREATE_NEW}])
def _create_valid_question_data(self, default_dest_state_name):
"""Creates a valid question_data dict.
Args:
default_dest_state_name: str. The default destination state.
Returns:
dict. The default question_data dict.
"""
state = state_domain.State.create_default_state(
default_dest_state_name, is_initial_state=True)
state.update_interaction_id('TextInput')
solution_dict = {
'answer_is_exclusive': False,
'correct_answer': 'Solution',
'explanation': {
'content_id': 'solution',
'html': '<p>This is a solution.</p>',
},
}
hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>')),
]
solution = state_domain.Solution.from_dict(
state.interaction.id, solution_dict)
state.update_interaction_solution(solution)
state.update_interaction_hints(hints_list)
state.update_interaction_customization_args({
'placeholder': {
'value': {
'content_id': 'ca_placeholder',
'unicode_str': 'Enter text here',
},
},
'rows': {'value': 1},
})
state.update_next_content_id_index(2)
state.interaction.default_outcome.labelled_as_correct = True
state.interaction.default_outcome.dest = None
return state
class LinterTestBase(GenericTestBase):
"""Base class for linter tests."""
def setUp(self):
super(LinterTestBase, self).setUp()
self.linter_stdout = []
def mock_print(*args):
"""Mock for python_utils.PRINT. Append the values to print to
linter_stdout list.
Args:
*args: list(*). Variable length argument list of values to print
in the same line of output.
"""
self.linter_stdout.append(
' '.join(python_utils.UNICODE(arg) for arg in args))
self.print_swap = self.swap(python_utils, 'PRINT', mock_print)
def assert_same_list_elements(self, phrases, stdout):
"""Checks to see if all of the phrases appear in at least one of the
stdout outputs.
Args:
phrases: list(str). A list of phrases we are trying to find in one
of the stdout outputs. For example, python linting outputs a
success string that includes data we don't have easy access to,
like how long the test took, so we may want to search for a
substring of that success string in stdout.
stdout: list(str). A list of the output results from the method's
execution.
"""
self.assertTrue(
any(all(p in output for p in phrases) for output in stdout))
def assert_failed_messages_count(self, stdout, expected_failed_count):
"""Assert number of expected failed checks to actual number of failed
checks.
Args:
stdout: list(str). A list of linter output messages.
expected_failed_count: int. Expected number of failed messages.
"""
failed_count = sum(msg.startswith('FAILED') for msg in stdout)
self.assertEqual(failed_count, expected_failed_count)
class AuditJobsTestBase(GenericTestBase):
"""Base class for audit jobs tests."""
def run_job_and_check_output(
self, expected_output, sort=False, literal_eval=False):
"""Helper function to run job and compare output.
Args:
expected_output: list(*). The expected result of the job.
sort: bool. Whether to sort the outputs before comparison.
literal_eval: bool. Whether to use ast.literal_eval before
comparison.
"""
self.process_and_flush_pending_tasks()
job_id = self.job_class.create_new()
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
self.job_class.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
self.process_and_flush_pending_tasks()
actual_output = self.job_class.get_output(job_id)
if literal_eval:
actual_output_dict = {}
expected_output_dict = {}
for item in (ast.literal_eval(value) for value in actual_output):
value = item[1]
if isinstance(value, list):
value = sorted(value)
actual_output_dict[item[0]] = value
for item in (ast.literal_eval(value) for value in expected_output):
value = item[1]
if isinstance(value, list):
value = sorted(value)
expected_output_dict[item[0]] = value
self.assertItemsEqual(actual_output_dict, expected_output_dict)
for key in actual_output_dict:
self.assertEqual(
actual_output_dict[key], expected_output_dict[key])
elif sort:
self.assertEqual(sorted(actual_output), sorted(expected_output))
else:
self.assertEqual(actual_output, expected_output)
class EmailMessageMock(python_utils.OBJECT):
"""Mock for core.platform.models email services messages."""
def __init__(
self, sender_email, recipient_email, subject, plaintext_body,
html_body, bcc=None, reply_to=None, recipient_variables=None):
"""Inits a mock email message with all the necessary data.
Args:
sender_email: str. The email address of the sender. This should be
in the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Must be utf-8.
recipient_email: str. The email address of the recipient. Must be
utf-8.
subject: str. The subject line of the email. Must be utf-8.
plaintext_body: str. The plaintext body of the email. Must be utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Must be utf-8.
bcc: list(str)|None. Optional argument. List of bcc emails. Emails
must be utf-8.
reply_to: str|None. Optional argument. Reply address formatted like
'reply+<reply_id>@<incoming_email_domain_name>', where reply_id is
the unique id of the sender.
recipient_variables: dict|None. Optional argument. If batch sending
requires differentiating each email based on the recipient, we
assign a unique id to each recipient, including info relevant to
that recipient so that we can reference it when composing the
email like so:
recipient_variables = {
'[email protected]': {'first': 'Bob', 'id': 1},
'[email protected]': {'first': 'Alice', 'id': 2},
}
subject = 'Hey, %recipient.first%'
For more information about this format, see:
https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending
"""
self.sender = sender_email
self.to = recipient_email
self.subject = subject
self.body = plaintext_body
self.html = html_body
self.bcc = bcc
self.reply_to = reply_to
self.recipient_variables = recipient_variables
class GenericEmailTestBase(GenericTestBase):
"""Base class for tests requiring email services."""
emails_dict = collections.defaultdict(list)
def run(self, result=None):
"""Adds a context swap on top of the test_utils.run() method so that
test classes extending GenericEmailTestBase will automatically have a
mailgun api key, mailgun domain name and mocked version of
send_email_to_recipients().
"""
with self.swap(
email_services, 'send_email_to_recipients',
self._send_email_to_recipients):
super(EmailTestBase, self).run(result=result)
def setUp(self):
super(GenericEmailTestBase, self).setUp()
self._wipe_emails_dict()
def _wipe_emails_dict(self):
"""Reset email dictionary for a new test."""
self.emails_dict = collections.defaultdict(list)
def _send_email_to_recipients(
self, sender_email, recipient_emails, subject, plaintext_body,
html_body, bcc=None, reply_to=None, recipient_variables=None):
"""Mocks sending an email to each email in recipient_emails.
Args:
sender_email: str. The email address of the sender. This should be
in the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Must be utf-8.
recipient_emails: list(str). The email addresses of the recipients.
Must be utf-8.
subject: str. The subject line of the email. Must be utf-8.
plaintext_body: str. The plaintext body of the email. Must be utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Must be utf-8.
bcc: list(str)|None. Optional argument. List of bcc emails. Must be
utf-8.
reply_to: str|None. Optional Argument. Reply address formatted like
'reply+<reply_id>@<incoming_email_domain_name>', where reply_id is
the unique id of the sender.
recipient_variables: dict|None. Optional Argument. If batch sending
requires differentiating each email based on the recipient, we
assign a unique id to each recipient, including info relevant to
that recipient so that we can reference it when composing the
email like so:
recipient_variables = {
'[email protected]': {'first': 'Bob', 'id': 1},
'[email protected]': {'first': 'Alice', 'id': 2},
}
subject = 'Hey, %recipient.first%'
For more information about this format, see:
https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending
Returns:
bool. Whether the emails are sent successfully.
"""
bcc_emails = None
if bcc:
bcc_emails = bcc[0] if len(bcc) == 1 else bcc
new_email = EmailMessageMock(
sender_email, recipient_emails, subject, plaintext_body, html_body,
bcc=bcc_emails, reply_to=(reply_to if reply_to else None),
recipient_variables=(
recipient_variables if recipient_variables else None))
for recipient_email in recipient_emails:
self.emails_dict[recipient_email].append(new_email)
return True
def _get_sent_email_messages(self, to):
"""Gets messages to a single recipient email.
Args:
to: str. The recipient email address.
Returns:
list(EmailMessageMock). The list of email messages corresponding to
that recipient email.
"""
return self.emails_dict[to] if to in self.emails_dict else []
def _get_all_sent_email_messages(self):
"""Gets the entire messages dictionary.
Returns:
dict(str, list(EmailMessageMock)). The dict keyed by recipient
email. Each value contains a list of EmailMessageMock objects
corresponding to that recipient email; in other words, all
individual emails sent to that specific recipient email.
"""
return self.emails_dict
EmailTestBase = GenericEmailTestBase
class ClassifierTestBase(GenericEmailTestBase):
"""Base class for classifier test classes that need common functions
related to reading classifier data and mocking the flow of storing
the trained models through a post request.
This class is derived from GenericEmailTestBase because the
TrainedClassifierHandlerTests test suite requires email services test
functions in addition to the classifier functions defined below.
"""
def post_blob(self, url, payload, expected_status_int=200):
"""Post a BLOB object to the server; return the received object.
Note that this method should only be used for
classifier.TrainedClassifierHandler handler and for no one else. The
reason is that we don't have any general mechanism for securely
transferring binary data. TrainedClassifierHandler implements a
specific mechanism which is restricted to the handler.
Args:
url: str. The URL to which BLOB object in payload should be sent
through a post request.
payload: bytes. Binary data which needs to be sent.
expected_status_int: int. The status expected as a response of post
request.
Returns:
dict. Parsed JSON response received upon invoking the post request.
"""
data = payload
expect_errors = False
if expected_status_int >= 400:
expect_errors = True
response = self._send_post_request(
self.testapp, url, data,
expect_errors, expected_status_int=expected_status_int,
headers={b'content-type': b'application/octet-stream'})
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
# Reference URL:
# https://github.com/Pylons/webtest/blob/
# bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119 .
self.assertEqual(response.status_int, expected_status_int)
return self._parse_json_response(response, expect_errors)
def _get_classifier_data_from_classifier_training_job(
self, classifier_training_job):
"""Retrieves classifier training job from GCS using metadata stored in
classifier_training_job.
Args:
classifier_training_job: ClassifierTrainingJob. Domain object
containing metadata of the training job which is used to
retrieve the trained model.
Returns:
FrozenModel. Protobuf object containing classifier data.
"""
filename = classifier_training_job.classifier_data_filename
file_system_class = fs_services.get_entity_file_system_class()
fs = fs_domain.AbstractFileSystem(file_system_class(
feconf.ENTITY_TYPE_EXPLORATION, classifier_training_job.exp_id))
classifier_data = utils.decompress_from_zlib(fs.get(filename))
classifier_data_proto = text_classifier_pb2.TextClassifierFrozenModel()
classifier_data_proto.ParseFromString(classifier_data)
return classifier_data_proto
class FunctionWrapper(python_utils.OBJECT):
"""A utility for making function wrappers. Create a subclass and override
either or both of the pre_call_hook and post_call_hook methods. See these
methods for more info.
"""
def __init__(self, func):
"""Creates a new FunctionWrapper instance.
Args:
func: a callable, or data descriptor. If it's a descriptor, then
__get__ should return a bound method. For example, func can be
a function, a method, a static or class method, but not a
@property.
"""
self._func = func
self._instance = None
def __call__(self, *args, **kwargs):
"""Overrides the call method for the function to call pre_call_hook
method which would be called before the function is executed and
post_call_hook which would be called after the function is executed.
"""
if self._instance is not None:
args = [self._instance] + list(args)
args_dict = inspect.getcallargs(self._func, *args, **kwargs)
self.pre_call_hook(args_dict)
result = self._func(*args, **kwargs)
self.post_call_hook(args_dict, result)
return result
def __get__(self, instance, owner):
# We have to implement __get__ because otherwise, we don't have a chance
# to bind to the instance self._func was bound to. See the following SO
# answer: https://stackoverflow.com/a/22555978/675311
self._instance = instance
return self
def pre_call_hook(self, args):
"""Override this to do tasks that should be executed before the actual
function call.
Args:
args: list(*). Set of arguments that the function accepts.
"""
pass
def post_call_hook(self, args, result):
"""Override this to do tasks that should be executed after the actual
function call.
Args:
args: list(*). Set of arguments that the function accepts.
result: *. Result returned from the function.
"""
pass
class CallCounter(FunctionWrapper):
"""A function wrapper that keeps track of how often the function is called.
Note that the counter is incremented before each call, so it is also
increased when the function raises an exception.
"""
def __init__(self, f):
"""Counts the number of times the given function has been called. See
FunctionWrapper for arguments.
"""
super(CallCounter, self).__init__(f)
self._times_called = 0
@property
def times_called(self):
"""Property that returns the number of times the wrapped function has
been called.
Returns:
int. The number of times the wrapped function has been called.
"""
return self._times_called
def pre_call_hook(self, args):
"""Method that is called before each function call to increment the
counter tracking the number of times a function is called. This will
also be called even when the function raises an exception.
Args:
args: list(*). Set of arguments that the function accepts.
"""
self._times_called += 1
class FailingFunction(FunctionWrapper):
"""A function wrapper that makes a function fail, raising a given exception.
It can be set to succeed after a given number of calls.
"""
INFINITY = 'infinity'
def __init__(self, f, exception, num_tries_before_success):
"""Create a new Failing function.
Args:
f: func. See FunctionWrapper.
exception: Exception. The exception to be raised.
num_tries_before_success: int. The number of times to raise an
exception, before a call succeeds. If this is 0, all calls will
succeed; if it is FailingFunction.INFINITY, all calls will
fail.
"""
super(FailingFunction, self).__init__(f)
self._exception = exception
self._num_tries_before_success = num_tries_before_success
self._always_fail = (
self._num_tries_before_success == FailingFunction.INFINITY)
self._times_called = 0
if not (self._num_tries_before_success >= 0 or self._always_fail):
raise ValueError(
'num_tries_before_success should either be an '
'integer greater than or equal to 0, '
'or FailingFunction.INFINITY')
def pre_call_hook(self, args):
"""Method that is called each time before the actual function call to
check if the exception is to be raised based on the number of tries
before success.
Args:
args: list(*). Set of arguments this function accepts.
"""
self._times_called += 1
call_should_fail = (
self._num_tries_before_success >= self._times_called)
if call_should_fail or self._always_fail:
raise self._exception
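# Illustrative use in a test (names assumed): with num_tries_before_success=2 the
# first two calls raise, and later calls go through to the wrapped function:
#   flaky_fetch = FailingFunction(some_module.fetch, IOError('transient failure'), 2)
#   with self.swap(some_module, 'fetch', flaky_fetch):
#       ...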
|
[] |
[] |
[
"USER_IS_ADMIN",
"USER_EMAIL",
"USER_ID"
] |
[]
|
["USER_IS_ADMIN", "USER_EMAIL", "USER_ID"]
|
python
| 3 | 0 | |
src/main/org/brailleblaster/BBIni.java
|
/* BrailleBlaster Braille Transcription Application
*
* Copyright (C) 2010, 2012
* ViewPlus Technologies, Inc. www.viewplus.com
* and
* Abilitiessoft, Inc. www.abilitiessoft.com
* and
* American Printing House for the Blind, Inc. www.aph.org
*
* All rights reserved
*
* This file may contain code borrowed from files produced by various
* Java development teams. These are gratefully acknowledged.
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the Apache 2.0 License, as given at
* http://www.apache.org/licenses/
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE
* See the Apache 2.0 License for more details.
*
* You should have received a copy of the Apache 2.0 License along with
* this program; see the file LICENSE.
* If not, see
* http://www.apache.org/licenses/
*
* Maintained by John J. Boyer [email protected]
*/
package org.brailleblaster;
import java.io.File;
import java.io.IOException;
import java.util.logging.FileHandler;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.brailleblaster.localization.LocaleHandler;
import org.brailleblaster.util.BrailleblasterPath;
import org.brailleblaster.util.FileUtils;
import org.eclipse.swt.SWT;
import org.eclipse.swt.SWTError;
import org.eclipse.swt.widgets.Display;
import org.liblouis.liblouisutdml;
import java.util.UUID;
/**
* Determine and set initial conditions.
*/
public final class BBIni {
private static BBIni bbini;
public static BBIni initialize (String[] args) {
if (bbini == null)
bbini = new BBIni(args);
return bbini;
}
private static boolean debug = false;
private static boolean gotGui = true;
private static boolean utd = false;
private static boolean multipleSubcommands = false;
private static Logger logger;
private static Display display = null;
private static String BBVersion;
private static String releaseDate;
private static String brailleblasterPath; // FO
private static String osName;
private static String osVersion;
private static String fileSep;
private static String nativeCommandPath;
private static String nativeLibraryPath;
private static String programDataPath;
private static String helpDocsPath;
private static String nativeCommandSuffix;
private static String nativeLibrarySuffix;
private static String recentDocs;
private static String settingsPath;
private static String tempFilesPath;
private static String platformName;
private static String userSettings;
private static String stylePath;
public final static String propExtension = ".properties";
private static boolean hSubcommands = false;
private static boolean hLiblouisutdml = false;
private static FileHandler logFile;
private static final String BBID = "brlblst";
private static String instanceId;
private BBIni(String[] args) {
long seconds = System.currentTimeMillis() / 1000;
instanceId = Long.toString (seconds, 32);
//System.out.println (instanceId);
LocaleHandler lh = new LocaleHandler();
Main m = new Main();
brailleblasterPath = BrailleblasterPath.getPath (m);
osName = System.getProperty ("os.name");
osVersion = System.getProperty ("os.version");
fileSep = System.getProperty ("file.separator");
platformName = SWT.getPlatform();
String userHome = System.getProperty ("user.home");
String BBHome;
programDataPath = brailleblasterPath + fileSep + "programData";
helpDocsPath = brailleblasterPath + fileSep + "helpDocs";
if (platformName.equals("win32")) {
BBHome = System.getenv ("APPDATA") + fileSep + BBID;
nativeLibrarySuffix = ".dll";
}
else if (platformName.equals ("cocoa")) {
BBHome = userHome + fileSep + BBID;
nativeLibrarySuffix = ".dylib";
}
else {
BBHome = userHome + fileSep + BBID;
nativeLibrarySuffix = ".so";
}
nativeLibraryPath = brailleblasterPath + fileSep + "native" + fileSep +
"lib";
FileUtils fu = new FileUtils();
settingsPath = BBHome + fileSep + "settings";
File settings = new File (settingsPath);
if (!settings.exists())
settings.mkdirs();
userSettings = settingsPath + fileSep +
"user_settings.properties";
if (!fu.exists (userSettings)) {
fu.copyFile (programDataPath + fileSep + "settings" + fileSep +
"user_settings.properties", userSettings);
}
// This part initializes recent_documents.txt.
recentDocs = settingsPath + fileSep + "recent_documents.txt";
fu.create(recentDocs);
//FO Aug 03
stylePath = settingsPath + fileSep + "styles";
File styleDir = new File (stylePath);
if (!styleDir.exists())
styleDir.mkdirs();
tempFilesPath = BBHome + fileSep + "temp" + fileSep + instanceId;
File temps = new File (tempFilesPath);
if (!temps.exists())
temps.mkdirs();
logger = Logger.getLogger ("org.brailleblaster");
try {
logFile = new FileHandler
(tempFilesPath + fileSep + "log.xml");
} catch (IOException e) {
logger.log (Level.SEVERE, "cannot open logfile", e);
e.printStackTrace();
}
if (logFile != null) {
logger.addHandler (logFile);
}
// disable output to console
logger.setUseParentHandlers(false);
if (args.length > 0) {
int i = 0;
while (i < args.length) {
if (args[i].charAt(0) != '-') {
break;
}
if (args[i].equals ("-debug")) {
debug = true;
}
else if (args[i].equals ("-nogui")) {
gotGui = false;
}
else if (args[i].equals ("-utd")) {
utd = true;
}
else if (args[i].equals ("-multcom")) {
multipleSubcommands = true;
}
else {
System.out.println ("Bad option '" + args[i] + "'");
}
i++;
}
if (i < args.length) {
hSubcommands = true;
}
}
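// Illustrative invocation (jar name assumed): java -jar brailleblaster.jar -debug -utd book.xml
// Options must come before any subcommand arguments; the loop above stops at the first
// argument that does not start with '-'.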
if (gotGui) {
try {
display = new Display();
} catch (SWTError e) {
logger.log (Level.SEVERE, "Can't find GUI", e);
}
}
try {
liblouisutdml.loadLibrary (nativeLibraryPath, nativeLibrarySuffix);
liblouisutdml.initialize (programDataPath, tempFilesPath,
"liblouisutdml.log");
hLiblouisutdml = true;
} catch (UnsatisfiedLinkError e) {
logger.log (Level.SEVERE, "Problem with liblouisutdml library", e);
}
catch (Exception e) {
logger.log (Level.WARNING, "This shouldn't happen", e);
}
}
public static boolean debugging() {
return debug;
}
public static boolean haveGui() {
return gotGui;
}
public static Display getDisplay()
{
return display;
}
public static boolean haveLiblouisutdml()
{
return hLiblouisutdml;
}
public static boolean haveSubcommands() {
return hSubcommands;
}
public static void setVersion (String version) {
BBVersion = version;
}
public static String getVersion() {
return BBVersion;
}
public static void setReleaseDate (String relDate) {
releaseDate = relDate;
}
public static String getReleaseDate () {
return releaseDate;
}
public static String getBrailleblasterPath()
{
return brailleblasterPath;
}
public static String getFileSep()
{
return fileSep;
}
public static String getNativeCommandPath()
{
return nativeCommandPath;
}
public static String getNativeLibraryPath()
{
return nativeLibraryPath;
}
public static String getProgramDataPath()
{
return programDataPath;
}
public static String getHelpDocsPath() {
return helpDocsPath;
}
public static String getNativeCommandSuffix()
{
return nativeCommandSuffix;
}
public static String getNativeLibrarySuffix()
{
return nativeLibrarySuffix;
}
public static String getSettingsPath()
{
return settingsPath;
}
public static String getTempFilesPath () {
return tempFilesPath;
}
public static String getPlatformName() {
return platformName;
}
public static String getUserSettings(){
return userSettings;
}
public static String getRecentDocs(){
return recentDocs;
}
public static Logger getLogger() {
return logger;
}
public static boolean useUtd () {
return utd;
}
public static boolean multCommands () {
return multipleSubcommands;
}
// FO
public static void setUtd (boolean trueFalse) {
utd = trueFalse;
return;
}
public static String getStylePath(){
return stylePath;
}
public static String getInstanceID() {
return instanceId;
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
scripts/generate-docs-index.py
|
#!/usr/bin/env python3
import os
import pathlib
import pprint
import sys
import traceback
import m2r
import msgpack
from jinja2 import Template
from tqdm import tqdm
DISABLE_TQDM = "CI" in os.environ
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent
LOCAL_CACHE_PATH = pathlib.Path(
os.environ.get("LOCAL_CACHE_PATH") or REPO_ROOT.joinpath(".cache")
).resolve()
if not LOCAL_CACHE_PATH.is_dir():
LOCAL_CACHE_PATH.mkdir(0o755)
PACKAGE_INFO_CACHE = LOCAL_CACHE_PATH / "packages-info"
if not PACKAGE_INFO_CACHE.is_dir():
PACKAGE_INFO_CACHE.mkdir(0o755)
RESULTS_DIR = REPO_ROOT / "results"
print(f"Local Cache Path: {LOCAL_CACHE_PATH}", file=sys.stderr, flush=True)
print(f"Results Path: {RESULTS_DIR}", file=sys.stderr, flush=True)
if sys.version_info < (3, 7):
print("This script is meant to only run on Py3.7+", file=sys.stderr, flush=True)
def set_progress_description(progress, message):
progress.set_description(f"{message: <60}")
def collect_extensions_info():
extensions = {}
for path in sorted(PACKAGE_INFO_CACHE.glob("*.msgpack")):
if path.stem == "salt-extensions":
continue
extension_data = msgpack.unpackb(path.read_bytes())
extension = extension_data["info"]["name"]
description = extension_data["info"]["description"].rstrip()
if "markdown" in extension_data["info"]["description_content_type"]:
description = m2r.convert(description)
summary = extension_data["info"]["summary"].strip()
extensions[extension] = {
"summary": summary,
"description": description,
}
return extensions
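# Each cached <package>.msgpack is expected to unpack to a dict shaped roughly like
# (keys inferred from the lookups above; values abbreviated):
#   {"info": {"name": ..., "summary": ..., "description": ..., "description_content_type": ...}}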
def collect_extensions_results():
results = {}
results["osnames"] = []
results["python_versions"] = []
for extension in sorted(RESULTS_DIR.iterdir()):
if not extension.is_dir():
continue
results[extension.name] = {}
for salt_version in sorted(extension.iterdir()):
results[extension.name][salt_version.name] = {}
for ospath in sorted(salt_version.iterdir()):
osname = ospath.name.replace("-latest", "")
if osname not in results["osnames"]:
results["osnames"].append(osname)
results[extension.name][salt_version.name][osname] = {}
for python_version in sorted(ospath.iterdir()):
python_version_name = python_version.name
if python_version_name not in results["python_versions"]:
results["python_versions"].append(python_version_name)
url = python_version.joinpath("url").read_text().strip()
status = python_version.joinpath("status").read_text().strip()
results[extension.name][salt_version.name][osname][
python_version_name
] = {"url": url, "status": status}
return results
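# Expected on-disk layout (inferred from the directory walk above):
#   results/<extension>/<salt-version>/<os>-latest/<python-version>/url
#   results/<extension>/<salt-version>/<os>-latest/<python-version>/status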
def main():
results = collect_extensions_results()
extensions = collect_extensions_info()
sphinx_results_dir = REPO_ROOT / "docs" / "results"
if not sphinx_results_dir.is_dir():
sphinx_results_dir.mkdir(0o0755)
docs_dir = REPO_ROOT / "docs"
table_template = REPO_ROOT / "templates" / "results.html.j2"
sphinx_index = REPO_ROOT / "docs" / "index.rst"
progress = tqdm(
total=len(results),
unit="pkg",
unit_scale=True,
desc=f"{' ' * 60} :",
disable=DISABLE_TQDM,
)
with progress:
progress.write(f"Collected Extension Test Results:\n{pprint.pformat(results)}")
progress.write(f"Collected Extensions:\n{pprint.pformat(extensions)}")
contents = f"{sphinx_index.read_text()}\n"
for extension in results:
if extension in ("osnames", "python_versions"):
progress.update()
continue
set_progress_description(progress, f"Processing {extension}")
if extension not in extensions:
progress.write(
f"The extension {extension!r} cannot be found in the extensions listing"
)
progress.update()
continue
title = extension
header = "-" * len(title)
summary = extensions[extension]["summary"]
description = extensions[extension]["description"]
context = dict(
results=results[extension],
python_versions=results["python_versions"],
osnames=results["osnames"],
)
extension_index = docs_dir / f"{extension}.rst"
table_contents = Template(table_template.read_text()).render(**context)
html_table_path = sphinx_results_dir / f"{extension}.html"
html_table_path.write_text(table_contents)
html_table_rel_path = html_table_path.relative_to(docs_dir)
contents += (
f"{title}\n{header}\n{summary} (:ref:`more info<{extension}>`)\n\n"
)
contents += f".. raw:: html\n :file: {html_table_rel_path}\n\n"
extension_contents = (
":orphan:\n\n"
f".. _{extension}:\n\n{title}\n{header.replace('-', '=')}\n\n"
)
extension_contents += "Compatibility\n-------------\n"
extension_contents += f".. raw:: html\n :file: {html_table_rel_path}\n\n"
extension_contents += f"Description\n-----------\n{description}\n"
extension_index.write_text(extension_contents)
progress.update()
set_progress_description(progress, "Writing extenstions index")
contents += ".. |date| date::\n\nLast Updated on |date|"
sphinx_index.write_text(f"{contents}\n")
progress.write("Complete")
if __name__ == "__main__":
exitcode = 0
try:
main()
except Exception:
exitcode = 1
traceback.print_exc()
finally:
sys.exit(exitcode)
|
[] |
[] |
[
"LOCAL_CACHE_PATH"
] |
[]
|
["LOCAL_CACHE_PATH"]
|
python
| 1 | 0 | |
code/ci360-twilio-connector/aws-lambda/python/twilioMessageRequest/lambda_function.py
|
"""
Copyright © 2021, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import os
import json
import datetime
import urllib3
import boto3
from botocore.exceptions import ClientError
print("Initializing function")
# Create a reusable HTTP connection pool
http = urllib3.PoolManager()
# Initialize global variables
default_sender = os.environ['default_sender']
event_ttl = int(os.environ['event_ttl']) * 1000
sm_secret_id_prefix = os.environ['sm_secret_id_prefix']
secret_cache = {}
"""
lambda_handler:
Main event handler entry point for lambda request
"""
def lambda_handler(event, context):
if event is not None and event["body"] is not None:
body = json.loads(event["body"])
print("Received event:", body["eventName"])
event_type = body["eventType"]
if event_type == "outboundSystem":
if process_event(body):
return {
'statusCode': 200,
'body': json.dumps('OK')
}
else:
return {
'statusCode': 500,
'body': json.dumps('ERROR')
}
else:
print("event type other than outboundSystem")
return {
'statusCode': 200,
'body': json.dumps('OK')
}
"""
process_event:
Top level of business logic, event processing is here
"""
def process_event(event_body):
# determine if event is "fresh"
event_age = int(datetime.datetime.utcnow().timestamp() * 1000) - int(event_body["date"]["generatedTimestamp"])
if event_age > event_ttl:
print("event too old, age:", event_age)
return True
print("identityId:", event_body["identityId"], "tenant_id:", event_body["externalTenantId"])
# parse creative
creative_content = event_body["impression"]["creativeContent"]
#print("creative:", creative_content)
msg_req = parse_creative(creative_content)
# validate
if validate_request(msg_req):
# now call API
http_status = call_twilio_api(msg_req, event_body["externalTenantId"])
if http_status == 200 or http_status == 201:
return True
return False
"""
parse_creative:
Creative content parsing logic, returns message request object
"""
def parse_creative(creative_content):
msg_req = {}
from_channel = None
creative_parts = creative_content.split(";")
# body of message to be sent is last part of creative
msg_req["Body"] = creative_parts[len(creative_parts)-1].strip()
# parse out the rest of the creative (key:value format)
for i in range(len(creative_parts)-1):
msg_attr = creative_parts[i].split(":", 1)
if msg_attr[0].upper() == "TO":
msg_req["To"] = msg_attr[1]
elif msg_attr[0].upper() == "FROM":
from_channel = msg_attr[1]
elif msg_attr[0].upper() == "MEDIA_URLS":
msg_req["MediaUrl"] = msg_attr[1]
# set default sender first, override below if provided
msg_req["From"] = default_sender
if from_channel is not None:
msg_req["From"] = from_channel
return msg_req
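# Illustrative creative content (format inferred from the parsing above; numbers are placeholders):
#   "TO:+15551234567;FROM:+15557654321;MEDIA_URLS:https://example.com/img.png; Hello from CI360!"
# The last ';'-separated segment becomes the message body; TO is required, while FROM and
# MEDIA_URLS are optional key:value pairs (FROM falls back to default_sender).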
"""
validate_request:
Validate message request object, check that it's populated correctly
"""
def validate_request(msg_req):
if msg_req["Body"] is None or not msg_req["Body"].strip():
return False
elif msg_req["To"] is None or not msg_req["To"].strip():
return False
elif msg_req["From"] is None or not msg_req["From"].strip():
return False
return True
"""
call_twilio_api
Twilio API call function
"""
def call_twilio_api(msg_req, tenant_id):
secret = get_secret(tenant_id)
#print("msg_req:", msg_req)
auth_string = secret["twilio_account_sid"] + ":" + secret["twilio_auth_token"]
req_headers = urllib3.util.make_headers(basic_auth=auth_string)
r = http.request('POST', secret["twilio_api_url"], fields = msg_req, headers = req_headers)
print("Response Status:", r.status, "Body:", r.data)
return r.status
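# The secret used above is expected to contain at least these keys (inferred from usage;
# the URL shown is the standard Twilio Messages endpoint and is illustrative):
#   {"twilio_account_sid": "AC...", "twilio_auth_token": "...",
#    "twilio_api_url": "https://api.twilio.com/2010-04-01/Accounts/AC.../Messages.json"}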
"""
get_secret
Retrieve the secret containing the Twilio API credentials from Secrets Manager
"""
def get_secret(tenant_id):
try:
secret = secret_cache[tenant_id]
#print("secret found in cache")
except KeyError:
print(f"secret not found in cache, fetching for tenant: {tenant_id}")
secret = fetch_secret(tenant_id)
secret_cache[tenant_id] = secret
return secret
def fetch_secret(tenant_id):
# Get secrets
session = boto3.session.Session()
client = session.client(service_name='secretsmanager', region_name="us-east-1")
try:
secret_id = sm_secret_id_prefix + tenant_id
get_secret_value_response = client.get_secret_value(SecretId=secret_id)
except ClientError as e:
print("Failed to get secrets: ", e.response)
raise e
else:
if 'SecretString' in get_secret_value_response:
secret = json.loads(get_secret_value_response['SecretString'])
return secret
else:
print("No SecretString found")
return None
|
[] |
[] |
[
"sm_secret_id_prefix",
"event_ttl",
"default_sender"
] |
[]
|
["sm_secret_id_prefix", "event_ttl", "default_sender"]
|
python
| 3 | 0 | |
voic/__init__.py
|
import flask
import flask_bcrypt
import flask_ckeditor
import flask_login
import flask_sqlalchemy
import flask_mobility
import flask_mail
import dotenv
import os
dotenv.load_dotenv()
app = flask.Flask(__name__)
app.config['SECRET_KEY'] = os.environ.get('FLASK_SECRET_KEY')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db_url = os.environ.get('DATABASE_URL')
db_url = db_url.split(':')
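# Heroku-style DATABASE_URL values use the legacy 'postgres://' scheme, which newer
# SQLAlchemy releases reject, so the scheme is rewritten to 'postgresql://' below.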
if db_url[0] == 'postgres':
db_url[0] = 'postgresql'
app.config['SQLALCHEMY_DATABASE_URI'] = ':'.join(db_url)
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USE_SSL'] = False
app.config['MAIL_USERNAME'] = os.environ.get('EMAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('EMAIL_PASSWORD')
mobility = flask_mobility.Mobility(app)
db = flask_sqlalchemy.SQLAlchemy(app)
bcrypt = flask_bcrypt.Bcrypt(app)
login_manager = flask_login.LoginManager(app)
mail = flask_mail.Mail(app)
ckeditor = flask_ckeditor.CKEditor(app)
from voic import routes
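# Illustrative .env file consumed by load_dotenv() above (placeholder values only):
#   FLASK_SECRET_KEY=<random hex string>
#   DATABASE_URL=postgres://user:password@host:5432/dbname
#   EMAIL_USERNAME=you@example.com
#   EMAIL_PASSWORD=<mail account app password>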
|
[] |
[] |
[
"EMAIL_USERNAME",
"DATABASE_URL",
"EMAIL_PASSWORD",
"FLASK_SECRET_KEY"
] |
[]
|
["EMAIL_USERNAME", "DATABASE_URL", "EMAIL_PASSWORD", "FLASK_SECRET_KEY"]
|
python
| 4 | 0 | |
bts/dataset.py
|
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import torchvision.transforms.functional as TF
from PIL import Image
import os
import random
class TumorDataset(Dataset):
""" Returns a TumorDataset class object which represents our tumor dataset.
TumorDataset inherits from torch.utils.data.Dataset class.
"""
def __init__(self, root_dir, transform=True, DEBUG=False):
""" Constructor for our TumorDataset class.
Parameters:
root_dir(str): Directory with all the images.
transform(bool): Flag to apply image random transformation.
DEBUG(bool): To switch to debug mode for image transformation.
Returns: None
"""
self.root_dir = root_dir
self.transform = {'hflip': TF.hflip,
'vflip': TF.vflip,
'rotate': TF.rotate}
self.default_transformation = transforms.Compose([
transforms.Grayscale(),
transforms.Resize((512, 512))
])
self.DEBUG = DEBUG
if not transform:
self.transform = None
def __getitem__(self, index):
""" Overridden method from inheritted class to support
indexing of dataset such that datset[I] can be used
to get Ith sample.
Parameters:
index(int): Index of the dataset sample
Return:
sample(dict): Contains the index, image, mask torch.Tensor.
'index': Index of the image.
'image': Contains the tumor image torch.Tensor.
'mask' : Contains the mask image torch.Tensor.
"""
image_name = os.path.join(self.root_dir, str(index)+'.png')
mask_name = os.path.join(self.root_dir, str(index)+'_mask.png')
image = Image.open(image_name)
mask = Image.open(mask_name)
image = self.default_transformation(image)
mask = self.default_transformation(mask)
# Custom transformations
if self.transform:
image, mask = self._random_transform(image, mask)
image = TF.to_tensor(image)
mask = TF.to_tensor(mask)
sample = {'index': int(index), 'image': image, 'mask': mask}
return sample
def _random_transform(self, image, mask):
""" Applies a set of transformation in random order.
Each transformation has a probability of 0.5
"""
choice_list = list(self.transform)
for _ in range(len(choice_list)):
choice_key = random.choice(choice_list)
if self.DEBUG:
print(f'Transform choose: {choice_key}')
action_prob = random.randint(0, 1)
if action_prob >= 0.5:
if self.DEBUG:
print(f'\tApplying transformation: {choice_key}')
if choice_key == 'rotate':
rotation = random.randint(15, 75)
if self.DEBUG:
print(f'\t\tRotation by: {rotation}')
image = self.transform[choice_key](image, rotation)
mask = self.transform[choice_key](mask, rotation)
else:
image = self.transform[choice_key](image)
mask = self.transform[choice_key](mask)
choice_list.remove(choice_key)
return image, mask
def __len__(self):
""" Overridden method from inheritted class so that
len(self) returns the size of the dataset.
"""
error_msg = 'Part of dataset is missing!\nNumber of tumor and mask images are not same.'
total_files = len(os.listdir(self.root_dir))
assert (total_files % 2 == 0), error_msg
return total_files//2
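# Minimal usage sketch (assumes a hypothetical directory of paired files 0.png/0_mask.png, 1.png/1_mask.png, ...):
#   from torch.utils.data import DataLoader
#   dataset = TumorDataset('./png_dataset', transform=True)
#   loader = DataLoader(dataset, batch_size=4, shuffle=True)
#   batch = next(iter(loader))
#   batch['image'].shape, batch['mask'].shape   # both torch.Size([4, 1, 512, 512])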
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
tripleo_common/image/image_uploader.py
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
from concurrent import futures
import hashlib
import json
import os
import random
import re
import requests
from requests import auth as requests_auth
from requests.adapters import HTTPAdapter
import shutil
from urllib.parse import urlparse
import socket
import subprocess
import tempfile
import tenacity
import yaml
from datetime import datetime
from dateutil.parser import parse as dt_parse
from dateutil.tz import tzlocal
from oslo_concurrency import processutils
from oslo_log import log as logging
from tripleo_common.utils import ansible
from tripleo_common.image.base import BaseImageManager
from tripleo_common.image.exception import ImageNotFoundException
from tripleo_common.image.exception import ImageRateLimitedException
from tripleo_common.image.exception import ImageUploaderException
from tripleo_common.image.exception import ImageUploaderThreadException
from tripleo_common.image import image_export
from tripleo_common.utils import image as image_utils
from tripleo_common.utils.locks import threadinglock
LOG = logging.getLogger(__name__)
SECURE_REGISTRIES = (
'trunk.registry.rdoproject.org',
'registry.redhat.io',
'registry.access.redhat.com',
'docker.io',
'registry-1.docker.io',
)
NO_VERIFY_REGISTRIES = ()
CLEANUP = (
CLEANUP_FULL, CLEANUP_PARTIAL, CLEANUP_NONE
) = (
'full', 'partial', 'none'
)
CALL_TYPES = (
CALL_PING,
CALL_MANIFEST,
CALL_BLOB,
CALL_UPLOAD,
CALL_TAGS,
CALL_CATALOG
) = (
'/',
'%(image)s/manifests/%(tag)s',
'%(image)s/blobs/%(digest)s',
'%(image)s/blobs/uploads/',
'%(image)s/tags/list',
'/_catalog',
)
MEDIA_TYPES = (
MEDIA_MANIFEST_V1,
MEDIA_MANIFEST_V1_SIGNED,
MEDIA_MANIFEST_V2,
MEDIA_MANIFEST_V2_LIST,
MEDIA_OCI_MANIFEST_V1,
MEDIA_OCI_CONFIG_V1,
MEDIA_OCI_INDEX_V1,
MEDIA_OCI_LAYER,
MEDIA_OCI_LAYER_COMPRESSED,
MEDIA_CONFIG,
MEDIA_BLOB,
MEDIA_BLOB_COMPRESSED
) = (
'application/vnd.docker.distribution.manifest.v1+json',
'application/vnd.docker.distribution.manifest.v1+prettyjws',
'application/vnd.docker.distribution.manifest.v2+json',
'application/vnd.docker.distribution.manifest.list.v2+json',
'application/vnd.oci.image.manifest.v1+json',
'application/vnd.oci.image.config.v1+json',
'application/vnd.oci.image.index.v1+json',
'application/vnd.oci.image.layer.v1.tar',
'application/vnd.oci.image.layer.v1.tar+gzip',
'application/vnd.docker.container.image.v1+json',
'application/vnd.docker.image.rootfs.diff.tar',
'application/vnd.docker.image.rootfs.diff.tar.gzip'
)
DEFAULT_UPLOADER = 'python'
def get_undercloud_registry():
ctlplane_hostname = '.'.join([socket.gethostname().split('.')[0],
'ctlplane'])
cmd = ['getent', 'hosts', ctlplane_hostname]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
universal_newlines=True)
out, err = process.communicate()
if process.returncode != 0:
LOG.warning('No entry for %s in /etc/hosts. Falling back to use the '
'default (localhost) undercloud registry.'
% ctlplane_hostname)
address = 'localhost'
else:
address = out.split()[1]
return '%s:%s' % (address, '8787')
class MakeSession(object):
"""Class method to uniformly create sessions.
Sessions created by this class will retry on errors with an exponential
backoff before raising an exception. Because our primary interaction is
with the container registries the adapter will also retry on 401 and
404. This is done because registries commonly return 401 when an
image is not found, which is often just a cache miss. See the adapter
definitions for more on retry details.
"""
def __init__(self, verify=True):
self.session = requests.Session()
self.session.verify = verify
adapter = HTTPAdapter(
max_retries=8,
pool_connections=24,
pool_maxsize=24,
pool_block=False
)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
def create(self):
return self.__enter__()
def __enter__(self):
return self.session
def __exit__(self, *args, **kwargs):
self.session.close()
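# Usage sketch: MakeSession can be used as a context manager or via create()
# (in which case the caller owns closing the session). The registry URL below is
# only an example endpoint:
#   with MakeSession(verify=True) as session:
#       session.get('https://registry-1.docker.io/v2/', timeout=30)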
class RegistrySessionHelper(object):
""" Class with various registry session helpers
This class contains a bunch of static methods to be used when making
session requests against a container registry. The methods are primarily
used to handle authentication/reauthentication for the requests against
registries that require auth.
"""
@staticmethod
def check_status(session, request, allow_reauth=True):
""" Check request status and trigger reauth
This function can be used to check if we need to perform authentication
for a container registry request because we've gotten a 401.
"""
hash_request_id = hashlib.sha1(str(request.url).encode())
request_id = hash_request_id.hexdigest()
text = getattr(request, 'text', 'unknown')
reason = getattr(request, 'reason', 'unknown')
status_code = getattr(request, 'status_code', None)
headers = getattr(request, 'headers', {})
session_headers = getattr(session, 'headers', {})
if status_code >= 300:
LOG.info(
'Non-2xx: id {}, status {}, reason {}, text {}'.format(
request_id,
status_code,
reason,
text
)
)
if status_code == 401:
LOG.warning(
'Failure: id {}, status {}, reason {} text {}'.format(
request_id,
status_code,
reason,
text
)
)
LOG.debug(
'Request headers after 401: id {}, headers {}'.format(
request_id,
headers
)
)
LOG.debug(
'Session headers after 401: id {}, headers {}'.format(
request_id,
session_headers
)
)
www_auth = headers.get(
'www-authenticate',
headers.get(
'Www-Authenticate'
)
)
if www_auth:
error = None
# Handle docker.io shenanigans. docker.io will return 401
# for 403 and 404 but provide an error string. Other registries
# like registry.redhat.io and quay.io do not do this. So if
# we find an error string, check to see if we should reauth.
do_reauth = allow_reauth
if 'error=' in www_auth:
error = re.search('error="(.*?)"', www_auth).group(1)
LOG.warning(
'Error detected in auth headers: error {}'.format(
error
)
)
do_reauth = (error == 'invalid_token' and allow_reauth)
if do_reauth:
if hasattr(session, 'reauthenticate'):
reauth = int(session.headers.get('_TripleOReAuth', 0))
reauth += 1
session.headers['_TripleOReAuth'] = str(reauth)
LOG.warning(
'Re-authenticating: id {}, count {}'.format(
request_id,
reauth
)
)
session.reauthenticate(**session.auth_args)
if status_code == 429:
raise ImageRateLimitedException('Rate Limited while requesting '
'{}'.format(request.url))
request.raise_for_status()
@staticmethod
def check_redirect_trusted(request_response, request_session,
stream=True, timeout=30):
"""Check if we've been redirected to a trusted source
Because we may be using auth, we may not want to leak authentication
keys to an untrusted source. If we get a redirect, we need to check
that the redirect url is one of our sources that we trust. Otherwise
we drop the Authorization header from the redirect request. We'll
add the header back into the request session after performing the
request so that future usage of the session remains authenticated.
:param: request_response: Response object of the request to check
:param: request_session: Session to use when redirecting
:param: stream: Should we stream the response of the redirect
:param: timeout: Timeout for the redirect request
"""
# we're not a redirect, just return the original response
if not (request_response.status_code >= 300
and request_response.status_code < 400):
return request_response
# parse the destination location
redir_url = urlparse(request_response.headers['Location'])
# close the response since we're going to replace it
request_response.close()
auth_header = request_session.headers.pop('Authorization', None)
# ok we got a redirect, let's check where we are going
if len([h for h in SECURE_REGISTRIES if h in redir_url.netloc]) > 0:
# we're going to a trusted location, add the header back and
# return response
request_session.headers.update({'Authorization': auth_header})
request_response = request_session.get(redir_url.geturl(),
stream=stream,
timeout=timeout)
else:
# we didn't trust the place we're going, request without auth but
# add the auth back to the request session afterwards
request_response = request_session.get(redir_url.geturl(),
stream=stream,
timeout=timeout)
request_session.headers.update({'Authorization': auth_header})
request_response.encoding = 'utf-8'
# recheck status here to make sure we didn't get a 401 from
# our redirect host path.
RegistrySessionHelper.check_status(session=request_session,
request=request_response)
return request_response
@staticmethod
def get_cached_bearer_token(lock=None, scope=None):
if not lock:
return None
with lock.get_lock():
data = lock.sessions().get(scope)
if data and data.get('issued_at'):
token_time = dt_parse(data.get('issued_at'))
now = datetime.now(tzlocal())
expires_in = data.get('expires_in')
if not expires_in or (now - token_time).seconds < expires_in:
return data['token']
return None
@staticmethod
def get_bearer_token(session, lock=None, username=None, password=None,
realm=None, service=None, scope=None):
cached_token = RegistrySessionHelper.get_cached_bearer_token(lock,
scope)
if cached_token:
return cached_token
auth = None
token_param = {}
if service:
token_param['service'] = service
if scope:
token_param['scope'] = scope
if username:
auth = requests.auth.HTTPBasicAuth(username, password)
auth_req = session.get(realm, params=token_param, auth=auth,
timeout=30)
auth_req.raise_for_status()
resp = auth_req.json()
if lock and 'token' in resp:
with lock.get_lock():
lock.sessions().update({scope: resp})
elif lock and 'token' not in resp:
raise Exception('Invalid auth response, no token provided')
hash_request_id = hashlib.sha1(str(auth_req.url).encode())
LOG.debug(
'Session authenticated: id {}'.format(
hash_request_id.hexdigest()
)
)
return resp['token']
@staticmethod
def parse_www_authenticate(header):
auth_type = None
auth_type_match = re.search('^([A-Za-z]*) ', header)
if auth_type_match:
auth_type = auth_type_match.group(1)
if not auth_type:
return (None, None, None)
realm = None
service = None
if 'realm=' in header:
realm = re.search('realm="(.*?)"', header).group(1)
if 'service=' in header:
service = re.search('service="(.*?)"', header).group(1)
return (auth_type, realm, service)
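# For illustration, a typical registry challenge header such as
#   'Bearer realm="https://auth.docker.io/token",service="registry.docker.io"'
# parses to ('Bearer', 'https://auth.docker.io/token', 'registry.docker.io').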
@staticmethod
@tenacity.retry( # Retry up to 5 times with longer time for rate limit
reraise=True,
retry=tenacity.retry_if_exception_type(
ImageRateLimitedException
),
wait=tenacity.wait_random_exponential(multiplier=1.5, max=60),
stop=tenacity.stop_after_attempt(5)
)
def _action(action, request_session, *args, **kwargs):
""" Perform a session action and retry if auth fails
This function dynamically performs a specific type of call
using the provided session (get, patch, post, etc). It will
attempt a single re-authentication if the initial request
fails with a 401.
"""
_action = getattr(request_session, action)
try:
req = _action(*args, **kwargs)
RegistrySessionHelper.check_status(session=request_session,
request=req)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 401:
req = _action(*args, **kwargs)
RegistrySessionHelper.check_status(session=request_session,
request=req)
else:
raise
return req
@staticmethod
def get(request_session, *args, **kwargs):
""" Perform a get and retry if auth fails
This function is designed to be used when we perform a get to
an authenticated source. This function will attempt a single
re-authentication request if the first one fails.
"""
return RegistrySessionHelper._action('get',
request_session,
*args,
**kwargs)
@staticmethod
def patch(request_session, *args, **kwargs):
""" Perform a patch and retry if auth fails
This function is designed to be used when we perform a patch to
an authenticated source. This function will attempt a single
re-authentication request if the first one fails.
"""
return RegistrySessionHelper._action('patch',
request_session,
*args,
**kwargs)
@staticmethod
def post(request_session, *args, **kwargs):
""" Perform a post and retry if auth fails
This function is designed to be used when we perform a post to
an authenticated source. This function will attempt a single
re-authentication request if the first one fails.
"""
return RegistrySessionHelper._action('post',
request_session,
*args,
**kwargs)
@staticmethod
def put(request_session, *args, **kwargs):
""" Perform a put and retry if auth fails
This function is designed to be used when we perform a put to
an authenticated source. This function will attempt a single
re-authentication request if the first one fails.
"""
return RegistrySessionHelper._action('put',
request_session,
*args,
**kwargs)
class ImageUploadManager(BaseImageManager):
"""Manage the uploading of image files
Manage the uploading of images from a config file specified in YAML
syntax. Multiple config files can be specified. They will be merged.
"""
def __init__(self, config_files=None,
cleanup=CLEANUP_FULL,
mirrors=None, registry_credentials=None,
multi_arch=False, lock=None):
if config_files is None:
config_files = []
super(ImageUploadManager, self).__init__(config_files)
self.uploaders = {
'python': PythonImageUploader()
}
self.uploaders['python'].init_global_state(lock)
self.cleanup = cleanup
if mirrors:
for uploader in self.uploaders.values():
if hasattr(uploader, 'mirrors'):
uploader.mirrors.update(mirrors)
if registry_credentials:
self.validate_registry_credentials(registry_credentials)
for uploader in self.uploaders.values():
uploader.registry_credentials = registry_credentials
self.multi_arch = multi_arch
@staticmethod
def validate_registry_credentials(creds_data):
if not isinstance(creds_data, dict):
raise TypeError('Credentials data must be a dict')
for registry, cred_entry in creds_data.items():
if not isinstance(cred_entry, dict) or len(cred_entry) != 1:
raise TypeError('Credentials entry must be '
'a dict with a single item')
if not isinstance(registry, str):
raise TypeError('Key must be a registry host string: %s' %
registry)
username, password = next(iter(cred_entry.items()))
if not (isinstance(username, str) and
isinstance(password, str)):
raise TypeError('Username and password must be strings: %s' %
username)
def discover_image_tag(self, image, tag_from_label=None,
username=None, password=None):
uploader = self.uploader(DEFAULT_UPLOADER)
return uploader.discover_image_tag(
image, tag_from_label=tag_from_label,
username=username, password=password)
def uploader(self, uploader):
if uploader not in self.uploaders:
raise ImageUploaderException('Unknown image uploader type')
return self.uploaders[uploader]
def get_uploader(self, uploader):
return self.uploader(uploader)
@staticmethod
def get_push_destination(item):
push_destination = item.get('push_destination')
if not push_destination:
return get_undercloud_registry()
# If set to True, use discovered undercloud registry
if isinstance(push_destination, bool):
return get_undercloud_registry()
return push_destination
def upload(self):
"""Start the upload process"""
LOG.info('Using config files: %s' % self.config_files)
uploads = self.load_config_files(self.UPLOADS) or []
container_images = self.load_config_files(self.CONTAINER_IMAGES) or []
upload_images = uploads + container_images
tasks = []
for item in upload_images:
image_name = item.get('imagename')
uploader = item.get('uploader', DEFAULT_UPLOADER)
pull_source = item.get('pull_source')
push_destination = self.get_push_destination(item)
# This updates the parsed upload_images dict with real values
item['push_destination'] = push_destination
append_tag = item.get('modify_append_tag')
modify_role = item.get('modify_role')
modify_vars = item.get('modify_vars')
multi_arch = item.get('multi_arch', self.multi_arch)
uploader = self.uploader(uploader)
tasks.append(UploadTask(
image_name, pull_source, push_destination,
append_tag, modify_role, modify_vars,
self.cleanup, multi_arch))
# NOTE(mwhahaha): We want to randomize the upload process because of
# the shared nature of container layers. Because we multiprocess the
# handling of containers, if performed in an alphabetical order (the
# default) we end up duplicating fetching of container layers. Things
like cinder-volume and cinder-backup share almost all of the same
layers, so when they are fetched at the same time, we will duplicate
the processing. By randomizing the list we will reduce the amount
of duplication that occurs. In my testing I went from ~30mins to
# ~20mins to run. In the future this could be improved if we added
# some locking to the container fetching based on layer hashes but
# will require a significant rewrite.
random.shuffle(tasks)
for task in tasks:
uploader.add_upload_task(task)
for uploader in self.uploaders.values():
uploader.run_tasks()
return upload_images # simply to make test validation easier
class BaseImageUploader(object):
lock = None
mirrors = {}
insecure_registries = set()
no_verify_registries = set(NO_VERIFY_REGISTRIES)
secure_registries = set(SECURE_REGISTRIES)
export_registries = set()
push_registries = set()
def __init__(self):
self.upload_tasks = []
# A mapping of layer hashes to the image which first copied that
# layer to the target
self.image_layers = {}
self.registry_credentials = {}
@classmethod
def init_registries_cache(cls):
cls.insecure_registries.clear()
cls.no_verify_registries.clear()
cls.no_verify_registries.update(NO_VERIFY_REGISTRIES)
cls.secure_registries.clear()
cls.secure_registries.update(SECURE_REGISTRIES)
cls.mirrors.clear()
cls.export_registries.clear()
cls.push_registries.clear()
def cleanup(self):
pass
def run_tasks(self):
pass
def credentials_for_registry(self, registry):
creds = self.registry_credentials.get(registry)
if not creds:
return None, None
username, password = next(iter(creds.items()))
return username, password
@classmethod
def run_modify_playbook(cls, modify_role, modify_vars,
source_image, target_image, append_tag,
container_build_tool='buildah'):
run_vars = {}
if modify_vars:
run_vars.update(modify_vars)
run_vars['source_image'] = source_image
run_vars['target_image'] = target_image
run_vars['modified_append_tag'] = append_tag
run_vars['container_build_tool'] = container_build_tool
LOG.info('Playbook variables: \n%s' % yaml.safe_dump(
run_vars, default_flow_style=False))
playbook = [{
'hosts': 'localhost',
'gather_facts': 'no',
'tasks': [{
'name': 'Import role %s' % modify_role,
'import_role': {
'name': modify_role
},
'vars': run_vars
}]
}]
LOG.info('Playbook: \n%s' % yaml.safe_dump(
playbook, default_flow_style=False))
work_dir = tempfile.mkdtemp(prefix='tripleo-modify-image-playbook-')
log_name = 'tripleo-container-image-prepare-ansible.log'
try:
for handler in LOG.logger.root.handlers:
if hasattr(handler, 'baseFilename'):
if os.path.isfile(handler.baseFilename):
log_f = os.path.join(
os.path.dirname(handler.baseFilename),
log_name
)
break
else:
raise OSError('Log output is not a file.')
except (AttributeError, OSError):
log_f = os.path.join('/var/log', log_name)
try:
LOG.info('Ansible action starting')
ansible.run_ansible_playbook(
playbook=playbook,
work_dir=work_dir,
verbosity=1,
extra_env_variables=dict(os.environ),
override_ansible_cfg=(
"[defaults]\n"
"stdout_callback=tripleo_dense\n"
"log_path=%s\n" % log_f
)
)
except processutils.ProcessExecutionError as e:
LOG.error(
'%s\n'
'Error running playbook in directory: %s\n'
'Playbook log information can be reviewed here: %s' % (
e.stdout,
work_dir,
log_f
)
)
raise ImageUploaderException(
'Modifying image %s failed' % target_image
)
else:
LOG.info('Ansible action completed')
finally:
shutil.rmtree(work_dir)
@classmethod
def _images_match(cls, image1, image2, session1=None):
try:
image1_digest = cls._image_digest(image1, session=session1)
except Exception:
return False
try:
image2_digest = cls._image_digest(image2)
except Exception:
return False
# missing digest, no way to know if they match
if not image1_digest or not image2_digest:
return False
return image1_digest == image2_digest
@classmethod
def _image_digest(cls, image, session=None):
image_url = cls._image_to_url(image)
i = cls._inspect(image_url, session)
return i.get('Digest')
@classmethod
def _image_labels(cls, image_url, session=None):
i = cls._inspect(image_url, session)
return i.get('Labels', {}) or {}
@classmethod
def _image_exists(cls, image, session=None):
try:
cls._image_digest(
image, session=session)
except ImageNotFoundException:
return False
else:
return True
@tenacity.retry( # Retry up to 5 times with jittered exponential backoff
reraise=True,
retry=tenacity.retry_if_exception_type(
requests.exceptions.RequestException
),
wait=tenacity.wait_random_exponential(multiplier=1, max=10),
stop=tenacity.stop_after_attempt(5)
)
def authenticate(self, image_url, username=None, password=None,
session=None):
netloc = image_url.netloc
image, tag = self._image_tag_from_url(image_url)
scope = 'repository:%s:pull' % image[1:]
self.is_insecure_registry(registry_host=netloc)
url = self._build_url(image_url, path='/')
verify = (netloc not in self.no_verify_registries)
if not session:
session = MakeSession(verify=verify).create()
else:
session.headers.pop('Authorization', None)
session.verify = verify
cached_token = None
if getattr(self, 'lock', None):
cached_token = RegistrySessionHelper.\
get_cached_bearer_token(self.lock, scope)
if cached_token:
session.headers['Authorization'] = 'Bearer %s' % cached_token
r = session.get(url, timeout=30)
LOG.debug('%s status code %s' % (url, r.status_code))
if r.status_code == 200:
return session
if r.status_code != 401:
r.raise_for_status()
if 'www-authenticate' not in r.headers:
raise ImageUploaderException(
'Unknown authentication method for headers: %s' % r.headers)
auth = None
www_auth = r.headers['www-authenticate']
token_param = {}
(auth_type, realm, service) = \
RegistrySessionHelper.parse_www_authenticate(www_auth)
if auth_type and auth_type.lower() == 'bearer':
LOG.debug('Using bearer token auth')
if getattr(self, 'lock', None):
lock = self.lock
else:
lock = None
token = RegistrySessionHelper.get_bearer_token(session, lock=lock,
username=username,
password=password,
realm=realm,
service=service,
scope=scope)
elif auth_type and auth_type.lower() == 'basic':
LOG.debug('Using basic auth')
if not username or not password:
raise Exception('Authentication credentials required for '
'basic auth: %s' % url)
auth = requests_auth.HTTPBasicAuth(username, password)
rauth = session.get(url, params=token_param, auth=auth, timeout=30)
rauth.raise_for_status()
token = (
base64.b64encode(
bytes(username + ':' + password, 'utf-8')).decode('ascii')
)
hash_request_id = hashlib.sha1(str(rauth.url).encode())
LOG.debug(
'Session authenticated: id {}'.format(
hash_request_id.hexdigest()
)
)
else:
raise ImageUploaderException(
'Unknown www-authenticate value: %s' % www_auth)
auth_header = '%s %s' % (auth_type, token)
session.headers['Authorization'] = auth_header
setattr(session, 'reauthenticate', self.authenticate)
setattr(
session,
'auth_args',
dict(
image_url=image_url,
username=username,
password=password,
session=session
)
)
return session
@staticmethod
def _get_response_text(response, encoding='utf-8', force_encoding=False):
"""Return request response text
We need to set the encoding for the response, otherwise it
will attempt to detect the encoding which is very time consuming.
See https://github.com/psf/requests/issues/4235 for additional
context.
:param: response: requests Response object
:param: encoding: encoding to set if not currently set
:param: force_encoding: set response encoding always
"""
if force_encoding or not response.encoding:
response.encoding = encoding
return response.text
@classmethod
def _build_url(cls, url, path):
netloc = url.netloc
if netloc in cls.mirrors:
mirror = cls.mirrors[netloc]
return '%sv2%s' % (mirror, path)
if (cls.is_insecure_registry(registry_host=netloc) and
netloc not in cls.no_verify_registries):
scheme = 'http'
else:
scheme = 'https'
if netloc == 'docker.io':
netloc = 'registry-1.docker.io'
return '%s://%s/v2%s' % (scheme, netloc, path)
@classmethod
def _image_tag_from_url(cls, image_url):
if '@' in image_url.path:
parts = image_url.path.split('@')
else:
parts = image_url.path.split(':')
tag = parts[-1]
image = ':'.join(parts[:-1])
return image, tag
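# For example (illustrative image names): a parsed URL path of
#   '/tripleomaster/centos-binary-nova-api:current-tripleo'
# yields ('/tripleomaster/centos-binary-nova-api', 'current-tripleo'), while a digest reference
#   '/tripleomaster/centos-binary-nova-api@sha256:abc123'
# yields ('/tripleomaster/centos-binary-nova-api', 'sha256:abc123').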
@classmethod
@tenacity.retry( # Retry up to 5 times with jittered exponential backoff
reraise=True,
retry=tenacity.retry_if_exception_type(
requests.exceptions.RequestException
),
wait=tenacity.wait_random_exponential(multiplier=1, max=10),
stop=tenacity.stop_after_attempt(5)
)
def _inspect(cls, image_url, session=None, default_tag=False):
image, tag = cls._image_tag_from_url(image_url)
parts = {
'image': image,
'tag': tag
}
tags_url = cls._build_url(
image_url, CALL_TAGS % parts
)
tags_r = RegistrySessionHelper.get(session, tags_url, timeout=30)
tags = tags_r.json()['tags']
if default_tag and tag not in tags:
if tags:
parts['tag'] = tags[-1]
else:
raise ImageNotFoundException('Not found image: %s' %
image_url.geturl())
manifest_url = cls._build_url(
image_url, CALL_MANIFEST % parts
)
# prefer docker manifest over oci
manifest_headers = {'Accept': ", ".join([
MEDIA_MANIFEST_V2 + ";q=1", MEDIA_OCI_MANIFEST_V1 + ";q=0.5"])}
try:
manifest_r = RegistrySessionHelper.get(
session,
manifest_url,
headers=manifest_headers,
timeout=30
)
except requests.exceptions.HTTPError as e:
if e.response.status_code in (403, 404):
raise ImageNotFoundException('Not found image: %s' %
image_url.geturl())
raise
manifest_str = cls._get_response_text(manifest_r)
if 'Docker-Content-Digest' in manifest_r.headers:
digest = manifest_r.headers['Docker-Content-Digest']
else:
# The registry didn't supply the manifest digest, so calculate it
calc_digest = hashlib.sha256()
calc_digest.update(manifest_str.encode('utf-8'))
digest = 'sha256:%s' % calc_digest.hexdigest()
manifest = json.loads(manifest_str)
if manifest.get('schemaVersion', 2) == 1:
config = json.loads(manifest['history'][0]['v1Compatibility'])
layers = list(reversed([x['blobSum']
for x in manifest['fsLayers']]))
else:
layers = [x['digest'] for x in manifest['layers']]
parts['digest'] = manifest['config']['digest']
config_headers = {
'Accept': manifest['config']['mediaType']
}
config_url = cls._build_url(
image_url, CALL_BLOB % parts)
config_r = RegistrySessionHelper.get(
session,
config_url,
headers=config_headers,
timeout=30,
allow_redirects=False
)
# check if the blob is a redirect
config_r = RegistrySessionHelper.check_redirect_trusted(
config_r, session, stream=False)
config = config_r.json()
image, tag = cls._image_tag_from_url(image_url)
name = '%s%s' % (image_url.netloc, image)
created = config['created']
docker_version = config.get('docker_version', '')
labels = config['config'].get('Labels', {})
# NOTE: labels can be null
if labels is None:
labels = {}
architecture = config['architecture']
image_os = config['os']
return {
'Name': name,
'Tag': tag,
'Digest': digest,
'RepoTags': tags,
'Created': created,
'DockerVersion': docker_version,
'Labels': labels,
'Architecture': architecture,
'Os': image_os,
'Layers': layers,
}
def list(self, registry, session=None):
self.is_insecure_registry(registry_host=registry)
url = self._image_to_url(registry)
catalog_url = self._build_url(
url, CALL_CATALOG
)
catalog_resp = session.get(catalog_url, timeout=30)
if catalog_resp.status_code in [200]:
catalog = catalog_resp.json()
elif catalog_resp.status_code in [404]:
# just return since the catalog returned a 404
LOG.debug('catalog_url return 404')
return []
else:
raise ImageUploaderException(
'Image registry made invalid response: %s' %
catalog_resp.status_code
)
tags_get_args = []
for repo in catalog.get('repositories', []):
image = '%s/%s' % (registry, repo)
tags_get_args.append((self, image, session))
images = []
workers = min(max(2, processutils.get_worker_count() // 2), 8)
with futures.ThreadPoolExecutor(max_workers=workers) as p:
for image, tags in p.map(tags_for_image, tags_get_args):
if not tags:
continue
for tag in tags:
images.append('%s:%s' % (image, tag))
return images
def inspect(self, image, session=None):
image_url = self._image_to_url(image)
return self._inspect(image_url, session)
def delete(self, image, session=None):
image_url = self._image_to_url(image)
return self._delete(image_url, session)
@classmethod
def _delete(cls, image, session=None):
raise NotImplementedError()
@classmethod
@tenacity.retry( # Retry up to 5 times with jittered exponential backoff
reraise=True,
retry=tenacity.retry_if_exception_type(
requests.exceptions.RequestException
),
wait=tenacity.wait_random_exponential(multiplier=1, max=10),
stop=tenacity.stop_after_attempt(5)
)
def _tags_for_image(cls, image, session):
url = cls._image_to_url(image)
parts = {
'image': url.path,
}
tags_url = cls._build_url(
url, CALL_TAGS % parts
)
r = session.get(tags_url, timeout=30)
if r.status_code in (403, 404):
return image, []
tags = r.json()
return image, tags.get('tags', [])
@classmethod
def _image_to_url(cls, image):
if '://' not in image:
image = 'docker://' + image
url = urlparse(image)
return url
@classmethod
def _discover_tag_from_inspect(cls, i, image, tag_from_label=None,
fallback_tag=None):
labels = i.get('Labels', {})
if hasattr(labels, 'keys'):
label_keys = ', '.join(labels.keys())
else:
label_keys = ""
if not tag_from_label:
raise ImageUploaderException(
'No label specified. Available labels: %s' % label_keys
)
if "{" in tag_from_label:
try:
tag_label = tag_from_label.format(**labels)
except ValueError as e:
raise ImageUploaderException(e)
except (KeyError, TypeError) as e:
if fallback_tag:
tag_label = fallback_tag
else:
raise ImageUploaderException(
'Image %s %s. Available labels: %s' %
(image, e, label_keys)
)
else:
tag_label = None
if isinstance(labels, dict):
tag_label = labels.get(tag_from_label)
if tag_label is None:
if fallback_tag:
tag_label = fallback_tag
else:
raise ImageUploaderException(
'Image %s has no label %s. Available labels: %s' %
(image, tag_from_label, label_keys)
)
# confirm the tag exists by checking for an entry in RepoTags
repo_tags = i.get('RepoTags', [])
if tag_label not in repo_tags:
raise ImageUploaderException(
'Image %s has no tag %s.\nAvailable tags: %s' %
(image, tag_label, ', '.join(repo_tags))
)
return tag_label
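# Illustrative: with tag_from_label='{version}-{release}' and image labels
# {'version': '17.1', 'release': '5'}, the discovered tag would be '17.1-5';
# it is then only accepted if '17.1-5' also appears in the image's RepoTags.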
def discover_image_tags(self, images, tag_from_label=None,
default_tag=False):
image_urls = [self._image_to_url(i) for i in images]
# prime self.insecure_registries by testing every image
for url in image_urls:
self.is_insecure_registry(registry_host=url)
discover_args = []
for image in images:
discover_args.append((self, image, tag_from_label,
default_tag))
versioned_images = {}
with futures.ThreadPoolExecutor(max_workers=16) as p:
for image, versioned_image in p.map(discover_tag_from_inspect,
discover_args):
versioned_images[image] = versioned_image
return versioned_images
def discover_image_tag(self, image, tag_from_label=None,
fallback_tag=None, username=None, password=None):
image_url = self._image_to_url(image)
self.is_insecure_registry(registry_host=image_url.netloc)
try:
session = self.authenticate(
image_url, username=username, password=password)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 401:
raise ImageUploaderException(
'Unable to authenticate. This may indicate '
'missing registry credentials or the provided '
'container or namespace does not exist. %s' % e)
raise
i = self._inspect(image_url, session)
return self._discover_tag_from_inspect(i, image, tag_from_label,
fallback_tag)
def filter_images_with_labels(self, images, labels,
username=None, password=None):
images_with_labels = []
for image in images:
url = self._image_to_url(image)
self.is_insecure_registry(registry_host=url.netloc)
try:
session = self.authenticate(
url, username=username, password=password)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 401:
raise ImageUploaderException(
'Unable to authenticate. This may indicate '
'missing registry credentials or the provided '
'container or namespace does not exist. %s' % e)
raise
image_labels = self._image_labels(
url, session=session)
# The logic is the following: if one of the labels in
# modify_only_with_labels parameter is present in the image, it
# will match and add the images that need to be modified.
for label in labels:
if label in image_labels:
# we found a matching label, adding the image
# and leave the loop.
images_with_labels.append(image)
break
return images_with_labels
def add_upload_task(self, task):
if task.modify_role and task.multi_arch:
raise ImageUploaderException(
'Cannot run a modify role on multi-arch image %s' %
task.image_name
)
# prime insecure_registries
if task.pull_source:
self.is_insecure_registry(
registry_host=self._image_to_url(task.pull_source).netloc
)
else:
self.is_insecure_registry(
registry_host=self._image_to_url(task.image_name).netloc
)
self.is_insecure_registry(
registry_host=self._image_to_url(task.push_destination).netloc
)
self.upload_tasks.append((self, task))
@classmethod
def is_insecure_registry(cls, registry_host):
if registry_host in cls.secure_registries:
return False
if (registry_host in cls.insecure_registries or
registry_host in cls.no_verify_registries):
return True
with requests.Session() as s:
try:
s.get('https://%s/v2' % registry_host, timeout=30)
except requests.exceptions.SSLError:
# Might be just a TLS certificate validation issue
# Just retry without the verification
try:
s.get('https://%s/v2' % registry_host, timeout=30,
verify=False)
cls.no_verify_registries.add(registry_host)
# Technically these types of registries are insecure when
# the container engine tries to do a pull. The python
# uploader ignores the certificate problem, but they are
# still insecure so we return True here while we'll still
# use https when we access the registry. LP#1833751
return True
except requests.exceptions.SSLError:
# So nope, it's really not a certificate verification issue
cls.insecure_registries.add(registry_host)
return True
except Exception:
# for any other error assume it is a secure registry, because:
# - it is secure registry
# - the host is not accessible
pass
cls.secure_registries.add(registry_host)
return False
@classmethod
@tenacity.retry( # Retry up to 5 times with jittered exponential backoff
reraise=True,
retry=tenacity.retry_if_exception_type(
requests.exceptions.RequestException
),
wait=tenacity.wait_random_exponential(multiplier=1, max=10),
stop=tenacity.stop_after_attempt(5)
)
def _cross_repo_mount(cls, target_image_url, image_layers,
source_layers, session):
netloc = target_image_url.netloc
name = target_image_url.path.split(':')[0][1:]
export = netloc in cls.export_registries
if export:
# pylint: disable=no-member
linked_layers = image_export.cross_repo_mount(
target_image_url, image_layers, source_layers,
uploaded_layers=cls._global_view_proxy())
# track linked layers globally for future references
for layer, info in linked_layers.items():
# pylint: disable=no-member
cls._track_uploaded_layers(
layer, known_path=info['known_path'],
image_ref=info['ref_image'], scope='local')
return
if netloc in cls.insecure_registries:
scheme = 'http'
else:
scheme = 'https'
url = '%s://%s/v2/%s/blobs/uploads/' % (scheme, netloc, name)
for layer in source_layers:
# pylint: disable=no-member
known_path, existing_name = image_utils.uploaded_layers_details(
cls._global_view_proxy(), layer, scope='remote')
if layer not in image_layers and not existing_name:
continue
if not existing_name:
existing_name = image_layers[layer].path.split(':')[0][1:]
if existing_name != name:
LOG.debug('[%s] Layer %s ref. by image %s already exists '
'at %s' % (name, layer, existing_name, known_path))
LOG.info('[%s] Cross repository blob mount from %s' %
(layer, existing_name))
data = {
'mount': layer,
'from': existing_name
}
r = RegistrySessionHelper.post(session, url, data=data, timeout=30)
LOG.debug('%s %s' % (r.status_code, r.reason))
class PythonImageUploader(BaseImageUploader):
"""Upload images using a direct implementation of the registry API"""
uploaded_layers = {} # provides global view for multi-threading workers
lock = None # provides global locking info plus global view, if MP is used
@classmethod
def init_global_state(cls, lock):
if not cls.lock:
cls.lock = lock
@classmethod
@tenacity.retry( # Retry until we no longer have collisions
retry=tenacity.retry_if_exception_type(ImageUploaderThreadException),
wait=tenacity.wait_random_exponential(multiplier=1, max=10)
)
def _layer_fetch_lock(cls, layer):
if not cls.lock:
LOG.warning('No lock information provided for layer %s' % layer)
return
if layer in cls.lock.objects():
LOG.debug('[%s] Layer is being fetched by another thread' % layer)
raise ImageUploaderThreadException('layer being fetched')
known_path, image = image_utils.uploaded_layers_details(
cls._global_view_proxy(), layer, scope='local')
if not known_path or not image:
known_path, image = image_utils.uploaded_layers_details(
cls._global_view_proxy(), layer, scope='remote')
if image and known_path:
# already processed layers need no further locking
return
with cls.lock.get_lock():
if layer in cls.lock.objects():
LOG.debug('Collision for lock %s' % layer)
raise ImageUploaderThreadException('layer conflict')
cls.lock.objects().append(layer)
LOG.debug('Got lock on layer %s' % layer)
@classmethod
def _layer_fetch_unlock(cls, layer):
if not cls.lock:
LOG.warning('No lock information provided for layer %s' % layer)
return
with cls.lock.get_lock():
while layer in cls.lock.objects():
cls.lock.objects().remove(layer)
LOG.debug('Released lock on layer %s' % layer)
@classmethod
def _global_view_proxy(cls, value=None, forget=False):
"""Represent the global view for mixed multi-workers concurrent access
Depending on the worker's context, target the corresponding shared data
structures (global view) for the requested value add/remove
operation. Also keep that global view consolidated across all of
the supported MP/MT worker types. Threads share common data via
their common class namespace in a thread-safe standard dictionary.
Processes use multiprocess synchronization primitives stored in
the global lock context.
:param: value: Shared data to track in the global view
:param: forget: Defines whether to add or remove the shared data
"""
if not cls.lock:
LOG.warning('No lock information provided for value %s' % value)
return
with cls.lock.get_lock():
if value and forget:
cls.uploaded_layers.pop(value, None)
if hasattr(cls.lock, '_global_view'):
cls.lock._global_view.pop(value, None)
elif value:
cls.uploaded_layers.update(value)
if hasattr(cls.lock, '_global_view'):
cls.lock._global_view.update(value)
if not value:
# return global view consolidated among MP/MT workers state
if hasattr(cls.lock, '_global_view'):
consolidated_view = cls.uploaded_layers.copy()
consolidated_view.update(cls.lock._global_view)
return consolidated_view
return cls.uploaded_layers
@classmethod
def _track_uploaded_layers(cls, layer, known_path=None, image_ref=None,
forget=False, scope='remote'):
"""Track an image layer info in the global view
Adds or removes layer info to/from the global view shared among
all workers of all supported types (MP/MT). An image layer hash and
scope pair provide a unique one-way entry tracked in the global view.
The layer info being forgotten will be untracked by any existing scope.
:param: layer: A container image layer hash to track in the global view
:param: known_path: Known URL or local path for the tracked layer
:param: image_ref: Name of the image cross-referencing tracked layer
:param: forget: Defines whether to add or remove the tracked layer info
:param: scope: Specifies remote or local type of the tracked image
"""
if forget:
LOG.debug('Untracking processed layer %s for any scope' % layer)
cls._global_view_proxy(value=layer, forget=True)
else:
LOG.debug('Tracking processed layer %s for %s scope'
% (layer, scope))
cls._global_view_proxy(
value={layer: {scope: {'ref': image_ref, 'path': known_path}}})
def upload_image(self, task):
"""Upload image from a task
This function takes an UploadTask and pushes it to the appropriate
target destinations. It should be noted that if the source container
is prefixed with 'containers-storage:' instead of 'docker://' or no
prefix, this process will assume that the source container is already
local to the system. The local container upload does not currently
support any of the modification actions. In order to run the
modification actions on a container prior to upload, the source must
be a remote image. Additionally, cleanup has no effect when
uploading a local image.
:param: task: UploadTask with container information
"""
t = task
LOG.info('[%s] Starting upload image process' % t.image_name)
source_local = t.source_image.startswith('containers-storage:')
target_image_local_url = urlparse('containers-storage:%s' %
t.target_image)
target_username, target_password = self.credentials_for_registry(
t.target_image_url.netloc)
try:
target_session = self.authenticate(
t.target_image_url,
username=target_username,
password=target_password
)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 401:
raise ImageUploaderException(
'Unable to authenticate. This may indicate '
'missing registry credentials or the provided '
'container or namespace does not exist. %s' % e)
raise
try:
self._detect_target_export(t.target_image_url, target_session)
except Exception:
LOG.error('[%s] Failed uploading the target '
'image' % t.target_image)
# Close the session before raising it for more of retrying perhaps
target_session.close()
raise
if source_local:
if t.modify_role:
target_session.close()
raise NotImplementedError('Modify role not implemented for '
'local containers')
if t.cleanup:
LOG.warning('[%s] Cleanup has no effect with a local source '
'container.' % t.image_name)
try:
source_local_url = urlparse(t.source_image)
# Copy from local storage to target registry
self._copy_local_to_registry(
source_local_url,
t.target_image_url,
session=target_session
)
except Exception:
LOG.warning('[%s] Failed copying the target image '
'to the target registry' % t.target_image)
pass
target_session.close()
return []
if t.modify_role:
image_exists = False
try:
image_exists = self._image_exists(t.target_image,
target_session)
except Exception:
LOG.warning('[%s] Failed to check if the target '
'image exists' % t.target_image)
pass
if image_exists:
LOG.warning('[%s] Skipping upload for modified image %s' %
(t.image_name, t.target_image))
target_session.close()
return []
copy_target_url = t.target_image_source_tag_url
else:
copy_target_url = t.target_image_url
# Keep the target session open yet
source_username, source_password = self.credentials_for_registry(
t.source_image_url.netloc)
try:
source_session = self.authenticate(
t.source_image_url,
username=source_username,
password=source_password
)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 401:
raise ImageUploaderException(
'Unable to authenticate. This may indicate '
'missing registry credentials or the provided '
'container or namespace does not exist. %s' % e)
raise
source_layers = []
manifests_str = []
try:
self._collect_manifests_layers(
t.source_image_url, source_session,
manifests_str, source_layers,
t.multi_arch
)
self._cross_repo_mount(
copy_target_url, self.image_layers, source_layers,
session=target_session)
to_cleanup = []
# Copy unmodified images from source to target
self._copy_registry_to_registry(
t.source_image_url,
copy_target_url,
source_manifests=manifests_str,
source_session=source_session,
target_session=target_session,
source_layers=source_layers,
multi_arch=t.multi_arch
)
except Exception:
LOG.error('[%s] Failed uploading the target '
'image' % t.target_image)
# Close the sessions before raising it for more of
# retrying perhaps
source_session.close()
target_session.close()
raise
if not t.modify_role:
LOG.info('[%s] Completed upload for image' % t.image_name)
else:
LOG.info('[%s] Copy unmodified image from target to local' %
t.image_name)
try:
self._copy_registry_to_local(t.target_image_source_tag_url)
if t.cleanup in (CLEANUP_FULL, CLEANUP_PARTIAL):
to_cleanup.append(t.target_image_source_tag)
self.run_modify_playbook(
t.modify_role,
t.modify_vars,
t.target_image_source_tag,
t.target_image_source_tag,
t.append_tag,
container_build_tool='buildah')
if t.cleanup == CLEANUP_FULL:
to_cleanup.append(t.target_image)
# cross-repo mount the unmodified image to the modified image
self._cross_repo_mount(
t.target_image_url, self.image_layers, source_layers,
session=target_session)
# Copy from local storage to target registry
self._copy_local_to_registry(
target_image_local_url,
t.target_image_url,
session=target_session
)
LOG.info('[%s] Completed modify and upload for image' %
t.image_name)
except Exception:
LOG.error('[%s] Failed processing the target '
'image' % t.target_image)
# Close the sessions before raising it for more of
# retrying perhaps
source_session.close()
target_session.close()
raise
try:
for layer in source_layers:
self.image_layers.setdefault(layer, t.target_image_url)
except Exception:
LOG.warning('[%s] Failed setting default layer %s for the '
'target image' % (t.target_image, layer))
pass
target_session.close()
source_session.close()
return to_cleanup
@classmethod
@tenacity.retry( # Retry up to 5 times with jittered exponential backoff
reraise=True,
retry=tenacity.retry_if_exception_type(
requests.exceptions.RequestException
),
wait=tenacity.wait_random_exponential(multiplier=1, max=10),
stop=tenacity.stop_after_attempt(5)
)
def _detect_target_export(cls, image_url, session):
if image_url.netloc in cls.export_registries:
return True
if image_url.netloc in cls.push_registries:
return False
# detect if the registry is push-capable by requesting an upload URL.
image, _ = cls._image_tag_from_url(image_url)
upload_req_url = cls._build_url(
image_url,
path=CALL_UPLOAD % {'image': image})
try:
RegistrySessionHelper.post(
session,
upload_req_url,
timeout=30
)
except requests.exceptions.HTTPError as e:
if e.response.status_code in (501, 403, 404, 405):
cls.export_registries.add(image_url.netloc)
return True
raise
cls.push_registries.add(image_url.netloc)
return False
@classmethod
@tenacity.retry( # Retry up to 5 times with jittered exponential backoff
reraise=True,
retry=tenacity.retry_if_exception_type(
requests.exceptions.RequestException
),
wait=tenacity.wait_random_exponential(multiplier=1, max=10),
stop=tenacity.stop_after_attempt(5)
)
def _fetch_manifest(cls, url, session, multi_arch):
image, tag = cls._image_tag_from_url(url)
parts = {
'image': image,
'tag': tag
}
url = cls._build_url(
url, CALL_MANIFEST % parts
)
if multi_arch:
manifest_headers = {'Accept': MEDIA_MANIFEST_V2_LIST}
else:
# prefer docker manifest over oci
manifest_headers = {'Accept': ", ".join([
MEDIA_MANIFEST_V2 + ";q=1", MEDIA_OCI_MANIFEST_V1 + ";q=0.5"])}
try:
r = RegistrySessionHelper.get(
session,
url,
headers=manifest_headers,
timeout=30
)
except requests.exceptions.HTTPError as e:
if e.response.status_code in (403, 404):
raise ImageNotFoundException('Not found image: %s' % url)
raise
return cls._get_response_text(r)
def _collect_manifests_layers(self, image_url, session,
manifests_str, layers,
multi_arch):
manifest_str = self._fetch_manifest(
image_url,
session=session,
multi_arch=multi_arch
)
manifests_str.append(manifest_str)
manifest = json.loads(manifest_str)
media_type = manifest.get('mediaType',
manifest.get('config', {}).get('mediaType'))
if manifest.get('schemaVersion', 2) == 1:
layers.extend(reversed([x['blobSum']
for x in manifest['fsLayers']]))
elif not media_type or media_type in [MEDIA_MANIFEST_V2,
MEDIA_OCI_MANIFEST_V1,
MEDIA_OCI_CONFIG_V1]:
layers.extend(x['digest'] for x in manifest['layers'])
elif media_type == MEDIA_MANIFEST_V2_LIST:
image, _, tag = image_url.geturl().rpartition(':')
for man in manifest.get('manifests', []):
# replace image tag with the manifest hash in the list
man_url = urlparse('%s@%s' % (image, man['digest']))
self._collect_manifests_layers(
man_url, session, manifests_str, layers,
multi_arch=False
)
@classmethod
@tenacity.retry( # Retry up to 5 times with jittered exponential backoff
reraise=True,
retry=tenacity.retry_if_exception_type(
requests.exceptions.RequestException
),
wait=tenacity.wait_random_exponential(multiplier=1, max=10),
stop=tenacity.stop_after_attempt(5)
)
def _upload_url(cls, image_url, session, previous_request=None):
if previous_request and 'Location' in previous_request.headers:
return previous_request.headers['Location']
image, tag = cls._image_tag_from_url(image_url)
upload_req_url = cls._build_url(
image_url,
path=CALL_UPLOAD % {'image': image})
r = RegistrySessionHelper.post(
session,
upload_req_url,
timeout=30
)
return r.headers['Location']
@classmethod
@tenacity.retry( # Retry up to 5 times with longer time
reraise=True,
retry=tenacity.retry_if_exception_type(
(requests.exceptions.RequestException,
ImageRateLimitedException)
),
wait=tenacity.wait_random_exponential(multiplier=1.5, max=60),
stop=tenacity.stop_after_attempt(5)
)
def _layer_stream_registry(cls, digest, source_url, calc_digest,
session):
image, tag = cls._image_tag_from_url(source_url)
parts = {
'image': image,
'tag': tag,
'digest': digest
}
source_blob_url = cls._build_url(
source_url, CALL_BLOB % parts)
# NOTE(aschultz): We specify None and let requests figure it out
chunk_size = None
LOG.info("[%s] Fetching layer %s from %s" %
(image, digest, source_blob_url))
with session.get(source_blob_url,
stream=True,
timeout=30,
allow_redirects=False) as blob_req:
blob_req.encoding = 'utf-8'
# raise for status here to ensure we didn't get a 401
RegistrySessionHelper.check_status(session=session,
request=blob_req)
# Requests to docker.io redirect to CDN for the actual content
# so we need to check if our initial blob request is a redirect
# and follow as necessary.
blob_req = RegistrySessionHelper.check_redirect_trusted(blob_req,
session)
for data in blob_req.iter_content(chunk_size):
LOG.debug("[%s] Read %i bytes for %s" %
(image, len(data), digest))
if not data:
break
calc_digest.update(data)
yield data
LOG.info("[%s] Done fetching layer %s from registry" % (image, digest))
@classmethod
@tenacity.retry( # Retry up to 5 times with jittered exponential backoff
reraise=True,
retry=tenacity.retry_if_exception_type(
IOError
),
wait=tenacity.wait_random_exponential(multiplier=1, max=10),
stop=tenacity.stop_after_attempt(5)
)
def _copy_layer_registry_to_registry(cls, source_url, target_url,
layer,
source_session=None,
target_session=None):
layer_entry = {'digest': layer}
try:
cls._layer_fetch_lock(layer)
if cls._target_layer_exists_registry(
target_url, layer_entry, [layer_entry], target_session):
cls._layer_fetch_unlock(layer)
return
known_path, ref_image = image_utils.uploaded_layers_details(
cls._global_view_proxy(), layer, scope='local')
if known_path and ref_image:
# cross-link target from local source, skip fetching it again
image_export.layer_cross_link(
layer, ref_image, known_path, target_url)
cls._layer_fetch_unlock(layer)
return
except ImageUploaderThreadException:
# skip trying to unlock, because that's what threw the exception
raise
except Exception:
cls._layer_fetch_unlock(layer)
raise
digest = layer_entry['digest']
LOG.debug('[%s] Uploading layer' % digest)
calc_digest = hashlib.sha256()
known_path = None
layer_val = None
try:
layer_stream = cls._layer_stream_registry(
digest, source_url, calc_digest, source_session)
layer_val, known_path = cls._copy_stream_to_registry(
target_url, layer_entry, calc_digest, layer_stream,
target_session)
except (IOError, requests.exceptions.HTTPError):
cls._track_uploaded_layers(layer, forget=True, scope='remote')
LOG.error('[%s] Failed processing layer for the target '
'image %s' % (layer, target_url.geturl()))
raise
else:
if layer_val and known_path:
image_ref = target_url.path.split(':')[0][1:]
uploaded = urlparse(known_path).scheme
cls._track_uploaded_layers(
layer_val, known_path=known_path, image_ref=image_ref,
scope=('remote' if uploaded else 'local'))
return layer_val
finally:
cls._layer_fetch_unlock(layer)
@classmethod
def _assert_scheme(cls, url, scheme):
if url.scheme != scheme:
raise ImageUploaderException(
'Expected %s scheme: %s' % (scheme, url.geturl()))
@classmethod
@tenacity.retry( # Retry up to 5 times with jittered exponential backoff
reraise=True,
retry=tenacity.retry_if_exception_type(
requests.exceptions.RequestException
),
wait=tenacity.wait_random_exponential(multiplier=1, max=10),
stop=tenacity.stop_after_attempt(5)
)
def _copy_registry_to_registry(cls, source_url, target_url,
source_manifests,
source_session=None,
target_session=None,
source_layers=None,
multi_arch=False):
cls._assert_scheme(source_url, 'docker')
cls._assert_scheme(target_url, 'docker')
image, tag = cls._image_tag_from_url(source_url)
parts = {
'image': image,
'tag': tag
}
# Upload all layers
copy_jobs = []
jobs_count = 0
jobs_finished = 0
with futures.ThreadPoolExecutor(max_workers=4) as p:
if source_layers:
for layer in source_layers:
copy_jobs.append(p.submit(
cls._copy_layer_registry_to_registry,
source_url, target_url,
layer=layer,
source_session=source_session,
target_session=target_session
))
jobs_count = len(copy_jobs)
LOG.debug('[%s] Waiting for %i jobs to finish' %
(image, jobs_count))
for job in futures.as_completed(copy_jobs):
e = job.exception()
if e:
raise e
layer = job.result()
if layer:
LOG.debug('[%s] Upload complete for layer %s' %
(image, layer))
jobs_finished += 1
LOG.debug('[%s] Waiting for next job: %i of %i complete' %
(image, jobs_finished, jobs_count))
LOG.debug('[%s] Completed %i jobs' % (image, jobs_count))
for source_manifest in source_manifests:
manifest = json.loads(source_manifest)
config_str = None
# NOTE(mwhahaha): mediaType will not be set when it's
# schemaVersion 1
media_type = manifest.get('mediaType',
manifest.get('config',
{}).get('mediaType'))
if media_type in [MEDIA_MANIFEST_V2,
MEDIA_OCI_MANIFEST_V1,
MEDIA_OCI_CONFIG_V1]:
config_digest = manifest['config']['digest']
LOG.debug('[%s] Uploading config with digest: %s' %
(image, config_digest))
parts['digest'] = config_digest
source_config_url = cls._build_url(
source_url,
CALL_BLOB % parts
)
r = RegistrySessionHelper.get(
source_session,
source_config_url,
timeout=30,
allow_redirects=False
)
# check if the blob was a redirect
r = RegistrySessionHelper.check_redirect_trusted(
r, source_session, stream=False)
config_str = cls._get_response_text(r)
manifest['config']['size'] = len(config_str)
manifest['config']['mediaType'] = MEDIA_CONFIG
cls._copy_manifest_config_to_registry(
target_url=target_url,
manifest_str=source_manifest,
config_str=config_str,
target_session=target_session,
multi_arch=multi_arch
)
LOG.debug('[%s] Finished copying image' % image)
@classmethod
def _copy_manifest_config_to_registry(cls, target_url,
manifest_str,
config_str,
target_session=None,
multi_arch=False):
manifest = json.loads(manifest_str)
if manifest.get('schemaVersion', 2) == 1:
if 'signatures' in manifest:
manifest_type = MEDIA_MANIFEST_V1_SIGNED
else:
manifest_type = MEDIA_MANIFEST_V1
else:
# NOTE(mwhahaha): always force docker media format if not set or
# is explicitly OCI because buildah uses OCI by default but we
# convert the metadata to Docker format in the uploader.
# See LP#1860585
manifest_type = manifest.get('mediaType',
manifest.get('config',
{}).get('mediaType'))
if manifest_type in [MEDIA_OCI_MANIFEST_V1,
MEDIA_OCI_CONFIG_V1]:
manifest_type = MEDIA_MANIFEST_V2
# convert config mediaType to docker.container.image
manifest['config']['mediaType'] = MEDIA_CONFIG
layers = manifest.get('layers')
# convert layer type to docker layer type
if layers:
new_layers = []
for layer in layers:
layer_type = layer.get('mediaType')
if layer_type == MEDIA_OCI_LAYER_COMPRESSED:
layer['mediaType'] = MEDIA_BLOB_COMPRESSED
elif layer_type == MEDIA_OCI_LAYER:
layer['mediaType'] = MEDIA_BLOB
new_layers.append(layer)
manifest['layers'] = new_layers
elif manifest_type == MEDIA_CONFIG:
manifest_type = MEDIA_MANIFEST_V2
elif manifest_type == MEDIA_OCI_INDEX_V1:
manifest_type = MEDIA_MANIFEST_V2_LIST
manifest['mediaType'] = manifest_type
manifest_str = json.dumps(manifest, indent=3)
export = target_url.netloc in cls.export_registries
if export:
image_export.export_manifest_config(
target_url,
manifest_str,
manifest_type,
config_str,
multi_arch=multi_arch
)
return
if config_str is not None:
config_digest = manifest['config']['digest']
# Upload the config json as a blob
upload_url = cls._upload_url(
target_url,
session=target_session)
r = RegistrySessionHelper.put(
target_session,
upload_url,
timeout=30,
params={
'digest': config_digest
},
data=config_str.encode('utf-8'),
headers={
'Content-Length': str(len(config_str)),
'Content-Type': 'application/octet-stream'
}
)
# Upload the manifest
image, tag = cls._image_tag_from_url(target_url)
parts = {
'image': image,
'tag': tag
}
manifest_url = cls._build_url(
target_url, CALL_MANIFEST % parts)
LOG.debug('[%s] Uploading manifest of type %s to: %s' %
(image, manifest_type, manifest_url))
try:
r = RegistrySessionHelper.put(
target_session,
manifest_url,
timeout=30,
data=manifest_str.encode('utf-8'),
headers={
'Content-Type': manifest_type
}
)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 400:
                LOG.error(cls._get_response_text(e.response))
raise ImageUploaderException('Pushing manifest failed')
raise
@classmethod
@tenacity.retry( # Retry up to 5 times with jittered exponential backoff
reraise=True,
wait=tenacity.wait_random_exponential(multiplier=1, max=10),
stop=tenacity.stop_after_attempt(5)
)
def _copy_registry_to_local(cls, source_url):
cls._assert_scheme(source_url, 'docker')
pull_source = source_url.netloc + source_url.path
cmd = ['buildah', '--debug', 'pull']
        if (source_url.netloc in cls.insecure_registries or
                source_url.netloc in cls.no_verify_registries):
cmd.append('--tls-verify=false')
cmd.append(pull_source)
LOG.info('Pulling %s' % pull_source)
LOG.info('Running %s' % ' '.join(cmd))
try:
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
close_fds=True
)
out, err = process.communicate()
if process.returncode != 0:
error_msg = (
'Pulling image failed: cmd "{}", stdout "{}",'
' stderr "{}"'.format(
' '.join(cmd),
out,
err
)
)
LOG.error(error_msg)
raise ImageUploaderException(error_msg)
except KeyboardInterrupt:
raise Exception('Action interrupted with ctrl+c')
return out
@classmethod
def _target_layer_exists_registry(cls, target_url, layer, check_layers,
session):
image, tag = cls._image_tag_from_url(target_url)
norm_image = (image[1:] if image.startswith('/') else image)
parts = {
'image': image,
'tag': tag
}
layer_found = None
# Check in global view or do a HEAD call for the supplied
# digests to see if the layer is already in the registry
for x in check_layers:
if not x:
continue
known_path, ref_image = image_utils.uploaded_layers_details(
cls._global_view_proxy(), x['digest'], scope='remote')
if ref_image == norm_image:
LOG.debug('[%s] Layer %s already exists at %s' %
(image, x['digest'], known_path))
layer_found = x
break
parts['digest'] = x['digest']
blob_url = cls._build_url(target_url, CALL_BLOB % parts)
if session.head(blob_url, timeout=30).status_code == 200:
LOG.debug('[%s] Layer already exists: %s' %
(image, x['digest']))
layer_found = x
break
if layer_found:
layer['digest'] = layer_found['digest']
if 'size' in layer_found:
layer['size'] = layer_found['size']
if 'mediaType' in layer_found:
layer['mediaType'] = layer_found['mediaType']
return True
return False
@classmethod
def _layer_stream_local(cls, layer_id, calc_digest):
LOG.debug('[%s] Exporting layer' % layer_id)
tar_split_path = cls._containers_file_path(
'overlay-layers',
'%s.tar-split.gz' % layer_id
)
overlay_path = cls._containers_file_path(
'overlay', layer_id, 'diff'
)
cmd = [
'tar-split', 'asm',
'--input', tar_split_path,
'--path', overlay_path,
'--compress'
]
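        # tar-split reassembles the original layer tarball from the saved tar
        # metadata (--input) plus the files in the overlay diff directory
        # (--path); --compress gzips the stream so the digest computed below
        # matches a compressed blob.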
LOG.debug(' '.join(cmd))
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
chunk_size = 2 ** 20
while True:
data = p.stdout.read(chunk_size)
if not data:
break
calc_digest.update(data)
yield data
p.wait()
if p.returncode != 0:
raise ImageUploaderException('Extracting layer failed')
except KeyboardInterrupt:
raise Exception('Action interrupted with ctrl+c')
@classmethod
@tenacity.retry( # Retry up to 5 times with jittered exponential backoff
reraise=True,
retry=tenacity.retry_if_exception_type(
requests.exceptions.RequestException
),
wait=tenacity.wait_random_exponential(multiplier=1, max=10),
stop=tenacity.stop_after_attempt(5)
)
def _copy_layer_local_to_registry(cls, target_url,
session, layer, layer_entry):
# Check in global view or do a HEAD call for the compressed-diff-digest
# and diff-digest to see if the layer is already in the registry
check_layers = []
compressed_digest = layer_entry.get('compressed-diff-digest')
if compressed_digest:
check_layers.append({
'digest': compressed_digest,
'size': layer_entry.get('compressed-size'),
'mediaType': MEDIA_BLOB_COMPRESSED,
})
digest = layer_entry.get('diff-digest')
if digest:
check_layers.append({
'digest': digest,
'size': layer_entry.get('diff-size'),
'mediaType': MEDIA_BLOB,
})
if cls._target_layer_exists_registry(target_url, layer, check_layers,
session):
return
layer_id = layer_entry['id']
LOG.debug('[%s] Uploading layer' % layer_id)
calc_digest = hashlib.sha256()
known_path = None
layer_val = None
try:
layer_stream = cls._layer_stream_local(layer_id, calc_digest)
layer_val, known_path = cls._copy_stream_to_registry(
target_url, layer, calc_digest, layer_stream, session,
verify_digest=False)
except (IOError, requests.exceptions.HTTPError):
cls._track_uploaded_layers(
layer['digest'], forget=True, scope='remote')
LOG.error('[%s] Failed processing layer for the target '
'image %s' % (layer['digest'], target_url.geturl()))
raise
else:
if layer_val and known_path:
image_ref = target_url.path.split(':')[0][1:]
uploaded = urlparse(known_path).scheme
cls._track_uploaded_layers(
layer_val, known_path=known_path, image_ref=image_ref,
scope=('remote' if uploaded else 'local'))
return layer_val
@classmethod
def _copy_stream_to_registry(cls, target_url, layer, calc_digest,
layer_stream, session, verify_digest=True):
layer['mediaType'] = MEDIA_BLOB_COMPRESSED
length = 0
upload_resp = None
export = target_url.netloc in cls.export_registries
if export:
return image_export.export_stream(
target_url, layer, layer_stream, verify_digest=verify_digest)
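        # Chunked upload per the Docker Registry HTTP API v2: PATCH each chunk
        # to the upload URL with a Content-Range header, then finalize with a
        # PUT that carries the computed digest as a query parameter.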
for chunk in layer_stream:
if not chunk:
break
chunk_length = len(chunk)
upload_url = cls._upload_url(
target_url, session, upload_resp)
upload_resp = RegistrySessionHelper.patch(
session,
upload_url,
timeout=30,
data=chunk,
headers={
'Content-Length': str(chunk_length),
'Content-Range': '%d-%d' % (
length, length + chunk_length - 1),
'Content-Type': 'application/octet-stream'
}
)
length += chunk_length
layer_digest = 'sha256:%s' % calc_digest.hexdigest()
LOG.debug('[%s] Calculated layer digest' % layer_digest)
upload_url = cls._upload_url(
target_url, session, upload_resp)
upload_resp = RegistrySessionHelper.put(
session,
upload_url,
timeout=30,
params={
'digest': layer_digest
},
)
layer['digest'] = layer_digest
layer['size'] = length
return (layer_digest, cls._build_url(target_url, target_url.path))
@classmethod
@tenacity.retry( # Retry up to 5 times with jittered exponential backoff
reraise=True,
retry=tenacity.retry_if_exception_type(
requests.exceptions.RequestException
),
wait=tenacity.wait_random_exponential(multiplier=1, max=10),
stop=tenacity.stop_after_attempt(5)
)
def _copy_local_to_registry(cls, source_url, target_url, session):
cls._assert_scheme(source_url, 'containers-storage')
cls._assert_scheme(target_url, 'docker')
name = '%s%s' % (source_url.netloc, source_url.path)
image, manifest, config_str = cls._image_manifest_config(name)
layers_by_digest = cls._get_all_local_layers_by_digest()
# Upload all layers
copy_jobs = []
jobs_count = 0
jobs_finished = 0
with futures.ThreadPoolExecutor(max_workers=4) as p:
for layer in manifest['layers']:
layer_entry = layers_by_digest[layer['digest']]
copy_jobs.append(p.submit(
cls._copy_layer_local_to_registry,
target_url, session, layer, layer_entry
))
jobs_count = len(copy_jobs)
LOG.debug('[%s] Waiting for %i jobs to finish' %
(name, jobs_count))
for job in futures.as_completed(copy_jobs):
e = job.exception()
if e:
raise e
layer = job.result()
if layer:
LOG.debug('[%s] Upload complete for layer: %s' %
(name, layer))
jobs_finished += 1
LOG.debug('[%s] Waiting for next job: %i of %i complete' %
(name, jobs_finished, jobs_count))
LOG.debug('[%s] Completed %i jobs' % (name, jobs_count))
manifest_str = json.dumps(manifest, indent=3)
cls._copy_manifest_config_to_registry(
target_url=target_url,
manifest_str=manifest_str,
config_str=config_str,
target_session=session
)
LOG.debug('[%s] Finished copying' % name)
@classmethod
def _containers_file_path(cls, *path):
full_path = os.path.join('/var/lib/containers/storage/', *path)
if not os.path.exists(full_path):
raise ImageUploaderException('Missing file %s' % full_path)
return full_path
@classmethod
def _containers_file(cls, *path):
full_path = cls._containers_file_path(*path)
try:
with open(full_path, 'r') as f:
return f.read()
except Exception as e:
raise ImageUploaderException(e)
@classmethod
def _containers_json(cls, *path):
return json.loads(cls._containers_file(*path))
@classmethod
def _get_all_local_layers_by_digest(cls):
all_layers = cls._containers_json('overlay-layers', 'layers.json')
layers_by_digest = {}
for x in all_layers:
if 'diff-digest' in x:
layers_by_digest[x['diff-digest']] = x
if 'compressed-diff-digest' in x:
layers_by_digest[x['compressed-diff-digest']] = x
return layers_by_digest
@classmethod
def _get_local_layers_manifest(cls, manifest, config_str):
"""Return a valid local manifest
The manifest that is kept in the container storage is the
original manifest but the layers may be different once processed
by libpod & company. We want a valid manifest for the local
file system so we need to use the root fs layers from the container
config rather than just assuming the original manifest is still
valid.
"""
layers = cls._get_all_local_layers_by_digest()
config = json.loads(config_str)
rootfs = config.get('rootfs', {})
layer_ids = rootfs.get('diff_ids', None)
if not layer_ids:
# TODO(aschultz): add container name/path
LOG.warning('Container missing rootfs layers')
return manifest
# clear out the manifest layers
manifest['layers'] = []
for layer in layer_ids:
layer_digest = {'mediaType': MEDIA_BLOB}
if layer not in layers:
raise ImageNotFoundException('Unable to find layer %s in the '
'local layers' % layer)
layer_digest['digest'] = layer
# podman currently doesn't do compressed layers so just use
# the diff-size
layer_digest['size'] = layers[layer]['diff-size']
manifest['layers'].append(layer_digest)
return manifest
@classmethod
def _image_manifest_config(cls, name):
image = None
images = cls._containers_json('overlay-images', 'images.json')
for i in images:
for n in i.get('names', []):
if name == n:
image = i
break
if image:
break
if not image:
raise ImageNotFoundException('Not found image: %s' % name)
image_id = image['id']
manifest = cls._containers_json('overlay-images', image_id, 'manifest')
config_digest = manifest['config']['digest']
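        # containers/storage stores image big-data items under file names
        # derived from their keys; a key that is not filesystem-safe is written
        # as '=' + base64(key), which is why the config blob keyed by its
        # digest is looked up this way.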
config_id = '=' + base64.b64encode(
config_digest.encode()).decode('utf-8')
config_str = cls._containers_file('overlay-images', image_id,
config_id)
manifest = cls._get_local_layers_manifest(manifest, config_str)
manifest['config']['size'] = len(config_str)
manifest['config']['mediaType'] = MEDIA_CONFIG
return image, manifest, config_str
@classmethod
def _inspect(cls, image_url, session=None, default_tag=False):
if image_url.scheme == 'docker':
return super(PythonImageUploader, cls)._inspect(
image_url, session=session, default_tag=default_tag)
if image_url.scheme != 'containers-storage':
raise ImageUploaderException('Inspect not implemented for %s' %
image_url.geturl())
name = '%s%s' % (image_url.netloc, image_url.path)
image, manifest, config_str = cls._image_manifest_config(name)
config = json.loads(config_str)
layers = [x['digest'] for x in manifest['layers']]
i, _ = cls._image_tag_from_url(image_url)
digest = image['digest']
created = image['created']
labels = config['config'].get('Labels', {})
# NOTE: labels can be null
if labels is None:
labels = {}
architecture = config['architecture']
image_os = config['os']
return {
'Name': i,
'Digest': digest,
'RepoTags': [],
'Created': created,
'DockerVersion': '',
'Labels': labels,
'Architecture': architecture,
'Os': image_os,
'Layers': layers,
}
@classmethod
def _delete_from_registry(cls, image_url, session=None):
if not cls._detect_target_export(image_url, session):
raise NotImplementedError(
'Deleting not supported via the registry API')
return image_export.delete_image(image_url)
@classmethod
def _delete(cls, image_url, session=None):
image = image_url.geturl()
LOG.info('[%s] Deleting image' % image)
if image_url.scheme == 'docker':
return cls._delete_from_registry(image_url, session)
if image_url.scheme != 'containers-storage':
raise ImageUploaderException('Delete not implemented for %s' %
image_url.geturl())
cmd = ['buildah', 'rmi', image_url.path]
LOG.info('Running %s' % ' '.join(cmd))
env = os.environ.copy()
try:
process = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE,
universal_newlines=True)
out, err = process.communicate()
LOG.info(out)
if process.returncode != 0:
LOG.warning('Error deleting image:\n%s\n%s' %
(' '.join(cmd), err))
except KeyboardInterrupt:
raise Exception('Action interrupted with ctrl+c')
return out
def cleanup(self, local_images):
if not local_images:
return []
for image in sorted(local_images):
if not image:
continue
LOG.info('[%s] Removing local copy of image' % image)
image_url = urlparse('containers-storage:%s' % image)
self._delete(image_url)
def _get_executor(self):
"""Get executor type based on lock object
        We check whether the lock object is unset or is a threading lock. We
        cannot check for a ProcessLock directly because importing ProcessLock
        while running under Mistral breaks Mistral.
"""
if not self.lock or isinstance(self.lock, threadinglock.ThreadingLock):
# workers will scale from 2 to 8 based on the cpu count // 2
workers = min(max(2, processutils.get_worker_count() // 2), 8)
return futures.ThreadPoolExecutor(max_workers=workers)
# there really isn't an improvement with > 4 workers due to the
# container layer overlaps. The higher the workers, the more
# RAM required which can lead to OOMs. It's best to limit to 4
return futures.ProcessPoolExecutor(max_workers=4)
def run_tasks(self):
if not self.upload_tasks:
return
local_images = []
with self._get_executor() as p:
for result in p.map(upload_task, self.upload_tasks):
local_images.extend(result)
LOG.info('result %s' % local_images)
# Do cleanup after all the uploads so common layers don't get deleted
# repeatedly
self.cleanup(local_images)
class UploadTask(object):
def __init__(self, image_name, pull_source, push_destination,
append_tag, modify_role, modify_vars, cleanup,
multi_arch):
self.image_name = image_name
self.pull_source = pull_source
self.push_destination = push_destination
self.append_tag = append_tag or ''
self.modify_role = modify_role
self.modify_vars = modify_vars
self.cleanup = cleanup
self.multi_arch = multi_arch
if ':' in image_name:
image = image_name.rpartition(':')[0]
self.source_tag = image_name.rpartition(':')[2]
else:
image = image_name
self.source_tag = 'latest'
if pull_source:
# prevent a double // in the url which causes auth problems
# with docker.io
if pull_source.endswith('/'):
pull_source = pull_source[:-1]
self.repo = pull_source + '/' + image
else:
self.repo = image
if push_destination.endswith('/'):
push_destination = push_destination[:-1]
self.target_image_no_tag = (push_destination + '/' +
self.repo.partition('/')[2])
self.target_tag = self.source_tag + self.append_tag
self.source_image = self.repo + ':' + self.source_tag
self.target_image_source_tag = (self.target_image_no_tag + ':' +
self.source_tag)
self.target_image = self.target_image_no_tag + ':' + self.target_tag
image_to_url = BaseImageUploader._image_to_url
self.source_image_url = image_to_url(self.source_image)
self.target_image_url = image_to_url(self.target_image)
self.target_image_source_tag_url = image_to_url(
self.target_image_source_tag
)
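    # Example with hypothetical values: image_name='namespace/nova-api:latest',
    # pull_source='docker.io', push_destination='192.168.24.1:8787' yields
    #   repo='docker.io/namespace/nova-api'
    #   source_image='docker.io/namespace/nova-api:latest'
    #   target_image='192.168.24.1:8787/namespace/nova-api:latest'
    # (with an empty append_tag).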
def upload_task(args):
uploader, task = args
return uploader.upload_image(task)
def discover_tag_from_inspect(args):
self, image, tag_from_label, default_tag = args
image_url = self._image_to_url(image)
username, password = self.credentials_for_registry(image_url.netloc)
try:
session = self.authenticate(
image_url, username=username, password=password)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 401:
raise ImageUploaderException(
'Unable to authenticate. This may indicate '
'missing registry credentials or the provided '
'container or namespace does not exist. %s' % e)
raise
i = self._inspect(image_url, session=session, default_tag=default_tag)
session.close()
if ':' in image_url.path:
# break out the tag from the url to be the fallback tag
path = image.rpartition(':')
fallback_tag = path[2]
image = path[0]
else:
fallback_tag = None
return image, self._discover_tag_from_inspect(
i, image, tag_from_label, fallback_tag)
def tags_for_image(args):
self, image, session = args
return self._tags_for_image(image, session)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/main/java/cloudant/store/VCAPHelper.java
|
/******************************************************************************
* Copyright (c) 2018 IBM Corp. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
******************************************************************************/
package cloudant.store;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import java.util.Set;
import java.util.Map.Entry;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
public class VCAPHelper {
static String VCAP_SERVICES = System.getenv("VCAP_SERVICES");
public static JsonObject getCloudCredentials(String serviceName) {
if(VCAP_SERVICES == null){
return null;
}
//Convert VCAP_SERVICES String to JSON
JsonObject obj = (JsonObject) new JsonParser().parse(VCAP_SERVICES);
// Look for the VCAP key that holds the service info
Entry<String, JsonElement> dbEntry = matchService(obj.entrySet(),
serviceName);
if (dbEntry == null) {
System.out.println("VCAP_SERVICES: Could not find " + serviceName);
return null;
}
obj = (JsonObject) ((JsonArray) dbEntry.getValue()).get(0);
System.out.println("VCAP_SERVICES: Found " + dbEntry.getKey());
return (JsonObject) obj.get("credentials");
}
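    // Example usage (the "cloudant" service name and "url" credential field
    // are assumptions about the bound service, not guarantees):
    //   JsonObject creds = VCAPHelper.getCloudCredentials("cloudant");
    //   String url = creds != null ? creds.get("url").getAsString() : null;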
private static Entry<String, JsonElement> matchService(Set<Entry<String, JsonElement>> entries, String serviceName) {
for (Entry<String, JsonElement> eachEntry : entries) {
// Service with 'serviceName' in the name
if (eachEntry.getKey().toLowerCase().contains(serviceName)) {
return eachEntry;
}
// user-provided service with 'serviceName' in the name
else if (eachEntry.getKey().equals("user-provided")) {
JsonArray upss = eachEntry.getValue().getAsJsonArray();
for (JsonElement ups : upss) {
String name = ups.getAsJsonObject().get("name").getAsString();
if (name.toLowerCase().contains(serviceName)) {
return eachEntry;
}
}
}
// Service with 'serviceName' in the db_type (if present)
else {
JsonElement element = eachEntry.getValue();
if (element.isJsonArray()) {
JsonArray array = element.getAsJsonArray();
if (array.size() > 0) {
JsonObject container = array.get(0).getAsJsonObject();
if (container.has("credentials")) {
JsonObject credentials = container.getAsJsonObject("credentials");
if (credentials.has("db_type")) {
String dbType = credentials.get("db_type").getAsString();
if (dbType.equals(serviceName)) {
return eachEntry;
}
}
}
}
}
}
}
return null;
}
public static Properties getLocalProperties(String fileName){
Properties properties = new Properties();
        InputStream inputStream = VCAPHelper.class.getClassLoader().getResourceAsStream(fileName);
        if (inputStream == null) {
            System.out.println("Could not find properties file: " + fileName);
            return properties;
        }
        try {
            properties.load(inputStream);
        } catch (IOException e) {
            e.printStackTrace();
        }
return properties;
}
}
|
[
"\"VCAP_SERVICES\""
] |
[] |
[
"VCAP_SERVICES"
] |
[]
|
["VCAP_SERVICES"]
|
java
| 1 | 0 | |
main.go
|
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
type Command interface {
Name() string
Parse(args []string) error
Exec()
RepoDir() string
Path(args ...string) string
}
type Common struct {
Root string
RootAbs string
Package string
Repo string
}
func (cmd Common) RepoDir() string {
return cmd.Path("src", filepath.FromSlash(cmd.Package))
}
func (cmd Common) Path(args ...string) string {
return filepath.Join(append([]string{cmd.RootAbs}, args...)...)
}
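// Example with hypothetical values: Root=/home/user/space and
// Package=github.com/storj/storj give
// RepoDir() == /home/user/space/src/github.com/storj/storj (on a Unix path).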
func main() {
common := Common{}
cmdname := ""
args := []string{}
{
set := flag.NewFlagSet("", flag.ContinueOnError)
set.StringVar(&common.Root, "root", os.Getenv("GOSPACE_ROOT"), "root directory (default GOSPACE_ROOT)")
set.StringVar(&common.Package, "pkg", os.Getenv("GOSPACE_PKG"), "package name (default GOSPACE_PKG)")
		set.StringVar(&common.Repo, "repo", os.Getenv("GOSPACE_REPO"), "repository (default GOSPACE_REPO)")
if err := set.Parse(os.Args[1:]); err != nil {
fmt.Fprintln(os.Stderr, "invalid args")
os.Exit(1)
}
fail := false
if common.Root == "" {
fmt.Fprintln(os.Stderr, "root directory is missing, please specify `-root` or GOSPACE_ROOT environment variable")
fail = true
}
if common.Package == "" {
fmt.Fprintln(os.Stderr, "package name is missing, please specify `-pkg` or GOSPACE_PKG environment variable")
fail = true
}
if common.Repo == "" {
fmt.Fprintln(os.Stderr, "repo name is missing, please specify `-repo` or GOSPACE_REPO environment variable")
fail = true
}
if fail {
os.Exit(1)
}
cmdname = set.Arg(0)
if set.NArg() > 1 {
args = set.Args()[1:]
}
common.RootAbs, _ = filepath.Abs(common.Root)
}
cmds := []Command{
&Setup{Common: common},
&Update{Common: common},
// &Cache{Common: common},
&IsTidy{Common: common},
&Hash{Common: common},
&ZipVendor{Common: common},
&UnzipVendor{Common: common},
&FlattenVendor{Common: common},
}
for _, cmd := range cmds {
if strings.EqualFold(cmdname, cmd.Name()) {
if err := cmd.Parse(args); err != nil {
fmt.Fprintln(os.Stderr, "invalid args", err)
os.Exit(1)
}
Exec(cmd)
return
}
}
fmt.Fprintln(os.Stderr, "unknown command:", cmdname)
fmt.Fprintln(os.Stderr, "supported:")
for _, cmd := range cmds {
fmt.Fprintln(os.Stderr, "\t"+cmd.Name())
}
os.Exit(1)
}
func Exec(cmd Command) {
gomodfilename := filepath.Join(cmd.RepoDir(), "go.mod")
gomod, gomoderr := ioutil.ReadFile(gomodfilename)
defer func() {
if gomoderr != nil {
return
}
gomodnew, gomodnewerr := ioutil.ReadFile(gomodfilename)
if gomodnewerr == nil && !bytes.Equal(gomod, gomodnew) {
ioutil.WriteFile(gomodfilename, gomod, 0644)
}
}()
cmd.Exec()
}
|
[
"\"GOSPACE_ROOT\"",
"\"GOSPACE_PKG\"",
"\"GOSPACE_REPO\""
] |
[] |
[
"GOSPACE_ROOT",
"GOSPACE_REPO",
"GOSPACE_PKG"
] |
[]
|
["GOSPACE_ROOT", "GOSPACE_REPO", "GOSPACE_PKG"]
|
go
| 3 | 0 | |
cmd/grpcurl/grpcurl.go
|
// Command grpcurl makes gRPC requests (a la cURL, but HTTP/2). It can use a supplied descriptor
// file, protobuf sources, or service reflection to translate JSON or text request data into the
// appropriate protobuf messages and vice versa for presenting the response contents.
package main
import (
"context"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/jhump/protoreflect/desc"
"github.com/jhump/protoreflect/grpcreflect"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
reflectpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/descriptorpb"
// Register gzip compressor so compressed responses will work
_ "google.golang.org/grpc/encoding/gzip"
// Register xds so xds and xds-experimental resolver schemes work
_ "google.golang.org/grpc/xds"
"github.com/fullstorydev/grpcurl"
)
// To avoid confusion between program error codes and the gRPC response
// status codes 'Cancelled' and 'Unknown' (1 and 2 respectively),
// the response status codes emitted use an offset of 64
const statusCodeOffset = 64
const no_version = "dev build <no version set>"
var version = no_version
var (
exit = os.Exit
isUnixSocket func() bool // nil when run on non-unix platform
flags = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
help = flags.Bool("help", false, prettify(`
Print usage instructions and exit.`))
printVersion = flags.Bool("version", false, prettify(`
Print version.`))
plaintext = flags.Bool("plaintext", false, prettify(`
Use plain-text HTTP/2 when connecting to server (no TLS).`))
insecure = flags.Bool("insecure", false, prettify(`
Skip server certificate and domain verification. (NOT SECURE!) Not
valid with -plaintext option.`))
cacert = flags.String("cacert", "", prettify(`
File containing trusted root certificates for verifying the server.
Ignored if -insecure is specified.`))
cert = flags.String("cert", "", prettify(`
File containing client certificate (public key), to present to the
server. Not valid with -plaintext option. Must also provide -key option.`))
key = flags.String("key", "", prettify(`
File containing client private key, to present to the server. Not valid
with -plaintext option. Must also provide -cert option.`))
protoset multiString
protoFiles multiString
importPaths multiString
addlHeaders multiString
rpcHeaders multiString
reflHeaders multiString
expandHeaders = flags.Bool("expand-headers", false, prettify(`
If set, headers may use '${NAME}' syntax to reference environment
variables. These will be expanded to the actual environment variable
value before sending to the server. For example, if there is an
environment variable defined like FOO=bar, then a header of
'key: ${FOO}' would expand to 'key: bar'. This applies to -H,
-rpc-header, and -reflect-header options. No other expansion/escaping is
performed. This can be used to supply credentials/secrets without having
to put them in command-line arguments.`))
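	// Example (hypothetical host): with FOO=bar set in the environment,
	//   grpcurl -expand-headers -H 'authorization: Bearer ${FOO}' myhost:443 list
	// sends the header "authorization: Bearer bar".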
authority = flags.String("authority", "", prettify(`
The authoritative name of the remote server. This value is passed as the
value of the ":authority" pseudo-header in the HTTP/2 protocol. When TLS
is used, this will also be used as the server name when verifying the
server's certificate. It defaults to the address that is provided in the
positional arguments.`))
userAgent = flags.String("user-agent", "", prettify(`
If set, the specified value will be added to the User-Agent header set
by the grpc-go library.
`))
data = flags.String("d", "", prettify(`
Data for request contents. If the value is '@' then the request contents
are read from stdin. For calls that accept a stream of requests, the
contents should include all such request messages concatenated together
(possibly delimited; see -format).`))
format = flags.String("format", "json", prettify(`
The format of request data. The allowed values are 'json' or 'text'. For
'json', the input data must be in JSON format. Multiple request values
may be concatenated (messages with a JSON representation other than
object must be separated by whitespace, such as a newline). For 'text',
the input data must be in the protobuf text format, in which case
multiple request values must be separated by the "record separator"
ASCII character: 0x1E. The stream should not end in a record separator.
If it does, it will be interpreted as a final, blank message after the
separator.`))
allowUnknownFields = flags.Bool("allow-unknown-fields", false, prettify(`
		When true and the 'json' format is used, the request contents may
		contain unknown fields, which will be ignored when parsing the
		request.`))
connectTimeout = flags.Float64("connect-timeout", 0, prettify(`
The maximum time, in seconds, to wait for connection to be established.
Defaults to 10 seconds.`))
formatError = flags.Bool("format-error", false, prettify(`
When a non-zero status is returned, format the response using the
		value set by the -format flag.`))
keepaliveTime = flags.Float64("keepalive-time", 0, prettify(`
If present, the maximum idle time in seconds, after which a keepalive
probe is sent. If the connection remains idle and no keepalive response
is received for this same period then the connection is closed and the
operation fails.`))
maxTime = flags.Float64("max-time", 0, prettify(`
The maximum total time the operation can take, in seconds. This is
useful for preventing batch jobs that use grpcurl from hanging due to
slow or bad network links or due to incorrect stream method usage.`))
maxMsgSz = flags.Int("max-msg-sz", 0, prettify(`
The maximum encoded size of a response message, in bytes, that grpcurl
will accept. If not specified, defaults to 4,194,304 (4 megabytes).`))
emitDefaults = flags.Bool("emit-defaults", false, prettify(`
Emit default values for JSON-encoded responses.`))
protosetOut = flags.String("protoset-out", "", prettify(`
The name of a file to be written that will contain a FileDescriptorSet
proto. With the list and describe verbs, the listed or described
elements and their transitive dependencies will be written to the named
file if this option is given. When invoking an RPC and this option is
given, the method being invoked and its transitive dependencies will be
included in the output file.`))
msgTemplate = flags.Bool("msg-template", false, prettify(`
When describing messages, show a template of input data.`))
verbose = flags.Bool("v", false, prettify(`
Enable verbose output.`))
veryVerbose = flags.Bool("vv", false, prettify(`
Enable very verbose output.`))
serverName = flags.String("servername", "", prettify(`
Override server name when validating TLS certificate. This flag is
ignored if -plaintext or -insecure is used.
NOTE: Prefer -authority. This flag may be removed in the future. It is
an error to use both -authority and -servername (though this will be
permitted if they are both set to the same value, to increase backwards
compatibility with earlier releases that allowed both to be set).`))
reflection = optionalBoolFlag{val: true}
)
func init() {
flags.Var(&addlHeaders, "H", prettify(`
Additional headers in 'name: value' format. May specify more than one
via multiple flags. These headers will also be included in reflection
requests to a server.`))
flags.Var(&rpcHeaders, "rpc-header", prettify(`
Additional RPC headers in 'name: value' format. May specify more than
one via multiple flags. These headers will *only* be used when invoking
the requested RPC method. They are excluded from reflection requests.`))
flags.Var(&reflHeaders, "reflect-header", prettify(`
Additional reflection headers in 'name: value' format. May specify more
than one via multiple flags. These headers will *only* be used during
reflection requests and will be excluded when invoking the requested RPC
method.`))
flags.Var(&protoset, "protoset", prettify(`
The name of a file containing an encoded FileDescriptorSet. This file's
contents will be used to determine the RPC schema instead of querying
for it from the remote server via the gRPC reflection API. When set: the
'list' action lists the services found in the given descriptors (vs.
those exposed by the remote server), and the 'describe' action describes
symbols found in the given descriptors. May specify more than one via
multiple -protoset flags. It is an error to use both -protoset and
-proto flags.`))
flags.Var(&protoFiles, "proto", prettify(`
The name of a proto source file. Source files given will be used to
determine the RPC schema instead of querying for it from the remote
server via the gRPC reflection API. When set: the 'list' action lists
the services found in the given files and their imports (vs. those
exposed by the remote server), and the 'describe' action describes
symbols found in the given files. May specify more than one via multiple
-proto flags. Imports will be resolved using the given -import-path
flags. Multiple proto files can be specified by specifying multiple
-proto flags. It is an error to use both -protoset and -proto flags.`))
flags.Var(&importPaths, "import-path", prettify(`
The path to a directory from which proto sources can be imported, for
use with -proto flags. Multiple import paths can be configured by
specifying multiple -import-path flags. Paths will be searched in the
order given. If no import paths are given, all files (including all
imports) must be provided as -proto flags, and grpcurl will attempt to
resolve all import statements from the set of file names given.`))
flags.Var(&reflection, "use-reflection", prettify(`
When true, server reflection will be used to determine the RPC schema.
Defaults to true unless a -proto or -protoset option is provided. If
-use-reflection is used in combination with a -proto or -protoset flag,
the provided descriptor sources will be used in addition to server
reflection to resolve messages and extensions.`))
}
type multiString []string
func (s *multiString) String() string {
return strings.Join(*s, ",")
}
func (s *multiString) Set(value string) error {
*s = append(*s, value)
return nil
}
// Uses a file source as a fallback for resolving symbols and extensions, but
// only uses the reflection source for listing services
type compositeSource struct {
reflection grpcurl.DescriptorSource
file grpcurl.DescriptorSource
}
func (cs compositeSource) ListServices() ([]string, error) {
return cs.reflection.ListServices()
}
func (cs compositeSource) FindSymbol(fullyQualifiedName string) (desc.Descriptor, error) {
d, err := cs.reflection.FindSymbol(fullyQualifiedName)
if err == nil {
return d, nil
}
return cs.file.FindSymbol(fullyQualifiedName)
}
func (cs compositeSource) AllExtensionsForType(typeName string) ([]*desc.FieldDescriptor, error) {
exts, err := cs.reflection.AllExtensionsForType(typeName)
if err != nil {
// On error fall back to file source
return cs.file.AllExtensionsForType(typeName)
}
// Track the tag numbers from the reflection source
tags := make(map[int32]bool)
for _, ext := range exts {
tags[ext.GetNumber()] = true
}
fileExts, err := cs.file.AllExtensionsForType(typeName)
if err != nil {
return exts, nil
}
for _, ext := range fileExts {
// Prioritize extensions found via reflection
if !tags[ext.GetNumber()] {
exts = append(exts, ext)
}
}
return exts, nil
}
func main() {
flags.Usage = usage
flags.Parse(os.Args[1:])
if *help {
usage()
os.Exit(0)
}
if *printVersion {
fmt.Fprintf(os.Stderr, "%s %s\n", filepath.Base(os.Args[0]), version)
os.Exit(0)
}
// Do extra validation on arguments and figure out what user asked us to do.
if *connectTimeout < 0 {
fail(nil, "The -connect-timeout argument must not be negative.")
}
if *keepaliveTime < 0 {
fail(nil, "The -keepalive-time argument must not be negative.")
}
if *maxTime < 0 {
fail(nil, "The -max-time argument must not be negative.")
}
if *maxMsgSz < 0 {
fail(nil, "The -max-msg-sz argument must not be negative.")
}
if *plaintext && *insecure {
fail(nil, "The -plaintext and -insecure arguments are mutually exclusive.")
}
if *plaintext && *cert != "" {
fail(nil, "The -plaintext and -cert arguments are mutually exclusive.")
}
if *plaintext && *key != "" {
fail(nil, "The -plaintext and -key arguments are mutually exclusive.")
}
if (*key == "") != (*cert == "") {
fail(nil, "The -cert and -key arguments must be used together and both be present.")
}
if *format != "json" && *format != "text" {
fail(nil, "The -format option must be 'json' or 'text'.")
}
if *emitDefaults && *format != "json" {
warn("The -emit-defaults is only used when using json format.")
}
args := flags.Args()
if len(args) == 0 {
fail(nil, "Too few arguments.")
}
var target string
if args[0] != "list" && args[0] != "describe" {
target = args[0]
args = args[1:]
}
if len(args) == 0 {
fail(nil, "Too few arguments.")
}
var list, describe, invoke bool
if args[0] == "list" {
list = true
args = args[1:]
} else if args[0] == "describe" {
describe = true
args = args[1:]
} else {
invoke = true
}
verbosityLevel := 0
if *verbose {
verbosityLevel = 1
}
if *veryVerbose {
verbosityLevel = 2
}
var symbol string
if invoke {
if len(args) == 0 {
fail(nil, "Too few arguments.")
}
symbol = args[0]
args = args[1:]
} else {
if *data != "" {
warn("The -d argument is not used with 'list' or 'describe' verb.")
}
if len(rpcHeaders) > 0 {
warn("The -rpc-header argument is not used with 'list' or 'describe' verb.")
}
if len(args) > 0 {
symbol = args[0]
args = args[1:]
}
}
if len(args) > 0 {
fail(nil, "Too many arguments.")
}
if invoke && target == "" {
fail(nil, "No host:port specified.")
}
if len(protoset) == 0 && len(protoFiles) == 0 && target == "" {
fail(nil, "No host:port specified, no protoset specified, and no proto sources specified.")
}
if len(protoset) > 0 && len(reflHeaders) > 0 {
warn("The -reflect-header argument is not used when -protoset files are used.")
}
if len(protoset) > 0 && len(protoFiles) > 0 {
fail(nil, "Use either -protoset files or -proto files, but not both.")
}
if len(importPaths) > 0 && len(protoFiles) == 0 {
warn("The -import-path argument is not used unless -proto files are used.")
}
if !reflection.val && len(protoset) == 0 && len(protoFiles) == 0 {
fail(nil, "No protoset files or proto files specified and -use-reflection set to false.")
}
// Protoset or protofiles provided and -use-reflection unset
if !reflection.set && (len(protoset) > 0 || len(protoFiles) > 0) {
reflection.val = false
}
ctx := context.Background()
if *maxTime > 0 {
timeout := time.Duration(*maxTime * float64(time.Second))
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
dial := func() *grpc.ClientConn {
dialTime := 10 * time.Second
if *connectTimeout > 0 {
dialTime = time.Duration(*connectTimeout * float64(time.Second))
}
ctx, cancel := context.WithTimeout(ctx, dialTime)
defer cancel()
var opts []grpc.DialOption
if *keepaliveTime > 0 {
timeout := time.Duration(*keepaliveTime * float64(time.Second))
opts = append(opts, grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: timeout,
Timeout: timeout,
}))
}
if *maxMsgSz > 0 {
opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(*maxMsgSz)))
}
var creds credentials.TransportCredentials
if !*plaintext {
tlsConf, err := grpcurl.ClientTLSConfig(*insecure, *cacert, *cert, *key)
if err != nil {
fail(err, "Failed to create TLS config")
}
sslKeylogFile := os.Getenv("SSLKEYLOGFILE")
if sslKeylogFile != "" {
w, err := os.OpenFile(sslKeylogFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
if err != nil {
fail(err, "Could not open SSLKEYLOGFILE %s", sslKeylogFile)
}
tlsConf.KeyLogWriter = w
}
creds = credentials.NewTLS(tlsConf)
// can use either -servername or -authority; but not both
if *serverName != "" && *authority != "" {
if *serverName == *authority {
warn("Both -servername and -authority are present; prefer only -authority.")
} else {
fail(nil, "Cannot specify different values for -servername and -authority.")
}
}
overrideName := *serverName
if overrideName == "" {
overrideName = *authority
}
if overrideName != "" {
if err := creds.OverrideServerName(overrideName); err != nil {
fail(err, "Failed to override server name as %q", overrideName)
}
}
} else if *authority != "" {
opts = append(opts, grpc.WithAuthority(*authority))
}
grpcurlUA := "grpcurl/" + version
if version == no_version {
grpcurlUA = "grpcurl/dev-build (no version set)"
}
if *userAgent != "" {
grpcurlUA = *userAgent + " " + grpcurlUA
}
opts = append(opts, grpc.WithUserAgent(grpcurlUA))
network := "tcp"
if isUnixSocket != nil && isUnixSocket() {
network = "unix"
}
cc, err := grpcurl.BlockingDial(ctx, network, target, creds, opts...)
if err != nil {
fail(err, "Failed to dial target host %q", target)
}
return cc
}
printFormattedStatus := func(w io.Writer, stat *status.Status, formatter grpcurl.Formatter) {
formattedStatus, err := formatter(stat.Proto())
if err != nil {
fmt.Fprintf(w, "ERROR: %v", err.Error())
}
fmt.Fprint(w, formattedStatus)
}
if *expandHeaders {
var err error
addlHeaders, err = grpcurl.ExpandHeaders(addlHeaders)
if err != nil {
fail(err, "Failed to expand additional headers")
}
rpcHeaders, err = grpcurl.ExpandHeaders(rpcHeaders)
if err != nil {
fail(err, "Failed to expand rpc headers")
}
reflHeaders, err = grpcurl.ExpandHeaders(reflHeaders)
if err != nil {
fail(err, "Failed to expand reflection headers")
}
}
var cc *grpc.ClientConn
var descSource grpcurl.DescriptorSource
var refClient *grpcreflect.Client
var fileSource grpcurl.DescriptorSource
if len(protoset) > 0 {
var err error
fileSource, err = grpcurl.DescriptorSourceFromProtoSets(protoset...)
if err != nil {
fail(err, "Failed to process proto descriptor sets.")
}
} else if len(protoFiles) > 0 {
var err error
fileSource, err = grpcurl.DescriptorSourceFromProtoFiles(importPaths, protoFiles...)
if err != nil {
fail(err, "Failed to process proto source files.")
}
}
if reflection.val {
md := grpcurl.MetadataFromHeaders(append(addlHeaders, reflHeaders...))
refCtx := metadata.NewOutgoingContext(ctx, md)
cc = dial()
refClient = grpcreflect.NewClient(refCtx, reflectpb.NewServerReflectionClient(cc))
reflSource := grpcurl.DescriptorSourceFromServer(ctx, refClient)
if fileSource != nil {
descSource = compositeSource{reflSource, fileSource}
} else {
descSource = reflSource
}
} else {
descSource = fileSource
}
// arrange for the RPCs to be cleanly shutdown
reset := func() {
if refClient != nil {
refClient.Reset()
refClient = nil
}
if cc != nil {
cc.Close()
cc = nil
}
}
defer reset()
exit = func(code int) {
// since defers aren't run by os.Exit...
reset()
os.Exit(code)
}
if list {
if symbol == "" {
svcs, err := grpcurl.ListServices(descSource)
if err != nil {
fail(err, "Failed to list services")
}
if len(svcs) == 0 {
fmt.Println("(No services)")
} else {
for _, svc := range svcs {
fmt.Printf("%s\n", svc)
}
}
if err := writeProtoset(descSource, svcs...); err != nil {
fail(err, "Failed to write protoset to %s", *protosetOut)
}
} else {
methods, err := grpcurl.ListMethods(descSource, symbol)
if err != nil {
fail(err, "Failed to list methods for service %q", symbol)
}
if len(methods) == 0 {
fmt.Println("(No methods)") // probably unlikely
} else {
for _, m := range methods {
fmt.Printf("%s\n", m)
}
}
if err := writeProtoset(descSource, symbol); err != nil {
fail(err, "Failed to write protoset to %s", *protosetOut)
}
}
} else if describe {
var symbols []string
if symbol != "" {
symbols = []string{symbol}
} else {
// if no symbol given, describe all exposed services
svcs, err := descSource.ListServices()
if err != nil {
fail(err, "Failed to list services")
}
if len(svcs) == 0 {
fmt.Println("Server returned an empty list of exposed services")
}
symbols = svcs
}
for _, s := range symbols {
if s[0] == '.' {
s = s[1:]
}
dsc, err := descSource.FindSymbol(s)
if err != nil {
fail(err, "Failed to resolve symbol %q", s)
}
fqn := dsc.GetFullyQualifiedName()
var elementType string
switch d := dsc.(type) {
case *desc.MessageDescriptor:
elementType = "a message"
parent, ok := d.GetParent().(*desc.MessageDescriptor)
if ok {
if d.IsMapEntry() {
for _, f := range parent.GetFields() {
if f.IsMap() && f.GetMessageType() == d {
// found it: describe the map field instead
elementType = "the entry type for a map field"
dsc = f
break
}
}
} else {
// see if it's a group
for _, f := range parent.GetFields() {
if f.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP && f.GetMessageType() == d {
// found it: describe the map field instead
elementType = "the type of a group field"
dsc = f
break
}
}
}
}
case *desc.FieldDescriptor:
elementType = "a field"
if d.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
elementType = "a group field"
} else if d.IsExtension() {
elementType = "an extension"
}
case *desc.OneOfDescriptor:
elementType = "a one-of"
case *desc.EnumDescriptor:
elementType = "an enum"
case *desc.EnumValueDescriptor:
elementType = "an enum value"
case *desc.ServiceDescriptor:
elementType = "a service"
case *desc.MethodDescriptor:
elementType = "a method"
default:
err = fmt.Errorf("descriptor has unrecognized type %T", dsc)
fail(err, "Failed to describe symbol %q", s)
}
txt, err := grpcurl.GetDescriptorText(dsc, descSource)
if err != nil {
fail(err, "Failed to describe symbol %q", s)
}
fmt.Printf("%s is %s:\n", fqn, elementType)
fmt.Println(txt)
if dsc, ok := dsc.(*desc.MessageDescriptor); ok && *msgTemplate {
// for messages, also show a template in JSON, to make it easier to
// create a request to invoke an RPC
tmpl := grpcurl.MakeTemplate(dsc)
options := grpcurl.FormatOptions{EmitJSONDefaultFields: true}
_, formatter, err := grpcurl.RequestParserAndFormatter(grpcurl.Format(*format), descSource, nil, options)
if err != nil {
fail(err, "Failed to construct formatter for %q", *format)
}
str, err := formatter(tmpl)
if err != nil {
fail(err, "Failed to print template for message %s", s)
}
fmt.Println("\nMessage template:")
fmt.Println(str)
}
}
if err := writeProtoset(descSource, symbols...); err != nil {
fail(err, "Failed to write protoset to %s", *protosetOut)
}
} else {
// Invoke an RPC
if cc == nil {
cc = dial()
}
var in io.Reader
if *data == "@" {
in = os.Stdin
} else {
in = strings.NewReader(*data)
}
// if not verbose output, then also include record delimiters
// between each message, so output could potentially be piped
// to another grpcurl process
includeSeparators := verbosityLevel == 0
options := grpcurl.FormatOptions{
EmitJSONDefaultFields: *emitDefaults,
IncludeTextSeparator: includeSeparators,
AllowUnknownFields: *allowUnknownFields,
}
rf, formatter, err := grpcurl.RequestParserAndFormatter(grpcurl.Format(*format), descSource, in, options)
if err != nil {
fail(err, "Failed to construct request parser and formatter for %q", *format)
}
h := &grpcurl.DefaultEventHandler{
Out: os.Stdout,
Formatter: formatter,
VerbosityLevel: verbosityLevel,
}
err = grpcurl.InvokeRPC(ctx, descSource, cc, symbol, append(addlHeaders, rpcHeaders...), h, rf.Next)
if err != nil {
if errStatus, ok := status.FromError(err); ok && *formatError {
h.Status = errStatus
} else {
fail(err, "Error invoking method %q", symbol)
}
}
reqSuffix := ""
respSuffix := ""
reqCount := rf.NumRequests()
if reqCount != 1 {
reqSuffix = "s"
}
if h.NumResponses != 1 {
respSuffix = "s"
}
if verbosityLevel > 0 {
fmt.Printf("Sent %d request%s and received %d response%s\n", reqCount, reqSuffix, h.NumResponses, respSuffix)
}
if h.Status.Code() != codes.OK {
if *formatError {
printFormattedStatus(os.Stderr, h.Status, formatter)
} else {
grpcurl.PrintStatus(os.Stderr, h.Status, formatter)
}
exit(statusCodeOffset + int(h.Status.Code()))
}
}
}
func usage() {
fmt.Fprintf(os.Stderr, `Usage:
%s [flags] [address] [list|describe] [symbol]
The 'address' is only optional when used with 'list' or 'describe' and a
protoset or proto flag is provided.
If 'list' is indicated, the symbol (if present) should be a fully-qualified
service name. If present, all methods of that service are listed. If not
present, all exposed services are listed, or all services defined in protosets.
If 'describe' is indicated, the descriptor for the given symbol is shown. The
symbol should be a fully-qualified service, enum, or message name. If no symbol
is given then the descriptors for all exposed or known services are shown.
If neither verb is present, the symbol must be a fully-qualified method name in
'service/method' or 'service.method' format. In this case, the request body will
be used to invoke the named method. If no body is given but one is required
(i.e. the method is unary or server-streaming), an empty instance of the
method's request type will be sent.
The address will typically be in the form "host:port" where host can be an IP
address or a hostname and port is a numeric port or service name. If an IPv6
address is given, it must be surrounded by brackets, like "[2001:db8::1]". For
Unix variants, if a -unix=true flag is present, then the address must be the
path to the domain socket.
Available flags:
`, os.Args[0])
flags.PrintDefaults()
}
func prettify(docString string) string {
parts := strings.Split(docString, "\n")
// cull empty lines and also remove trailing and leading spaces
// from each line in the doc string
j := 0
for _, part := range parts {
part = strings.TrimSpace(part)
if part == "" {
continue
}
parts[j] = part
j++
}
return strings.Join(parts[:j], "\n"+indent())
}
func warn(msg string, args ...interface{}) {
msg = fmt.Sprintf("Warning: %s\n", msg)
fmt.Fprintf(os.Stderr, msg, args...)
}
func fail(err error, msg string, args ...interface{}) {
if err != nil {
msg += ": %v"
args = append(args, err)
}
fmt.Fprintf(os.Stderr, msg, args...)
fmt.Fprintln(os.Stderr)
if err != nil {
exit(1)
} else {
// nil error means it was CLI usage issue
fmt.Fprintf(os.Stderr, "Try '%s -help' for more details.\n", os.Args[0])
exit(2)
}
}
func writeProtoset(descSource grpcurl.DescriptorSource, symbols ...string) error {
if *protosetOut == "" {
return nil
}
f, err := os.Create(*protosetOut)
if err != nil {
return err
}
defer f.Close()
return grpcurl.WriteProtoset(f, descSource, symbols...)
}
type optionalBoolFlag struct {
set, val bool
}
func (f *optionalBoolFlag) String() string {
if !f.set {
return "unset"
}
return strconv.FormatBool(f.val)
}
func (f *optionalBoolFlag) Set(s string) error {
v, err := strconv.ParseBool(s)
if err != nil {
return err
}
f.set = true
f.val = v
return nil
}
func (f *optionalBoolFlag) IsBoolFlag() bool {
return true
}
|
[
"\"SSLKEYLOGFILE\""
] |
[] |
[
"SSLKEYLOGFILE"
] |
[]
|
["SSLKEYLOGFILE"]
|
go
| 1 | 0 | |
management/example_test.go
|
package management_test
import (
"fmt"
"os"
"gopkg.in/auth0.v1"
"gopkg.in/auth0.v1/management"
)
var (
domain = os.Getenv("AUTH0_DOMAIN")
id = os.Getenv("AUTH0_CLIENT_ID")
secret = os.Getenv("AUTH0_CLIENT_SECRET")
)
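// The example below talks to a live tenant; it assumes AUTH0_DOMAIN,
// AUTH0_CLIENT_ID and AUTH0_CLIENT_SECRET identify an application that is
// allowed to call the Auth0 Management API.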
func ExampleUser() {
m, err := management.New(domain, id, secret)
if err != nil {
fmt.Printf("Failed creating management client. %s", err)
}
u := &management.User{
Connection: auth0.String("Username-Password-Authentication"),
Email: auth0.String("[email protected]"),
Password: auth0.String("F4e3DA1a6cDD"),
}
err = m.User.Create(u)
if err != nil {
fmt.Printf("Failed creating user. %s", err)
}
defer m.User.Delete(auth0.StringValue(u.ID))
fmt.Printf("User created!")
// Output: User created!
}
|
[
"\"AUTH0_DOMAIN\"",
"\"AUTH0_CLIENT_ID\"",
"\"AUTH0_CLIENT_SECRET\""
] |
[] |
[
"AUTH0_DOMAIN",
"AUTH0_CLIENT_SECRET",
"AUTH0_CLIENT_ID"
] |
[]
|
["AUTH0_DOMAIN", "AUTH0_CLIENT_SECRET", "AUTH0_CLIENT_ID"]
|
go
| 3 | 0 | |
pkg/network/linux-bridge.go
|
package network
import (
"os"
"path/filepath"
"reflect"
"github.com/kubevirt/cluster-network-addons-operator/pkg/render"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
opv1alpha1 "github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/v1alpha1"
)
func changeSafeLinuxBridge(prev, next *opv1alpha1.NetworkAddonsConfigSpec) []error {
if prev.LinuxBridge != nil && !reflect.DeepEqual(prev.LinuxBridge, next.LinuxBridge) {
return []error{errors.Errorf("cannot modify Linux Bridge configuration once it is deployed")}
}
return nil
}
// renderLinuxBridge generates the manifests of Linux Bridge
func renderLinuxBridge(conf *opv1alpha1.NetworkAddonsConfigSpec, manifestDir string, enableSCC bool) ([]*unstructured.Unstructured, error) {
if conf.LinuxBridge == nil {
return nil, nil
}
// render the manifests on disk
data := render.MakeRenderData()
data.Data["LinuxBridgeImage"] = os.Getenv("LINUX_BRIDGE_IMAGE")
data.Data["ImagePullPolicy"] = conf.ImagePullPolicy
data.Data["EnableSCC"] = enableSCC
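	// LINUX_BRIDGE_IMAGE is read from the operator's own environment
	// (presumably set on its Deployment) and is substituted into the rendered
	// manifests as the linux-bridge CNI image.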
objs, err := render.RenderDir(filepath.Join(manifestDir, "linux-bridge"), &data)
if err != nil {
return nil, errors.Wrap(err, "failed to render linux-bridge manifests")
}
return objs, nil
}
|
[
"\"LINUX_BRIDGE_IMAGE\""
] |
[] |
[
"LINUX_BRIDGE_IMAGE"
] |
[]
|
["LINUX_BRIDGE_IMAGE"]
|
go
| 1 | 0 |